zotero/chrome/chromeFiles/content/scholar/fileInterface.js

var Scholar_File_Interface = new function() {
var _unresponsiveScriptPreference, _importCollection, _notifyItem, _notifyCollection;
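// _unresponsiveScriptPreference caches the user's dom.max_script_run_time value
// while it is temporarily zeroed out; _importCollection holds the collection
// created to receive imported items. _notifyItem and _notifyCollection are
// declared here but not used elsewhere in this file.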
this.exportFile = exportFile;
this.exportCollection = exportCollection;
this.exportItems = exportItems;
this.importFile = importFile;
this.bibliographyFromCollection = bibliographyFromCollection;
this.bibliographyFromItems = bibliographyFromItems;
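// Typical entry points, as they might be wired up from the UI (illustrative
// sketch only; the actual callers live in the XUL overlay, not in this file):
//   Scholar_File_Interface.importFile();
//   Scholar_File_Interface.exportCollection();
//   Scholar_File_Interface.bibliographyFromItems();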
/*
* Creates Scholar.Translate instance and shows file picker for file export
*/
function exportFile(name, items) {
var translation = new Scholar.Translate("export");
var translators = translation.getTranslators();
const nsIFilePicker = Components.interfaces.nsIFilePicker;
var fp = Components.classes["@mozilla.org/filepicker;1"]
.createInstance(nsIFilePicker);
fp.init(window, Scholar.getString("fileInterface.export"), nsIFilePicker.modeSave);
// set file name and extension.
name = (name ? name : Scholar.getString("pane.collections.library"));
fp.defaultString = name+"."+translators[0].target;
// add save filters
for(var i in translators) {
var label = translators[i].label;
// show the extension in parentheses in the label (the Mac file picker
// doesn't display it; Windows users already see the extension)
label += " (."+translators[i].target+")";
fp.appendFilter(label, "*."+translators[i].target);
}
var rv = fp.show();
if (rv == nsIFilePicker.returnOK || rv == nsIFilePicker.returnReplace) {
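// if specific items were passed in, restrict the export to them; otherwise
// the translator presumably falls back to exporting the entire library
// (hence the default "library" name above)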
if(items) {
translation.setItems(items);
}
translation.setLocation(fp.file);
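// fp.filterIndex lines up with the translators array because the save
// filters were appended in the same order above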
translation.setTranslator(translators[fp.filterIndex]);
translation.setHandler("options", _exportOptions);
translation.setHandler("done", _exportDone);
_disableUnresponsive();
Scholar_File_Interface.Progress.show(
Scholar.getString("fileInterface.itemsExported"),
function() {
translation.translate();
});
}
}
/*
* exports a collection or saved search
*/
function exportCollection() {
var collection = ScholarPane.getSelectedCollection();
if (collection)
{
exportFile(collection.getName(), Scholar.getItems(collection.getID()));
return;
}
var searchRef = ScholarPane.getSelectedSavedSearch();
if (searchRef)
{
var search = new Scholar.Search();
search.load(searchRef['id']);
exportFile(search.getName(), Scholar.Items.get(search.search()));
return;
}
throw ("No collection or saved search currently selected");
}
/*
* exports items
*/
function exportItems() {
var items = ScholarPane.getSelectedItems();
if(!items || !items.length) throw("no items currently selected");
exportFile(Scholar.getString("fileInterface.exportedItems"), items);
}
/*
* shows the export options dialog; returns the options if confirmed, or
* false if the user cancelled
*/
function _exportOptions(obj, options) {
var io = {options:options}
window.openDialog("chrome://scholar/content/exportOptions.xul",
"_blank","chrome,modal,centerscreen", io);
if(io.options) {
// refocus the progress window
Scholar_File_Interface.Progress.show();
return options;
} else {
return false;
}
}
/*
* closes items exported indicator
*/
function _exportDone(obj, worked) {
Scholar_File_Interface.Progress.close();
_restoreUnresponsive();
}
/*
* Creates Scholar.Translate instance and shows file picker for file import
*/
function importFile() {
var translation = new Scholar.Translate("import");
var translators = translation.getTranslators();
const nsIFilePicker = Components.interfaces.nsIFilePicker;
var fp = Components.classes["@mozilla.org/filepicker;1"]
.createInstance(nsIFilePicker);
fp.init(window, Scholar.getString("fileInterface.import"), nsIFilePicker.modeOpen);
fp.appendFilters(nsIFilePicker.filterAll);
for(var i in translators) {
fp.appendFilter(translators[i].label, "*."+translators[i].target);
}
var rv = fp.show();
if (rv == nsIFilePicker.returnOK || rv == nsIFilePicker.returnReplace) {
translation.setLocation(fp.file);
// get translators again, because now we can check them against the file
translators = translation.getTranslators();
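// once a location is set, getTranslators() only returns translators that
// can handle this particular file, so an empty list means the format is
// unsupported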
if(translators.length) {
// create a new collection to take in imported items
var date = new Date();
_importCollection = Scholar.Collections.add(Scholar.getString("fileInterface.imported")+" "+date.toLocaleString());
// import items
translation.setTranslator(translators[0]);
translation.setHandler("collectionDone", _importCollectionDone);
translation.setHandler("done", _importDone);
_disableUnresponsive();
// disable notifier
Scholar.Notifier.disable();
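// notifications are batched and fired manually in _importCollectionDone and
// _importDone, so the UI isn't refreshed once per imported item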
// show progress indicator
Scholar_File_Interface.Progress.show(
Scholar.getString("fileInterface.itemsImported"),
function() {
translation.translate();
});
} else {
window.alert(Scholar.getString("fileInterface.fileFormatUnsupported"));
}
}
}
/*
* Saves collections after they've been imported. Receives the imported
* collection object; only called for top-level collections
*/
function _importCollectionDone(obj, collection) {
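// briefly re-enable the notifier so the UI picks up the new collection, nest
// it under the umbrella "Imported ..." collection, then disable the notifier
// again for the remainder of the import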
Scholar.Notifier.enable();
Scholar.Notifier.trigger("add", "collection", collection.getID());
collection.changeParent(_importCollection.getID());
Scholar.Notifier.disable();
}
/*
* finishes an import: adds the imported items to the import collection,
* fires the queued notifications, and closes the progress indicator
*/
function _importDone(obj, worked) {
// add items to import collection
for each(var itemID in obj.newItems) {
_importCollection.addItem(itemID);
}
// run notify
Scholar.Notifier.enable();
if(obj.newItems.length) {
Scholar.Notifier.trigger("add", "item", obj.newItems);
Scholar.Notifier.trigger("modify", "collection", _importCollection.getID());
}
Scholar_File_Interface.Progress.close();
_restoreUnresponsive();
if(!worked) {
window.alert(Scholar.getString("fileInterface.importError"));
}
}
/*
* disables the "unresponsive script" warning; necessary for import and
* export, which can take quite a while to execute
*/
function _disableUnresponsive() {
var prefService = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
_unresponsiveScriptPreference = prefService.getIntPref("dom.max_script_run_time");
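// a value of 0 disables the slow-script dialog entirely; the original value
// is put back in _restoreUnresponsive()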
prefService.setIntPref("dom.max_script_run_time", 0);
}
/*
* restores the "unresponsive script" warning
*/
function _restoreUnresponsive() {
var prefService = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
prefService.setIntPref("dom.max_script_run_time", _unresponsiveScriptPreference);
}
/*
* Creates a bibliography from a collection or saved search
*/
function bibliographyFromCollection() {
var collection = ScholarPane.getSelectedCollection();
if (collection)
{
_doBibliographyOptions(collection.getName(), Scholar.getItems(collection.getID()));
return;
}
var searchRef = ScholarPane.getSelectedSavedSearch();
if (searchRef)
{
var search = new Scholar.Search();
search.load(searchRef['id']);
_doBibliographyOptions(search.getName(), Scholar.Items.get(search.search()));
return;
}
throw ("No collection or saved search currently selected");
}
/*
* Creates a bibliography from the currently selected items
*/
function bibliographyFromItems() {
var items = ScholarPane.getSelectedItems();
if(!items || !items.length) throw("no items currently selected");
_doBibliographyOptions(Scholar.getString("fileInterface.untitledBibliography"), items);
}
/*
* Shows bibliography options and creates a bibliography
*/
function _doBibliographyOptions(name, items) {
var io = new Object();
var newDialog = window.openDialog("chrome://scholar/content/bibliography.xul",
"_blank","chrome,modal,centerscreen", io);
// determine output format
var format = "HTML";
if(io.output == "save-as-rtf") {
format = "RTF";
}
// generate bibliography
var bibliography = Scholar.Cite.getBibliography(io.style, items, format);
if(io.output == "print") {
// printable bibliography, using a hidden browser
var browser = Scholar.Browser.createHiddenBrowser(window);
browser.contentDocument.write(bibliography);
// this is kinda nasty, but we have to temporarily modify the user's
// settings to eliminate the header and footer. the other way to do
// this would be to attempt to print with an embedded browser, but
// it's not even clear how to attempt to create one
var prefService = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
var prefsToClear = ["print.print_headerleft", "print.print_headercenter",
"print.print_headerright", "print.print_footerleft",
"print.print_footercenter", "print.print_footerright"];
var oldPrefs = new Array();
for(var i in prefsToClear) {
oldPrefs[i] = prefService.getCharPref(prefsToClear[i]);
prefService.setCharPref(prefsToClear[i], "");
}
// print
browser.contentWindow.print();
// set the prefs back
for(var i in prefsToClear) {
prefService.setCharPref(prefsToClear[i], oldPrefs[i]);
}
Scholar.Browser.deleteHiddenBrowser(browser);
} else if(io.output == "save-as-html") {
var fStream = _saveBibliography(name, "HTML");
if(fStream !== false) {
var html = "";
html +='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n';
html +='<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n';
html +='<head>\n';
html +='<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>\n';
html +='<title>'+Scholar.getString("fileInterface.bibliographyHTMLTitle")+'</title>\n';
html +='</head>\n';
html +='<body>\n';
html += bibliography;
html +='</body>\n';
html +='</html>\n';
// create UTF-8 output stream
var os = Components.classes["@mozilla.org/intl/converter-output-stream;1"].
createInstance(Components.interfaces.nsIConverterOutputStream);
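// init(stream, charset, bufferSize, replacementChar): 0 requests the default
// buffer size, and "?" stands in for any character the target charset
// can't represent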
os.init(fStream, "UTF-8", 0, "?");
os.writeString(html);
os.close();
fStream.close();
}
} else if(io.output == "save-as-rtf") {
var fStream = _saveBibliography(name, "RTF");
if(fStream !== false) {
fStream.write(bibliography, bibliography.length);
fStream.close();
}
} else if(io.output == "copy-to-clipboard") {
// copy to clipboard
var transferable = Components.classes["@mozilla.org/widget/transferable;1"].
createInstance(Components.interfaces.nsITransferable);
var clipboardService = Components.classes["@mozilla.org/widget/clipboard;1"].
getService(Components.interfaces.nsIClipboard);
// add HTML
var str = Components.classes["@mozilla.org/supports-string;1"].
createInstance(Components.interfaces.nsISupportsString);
str.data = bibliography;
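// setTransferData takes a length in bytes; nsISupportsString data is stored
// as UTF-16, hence length*2 here and below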
transferable.addDataFlavor("text/html");
transferable.setTransferData("text/html", str, bibliography.length*2);
// add text
var bibliography = Scholar.Cite.getBibliography(io.style, items, "Text");
var str = Components.classes["@mozilla.org/supports-string;1"].
createInstance(Components.interfaces.nsISupportsString);
str.data = bibliography;
transferable.addDataFlavor("text/unicode");
transferable.setTransferData("text/unicode", str, bibliography.length*2);
clipboardService.setData(transferable, null, Components.interfaces.nsIClipboard.kGlobalClipboard);
}
}
function _saveBibliography(name, format) {
// savable bibliography, using a file stream
const nsIFilePicker = Components.interfaces.nsIFilePicker;
var fp = Components.classes["@mozilla.org/filepicker;1"]
.createInstance(nsIFilePicker);
fp.init(window, "Save Bibliography", nsIFilePicker.modeSave);
if(format == "RTF") {
var extension = "rtf";
fp.appendFilter("RTF", "*.rtf");
} else {
var extension = "html";
fp.appendFilters(nsIFilePicker.filterHTML);
}
fp.defaultString = name+"."+extension;
var rv = fp.show();
if (rv == nsIFilePicker.returnOK || rv == nsIFilePicker.returnReplace) {
// open file
var fStream = Components.classes["@mozilla.org/network/file-output-stream;1"].
createInstance(Components.interfaces.nsIFileOutputStream);
fStream.init(fp.file, 0x02 | 0x08 | 0x20, 0664, 0); // write, create, truncate
return fStream;
} else {
return false;
}
}
}
// Handles the display of a progress indicator
Scholar_File_Interface.Progress = new function() {
var _windowLoaded = false;
var _windowLoading = false;
var _progressWindow;
// keep track of these values in case show() is called before the progress
// window has finished loading
var _loadHeadline, _loadNumber, _outOf, _callback;
this.show = show;
this.close = close;
function show(headline, callback) {
if(_windowLoading || _windowLoaded) { // already loading or loaded
_progressWindow.focus();
return false;
}
_windowLoading = true;
_loadHeadline = headline;
_loadNumber = 0;
_outOf = 0;
_callback = callback;
_progressWindow = window.openDialog("chrome://scholar/chrome/fileProgress.xul", "", "chrome,resizable=no,close=no,dependent,dialog,centerscreen");
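// the new window isn't usable yet; _onWindowLoaded applies the stashed
// headline and runs the callback once it fires "pageshow"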
_progressWindow.addEventListener("pageshow", _onWindowLoaded, false);
return true;
}
function close() {
_windowLoaded = false;
try {
_progressWindow.close();
} catch(ex) {}
}
function _onWindowLoaded() {
_windowLoading = false;
_windowLoaded = true;
// do things we delayed because the window was loading
_progressWindow.document.getElementById("progress-label").value = _loadHeadline;
if(_callback) {
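// the delay presumably gives the progress window a chance to paint before
// the long-running, synchronous translation blocks the UI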
window.setTimeout(_callback, 1500);
}
}
}