Remove redundant error handling in Zotero.Sync.Data.Engine::_uploadObjects()
makeRequest() now retries 5xx errors automatically, so it's not necessary higher up.
parent 4cc6408105
commit a0c7cf9bee

1 changed file with 101 additions and 126 deletions
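
The retry logic this commit deletes now lives inside the HTTP layer. makeRequest() itself is not part of this diff, so the following is only a rough sketch of a request helper that retries 5xx responses internally; the name requestWithRetry, the fetch-style API, and the backoff values are illustrative assumptions, not Zotero's implementation:

	// Sketch only -- not Zotero's makeRequest(). A request helper that retries
	// 5xx responses itself, so callers no longer need their own catch-and-retry
	// loops around individual API calls.
	async function requestWithRetry(url, options = {}, maxRetries = 3) {
		let delay = 1000; // initial backoff in ms (illustrative value)
		for (let attempt = 0; ; attempt++) {
			let response = await fetch(url, options);
			// Anything that isn't a 5xx is returned to the caller as-is
			if (response.status < 500 || response.status >= 600) {
				return response;
			}
			if (attempt >= maxRetries) {
				throw new Error("HTTP " + response.status + " after " + attempt + " retries");
			}
			await new Promise(resolve => setTimeout(resolve, delay));
			delay *= 2; // simple exponential backoff
		}
	}

With retries handled at that level, a caller such as _uploadObjects() can treat a thrown error as final and simply propagate it, which is what the simplified code in the diff below does.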
@@ -835,141 +835,116 @@ Zotero.Sync.Data.Engine.prototype._uploadObjects = Zotero.Promise.coroutine(func
 		let results;
 		let numSuccessful = 0;
-		try {
 		({ libraryVersion, results } = yield this.apiClient.uploadObjects(
 			this.library.libraryType,
 			this.libraryTypeID,
 			"POST",
 			libraryVersion,
 			objectType,
 			batch
 		));

 		Zotero.debug("===");
 		Zotero.debug(results);

 		// Mark successful and unchanged objects as synced with new version,
 		// and save uploaded JSON to cache
 		let ids = [];
 		let toSave = [];
 		let toCache = [];
 		for (let state of ['successful', 'unchanged']) {
 			for (let index in results[state]) {
 				let current = results[state][index];
 				// 'successful' includes objects, not keys
 				let key = state == 'successful' ? current.key : current;

 				if (key != batch[index].key) {
 					throw new Error("Key mismatch (" + key + " != " + batch[index].key + ")");
 				}

 				let obj = yield objectsClass.getByLibraryAndKeyAsync(
 					this.libraryID, key, { noCache: true }
 				)
 				ids.push(obj.id);

 				if (state == 'successful') {
 					// Update local object with saved data if necessary
 					yield obj.loadAllData();
 					obj.fromJSON(current.data);
 					toSave.push(obj);
 					toCache.push(current);
 				}
 				else {
 					// This won't reflect the actual version of the item on the server, but
 					// it will guarantee that the item won't be redownloaded unnecessarily
 					// in the case of a full sync, because the version will be higher than
 					// whatever version is on the server.
 					batch[index].version = libraryVersion
 					toCache.push(batch[index]);
 				}

 				numSuccessful++;
 				// Remove from batch to mark as successful
 				delete batch[index];
 			}
 		}
 		yield Zotero.Sync.Data.Local.saveCacheObjects(
 			objectType, this.libraryID, toCache
 		);
 		yield Zotero.DB.executeTransaction(function* () {
 			for (let i = 0; i < toSave.length; i++) {
 				yield toSave[i].save();
 			}
 			this.library.libraryVersion = libraryVersion;
 			yield this.library.save();
 			objectsClass.updateVersion(ids, libraryVersion);
 			objectsClass.updateSynced(ids, true);
 		}.bind(this));

 		// Handle failed objects
 		for (let index in results.failed) {
 			let { code, message, data } = results.failed[index];
 			let e = new Error(message);
 			e.name = "ZoteroObjectUploadError";
 			e.code = code;
 			if (data) {
 				e.data = data;
 			}
 			Zotero.logError("Error for " + objectType + " " + batch[index].key + " in "
 				+ this.library.name + ":\n\n" + e);

 			// This shouldn't happen, because the upload request includes a library
 			// version and should prevent an outdated upload before the object version is
 			// checked. If it does, we need to do a full sync.
 			if (e.code == 412) {
 				return this.UPLOAD_RESULT_OBJECT_CONFLICT;
 			}

 			if (this.onError) {
 				this.onError(e);
 			}
 			if (this.stopOnError) {
 				throw new Error(e);
 			}
 			batch[index].tries++;
 			// Mark 400 errors as permanently failed
 			if (e.code >= 400 && e.code < 500) {
 				batch[index].failed = true;
 			}
 			// 500 errors should stay in queue and be retried
 		}

 		// Add failed objects back to end of queue
 		var numFailed = 0;
 		for (let o of batch) {
 			if (o !== undefined) {
 				queue.push(o);
 				// TODO: Clear JSON?
 				numFailed++;
 			}
 		}
 		Zotero.debug("Failed: " + numFailed, 2);
-		}
-		catch (e) {
-			if (e instanceof Zotero.HTTP.UnexpectedStatusException) {
-				if (e.status == 412) {
-					throw e;
-				}
-
-				// On 5xx, delay and retry
-				if (e.status >= 500 && e.status <= 600) {
-					if (!failureDelayGenerator) {
-						// Keep trying for up to an hour
-						failureDelayGenerator = Zotero.Utilities.Internal.delayGenerator(
-							Zotero.Sync.Data.failureDelayIntervals, 60 * 60 * 1000
-						);
-					}
-					let keepGoing = yield failureDelayGenerator.next();
-					if (!keepGoing) {
-						Zotero.logError("Failed too many times");
-						throw e;
-					}
-					continue;
-				}
-			}
-			throw e;
-		}

 		// If we didn't make any progress, bail
 		if (!numSuccessful) {
 			throw new Error("Made no progress during upload -- stopping");