Commit

Retry requests after certain network errors.
Closes #390

Change-Id: I0459e3a314fc777dd350c7fbfb1f44485f515e3c
Timothy Drews committed Jun 9, 2016
1 parent a644900 commit 0d77ddf
Showing 2 changed files with 259 additions and 51 deletions.
141 changes: 90 additions & 51 deletions lib/media/streaming_engine.js
@@ -1165,63 +1165,99 @@ shaka.media.StreamingEngine.prototype.fetchAndAppend_ = function(
 
     mediaState.performingUpdate = false;
 
-    if (error.code != shaka.util.Error.Code.QUOTA_EXCEEDED_ERROR) {
-      shaka.log.error(logPrefix, 'failed fetch and append: code=' + error.code);
-      this.onError_(error);
-      return;
-    }
-
-    // The segment cannot fit into the SourceBuffer. Ideally, MediaSource would
-    // have evicted old data to accommodate the segment; however, it may have
-    // failed to do this if the segment is very large, or if it could not find
-    // a suitable time range to remove.
-    //
-    // We can overcome the latter by trying to append the segment again;
-    // however, to avoid continuous QuotaExceededErrors we must reduce the size
-    // of the buffer going forward.
-    //
-    // If we've recently reduced the buffering goals, wait until the stream
-    // which caused the first QuotaExceededError recovers. Doing this ensures
-    // we don't reduce the buffering goals too quickly.
-
-    goog.asserts.assert(this.mediaStates_, 'must not be destroyed');
-    var mediaStates = shaka.util.MapUtils.values(this.mediaStates_);
-    var waitingForAnotherStreamToRecover = mediaStates.some(function(ms) {
-      return ms != mediaState && ms.recovering;
-    });
-
-    if (!waitingForAnotherStreamToRecover) {
-      // Reduction schedule: 80%, 60%, 40%, 20%, 16%, 12%, 8%, 4%, fail.
-      // Note: percentages are used for comparisons to avoid rounding errors.
-      var percentBefore = Math.round(100 * this.bufferingGoalScale_);
-      if (percentBefore > 20) {
-        this.bufferingGoalScale_ -= 0.2;
-      } else if (percentBefore > 4) {
-        this.bufferingGoalScale_ -= 0.04;
-      } else {
-        shaka.log.error(
-            logPrefix, 'MediaSource threw QuotaExceededError too many times');
-        this.onError_(error);
-        return;
-      }
-      var percentAfter = Math.round(100 * this.bufferingGoalScale_);
-      shaka.log.warning(
-          logPrefix,
-          'MediaSource threw QuotaExceededError:',
-          'reducing buffering goals by ' + (100 - percentAfter) + '%');
-      mediaState.recovering = true;
-    } else {
-      shaka.log.debug(
-          logPrefix,
-          'MediaSource threw QuotaExceededError:',
-          'waiting for another stream to recover...');
-    }
-
-    // If we're not rebuffering then wait to update: MediaSource may have
-    // failed to remove a time range because the playhead was too close to the
-    // range it wanted to remove.
-    this.scheduleUpdate_(mediaState, mediaState.needRebuffering ? 0 : 4);
+    if (error.code == shaka.util.Error.Code.BAD_HTTP_STATUS ||
+        error.code == shaka.util.Error.Code.HTTP_ERROR ||
+        error.code == shaka.util.Error.Code.TIMEOUT) {
+      this.handleNetworkError_(mediaState, error);
+    } else if (error.code == shaka.util.Error.Code.QUOTA_EXCEEDED_ERROR) {
+      this.handleQuotaExceeded_(mediaState, error);
+    } else {
+      shaka.log.error(logPrefix, 'failed fetch and append: code=' + error.code);
+      this.onError_(error);
+    }
   }.bind(this));
 };
+
+
+/**
+ * Handles a network error.
+ *
+ * @param {shaka.media.StreamingEngine.MediaState_} mediaState
+ * @param {!shaka.util.Error} error
+ * @private
+ */
+shaka.media.StreamingEngine.prototype.handleNetworkError_ = function(
+    mediaState, error) {
+  var logPrefix = shaka.media.StreamingEngine.logPrefix_(mediaState);
+
+  this.onError_(error);
+
+  shaka.log.warning(logPrefix, 'Network error. Retrying...');
+  this.scheduleUpdate_(mediaState, 4);
+};
+
+
+/**
+ * Handles a QUOTA_EXCEEDED_ERROR.
+ *
+ * @param {shaka.media.StreamingEngine.MediaState_} mediaState
+ * @param {!shaka.util.Error} error
+ * @private
+ */
+shaka.media.StreamingEngine.prototype.handleQuotaExceeded_ = function(
+    mediaState, error) {
+  var logPrefix = shaka.media.StreamingEngine.logPrefix_(mediaState);
+
+  // The segment cannot fit into the SourceBuffer. Ideally, MediaSource would
+  // have evicted old data to accommodate the segment; however, it may have
+  // failed to do this if the segment is very large, or if it could not find
+  // a suitable time range to remove.
+  //
+  // We can overcome the latter by trying to append the segment again;
+  // however, to avoid continuous QuotaExceededErrors we must reduce the size
+  // of the buffer going forward.
+  //
+  // If we've recently reduced the buffering goals, wait until the stream
+  // which caused the first QuotaExceededError recovers. Doing this ensures
+  // we don't reduce the buffering goals too quickly.
+
+  goog.asserts.assert(this.mediaStates_, 'must not be destroyed');
+  var mediaStates = shaka.util.MapUtils.values(this.mediaStates_);
+  var waitingForAnotherStreamToRecover = mediaStates.some(function(ms) {
+    return ms != mediaState && ms.recovering;
+  });
+
+  if (!waitingForAnotherStreamToRecover) {
+    // Reduction schedule: 80%, 60%, 40%, 20%, 16%, 12%, 8%, 4%, fail.
+    // Note: percentages are used for comparisons to avoid rounding errors.
+    var percentBefore = Math.round(100 * this.bufferingGoalScale_);
+    if (percentBefore > 20) {
+      this.bufferingGoalScale_ -= 0.2;
+    } else if (percentBefore > 4) {
+      this.bufferingGoalScale_ -= 0.04;
+    } else {
+      shaka.log.error(
+          logPrefix, 'MediaSource threw QuotaExceededError too many times');
+      this.onError_(error);
+      return;
+    }
+    var percentAfter = Math.round(100 * this.bufferingGoalScale_);
+    shaka.log.warning(
+        logPrefix,
+        'MediaSource threw QuotaExceededError:',
+        'reducing buffering goals by ' + (100 - percentAfter) + '%');
+    mediaState.recovering = true;
+  } else {
+    shaka.log.debug(
+        logPrefix,
+        'MediaSource threw QuotaExceededError:',
+        'waiting for another stream to recover...');
+  }
+
+  // If we're not rebuffering then wait to update: MediaSource may have
+  // failed to remove a time range because the playhead was too close to the
+  // range it wanted to remove.
+  this.scheduleUpdate_(mediaState, mediaState.needRebuffering ? 0 : 4);
+};


@@ -1277,7 +1313,10 @@ shaka.media.StreamingEngine.prototype.initSourceBuffer_ = function(
 
     return this.mediaSourceEngine_.appendBuffer(
         mediaState.type, initSegment, null /* startTime */, null /* endTime */);
-  }.bind(this));
+  }.bind(this)).catch(function(error) {
+    mediaState.needInitSegment = true;
+    return Promise.reject(error);
+  });
 
   return Promise.all([setTimestampOffset, setAppendWindowEnd, appendInit]);
 };
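
For reference, the reduction schedule in handleQuotaExceeded_ above steps through 80%, 60%, 40%, 20%, 16%, 12%, 8%, 4%, and then gives up. A minimal standalone JavaScript sketch of that arithmetic (illustration only, not part of this commit; reduceBufferingGoal is a made-up name):

// Applies one step of the reduction schedule used by handleQuotaExceeded_.
// Returns the new scale, or null once the scale has reached 4% and the
// engine would treat the error as fatal.
function reduceBufferingGoal(bufferingGoalScale) {
  // Compare rounded percentages rather than raw floats to avoid
  // floating-point rounding errors (e.g. 0.4 - 0.2 != 0.2 exactly).
  var percentBefore = Math.round(100 * bufferingGoalScale);
  if (percentBefore > 20) {
    return bufferingGoalScale - 0.2;   // 100 -> 80 -> 60 -> 40 -> 20
  } else if (percentBefore > 4) {
    return bufferingGoalScale - 0.04;  // 20 -> 16 -> 12 -> 8 -> 4
  }
  return null;  // Too many QuotaExceededErrors; give up.
}

// Walking the schedule prints 80%, 60%, 40%, 20%, 16%, 12%, 8%, 4%.
var scale = 1.0;
while ((scale = reduceBufferingGoal(scale)) != null) {
  console.log(Math.round(100 * scale) + '%');
}
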
169 changes: 169 additions & 0 deletions test/streaming_engine_unit.js
@@ -1395,6 +1395,175 @@ describe('StreamingEngine', function() {
});
});

describe('handles network errors', function() {
function testRecoverableError(targetUri, code, done) {
var loop;

setupVod();

// Wrap the NetworkingEngine so the first request for targetUri fails with
// the given error code.
var originalNetEngine = netEngine;
netEngine = {
request: jasmine.createSpy('request')
};
var attempts = 0;
netEngine.request.and.callFake(function(requestType, request) {
if (request.uris[0] == targetUri) {
++attempts;
if (attempts == 1) {
var data = [targetUri];

if (code == shaka.util.Error.Code.BAD_HTTP_STATUS) {
data.push(404);
data.push('');
}

return Promise.reject(new shaka.util.Error(
shaka.util.Error.Category.NETWORK, code, data));
}
}
return originalNetEngine.request(requestType, request);
});

mediaSourceEngine = new shaka.test.FakeMediaSourceEngine(segmentData);
createStreamingEngine();

playhead.getTime.and.returnValue(0);
onStartupComplete.and.callFake(function() {
setupFakeGetTime(0);
mediaSourceEngine.endOfStream.and.callFake(loop.stop);
});

onError.and.callFake(function(error) {
expect(error.category).toBe(shaka.util.Error.Category.NETWORK);
expect(error.code).toBe(code);
});

// Here we go!
onChooseStreams.and.callFake(defaultOnChooseStreams.bind(null));
streamingEngine.init();

loop = runTest();
loop.then(function() {
expect(onError.calls.count()).toBe(1);
expect(attempts).toBe(2);
expect(mediaSourceEngine.endOfStream).toHaveBeenCalled();
return streamingEngine.destroy();
}).catch(fail).then(done);
}

it('from missing init, first Period',
testRecoverableError.bind(
null, '1_audio_init', shaka.util.Error.Code.BAD_HTTP_STATUS));
it('from missing init, second Period',
testRecoverableError.bind(
null, '2_video_init', shaka.util.Error.Code.BAD_HTTP_STATUS));
it('from missing media, first Period',
testRecoverableError.bind(
null, '1_video_1', shaka.util.Error.Code.BAD_HTTP_STATUS));
it('from missing media, second Period',
testRecoverableError.bind(
null, '2_audio_2', shaka.util.Error.Code.BAD_HTTP_STATUS));

it('from missing init, first Period',
testRecoverableError.bind(
null, '1_video_init', shaka.util.Error.Code.HTTP_ERROR));
it('from missing init, second Period',
testRecoverableError.bind(
null, '2_audio_init', shaka.util.Error.Code.HTTP_ERROR));
it('from missing media, first Period',
testRecoverableError.bind(
null, '1_audio_1', shaka.util.Error.Code.HTTP_ERROR));
it('from missing media, second Period',
testRecoverableError.bind(
null, '2_video_2', shaka.util.Error.Code.HTTP_ERROR));

it('from missing init, first Period',
testRecoverableError.bind(
null, '1_audio_init', shaka.util.Error.Code.TIMEOUT));
it('from missing init, second Period',
testRecoverableError.bind(
null, '2_video_init', shaka.util.Error.Code.TIMEOUT));
it('from missing media, first Period',
testRecoverableError.bind(
null, '1_video_2', shaka.util.Error.Code.TIMEOUT));
it('from missing media, second Period',
testRecoverableError.bind(
null, '2_audio_1', shaka.util.Error.Code.TIMEOUT));

function testNonRecoverableError(targetUri, code, done) {
var loop;

setupVod();

// Wrap the NetworkingEngine so every request for targetUri fails with the
// given error code.
var originalNetEngine = netEngine;
netEngine = {
request: jasmine.createSpy('request')
};
netEngine.request.and.callFake(function(requestType, request) {
if (request.uris[0] == targetUri) {
return Promise.reject(new shaka.util.Error(
shaka.util.Error.Category.NETWORK, code, [targetUri]));
}
return originalNetEngine.request(requestType, request);
});

mediaSourceEngine = new shaka.test.FakeMediaSourceEngine(segmentData);
createStreamingEngine();

playhead.getTime.and.returnValue(0);
onStartupComplete.and.callFake(function() {
setupFakeGetTime(0);
mediaSourceEngine.endOfStream.and.callFake(loop.stop);
});

onError.and.callFake(function(error) {
expect(error.category).toBe(shaka.util.Error.Category.NETWORK);
expect(error.code).toBe(code);
});

// Here we go!
onChooseStreams.and.callFake(defaultOnChooseStreams.bind(null));
streamingEngine.init();

loop = runTest();
loop.then(function() {
expect(onError.calls.count()).toBe(1);
expect(mediaSourceEngine.endOfStream).not.toHaveBeenCalled();
return streamingEngine.destroy();
}).catch(fail).then(done);
}

it('from unsupported scheme, init',
testNonRecoverableError.bind(
null, '1_audio_init', shaka.util.Error.Code.UNSUPPORTED_SCHEME));

it('from unsupported scheme, media',
testNonRecoverableError.bind(
null, '1_video_2', shaka.util.Error.Code.UNSUPPORTED_SCHEME));

it('from malformed data URI, init',
testNonRecoverableError.bind(
null, '1_video_init', shaka.util.Error.Code.MALFORMED_DATA_URI));

it('from malformed data URI, media',
testNonRecoverableError.bind(
null, '1_audio_2', shaka.util.Error.Code.MALFORMED_DATA_URI));

it('from unknown data URI encoding, init',
testNonRecoverableError.bind(
null,
'1_video_init',
shaka.util.Error.Code.UNKNOWN_DATA_URI_ENCODING));

it('from unknown data URI encoding, media',
testNonRecoverableError.bind(
null,
'1_audio_2',
shaka.util.Error.Code.UNKNOWN_DATA_URI_ENCODING));
});
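
The recoverable/non-recoverable split exercised above mirrors the dispatch in fetchAndAppend_: BAD_HTTP_STATUS, HTTP_ERROR, and TIMEOUT are reported through onError_ and then retried after a short delay (4 seconds in handleNetworkError_), while the scheme and data-URI errors are reported with no retry scheduled. A minimal sketch of that classification (illustration only; isRecoverableNetworkError is a hypothetical helper, not part of this commit, and it assumes shaka.util.Error.Code is available):

// Mirrors the error codes that fetchAndAppend_ routes to handleNetworkError_,
// which reports the error and schedules a retry a few seconds later.
function isRecoverableNetworkError(code) {
  var Code = shaka.util.Error.Code;
  return code == Code.BAD_HTTP_STATUS ||
         code == Code.HTTP_ERROR ||
         code == Code.TIMEOUT;
}

// For example, a TIMEOUT on a media segment is retried, while an
// UNSUPPORTED_SCHEME or MALFORMED_DATA_URI error is only reported through
// onError_ and no further update is scheduled.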

describe('eviction', function() {
it('evicts media to meet the max buffer tail limit', function(done) {
setupVod();
