Commit updated Javascript packages
This commit is contained in:
92
build/javascript/node_modules/@videojs/http-streaming/src/ad-cue-tags.js
generated
vendored
Normal file
92
build/javascript/node_modules/@videojs/http-streaming/src/ad-cue-tags.js
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
/**
|
||||
* @file ad-cue-tags.js
|
||||
*/
|
||||
import window from 'global/window';
|
||||
|
||||
/**
 * Returns the first cue on `track` whose [adStartTime, adEndTime] interval
 * contains `mediaTime` (both bounds inclusive), or null when no cue matches.
 *
 * @param {TextTrack} track the track whose cues are searched
 * @param {number} mediaTime the playback time to look up
 * @return {Object|null} the overlapping ad cue, or null
 */
export const findAdCue = function(track, mediaTime) {
  for (const candidate of Array.prototype.slice.call(track.cues)) {
    const overlaps =
      candidate.adStartTime <= mediaTime && mediaTime <= candidate.adEndTime;

    if (overlaps) {
      return candidate;
    }
  }

  return null;
};
|
||||
|
||||
/**
 * Creates, extends, and closes ad cues on `track` by walking the playlist's
 * segments and reacting to their CUE-OUT / CUE-OUT-CONT / CUE-IN markers.
 *
 * @param {Object} media the playlist object; must have a `segments` array
 * @param {TextTrack} track the track that receives the ad cues
 * @param {number} [offset=0] media time at which the first segment starts
 */
export const updateAdCues = function(media, track, offset = 0) {
  // nothing to do when the playlist carries no segment list
  if (!media.segments) {
    return;
  }

  // running media time, advanced by each segment's duration
  let mediaTime = offset;
  // the cue currently being built/extended, or undefined/null between ads
  let cue;

  for (let i = 0; i < media.segments.length; i++) {
    const segment = media.segments[i];

    if (!cue) {
      // Since the cues will span for at least the segment duration, adding a fudge
      // factor of half segment duration will prevent duplicate cues from being
      // created when timing info is not exact (e.g. cue start time initialized
      // at 10.006677, but next call mediaTime is 10.003332 )
      cue = findAdCue(track, mediaTime + (segment.duration / 2));
    }

    if (cue) {
      if ('cueIn' in segment) {
        // Found a CUE-IN so end the cue
        cue.endTime = mediaTime;
        cue.adEndTime = mediaTime;
        mediaTime += segment.duration;
        cue = null;
        continue;
      }

      if (mediaTime < cue.endTime) {
        // Already processed this mediaTime for this cue
        mediaTime += segment.duration;
        continue;
      }

      // otherwise extend cue until a CUE-IN is found
      cue.endTime += segment.duration;

    } else {
      if ('cueOut' in segment) {
        cue = new window.VTTCue(
          mediaTime,
          mediaTime + segment.duration,
          segment.cueOut
        );
        cue.adStartTime = mediaTime;
        // Assumes tag format to be
        // #EXT-X-CUE-OUT:30
        cue.adEndTime = mediaTime + parseFloat(segment.cueOut);
        track.addCue(cue);
      }

      if ('cueOutCont' in segment) {
        // Entered into the middle of an ad cue
        // Assumes tag format to be
        // #EXT-X-CUE-OUT-CONT:10/30
        const [adOffset, adTotal] = segment.cueOutCont.split('/').map(parseFloat);

        cue = new window.VTTCue(
          mediaTime,
          mediaTime + segment.duration,
          ''
        );
        // back-date the cue start so it covers the portion of the ad
        // that elapsed before this playlist refresh
        cue.adStartTime = mediaTime - adOffset;
        cue.adEndTime = cue.adStartTime + adTotal;
        track.addCue(cue);
      }
    }
    mediaTime += segment.duration;
  }
};
|
||||
114
build/javascript/node_modules/@videojs/http-streaming/src/bin-utils.js
generated
vendored
Normal file
114
build/javascript/node_modules/@videojs/http-streaming/src/bin-utils.js
generated
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
/**
|
||||
* @file bin-utils.js
|
||||
*/
|
||||
|
||||
/**
 * convert a TimeRange to text
 *
 * @param {TimeRange} range the timerange to use for conversion
 * @param {number} i the iterator on the range to convert
 * @return {string} the i-th span rendered as "start-end"
 */
const textRange = (range, i) => `${range.start(i)}-${range.end(i)}`;
|
||||
|
||||
/**
 * format a number as a zero-padded, two-digit hex string
 *
 * @param {number} e The number
 * @param {number} i the iterator; odd positions get a trailing space so
 *        that output groups bytes in visual pairs
 * @return {string} the formatted hex digits
 */
const formatHexString = (e, i) => {
  const digits = e.toString(16).padStart(2, '0');

  return i % 2 ? `${digits} ` : digits;
};
|
||||
/**
 * Map a byte to its printable ASCII character, or '.' for anything outside
 * the printable range.
 *
 * @param {number} e the byte value
 * @return {string} a single display character
 */
const formatAsciiString = function(e) {
  // Printable ASCII spans 0x20 (space) through 0x7e ('~') inclusive.
  // The previous open upper bound (`< 0x7e`) wrongly rendered '~' as '.'.
  if (e >= 0x20 && e <= 0x7e) {
    return String.fromCharCode(e);
  }
  return '.';
};
|
||||
|
||||
/**
 * Creates an object for sending to a web worker modifying properties that are TypedArrays
 * into a new object with separated properties for the buffer, byteOffset, and byteLength.
 *
 * @param {Object} message
 *        Object of properties and values to send to the web worker
 * @return {Object}
 *         Modified message with TypedArray values expanded
 * @function createTransferableMessage
 */
export const createTransferableMessage = function(message) {
  const transferable = {};

  for (const [key, value] of Object.entries(message)) {
    if (ArrayBuffer.isView(value)) {
      // expose the underlying buffer so it can be transferred, not copied
      transferable[key] = {
        bytes: value.buffer,
        byteOffset: value.byteOffset,
        byteLength: value.byteLength
      };
    } else {
      transferable[key] = value;
    }
  }

  return transferable;
};
|
||||
|
||||
/**
 * Returns a unique string identifier for a media initialization
 * segment, derived from its byterange and resolved URI.
 *
 * @param {Object} initSegment the init segment object
 * @return {string} "length,offset,resolvedUri"
 */
export const initSegmentId = function(initSegment) {
  // an absent byterange is treated as "the whole resource"
  const range = initSegment.byterange ?
    initSegment.byterange :
    { length: Infinity, offset: 0 };

  return [range.length, range.offset, initSegment.resolvedUri].join(',');
};
|
||||
|
||||
/**
 * Returns a unique string identifier for a media segment key:
 * simply the key's resolved URI.
 *
 * @param {Object} key the segment key object
 * @return {string} the key's resolved URI
 */
export const segmentKeyId = (key) => key.resolvedUri;
|
||||
|
||||
/**
 * utils to help dump binary data to the console
 *
 * Renders `data` as rows of sixteen bytes: a hex column, two spaces,
 * then the printable-ASCII column, one row per line.
 *
 * @param {TypedArray|Array} data the bytes to dump
 * @return {string} the formatted dump, one '\n'-terminated line per row
 */
export const hexDump = (data) => {
  const bytes = Array.prototype.slice.call(data);
  const step = 16;
  let result = '';

  for (let offset = 0; offset < bytes.length; offset += step) {
    const row = bytes.slice(offset, offset + step);
    const hex = row.map(formatHexString).join('');
    const ascii = row.map(formatAsciiString).join('');

    result += hex + ' ' + ascii + '\n';
  }

  return result;
};
|
||||
|
||||
// Dump an FLV tag's payload bytes via hexDump.
export const tagDump = (tag) => hexDump(tag.bytes);
|
||||
|
||||
/**
 * Render every span of a TimeRanges object as "start-end" pairs,
 * each followed by a single space (including the final one).
 *
 * @param {TimeRanges} ranges the ranges to render
 * @return {string} the space-terminated concatenation of all spans
 */
export const textRanges = (ranges) => {
  const pieces = [];

  for (let i = 0; i < ranges.length; i++) {
    pieces.push(textRange(ranges, i));
  }

  // every entry carries a trailing separator, matching historical output
  return pieces.map((piece) => piece + ' ').join('');
};
|
||||
15
build/javascript/node_modules/@videojs/http-streaming/src/config.js
generated
vendored
Normal file
15
build/javascript/node_modules/@videojs/http-streaming/src/config.js
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Tunable buffering / adaptive-bitrate constants used throughout VHS.
// Durations are in seconds; bandwidth values are in bits per second.
export default {
  // forward-buffer target (seconds) the engine tries to keep filled
  GOAL_BUFFER_LENGTH: 30,
  // upper bound on the forward-buffer target
  MAX_GOAL_BUFFER_LENGTH: 60,
  // how much already-played media (seconds) to retain behind the playhead
  BACK_BUFFER_LENGTH: 30,
  // rate at which the buffering goal grows as playback progresses
  GOAL_BUFFER_LENGTH_RATE: 1,
  // 0.5 MB/s
  INITIAL_BANDWIDTH: 4194304,
  // A fudge factor to apply to advertised playlist bitrates to account for
  // temporary fluctuations in client bandwidth
  BANDWIDTH_VARIANCE: 1.2,
  // How much of the buffer must be filled before we consider upswitching
  BUFFER_LOW_WATER_LINE: 0,
  // upper bound on the low-water line
  MAX_BUFFER_LOW_WATER_LINE: 30,
  // rate at which the low-water line grows as playback progresses
  BUFFER_LOW_WATER_LINE_RATE: 1
};
|
||||
866
build/javascript/node_modules/@videojs/http-streaming/src/dash-playlist-loader.js
generated
vendored
Normal file
866
build/javascript/node_modules/@videojs/http-streaming/src/dash-playlist-loader.js
generated
vendored
Normal file
@@ -0,0 +1,866 @@
|
||||
import videojs from 'video.js';
|
||||
import {
|
||||
parse as parseMpd,
|
||||
parseUTCTiming
|
||||
} from 'mpd-parser';
|
||||
import {
|
||||
refreshDelay,
|
||||
updateMaster as updatePlaylist
|
||||
} from './playlist-loader';
|
||||
import { resolveUrl, resolveManifestRedirect } from './resolve-url';
|
||||
import parseSidx from 'mux.js/lib/tools/parse-sidx';
|
||||
import { segmentXhrHeaders } from './xhr';
|
||||
import window from 'global/window';
|
||||
import {
|
||||
forEachMediaGroup,
|
||||
addPropertiesToMaster
|
||||
} from './manifest';
|
||||
import containerRequest from './util/container-request.js';
|
||||
import {toUint8} from '@videojs/vhs-utils/dist/byte-helpers';
|
||||
|
||||
const { EventTarget, mergeOptions } = videojs;
|
||||
|
||||
/**
 * Parses the master XML string and updates playlist URI references.
 *
 * @param {Object} config
 *        Object of arguments
 * @param {string} config.masterXml
 *        The mpd XML
 * @param {string} config.srcUrl
 *        The mpd URL
 * @param {Date} config.clientOffset
 *        A time difference between server and client
 * @param {Object} config.sidxMapping
 *        SIDX mappings for moof/mdat URIs and byte ranges
 * @return {Object}
 *         The parsed mpd manifest object
 */
export const parseMasterXml = ({ masterXml, srcUrl, clientOffset, sidxMapping }) => {
  const parserOptions = {
    manifestUri: srcUrl,
    clientOffset,
    sidxMapping
  };

  const master = parseMpd(masterXml, parserOptions);

  // attach URI/id bookkeeping that the rest of VHS expects on the manifest
  addPropertiesToMaster(master, srcUrl);

  return master;
};
|
||||
|
||||
/**
 * Returns a new master manifest that is the result of merging an updated master manifest
 * into the original version.
 *
 * @param {Object} oldMaster
 *        The old parsed mpd object
 * @param {Object} newMaster
 *        The updated parsed mpd object
 * @return {Object}
 *         A new object representing the original master manifest with the updated media
 *         playlists merged in, or null when nothing changed
 */
export const updateMaster = (oldMaster, newMaster) => {
  // flipped to false as soon as any playlist or top-level property differs
  let noChanges = true;
  let update = mergeOptions(oldMaster, {
    // These are top level properties that can be updated
    duration: newMaster.duration,
    minimumUpdatePeriod: newMaster.minimumUpdatePeriod
  });

  // First update the playlists in playlist list
  for (let i = 0; i < newMaster.playlists.length; i++) {
    // updatePlaylist returns null when the playlist is unchanged
    const playlistUpdate = updatePlaylist(update, newMaster.playlists[i]);

    if (playlistUpdate) {
      update = playlistUpdate;
      noChanges = false;
    }
  }

  // Then update media group playlists
  forEachMediaGroup(newMaster, (properties, type, group, label) => {
    if (properties.playlists && properties.playlists.length) {
      const id = properties.playlists[0].id;
      const playlistUpdate = updatePlaylist(update, properties.playlists[0]);

      if (playlistUpdate) {
        update = playlistUpdate;
        // update the playlist reference within media groups
        update.mediaGroups[type][group][label].playlists[0] = update.playlists[id];
        noChanges = false;
      }
    }
  });

  if (newMaster.minimumUpdatePeriod !== oldMaster.minimumUpdatePeriod) {
    noChanges = false;
  }

  // signal "no refresh needed" to the caller
  if (noChanges) {
    return null;
  }

  return update;
};
|
||||
|
||||
/**
 * Builds the cache key for a SIDX: its URI plus the inclusive byte range
 * it occupies ("uri-firstByte-lastByte").
 *
 * @param {Object} sidxInfo object with `uri` and `byterange` {offset, length}
 * @return {string} the cache key
 */
export const generateSidxKey = (sidxInfo) => {
  const { offset, length } = sidxInfo.byterange;

  // byterange length is a count, so the last covered byte is offset+length-1
  const lastByte = offset + length - 1;

  return `${sidxInfo.uri}-${offset}-${lastByte}`;
};
|
||||
|
||||
// SIDX should be equivalent if the URI and byteranges of the SIDX match.
// If the SIDXs have maps, the two maps should match,
// both `a` and `b` missing SIDXs is considered matching.
// If `a` or `b` but not both have a map, they aren't matching.
const equivalentSidx = (a, b) => {
  let mapsMatch;

  if (!a.map && !b.map) {
    // neither side has an init map: vacuously equal
    mapsMatch = true;
  } else {
    // both maps must exist and cover the identical byterange
    mapsMatch = Boolean(a.map && b.map &&
      a.map.byterange.offset === b.map.byterange.offset &&
      a.map.byterange.length === b.map.byterange.length);
  }

  return mapsMatch &&
    a.uri === b.uri &&
    a.byterange.offset === b.byterange.offset &&
    a.byterange.length === b.byterange.length;
};
|
||||
|
||||
// exported for testing
//
// Carries forward the old SIDX cache entries that are still valid for the
// given playlists; entries whose SIDX info changed are dropped so they get
// re-requested.
export const compareSidxEntry = (playlists, oldSidxMapping) => {
  const newSidxMapping = {};

  for (const id in playlists) {
    const currentSidxInfo = playlists[id].sidx;

    if (!currentSidxInfo) {
      continue;
    }

    const key = generateSidxKey(currentSidxInfo);

    // NOTE(review): a missing key aborts the whole scan (break, not
    // continue) — behavior preserved as-is; confirm this is intentional.
    if (!oldSidxMapping[key]) {
      break;
    }

    const savedSidxInfo = oldSidxMapping[key].sidxInfo;

    if (equivalentSidx(savedSidxInfo, currentSidxInfo)) {
      newSidxMapping[key] = oldSidxMapping[key];
    }
  }

  return newSidxMapping;
};
|
||||
|
||||
/**
 * A function that filters out changed items as they need to be requested separately.
 *
 * The method is exported for testing
 *
 * @param {Object} masterXml the mpd XML
 * @param {string} srcUrl the mpd url
 * @param {Date} clientOffset a time difference between server and client (passed through and not used)
 * @param {Object} oldSidxMapping the SIDX to compare against
 * @return {Object} the subset of oldSidxMapping that is still valid
 */
export const filterChangedSidxMappings = (masterXml, srcUrl, clientOffset, oldSidxMapping) => {
  // Don't pass current sidx mapping
  const master = parseMpd(masterXml, {
    manifestUri: srcUrl,
    clientOffset
  });

  // start from the main (video) playlists, then fold in every media group
  let mediaGroupSidx = compareSidxEntry(master.playlists, oldSidxMapping);

  forEachMediaGroup(master, (properties, mediaType, groupKey, labelKey) => {
    if (properties.playlists && properties.playlists.length) {
      mediaGroupSidx = mergeOptions(
        mediaGroupSidx,
        compareSidxEntry(properties.playlists, oldSidxMapping)
      );
    }
  });

  return mediaGroupSidx;
};
|
||||
|
||||
// exported for testing
//
// Requests the bytes of a playlist's SIDX box. First probes the container via
// containerRequest; if the probe already downloaded enough bytes, they are
// handed straight to finishProcessingFn, otherwise a ranged XHR is issued.
//
// @param {Object} loader the playlist loader; its `request` field is set when
//        a follow-up XHR is needed
// @param {Object} sidxRange object with `resolvedUri` and `byterange`
// @param {Object} playlist the playlist the sidx belongs to
// @param {Function} xhr the XHR factory to use
// @param {Object} options `{ handleManifestRedirects }`
// @param {Function} finishProcessingFn callback(err, request)
export const requestSidx_ = (loader, sidxRange, playlist, xhr, options, finishProcessingFn) => {
  const sidxInfo = {
    // resolve the segment URL relative to the playlist
    uri: resolveManifestRedirect(options.handleManifestRedirects, sidxRange.resolvedUri),
    // resolvedUri: sidxRange.resolvedUri,
    byterange: sidxRange.byterange,
    // the segment's playlist
    playlist
  };

  const sidxRequestOptions = videojs.mergeOptions(sidxInfo, {
    responseType: 'arraybuffer',
    headers: segmentXhrHeaders(sidxInfo)
  });

  return containerRequest(sidxInfo.uri, xhr, (err, request, container, bytes) => {
    if (err) {
      return finishProcessingFn(err, request);
    }

    // sidx boxes only exist in mp4 containers; anything else is fatal
    if (!container || container !== 'mp4') {
      return finishProcessingFn({
        status: request.status,
        message: `Unsupported ${container || 'unknown'} container type for sidx segment at URL: ${sidxInfo.uri}`,
        // response is just bytes in this case
        // but we really don't want to return that.
        response: '',
        playlist,
        internal: true,
        blacklistDuration: Infinity,
        // MEDIA_ERR_NETWORK
        code: 2
      }, request);
    }

    // if we already downloaded the sidx bytes in the container request, use them
    const {offset, length} = sidxInfo.byterange;

    if (bytes.length >= (length + offset)) {
      return finishProcessingFn(err, {
        response: bytes.subarray(offset, offset + length),
        status: request.status,
        uri: request.uri
      });
    }

    // otherwise request sidx bytes
    loader.request = xhr(sidxRequestOptions, finishProcessingFn);
  });
};
|
||||
|
||||
export default class DashPlaylistLoader extends EventTarget {
|
||||
// DashPlaylistLoader must accept either a src url or a playlist because subsequent
|
||||
// playlist loader setups from media groups will expect to be able to pass a playlist
|
||||
// (since there aren't external URLs to media playlists with DASH)
|
||||
// Builds either a master loader (from an mpd URL string) or a child loader
// (from a playlist object plus a reference to the master loader).
//
// @param {string|Object} srcUrlOrPlaylist mpd URL, or a media playlist object
// @param {Object} vhs the VHS instance (provides `xhr`)
// @param {Object} [options] `{ withCredentials, handleManifestRedirects }`
// @param {DashPlaylistLoader} [masterPlaylistLoader] required for child loaders
constructor(srcUrlOrPlaylist, vhs, options = { }, masterPlaylistLoader) {
  super();

  const { withCredentials = false, handleManifestRedirects = false } = options;

  this.vhs_ = vhs;
  this.withCredentials = withCredentials;
  this.handleManifestRedirects = handleManifestRedirects;

  if (!srcUrlOrPlaylist) {
    throw new Error('A non-empty playlist URL or object is required');
  }

  // event naming?
  this.on('minimumUpdatePeriod', () => {
    this.refreshXml_();
  });

  // live playlist staleness timeout
  this.on('mediaupdatetimeout', () => {
    this.refreshMedia_(this.media().id);
  });

  this.state = 'HAVE_NOTHING';
  this.loadedPlaylists_ = {};

  // initialize the loader state
  // The masterPlaylistLoader will be created with a string
  if (typeof srcUrlOrPlaylist === 'string') {
    this.srcUrl = srcUrlOrPlaylist;
    // TODO: reset sidxMapping between period changes
    // once multi-period is refactored
    this.sidxMapping_ = {};
    return;
  }

  // a playlist object means this is a child loader for a media group
  this.setupChildLoader(masterPlaylistLoader, srcUrlOrPlaylist);
}
|
||||
|
||||
// Records the master loader and the playlist this child loader manages.
setupChildLoader(masterPlaylistLoader, playlist) {
  this.masterPlaylistLoader_ = masterPlaylistLoader;
  this.childPlaylist_ = playlist;
}

// Tears the loader down: aborts any in-flight request, clears all pending
// timers, and removes every event listener.
dispose() {
  this.trigger('dispose');
  this.stopRequest();
  this.loadedPlaylists_ = {};
  window.clearTimeout(this.minimumUpdatePeriodTimeout_);
  window.clearTimeout(this.mediaRequest_);
  window.clearTimeout(this.mediaUpdateTimeout);

  // remove all listeners registered on this EventTarget
  this.off();
}

// True while either an XHR or a deferred (setTimeout-based) media request
// is outstanding. Note the value is truthy/falsy, not strictly boolean.
hasPendingRequest() {
  return this.request || this.mediaRequest_;
}

// Aborts the current XHR, detaching its callback first so the abort does
// not re-enter the loader.
stopRequest() {
  if (this.request) {
    const oldRequest = this.request;

    this.request = null;
    oldRequest.onreadystatechange = null;
    oldRequest.abort();
  }
}
|
||||
|
||||
// Returns the completion callback for a SIDX request. On success the
// response bytes are parsed as a sidx box and passed to `doneFn`; on
// failure the loader's error state is set and 'error' is triggered.
//
// @param {Object} playlist the playlist the sidx belongs to
// @param {Object} master the manifest to hand back to `doneFn`
// @param {string} startingState state to restore on error
// @param {Function} doneFn callback(master, sidx)
sidxRequestFinished_(playlist, master, startingState, doneFn) {
  return (err, request) => {
    // disposed
    if (!this.request) {
      return;
    }

    // pending request is cleared
    this.request = null;

    if (err) {
      // use the provided error or create one
      // see requestSidx_ for the container request
      // that can cause this.
      this.error = typeof err === 'object' ? err : {
        status: request.status,
        message: 'DASH playlist request error at URL: ' + playlist.uri,
        response: request.response,
        // MEDIA_ERR_NETWORK
        code: 2
      };
      if (startingState) {
        this.state = startingState;
      }

      this.trigger('error');
      return;
    }

    const bytes = toUint8(request.response);
    // skip the 8-byte box header (size + type) before parsing the payload
    const sidx = parseSidx(bytes.subarray(8));

    return doneFn(master, sidx);
  };
}
|
||||
|
||||
// Getter/setter for the active media playlist.
//
// With no argument, returns the current media playlist. With a playlist
// (object or URI string), switches to it: previously loaded VOD playlists
// switch synchronously; otherwise the switch completes asynchronously via
// haveMetadata, requesting the playlist's SIDX first when one is declared.
//
// @param {Object|string} [playlist] the playlist (or its URI) to switch to
// @return {Object|undefined} the current media playlist when used as getter
media(playlist) {
  // getter
  if (!playlist) {
    return this.media_;
  }

  // setter
  if (this.state === 'HAVE_NOTHING') {
    throw new Error('Cannot switch media playlist from ' + this.state);
  }

  // remembered so haveMetadata can tell a first load from a change
  const startingState = this.state;

  // find the playlist object if the target playlist has been specified by URI
  if (typeof playlist === 'string') {
    if (!this.master.playlists[playlist]) {
      throw new Error('Unknown playlist URI: ' + playlist);
    }
    playlist = this.master.playlists[playlist];
  }

  const mediaChange = !this.media_ || playlist.id !== this.media_.id;

  // switch to previously loaded playlists immediately
  if (mediaChange &&
      this.loadedPlaylists_[playlist.id] &&
      this.loadedPlaylists_[playlist.id].endList) {
    this.state = 'HAVE_METADATA';
    this.media_ = playlist;

    // trigger media change if the active media has been updated
    if (mediaChange) {
      this.trigger('mediachanging');
      this.trigger('mediachange');
    }
    return;
  }

  // switching to the active playlist is a no-op
  if (!mediaChange) {
    return;
  }

  // switching from an already loaded playlist
  if (this.media_) {
    this.trigger('mediachanging');
  }

  if (!playlist.sidx) {
    // Continue asynchronously if there is no sidx
    // wait one tick to allow haveMaster to run first on a child loader
    this.mediaRequest_ = window.setTimeout(
      this.haveMetadata.bind(this, { startingState, playlist }),
      0
    );

    // exit early and don't do sidx work
    return;
  }

  // we have sidx mappings
  let oldMaster;
  let sidxMapping;

  // sidxMapping is used when parsing the masterXml, so store
  // it on the masterPlaylistLoader
  if (this.masterPlaylistLoader_) {
    oldMaster = this.masterPlaylistLoader_.master;
    sidxMapping = this.masterPlaylistLoader_.sidxMapping_;
  } else {
    oldMaster = this.master;
    sidxMapping = this.sidxMapping_;
  }

  const sidxKey = generateSidxKey(playlist.sidx);

  // register the pending sidx so a re-parse can see it exists
  sidxMapping[sidxKey] = {
    sidxInfo: playlist.sidx
  };

  this.request = requestSidx_(
    this,
    playlist.sidx,
    playlist,
    this.vhs_.xhr,
    { handleManifestRedirects: this.handleManifestRedirects },
    this.sidxRequestFinished_(playlist, oldMaster, startingState, (newMaster, sidx) => {
      if (!newMaster || !sidx) {
        throw new Error('failed to request sidx');
      }

      // update loader's sidxMapping with parsed sidx box
      sidxMapping[sidxKey].sidx = sidx;

      // everything is ready just continue to haveMetadata
      this.haveMetadata({
        startingState,
        playlist: newMaster.playlists[playlist.id]
      });
    })
  );
}
|
||||
|
||||
// Completes a media playlist switch: records the playlist as loaded,
// moves to HAVE_METADATA, and fires the appropriate follow-up event.
//
// @param {Object} arg
// @param {string} arg.startingState the loader state before the switch
// @param {Object} arg.playlist the playlist that was switched to
haveMetadata({startingState, playlist}) {
  this.state = 'HAVE_METADATA';
  this.loadedPlaylists_[playlist.id] = playlist;
  this.mediaRequest_ = null;

  // This will trigger loadedplaylist
  this.refreshMedia_(playlist.id);

  // fire loadedmetadata the first time a media playlist is loaded
  // to resolve setup of media groups
  if (startingState === 'HAVE_MASTER') {
    this.trigger('loadedmetadata');
  } else {
    // trigger media change if the active media has been updated
    this.trigger('mediachange');
  }
}
|
||||
|
||||
// Halts the loader: aborts requests and cancels refresh timers. Pausing
// before anything was loaded returns the loader to its unstarted state.
pause() {
  this.stopRequest();
  window.clearTimeout(this.mediaUpdateTimeout);
  window.clearTimeout(this.minimumUpdatePeriodTimeout_);
  if (this.state === 'HAVE_NOTHING') {
    // If we pause the loader before any data has been retrieved, its as if we never
    // started, so reset to an unstarted state.
    this.started = false;
  }
}

// (Re)starts loading. For a final rendition, retries after a delay instead
// of loading immediately.
//
// @param {boolean} [isFinalRendition] when true, schedule a delayed retry
//        (half a target duration, or 5s when no media is selected yet)
load(isFinalRendition) {
  window.clearTimeout(this.mediaUpdateTimeout);
  window.clearTimeout(this.minimumUpdatePeriodTimeout_);

  const media = this.media();

  if (isFinalRendition) {
    const delay = media ? (media.targetDuration / 2) * 1000 : 5 * 1000;

    this.mediaUpdateTimeout = window.setTimeout(() => this.load(), delay);
    return;
  }

  // because the playlists are internal to the manifest, load should either load the
  // main manifest, or do nothing but trigger an event
  if (!this.started) {
    this.start();
    return;
  }

  if (media && !media.endList) {
    // live playlist: kick off a staleness refresh
    this.trigger('mediaupdatetimeout');
  } else {
    this.trigger('loadedplaylist');
  }
}
|
||||
|
||||
// Begins loading. Child loaders already have the parsed master, so they
// only defer to haveMaster_; a master loader fetches the mpd, records
// load time for clock sync, and then synchronizes the client/server clock.
start() {
  this.started = true;

  // We don't need to request the master manifest again
  // Call this asynchronously to match the xhr request behavior below
  if (this.masterPlaylistLoader_) {
    this.mediaRequest_ = window.setTimeout(
      this.haveMaster_.bind(this),
      0
    );
    return;
  }

  // request the specified URL
  this.request = this.vhs_.xhr({
    uri: this.srcUrl,
    withCredentials: this.withCredentials
  }, (error, req) => {
    // disposed
    if (!this.request) {
      return;
    }

    // clear the loader's request reference
    this.request = null;

    if (error) {
      this.error = {
        status: req.status,
        message: 'DASH playlist request error at URL: ' + this.srcUrl,
        responseText: req.responseText,
        // MEDIA_ERR_NETWORK
        code: 2
      };
      if (this.state === 'HAVE_NOTHING') {
        this.started = false;
      }
      return this.trigger('error');
    }

    this.masterXml_ = req.responseText;

    // prefer the server's Date header as the manifest load time so the
    // client/server clock offset is measured against server time
    if (req.responseHeaders && req.responseHeaders.date) {
      this.masterLoaded_ = Date.parse(req.responseHeaders.date);
    } else {
      this.masterLoaded_ = Date.now();
    }

    this.srcUrl = resolveManifestRedirect(this.handleManifestRedirects, this.srcUrl, req);

    this.syncClientServerClock_(this.onClientServerClockSync_.bind(this));
  });
}
|
||||
|
||||
/**
|
||||
* Parses the master xml for UTCTiming node to sync the client clock to the server
|
||||
* clock. If the UTCTiming node requires a HEAD or GET request, that request is made.
|
||||
*
|
||||
* @param {Function} done
|
||||
* Function to call when clock sync has completed
|
||||
*/
|
||||
syncClientServerClock_(done) {
  const utcTiming = parseUTCTiming(this.masterXml_);

  // No UTCTiming element found in the mpd. Use Date header from mpd request as the
  // server clock
  if (utcTiming === null) {
    this.clientOffset_ = this.masterLoaded_ - Date.now();
    return done();
  }

  // DIRECT timing embeds the server time right in the manifest
  if (utcTiming.method === 'DIRECT') {
    this.clientOffset_ = utcTiming.value - Date.now();
    return done();
  }

  // HEAD or GET timing: ask the timing server for its current time
  this.request = this.vhs_.xhr({
    uri: resolveUrl(this.srcUrl, utcTiming.value),
    method: utcTiming.method,
    withCredentials: this.withCredentials
  }, (error, req) => {
    // disposed
    if (!this.request) {
      return;
    }

    if (error) {
      // sync request failed, fall back to using date header from mpd
      // TODO: log warning
      this.clientOffset_ = this.masterLoaded_ - Date.now();
      return done();
    }

    let serverTime;

    if (utcTiming.method === 'HEAD') {
      if (!req.responseHeaders || !req.responseHeaders.date) {
        // expected date header not present, fall back to using date header from mpd
        // TODO: log warning
        serverTime = this.masterLoaded_;
      } else {
        serverTime = Date.parse(req.responseHeaders.date);
      }
    } else {
      // GET responses carry the timestamp in the body
      serverTime = Date.parse(req.responseText);
    }

    this.clientOffset_ = serverTime - Date.now();

    done();
  });
}
|
||||
|
||||
// Transitions to HAVE_MASTER. A master loader parses the fetched mpd and
// announces it; a child loader selects the playlist it was created with.
haveMaster_() {
  this.state = 'HAVE_MASTER';
  // clear media request
  this.mediaRequest_ = null;

  if (!this.masterPlaylistLoader_) {
    this.updateMainManifest_(parseMasterXml({
      masterXml: this.masterXml_,
      srcUrl: this.srcUrl,
      clientOffset: this.clientOffset_,
      sidxMapping: this.sidxMapping_
    }));
    // We have the master playlist at this point, so
    // trigger this to allow MasterPlaylistController
    // to make an initial playlist selection
    this.trigger('loadedplaylist');
  } else if (!this.media_) {
    // no media playlist was specifically selected so select
    // the one the child playlist loader was created with
    this.media(this.childPlaylist_);
  }
}
|
||||
|
||||
// (Re)arms the timer that fires 'minimumUpdatePeriod' (triggering an mpd
// refresh). A positive MUP is used directly; a MUP of exactly 0 falls back
// to the selected playlist's target duration; a missing/negative MUP arms
// no timer at all.
updateMinimumUpdatePeriodTimeout_() {
  // Clear existing timeout
  window.clearTimeout(this.minimumUpdatePeriodTimeout_);

  const createMUPTimeout = (mup) => {
    this.minimumUpdatePeriodTimeout_ = window.setTimeout(() => {
      this.trigger('minimumUpdatePeriod');
    }, mup);
  };

  const minimumUpdatePeriod = this.master && this.master.minimumUpdatePeriod;

  if (minimumUpdatePeriod > 0) {
    createMUPTimeout(minimumUpdatePeriod);

    // If the minimumUpdatePeriod has a value of 0, that indicates that the current
    // MPD has no future validity, so a new one will need to be acquired when new
    // media segments are to be made available. Thus, we use the target duration
    // in this case
  } else if (minimumUpdatePeriod === 0) {
    // If we haven't yet selected a playlist, wait until then so we know the
    // target duration
    if (!this.media()) {
      this.one('loadedplaylist', () => {
        createMUPTimeout(this.media().targetDuration * 1000);
      });
    } else {
      createMUPTimeout(this.media().targetDuration * 1000);
    }
  }
}
|
||||
|
||||
/**
|
||||
* Handler for after client/server clock synchronization has happened. Sets up
|
||||
* xml refresh timer if specificed by the manifest.
|
||||
*/
|
||||
onClientServerClockSync_() {
  this.haveMaster_();

  // auto-select the first playlist if nothing chose one during haveMaster_
  if (!this.hasPendingRequest() && !this.media_) {
    this.media(this.master.playlists[0]);
  }

  this.updateMinimumUpdatePeriodTimeout_();
}
|
||||
|
||||
/**
|
||||
* Given a new manifest, update our pointer to it and update the srcUrl based on the location elements of the manifest, if they exist.
|
||||
*
|
||||
* @param {Object} updatedManifest the manifest to update to
|
||||
*/
|
||||
updateMainManifest_(updatedManifest) {
  this.master = updatedManifest;

  // if locations isn't set or is an empty array, exit early
  if (!this.master.locations || !this.master.locations.length) {
    return;
  }

  // only the first Location element is honored
  const location = this.master.locations[0];

  if (location !== this.srcUrl) {
    this.srcUrl = location;
  }
}
|
||||
|
||||
/**
|
||||
* Sends request to refresh the master xml and updates the parsed master manifest
|
||||
* TODO: Does the client offset need to be recalculated when the xml is refreshed?
|
||||
*/
|
||||
refreshXml_() {
  // The srcUrl here *may* need to pass through handleManifestsRedirects when
  // sidx is implemented
  this.request = this.vhs_.xhr({
    uri: this.srcUrl,
    withCredentials: this.withCredentials
  }, (error, req) => {
    // disposed
    if (!this.request) {
      return;
    }

    // clear the loader's request reference
    this.request = null;

    if (error) {
      this.error = {
        status: req.status,
        message: 'DASH playlist request error at URL: ' + this.srcUrl,
        responseText: req.responseText,
        // MEDIA_ERR_NETWORK
        code: 2
      };
      if (this.state === 'HAVE_NOTHING') {
        this.started = false;
      }
      return this.trigger('error');
    }

    this.masterXml_ = req.responseText;

    // This will filter out updated sidx info from the mapping
    this.sidxMapping_ = filterChangedSidxMappings(
      this.masterXml_,
      this.srcUrl,
      this.clientOffset_,
      this.sidxMapping_
    );

    const master = parseMasterXml({
      masterXml: this.masterXml_,
      srcUrl: this.srcUrl,
      clientOffset: this.clientOffset_,
      sidxMapping: this.sidxMapping_
    });
    // null when the refreshed mpd produced no changes
    const updatedMaster = updateMaster(this.master, master);
    const currentSidxInfo = this.media().sidx;

    if (updatedMaster) {
      if (currentSidxInfo) {
        const sidxKey = generateSidxKey(currentSidxInfo);

        // the sidx was updated, so the previous mapping was removed
        if (!this.sidxMapping_[sidxKey]) {
          const playlist = this.media();

          this.request = requestSidx_(
            this,
            playlist.sidx,
            playlist,
            this.vhs_.xhr,
            { handleManifestRedirects: this.handleManifestRedirects },
            this.sidxRequestFinished_(playlist, master, this.state, (newMaster, sidx) => {
              if (!newMaster || !sidx) {
                throw new Error('failed to request sidx on minimumUpdatePeriod');
              }

              // update loader's sidxMapping with parsed sidx box
              this.sidxMapping_[sidxKey].sidx = sidx;

              this.updateMinimumUpdatePeriodTimeout_();

              // TODO: do we need to reload the current playlist?
              this.refreshMedia_(this.media().id);

              return;
            })
          );
        }
      } else {
        // no sidx in play: adopt the merged manifest and refresh the
        // active media reference from it
        this.updateMainManifest_(updatedMaster);
        if (this.media_) {
          this.media_ = this.master.playlists[this.media_.id];
        }
      }
    }

    this.updateMinimumUpdatePeriodTimeout_();
  });
}
|
||||
|
||||
/**
|
||||
* Refreshes the media playlist by re-parsing the master xml and updating playlist
|
||||
* references. If this is an alternate loader, the updated parsed manifest is retrieved
|
||||
* from the master loader.
|
||||
*/
|
||||
refreshMedia_(mediaID) {
|
||||
if (!mediaID) {
|
||||
throw new Error('refreshMedia_ must take a media id');
|
||||
}
|
||||
|
||||
let oldMaster;
|
||||
let newMaster;
|
||||
|
||||
if (this.masterPlaylistLoader_) {
|
||||
oldMaster = this.masterPlaylistLoader_.master;
|
||||
newMaster = parseMasterXml({
|
||||
masterXml: this.masterPlaylistLoader_.masterXml_,
|
||||
srcUrl: this.masterPlaylistLoader_.srcUrl,
|
||||
clientOffset: this.masterPlaylistLoader_.clientOffset_,
|
||||
sidxMapping: this.masterPlaylistLoader_.sidxMapping_
|
||||
});
|
||||
} else {
|
||||
oldMaster = this.master;
|
||||
newMaster = parseMasterXml({
|
||||
masterXml: this.masterXml_,
|
||||
srcUrl: this.srcUrl,
|
||||
clientOffset: this.clientOffset_,
|
||||
sidxMapping: this.sidxMapping_
|
||||
});
|
||||
}
|
||||
|
||||
const updatedMaster = updateMaster(oldMaster, newMaster);
|
||||
|
||||
if (updatedMaster) {
|
||||
if (this.masterPlaylistLoader_) {
|
||||
this.masterPlaylistLoader_.master = updatedMaster;
|
||||
} else {
|
||||
this.master = updatedMaster;
|
||||
}
|
||||
this.media_ = updatedMaster.playlists[mediaID];
|
||||
} else {
|
||||
this.media_ = oldMaster.playlists[mediaID];
|
||||
this.trigger('playlistunchanged');
|
||||
}
|
||||
|
||||
if (!this.media().endList) {
|
||||
this.mediaUpdateTimeout = window.setTimeout(() => {
|
||||
this.trigger('mediaupdatetimeout');
|
||||
}, refreshDelay(this.media(), !!updatedMaster));
|
||||
}
|
||||
|
||||
this.trigger('loadedplaylist');
|
||||
}
|
||||
}
|
||||
48
build/javascript/node_modules/@videojs/http-streaming/src/decrypter-worker.js
generated
vendored
Normal file
48
build/javascript/node_modules/@videojs/http-streaming/src/decrypter-worker.js
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
/* global self */
|
||||
import { Decrypter } from 'aes-decrypter';
|
||||
import { createTransferableMessage } from './bin-utils';
|
||||
|
||||
/**
|
||||
* Our web worker interface so that things can talk to aes-decrypter
|
||||
* that will be running in a web worker. the scope is passed to this by
|
||||
* webworkify.
|
||||
*
|
||||
* @param {Object} self
|
||||
* the scope for the web worker
|
||||
*/
|
||||
const DecrypterWorker = function(self) {
  self.onmessage = function(event) {
    const data = event.data;

    // Rebuild typed-array views over the transferred buffers; the key and
    // IV are consumed as 32-bit words.
    const encryptedBytes = new Uint8Array(
      data.encrypted.bytes,
      data.encrypted.byteOffset,
      data.encrypted.byteLength
    );
    const keyWords = new Uint32Array(
      data.key.bytes,
      data.key.byteOffset,
      data.key.byteLength / 4
    );
    const ivWords = new Uint32Array(
      data.iv.bytes,
      data.iv.byteOffset,
      data.iv.byteLength / 4
    );

    /* eslint-disable no-new, handle-callback-err */
    new Decrypter(
      encryptedBytes,
      keyWords,
      ivWords,
      function(err, bytes) {
        // Post the plaintext back, transferring (not copying) its buffer.
        self.postMessage(createTransferableMessage({
          source: data.source,
          decrypted: bytes
        }), [bytes.buffer]);
      }
    );
    /* eslint-enable */
  };
};

export default new DecrypterWorker(self);
|
||||
666
build/javascript/node_modules/@videojs/http-streaming/src/decrypter-worker.worker.js
generated
vendored
Normal file
666
build/javascript/node_modules/@videojs/http-streaming/src/decrypter-worker.worker.js
generated
vendored
Normal file
@@ -0,0 +1,666 @@
|
||||
/*! @name @videojs/http-streaming @version 2.2.0 @license Apache-2.0 */
|
||||
var decrypterWorker = (function () {
|
||||
'use strict';
|
||||
|
||||
function _defineProperties(target, props) {
|
||||
for (var i = 0; i < props.length; i++) {
|
||||
var descriptor = props[i];
|
||||
descriptor.enumerable = descriptor.enumerable || false;
|
||||
descriptor.configurable = true;
|
||||
if ("value" in descriptor) descriptor.writable = true;
|
||||
Object.defineProperty(target, descriptor.key, descriptor);
|
||||
}
|
||||
}
|
||||
|
||||
function _createClass(Constructor, protoProps, staticProps) {
|
||||
if (protoProps) _defineProperties(Constructor.prototype, protoProps);
|
||||
if (staticProps) _defineProperties(Constructor, staticProps);
|
||||
return Constructor;
|
||||
}
|
||||
|
||||
var createClass = _createClass;
|
||||
|
||||
function _inheritsLoose(subClass, superClass) {
|
||||
subClass.prototype = Object.create(superClass.prototype);
|
||||
subClass.prototype.constructor = subClass;
|
||||
subClass.__proto__ = superClass;
|
||||
}
|
||||
|
||||
var inheritsLoose = _inheritsLoose;
|
||||
|
||||
/*! @name @videojs/vhs-utils @version 2.2.0 @license MIT */
|
||||
|
||||
/**
|
||||
* @file stream.js
|
||||
*/
|
||||
|
||||
/**
|
||||
* A lightweight readable stream implemention that handles event dispatching.
|
||||
*
|
||||
* @class Stream
|
||||
*/
|
||||
var Stream =
|
||||
/*#__PURE__*/
|
||||
function () {
|
||||
function Stream() {
|
||||
this.listeners = {};
|
||||
}
|
||||
/**
|
||||
* Add a listener for a specified event type.
|
||||
*
|
||||
* @param {string} type the event name
|
||||
* @param {Function} listener the callback to be invoked when an event of
|
||||
* the specified type occurs
|
||||
*/
|
||||
|
||||
|
||||
var _proto = Stream.prototype;
|
||||
|
||||
_proto.on = function on(type, listener) {
|
||||
if (!this.listeners[type]) {
|
||||
this.listeners[type] = [];
|
||||
}
|
||||
|
||||
this.listeners[type].push(listener);
|
||||
}
|
||||
/**
|
||||
* Remove a listener for a specified event type.
|
||||
*
|
||||
* @param {string} type the event name
|
||||
* @param {Function} listener a function previously registered for this
|
||||
* type of event through `on`
|
||||
* @return {boolean} if we could turn it off or not
|
||||
*/
|
||||
;
|
||||
|
||||
_proto.off = function off(type, listener) {
|
||||
if (!this.listeners[type]) {
|
||||
return false;
|
||||
}
|
||||
|
||||
var index = this.listeners[type].indexOf(listener); // TODO: which is better?
|
||||
// In Video.js we slice listener functions
|
||||
// on trigger so that it does not mess up the order
|
||||
// while we loop through.
|
||||
//
|
||||
// Here we slice on off so that the loop in trigger
|
||||
// can continue using it's old reference to loop without
|
||||
// messing up the order.
|
||||
|
||||
this.listeners[type] = this.listeners[type].slice(0);
|
||||
this.listeners[type].splice(index, 1);
|
||||
return index > -1;
|
||||
}
|
||||
/**
|
||||
* Trigger an event of the specified type on this stream. Any additional
|
||||
* arguments to this function are passed as parameters to event listeners.
|
||||
*
|
||||
* @param {string} type the event name
|
||||
*/
|
||||
;
|
||||
|
||||
_proto.trigger = function trigger(type) {
|
||||
var callbacks = this.listeners[type];
|
||||
|
||||
if (!callbacks) {
|
||||
return;
|
||||
} // Slicing the arguments on every invocation of this method
|
||||
// can add a significant amount of overhead. Avoid the
|
||||
// intermediate object creation for the common case of a
|
||||
// single callback argument
|
||||
|
||||
|
||||
if (arguments.length === 2) {
|
||||
var length = callbacks.length;
|
||||
|
||||
for (var i = 0; i < length; ++i) {
|
||||
callbacks[i].call(this, arguments[1]);
|
||||
}
|
||||
} else {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
var _length = callbacks.length;
|
||||
|
||||
for (var _i = 0; _i < _length; ++_i) {
|
||||
callbacks[_i].apply(this, args);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Destroys the stream and cleans up.
|
||||
*/
|
||||
;
|
||||
|
||||
_proto.dispose = function dispose() {
|
||||
this.listeners = {};
|
||||
}
|
||||
/**
|
||||
* Forwards all `data` events on this stream to the destination stream. The
|
||||
* destination stream should provide a method `push` to receive the data
|
||||
* events as they arrive.
|
||||
*
|
||||
* @param {Stream} destination the stream that will receive all `data` events
|
||||
* @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
|
||||
*/
|
||||
;
|
||||
|
||||
_proto.pipe = function pipe(destination) {
|
||||
this.on('data', function (data) {
|
||||
destination.push(data);
|
||||
});
|
||||
};
|
||||
|
||||
return Stream;
|
||||
}();
|
||||
|
||||
var stream = Stream;
|
||||
|
||||
/*! @name pkcs7 @version 1.0.4 @license Apache-2.0 */
|
||||
|
||||
/**
|
||||
* Returns the subarray of a Uint8Array without PKCS#7 padding.
|
||||
*
|
||||
* @param padded {Uint8Array} unencrypted bytes that have been padded
|
||||
* @return {Uint8Array} the unpadded bytes
|
||||
* @see http://tools.ietf.org/html/rfc5652
|
||||
*/
|
||||
function unpad(padded) {
|
||||
return padded.subarray(0, padded.byteLength - padded[padded.byteLength - 1]);
|
||||
}
|
||||
|
||||
/*! @name aes-decrypter @version 3.0.2 @license Apache-2.0 */
|
||||
|
||||
/**
|
||||
* @file aes.js
|
||||
*
|
||||
* This file contains an adaptation of the AES decryption algorithm
|
||||
* from the Standford Javascript Cryptography Library. That work is
|
||||
* covered by the following copyright and permissions notice:
|
||||
*
|
||||
* Copyright 2009-2010 Emily Stark, Mike Hamburg, Dan Boneh.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials provided
|
||||
* with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
||||
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* The views and conclusions contained in the software and documentation
|
||||
* are those of the authors and should not be interpreted as representing
|
||||
* official policies, either expressed or implied, of the authors.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Expand the S-box tables.
|
||||
*
|
||||
* @private
|
||||
*/
|
||||
  var precompute = function precompute() {
    // tables[0] holds the encryption tables, tables[1] the decryption
    // tables; index 4 of each is the (inverse) S-box itself
    var tables = [[[], [], [], [], []], [[], [], [], [], []]];
    var encTable = tables[0];
    var decTable = tables[1];
    var sbox = encTable[4];
    var sboxInv = decTable[4];
    var i;
    var x;
    var xInv;
    var d = [];
    var th = [];
    var x2;
    var x4;
    var x8;
    var s;
    var tEnc;
    var tDec; // Compute double and third tables
    // (doubling is GF(2^8) multiplication by 2 with the AES polynomial,
    // 283 = 0x11B; `th` inverts multiplication by 3)

    for (i = 0; i < 256; i++) {
      th[(d[i] = i << 1 ^ (i >> 7) * 283) ^ i] = i;
    }

    // walk x through successive multiples of 3 in GF(2^8) (a generator of
    // the field's multiplicative group) so every byte value is visited once
    for (x = xInv = 0; !sbox[x]; x ^= x2 || 1, xInv = th[xInv] || 1) {
      // Compute sbox
      s = xInv ^ xInv << 1 ^ xInv << 2 ^ xInv << 3 ^ xInv << 4;
      s = s >> 8 ^ s & 255 ^ 99;
      sbox[x] = s;
      sboxInv[s] = x; // Compute MixColumns

      x8 = d[x4 = d[x2 = d[x]]];
      tDec = x8 * 0x1010101 ^ x4 * 0x10001 ^ x2 * 0x101 ^ x * 0x1010100;
      tEnc = d[s] * 0x101 ^ s * 0x1010100;

      // each of the four round tables is the previous one rotated by 8 bits
      for (i = 0; i < 4; i++) {
        encTable[i][x] = tEnc = tEnc << 24 ^ tEnc >>> 8;
        decTable[i][s] = tDec = tDec << 24 ^ tDec >>> 8;
      }
    } // Compactify. Considerable speedup on Firefox.


    for (i = 0; i < 5; i++) {
      encTable[i] = encTable[i].slice(0);
      decTable[i] = decTable[i].slice(0);
    }

    return tables;
  };

  // lazily-initialized cache of the precomputed tables, shared by all AES
  // instances (each instance copies it; see the AES constructor)
  var aesTables = null;
|
||||
/**
|
||||
* Schedule out an AES key for both encryption and decryption. This
|
||||
* is a low-level class. Use a cipher mode to do bulk encryption.
|
||||
*
|
||||
* @class AES
|
||||
* @param key {Array} The key as an array of 4, 6 or 8 words.
|
||||
*/
|
||||
|
||||
  var AES =
  /*#__PURE__*/
  function () {
    function AES(key) {
      /**
       * The expanded S-box and inverse S-box tables. These will be computed
       * on the client so that we don't have to send them down the wire.
       *
       * There are two tables, _tables[0] is for encryption and
       * _tables[1] is for decryption.
       *
       * The first 4 sub-tables are the expanded S-box with MixColumns. The
       * last (_tables[01][4]) is the S-box itself.
       *
       * @private
       */
      // if we have yet to precompute the S-box tables
      // do so now
      if (!aesTables) {
        aesTables = precompute();
      } // then make a copy of that object for use


      this._tables = [[aesTables[0][0].slice(), aesTables[0][1].slice(), aesTables[0][2].slice(), aesTables[0][3].slice(), aesTables[0][4].slice()], [aesTables[1][0].slice(), aesTables[1][1].slice(), aesTables[1][2].slice(), aesTables[1][3].slice(), aesTables[1][4].slice()]];
      var i;
      var j;
      var tmp;
      var sbox = this._tables[0][4];
      var decTable = this._tables[1];
      var keyLen = key.length;
      var rcon = 1; // the AES round constant, updated in GF(2^8) each round

      // AES-128/192/256 keys are 4, 6 or 8 32-bit words respectively
      if (keyLen !== 4 && keyLen !== 6 && keyLen !== 8) {
        throw new Error('Invalid aes key size');
      }

      var encKey = key.slice(0);
      var decKey = [];
      this._key = [encKey, decKey]; // schedule encryption keys

      for (i = keyLen; i < 4 * keyLen + 28; i++) {
        tmp = encKey[i - 1]; // apply sbox

        if (i % keyLen === 0 || keyLen === 8 && i % keyLen === 4) {
          tmp = sbox[tmp >>> 24] << 24 ^ sbox[tmp >> 16 & 255] << 16 ^ sbox[tmp >> 8 & 255] << 8 ^ sbox[tmp & 255]; // shift rows and add rcon

          if (i % keyLen === 0) {
            tmp = tmp << 8 ^ tmp >>> 24 ^ rcon << 24;
            rcon = rcon << 1 ^ (rcon >> 7) * 283;
          }
        }

        encKey[i] = encKey[i - keyLen] ^ tmp;
      } // schedule decryption keys
      // (the encryption schedule read in reverse; middle-round words are
      // passed through the inverse-MixColumns tables)


      for (j = 0; i; j++, i--) {
        tmp = encKey[j & 3 ? i : i - 4];

        if (i <= 4 || j < 4) {
          decKey[j] = tmp;
        } else {
          decKey[j] = decTable[0][sbox[tmp >>> 24]] ^ decTable[1][sbox[tmp >> 16 & 255]] ^ decTable[2][sbox[tmp >> 8 & 255]] ^ decTable[3][sbox[tmp & 255]];
        }
      }
    }
    /**
     * Decrypt 16 bytes, specified as four 32-bit words.
     *
     * @param {number} encrypted0 the first word to decrypt
     * @param {number} encrypted1 the second word to decrypt
     * @param {number} encrypted2 the third word to decrypt
     * @param {number} encrypted3 the fourth word to decrypt
     * @param {Int32Array} out the array to write the decrypted words
     * into
     * @param {number} offset the offset into the output array to start
     * writing results
     * @return {Array} The plaintext.
     */


    var _proto = AES.prototype;

    _proto.decrypt = function decrypt(encrypted0, encrypted1, encrypted2, encrypted3, out, offset) {
      var key = this._key[1]; // state variables a,b,c,d are loaded with pre-whitened data

      var a = encrypted0 ^ key[0];
      var b = encrypted3 ^ key[1];
      var c = encrypted2 ^ key[2];
      var d = encrypted1 ^ key[3];
      var a2;
      var b2;
      var c2; // key.length === 2 ?

      var nInnerRounds = key.length / 4 - 2;
      var i;
      var kIndex = 4;
      var table = this._tables[1]; // load up the tables

      var table0 = table[0];
      var table1 = table[1];
      var table2 = table[2];
      var table3 = table[3];
      var sbox = table[4]; // Inner rounds. Cribbed from OpenSSL.

      for (i = 0; i < nInnerRounds; i++) {
        a2 = table0[a >>> 24] ^ table1[b >> 16 & 255] ^ table2[c >> 8 & 255] ^ table3[d & 255] ^ key[kIndex];
        b2 = table0[b >>> 24] ^ table1[c >> 16 & 255] ^ table2[d >> 8 & 255] ^ table3[a & 255] ^ key[kIndex + 1];
        c2 = table0[c >>> 24] ^ table1[d >> 16 & 255] ^ table2[a >> 8 & 255] ^ table3[b & 255] ^ key[kIndex + 2];
        d = table0[d >>> 24] ^ table1[a >> 16 & 255] ^ table2[b >> 8 & 255] ^ table3[c & 255] ^ key[kIndex + 3];
        kIndex += 4;
        a = a2;
        b = b2;
        c = c2;
      } // Last round.
      // (uses the plain S-box only -- no MixColumns -- per the AES spec)


      for (i = 0; i < 4; i++) {
        out[(3 & -i) + offset] = sbox[a >>> 24] << 24 ^ sbox[b >> 16 & 255] << 16 ^ sbox[c >> 8 & 255] << 8 ^ sbox[d & 255] ^ key[kIndex++];
        a2 = a;
        a = b;
        b = c;
        c = d;
        d = a2;
      }
    };

    return AES;
  }();
|
||||
|
||||
/**
|
||||
* A wrapper around the Stream class to use setTimeout
|
||||
* and run stream "jobs" Asynchronously
|
||||
*
|
||||
* @class AsyncStream
|
||||
* @extends Stream
|
||||
*/
|
||||
|
||||
  var AsyncStream =
  /*#__PURE__*/
  function (_Stream) {
    inheritsLoose(AsyncStream, _Stream);

    function AsyncStream() {
      var _this;

      // NOTE(review): the `stream` argument passed to the super call is
      // ignored by Stream's constructor; it appears to be harmless
      // transpilation residue -- confirm against upstream aes-decrypter.
      _this = _Stream.call(this, stream) || this;
      _this.jobs = [];
      _this.delay = 1; // ms between scheduled jobs
      _this.timeout_ = null;
      return _this;
    }
    /**
     * process an async job
     *
     * @private
     */


    var _proto = AsyncStream.prototype;

    _proto.processJob_ = function processJob_() {
      // run the oldest queued job, then schedule the next one (if any)
      this.jobs.shift()();

      if (this.jobs.length) {
        this.timeout_ = setTimeout(this.processJob_.bind(this), this.delay);
      } else {
        this.timeout_ = null;
      }
    }
    /**
     * push a job into the stream
     *
     * @param {Function} job the job to push into the stream
     */
    ;

    _proto.push = function push(job) {
      this.jobs.push(job);

      // kick off processing unless a run is already scheduled
      if (!this.timeout_) {
        this.timeout_ = setTimeout(this.processJob_.bind(this), this.delay);
      }
    };

    return AsyncStream;
  }(stream);
|
||||
|
||||
/**
|
||||
* Convert network-order (big-endian) bytes into their little-endian
|
||||
* representation.
|
||||
*/
|
||||
|
||||
var ntoh = function ntoh(word) {
|
||||
return word << 24 | (word & 0xff00) << 8 | (word & 0xff0000) >> 8 | word >>> 24;
|
||||
};
|
||||
/**
|
||||
* Decrypt bytes using AES-128 with CBC and PKCS#7 padding.
|
||||
*
|
||||
* @param {Uint8Array} encrypted the encrypted bytes
|
||||
* @param {Uint32Array} key the bytes of the decryption key
|
||||
* @param {Uint32Array} initVector the initialization vector (IV) to
|
||||
* use for the first round of CBC.
|
||||
* @return {Uint8Array} the decrypted bytes
|
||||
*
|
||||
* @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
|
||||
* @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Block_Chaining_.28CBC.29
|
||||
* @see https://tools.ietf.org/html/rfc2315
|
||||
*/
|
||||
|
||||
|
||||
  var decrypt = function decrypt(encrypted, key, initVector) {
    // word-level access to the encrypted bytes
    var encrypted32 = new Int32Array(encrypted.buffer, encrypted.byteOffset, encrypted.byteLength >> 2);
    var decipher = new AES(Array.prototype.slice.call(key)); // byte and word-level access for the decrypted output

    var decrypted = new Uint8Array(encrypted.byteLength);
    var decrypted32 = new Int32Array(decrypted.buffer); // temporary variables for working with the IV, encrypted, and
    // decrypted data

    var init0;
    var init1;
    var init2;
    var init3;
    var encrypted0;
    var encrypted1;
    var encrypted2;
    var encrypted3; // iteration variable

    var wordIx; // pull out the words of the IV to ensure we don't modify the
    // passed-in reference and easier access

    init0 = initVector[0];
    init1 = initVector[1];
    init2 = initVector[2];
    init3 = initVector[3]; // decrypt four word sequences, applying cipher-block chaining (CBC)
    // to each decrypted block

    for (wordIx = 0; wordIx < encrypted32.length; wordIx += 4) {
      // convert big-endian (network order) words into little-endian
      // (javascript order)
      encrypted0 = ntoh(encrypted32[wordIx]);
      encrypted1 = ntoh(encrypted32[wordIx + 1]);
      encrypted2 = ntoh(encrypted32[wordIx + 2]);
      encrypted3 = ntoh(encrypted32[wordIx + 3]); // decrypt the block

      decipher.decrypt(encrypted0, encrypted1, encrypted2, encrypted3, decrypted32, wordIx); // XOR with the IV, and restore network byte-order to obtain the
      // plaintext

      decrypted32[wordIx] = ntoh(decrypted32[wordIx] ^ init0);
      decrypted32[wordIx + 1] = ntoh(decrypted32[wordIx + 1] ^ init1);
      decrypted32[wordIx + 2] = ntoh(decrypted32[wordIx + 2] ^ init2);
      decrypted32[wordIx + 3] = ntoh(decrypted32[wordIx + 3] ^ init3); // setup the IV for the next round
      // (CBC: each ciphertext block is the IV for the following block)

      init0 = encrypted0;
      init1 = encrypted1;
      init2 = encrypted2;
      init3 = encrypted3;
    }

    return decrypted;
  };
|
||||
/**
|
||||
* The `Decrypter` class that manages decryption of AES
|
||||
* data through `AsyncStream` objects and the `decrypt`
|
||||
* function
|
||||
*
|
||||
* @param {Uint8Array} encrypted the encrypted bytes
|
||||
* @param {Uint32Array} key the bytes of the decryption key
|
||||
* @param {Uint32Array} initVector the initialization vector (IV) to
|
||||
* @param {Function} done the function to run when done
|
||||
* @class Decrypter
|
||||
*/
|
||||
|
||||
|
||||
  var Decrypter =
  /*#__PURE__*/
  function () {
    function Decrypter(encrypted, key, initVector, done) {
      // NOTE(review): STEP indexes an Int32Array, so it counts 32-bit words
      // (32000 words = 128000 bytes per chunk), despite the getter's doc
      // below talking about bytes.
      var step = Decrypter.STEP;
      // NOTE(review): this view starts at buffer offset 0 and ignores
      // encrypted.byteOffset -- callers appear to always pass a view over a
      // whole buffer; confirm before reusing with subarray views.
      var encrypted32 = new Int32Array(encrypted.buffer);
      var decrypted = new Uint8Array(encrypted.byteLength);
      var i = 0;
      this.asyncStream_ = new AsyncStream(); // split up the encryption job and do the individual chunks asynchronously

      this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));

      for (i = step; i < encrypted32.length; i += step) {
        // each subsequent chunk uses the last ciphertext block of the
        // previous chunk as its IV, preserving CBC chaining across chunks
        initVector = new Uint32Array([ntoh(encrypted32[i - 4]), ntoh(encrypted32[i - 3]), ntoh(encrypted32[i - 2]), ntoh(encrypted32[i - 1])]);
        this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));
      } // invoke the done() callback when everything is finished


      this.asyncStream_.push(function () {
        // remove pkcs#7 padding from the decrypted bytes
        done(null, unpad(decrypted));
      });
    }
    /**
     * a getter for step the maximum number of bytes to process at one time
     *
     * @return {number} the value of step 32000
     */


    var _proto = Decrypter.prototype;

    /**
     * @private
     */
    _proto.decryptChunk_ = function decryptChunk_(encrypted, key, initVector, decrypted) {
      // defer the actual decryption into a closure so AsyncStream can run
      // it later on a timeout
      return function () {
        var bytes = decrypt(encrypted, key, initVector);
        decrypted.set(bytes, encrypted.byteOffset);
      };
    };

    createClass(Decrypter, null, [{
      key: "STEP",
      get: function get() {
        // 4 * 8000;
        return 32000;
      }
    }]);

    return Decrypter;
  }();
|
||||
|
||||
/**
|
||||
* @file bin-utils.js
|
||||
*/
|
||||
/**
|
||||
* Creates an object for sending to a web worker modifying properties that are TypedArrays
|
||||
* into a new object with seperated properties for the buffer, byteOffset, and byteLength.
|
||||
*
|
||||
* @param {Object} message
|
||||
* Object of properties and values to send to the web worker
|
||||
* @return {Object}
|
||||
* Modified message with TypedArray values expanded
|
||||
* @function createTransferableMessage
|
||||
*/
|
||||
|
||||
|
||||
var createTransferableMessage = function createTransferableMessage(message) {
|
||||
var transferable = {};
|
||||
Object.keys(message).forEach(function (key) {
|
||||
var value = message[key];
|
||||
|
||||
if (ArrayBuffer.isView(value)) {
|
||||
transferable[key] = {
|
||||
bytes: value.buffer,
|
||||
byteOffset: value.byteOffset,
|
||||
byteLength: value.byteLength
|
||||
};
|
||||
} else {
|
||||
transferable[key] = value;
|
||||
}
|
||||
});
|
||||
return transferable;
|
||||
};
|
||||
|
||||
/* global self */
|
||||
/**
|
||||
* Our web worker interface so that things can talk to aes-decrypter
|
||||
* that will be running in a web worker. the scope is passed to this by
|
||||
* webworkify.
|
||||
*
|
||||
* @param {Object} self
|
||||
* the scope for the web worker
|
||||
*/
|
||||
|
||||
var DecrypterWorker = function DecrypterWorker(self) {
|
||||
self.onmessage = function (event) {
|
||||
var data = event.data;
|
||||
var encrypted = new Uint8Array(data.encrypted.bytes, data.encrypted.byteOffset, data.encrypted.byteLength);
|
||||
var key = new Uint32Array(data.key.bytes, data.key.byteOffset, data.key.byteLength / 4);
|
||||
var iv = new Uint32Array(data.iv.bytes, data.iv.byteOffset, data.iv.byteLength / 4);
|
||||
/* eslint-disable no-new, handle-callback-err */
|
||||
|
||||
new Decrypter(encrypted, key, iv, function (err, bytes) {
|
||||
self.postMessage(createTransferableMessage({
|
||||
source: data.source,
|
||||
decrypted: bytes
|
||||
}), [bytes.buffer]);
|
||||
});
|
||||
/* eslint-enable */
|
||||
};
|
||||
};
|
||||
|
||||
var decrypterWorker = new DecrypterWorker(self);
|
||||
|
||||
return decrypterWorker;
|
||||
|
||||
}());
|
||||
226
build/javascript/node_modules/@videojs/http-streaming/src/manifest.js
generated
vendored
Normal file
226
build/javascript/node_modules/@videojs/http-streaming/src/manifest.js
generated
vendored
Normal file
@@ -0,0 +1,226 @@
|
||||
import videojs from 'video.js';
|
||||
import window from 'global/window';
|
||||
import { Parser as M3u8Parser } from 'm3u8-parser';
|
||||
import { resolveUrl } from './resolve-url';
|
||||
|
||||
const { log } = videojs;
|
||||
|
||||
/**
 * Builds a unique playlist ID from the playlist's index within the master
 * manifest and its URI.
 *
 * @param {number} index the playlist's index in the master playlist list
 * @param {string} uri the playlist's URI
 * @return {string} the playlist ID in the form "<index>-<uri>"
 */
export const createPlaylistID = (index, uri) => `${index}-${uri}`;
|
||||
|
||||
/**
|
||||
* Parses a given m3u8 playlist
|
||||
*
|
||||
* @param {string} manifestString
|
||||
* The downloaded manifest string
|
||||
* @param {Object[]} [customTagParsers]
|
||||
* An array of custom tag parsers for the m3u8-parser instance
|
||||
* @param {Object[]} [customTagMappers]
|
||||
* An array of custom tag mappers for the m3u8-parser instance
|
||||
* @return {Object}
|
||||
* The manifest object
|
||||
*/
|
||||
/**
 * Parses a given m3u8 playlist
 *
 * @param {string} manifestString
 *        The downloaded manifest string
 * @param {Object[]} [customTagParsers]
 *        An array of custom tag parsers for the m3u8-parser instance
 * @param {Object[]} [customTagMappers]
 *        An array of custom tag mappers for the m3u8-parser instance
 * @return {Object}
 *         The manifest object
 */
export const parseManifest = ({
  manifestString,
  customTagParsers = [],
  customTagMappers = []
}) => {
  const parser = new M3u8Parser();

  // Register any user-supplied extensions before feeding in the manifest.
  for (const customParser of customTagParsers) {
    parser.addParser(customParser);
  }
  for (const tagMapper of customTagMappers) {
    parser.addTagMapper(tagMapper);
  }

  parser.push(manifestString);
  parser.end();

  return parser.manifest;
};
|
||||
|
||||
/**
|
||||
* Loops through all supported media groups in master and calls the provided
|
||||
* callback for each group
|
||||
*
|
||||
* @param {Object} master
|
||||
* The parsed master manifest object
|
||||
* @param {Function} callback
|
||||
* Callback to call for each media group
|
||||
*/
|
||||
/**
 * Loops through all supported media groups in master and calls the provided
 * callback for each group. Only the AUDIO and SUBTITLES group types are
 * visited.
 *
 * @param {Object} master
 *        The parsed master manifest object
 * @param {Function} callback
 *        Called as (mediaProperties, mediaType, groupKey, labelKey) for each
 *        media group entry
 */
export const forEachMediaGroup = (master, callback) => {
  for (const mediaType of ['AUDIO', 'SUBTITLES']) {
    const groups = master.mediaGroups[mediaType];

    for (const groupKey in groups) {
      for (const labelKey in groups[groupKey]) {
        callback(groups[groupKey][labelKey], mediaType, groupKey, labelKey);
      }
    }
  }
};
|
||||
|
||||
/**
|
||||
* Adds properties and attributes to the playlist to keep consistent functionality for
|
||||
* playlists throughout VHS.
|
||||
*
|
||||
* @param {Object} config
|
||||
* Arguments object
|
||||
* @param {Object} config.playlist
|
||||
* The media playlist
|
||||
* @param {string} [config.uri]
|
||||
* The uri to the media playlist (if media playlist is not from within a master
|
||||
* playlist)
|
||||
* @param {string} id
|
||||
* ID to use for the playlist
|
||||
*/
|
||||
/**
 * Adds properties and attributes to the playlist to keep consistent
 * functionality for playlists throughout VHS.
 *
 * @param {Object} config
 *        Arguments object
 * @param {Object} config.playlist
 *        The media playlist
 * @param {string} [config.uri]
 *        The uri to the media playlist (if media playlist is not from within
 *        a master playlist)
 * @param {string} id
 *        ID to use for the playlist
 */
export const setupMediaPlaylist = ({ playlist, uri, id }) => {
  playlist.id = id;

  // HLS media playlists do not contain their own source URI, so attach the
  // one the caller resolved, when available, for consistency in VHS.
  if (uri) {
    playlist.uri = uri;
  }

  // Guarantee an attributes object exists -- certain attributes MUST be
  // defined per spec, but the stream may still play without them, and the
  // rest of the project dereferences playlist.attributes freely.
  if (!playlist.attributes) {
    playlist.attributes = {};
  }
};
|
||||
|
||||
/**
|
||||
* Adds ID, resolvedUri, and attributes properties to each playlist of the master, where
|
||||
* necessary. In addition, creates playlist IDs for each playlist and adds playlist ID to
|
||||
* playlist references to the playlists array.
|
||||
*
|
||||
* @param {Object} master
|
||||
* The master playlist
|
||||
*/
|
||||
/**
 * Adds ID, resolvedUri, and attributes properties to each playlist of the
 * master, where necessary. In addition, creates playlist IDs for each
 * playlist and adds playlist ID references to the playlists array.
 *
 * @param {Object} master
 *        The master playlist
 */
export const setupMediaPlaylists = (master) => {
  // Iterate from the end, matching the original while(i--) order so that,
  // given duplicate URIs, the lowest-index playlist wins the URI key.
  for (let i = master.playlists.length - 1; i >= 0; i--) {
    const playlist = master.playlists[i];

    setupMediaPlaylist({
      playlist,
      id: createPlaylistID(i, playlist.uri)
    });
    playlist.resolvedUri = resolveUrl(master.uri, playlist.uri);

    // Allow lookups by ID and, for backwards compatibility, by URI.
    master.playlists[playlist.id] = playlist;
    master.playlists[playlist.uri] = playlist;

    // BANDWIDTH is required on #EXT-X-STREAM-INF by the spec, but playback
    // still works without it; warn so authors can fix the manifest.
    if (!playlist.attributes.BANDWIDTH) {
      log.warn('Invalid playlist STREAM-INF detected. Missing BANDWIDTH attribute.');
    }
  }
};
|
||||
|
||||
/**
 * Adds resolvedUri properties to each media group entry that declares a uri.
 *
 * @param {Object} master
 *        The master playlist
 */
export const resolveMediaGroupUris = (master) => {
  forEachMediaGroup(master, (properties) => {
    // entries without a uri (e.g. muxed audio) have nothing to resolve
    if (!properties.uri) {
      return;
    }

    properties.resolvedUri = resolveUrl(master.uri, properties.uri);
  });
};
|
||||
|
||||
/**
 * Creates a master playlist wrapper to insert a sole media playlist into.
 *
 * @param {Object} media
 *        Media playlist
 * @param {string} uri
 *        The media URI
 *
 * @return {Object}
 *         Master playlist
 */
export const masterForMedia = (media, uri) => {
  const id = createPlaylistID(0, uri);
  // m3u8-parser does not attach an attributes property to media playlists, so
  // attach an empty one here to avoid undefined reference errors downstream
  const soloPlaylist = {
    uri,
    id,
    resolvedUri: uri,
    attributes: {}
  };
  const master = {
    mediaGroups: {
      'AUDIO': {},
      'VIDEO': {},
      'CLOSED-CAPTIONS': {},
      'SUBTITLES': {}
    },
    uri: window.location.href,
    resolvedUri: window.location.href,
    playlists: [soloPlaylist]
  };

  // expose the sole playlist by ID, and by URI for backwards compatibility
  master.playlists[id] = soloPlaylist;
  master.playlists[uri] = soloPlaylist;

  return master;
};
|
||||
|
||||
/**
 * Does an in-place update of the master manifest to add updated playlist URI
 * references as well as other properties needed by VHS that aren't included by
 * the parser.
 *
 * @param {Object} master
 *        Master manifest object
 * @param {string} uri
 *        The source URI
 */
export const addPropertiesToMaster = (master, uri) => {
  master.uri = uri;

  // Playlists are referenced by their URIs throughout VHS, but some formats
  // (e.g., DASH) don't have external URIs, so fabricate placeholders where
  // they're missing.
  // TODO: consider adding dummy URIs in mpd-parser
  master.playlists.forEach((playlist, index) => {
    if (!playlist.uri) {
      playlist.uri = `placeholder-uri-${index}`;
    }
  });

  forEachMediaGroup(master, (properties, mediaType, groupKey, labelKey) => {
    const groupPlaylist = properties.playlists && properties.playlists[0];

    // nothing to do without a playlist, or when the playlist already has a URI
    if (!groupPlaylist || groupPlaylist.uri) {
      return;
    }

    // fabricate a placeholder URI for the media group playlist as well
    const phonyUri = `placeholder-uri-${mediaType}-${groupKey}-${labelKey}`;
    const id = createPlaylistID(0, phonyUri);

    groupPlaylist.uri = phonyUri;
    groupPlaylist.id = id;
    // register ID and URI references (URI for backwards compatibility)
    master.playlists[id] = groupPlaylist;
    master.playlists[phonyUri] = groupPlaylist;
  });

  setupMediaPlaylists(master);
  resolveMediaGroupUris(master);
};
|
||||
1640
build/javascript/node_modules/@videojs/http-streaming/src/master-playlist-controller.js
generated
vendored
Normal file
1640
build/javascript/node_modules/@videojs/http-streaming/src/master-playlist-controller.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
839
build/javascript/node_modules/@videojs/http-streaming/src/media-groups.js
generated
vendored
Normal file
839
build/javascript/node_modules/@videojs/http-streaming/src/media-groups.js
generated
vendored
Normal file
@@ -0,0 +1,839 @@
|
||||
import videojs from 'video.js';
|
||||
import PlaylistLoader from './playlist-loader';
|
||||
import DashPlaylistLoader from './dash-playlist-loader';
|
||||
import noop from './util/noop';
|
||||
|
||||
/**
 * Convert the properties of an HLS track into an audioTrackKind.
 *
 * @param {Object} properties
 *        Track properties (default flag and characteristics string)
 * @return {string}
 *         'main-desc' when the characteristics mark a describes-video track,
 *         otherwise 'main' for the default track and 'alternative' for the rest
 * @private
 */
const audioTrackKind_ = (properties) => {
  const describesVideo =
    properties.characteristics &&
    properties.characteristics.indexOf('public.accessibility.describes-video') >= 0;

  // the describes-video characteristic takes precedence over the default flag
  if (describesVideo) {
    return 'main-desc';
  }

  return properties.default ? 'main' : 'alternative';
};
|
||||
|
||||
/**
 * Pause provided segment loader and playlist loader if active
 *
 * @param {SegmentLoader} segmentLoader
 *        SegmentLoader to pause
 * @param {Object} mediaType
 *        Active media type
 * @function stopLoaders
 */
export const stopLoaders = (segmentLoader, mediaType) => {
  segmentLoader.abort();
  segmentLoader.pause();

  const playlistLoader = mediaType && mediaType.activePlaylistLoader;

  if (playlistLoader) {
    playlistLoader.pause();
    mediaType.activePlaylistLoader = null;
  }
};
|
||||
|
||||
/**
 * Start loading provided segment loader and playlist loader
 *
 * @param {PlaylistLoader} playlistLoader
 *        PlaylistLoader to start loading
 * @param {Object} mediaType
 *        Active media type
 * @function startLoaders
 */
export const startLoaders = (playlistLoader, mediaType) => {
  // Segment loader will be started after `loadedmetadata` or `loadedplaylist` from the
  // playlist loader
  // NOTE(review): the loader is recorded as active before load() is called —
  // presumably so any synchronously-fired listeners observe it; confirm before
  // reordering.
  mediaType.activePlaylistLoader = playlistLoader;
  playlistLoader.load();
};
|
||||
|
||||
/**
 * Returns a function to be called when the media group changes. It performs a
 * non-destructive (preserve the buffer) resync of the SegmentLoader. This is because a
 * change of group is merely a rendition switch of the same content at another encoding,
 * rather than a change of content, such as switching audio from English to Spanish.
 *
 * @param {string} type
 *        MediaGroup type
 * @param {Object} settings
 *        Object containing required information for media groups
 * @return {Function}
 *         Handler for a non-destructive resync of SegmentLoader when the active media
 *         group changes.
 * @function onGroupChanged
 */
export const onGroupChanged = (type, settings) => () => {
  const {
    segmentLoaders: {
      [type]: segmentLoader,
      main: mainSegmentLoader
    },
    mediaTypes: { [type]: mediaType }
  } = settings;
  const track = mediaType.activeTrack();
  const group = mediaType.activeGroup(track);
  const previousLoader = mediaType.activePlaylistLoader;

  stopLoaders(segmentLoader, mediaType);

  // no active group, nothing to restart
  if (!group) {
    return;
  }

  if (!group.playlistLoader) {
    // The new active group has no loader of its own, meaning audio is muxed into
    // the main stream. If the previous group had a dedicated loader (demuxed ->
    // muxed switch), destructively reset the main segment loader; the audio
    // loaders stay stopped either way.
    if (previousLoader) {
      mainSegmentLoader.resetEverything();
    }
    return;
  }

  // same content at another encoding, so a non-destructive resync suffices
  segmentLoader.resyncLoader();

  startLoaders(group.playlistLoader, mediaType);
};
|
||||
|
||||
/**
 * Returns a handler that halts the segment loader for the given type while the
 * active media group is in the middle of changing.
 *
 * @param {string} type
 *        MediaGroup type
 * @param {Object} settings
 *        Object containing required information for media groups
 * @return {Function}
 *         Handler that aborts and pauses the type's segment loader
 * @function onGroupChanging
 */
export const onGroupChanging = (type, settings) => () => {
  const segmentLoader = settings.segmentLoaders[type];

  // stop any in-flight requests and pause fetching until the change completes
  segmentLoader.abort();
  segmentLoader.pause();
};
|
||||
|
||||
/**
 * Returns a function to be called when the media track changes. It performs a
 * destructive reset of the SegmentLoader to ensure we start loading as close to
 * currentTime as possible.
 *
 * @param {string} type
 *        MediaGroup type
 * @param {Object} settings
 *        Object containing required information for media groups
 * @return {Function}
 *         Handler for a destructive reset of SegmentLoader when the active media
 *         track changes.
 * @function onTrackChanged
 */
export const onTrackChanged = (type, settings) => () => {
  const {
    segmentLoaders: {
      [type]: segmentLoader,
      main: mainSegmentLoader
    },
    mediaTypes: { [type]: mediaType }
  } = settings;
  const track = mediaType.activeTrack();
  const group = mediaType.activeGroup(track);
  const previousLoader = mediaType.activePlaylistLoader;

  stopLoaders(segmentLoader, mediaType);

  // without an active group there is nothing to restart
  if (!group) {
    return;
  }

  if (type === 'AUDIO') {
    if (!group.playlistLoader) {
      // Switching from demuxed to muxed audio/video (the new group has no
      // dedicated playlist loader): hand audio back to the main segment loader
      // and reset it destructively. The audio segment loader is already stopped,
      // so its audio flag doesn't need to be cleared.
      mainSegmentLoader.setAudio(true);
      mainSegmentLoader.resetEverything();
      return;
    }

    // Although the segment loader is an audio segment loader, call setAudio so it
    // is prepared to re-append the init segment (or handle other config changes),
    // and stop the main loader from handling audio.
    segmentLoader.setAudio(true);
    mainSegmentLoader.setAudio(false);
  }

  if (previousLoader === group.playlistLoader) {
    // Nothing actually changed. Track change events can fire multiple times for a
    // "single" change — once for enabling the new active track and once for
    // disabling the old one — so just restart the same loader.
    startLoaders(group.playlistLoader, mediaType);
    return;
  }

  if (segmentLoader.track) {
    // for WebVTT, hand the newly active text track to the segment loader
    segmentLoader.track(track);
  }

  // a genuine content change: destructive reset so loading resumes near currentTime
  segmentLoader.resetEverything();

  startLoaders(group.playlistLoader, mediaType);
};
|
||||
|
||||
export const onError = {
  /**
   * Returns a function to be called when a SegmentLoader or PlaylistLoader encounters
   * an error.
   *
   * @param {string} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @return {Function}
   *         Error handler. Logs warning (or error if the playlist is blacklisted) to
   *         console and switches back to default audio track.
   * @function onError.AUDIO
   */
  AUDIO: (type, settings) => () => {
    const {
      segmentLoaders: { [type]: segmentLoader },
      mediaTypes: { [type]: mediaType },
      blacklistCurrentPlaylist
    } = settings;

    stopLoaders(segmentLoader, mediaType);

    // switch back to default audio track
    const activeTrack = mediaType.activeTrack();
    const activeGroup = mediaType.activeGroup();
    const id = (activeGroup.filter(group => group.default)[0] || activeGroup[0]).id;
    const defaultTrack = mediaType.tracks[id];

    if (activeTrack === defaultTrack) {
      // Default track encountered an error. All we can do now is blacklist the current
      // rendition and hope another will switch audio groups
      blacklistCurrentPlaylist({
        message: 'Problem encountered loading the default audio track.'
      });
      return;
    }

    // Fix: the two literals were previously concatenated without a separating
    // space, producing "...track.Switching..." in the log output.
    videojs.log.warn('Problem encountered loading the alternate audio track. ' +
      'Switching back to default.');

    // re-enable only the default track
    for (const trackId in mediaType.tracks) {
      mediaType.tracks[trackId].enabled = mediaType.tracks[trackId] === defaultTrack;
    }

    mediaType.onTrackChanged();
  },
  /**
   * Returns a function to be called when a SegmentLoader or PlaylistLoader encounters
   * an error.
   *
   * @param {string} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @return {Function}
   *         Error handler. Logs warning to console and disables the active subtitle track
   * @function onError.SUBTITLES
   */
  SUBTITLES: (type, settings) => () => {
    const {
      segmentLoaders: { [type]: segmentLoader },
      mediaTypes: { [type]: mediaType }
    } = settings;

    // Fix: the two literals were previously concatenated without a separating
    // space, producing "...track.Disabling..." in the log output.
    videojs.log.warn('Problem encountered loading the subtitle track. ' +
      'Disabling subtitle track.');

    stopLoaders(segmentLoader, mediaType);

    const track = mediaType.activeTrack();

    if (track) {
      track.mode = 'disabled';
    }

    mediaType.onTrackChanged();
  }
};
|
||||
|
||||
export const setupListeners = {
  /**
   * Setup event listeners for audio playlist loader
   *
   * @param {string} type
   *        MediaGroup type
   * @param {PlaylistLoader|null} playlistLoader
   *        PlaylistLoader to register listeners on
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function setupListeners.AUDIO
   */
  AUDIO: (type, playlistLoader, settings) => {
    // a missing playlist loader means the audio is muxed with the video
    if (!playlistLoader) {
      return;
    }

    const {
      tech,
      requestOptions,
      segmentLoaders: { [type]: segmentLoader }
    } = settings;

    playlistLoader.on('loadedmetadata', () => {
      const media = playlistLoader.media();

      segmentLoader.playlist(media, requestOptions);

      // start downloading segments when playback is already underway, or when
      // this isn't a live video and the preload setting permits it
      if (!tech.paused() || (media.endList && tech.preload() !== 'none')) {
        segmentLoader.load();
      }
    });

    playlistLoader.on('loadedplaylist', () => {
      segmentLoader.playlist(playlistLoader.media(), requestOptions);

      // keep the segment loader running whenever the player isn't paused
      if (!tech.paused()) {
        segmentLoader.load();
      }
    });

    playlistLoader.on('error', onError[type](type, settings));
  },
  /**
   * Setup event listeners for subtitle playlist loader
   *
   * @param {string} type
   *        MediaGroup type
   * @param {PlaylistLoader|null} playlistLoader
   *        PlaylistLoader to register listeners on
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function setupListeners.SUBTITLES
   */
  SUBTITLES: (type, playlistLoader, settings) => {
    const {
      tech,
      requestOptions,
      segmentLoaders: { [type]: segmentLoader },
      mediaTypes: { [type]: mediaType }
    } = settings;

    playlistLoader.on('loadedmetadata', () => {
      const media = playlistLoader.media();

      segmentLoader.playlist(media, requestOptions);
      // subtitles additionally need the active text track on the segment loader
      segmentLoader.track(mediaType.activeTrack());

      // start downloading segments when playback is already underway, or when
      // this isn't a live video and the preload setting permits it
      if (!tech.paused() || (media.endList && tech.preload() !== 'none')) {
        segmentLoader.load();
      }
    });

    playlistLoader.on('loadedplaylist', () => {
      segmentLoader.playlist(playlistLoader.media(), requestOptions);

      // keep the segment loader running whenever the player isn't paused
      if (!tech.paused()) {
        segmentLoader.load();
      }
    });

    playlistLoader.on('error', onError[type](type, settings));
  }
};
|
||||
|
||||
export const initialize = {
  /**
   * Setup PlaylistLoaders and AudioTracks for the audio groups
   *
   * @param {string} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function initialize.AUDIO
   */
  'AUDIO': (type, settings) => {
    const {
      vhs,
      sourceType,
      segmentLoaders: { [type]: segmentLoader },
      requestOptions,
      master: { mediaGroups, playlists },
      mediaTypes: {
        [type]: {
          groups,
          tracks
        }
      },
      masterPlaylistLoader
    } = settings;

    // force a default if we have none
    if (!mediaGroups[type] ||
        Object.keys(mediaGroups[type]).length === 0) {
      mediaGroups[type] = { main: { default: { default: true } } };
    }

    for (const groupId in mediaGroups[type]) {
      if (!groups[groupId]) {
        groups[groupId] = [];
      }

      // List of playlists that have an AUDIO attribute value matching the current
      // group ID
      const groupPlaylists = playlists.filter(playlist => {
        return playlist.attributes[type] === groupId;
      });

      for (const variantLabel in mediaGroups[type][groupId]) {
        let properties = mediaGroups[type][groupId][variantLabel];

        // List of playlists for the current group ID that have a matching uri with
        // this alternate audio variant
        const matchingPlaylists = groupPlaylists.filter(playlist => {
          return playlist.resolvedUri === properties.resolvedUri;
        });

        if (matchingPlaylists.length) {
          // If there is a playlist that has the same uri as this audio variant, assume
          // that the playlist is audio only. We delete the resolvedUri property here
          // to prevent a playlist loader from being created so that we don't have
          // both the main and audio segment loaders loading the same audio segments
          // from the same playlist.
          // NOTE(review): this mutates the shared mediaGroups structure in place —
          // confirm no other consumer relies on resolvedUri surviving.
          delete properties.resolvedUri;
        }

        let playlistLoader;

        // if vhs-json was provided as the source, and the media playlist was resolved,
        // use the resolved media playlist object
        if (sourceType === 'vhs-json' && properties.playlists) {
          playlistLoader = new PlaylistLoader(
            properties.playlists[0],
            vhs,
            requestOptions
          );
        } else if (properties.resolvedUri) {
          playlistLoader = new PlaylistLoader(
            properties.resolvedUri,
            vhs,
            requestOptions
          );
        } else if (properties.playlists && sourceType === 'dash') {
          playlistLoader = new DashPlaylistLoader(
            properties.playlists[0],
            vhs,
            requestOptions,
            masterPlaylistLoader
          );
        } else {
          // no resolvedUri means the audio is muxed with the video when using this
          // audio track
          playlistLoader = null;
        }

        // attach the variant's ID and its loader; existing keys in properties win
        properties = videojs.mergeOptions(
          { id: variantLabel, playlistLoader },
          properties
        );

        setupListeners[type](type, properties.playlistLoader, settings);

        groups[groupId].push(properties);

        // create the AudioTrack once per variant label; tracks start disabled and
        // are enabled later by the group setup logic
        if (typeof tracks[variantLabel] === 'undefined') {
          const track = new videojs.AudioTrack({
            id: variantLabel,
            kind: audioTrackKind_(properties),
            enabled: false,
            language: properties.language,
            default: properties.default,
            label: variantLabel
          });

          tracks[variantLabel] = track;
        }
      }
    }

    // setup single error event handler for the segment loader
    segmentLoader.on('error', onError[type](type, settings));
  },
  /**
   * Setup PlaylistLoaders and TextTracks for the subtitle groups
   *
   * @param {string} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function initialize.SUBTITLES
   */
  'SUBTITLES': (type, settings) => {
    const {
      tech,
      vhs,
      sourceType,
      segmentLoaders: { [type]: segmentLoader },
      requestOptions,
      master: { mediaGroups },
      mediaTypes: {
        [type]: {
          groups,
          tracks
        }
      },
      masterPlaylistLoader
    } = settings;

    for (const groupId in mediaGroups[type]) {
      if (!groups[groupId]) {
        groups[groupId] = [];
      }

      for (const variantLabel in mediaGroups[type][groupId]) {
        if (mediaGroups[type][groupId][variantLabel].forced) {
          // Subtitle playlists with the forced attribute are not selectable in Safari.
          // According to Apple's HLS Authoring Specification:
          //   If content has forced subtitles and regular subtitles in a given language,
          //   the regular subtitles track in that language MUST contain both the forced
          //   subtitles and the regular subtitles for that language.
          // Because of this requirement and that Safari does not add forced subtitles,
          // forced subtitles are skipped here to maintain consistent experience across
          // all platforms
          continue;
        }

        let properties = mediaGroups[type][groupId][variantLabel];

        let playlistLoader;

        if (sourceType === 'hls') {
          playlistLoader =
            new PlaylistLoader(properties.resolvedUri, vhs, requestOptions);
        } else if (sourceType === 'dash') {
          playlistLoader = new DashPlaylistLoader(
            properties.playlists[0],
            vhs,
            requestOptions,
            masterPlaylistLoader
          );
        } else if (sourceType === 'vhs-json') {
          playlistLoader = new PlaylistLoader(
            // if the vhs-json object included the media playlist, use the media playlist
            // as provided, otherwise use the resolved URI to load the playlist
            properties.playlists ? properties.playlists[0] : properties.resolvedUri,
            vhs,
            requestOptions
          );
        }
        // NOTE(review): for any other sourceType, playlistLoader remains
        // undefined here — confirm this is intentional.

        properties = videojs.mergeOptions({
          id: variantLabel,
          playlistLoader
        }, properties);

        setupListeners[type](type, properties.playlistLoader, settings);

        groups[groupId].push(properties);

        // create the remote TextTrack once per variant label
        if (typeof tracks[variantLabel] === 'undefined') {
          const track = tech.addRemoteTextTrack({
            id: variantLabel,
            kind: 'subtitles',
            default: properties.default && properties.autoselect,
            language: properties.language,
            label: variantLabel
          }, false).track;

          tracks[variantLabel] = track;
        }
      }
    }

    // setup single error event handler for the segment loader
    segmentLoader.on('error', onError[type](type, settings));
  },
  /**
   * Setup TextTracks for the closed-caption groups
   *
   * @param {String} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function initialize['CLOSED-CAPTIONS']
   */
  'CLOSED-CAPTIONS': (type, settings) => {
    const {
      tech,
      master: { mediaGroups },
      mediaTypes: {
        [type]: {
          groups,
          tracks
        }
      }
    } = settings;

    for (const groupId in mediaGroups[type]) {
      if (!groups[groupId]) {
        groups[groupId] = [];
      }

      for (const variantLabel in mediaGroups[type][groupId]) {
        const properties = mediaGroups[type][groupId][variantLabel];

        // We only support CEA608 captions for now, so ignore anything that
        // doesn't use a CCx INSTREAM-ID
        if (!properties.instreamId.match(/CC\d/)) {
          continue;
        }

        // No PlaylistLoader is required for Closed-Captions because the captions are
        // embedded within the video stream
        groups[groupId].push(videojs.mergeOptions({ id: variantLabel }, properties));

        // create the remote TextTrack once per variant label; note the track id
        // uses the INSTREAM-ID rather than the variant label
        if (typeof tracks[variantLabel] === 'undefined') {
          const track = tech.addRemoteTextTrack({
            id: properties.instreamId,
            kind: 'captions',
            default: properties.default && properties.autoselect,
            language: properties.language,
            label: variantLabel
          }, false).track;

          tracks[variantLabel] = track;
        }
      }
    }
  }
};
|
||||
|
||||
/**
 * Returns a function used to get the active group of the provided type
 *
 * @param {string} type
 *        MediaGroup type
 * @param {Object} settings
 *        Object containing required information for media groups
 * @return {Function}
 *         Function that returns the active media group for the provided type. Takes an
 *         optional parameter {TextTrack} track. If no track is provided, a list of all
 *         variants in the group, otherwise the variant corresponding to the provided
 *         track is returned.
 * @function activeGroup
 */
export const activeGroup = (type, settings) => (track) => {
  const {
    masterPlaylistLoader,
    mediaTypes: { [type]: { groups } }
  } = settings;

  const media = masterPlaylistLoader.media();

  // no media selected yet, so there can be no active group
  if (!media) {
    return null;
  }

  let variants = null;

  if (media.attributes[type]) {
    variants = groups[media.attributes[type]];
  }

  // fall back to the main group when the media doesn't reference one
  variants = variants || groups.main;

  if (typeof track === 'undefined') {
    return variants;
  }

  if (track === null) {
    // An active track was specified so a corresponding group is expected. track === null
    // means no track is currently active so there is no corresponding group
    return null;
  }

  return variants.find((props) => props.id === track.id) || null;
};
|
||||
|
||||
export const activeTrack = {
  /**
   * Returns a function used to get the active track of type provided
   *
   * @param {string} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @return {Function}
   *         Function that returns the active media track for the provided type. Returns
   *         null if no track is active
   * @function activeTrack.AUDIO
   */
  AUDIO: (type, settings) => () => {
    const { tracks } = settings.mediaTypes[type];

    // the active audio track is the (single) enabled one
    for (const id of Object.keys(tracks)) {
      if (tracks[id].enabled) {
        return tracks[id];
      }
    }

    return null;
  },
  /**
   * Returns a function used to get the active track of type provided
   *
   * @param {string} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @return {Function}
   *         Function that returns the active media track for the provided type. Returns
   *         null if no track is active
   * @function activeTrack.SUBTITLES
   */
  SUBTITLES: (type, settings) => () => {
    const { tracks } = settings.mediaTypes[type];

    // a subtitle track counts as active when it is showing or hidden (loaded but
    // not rendered); disabled tracks are inactive
    for (const id of Object.keys(tracks)) {
      if (tracks[id].mode === 'showing' || tracks[id].mode === 'hidden') {
        return tracks[id];
      }
    }

    return null;
  }
};
|
||||
|
||||
/**
 * Setup PlaylistLoaders and Tracks for media groups (Audio, Subtitles,
 * Closed-Captions) specified in the master manifest.
 *
 * @param {Object} settings
 *        Object containing required information for setting up the media groups
 * @param {Tech} settings.tech
 *        The tech of the player
 * @param {Object} settings.requestOptions
 *        XHR request options used by the segment loaders
 * @param {PlaylistLoader} settings.masterPlaylistLoader
 *        PlaylistLoader for the master source
 * @param {VhsHandler} settings.vhs
 *        VHS SourceHandler
 * @param {Object} settings.master
 *        The parsed master manifest
 * @param {Object} settings.mediaTypes
 *        Object to store the loaders, tracks, and utility methods for each media type
 * @param {Function} settings.blacklistCurrentPlaylist
 *        Blacklists the current rendition and forces a rendition switch.
 * @function setupMediaGroups
 */
export const setupMediaGroups = (settings) => {
  // create loaders/tracks for every media type before wiring up any handlers
  ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach((type) => {
    initialize[type](type, settings);
  });

  const {
    mediaTypes,
    masterPlaylistLoader,
    tech,
    vhs
  } = settings;

  // setup active group and track getters and change event handlers
  ['AUDIO', 'SUBTITLES'].forEach((type) => {
    mediaTypes[type].activeGroup = activeGroup(type, settings);
    mediaTypes[type].activeTrack = activeTrack[type](type, settings);
    mediaTypes[type].onGroupChanged = onGroupChanged(type, settings);
    mediaTypes[type].onGroupChanging = onGroupChanging(type, settings);
    mediaTypes[type].onTrackChanged = onTrackChanged(type, settings);
  });

  // DO NOT enable the default subtitle or caption track.
  // DO enable the default audio track
  const audioGroup = mediaTypes.AUDIO.activeGroup();

  if (audioGroup) {
    // prefer the variant flagged default, fall back to the first in the group
    const groupId = (audioGroup.filter(group => group.default)[0] || audioGroup[0]).id;

    mediaTypes.AUDIO.tracks[groupId].enabled = true;
    mediaTypes.AUDIO.onTrackChanged();
  }

  masterPlaylistLoader.on('mediachange', () => {
    ['AUDIO', 'SUBTITLES'].forEach(type => mediaTypes[type].onGroupChanged());
  });

  masterPlaylistLoader.on('mediachanging', () => {
    ['AUDIO', 'SUBTITLES'].forEach(type => mediaTypes[type].onGroupChanging());
  });

  // custom audio track change event handler for usage event
  const onAudioTrackChanged = () => {
    mediaTypes.AUDIO.onTrackChanged();
    tech.trigger({ type: 'usage', name: 'vhs-audio-change' });
    tech.trigger({ type: 'usage', name: 'hls-audio-change' });
  };

  tech.audioTracks().addEventListener('change', onAudioTrackChanged);
  tech.remoteTextTracks().addEventListener(
    'change',
    mediaTypes.SUBTITLES.onTrackChanged
  );

  // remove the listeners registered above when the source handler is torn down
  vhs.on('dispose', () => {
    tech.audioTracks().removeEventListener('change', onAudioTrackChanged);
    tech.remoteTextTracks().removeEventListener(
      'change',
      mediaTypes.SUBTITLES.onTrackChanged
    );
  });

  // clear existing audio tracks and add the ones we just created
  tech.clearTracks('audio');

  for (const id in mediaTypes.AUDIO.tracks) {
    tech.audioTracks().addTrack(mediaTypes.AUDIO.tracks[id]);
  }
};
|
||||
|
||||
/**
 * Creates skeleton object used to store the loaders, tracks, and utility methods for each
 * media type
 *
 * @return {Object}
 *         Object to store the loaders, tracks, and utility methods for each media type
 * @function createMediaTypes
 */
export const createMediaTypes = () => {
  // build a fresh skeleton per media type so no state is shared between types
  const emptyMediaType = () => ({
    groups: {},
    tracks: {},
    activePlaylistLoader: null,
    activeGroup: noop,
    activeTrack: noop,
    onGroupChanged: noop,
    onTrackChanged: noop
  });
  const mediaTypes = {};

  for (const type of ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS']) {
    mediaTypes[type] = emptyMediaType();
  }

  return mediaTypes;
};
|
||||
932
build/javascript/node_modules/@videojs/http-streaming/src/media-segment-request.js
generated
vendored
Normal file
932
build/javascript/node_modules/@videojs/http-streaming/src/media-segment-request.js
generated
vendored
Normal file
@@ -0,0 +1,932 @@
|
||||
import videojs from 'video.js';
|
||||
import { createTransferableMessage } from './bin-utils';
|
||||
import { stringToArrayBuffer } from './util/string-to-array-buffer';
|
||||
import { transmux } from './segment-transmuxer';
|
||||
import { probeTsSegment } from './util/segment';
|
||||
import mp4probe from 'mux.js/lib/mp4/probe';
|
||||
import { segmentXhrHeaders } from './xhr';
|
||||
import {
|
||||
detectContainerForBytes,
|
||||
isLikelyFmp4MediaSegment
|
||||
} from '@videojs/vhs-utils/dist/containers';
|
||||
|
||||
/**
 * Error codes attached to request-error objects produced by this module.
 * TIMEOUT and ABORTED are negative so they are distinguishable from
 * HTTP-style positive codes; FAILURE covers all other XHR errors.
 */
export const REQUEST_ERRORS = {
  FAILURE: 2,
  TIMEOUT: -101,
  ABORTED: -102
};
|
||||
|
||||
/**
 * Aborts every outstanding XHR in the given collection.
 *
 * @param {Object[]} activeXhrs - the XHR requests to abort
 */
const abortAll = (activeXhrs) => {
  for (const xhr of activeXhrs) {
    xhr.abort();
  }
};
|
||||
|
||||
/**
 * Gathers bandwidth stats from a completed request.
 *
 * Missing byte/time values fall back to 0 so downstream math never sees
 * `undefined`.
 *
 * @param {Object} request - the XHR request from which to gather stats
 * @return {Object} object with bandwidth, bytesReceived and roundTripTime
 */
const getRequestStats = (request) => {
  const { bandwidth, bytesReceived, roundTripTime } = request;

  return {
    bandwidth,
    bytesReceived: bytesReceived || 0,
    roundTripTime: roundTripTime || 0
  };
};
|
||||
|
||||
/**
 * Gathers in-flight bandwidth stats from an XHR progress event.
 *
 * The computed bandwidth can be Infinity when the elapsed round-trip time is
 * 0; that is acceptable because progress-time bandwidth is only used to decide
 * whether to abort a request early due to insufficient bandwidth.
 *
 * @param {Event} progressEvent - an event object from an XHR's progress event
 * @return {Object} object with bandwidth, bytesReceived and roundTripTime
 */
const getProgressStats = (progressEvent) => {
  const request = progressEvent.target;
  const roundTripTime = (Date.now() - request.requestTime) || 0;
  const bytesReceived = progressEvent.loaded;
  const bandwidth = Math.floor((bytesReceived / roundTripTime) * 8 * 1000);

  return {
    bandwidth,
    bytesReceived,
    roundTripTime
  };
};
|
||||
|
||||
/**
 * Classifies any error condition of a finished XHR into a single error
 * object, or null when the request succeeded.
 *
 * Precedence: timeout, then abort, then a generic XHR error.
 *
 * @param {Error|null} error - if non-null signals an error occured with the XHR
 * @param {Object} request - the XHR request that possibly generated the error
 * @return {Object|null} error descriptor with status, message, code and xhr
 */
const handleErrors = (error, request) => {
  let message;
  let code;

  if (request.timedout) {
    message = 'HLS request timed-out at URL: ' + request.uri;
    code = REQUEST_ERRORS.TIMEOUT;
  } else if (request.aborted) {
    message = 'HLS request aborted at URL: ' + request.uri;
    code = REQUEST_ERRORS.ABORTED;
  } else if (error) {
    message = 'HLS request errored at URL: ' + request.uri;
    code = REQUEST_ERRORS.FAILURE;
  } else {
    return null;
  }

  return {
    status: request.status,
    message,
    code,
    xhr: request
  };
};
|
||||
|
||||
/**
 * Handles responses for AES-128 key data, converting the 16-byte key into
 * the Uint32Array format the decryption step expects later.
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @param {Function} finishProcessingFn - a callback to execute to continue processing
 *                                        this request
 * @return {Function} an XHR response callback (error, request)
 */
const handleKeyResponse = (segment, finishProcessingFn) => (error, request) => {
  const response = request.response;
  const errorObj = handleErrors(error, request);

  if (errorObj) {
    return finishProcessingFn(errorObj, segment);
  }

  // an AES-128 key must be exactly 16 bytes
  if (response.byteLength !== 16) {
    return finishProcessingFn({
      status: request.status,
      message: 'Invalid HLS key at URL: ' + request.uri,
      code: REQUEST_ERRORS.FAILURE,
      xhr: request
    }, segment);
  }

  // read the key as four big-endian 32-bit words
  const view = new DataView(response);
  const words = [];

  for (let offset = 0; offset < 16; offset += 4) {
    words.push(view.getUint32(offset));
  }

  segment.key.bytes = new Uint32Array(words);

  return finishProcessingFn(null, segment);
};
|
||||
|
||||
/**
 * Handle init-segment responses: validates the container, stores the raw
 * bytes on segment.map, and records per-track timescales for later probing.
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @param {Function} finishProcessingFn - a callback to execute to continue processing
 *                                        this request
 */
const handleInitSegmentResponse =
({segment, finishProcessingFn}) => (error, request) => {
  const response = request.response;
  const errorObj = handleErrors(error, request);

  if (errorObj) {
    return finishProcessingFn(errorObj, segment);
  }

  // stop processing if received empty content
  if (response.byteLength === 0) {
    return finishProcessingFn({
      status: request.status,
      message: 'Empty HLS segment content at URL: ' + request.uri,
      code: REQUEST_ERRORS.FAILURE,
      xhr: request
    }, segment);
  }

  segment.map.bytes = new Uint8Array(request.response);

  const type = detectContainerForBytes(segment.map.bytes);

  // TODO: We should also handle ts init segments here, but we
  // only know how to parse mp4 init segments at the moment
  if (type !== 'mp4') {
    return finishProcessingFn({
      status: request.status,
      message: `Found unsupported ${type || 'unknown'} container for initialization segment at URL: ${request.uri}`,
      code: REQUEST_ERRORS.FAILURE,
      // flag so callers can distinguish this internally-generated failure
      internal: true,
      xhr: request
    }, segment);
  }

  const tracks = mp4probe.tracks(segment.map.bytes);

  tracks.forEach(function(track) {
    segment.map.tracks = segment.map.tracks || {};

    // only support one track of each type for now
    if (segment.map.tracks[track.type]) {
      return;
    }

    segment.map.tracks[track.type] = track;

    // timescales (keyed by track id) are needed later by mp4probe.startTime
    if (track.id && track.timescale) {
      segment.map.timescales = segment.map.timescales || {};
      segment.map.timescales[track.id] = track.timescale;
    }
  });

  return finishProcessingFn(null, segment);
};
|
||||
|
||||
/**
 * Response handler for segment-requests being sure to set the correct
 * property depending on whether the segment is encrypted or not.
 * Also records and keeps track of stats that are used for ABR purposes.
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @param {Function} finishProcessingFn - a callback to execute to continue processing
 *                                        this request
 * @param {string} responseType - the XHR responseType ('arraybuffer' or 'text'
 *                                when partial-append handling is enabled)
 */
const handleSegmentResponse = ({
  segment,
  finishProcessingFn,
  responseType
}) => (error, request) => {
  const response = request.response;
  const errorObj = handleErrors(error, request);

  if (errorObj) {
    return finishProcessingFn(errorObj, segment);
  }

  const newBytes =
    // although responseText "should" exist, this guard serves to prevent an error being
    // thrown for two primary cases:
    // 1. the mime type override stops working, or is not implemented for a specific
    //    browser
    // 2. when using mock XHR libraries like sinon that do not allow the override behavior
    (responseType === 'arraybuffer' || !request.responseText) ?
      request.response :
      // for text responses, only the bytes past the last partial-append
      // position are converted (lastReachedChar tracks progress appends)
      stringToArrayBuffer(request.responseText.substring(segment.lastReachedChar || 0));

  // stop processing if received empty content
  if (response.byteLength === 0) {
    return finishProcessingFn({
      status: request.status,
      message: 'Empty HLS segment content at URL: ' + request.uri,
      code: REQUEST_ERRORS.FAILURE,
      xhr: request
    }, segment);
  }

  segment.stats = getRequestStats(request);

  // encrypted payloads are stored separately so the decryption worker can
  // take ownership of them later
  if (segment.key) {
    segment.encryptedBytes = new Uint8Array(newBytes);
  } else {
    segment.bytes = new Uint8Array(newBytes);
  }

  return finishProcessingFn(null, segment);
};
|
||||
|
||||
/**
 * Feeds ts/aac bytes through the transmuxer and relays its events to the
 * provided callbacks.
 *
 * For full (non-partial) segments, track and start-time info is taken from a
 * direct probe of the bytes first; the corresponding callbacks are then
 * nulled out so the transmuxer's (less trustworthy) values do not overwrite
 * the probed ones.
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 * @param {Uint8Array|ArrayBuffer} bytes - the segment bytes to transmux
 * @param {boolean} isPartial - whether this is a partial (in-progress) append
 * @param {Function} trackInfoFn - receives track info (nulled after first use)
 * @param {Function} timingInfoFn - receives (segment, mediaType, 'start'|'end', time)
 * @param {Function} videoSegmentTimingInfoFn - receives video segment timing info
 * @param {Function} id3Fn - receives ID3 metadata
 * @param {Function} captionsFn - receives captions
 * @param {Function} dataFn - receives transmuxed data
 * @param {Function} doneFn - called once transmuxing of a full segment completes
 */
const transmuxAndNotify = ({
  segment,
  bytes,
  isPartial,
  trackInfoFn,
  timingInfoFn,
  videoSegmentTimingInfoFn,
  id3Fn,
  captionsFn,
  dataFn,
  doneFn
}) => {
  const fmp4Tracks = segment.map && segment.map.tracks || {};
  const isMuxed = Boolean(fmp4Tracks.audio && fmp4Tracks.video);

  // Keep references to each function so we can null them out after we're done with them.
  // One reason for this is that in the case of full segments, we want to trust start
  // times from the probe, rather than the transmuxer.
  let audioStartFn = timingInfoFn.bind(null, segment, 'audio', 'start');
  const audioEndFn = timingInfoFn.bind(null, segment, 'audio', 'end');
  let videoStartFn = timingInfoFn.bind(null, segment, 'video', 'start');
  const videoEndFn = timingInfoFn.bind(null, segment, 'video', 'end');

  // Check to see if we are appending a full segment.
  if (!isPartial && !segment.lastReachedChar) {
    // In the full segment transmuxer, we don't yet have the ability to extract a "proper"
    // start time. Meaning cached frame data may corrupt our notion of where this segment
    // really starts. To get around this, full segment appends should probe for the info
    // needed.
    const probeResult = probeTsSegment(bytes, segment.baseStartTime);

    if (probeResult) {
      trackInfoFn(segment, {
        hasAudio: probeResult.hasAudio,
        hasVideo: probeResult.hasVideo,
        isMuxed
      });
      trackInfoFn = null;

      if (probeResult.hasAudio && !isMuxed) {
        audioStartFn(probeResult.audioStart);
      }
      if (probeResult.hasVideo) {
        videoStartFn(probeResult.videoStart);
      }
      // probed start times win over transmuxer-reported ones
      audioStartFn = null;
      videoStartFn = null;
    }
  }

  transmux({
    bytes,
    transmuxer: segment.transmuxer,
    audioAppendStart: segment.audioAppendStart,
    gopsToAlignWith: segment.gopsToAlignWith,
    isPartial,
    remux: isMuxed,
    onData: (result) => {
      // 'combined' (muxed) output is treated as video downstream
      result.type = result.type === 'combined' ? 'video' : result.type;
      dataFn(segment, result);
    },
    onTrackInfo: (trackInfo) => {
      if (trackInfoFn) {
        if (isMuxed) {
          trackInfo.isMuxed = true;
        }
        trackInfoFn(segment, trackInfo);
      }
    },
    onAudioTimingInfo: (audioTimingInfo) => {
      // we only want the first start value we encounter
      if (audioStartFn && typeof audioTimingInfo.start !== 'undefined') {
        audioStartFn(audioTimingInfo.start);
        audioStartFn = null;
      }
      // we want to continually update the end time
      if (audioEndFn && typeof audioTimingInfo.end !== 'undefined') {
        audioEndFn(audioTimingInfo.end);
      }
    },
    onVideoTimingInfo: (videoTimingInfo) => {
      // we only want the first start value we encounter
      if (videoStartFn && typeof videoTimingInfo.start !== 'undefined') {
        videoStartFn(videoTimingInfo.start);
        videoStartFn = null;
      }
      // we want to continually update the end time
      if (videoEndFn && typeof videoTimingInfo.end !== 'undefined') {
        videoEndFn(videoTimingInfo.end);
      }
    },
    onVideoSegmentTimingInfo: (videoSegmentTimingInfo) => {
      videoSegmentTimingInfoFn(videoSegmentTimingInfo);
    },
    onId3: (id3Frames, dispatchType) => {
      id3Fn(segment, id3Frames, dispatchType);
    },
    onCaptions: (captions) => {
      captionsFn(segment, [captions]);
    },
    onDone: (result) => {
      // To handle partial appends, there won't be a done function passed in (since
      // there's still, potentially, more segment to process), so there's nothing to do.
      if (!doneFn || isPartial) {
        return;
      }
      result.type = result.type === 'combined' ? 'video' : result.type;
      doneFn(null, segment, result);
    }
  });
};
|
||||
|
||||
/**
 * Dispatches downloaded segment bytes based on their container type:
 * fmp4 segments are probed and (optionally) run through a caption-parsing
 * worker; ts/aac segments are transmuxed; everything else (e.g. VTT) is
 * passed straight through to doneFn.
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 * @param {Uint8Array|ArrayBuffer} bytes - the downloaded segment bytes
 * @param {boolean} isPartial - whether this is a partial (in-progress) append
 * @param {Function} trackInfoFn - receives track info
 * @param {Function} timingInfoFn - receives timing info
 * @param {Function} videoSegmentTimingInfoFn - receives video segment timing info
 * @param {Function} id3Fn - receives ID3 metadata
 * @param {Function} captionsFn - receives captions
 * @param {Function} dataFn - receives usable segment bytes
 * @param {Function} doneFn - called when processing is complete
 */
const handleSegmentBytes = ({
  segment,
  bytes,
  isPartial,
  trackInfoFn,
  timingInfoFn,
  videoSegmentTimingInfoFn,
  id3Fn,
  captionsFn,
  dataFn,
  doneFn
}) => {
  const bytesAsUint8Array = new Uint8Array(bytes);

  // TODO:
  // We should have a handler that fetches the number of bytes required
  // to check if something is fmp4. This will allow us to save bandwidth
  // because we can only blacklist a playlist and abort requests
  // by codec after trackinfo triggers.
  if (isLikelyFmp4MediaSegment(bytesAsUint8Array)) {
    segment.isFmp4 = true;
    // NOTE(review): assumes segment.map (init segment) is present for fmp4 —
    // confirm callers guarantee this
    const {tracks} = segment.map;

    const trackInfo = {
      isFmp4: true,
      hasVideo: !!tracks.video,
      hasAudio: !!tracks.audio
    };

    // if we have a audio track, with a codec that is not set to
    // encrypted audio
    if (tracks.audio && tracks.audio.codec && tracks.audio.codec !== 'enca') {
      trackInfo.audioCodec = tracks.audio.codec;
    }

    // if we have a video track, with a codec that is not set to
    // encrypted video
    if (tracks.video && tracks.video.codec && tracks.video.codec !== 'encv') {
      trackInfo.videoCodec = tracks.video.codec;
    }

    if (tracks.video && tracks.audio) {
      trackInfo.isMuxed = true;
    }

    // since we don't support appending fmp4 data on progress, we know we have the full
    // segment here
    trackInfoFn(segment, trackInfo);
    // The probe doesn't provide the segment end time, so only callback with the start
    // time. The end time can be roughly calculated by the receiver using the duration.
    //
    // Note that the start time returned by the probe reflects the baseMediaDecodeTime, as
    // that is the true start of the segment (where the playback engine should begin
    // decoding).
    const timingInfo = mp4probe.startTime(segment.map.timescales, bytesAsUint8Array);

    if (trackInfo.hasAudio && !trackInfo.isMuxed) {
      timingInfoFn(segment, 'audio', 'start', timingInfo);
    }

    if (trackInfo.hasVideo) {
      timingInfoFn(segment, 'video', 'start', timingInfo);
    }

    const finishLoading = (captions) => {
      // if the track still has audio at this point it is only possible
      // for it to be audio only. See `tracks.video && tracks.audio` if statement
      // above.
      // we make sure to use segment.bytes here as that
      dataFn(segment, {data: bytes, type: trackInfo.hasAudio && !trackInfo.isMuxed ? 'audio' : 'video'});
      if (captions && captions.length) {
        captionsFn(segment, captions);
      }
      doneFn(null, segment, {});
    };

    // Run through the CaptionParser in case there are captions.
    // Initialize CaptionParser if it hasn't been yet
    if (!tracks.video || !bytes.byteLength || !segment.transmuxer) {
      finishLoading();
      return;
    }

    const buffer = bytes instanceof ArrayBuffer ? bytes : bytes.buffer;
    const byteOffset = bytes instanceof ArrayBuffer ? 0 : bytes.byteOffset;
    const listenForCaptions = (event) => {
      if (event.data.action !== 'mp4Captions') {
        return;
      }
      // one-shot listener: remove as soon as the caption message arrives
      segment.transmuxer.removeEventListener('message', listenForCaptions);

      const data = event.data.data;

      // transfer ownership of bytes back to us.
      segment.bytes = bytes = new Uint8Array(data, data.byteOffset || 0, data.byteLength);

      finishLoading(event.data.captions);
    };

    segment.transmuxer.addEventListener('message', listenForCaptions);

    // transfer ownership of bytes to worker.
    segment.transmuxer.postMessage({
      action: 'pushMp4Captions',
      timescales: segment.map.timescales,
      trackIds: [tracks.video.id],
      data: buffer,
      byteOffset,
      byteLength: bytes.byteLength
    }, [ buffer ]);
    return;
  }

  // VTT or other segments that don't need processing
  if (!segment.transmuxer) {
    doneFn(null, segment, {});
    return;
  }

  // detect the container once and cache it on the segment
  if (typeof segment.container === 'undefined') {
    segment.container = detectContainerForBytes(bytesAsUint8Array);
  }

  if (segment.container !== 'ts' && segment.container !== 'aac') {
    trackInfoFn(segment, {hasAudio: false, hasVideo: false});
    doneFn(null, segment, {});
    return;
  }

  // ts or aac
  transmuxAndNotify({
    segment,
    bytes,
    isPartial,
    trackInfoFn,
    timingInfoFn,
    videoSegmentTimingInfoFn,
    id3Fn,
    captionsFn,
    dataFn,
    doneFn
  });
};
|
||||
|
||||
/**
 * Decrypt the segment via the decryption web worker.
 *
 * Posts the encrypted bytes and key to the worker (transferring buffer
 * ownership), then, when the matching response arrives, stores the decrypted
 * bytes on the segment and continues processing through handleSegmentBytes.
 *
 * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128 decryption
 *                                       routines
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @param {Function} trackInfoFn - a callback that receives track info
 * @param {Function} timingInfoFn - a callback that receives timing info
 * @param {Function} videoSegmentTimingInfoFn - a callback that receives video timing info
 * @param {Function} id3Fn - a callback that receives ID3 metadata
 * @param {Function} captionsFn - a callback that receives captions
 * @param {Function} dataFn - a callback that is executed when segment bytes are available
 *                            and ready to use
 * @param {Function} doneFn - a callback that is executed after decryption has completed
 */
const decryptSegment = ({
  decryptionWorker,
  segment,
  trackInfoFn,
  timingInfoFn,
  videoSegmentTimingInfoFn,
  id3Fn,
  captionsFn,
  dataFn,
  doneFn
}) => {
  const decryptionHandler = (event) => {
    // requestId scopes the response to this segment; other messages are ignored
    if (event.data.source === segment.requestId) {
      decryptionWorker.removeEventListener('message', decryptionHandler);
      const decrypted = event.data.decrypted;

      segment.bytes = new Uint8Array(
        decrypted.bytes,
        decrypted.byteOffset,
        decrypted.byteLength
      );

      handleSegmentBytes({
        segment,
        bytes: segment.bytes,
        isPartial: false,
        trackInfoFn,
        timingInfoFn,
        videoSegmentTimingInfoFn,
        id3Fn,
        captionsFn,
        dataFn,
        doneFn
      });
    }
  };

  decryptionWorker.addEventListener('message', decryptionHandler);

  let keyBytes;

  // copy the key so transferring its buffer below doesn't neuter the original
  if (segment.key.bytes.slice) {
    keyBytes = segment.key.bytes.slice();
  } else {
    keyBytes = new Uint32Array(Array.prototype.slice.call(segment.key.bytes));
  }

  // this is an encrypted segment
  // incrementally decrypt the segment
  decryptionWorker.postMessage(createTransferableMessage({
    source: segment.requestId,
    encrypted: segment.encryptedBytes,
    key: keyBytes,
    iv: segment.key.iv
  }), [
    segment.encryptedBytes.buffer,
    keyBytes.buffer
  ]);
};
|
||||
|
||||
/**
 * This function waits for all XHRs to finish (with either success or failure)
 * before continueing processing via it's callback. The function gathers errors
 * from each request into a single errors array so that the error status for
 * each request can be examined later.
 *
 * @param {Object} activeXhrs - an object that tracks all XHR requests
 * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128 decryption
 *                                       routines
 * @param {Function} trackInfoFn - a callback that receives track info
 * @param {Function} timingInfoFn - a callback that receives timing info
 * @param {Function} videoSegmentTimingInfoFn - a callback that receives video timing info
 * @param {Function} id3Fn - a callback that receives ID3 metadata
 * @param {Function} captionsFn - a callback that receives captions
 * @param {Function} dataFn - a callback that is executed when segment bytes are available
 *                            and ready to use
 * @param {Function} doneFn - a callback that is executed after all resources have been
 *                            downloaded and any decryption completed
 * @return {Function} the shared per-request completion callback
 */
const waitForCompletion = ({
  activeXhrs,
  decryptionWorker,
  trackInfoFn,
  timingInfoFn,
  videoSegmentTimingInfoFn,
  id3Fn,
  captionsFn,
  dataFn,
  doneFn
}) => {
  // number of requests that have completed successfully so far
  let count = 0;
  // once an error is seen, all further callbacks are ignored
  let didError = false;

  return (error, segment) => {
    if (didError) {
      return;
    }

    if (error) {
      didError = true;
      // If there are errors, we have to abort any outstanding requests
      abortAll(activeXhrs);

      // Even though the requests above are aborted, and in theory we could wait until we
      // handle the aborted events from those requests, there are some cases where we may
      // never get an aborted event. For instance, if the network connection is lost and
      // there were two requests, the first may have triggered an error immediately, while
      // the second request remains unsent. In that case, the aborted algorithm will not
      // trigger an abort: see https://xhr.spec.whatwg.org/#the-abort()-method
      //
      // We also can't rely on the ready state of the XHR, since the request that
      // triggered the connection error may also show as a ready state of 0 (unsent).
      // Therefore, we have to finish this group of requests immediately after the first
      // seen error.
      return doneFn(error, segment);
    }

    count += 1;

    if (count === activeXhrs.length) {
      // Keep track of when *all* of the requests have completed
      segment.endOfAllRequests = Date.now();

      if (segment.encryptedBytes) {
        return decryptSegment({
          decryptionWorker,
          segment,
          trackInfoFn,
          timingInfoFn,
          videoSegmentTimingInfoFn,
          id3Fn,
          captionsFn,
          dataFn,
          doneFn
        });
      }
      // Otherwise, everything is ready just continue
      handleSegmentBytes({
        segment,
        bytes: segment.bytes,
        isPartial: false,
        trackInfoFn,
        timingInfoFn,
        videoSegmentTimingInfoFn,
        id3Fn,
        captionsFn,
        dataFn,
        doneFn
      });
    }
  };
};
|
||||
|
||||
/**
 * Calls the abort callback if any request within the batch was aborted. Will only call
 * the callback once per batch of requests, even if multiple were aborted.
 *
 * @param {Object} loadendState - state to check to see if the abort function was called
 * @param {Function} abortFn - callback to call for abort
 * @return {Function} a loadend event handler
 */
const handleLoadEnd = ({ loadendState, abortFn }) => (event) => {
  const request = event.target;

  if (!request.aborted || !abortFn) {
    return;
  }
  if (loadendState.calledAbortFn) {
    return;
  }

  abortFn();
  loadendState.calledAbortFn = true;
};
|
||||
|
||||
/**
 * Simple progress event callback handler that gathers some stats before
 * executing a provided callback with the `segment` object.
 *
 * When partial-append handling is enabled, newly arrived responseText bytes
 * are converted and fed through handleSegmentBytes as a partial append.
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @param {Function} progressFn - a callback that is executed each time a progress event
 *                                is received
 * @param {Function} trackInfoFn - a callback that receives track info
 * @param {Function} timingInfoFn - a callback that receives timing info
 * @param {Function} videoSegmentTimingInfoFn - a callback that receives video timing info
 * @param {Function} id3Fn - a callback that receives ID3 metadata
 * @param {Function} captionsFn - a callback that receives captions
 * @param {Function} dataFn - a callback that is executed when segment bytes are available
 *                            and ready to use
 * @param {boolean} handlePartialData - whether to process partial response data
 * @return {Function} a progress event handler
 */
const handleProgress = ({
  segment,
  progressFn,
  trackInfoFn,
  timingInfoFn,
  videoSegmentTimingInfoFn,
  id3Fn,
  captionsFn,
  dataFn,
  handlePartialData
}) => (event) => {
  const request = event.target;

  if (request.aborted) {
    return;
  }

  // don't support encrypted segments or fmp4 for now
  if (
    handlePartialData &&
    !segment.key &&
    // although responseText "should" exist, this guard serves to prevent an error being
    // thrown on the next check for two primary cases:
    // 1. the mime type override stops working, or is not implemented for a specific
    //    browser
    // 2. when using mock XHR libraries like sinon that do not allow the override behavior
    request.responseText &&
    // in order to determine if it's an fmp4 we need at least 8 bytes
    request.responseText.length >= 8
  ) {
    // only convert the bytes that arrived since the last progress event
    const newBytes = stringToArrayBuffer(request.responseText.substring(segment.lastReachedChar || 0));

    if (segment.lastReachedChar || !isLikelyFmp4MediaSegment(new Uint8Array(newBytes))) {
      segment.lastReachedChar = request.responseText.length;

      handleSegmentBytes({
        segment,
        bytes: newBytes,
        isPartial: true,
        trackInfoFn,
        timingInfoFn,
        videoSegmentTimingInfoFn,
        id3Fn,
        captionsFn,
        dataFn
      });
    }
  }

  segment.stats = videojs.mergeOptions(segment.stats, getProgressStats(event));

  // record the time that we receive the first byte of data
  if (!segment.stats.firstBytesReceivedAt && segment.stats.bytesReceived) {
    segment.stats.firstBytesReceivedAt = Date.now();
  }

  return progressFn(event, segment);
};
|
||||
|
||||
/**
 * Load all resources and does any processing necessary for a media-segment
 *
 * Features:
 *   decrypts the media-segment if it has a key uri and an iv
 *   aborts *all* requests if *any* one request fails
 *
 * The segment object, at minimum, has the following format:
 * {
 *   resolvedUri: String,
 *   [transmuxer]: Object,
 *   [byterange]: {
 *     offset: Number,
 *     length: Number
 *   },
 *   [key]: {
 *     resolvedUri: String
 *     [byterange]: {
 *       offset: Number,
 *       length: Number
 *     },
 *     iv: {
 *       bytes: Uint32Array
 *     }
 *   },
 *   [map]: {
 *     resolvedUri: String,
 *     [byterange]: {
 *       offset: Number,
 *       length: Number
 *     },
 *     [bytes]: Uint8Array
 *   }
 * }
 * ...where [name] denotes optional properties
 *
 * @param {Function} xhr - an instance of the xhr wrapper in xhr.js
 * @param {Object} xhrOptions - the base options to provide to all xhr requests
 * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128
 *                                       decryption routines
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @param {Function} abortFn - a callback called (only once) if any piece of a request was
 *                             aborted
 * @param {Function} progressFn - a callback that receives progress events from the main
 *                                segment's xhr request
 * @param {Function} trackInfoFn - a callback that receives track info
 * @param {Function} timingInfoFn - a callback that receives timing info
 * @param {Function} videoSegmentTimingInfoFn - a callback that receives video segment
 *                                              timing info
 * @param {Function} id3Fn - a callback that receives ID3 metadata
 * @param {Function} captionsFn - a callback that receives captions
 * @param {Function} dataFn - a callback that receives data from the main segment's xhr
 *                            request, transmuxed if needed
 * @param {Function} doneFn - a callback that is executed only once all requests have
 *                            succeeded or failed
 * @param {boolean} handlePartialData - whether to process partial response data as it
 *                                      arrives (uses a text responseType)
 * @return {Function} a function that, when invoked, immediately aborts all
 *                    outstanding requests
 */
export const mediaSegmentRequest = ({
  xhr,
  xhrOptions,
  decryptionWorker,
  segment,
  abortFn,
  progressFn,
  trackInfoFn,
  timingInfoFn,
  videoSegmentTimingInfoFn,
  id3Fn,
  captionsFn,
  dataFn,
  doneFn,
  handlePartialData
}) => {
  const activeXhrs = [];
  // shared completion callback: fires doneFn only after every request finishes
  const finishProcessingFn = waitForCompletion({
    activeXhrs,
    decryptionWorker,
    trackInfoFn,
    timingInfoFn,
    videoSegmentTimingInfoFn,
    id3Fn,
    captionsFn,
    dataFn,
    doneFn
  });

  // optionally, request the decryption key
  if (segment.key && !segment.key.bytes) {
    const keyRequestOptions = videojs.mergeOptions(xhrOptions, {
      uri: segment.key.resolvedUri,
      responseType: 'arraybuffer'
    });
    const keyRequestCallback = handleKeyResponse(segment, finishProcessingFn);
    const keyXhr = xhr(keyRequestOptions, keyRequestCallback);

    activeXhrs.push(keyXhr);
  }

  // optionally, request the associated media init segment
  if (segment.map && !segment.map.bytes) {
    const initSegmentOptions = videojs.mergeOptions(xhrOptions, {
      uri: segment.map.resolvedUri,
      responseType: 'arraybuffer',
      headers: segmentXhrHeaders(segment.map)
    });
    const initSegmentRequestCallback = handleInitSegmentResponse({
      segment,
      finishProcessingFn
    });
    const initSegmentXhr = xhr(initSegmentOptions, initSegmentRequestCallback);

    activeXhrs.push(initSegmentXhr);
  }

  const segmentRequestOptions = videojs.mergeOptions(xhrOptions, {
    uri: segment.resolvedUri,
    responseType: 'arraybuffer',
    headers: segmentXhrHeaders(segment)
  });

  if (handlePartialData) {
    // setting to text is required for partial responses
    // conversion to ArrayBuffer happens later
    segmentRequestOptions.responseType = 'text';
    segmentRequestOptions.beforeSend = (xhrObject) => {
      // XHR binary charset opt by Marcus Granado 2006 [http://mgran.blogspot.com]
      // makes the browser pass through the "text" unparsed
      xhrObject.overrideMimeType('text/plain; charset=x-user-defined');
    };
  }

  const segmentRequestCallback = handleSegmentResponse({
    segment,
    finishProcessingFn,
    responseType: segmentRequestOptions.responseType
  });
  const segmentXhr = xhr(segmentRequestOptions, segmentRequestCallback);

  segmentXhr.addEventListener(
    'progress',
    handleProgress({
      segment,
      progressFn,
      trackInfoFn,
      timingInfoFn,
      videoSegmentTimingInfoFn,
      id3Fn,
      captionsFn,
      dataFn,
      handlePartialData
    })
  );
  activeXhrs.push(segmentXhr);

  // since all parts of the request must be considered, but should not make callbacks
  // multiple times, provide a shared state object
  const loadendState = {};

  activeXhrs.forEach((activeXhr) => {
    activeXhr.addEventListener(
      'loadend',
      handleLoadEnd({ loadendState, abortFn })
    );
  });

  return () => abortAll(activeXhrs);
};
|
||||
634
build/javascript/node_modules/@videojs/http-streaming/src/playback-watcher.js
generated
vendored
Normal file
634
build/javascript/node_modules/@videojs/http-streaming/src/playback-watcher.js
generated
vendored
Normal file
@@ -0,0 +1,634 @@
|
||||
/**
|
||||
* @file playback-watcher.js
|
||||
*
|
||||
* Playback starts, and now my watch begins. It shall not end until my death. I shall
|
||||
* take no wait, hold no uncleared timeouts, father no bad seeks. I shall wear no crowns
|
||||
* and win no glory. I shall live and die at my post. I am the corrector of the underflow.
|
||||
* I am the watcher of gaps. I am the shield that guards the realms of seekable. I pledge
|
||||
* my life and honor to the Playback Watch, for this Player and all the Players to come.
|
||||
*/
|
||||
|
||||
import window from 'global/window';
|
||||
import * as Ranges from './ranges';
|
||||
import logger from './util/logger';
|
||||
import videojs from 'video.js';
|
||||
|
||||
// Set of events that reset the playback-watcher time check logic and clear the timeout
// (via cancelTimer_); any of these means the player is no longer silently stalled.
const timerCancelEvents = [
  'seeking',
  'seeked',
  'pause',
  'playing',
  'error'
];
|
||||
|
||||
/**
 * Determines whether the player's current time sits just shy of buffered content,
 * provided enough media is buffered for playback to be viable.
 *
 * @param {Object} options
 *        Options object
 * @param {TimeRange} options.buffered
 *        Current buffer
 * @param {number} options.targetDuration
 *        The active playlist's target duration
 * @param {number} options.currentTime
 *        The current time of the player
 * @return {boolean}
 *         Whether the current time should be considered close to the buffer
 */
export const closeToBufferedContent = ({ buffered, targetDuration, currentTime }) => {
  // nothing buffered means there is nothing to be close to
  if (!buffered.length) {
    return false;
  }

  const bufferStart = buffered.start(0);
  const bufferedAmount = buffered.end(0) - bufferStart;

  // Require roughly two to three segments worth of content before acting; with
  // less than that the buffer is too small to justify any correction.
  if (bufferedAmount < targetDuration * 2) {
    return false;
  }

  // A remove that hasn't completed after a seek can leave buffered content behind
  // the playhead; a buffer starting before current time isn't "close" ahead of it.
  if (currentTime > bufferStart) {
    return false;
  }

  // Target duration approximates the maximum segment length, so a gap smaller
  // than one segment probably won't be closed by further downloads.
  return bufferStart - currentTime < targetDuration;
};
|
||||
|
||||
/**
 * Monitors playback and issues corrective seeks, timers, and exclusions when the
 * player appears stalled.
 *
 * @class PlaybackWatcher
 */
export default class PlaybackWatcher {
  /**
   * Represents a PlaybackWatcher object.
   *
   * @class
   * @param {Object} options an object that includes the tech and settings
   */
  constructor(options) {
    this.masterPlaylistController_ = options.masterPlaylistController;
    this.tech_ = options.tech;
    // seekable and media are caller-provided functions, invoked later as
    // this.seekable() / this.media()
    this.seekable = options.seekable;
    this.allowSeeksWithinUnsafeLiveWindow = options.allowSeeksWithinUnsafeLiveWindow;
    this.media = options.media;

    // state used by checkCurrentTime_ to detect a stalled playhead
    this.consecutiveUpdates = 0;
    this.lastRecordedTime = null;
    // timer used by techWaiting_ to schedule a gap skip
    this.timer_ = null;
    // timer driving the periodic monitorCurrentTime_ loop
    this.checkCurrentTimeTimeout_ = null;
    this.logger_ = logger('PlaybackWatcher');

    this.logger_('initialize');

    const canPlayHandler = () => this.monitorCurrentTime_();
    const waitingHandler = () => this.techWaiting_();
    const cancelTimerHandler = () => this.cancelTimer_();
    const fixesBadSeeksHandler = () => this.fixesBadSeeks_();

    const mpc = this.masterPlaylistController_;

    const loaderTypes = ['main', 'subtitle', 'audio'];
    const loaderChecks = {};

    loaderTypes.forEach((type) => {
      // keep references to the bound handlers so they can be removed on dispose
      loaderChecks[type] = {
        reset: () => this.resetSegmentDownloads_(type),
        updateend: () => this.checkSegmentDownloads_(type)
      };

      mpc[`${type}SegmentLoader_`].on('appendsdone', loaderChecks[type].updateend);
      // If a rendition switch happens during a playback stall where the buffer
      // isn't changing we want to reset. We cannot assume that the new rendition
      // will also be stalled, until after new appends.
      mpc[`${type}SegmentLoader_`].on('playlistupdate', loaderChecks[type].reset);
      // Playback stalls should not be detected right after seeking.
      // This prevents one segment playlists (single vtt or single segment content)
      // from being detected as stalling. As the buffer will not change in those cases, since
      // the buffer is the entire video duration.
      this.tech_.on(['seeked', 'seeking'], loaderChecks[type].reset);
    });

    this.tech_.on('seekablechanged', fixesBadSeeksHandler);
    this.tech_.on('waiting', waitingHandler);
    this.tech_.on(timerCancelEvents, cancelTimerHandler);
    this.tech_.on('canplay', canPlayHandler);

    // Define the dispose function to clean up our events
    this.dispose = () => {
      this.logger_('dispose');
      this.tech_.off('seekablechanged', fixesBadSeeksHandler);
      this.tech_.off('waiting', waitingHandler);
      this.tech_.off(timerCancelEvents, cancelTimerHandler);
      this.tech_.off('canplay', canPlayHandler);

      loaderTypes.forEach((type) => {
        mpc[`${type}SegmentLoader_`].off('appendsdone', loaderChecks[type].updateend);
        mpc[`${type}SegmentLoader_`].off('playlistupdate', loaderChecks[type].reset);
        this.tech_.off(['seeked', 'seeking'], loaderChecks[type].reset);
      });
      if (this.checkCurrentTimeTimeout_) {
        window.clearTimeout(this.checkCurrentTimeTimeout_);
      }
      this.cancelTimer_();
    };
  }

  /**
   * Periodically check current time to see if playback stopped
   *
   * @private
   */
  monitorCurrentTime_() {
    this.checkCurrentTime_();

    // reschedule rather than stack timeouts when one is already pending
    if (this.checkCurrentTimeTimeout_) {
      window.clearTimeout(this.checkCurrentTimeTimeout_);
    }

    // 42 = 24 fps // 250 is what Webkit uses // FF uses 15
    this.checkCurrentTimeTimeout_ =
      window.setTimeout(this.monitorCurrentTime_.bind(this), 250);
  }

  /**
   * Reset stalled download stats for a specific type of loader
   *
   * @param {string} type
   *        The segment loader type to check.
   *
   * @listens SegmentLoader#playlistupdate
   * @listens Tech#seeking
   * @listens Tech#seeked
   */
  resetSegmentDownloads_(type) {
    const loader = this.masterPlaylistController_[`${type}SegmentLoader_`];

    if (this[`${type}StalledDownloads_`] > 0) {
      this.logger_(`resetting possible stalled download count for ${type} loader`);
    }
    this[`${type}StalledDownloads_`] = 0;
    // snapshot the loader's buffered ranges to compare against on the next
    // `appendsdone` check
    this[`${type}Buffered_`] = loader.buffered_();
  }

  /**
   * Checks on every segment `appendsdone` to see
   * if segment appends are making progress. If they are not
   * and we are still downloading bytes. We blacklist the playlist.
   *
   * @param {string} type
   *        The segment loader type to check.
   *
   * @listens SegmentLoader#appendsdone
   */
  checkSegmentDownloads_(type) {
    const mpc = this.masterPlaylistController_;
    const loader = mpc[`${type}SegmentLoader_`];
    const buffered = loader.buffered_();
    const isBufferedDifferent = Ranges.isRangeDifferent(this[`${type}Buffered_`], buffered);

    this[`${type}Buffered_`] = buffered;

    // if another watcher is going to fix the issue or
    // the buffered value for this loader changed
    // appends are working
    if (isBufferedDifferent) {
      this.resetSegmentDownloads_(type);
      return;
    }

    this[`${type}StalledDownloads_`]++;

    this.logger_(`found #${this[`${type}StalledDownloads_`]} ${type} appends that did not increase buffer (possible stalled download)`, {
      playlistId: loader.playlist_ && loader.playlist_.id,
      buffered: Ranges.timeRangesToArray(buffered)
    });

    // after 10 possibly stalled appends with no reset, exclude
    if (this[`${type}StalledDownloads_`] < 10) {
      return;
    }

    this.logger_(`${type} loader stalled download exclusion`);
    this.resetSegmentDownloads_(type);
    this.tech_.trigger({type: 'usage', name: `vhs-${type}-download-exclusion`});

    if (type === 'subtitle') {
      // TODO: Is there anything else that we can do here?
      // removing the track and disabling could have accessibility implications.
      const track = loader.track();
      const label = track.label || track.language || 'Unknown';

      videojs.log.warn(`Text track "${label}" is not working correctly. It will be disabled and excluded.`);
      track.mode = 'disabled';
      this.tech_.textTracks().removeTrack(track);
      return;
    }

    // TODO: should we exclude audio tracks rather than main tracks
    // when type is audio?
    mpc.blacklistCurrentPlaylist({
      message: `Excessive ${type} segment downloading detected.`
    }, Infinity);
  }

  /**
   * The purpose of this function is to emulate the "waiting" event on
   * browsers that do not emit it when they are waiting for more
   * data to continue playback
   *
   * @private
   */
  checkCurrentTime_() {
    if (this.tech_.seeking() && this.fixesBadSeeks_()) {
      // a corrective seek was issued; restart stall accounting from here
      this.consecutiveUpdates = 0;
      this.lastRecordedTime = this.tech_.currentTime();
      return;
    }

    if (this.tech_.paused() || this.tech_.seeking()) {
      return;
    }

    const currentTime = this.tech_.currentTime();
    const buffered = this.tech_.buffered();

    if (this.lastRecordedTime === currentTime &&
        (!buffered.length ||
         currentTime + Ranges.SAFE_TIME_DELTA >= buffered.end(buffered.length - 1))) {
      // If current time is at the end of the final buffered region, then any playback
      // stall is most likely caused by buffering in a low bandwidth environment. The tech
      // should fire a `waiting` event in this scenario, but due to browser and tech
      // inconsistencies. Calling `techWaiting_` here allows us to simulate
      // responding to a native `waiting` event when the tech fails to emit one.
      return this.techWaiting_();
    }

    if (this.consecutiveUpdates >= 5 &&
        currentTime === this.lastRecordedTime) {
      // playhead unchanged for 5+ consecutive checks: treat as waiting
      this.consecutiveUpdates++;
      this.waiting_();
    } else if (currentTime === this.lastRecordedTime) {
      this.consecutiveUpdates++;
    } else {
      // playback progressed; reset the stall counter
      this.consecutiveUpdates = 0;
      this.lastRecordedTime = currentTime;
    }
  }

  /**
   * Cancels any pending timers and resets the 'timeupdate' mechanism
   * designed to detect that we are stalled
   *
   * @private
   */
  cancelTimer_() {
    this.consecutiveUpdates = 0;

    if (this.timer_) {
      this.logger_('cancelTimer_');
      clearTimeout(this.timer_);
    }

    this.timer_ = null;
  }

  /**
   * Fixes situations where there's a bad seek
   *
   * @return {boolean} whether an action was taken to fix the seek
   * @private
   */
  fixesBadSeeks_() {
    const seeking = this.tech_.seeking();

    if (!seeking) {
      return false;
    }

    const seekable = this.seekable();
    const currentTime = this.tech_.currentTime();
    const isAfterSeekableRange = this.afterSeekableWindow_(
      seekable,
      currentTime,
      this.media(),
      this.allowSeeksWithinUnsafeLiveWindow
    );
    let seekTo;

    if (isAfterSeekableRange) {
      const seekableEnd = seekable.end(seekable.length - 1);

      // sync to live point (if VOD, our seekable was updated and we're simply adjusting)
      seekTo = seekableEnd;
    }

    if (this.beforeSeekableWindow_(seekable, currentTime)) {
      const seekableStart = seekable.start(0);

      // sync to the beginning of the live window
      // provide a buffer of .1 seconds to handle rounding/imprecise numbers
      seekTo = seekableStart +
        // if the playlist is too short and the seekable range is an exact time (can
        // happen in live with a 3 segment playlist), then don't use a time delta
        (seekableStart === seekable.end(0) ? 0 : Ranges.SAFE_TIME_DELTA);
    }

    if (typeof seekTo !== 'undefined') {
      this.logger_(`Trying to seek outside of seekable at time ${currentTime} with ` +
        `seekable range ${Ranges.printableRange(seekable)}. Seeking to ` +
        `${seekTo}.`);

      this.tech_.setCurrentTime(seekTo);
      return true;
    }

    const buffered = this.tech_.buffered();

    if (
      closeToBufferedContent({
        buffered,
        targetDuration: this.media().targetDuration,
        currentTime
      })
    ) {
      // the seek landed just short of buffered content; nudge into the buffer
      seekTo = buffered.start(0) + Ranges.SAFE_TIME_DELTA;
      this.logger_(`Buffered region starts (${buffered.start(0)}) ` +
        ` just beyond seek point (${currentTime}). Seeking to ${seekTo}.`);

      this.tech_.setCurrentTime(seekTo);
      return true;
    }

    return false;
  }

  /**
   * Handler for situations when we determine the player is waiting.
   *
   * @private
   */
  waiting_() {
    if (this.techWaiting_()) {
      return;
    }

    // All tech waiting checks failed. Use last resort correction
    const currentTime = this.tech_.currentTime();
    const buffered = this.tech_.buffered();
    const currentRange = Ranges.findRange(buffered, currentTime);

    // Sometimes the player can stall for unknown reasons within a contiguous buffered
    // region with no indication that anything is amiss (seen in Firefox). Seeking to
    // currentTime is usually enough to kickstart the player. This checks that the player
    // is currently within a buffered region before attempting a corrective seek.
    // Chrome does not appear to continue `timeupdate` events after a `waiting` event
    // until there is ~ 3 seconds of forward buffer available. PlaybackWatcher should also
    // make sure there is ~3 seconds of forward buffer before taking any corrective action
    // to avoid triggering an `unknownwaiting` event when the network is slow.
    if (currentRange.length && currentTime + 3 <= currentRange.end(0)) {
      this.cancelTimer_();
      this.tech_.setCurrentTime(currentTime);

      this.logger_(`Stopped at ${currentTime} while inside a buffered region ` +
        `[${currentRange.start(0)} -> ${currentRange.end(0)}]. Attempting to resume ` +
        'playback by seeking to the current time.');

      // unknown waiting corrections may be useful for monitoring QoS
      this.tech_.trigger({type: 'usage', name: 'vhs-unknown-waiting'});
      this.tech_.trigger({type: 'usage', name: 'hls-unknown-waiting'});
      return;
    }
  }

  /**
   * Handler for situations when the tech fires a `waiting` event
   *
   * @return {boolean}
   *         True if an action (or none) was needed to correct the waiting. False if no
   *         checks passed
   * @private
   */
  techWaiting_() {
    const seekable = this.seekable();
    const currentTime = this.tech_.currentTime();

    if (this.tech_.seeking() && this.fixesBadSeeks_()) {
      // Tech is seeking or bad seek fixed, no action needed
      return true;
    }

    if (this.tech_.seeking() || this.timer_ !== null) {
      // Tech is seeking or already waiting on another action, no action needed
      return true;
    }

    if (this.beforeSeekableWindow_(seekable, currentTime)) {
      const livePoint = seekable.end(seekable.length - 1);

      this.logger_(`Fell out of live window at time ${currentTime}. Seeking to ` +
        `live point (seekable end) ${livePoint}`);
      this.cancelTimer_();
      this.tech_.setCurrentTime(livePoint);

      // live window resyncs may be useful for monitoring QoS
      this.tech_.trigger({type: 'usage', name: 'vhs-live-resync'});
      this.tech_.trigger({type: 'usage', name: 'hls-live-resync'});
      return true;
    }

    const sourceUpdater = this.tech_.vhs.masterPlaylistController_.sourceUpdater_;
    const buffered = this.tech_.buffered();
    const videoUnderflow = this.videoUnderflow_({
      audioBuffered: sourceUpdater.audioBuffered(),
      videoBuffered: sourceUpdater.videoBuffered(),
      currentTime
    });

    if (videoUnderflow) {
      // Even though the video underflowed and was stuck in a gap, the audio overplayed
      // the gap, leading currentTime into a buffered range. Seeking to currentTime
      // allows the video to catch up to the audio position without losing any audio
      // (only suffering ~3 seconds of frozen video and a pause in audio playback).
      this.cancelTimer_();
      this.tech_.setCurrentTime(currentTime);

      // video underflow may be useful for monitoring QoS
      this.tech_.trigger({type: 'usage', name: 'vhs-video-underflow'});
      this.tech_.trigger({type: 'usage', name: 'hls-video-underflow'});
      return true;
    }
    const nextRange = Ranges.findNextRange(buffered, currentTime);

    // check for gap
    if (nextRange.length > 0) {
      const difference = nextRange.start(0) - currentTime;

      this.logger_(`Stopped at ${currentTime}, setting timer for ${difference}, seeking ` +
        `to ${nextRange.start(0)}`);

      this.cancelTimer_();

      // wait for the gap to play out before skipping over it (see skipTheGap_)
      this.timer_ = setTimeout(
        this.skipTheGap_.bind(this),
        difference * 1000,
        currentTime
      );
      return true;
    }

    // All checks failed. Returning false to indicate failure to correct waiting
    return false;
  }

  /**
   * Returns whether currentTime is past the allowed end of the seekable range.
   *
   * @param {TimeRanges} seekable the current seekable ranges
   * @param {number} currentTime the player's current time
   * @param {Object} playlist the active media playlist
   * @param {boolean} [allowSeeksWithinUnsafeLiveWindow=false] for live playlists,
   *        widen the allowed window to three target durations past seekable end
   * @return {boolean} true when currentTime is beyond the allowed end
   * @private
   */
  afterSeekableWindow_(seekable, currentTime, playlist, allowSeeksWithinUnsafeLiveWindow = false) {
    if (!seekable.length) {
      // we can't make a solid case if there's no seekable, default to false
      return false;
    }

    let allowedEnd = seekable.end(seekable.length - 1) + Ranges.SAFE_TIME_DELTA;
    const isLive = !playlist.endList;

    if (isLive && allowSeeksWithinUnsafeLiveWindow) {
      allowedEnd = seekable.end(seekable.length - 1) + (playlist.targetDuration * 3);
    }

    if (currentTime > allowedEnd) {
      return true;
    }

    return false;
  }

  /**
   * Returns whether currentTime has fallen behind the start of a live seekable
   * window.
   *
   * @param {TimeRanges} seekable the current seekable ranges
   * @param {number} currentTime the player's current time
   * @return {boolean} true when currentTime is before the seekable start
   * @private
   */
  beforeSeekableWindow_(seekable, currentTime) {
    if (seekable.length &&
        // can't fall before 0 and 0 seekable start identifies VOD stream
        seekable.start(0) > 0 &&
        currentTime < seekable.start(0) - Ranges.SAFE_TIME_DELTA) {
      return true;
    }

    return false;
  }

  /**
   * Determines whether playback is stuck because the video buffer ran dry while
   * audio continued to play past the video gap.
   *
   * @param {Object} options
   * @param {TimeRanges} options.videoBuffered buffered video ranges (falsy for audio-only)
   * @param {TimeRanges} options.audioBuffered buffered audio ranges
   * @param {number} options.currentTime the player's current time
   * @return {boolean|undefined} true when a video gap was found, false when not;
   *         undefined for audio-only content
   * @private
   */
  videoUnderflow_({videoBuffered, audioBuffered, currentTime}) {
    // audio only content will not have video underflow :)
    if (!videoBuffered) {
      return;
    }
    let gap;

    // find a gap in demuxed content.
    if (videoBuffered.length && audioBuffered.length) {
      // in Chrome audio will continue to play for ~3s when we run out of video
      // so we have to check that the video buffer did have some buffer in the
      // past.
      const lastVideoRange = Ranges.findRange(videoBuffered, currentTime - 3);
      const videoRange = Ranges.findRange(videoBuffered, currentTime);
      const audioRange = Ranges.findRange(audioBuffered, currentTime);

      if (audioRange.length && !videoRange.length && lastVideoRange.length) {
        gap = {start: lastVideoRange.end(0), end: audioRange.end(0)};
      }

    // find a gap in muxed content.
    } else {
      const nextRange = Ranges.findNextRange(videoBuffered, currentTime);

      // Even if there is no available next range, there is still a possibility we are
      // stuck in a gap due to video underflow.
      if (!nextRange.length) {
        gap = this.gapFromVideoUnderflow_(videoBuffered, currentTime);
      }
    }

    if (gap) {
      this.logger_(`Encountered a gap in video from ${gap.start} to ${gap.end}. ` +
        `Seeking to current time ${currentTime}`);

      return true;
    }

    return false;
  }

  /**
   * Timer callback. If playback still has not proceeded, then we seek
   * to the start of the next buffered region.
   *
   * @param {number} scheduledCurrentTime the currentTime recorded when the timer
   *        was set; the seek only happens if playback hasn't moved since then
   * @private
   */
  skipTheGap_(scheduledCurrentTime) {
    const buffered = this.tech_.buffered();
    const currentTime = this.tech_.currentTime();
    const nextRange = Ranges.findNextRange(buffered, currentTime);

    this.cancelTimer_();

    if (nextRange.length === 0 ||
        currentTime !== scheduledCurrentTime) {
      return;
    }

    this.logger_(
      'skipTheGap_:',
      'currentTime:', currentTime,
      'scheduled currentTime:', scheduledCurrentTime,
      'nextRange start:', nextRange.start(0)
    );

    // only seek if we still have not played
    this.tech_.setCurrentTime(nextRange.start(0) + Ranges.TIME_FUDGE_FACTOR);

    this.tech_.trigger({type: 'usage', name: 'vhs-gap-skip'});
    this.tech_.trigger({type: 'usage', name: 'hls-gap-skip'});
  }

  /**
   * Finds a gap in the video buffer that starts between 2 and 4 seconds behind
   * currentTime, indicating video underflow that audio overplayed.
   *
   * @param {TimeRanges} buffered the video buffered ranges
   * @param {number} currentTime the player's current time
   * @return {Object|null} {start, end} of the matching gap, or null if none
   * @private
   */
  gapFromVideoUnderflow_(buffered, currentTime) {
    // At least in Chrome, if there is a gap in the video buffer, the audio will continue
    // playing for ~3 seconds after the video gap starts. This is done to account for
    // video buffer underflow/underrun (note that this is not done when there is audio
    // buffer underflow/underrun -- in that case the video will stop as soon as it
    // encounters the gap, as audio stalls are more noticeable/jarring to a user than
    // video stalls). The player's time will reflect the playthrough of audio, so the
    // time will appear as if we are in a buffered region, even if we are stuck in a
    // "gap."
    //
    // Example:
    // video buffer:   0 => 10.1, 10.2 => 20
    // audio buffer:   0 => 20
    // overall buffer: 0 => 10.1, 10.2 => 20
    // current time: 13
    //
    // Chrome's video froze at 10 seconds, where the video buffer encountered the gap,
    // however, the audio continued playing until it reached ~3 seconds past the gap
    // (13 seconds), at which point it stops as well. Since current time is past the
    // gap, findNextRange will return no ranges.
    //
    // To check for this issue, we see if there is a gap that starts somewhere within
    // a 3 second range (3 seconds +/- 1 second) back from our current time.
    const gaps = Ranges.findGaps(buffered);

    for (let i = 0; i < gaps.length; i++) {
      const start = gaps.start(i);
      const end = gaps.end(i);

      // gap starts no more than 4 seconds back
      if (currentTime - start < 4 && currentTime - start > 2) {
        return {
          start,
          end
        };
      }
    }

    return null;
  }
}
|
||||
623
build/javascript/node_modules/@videojs/http-streaming/src/playlist-loader.js
generated
vendored
Normal file
623
build/javascript/node_modules/@videojs/http-streaming/src/playlist-loader.js
generated
vendored
Normal file
@@ -0,0 +1,623 @@
|
||||
/**
|
||||
* @file playlist-loader.js
|
||||
*
|
||||
* A state machine that manages the loading, caching, and updating of
|
||||
* M3U8 playlists.
|
||||
*
|
||||
*/
|
||||
import { resolveUrl, resolveManifestRedirect } from './resolve-url';
|
||||
import videojs from 'video.js';
|
||||
import window from 'global/window';
|
||||
import {
|
||||
parseManifest,
|
||||
addPropertiesToMaster,
|
||||
masterForMedia,
|
||||
setupMediaPlaylist
|
||||
} from './manifest';
|
||||
|
||||
const { mergeOptions, EventTarget } = videojs;
|
||||
|
||||
/**
 * Returns a new array of segments that is the result of merging
 * properties from an older list of segments onto an updated
 * list. No properties on the updated playlist will be overridden.
 *
 * @param {Array} original the outdated list of segments
 * @param {Array} update the updated list of segments
 * @param {number=} offset the index of the first update
 * segment in the original segment list. For non-live playlists,
 * this should always be zero and does not need to be
 * specified. For live playlists, it should be the difference
 * between the media sequence numbers in the original and updated
 * playlists.
 * @return a list of merged segment objects
 */
export const updateSegments = (original, update, offset) => {
  const merged = update.slice();
  const startIndex = offset || 0;
  // only indexes present in BOTH lists (after shifting by offset) get merged
  const stopIndex = Math.min(original.length, update.length + startIndex);

  for (let i = startIndex; i < stopIndex; i++) {
    merged[i - startIndex] = mergeOptions(original[i], merged[i - startIndex]);
  }

  return merged;
};
|
||||
|
||||
/**
 * Resolves (and caches) the absolute URIs for a segment, its optional
 * decryption key, and its optional init-map against a base URI. Mutates
 * the segment in place; already-resolved URIs are left untouched.
 *
 * @param {Object} segment the segment to resolve URIs on
 * @param {string} baseUri the URI to resolve relative URIs against
 */
export const resolveSegmentUris = (segment, baseUri) => {
  if (!segment.resolvedUri) {
    segment.resolvedUri = resolveUrl(baseUri, segment.uri);
  }

  // key (decryption) and map (init segment) are optional sub-objects with the
  // same uri/resolvedUri shape
  for (const prop of ['key', 'map']) {
    const child = segment[prop];

    if (child && !child.resolvedUri) {
      child.resolvedUri = resolveUrl(baseUri, child.uri);
    }
  }
};
|
||||
|
||||
/**
 * Returns a new master playlist that is the result of merging an
 * updated media playlist into the original version. If the
 * updated media playlist does not match any of the playlist
 * entries in the original master playlist, null is returned.
 *
 * @param {Object} master a parsed master M3U8 object
 * @param {Object} media a parsed media M3U8 object
 * @return {Object} a new object that represents the original
 * master playlist with the updated media playlist merged in, or
 * null if the merge produced no change.
 */
export const updateMaster = (master, media) => {
  // copy the master via mergeOptions so mutations below land on the copy
  const result = mergeOptions(master, {});
  const existingPlaylist = result.playlists[media.id];

  // the refreshed playlist isn't part of this master: nothing to merge
  if (!existingPlaylist) {
    return null;
  }

  // consider the playlist unchanged if the number of segments is equal, the media
  // sequence number is unchanged, and this playlist hasn't become the end of the playlist
  const isUnchanged = existingPlaylist.segments &&
    media.segments &&
    existingPlaylist.segments.length === media.segments.length &&
    existingPlaylist.endList === media.endList &&
    existingPlaylist.mediaSequence === media.mediaSequence;

  if (isUnchanged) {
    return null;
  }

  const mergedPlaylist = mergeOptions(existingPlaylist, media);

  // if the update could overlap existing segment information, merge the two segment
  // lists, shifted by the difference in media sequence numbers
  if (existingPlaylist.segments) {
    mergedPlaylist.segments = updateSegments(
      existingPlaylist.segments,
      media.segments,
      media.mediaSequence - existingPlaylist.mediaSequence
    );
  }

  // resolve any segment URIs to prevent us from having to do it later
  mergedPlaylist.segments.forEach((segment) => {
    resolveSegmentUris(segment, mergedPlaylist.resolvedUri);
  });

  // TODO Right now in the playlists array there are two references to each playlist, one
  // that is referenced by index, and one by URI. The index reference may no longer be
  // necessary.
  for (let index = 0; index < result.playlists.length; index++) {
    if (result.playlists[index].id === media.id) {
      result.playlists[index] = mergedPlaylist;
    }
  }
  result.playlists[media.id] = mergedPlaylist;
  // URI reference added for backwards compatibility
  result.playlists[media.uri] = mergedPlaylist;

  return result;
};
|
||||
|
||||
/**
 * Calculates the time to wait before refreshing a live playlist
 *
 * @param {Object} media
 *        The current media
 * @param {boolean} update
 *        True if there were any updates from the last refresh, false otherwise
 * @return {number}
 *         The time in ms to wait before refreshing the live playlist
 */
export const refreshDelay = (media, update) => {
  const segments = media.segments;
  const lastSegment = segments[segments.length - 1];

  // when the last refresh brought changes, wait one final-segment duration
  if (update && lastSegment && lastSegment.duration) {
    return lastSegment.duration * 1000;
  }

  // if the playlist is unchanged since the last reload or last segment duration
  // cannot be determined, try again after half the target duration
  return (media.targetDuration || 10) * 500;
};
|
||||
|
||||
/**
|
||||
* Load a playlist from a remote location
|
||||
*
|
||||
* @class PlaylistLoader
|
||||
* @extends Stream
|
||||
* @param {string|Object} src url or object of manifest
|
||||
* @param {boolean} withCredentials the withCredentials xhr option
|
||||
* @class
|
||||
*/
|
||||
export default class PlaylistLoader extends EventTarget {
|
||||
  /**
   * Build a PlaylistLoader and wire up the live-refresh timeout handler.
   *
   * @param {string|Object} src url or object of the manifest to load
   * @param {Object} vhs the owning vhs instance (provides xhr and options_)
   * @param {Object} [options] loader options
   * @param {boolean} [options.withCredentials=false] send credentials with
   *        playlist requests
   * @param {boolean} [options.handleManifestRedirects=false] whether manifest
   *        redirects should be respected (consumed elsewhere in the loader)
   * @throws {Error} when src is empty
   */
  constructor(src, vhs, options = { }) {
    super();

    if (!src) {
      throw new Error('A non-empty playlist URL or object is required');
    }

    const { withCredentials = false, handleManifestRedirects = false } = options;

    this.src = src;
    this.vhs_ = vhs;
    this.withCredentials = withCredentials;
    this.handleManifestRedirects = handleManifestRedirects;

    const vhsOptions = vhs.options_;

    // user-supplied m3u8 parser extensions (default to none)
    this.customTagParsers = (vhsOptions && vhsOptions.customTagParsers) || [];
    this.customTagMappers = (vhsOptions && vhsOptions.customTagMappers) || [];

    // initialize the loader state
    this.state = 'HAVE_NOTHING';

    // live playlist staleness timeout
    this.on('mediaupdatetimeout', () => {
      if (this.state !== 'HAVE_METADATA') {
        // only refresh the media playlist if no other activity is going on
        return;
      }

      this.state = 'HAVE_CURRENT_METADATA';

      this.request = this.vhs_.xhr({
        uri: resolveUrl(this.master.uri, this.media().uri),
        withCredentials: this.withCredentials
      }, (error, req) => {
        // disposed
        if (!this.request) {
          return;
        }

        if (error) {
          // restore HAVE_METADATA so a later refresh can retry
          return this.playlistRequestError(this.request, this.media(), 'HAVE_METADATA');
        }

        this.haveMetadata({
          playlistString: this.request.responseText,
          url: this.media().uri,
          id: this.media().id
        });
      });
    });
  }
|
||||
|
||||
playlistRequestError(xhr, playlist, startingState) {
|
||||
const {
|
||||
uri,
|
||||
id
|
||||
} = playlist;
|
||||
|
||||
// any in-flight request is now finished
|
||||
this.request = null;
|
||||
|
||||
if (startingState) {
|
||||
this.state = startingState;
|
||||
}
|
||||
|
||||
this.error = {
|
||||
playlist: this.master.playlists[id],
|
||||
status: xhr.status,
|
||||
message: `HLS playlist request error at URL: ${uri}.`,
|
||||
responseText: xhr.responseText,
|
||||
code: (xhr.status >= 500) ? 4 : 2
|
||||
};
|
||||
|
||||
this.trigger('error');
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the playlist loader's state in response to a new or updated playlist.
|
||||
*
|
||||
* @param {string} [playlistString]
|
||||
* Playlist string (if playlistObject is not provided)
|
||||
* @param {Object} [playlistObject]
|
||||
* Playlist object (if playlistString is not provided)
|
||||
* @param {string} url
|
||||
* URL of playlist
|
||||
* @param {string} id
|
||||
* ID to use for playlist
|
||||
*/
|
||||
haveMetadata({ playlistString, playlistObject, url, id }) {
|
||||
// any in-flight request is now finished
|
||||
this.request = null;
|
||||
this.state = 'HAVE_METADATA';
|
||||
|
||||
const playlist = playlistObject || parseManifest({
|
||||
manifestString: playlistString,
|
||||
customTagParsers: this.customTagParsers,
|
||||
customTagMappers: this.customTagMappers
|
||||
});
|
||||
|
||||
setupMediaPlaylist({
|
||||
playlist,
|
||||
uri: url,
|
||||
id
|
||||
});
|
||||
|
||||
// merge this playlist into the master
|
||||
const update = updateMaster(this.master, playlist);
|
||||
|
||||
this.targetDuration = playlist.targetDuration;
|
||||
|
||||
if (update) {
|
||||
this.master = update;
|
||||
this.media_ = this.master.playlists[id];
|
||||
} else {
|
||||
this.trigger('playlistunchanged');
|
||||
}
|
||||
|
||||
// refresh live playlists after a target duration passes
|
||||
if (!this.media().endList) {
|
||||
window.clearTimeout(this.mediaUpdateTimeout);
|
||||
this.mediaUpdateTimeout = window.setTimeout(() => {
|
||||
this.trigger('mediaupdatetimeout');
|
||||
}, refreshDelay(this.media(), !!update));
|
||||
}
|
||||
|
||||
this.trigger('loadedplaylist');
|
||||
}
|
||||
|
||||
/**
|
||||
* Abort any outstanding work and clean up.
|
||||
*/
|
||||
dispose() {
|
||||
this.trigger('dispose');
|
||||
this.stopRequest();
|
||||
window.clearTimeout(this.mediaUpdateTimeout);
|
||||
window.clearTimeout(this.finalRenditionTimeout);
|
||||
|
||||
this.off();
|
||||
}
|
||||
|
||||
stopRequest() {
|
||||
if (this.request) {
|
||||
const oldRequest = this.request;
|
||||
|
||||
this.request = null;
|
||||
oldRequest.onreadystatechange = null;
|
||||
oldRequest.abort();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* When called without any arguments, returns the currently
|
||||
* active media playlist. When called with a single argument,
|
||||
* triggers the playlist loader to asynchronously switch to the
|
||||
* specified media playlist. Calling this method while the
|
||||
* loader is in the HAVE_NOTHING causes an error to be emitted
|
||||
* but otherwise has no effect.
|
||||
*
|
||||
* @param {Object=} playlist the parsed media playlist
|
||||
* object to switch to
|
||||
* @param {boolean=} is this the last available playlist
|
||||
*
|
||||
* @return {Playlist} the current loaded media
|
||||
*/
|
||||
media(playlist, isFinalRendition) {
|
||||
// getter
|
||||
if (!playlist) {
|
||||
return this.media_;
|
||||
}
|
||||
|
||||
// setter
|
||||
if (this.state === 'HAVE_NOTHING') {
|
||||
throw new Error('Cannot switch media playlist from ' + this.state);
|
||||
}
|
||||
|
||||
// find the playlist object if the target playlist has been
|
||||
// specified by URI
|
||||
if (typeof playlist === 'string') {
|
||||
if (!this.master.playlists[playlist]) {
|
||||
throw new Error('Unknown playlist URI: ' + playlist);
|
||||
}
|
||||
playlist = this.master.playlists[playlist];
|
||||
}
|
||||
|
||||
window.clearTimeout(this.finalRenditionTimeout);
|
||||
|
||||
if (isFinalRendition) {
|
||||
const delay = (playlist.targetDuration / 2) * 1000 || 5 * 1000;
|
||||
|
||||
this.finalRenditionTimeout =
|
||||
window.setTimeout(this.media.bind(this, playlist, false), delay);
|
||||
return;
|
||||
}
|
||||
|
||||
const startingState = this.state;
|
||||
const mediaChange = !this.media_ || playlist.id !== this.media_.id;
|
||||
|
||||
// switch to fully loaded playlists immediately
|
||||
if (this.master.playlists[playlist.id].endList ||
|
||||
// handle the case of a playlist object (e.g., if using vhs-json with a resolved
|
||||
// media playlist or, for the case of demuxed audio, a resolved audio media group)
|
||||
(playlist.endList && playlist.segments.length)) {
|
||||
// abort outstanding playlist requests
|
||||
if (this.request) {
|
||||
this.request.onreadystatechange = null;
|
||||
this.request.abort();
|
||||
this.request = null;
|
||||
}
|
||||
this.state = 'HAVE_METADATA';
|
||||
this.media_ = playlist;
|
||||
|
||||
// trigger media change if the active media has been updated
|
||||
if (mediaChange) {
|
||||
this.trigger('mediachanging');
|
||||
|
||||
if (startingState === 'HAVE_MASTER') {
|
||||
// The initial playlist was a master manifest, and the first media selected was
|
||||
// also provided (in the form of a resolved playlist object) as part of the
|
||||
// source object (rather than just a URL). Therefore, since the media playlist
|
||||
// doesn't need to be requested, loadedmetadata won't trigger as part of the
|
||||
// normal flow, and needs an explicit trigger here.
|
||||
this.trigger('loadedmetadata');
|
||||
} else {
|
||||
this.trigger('mediachange');
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// switching to the active playlist is a no-op
|
||||
if (!mediaChange) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.state = 'SWITCHING_MEDIA';
|
||||
|
||||
// there is already an outstanding playlist request
|
||||
if (this.request) {
|
||||
if (playlist.resolvedUri === this.request.url) {
|
||||
// requesting to switch to the same playlist multiple times
|
||||
// has no effect after the first
|
||||
return;
|
||||
}
|
||||
this.request.onreadystatechange = null;
|
||||
this.request.abort();
|
||||
this.request = null;
|
||||
}
|
||||
|
||||
// request the new playlist
|
||||
if (this.media_) {
|
||||
this.trigger('mediachanging');
|
||||
}
|
||||
|
||||
this.request = this.vhs_.xhr({
|
||||
uri: playlist.resolvedUri,
|
||||
withCredentials: this.withCredentials
|
||||
}, (error, req) => {
|
||||
// disposed
|
||||
if (!this.request) {
|
||||
return;
|
||||
}
|
||||
|
||||
playlist.resolvedUri = resolveManifestRedirect(this.handleManifestRedirects, playlist.resolvedUri, req);
|
||||
|
||||
if (error) {
|
||||
return this.playlistRequestError(this.request, playlist, startingState);
|
||||
}
|
||||
|
||||
this.haveMetadata({
|
||||
playlistString: req.responseText,
|
||||
url: playlist.uri,
|
||||
id: playlist.id
|
||||
});
|
||||
|
||||
// fire loadedmetadata the first time a media playlist is loaded
|
||||
if (startingState === 'HAVE_MASTER') {
|
||||
this.trigger('loadedmetadata');
|
||||
} else {
|
||||
this.trigger('mediachange');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* pause loading of the playlist
|
||||
*/
|
||||
pause() {
|
||||
this.stopRequest();
|
||||
window.clearTimeout(this.mediaUpdateTimeout);
|
||||
if (this.state === 'HAVE_NOTHING') {
|
||||
// If we pause the loader before any data has been retrieved, its as if we never
|
||||
// started, so reset to an unstarted state.
|
||||
this.started = false;
|
||||
}
|
||||
// Need to restore state now that no activity is happening
|
||||
if (this.state === 'SWITCHING_MEDIA') {
|
||||
// if the loader was in the process of switching media, it should either return to
|
||||
// HAVE_MASTER or HAVE_METADATA depending on if the loader has loaded a media
|
||||
// playlist yet. This is determined by the existence of loader.media_
|
||||
if (this.media_) {
|
||||
this.state = 'HAVE_METADATA';
|
||||
} else {
|
||||
this.state = 'HAVE_MASTER';
|
||||
}
|
||||
} else if (this.state === 'HAVE_CURRENT_METADATA') {
|
||||
this.state = 'HAVE_METADATA';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* start loading of the playlist
|
||||
*/
|
||||
load(isFinalRendition) {
|
||||
window.clearTimeout(this.mediaUpdateTimeout);
|
||||
|
||||
const media = this.media();
|
||||
|
||||
if (isFinalRendition) {
|
||||
const delay = media ? (media.targetDuration / 2) * 1000 : 5 * 1000;
|
||||
|
||||
this.mediaUpdateTimeout = window.setTimeout(() => this.load(), delay);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!this.started) {
|
||||
this.start();
|
||||
return;
|
||||
}
|
||||
|
||||
if (media && !media.endList) {
|
||||
this.trigger('mediaupdatetimeout');
|
||||
} else {
|
||||
this.trigger('loadedplaylist');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* start loading of the playlist
|
||||
*/
|
||||
start() {
|
||||
this.started = true;
|
||||
|
||||
if (typeof this.src === 'object') {
|
||||
// in the case of an entirely constructed manifest object (meaning there's no actual
|
||||
// manifest on a server), default the uri to the page's href
|
||||
if (!this.src.uri) {
|
||||
this.src.uri = window.location.href;
|
||||
}
|
||||
|
||||
// resolvedUri is added on internally after the initial request. Since there's no
|
||||
// request for pre-resolved manifests, add on resolvedUri here.
|
||||
this.src.resolvedUri = this.src.uri;
|
||||
|
||||
// Since a manifest object was passed in as the source (instead of a URL), the first
|
||||
// request can be skipped (since the top level of the manifest, at a minimum, is
|
||||
// already available as a parsed manifest object). However, if the manifest object
|
||||
// represents a master playlist, some media playlists may need to be resolved before
|
||||
// the starting segment list is available. Therefore, go directly to setup of the
|
||||
// initial playlist, and let the normal flow continue from there.
|
||||
//
|
||||
// Note that the call to setup is asynchronous, as other sections of VHS may assume
|
||||
// that the first request is asynchronous.
|
||||
setTimeout(() => {
|
||||
this.setupInitialPlaylist(this.src);
|
||||
}, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
// request the specified URL
|
||||
this.request = this.vhs_.xhr({
|
||||
uri: this.src,
|
||||
withCredentials: this.withCredentials
|
||||
}, (error, req) => {
|
||||
// disposed
|
||||
if (!this.request) {
|
||||
return;
|
||||
}
|
||||
|
||||
// clear the loader's request reference
|
||||
this.request = null;
|
||||
|
||||
if (error) {
|
||||
this.error = {
|
||||
status: req.status,
|
||||
message: `HLS playlist request error at URL: ${this.src}.`,
|
||||
responseText: req.responseText,
|
||||
// MEDIA_ERR_NETWORK
|
||||
code: 2
|
||||
};
|
||||
if (this.state === 'HAVE_NOTHING') {
|
||||
this.started = false;
|
||||
}
|
||||
return this.trigger('error');
|
||||
}
|
||||
|
||||
this.src = resolveManifestRedirect(this.handleManifestRedirects, this.src, req);
|
||||
|
||||
const manifest = parseManifest({
|
||||
manifestString: req.responseText,
|
||||
customTagParsers: this.customTagParsers,
|
||||
customTagMappers: this.customTagMappers
|
||||
});
|
||||
|
||||
this.setupInitialPlaylist(manifest);
|
||||
});
|
||||
}
|
||||
|
||||
srcUri() {
|
||||
return typeof this.src === 'string' ? this.src : this.src.uri;
|
||||
}
|
||||
|
||||
/**
|
||||
* Given a manifest object that's either a master or media playlist, trigger the proper
|
||||
* events and set the state of the playlist loader.
|
||||
*
|
||||
* If the manifest object represents a master playlist, `loadedplaylist` will be
|
||||
* triggered to allow listeners to select a playlist. If none is selected, the loader
|
||||
* will default to the first one in the playlists array.
|
||||
*
|
||||
* If the manifest object represents a media playlist, `loadedplaylist` will be
|
||||
* triggered followed by `loadedmetadata`, as the only available playlist is loaded.
|
||||
*
|
||||
* In the case of a media playlist, a master playlist object wrapper with one playlist
|
||||
* will be created so that all logic can handle playlists in the same fashion (as an
|
||||
* assumed manifest object schema).
|
||||
*
|
||||
* @param {Object} manifest
|
||||
* The parsed manifest object
|
||||
*/
|
||||
setupInitialPlaylist(manifest) {
|
||||
this.state = 'HAVE_MASTER';
|
||||
|
||||
if (manifest.playlists) {
|
||||
this.master = manifest;
|
||||
addPropertiesToMaster(this.master, this.srcUri());
|
||||
// If the initial master playlist has playlists wtih segments already resolved,
|
||||
// then resolve URIs in advance, as they are usually done after a playlist request,
|
||||
// which may not happen if the playlist is resolved.
|
||||
manifest.playlists.forEach((playlist) => {
|
||||
if (playlist.segments) {
|
||||
playlist.segments.forEach((segment) => {
|
||||
resolveSegmentUris(segment, playlist.resolvedUri);
|
||||
});
|
||||
}
|
||||
});
|
||||
this.trigger('loadedplaylist');
|
||||
if (!this.request) {
|
||||
// no media playlist was specifically selected so start
|
||||
// from the first listed one
|
||||
this.media(this.master.playlists[0]);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// In order to support media playlists passed in as vhs-json, the case where the uri
|
||||
// is not provided as part of the manifest should be considered, and an appropriate
|
||||
// default used.
|
||||
const uri = this.srcUri() || window.location.href;
|
||||
|
||||
this.master = masterForMedia(manifest, uri);
|
||||
this.haveMetadata({
|
||||
playlistObject: manifest,
|
||||
url: uri,
|
||||
id: this.master.playlists[0].id
|
||||
});
|
||||
this.trigger('loadedmetadata');
|
||||
}
|
||||
|
||||
}
|
||||
489
build/javascript/node_modules/@videojs/http-streaming/src/playlist-selectors.js
generated
vendored
Normal file
489
build/javascript/node_modules/@videojs/http-streaming/src/playlist-selectors.js
generated
vendored
Normal file
@@ -0,0 +1,489 @@
|
||||
import window from 'global/window';
|
||||
import Config from './config';
|
||||
import Playlist from './playlist';
|
||||
import { codecsForPlaylist } from './util/codecs.js';
|
||||
import logger from './util/logger';
|
||||
|
||||
// Module-scoped debug logger shared by every selector in this file.
const logFn = logger('PlaylistSelector');
||||
/**
 * Produce a compact JSON description of a representation (id, bandwidth,
 * dimensions, and codecs) for use in selector debug logging.
 *
 * @param {Object} representation the intermediary playlist representation
 * @return {string|undefined} a JSON string, or undefined when no playlist
 *         is attached to the representation
 */
const representationToString = function(representation) {
  const playlist = representation && representation.playlist;

  if (!playlist) {
    return;
  }

  const codecs = (playlist.attributes && playlist.attributes.CODECS) || '';

  return JSON.stringify({
    id: playlist.id,
    bandwidth: representation.bandwidth,
    width: representation.width,
    height: representation.height,
    codecs
  });
};
|
||||
|
||||
// Utilities
|
||||
|
||||
/**
|
||||
* Returns the CSS value for the specified property on an element
|
||||
* using `getComputedStyle`. Firefox has a long-standing issue where
|
||||
* getComputedStyle() may return null when running in an iframe with
|
||||
* `display: none`.
|
||||
*
|
||||
* @see https://bugzilla.mozilla.org/show_bug.cgi?id=548397
|
||||
* @param {HTMLElement} el the htmlelement to work on
|
||||
* @param {string} the proprety to get the style for
|
||||
*/
|
||||
/**
 * Returns the CSS value for the specified property on an element using
 * `getComputedStyle`. Firefox has a long-standing issue where
 * getComputedStyle() may return null when running in an iframe with
 * `display: none`, so both a missing element and a null style object
 * yield the empty string.
 *
 * @see https://bugzilla.mozilla.org/show_bug.cgi?id=548397
 * @param {HTMLElement} el the element to read from
 * @param {string} property the CSS property name to look up
 * @return {string} the computed value, or '' when unavailable
 */
const safeGetComputedStyle = function(el, property) {
  if (!el) {
    return '';
  }

  const style = window.getComputedStyle(el);

  return style ? style[property] : '';
};
|
||||
|
||||
/**
|
||||
* Resuable stable sort function
|
||||
*
|
||||
* @param {Playlists} array
|
||||
* @param {Function} sortFn Different comparators
|
||||
* @function stableSort
|
||||
*/
|
||||
/**
 * Reusable stable sort: sorts `array` in place with `sortFn`, breaking ties
 * by each element's position in the pre-sort order so equal elements keep
 * their relative ordering.
 *
 * @param {Array} array the array to sort (mutated in place)
 * @param {Function} sortFn comparator returning <0, 0, or >0
 * @function stableSort
 */
const stableSort = function(array, sortFn) {
  // snapshot the original ordering before sort() starts moving elements
  const originalOrder = array.slice();

  array.sort((a, b) => {
    const result = sortFn(a, b);

    return result === 0 ?
      originalOrder.indexOf(a) - originalOrder.indexOf(b) :
      result;
  });
};
|
||||
|
||||
/**
|
||||
* A comparator function to sort two playlist object by bandwidth.
|
||||
*
|
||||
* @param {Object} left a media playlist object
|
||||
* @param {Object} right a media playlist object
|
||||
* @return {number} Greater than zero if the bandwidth attribute of
|
||||
* left is greater than the corresponding attribute of right. Less
|
||||
* than zero if the bandwidth of right is greater than left and
|
||||
* exactly zero if the two are equal.
|
||||
*/
|
||||
/**
 * A comparator function to sort two playlist objects by bandwidth.
 *
 * A playlist without a (truthy) BANDWIDTH attribute sorts as if it had the
 * maximum possible bandwidth, pushing it to the end of an ascending sort.
 *
 * @param {Object} left a media playlist object
 * @param {Object} right a media playlist object
 * @return {number} positive when left's bandwidth is greater, negative when
 *         right's is greater, and exactly zero when they are equal
 */
export const comparePlaylistBandwidth = function(left, right) {
  // falsy BANDWIDTH falls back to MAX_VALUE (window is only touched lazily)
  const bandwidthOf = (playlist) =>
    playlist.attributes.BANDWIDTH || window.Number.MAX_VALUE;

  return bandwidthOf(left) - bandwidthOf(right);
};
|
||||
|
||||
/**
|
||||
* A comparator function to sort two playlist object by resolution (width).
|
||||
*
|
||||
* @param {Object} left a media playlist object
|
||||
* @param {Object} right a media playlist object
|
||||
* @return {number} Greater than zero if the resolution.width attribute of
|
||||
* left is greater than the corresponding attribute of right. Less
|
||||
* than zero if the resolution.width of right is greater than left and
|
||||
* exactly zero if the two are equal.
|
||||
*/
|
||||
/**
 * A comparator function to sort two playlist objects by resolution (width).
 *
 * A playlist without resolution information sorts as if it were maximally
 * wide. When the widths tie and both playlists carry a BANDWIDTH attribute,
 * bandwidth is used as the tiebreaker.
 *
 * @param {Object} left a media playlist object
 * @param {Object} right a media playlist object
 * @return {number} positive when left's width is greater, negative when
 *         right's is greater, and zero when equal (after the tiebreak)
 */
export const comparePlaylistResolution = function(left, right) {
  // missing RESOLUTION/width falls back to MAX_VALUE (window touched lazily)
  const widthOf = (playlist) => {
    const resolution = playlist.attributes.RESOLUTION;

    return (resolution && resolution.width) || window.Number.MAX_VALUE;
  };

  const leftWidth = widthOf(left);
  const rightWidth = widthOf(right);

  // NOTE - Fallback to bandwidth sort as appropriate in cases where multiple
  // renditions have the same media dimensions/resolution
  if (leftWidth === rightWidth &&
      left.attributes.BANDWIDTH &&
      right.attributes.BANDWIDTH) {
    return left.attributes.BANDWIDTH - right.attributes.BANDWIDTH;
  }

  return leftWidth - rightWidth;
};
|
||||
|
||||
/**
|
||||
* Chooses the appropriate media playlist based on bandwidth and player size
|
||||
*
|
||||
* @param {Object} master
|
||||
* Object representation of the master manifest
|
||||
* @param {number} playerBandwidth
|
||||
* Current calculated bandwidth of the player
|
||||
* @param {number} playerWidth
|
||||
* Current width of the player element (should account for the device pixel ratio)
|
||||
* @param {number} playerHeight
|
||||
* Current height of the player element (should account for the device pixel ratio)
|
||||
* @param {boolean} limitRenditionByPlayerDimensions
|
||||
* True if the player width and height should be used during the selection, false otherwise
|
||||
* @return {Playlist} the highest bitrate playlist less than the
|
||||
* currently detected bandwidth, accounting for some amount of
|
||||
* bandwidth variance
|
||||
*/
|
||||
export const simpleSelector = function(
  master,
  playerBandwidth,
  playerWidth,
  playerHeight,
  limitRenditionByPlayerDimensions
) {

  // captured once so every log line reports the same selection inputs
  const options = {
    bandwidth: playerBandwidth,
    width: playerWidth,
    height: playerHeight,
    limitRenditionByPlayerDimensions
  };
  // convert the playlists to an intermediary representation to make comparisons easier
  let sortedPlaylistReps = master.playlists.map((playlist) => {
    let bandwidth;
    const width = playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.width;
    const height = playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.height;

    bandwidth = playlist.attributes.BANDWIDTH;

    // missing/falsy bandwidth sorts to the end (treated as maximally expensive)
    bandwidth = bandwidth || window.Number.MAX_VALUE;

    return {
      bandwidth,
      width,
      height,
      playlist
    };
  });

  // ascending by bandwidth; stable so manifest order breaks ties
  stableSort(sortedPlaylistReps, (left, right) => left.bandwidth - right.bandwidth);

  // filter out any playlists that have been excluded due to
  // incompatible configurations
  sortedPlaylistReps = sortedPlaylistReps.filter((rep) => !Playlist.isIncompatible(rep.playlist));

  // filter out any playlists that have been disabled manually through the representations
  // api or blacklisted temporarily due to playback errors.
  let enabledPlaylistReps = sortedPlaylistReps.filter((rep) => Playlist.isEnabled(rep.playlist));

  if (!enabledPlaylistReps.length) {
    // if there are no enabled playlists, then they have all been blacklisted or disabled
    // by the user through the representations api. In this case, ignore blacklisting and
    // fallback to what the user wants by using playlists the user has not disabled.
    enabledPlaylistReps = sortedPlaylistReps.filter((rep) => !Playlist.isDisabled(rep.playlist));
  }

  // filter out any variant that has greater effective bitrate
  // than the current estimated bandwidth
  const bandwidthPlaylistReps = enabledPlaylistReps.filter((rep) => rep.bandwidth * Config.BANDWIDTH_VARIANCE < playerBandwidth);

  // list is sorted ascending, so the last entry has the highest bandwidth
  let highestRemainingBandwidthRep =
    bandwidthPlaylistReps[bandwidthPlaylistReps.length - 1];

  // get all of the renditions with the same (highest) bandwidth
  // and then taking the very first element
  const bandwidthBestRep = bandwidthPlaylistReps.filter((rep) => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0];

  // if we're not going to limit renditions by player size, make an early decision.
  if (limitRenditionByPlayerDimensions === false) {
    const chosenRep = (
      bandwidthBestRep ||
      enabledPlaylistReps[0] ||
      sortedPlaylistReps[0]
    );

    if (chosenRep && chosenRep.playlist) {
      let type = 'sortedPlaylistReps';

      // NOTE(review): these two checks are not chained with else-if, so when
      // both reps exist the log reports 'enabledPlaylistReps' even though
      // bandwidthBestRep was chosen — looks like a logging-only quirk; confirm
      // against upstream before changing.
      if (bandwidthBestRep) {
        type = 'bandwidthBestRep';
      }
      if (enabledPlaylistReps[0]) {
        type = 'enabledPlaylistReps';
      }
      logFn(`choosing ${representationToString(chosenRep)} using ${type} with options`, options);

      return chosenRep.playlist;
    }

    logFn('could not choose a playlist with options', options);
    return null;
  }

  // filter out playlists without resolution information
  const haveResolution = bandwidthPlaylistReps.filter((rep) => rep.width && rep.height);

  // sort variants by resolution
  stableSort(haveResolution, (left, right) => left.width - right.width);

  // if we have the exact resolution as the player use it
  const resolutionBestRepList = haveResolution.filter((rep) => rep.width === playerWidth && rep.height === playerHeight);

  highestRemainingBandwidthRep = resolutionBestRepList[resolutionBestRepList.length - 1];
  // ensure that we pick the highest bandwidth variant that have exact resolution
  const resolutionBestRep = resolutionBestRepList.filter((rep) => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0];

  let resolutionPlusOneList;
  let resolutionPlusOneSmallest;
  let resolutionPlusOneRep;

  // find the smallest variant that is larger than the player
  // if there is no match of exact resolution
  if (!resolutionBestRep) {
    resolutionPlusOneList = haveResolution.filter((rep) => rep.width > playerWidth || rep.height > playerHeight);

    // find all the variants have the same smallest resolution
    resolutionPlusOneSmallest = resolutionPlusOneList.filter((rep) => rep.width === resolutionPlusOneList[0].width &&
      rep.height === resolutionPlusOneList[0].height);

    // ensure that we also pick the highest bandwidth variant that
    // is just-larger-than the video player
    highestRemainingBandwidthRep =
      resolutionPlusOneSmallest[resolutionPlusOneSmallest.length - 1];
    resolutionPlusOneRep = resolutionPlusOneSmallest.filter((rep) => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0];
  }

  // fallback chain of variants
  const chosenRep = (
    resolutionPlusOneRep ||
    resolutionBestRep ||
    bandwidthBestRep ||
    enabledPlaylistReps[0] ||
    sortedPlaylistReps[0]
  );

  if (chosenRep && chosenRep.playlist) {
    let type = 'sortedPlaylistReps';

    if (resolutionPlusOneRep) {
      type = 'resolutionPlusOneRep';
    } else if (resolutionBestRep) {
      type = 'resolutionBestRep';
    } else if (bandwidthBestRep) {
      type = 'bandwidthBestRep';
    } else if (enabledPlaylistReps[0]) {
      type = 'enabledPlaylistReps';
    }

    logFn(`choosing ${representationToString(chosenRep)} using ${type} with options`, options);
    return chosenRep.playlist;
  }
  logFn('could not choose a playlist with options', options);
  return null;
};
|
||||
|
||||
// Playlist Selectors
|
||||
|
||||
/**
|
||||
* Chooses the appropriate media playlist based on the most recent
|
||||
* bandwidth estimate and the player size.
|
||||
*
|
||||
* Expects to be called within the context of an instance of VhsHandler
|
||||
*
|
||||
* @return {Playlist} the highest bitrate playlist less than the
|
||||
* currently detected bandwidth, accounting for some amount of
|
||||
* bandwidth variance
|
||||
*/
|
||||
/**
 * Chooses the appropriate media playlist based on the most recent
 * bandwidth estimate and the player size.
 *
 * Expects to be called within the context of an instance of VhsHandler
 *
 * @return {Playlist} the highest bitrate playlist less than the
 * currently detected bandwidth, accounting for some amount of
 * bandwidth variance
 */
export const lastBandwidthSelector = function() {
  let pixelRatio = 1;

  if (this.useDevicePixelRatio) {
    pixelRatio = window.devicePixelRatio || 1;
  }

  return simpleSelector(
    this.playlists.master,
    this.systemBandwidth,
    parseInt(safeGetComputedStyle(this.tech_.el(), 'width'), 10) * pixelRatio,
    parseInt(safeGetComputedStyle(this.tech_.el(), 'height'), 10) * pixelRatio,
    this.limitRenditionByPlayerDimensions
  );
};
|
||||
|
||||
/**
|
||||
* Chooses the appropriate media playlist based on an
|
||||
* exponential-weighted moving average of the bandwidth after
|
||||
* filtering for player size.
|
||||
*
|
||||
* Expects to be called within the context of an instance of VhsHandler
|
||||
*
|
||||
* @param {number} decay - a number between 0 and 1. Higher values of
|
||||
* this parameter will cause previous bandwidth estimates to lose
|
||||
* significance more quickly.
|
||||
* @return {Function} a function which can be invoked to create a new
|
||||
* playlist selector function.
|
||||
* @see https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
|
||||
*/
|
||||
/**
 * Chooses the appropriate media playlist based on an
 * exponential-weighted moving average of the bandwidth after
 * filtering for player size.
 *
 * Expects the returned selector to be called within the context of an
 * instance of VhsHandler.
 *
 * @param {number} decay - a number between 0 and 1. Higher values of
 * this parameter will cause previous bandwidth estimates to lose
 * significance more quickly.
 * @return {Function} a function which can be invoked to create a new
 * playlist selector function.
 * @throws {Error} when decay is outside [0, 1]
 * @see https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
 */
export const movingAverageBandwidthSelector = function(decay) {
  if (decay < 0 || decay > 1) {
    throw new Error('Moving average bandwidth decay must be between 0 and 1.');
  }

  // -1 marks the average as unseeded; it is initialized from the first
  // bandwidth sample observed by the returned selector
  let average = -1;

  return function() {
    let pixelRatio = 1;

    if (this.useDevicePixelRatio) {
      pixelRatio = window.devicePixelRatio || 1;
    }

    if (average < 0) {
      average = this.systemBandwidth;
    }

    average = (decay * this.systemBandwidth) + ((1 - decay) * average);

    return simpleSelector(
      this.playlists.master,
      average,
      parseInt(safeGetComputedStyle(this.tech_.el(), 'width'), 10) * pixelRatio,
      parseInt(safeGetComputedStyle(this.tech_.el(), 'height'), 10) * pixelRatio,
      this.limitRenditionByPlayerDimensions
    );
  };
};
|
||||
|
||||
/**
|
||||
* Chooses the appropriate media playlist based on the potential to rebuffer
|
||||
*
|
||||
* @param {Object} settings
|
||||
* Object of information required to use this selector
|
||||
* @param {Object} settings.master
|
||||
* Object representation of the master manifest
|
||||
* @param {number} settings.currentTime
|
||||
* The current time of the player
|
||||
* @param {number} settings.bandwidth
|
||||
* Current measured bandwidth
|
||||
* @param {number} settings.duration
|
||||
* Duration of the media
|
||||
* @param {number} settings.segmentDuration
|
||||
* Segment duration to be used in round trip time calculations
|
||||
* @param {number} settings.timeUntilRebuffer
|
||||
* Time left in seconds until the player has to rebuffer
|
||||
* @param {number} settings.currentTimeline
|
||||
* The current timeline segments are being loaded from
|
||||
* @param {SyncController} settings.syncController
|
||||
* SyncController for determining if we have a sync point for a given playlist
|
||||
* @return {Object|null}
|
||||
* {Object} return.playlist
|
||||
* The highest bandwidth playlist with the least amount of rebuffering
|
||||
* {Number} return.rebufferingImpact
|
||||
* The amount of time in seconds switching to this playlist will rebuffer. A
|
||||
* negative value means that switching will cause zero rebuffering.
|
||||
*/
|
||||
/**
 * Chooses the appropriate media playlist based on the potential to rebuffer
 *
 * @param {Object} settings
 *        Object of information required to use this selector
 * @param {Object} settings.master
 *        Object representation of the master manifest
 * @param {number} settings.currentTime
 *        The current time of the player
 * @param {number} settings.bandwidth
 *        Current measured bandwidth
 * @param {number} settings.duration
 *        Duration of the media
 * @param {number} settings.segmentDuration
 *        Segment duration to be used in round trip time calculations
 * @param {number} settings.timeUntilRebuffer
 *        Time left in seconds until the player has to rebuffer
 * @param {number} settings.currentTimeline
 *        The current timeline segments are being loaded from
 * @param {SyncController} settings.syncController
 *        SyncController for determining if we have a sync point for a given playlist
 * @return {Object|null}
 *         {Object} return.playlist
 *         The highest bandwidth playlist with the least amount of rebuffering
 *         {Number} return.rebufferingImpact
 *         The amount of time in seconds switching to this playlist will rebuffer. A
 *         negative value means that switching will cause zero rebuffering.
 */
export const minRebufferMaxBandwidthSelector = function(settings) {
  const {
    master,
    currentTime,
    bandwidth,
    duration,
    segmentDuration,
    timeUntilRebuffer,
    currentTimeline,
    syncController
  } = settings;

  // drop playlists excluded due to incompatible configurations
  const compatible = master.playlists.filter((playlist) => !Playlist.isIncompatible(playlist));

  // prefer playlists that are neither manually disabled through the
  // representations api nor temporarily blacklisted for playback errors
  let candidates = compatible.filter(Playlist.isEnabled);

  if (!candidates.length) {
    // everything is blacklisted or disabled; ignore blacklisting and honor
    // only the user's explicit disables
    candidates = compatible.filter((playlist) => !Playlist.isDisabled(playlist));
  }

  // bandwidth info is required for the request-time estimate below
  const withBandwidth = candidates.filter(Playlist.hasAttribute.bind(null, 'BANDWIDTH'));

  const estimates = withBandwidth.map((playlist) => {
    const syncPoint = syncController.getSyncPoint(
      playlist,
      duration,
      currentTimeline,
      currentTime
    );
    // If there is no sync point for this playlist, switching to it will require a
    // sync request first. This will double the request time
    const numRequests = syncPoint ? 1 : 2;
    const requestTimeEstimate = Playlist.estimateSegmentRequestTime(
      segmentDuration,
      bandwidth,
      playlist
    );

    return {
      playlist,
      // negative/zero impact means the switch fits inside the buffer
      rebufferingImpact: (requestTimeEstimate * numRequests) - timeUntilRebuffer
    };
  });

  const noRebuffer = estimates.filter((estimate) => estimate.rebufferingImpact <= 0);

  // Sort by bandwidth DESC so the best zero-rebuffer option comes first
  stableSort(
    noRebuffer,
    (a, b) => comparePlaylistBandwidth(b.playlist, a.playlist)
  );

  if (noRebuffer.length) {
    return noRebuffer[0];
  }

  // otherwise take the option that rebuffers the least
  stableSort(estimates, (a, b) => a.rebufferingImpact - b.rebufferingImpact);

  return estimates[0] || null;
};
|
||||
|
||||
/**
 * Selects the lowest-bitrate enabled rendition that advertises a video codec.
 * Returns null when no enabled rendition with video exists.
 *
 * Expects to be called with an instance of VhsHandler bound as `this`.
 *
 * @return {Object|null}
 *         The lowest-bandwidth playlist containing a video codec, or null.
 */
export const lowestBitrateCompatibleVariantSelector = function() {
  const master = this.playlists.master;

  // Drop renditions excluded for incompatible configurations or playback errors.
  const candidates = master.playlists.filter(Playlist.isEnabled);

  // Order candidates from lowest to highest advertised bandwidth.
  stableSort(candidates, (a, b) => comparePlaylistBandwidth(a, b));

  // Treat renditions without a declared video codec as audio-only.
  // (Generally true, although a rendition may omit the codec attribute.)
  const withVideo = candidates.filter(
    (candidate) => Boolean(codecsForPlaylist(master, candidate).video)
  );

  return withVideo[0] || null;
};
|
||||
559
build/javascript/node_modules/@videojs/http-streaming/src/playlist.js
generated
vendored
Normal file
559
build/javascript/node_modules/@videojs/http-streaming/src/playlist.js
generated
vendored
Normal file
@@ -0,0 +1,559 @@
|
||||
/**
|
||||
* @file playlist.js
|
||||
*
|
||||
* Playlist related utilities.
|
||||
*/
|
||||
import videojs from 'video.js';
|
||||
import window from 'global/window';
|
||||
import {TIME_FUDGE_FACTOR} from './ranges.js';
|
||||
|
||||
const {createTimeRange} = videojs;
|
||||
|
||||
/**
 * Walk backward from `endSequence` until a segment with precise timing
 * information (a `start` or `end` property) is found, or report an imprecise
 * duration-sum fallback.
 *
 * @param {Playlist} playlist the playlist to walk through
 * @param {number} endSequence the mediaSequence to stop walking on
 * @return {{result: number, precise: boolean}}
 *         The estimated start time of the interval and whether it is precise.
 */
const backwardDuration = function(playlist, endSequence) {
  let index = endSequence - playlist.mediaSequence;
  const boundary = playlist.segments[index];
  let accumulated = 0;

  // If the segment immediately following the interval carries timing info,
  // use it directly.
  if (boundary) {
    if (typeof boundary.start !== 'undefined') {
      return { result: boundary.start, precise: true };
    }
    if (typeof boundary.end !== 'undefined') {
      return {
        result: boundary.end - boundary.duration,
        precise: true
      };
    }
  }

  // Otherwise scan backward, summing segment durations until a segment with
  // precise timing is encountered.
  while (index--) {
    const segment = playlist.segments[index];

    if (typeof segment.end !== 'undefined') {
      return { result: accumulated + segment.end, precise: true };
    }

    accumulated += segment.duration;

    if (typeof segment.start !== 'undefined') {
      return { result: accumulated + segment.start, precise: true };
    }
  }

  // No timing info anywhere before endSequence: report the duration sum.
  return { result: accumulated, precise: false };
};
|
||||
|
||||
/**
 * Walk forward from `endSequence` until a segment with precise timing
 * information is found, or report failure.
 *
 * @param {Playlist} playlist the playlist to walk through
 * @param {number} endSequence the mediaSequence to stop walking on
 * @return {{result: number, precise: boolean}}
 *         The estimated start time of the interval; `result` is -1 and
 *         `precise` is false when no usable timing info exists.
 */
const forwardDuration = function(playlist, endSequence) {
  const startIndex = endSequence - playlist.mediaSequence;
  let accumulated = 0;

  // Scan forward for the earliest segment carrying timeline information.
  for (let index = startIndex; index < playlist.segments.length; index++) {
    const segment = playlist.segments[index];

    if (typeof segment.start !== 'undefined') {
      return {
        result: segment.start - accumulated,
        precise: true
      };
    }

    accumulated += segment.duration;

    if (typeof segment.end !== 'undefined') {
      return {
        result: segment.end - accumulated,
        precise: true
      };
    }
  }

  // Indicate we didn't find a useful duration estimate.
  return { result: -1, precise: false };
};
|
||||
|
||||
/**
 * Calculate the media duration from the segments associated with a playlist.
 * The duration of a subinterval of the available segments may be calculated
 * by specifying an end sequence.
 *
 * @param {Object} playlist a media playlist object
 * @param {number=} endSequence an exclusive upper boundary for the playlist;
 *        defaults to mediaSequence plus the segment count
 * @param {number} expired the amount of time that has dropped off the front
 *        of the playlist in a live scenario
 * @return {number} the duration between the first available segment and the
 *         end sequence
 */
const intervalDuration = function(playlist, endSequence, expired) {
  const sequenceEnd = typeof endSequence === 'undefined' ?
    playlist.mediaSequence + playlist.segments.length :
    endSequence;

  if (sequenceEnd < playlist.mediaSequence) {
    return 0;
  }

  // Prefer a precise estimate derived from Media Source timing info while
  // walking backward.
  const backward = backwardDuration(playlist, sequenceEnd);

  if (backward.precise) {
    return backward.result;
  }

  // Otherwise try to anchor on timing info found ahead of the interval.
  const forward = forwardDuration(playlist, sequenceEnd);

  if (forward.precise) {
    return forward.result;
  }

  // Fall back to the imprecise playlist-based estimate, offset by the
  // expired time.
  return backward.result + expired;
};
|
||||
|
||||
/**
 * Calculates the duration of a playlist. If an end sequence is specified, the
 * duration is for the subset of the media timeline up to that sequence. The
 * total duration for live playlists is always Infinity.
 *
 * @param {Object} playlist a media playlist object
 * @param {number=} endSequence an exclusive upper boundary for the playlist;
 *        defaults to the playlist media sequence number plus its length
 * @param {number=} expired the amount of time that has dropped off the front
 *        of the playlist in a live scenario
 * @return {number} the duration between the start of the playlist and the
 *         end sequence
 */
export const duration = function(playlist, endSequence, expired) {
  if (!playlist) {
    return 0;
  }

  const expiredTime = typeof expired === 'number' ? expired : 0;

  // When the full duration is requested, prefer playlist-level indicators.
  if (typeof endSequence === 'undefined') {
    // An explicit total duration from the playlist wins.
    if (playlist.totalDuration) {
      return playlist.totalDuration;
    }

    // Live playlists (no EXT-X-ENDLIST) have an unbounded duration.
    if (!playlist.endList) {
      return window.Infinity;
    }
  }

  // Otherwise sum up the segment durations.
  return intervalDuration(playlist, endSequence, expiredTime);
};
|
||||
|
||||
/**
 * Calculate the time between two indexes in the current playlist. Neither
 * index needs to be within the playlist: negative indexes are approximated
 * using the playlist's targetDuration. The indexes may be given in either
 * order.
 *
 * @param {Object} playlist a media playlist object
 * @param {number} startIndex
 * @param {number} endIndex
 * @return {number} the number of seconds between startIndex and endIndex
 */
export const sumDurations = function(playlist, startIndex, endIndex) {
  const lo = Math.min(startIndex, endIndex);
  const hi = Math.max(startIndex, endIndex);
  let total = 0;

  // Indexes before the start of the playlist are approximated with the
  // target duration.
  for (let i = lo; i < Math.min(0, hi); i++) {
    total += playlist.targetDuration;
  }

  // Sum the real durations of the in-range segments.
  for (let i = Math.max(0, lo); i < hi; i++) {
    total += playlist.segments[i].duration;
  }

  return total;
};
|
||||
|
||||
/**
 * Determines the media index of the segment at the safe edge of the live
 * window, which is the duration of the last segment plus two target durations
 * back from the end of the playlist.
 *
 * A liveEdgePadding can be provided which is used instead of the calculated
 * safe distance. This corresponds to suggestedPresentationDelay in DASH
 * manifests.
 *
 * @param {Object} playlist
 *        a media playlist object
 * @param {number} [liveEdgePadding]
 *        seconds from the live edge to stay behind; overrides the
 *        target-duration based default when provided
 * @return {number}
 *         The media index of the segment at the safe live point. 0 if there
 *         is no "safe" point.
 * @function safeLiveIndex
 */
export const safeLiveIndex = function(playlist, liveEdgePadding) {
  const segments = playlist.segments;

  if (!segments.length) {
    return 0;
  }

  let index = segments.length;
  const lastDuration = segments[index - 1].duration || playlist.targetDuration;
  const safeDistance = typeof liveEdgePadding === 'number' ?
    liveEdgePadding :
    lastDuration + playlist.targetDuration * 2;

  // A zero padding means the live edge itself is considered safe.
  if (safeDistance === 0) {
    return index;
  }

  // Walk back from the live edge accumulating duration until we are at
  // least safeDistance behind it.
  let accumulated = 0;

  while (index--) {
    accumulated += segments[index].duration;

    if (accumulated >= safeDistance) {
      break;
    }
  }

  return Math.max(0, index);
};
|
||||
|
||||
/**
 * Calculates the playlist end time.
 *
 * @param {Object} playlist a media playlist object
 * @param {number=} expired the amount of time that has dropped off the front
 *        of the playlist in a live scenario
 * @param {boolean|false} useSafeLiveEnd whether the end calculation should
 *        back off from the live edge by the safe live distance; equivalent to
 *        liveEdgePadding=0 when false
 * @param {number} liveEdgePadding seconds from the end of the playlist to
 *        stay behind; corresponds to suggestedPresentationDelay in DASH
 * @return {number} the end time of the playlist, or null when it cannot be
 *         computed
 * @function playlistEnd
 */
export const playlistEnd = function(playlist, expired, useSafeLiveEnd, liveEdgePadding) {
  if (!playlist || !playlist.segments) {
    return null;
  }

  // VOD playlists end at their full duration.
  if (playlist.endList) {
    return duration(playlist);
  }

  // An explicit null `expired` means the expired time is unknown.
  if (expired === null) {
    return null;
  }

  const expiredTime = expired || 0;

  const endSequence = useSafeLiveEnd ?
    safeLiveIndex(playlist, liveEdgePadding) :
    playlist.segments.length;

  return intervalDuration(
    playlist,
    playlist.mediaSequence + endSequence,
    expiredTime
  );
};
|
||||
|
||||
/**
 * Calculates the interval of time that is currently seekable in a playlist.
 * The returned range is relative to the earliest moment in the playlist that
 * is still available; a full live implementation would offset these values by
 * the expired duration.
 *
 * @param {Object} playlist a media playlist object
 * @param {number=} expired the amount of time that has dropped off the front
 *        of the playlist in a live scenario
 * @param {number} liveEdgePadding seconds from the end of the playlist to
 *        stay behind; corresponds to suggestedPresentationDelay in DASH
 * @return {TimeRanges} the periods of time that are valid targets for seeking
 */
export const seekable = function(playlist, expired, liveEdgePadding) {
  const start = expired || 0;
  // Always respect the safe live end when computing seekable.
  const end = playlistEnd(playlist, expired, true, liveEdgePadding);

  return end === null ? createTimeRange() : createTimeRange(start, end);
};
|
||||
|
||||
/**
 * Determine the index and estimated starting time of the segment that
 * contains a specified playback position in a media playlist.
 *
 * @param {Object} playlist the media playlist to query
 * @param {number} currentTime The number of seconds since the earliest
 * possible position to determine the containing segment for
 * @param {number} startIndex index of the reference segment whose start time
 * is `startTime`; may be negative to reference a point before the playlist
 * @param {number} startTime known start time (seconds) of the segment at
 * `startIndex`
 * @return {Object} `{ mediaIndex, startTime }` — the index of the containing
 * segment and its estimated start time
 */
export const getMediaInfoForTime = function(
  playlist,
  currentTime,
  startIndex,
  startTime
) {
  let i;
  let segment;
  const numSegments = playlist.segments.length;

  // Signed offset from the known reference point to the requested time.
  let time = currentTime - startTime;

  if (time < 0) {
    // Walk backward from startIndex in the playlist, adding durations
    // until we find a segment that contains `time` and return it.
    // TIME_FUDGE_FACTOR compensates for imprecise segment timing info.
    if (startIndex > 0) {
      for (i = startIndex - 1; i >= 0; i--) {
        segment = playlist.segments[i];
        time += (segment.duration + TIME_FUDGE_FACTOR);
        if (time > 0) {
          return {
            mediaIndex: i,
            startTime: startTime - sumDurations(playlist, startIndex, i)
          };
        }
      }
    }
    // We were unable to find a good segment within the playlist
    // so select the first segment
    return {
      mediaIndex: 0,
      startTime: currentTime
    };
  }

  // When startIndex is negative, we first walk forward to first segment
  // adding target durations. If we "run out of time" before getting to
  // the first segment, return the first segment
  if (startIndex < 0) {
    for (i = startIndex; i < 0; i++) {
      time -= playlist.targetDuration;
      if (time < 0) {
        return {
          mediaIndex: 0,
          startTime: currentTime
        };
      }
    }
    startIndex = 0;
  }

  // Walk forward from startIndex in the playlist, subtracting durations
  // until we find a segment that contains `time` and return it
  for (i = startIndex; i < numSegments; i++) {
    segment = playlist.segments[i];
    time -= segment.duration + TIME_FUDGE_FACTOR;
    if (time < 0) {
      return {
        mediaIndex: i,
        startTime: startTime + sumDurations(playlist, startIndex, i)
      };
    }
  }

  // We are out of possible candidates so load the last one...
  return {
    mediaIndex: numSegments - 1,
    startTime: currentTime
  };
};
|
||||
|
||||
/**
 * Check whether the playlist is currently blacklisted.
 *
 * Note: returns the raw truthy/falsy chain result (undefined when
 * `excludeUntil` is unset) rather than a strict boolean.
 *
 * @param {Object} playlist the media playlist object
 * @return {boolean} whether the playlist is blacklisted or not
 * @function isBlacklisted
 */
export const isBlacklisted = function(playlist) {
  const until = playlist.excludeUntil;

  return until && until > Date.now();
};
|
||||
|
||||
/**
 * Check whether the playlist has been blacklisted permanently (excludeUntil
 * of Infinity) for being incompatible with the current playback
 * configuration.
 *
 * Note: returns the raw truthy/falsy chain result (undefined when
 * `excludeUntil` is unset) rather than a strict boolean.
 *
 * @param {Object} playlist the media playlist object
 * @return {boolean} whether the playlist is incompatible or not
 * @function isIncompatible
 */
export const isIncompatible = function(playlist) {
  const until = playlist.excludeUntil;

  return until && until === Infinity;
};
|
||||
|
||||
/**
 * Check whether the playlist is enabled: neither manually disabled nor
 * currently blacklisted.
 *
 * @param {Object} playlist the media playlist object
 * @return {boolean} whether the playlist is enabled or not
 * @function isEnabled
 */
export const isEnabled = function(playlist) {
  return !playlist.disabled && !isBlacklisted(playlist);
};
|
||||
|
||||
/**
 * Check whether the playlist has been manually disabled through the
 * representations api.
 *
 * @param {Object} playlist the media playlist object
 * @return {boolean} whether the playlist is disabled manually or not
 * @function isDisabled
 */
export const isDisabled = (playlist) => playlist.disabled;
|
||||
|
||||
/**
 * Returns whether the current playlist is an AES encrypted HLS stream, i.e.
 * whether any of its segments carries a `key`.
 *
 * @param {Object} media the media playlist object
 * @return {boolean} true if it's an AES encrypted HLS stream
 */
export const isAes = function(media) {
  return media.segments.some((segment) => Boolean(segment.key));
};
|
||||
|
||||
/**
 * Checks if the playlist has a value for the specified attribute.
 *
 * Note: returns the attribute's value (truthy/falsy), not a strict boolean,
 * so an attribute explicitly set to 0 or '' reads as absent.
 *
 * @param {string} attr
 *        Attribute to check for
 * @param {Object} playlist
 *        The media playlist object
 * @return {boolean}
 *         Whether the playlist contains a value for the attribute or not
 * @function hasAttribute
 */
export const hasAttribute = function(attr, playlist) {
  const attributes = playlist.attributes;

  return attributes && attributes[attr];
};
|
||||
|
||||
/**
 * Estimates the time required to complete a segment download from the
 * specified playlist.
 *
 * @param {number} segmentDuration
 *        Duration of requested segment
 * @param {number} bandwidth
 *        Current measured bandwidth of the player
 * @param {Object} playlist
 *        The media playlist object
 * @param {number=} bytesReceived
 *        Number of bytes already received for the request. Defaults to 0
 * @return {number|NaN}
 *         The estimated time to request the segment. NaN if bandwidth
 *         information for the given playlist is unavailable
 * @function estimateSegmentRequestTime
 */
export const estimateSegmentRequestTime = function(
  segmentDuration,
  bandwidth,
  playlist,
  bytesReceived = 0
) {
  if (!hasAttribute('BANDWIDTH', playlist)) {
    return NaN;
  }

  // Total size in bits implied by the advertised bitrate, minus what has
  // already been received.
  const totalBits = segmentDuration * playlist.attributes.BANDWIDTH;
  const remainingBits = totalBits - (bytesReceived * 8);

  return remainingBits / bandwidth;
};
|
||||
|
||||
/*
 * Returns whether the current playlist is the lowest enabled rendition.
 *
 * @param {Object} master the master playlist object
 * @param {Object} media the current media playlist object
 * @return {Boolean} true if on lowest rendition
 */
export const isLowestEnabledRendition = (master, media) => {
  // A single-rendition master is trivially the lowest.
  if (master.playlists.length === 1) {
    return true;
  }

  const currentBandwidth = media.attributes.BANDWIDTH || Number.MAX_VALUE;

  // The current rendition is lowest when no other enabled rendition
  // advertises a smaller bandwidth.
  const lowerRenditions = master.playlists.filter((playlist) => {
    if (!isEnabled(playlist)) {
      return false;
    }

    return (playlist.attributes.BANDWIDTH || 0) < currentBandwidth;
  });

  return lowerRenditions.length === 0;
};
|
||||
|
||||
// exports
// Aggregate default export so consumers can use `Playlist.duration(...)` etc.
// in addition to the named exports above. (sumDurations is intentionally only
// available as a named export here.)
export default {
  duration,
  seekable,
  safeLiveIndex,
  getMediaInfoForTime,
  isEnabled,
  isDisabled,
  isBlacklisted,
  isIncompatible,
  playlistEnd,
  isAes,
  hasAttribute,
  estimateSegmentRequestTime,
  isLowestEnabledRendition
};
|
||||
439
build/javascript/node_modules/@videojs/http-streaming/src/ranges.js
generated
vendored
Normal file
439
build/javascript/node_modules/@videojs/http-streaming/src/ranges.js
generated
vendored
Normal file
@@ -0,0 +1,439 @@
|
||||
/**
|
||||
* ranges
|
||||
*
|
||||
* Utilities for working with TimeRanges.
|
||||
*
|
||||
*/
|
||||
|
||||
import videojs from 'video.js';
|
||||
|
||||
// Fudge factor to account for TimeRanges rounding (~33ms, one frame at 30fps)
export const TIME_FUDGE_FACTOR = 1 / 30;
// Comparisons between time values such as current time and the end of the buffered range
// can be misleading because of precision differences or when the current media has poorly
// aligned audio and video, which can cause values to be slightly off from what you would
// expect. This value is what we consider to be safe to use in such comparisons to account
// for these scenarios.
export const SAFE_TIME_DELTA = TIME_FUDGE_FACTOR * 3;
|
||||
|
||||
/**
 * Clamps a value to within a range.
 *
 * @param {number} num - the value to clamp
 * @param {Array<number>} range - [start, end] of the range to clamp within,
 *        both inclusive
 * @return {number}
 */
const clamp = function(num, [start, end]) {
  // Raise to at least `start`, then cap at `end`.
  const atLeastStart = Math.max(start, num);

  return Math.min(atLeastStart, end);
};
|
||||
/**
 * Builds a new TimeRanges object containing only the ranges of `timeRanges`
 * for which `predicate(start, end)` is true.
 *
 * @param {TimeRanges} timeRanges - the ranges to filter
 * @param {Function} predicate - called with (start, end) for each range
 * @return {TimeRanges} a new TimeRanges object with the matching ranges
 */
const filterRanges = function(timeRanges, predicate) {
  const matches = [];

  if (timeRanges && timeRanges.length) {
    for (let i = 0; i < timeRanges.length; i++) {
      const start = timeRanges.start(i);
      const end = timeRanges.end(i);

      if (predicate(start, end)) {
        matches.push([start, end]);
      }
    }
  }

  return videojs.createTimeRanges(matches);
};
|
||||
|
||||
/**
 * Attempts to find the buffered TimeRange that contains the specified time,
 * tolerating SAFE_TIME_DELTA of slop on either edge.
 *
 * @param {TimeRanges} buffered - the TimeRanges object to query
 * @param {number} time - the time to filter on.
 * @return {TimeRanges} a new TimeRanges object
 */
export const findRange = function(buffered, time) {
  return filterRanges(
    buffered,
    (start, end) => start - SAFE_TIME_DELTA <= time && end + SAFE_TIME_DELTA >= time
  );
};
|
||||
|
||||
/**
 * Returns the TimeRanges that begin later than the specified time (with a
 * TIME_FUDGE_FACTOR of tolerance).
 *
 * @param {TimeRanges} timeRanges - the TimeRanges object to query
 * @param {number} time - the time to filter on.
 * @return {TimeRanges} a new TimeRanges object.
 */
export const findNextRange = function(timeRanges, time) {
  return filterRanges(timeRanges, (start) => start - TIME_FUDGE_FACTOR >= time);
};
|
||||
|
||||
/**
 * Returns the gaps between consecutive ranges in a TimeRanges object.
 *
 * @param {TimeRanges} buffered - the TimeRanges object
 * @return {TimeRanges} a TimeRanges object of gaps
 */
export const findGaps = function(buffered) {
  // A gap needs at least two ranges to exist between.
  if (buffered.length < 2) {
    return videojs.createTimeRanges();
  }

  const gaps = [];

  for (let i = 1; i < buffered.length; i++) {
    // Each gap runs from the end of one range to the start of the next.
    gaps.push([buffered.end(i - 1), buffered.start(i)]);
  }

  return videojs.createTimeRanges(gaps);
};
|
||||
|
||||
/**
 * Search for a likely end time for the segment that was just appended, based
 * on the state of the `buffered` property before and after the append. If
 * exactly one new end edge appeared, return it.
 *
 * @param {TimeRanges} original - the buffered time ranges before the update
 * @param {TimeRanges} update - the buffered time ranges after the update
 * @return {number|null} the end time added between `original` and `update`,
 * or null if one cannot be unambiguously determined.
 */
export const findSoleUncommonTimeRangesEnd = function(original, update) {
  const originalSpans = [];
  const candidateEnds = [];

  // Collect every [start, end] span present before the append.
  if (original) {
    for (let i = 0; i < original.length; i++) {
      originalSpans.push([original.start(i), original.end(i)]);
    }
  }

  if (update) {
    // A qualifying end edge must: 1) not already exist in `original`,
    // 2) not result from a range in `original` shrinking, and 3) not lie
    // inside a range that existed in `original`. All three reduce to: the
    // end must not fall within (or on the boundary of) any original span.
    for (let i = 0; i < update.length; i++) {
      const end = update.end(i);
      const coveredByOriginal = originalSpans.some(
        ([spanStart, spanEnd]) => spanStart <= end && spanEnd >= end
      );

      if (!coveredByOriginal) {
        candidateEnds.push(end);
      }
    }
  }

  // Err on the side of caution: only an unambiguous single new end edge is
  // useful.
  return candidateEnds.length === 1 ? candidateEnds[0] : null;
};
|
||||
|
||||
/**
 * Calculate the intersection of two TimeRanges using a sweep over their
 * sorted start/end points: a point is in the intersection when two ranges
 * overlap it (arity === 2).
 *
 * @param {TimeRanges} bufferA
 * @param {TimeRanges} bufferB
 * @return {TimeRanges} The intersection of `bufferA` with `bufferB`;
 * an empty TimeRanges when either input is missing or empty
 */
export const bufferIntersection = function(bufferA, bufferB) {
  let start = null;
  let end = null;
  let arity = 0;
  const extents = [];
  const ranges = [];

  if (!bufferA || !bufferA.length || !bufferB || !bufferB.length) {
    return videojs.createTimeRange();
  }

  // Handle the case where we have both buffers and create an
  // intersection of the two
  let count = bufferA.length;

  // A) Gather up all start and end times
  while (count--) {
    extents.push({time: bufferA.start(count), type: 'start'});
    extents.push({time: bufferA.end(count), type: 'end'});
  }
  count = bufferB.length;
  while (count--) {
    extents.push({time: bufferB.start(count), type: 'start'});
    extents.push({time: bufferB.end(count), type: 'end'});
  }
  // B) Sort them by time
  extents.sort(function(a, b) {
    return a.time - b.time;
  });

  // C) Go along one by one incrementing arity for start and decrementing
  // arity for ends
  for (count = 0; count < extents.length; count++) {
    if (extents[count].type === 'start') {
      arity++;

      // D) If arity is ever incremented to 2 we are entering an
      // overlapping range
      if (arity === 2) {
        start = extents[count].time;
      }
    } else if (extents[count].type === 'end') {
      arity--;

      // E) If arity is ever decremented to 1 we leaving an
      // overlapping range
      if (arity === 1) {
        end = extents[count].time;
      }
    }

    // F) Record overlapping ranges
    if (start !== null && end !== null) {
      ranges.push([start, end]);
      start = null;
      end = null;
    }
  }

  return videojs.createTimeRanges(ranges);
};
|
||||
|
||||
/**
 * Calculates the percentage of `adjustedRange` that overlaps the `buffered`
 * time ranges, relative to the duration of `referenceRange`.
 *
 * @param {TimeRanges} adjustedRange - the time range that the segment
 * covers adjusted according to currentTime (starts no earlier than it)
 * @param {TimeRanges} referenceRange - the original time range that the
 * segment covers
 * @param {number} currentTime - time in seconds where the current playback
 * is at
 * @param {TimeRanges} buffered - the currently buffered time ranges
 * @return {number} percent of the segment currently buffered
 */
const calculateBufferedPercent = function(
  adjustedRange,
  referenceRange,
  currentTime,
  buffered
) {
  const referenceDuration = referenceRange.end(0) - referenceRange.start(0);
  const adjustedDuration = adjustedRange.end(0) - adjustedRange.start(0);
  // Portion of the reference range trimmed away by the currentTime adjustment.
  const bufferMissingFromAdjusted = referenceDuration - adjustedDuration;
  const adjustedIntersection = bufferIntersection(adjustedRange, buffered);
  const referenceIntersection = bufferIntersection(referenceRange, buffered);
  let adjustedOverlap = 0;
  let referenceOverlap = 0;

  let count = adjustedIntersection.length;

  while (count--) {
    adjustedOverlap += adjustedIntersection.end(count) -
      adjustedIntersection.start(count);

    // If the current overlap segment starts at currentTime, then increase the
    // overlap duration so that it actually starts at the beginning of referenceRange
    // by including the difference between the two Range's durations
    // This is a work around for the way Flash has no buffer before currentTime
    // TODO: see if this is still necessary since Flash isn't included
    if (adjustedIntersection.start(count) === currentTime) {
      adjustedOverlap += bufferMissingFromAdjusted;
    }
  }

  count = referenceIntersection.length;

  while (count--) {
    referenceOverlap += referenceIntersection.end(count) -
      referenceIntersection.start(count);
  }

  // Use whichever value is larger for the percentage-buffered since that value
  // is likely more accurate (the adjusted overlap may have been inflated by
  // the Flash workaround above)
  return Math.max(adjustedOverlap, referenceOverlap) / referenceDuration * 100;
};
|
||||
|
||||
/**
 * Return the percentage of the range specified by startOfSegment and
 * segmentDuration that overlaps the current buffered content.
 *
 * @param {number} startOfSegment - the time where the segment begins
 * @param {number} segmentDuration - the duration of the segment in seconds
 * @param {number} currentTime - time in seconds where the current playback
 * is at
 * @param {TimeRanges} buffered - the state of the buffer
 * @return {number} percentage of the segment's time range that is
 * already in `buffered`
 */
export const getSegmentBufferedPercent = function(
  startOfSegment,
  segmentDuration,
  currentTime,
  buffered
) {
  const endOfSegment = startOfSegment + segmentDuration;

  // The full time range the segment covers.
  const fullRange = videojs.createTimeRanges([[
    startOfSegment,
    endOfSegment
  ]]);

  // The same range trimmed so that it starts no earlier than currentTime.
  // Flash keeps no buffer before currentTime, so this lets the function
  // still report 100% when only the portion of the segment at/after
  // currentTime is actually buffered.
  const forwardRange = videojs.createTimeRanges([[
    clamp(startOfSegment, [currentTime, endOfSegment]),
    endOfSegment
  ]]);

  // currentTime is at or beyond the segment's end: nothing of it lies ahead.
  if (forwardRange.start(0) === forwardRange.end(0)) {
    return 0;
  }

  const percent = calculateBufferedPercent(
    forwardRange,
    fullRange,
    currentTime,
    buffered
  );

  // A zero-duration segment produces NaN/±Infinity; report 0% since we will
  // likely need to fetch the segment anyway.
  if (isNaN(percent) || percent === Infinity || percent === -Infinity) {
    return 0;
  }

  return percent;
};
|
||||
|
||||
/**
 * Gets a human readable string for a TimeRange.
 *
 * @param {TimeRange} range
 * @return {string} a human readable string, e.g. "0 => 5, 10 => 15",
 *         or '' for a missing/empty range
 */
export const printableRange = (range) => {
  if (!range || !range.length) {
    return '';
  }

  return Array.from(
    { length: range.length },
    (_, i) => `${range.start(i)} => ${range.end(i)}`
  ).join(', ');
};
|
||||
|
||||
/**
 * Calculates the amount of time left in seconds until the player hits the end
 * of the buffer and causes a rebuffer.
 *
 * @param {TimeRange} buffered
 *        The state of the buffer
 * @param {number} currentTime
 *        The current time of the player
 * @param {number} [playbackRate=1]
 *        The current playback rate of the player
 * @return {number}
 *         Time until the player has to start rebuffering in seconds
 * @function timeUntilRebuffer
 */
export const timeUntilRebuffer = function(buffered, currentTime, playbackRate = 1) {
  // With nothing buffered, treat the buffer as ending at time 0.
  let lastBufferedEnd = 0;

  if (buffered.length) {
    lastBufferedEnd = buffered.end(buffered.length - 1);
  }

  // Faster playback drains the buffer proportionally sooner.
  return (lastBufferedEnd - currentTime) / playbackRate;
};
|
||||
|
||||
/**
 * Converts a TimeRanges object into an array representation.
 *
 * @param {TimeRanges} timeRanges
 * @return {Array} array of plain { start, end } objects, one per range
 */
export const timeRangesToArray = (timeRanges) => Array.from(
  { length: timeRanges.length },
  (_, rangeIndex) => ({
    start: timeRanges.start(rangeIndex),
    end: timeRanges.end(rangeIndex)
  })
);
|
||||
|
||||
/**
 * Determines if two time range objects are different.
 *
 * @param {TimeRange} a
 *        the first time range object to check
 *
 * @param {TimeRange} b
 *        the second time range object to check
 *
 * @return {boolean}
 *         Whether the time range objects differ
 */
export const isRangeDifferent = function(a, b) {
  // identical reference -> cannot differ
  if (a === b) {
    return false;
  }

  // exactly one of the two is missing
  if ((!a && b) || (!b && a)) {
    return true;
  }

  // a different number of ranges is always a difference
  if (a.length !== b.length) {
    return true;
  }

  // any start/end pair that disagrees is a difference
  let index = a.length;

  while (index--) {
    if (a.start(index) !== b.start(index) || a.end(index) !== b.end(index)) {
      return true;
    }
  }

  // same length and every pair matches: the same time range
  return false;
};
|
||||
127
build/javascript/node_modules/@videojs/http-streaming/src/reload-source-on-error.js
generated
vendored
Normal file
127
build/javascript/node_modules/@videojs/http-streaming/src/reload-source-on-error.js
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
import videojs from 'video.js';
|
||||
|
||||
/**
 * Default plugin options: allow a reload at most every `errorInterval`
 * seconds, and reload whatever source the tech is currently playing.
 */
const defaultOptions = {
  errorInterval: 30,
  getSource(next) {
    // `this` is the player. Prefer the tech's own current source record
    // over player.currentSource() when it is available.
    const tech = this.tech({ IWillNotUseThisInPlugins: true });
    const sourceObj = tech.currentSource_ || this.currentSource();

    return next(sourceObj);
  }
};
|
||||
|
||||
/**
 * Main entry point for the plugin. Wires up an `error` handler that reloads
 * the player's source (rate-limited by `errorInterval`) and a `dispose`
 * handler that unbinds everything again.
 *
 * @param {Player} player a reference to a videojs Player instance
 * @param {Object} [options] an object with plugin options
 * @private
 */
const initPlugin = function(player, options) {
  // timestamp (ms) of the last reload attempt, used for rate limiting
  let lastCalled = 0;
  // position to restore after the reloaded source's metadata arrives
  let seekTo = 0;
  const localOptions = videojs.mergeOptions(defaultOptions, options);

  player.ready(() => {
    player.trigger({type: 'usage', name: 'vhs-error-reload-initialized'});
    player.trigger({type: 'usage', name: 'hls-error-reload-initialized'});
  });

  /**
   * Player modifications to perform that must wait until `loadedmetadata`
   * has been triggered
   *
   * @private
   */
  const loadedMetadataHandler = function() {
    if (seekTo) {
      player.currentTime(seekTo);
    }
  };

  /**
   * Set the source on the player element, play, and seek if necessary
   *
   * @param {Object} sourceObj An object specifying the source url and mime-type to play
   * @private
   */
  const setSource = function(sourceObj) {
    if (sourceObj === null || sourceObj === undefined) {
      return;
    }
    // live streams report Infinity duration; don't try to restore a position there
    seekTo = (player.duration() !== Infinity && player.currentTime()) || 0;

    player.one('loadedmetadata', loadedMetadataHandler);

    player.src(sourceObj);
    player.trigger({type: 'usage', name: 'vhs-error-reload'});
    player.trigger({type: 'usage', name: 'hls-error-reload'});
    player.play();
  };

  /**
   * Attempt to get a source from either the built-in getSource function
   * or a custom function provided via the options
   *
   * @private
   */
  const errorHandler = function() {
    // Do not attempt to reload the source if a source-reload occurred before
    // 'errorInterval' time has elapsed since the last source-reload
    if (Date.now() - lastCalled < localOptions.errorInterval * 1000) {
      player.trigger({type: 'usage', name: 'vhs-error-reload-canceled'});
      player.trigger({type: 'usage', name: 'hls-error-reload-canceled'});
      return;
    }

    if (!localOptions.getSource ||
        typeof localOptions.getSource !== 'function') {
      videojs.log.error('ERROR: reloadSourceOnError - The option getSource must be a function!');
      return;
    }
    lastCalled = Date.now();

    return localOptions.getSource.call(player, setSource);
  };

  /**
   * Unbind any event handlers that were bound by the plugin
   *
   * @private
   */
  const cleanupEvents = function() {
    player.off('loadedmetadata', loadedMetadataHandler);
    player.off('error', errorHandler);
    player.off('dispose', cleanupEvents);
  };

  /**
   * Cleanup before re-initializing the plugin
   *
   * @param {Object} [newOptions] an object with plugin options
   * @private
   */
  const reinitPlugin = function(newOptions) {
    cleanupEvents();
    initPlugin(player, newOptions);
  };

  player.on('error', errorHandler);
  player.on('dispose', cleanupEvents);

  // Overwrite the plugin function so that we can correctly cleanup before
  // initializing the plugin
  player.reloadSourceOnError = reinitPlugin;
};
|
||||
|
||||
/**
 * Reload the source when an error is detected as long as there
 * wasn't an error previously within the last 30 seconds
 * (or `options.errorInterval` seconds).
 *
 * @param {Object} [options] an object with plugin options
 */
const reloadSourceOnError = function(options) {
  // `this` is the player the plugin was registered on
  initPlugin(this, options);
};

export default reloadSourceOnError;
|
||||
111
build/javascript/node_modules/@videojs/http-streaming/src/rendition-mixin.js
generated
vendored
Normal file
111
build/javascript/node_modules/@videojs/http-streaming/src/rendition-mixin.js
generated
vendored
Normal file
@@ -0,0 +1,111 @@
|
||||
import { isIncompatible, isEnabled } from './playlist.js';
|
||||
import { codecsForPlaylist } from './util/codecs.js';
|
||||
|
||||
/**
 * Returns a function that acts as the Enable/disable playlist function.
 *
 * @param {PlaylistLoader} loader - The master playlist loader
 * @param {string} playlistID - id of the playlist
 * @param {Function} changePlaylistFn - A function to be called after a
 * playlist's enabled-state has been changed. Will NOT be called if a
 * playlist's enabled-state is unchanged
 * @param {boolean=} enable - Value to set the playlist enabled-state to
 * or if undefined returns the current enabled-state for the playlist
 * @return {Function} Function for setting/getting enabled
 */
const enableFunction = (loader, playlistID, changePlaylistFn) => (enable) => {
  const targetPlaylist = loader.master.playlists[playlistID];
  const cannotPlay = isIncompatible(targetPlaylist);
  const wasEnabled = isEnabled(targetPlaylist);

  // no argument: act as a getter
  if (typeof enable === 'undefined') {
    return wasEnabled;
  }

  if (enable) {
    delete targetPlaylist.disabled;
  } else {
    targetPlaylist.disabled = true;
  }

  // only notify listeners when the state actually changed on a playable rendition
  if (enable !== wasEnabled && !cannotPlay) {
    // Ensure the outside world knows about our changes
    changePlaylistFn();
    loader.trigger(enable ? 'renditionenabled' : 'renditiondisabled');
  }

  return enable;
};
|
||||
|
||||
/**
 * The representation object encapsulates the publicly visible information
 * in a media playlist along with a setter/getter-type function (enabled)
 * for changing the enabled-state of a particular playlist entry
 *
 * @class Representation
 */
class Representation {
  constructor(vhsHandler, playlist, id) {
    const {
      masterPlaylistController_: mpc,
      options_: { smoothQualityChange }
    } = vhsHandler;
    // Get a reference to a bound version of the quality change function
    // (smoothQualityChange_ or fastQualityChange_ on the controller).
    const changeType = smoothQualityChange ? 'smooth' : 'fast';
    const qualityChangeFunction = mpc[`${changeType}QualityChange_`].bind(mpc);

    // some playlist attributes are optional
    if (playlist.attributes.RESOLUTION) {
      const resolution = playlist.attributes.RESOLUTION;

      this.width = resolution.width;
      this.height = resolution.height;
    }

    this.bandwidth = playlist.attributes.BANDWIDTH;

    this.codecs = codecsForPlaylist(mpc.master(), playlist);

    this.playlist = playlist;

    // The id is simply the ordinality of the media playlist
    // within the master playlist
    this.id = id;

    // Partially-apply the enableFunction to create a playlist-
    // specific variant
    this.enabled = enableFunction(
      vhsHandler.playlists,
      playlist.id,
      qualityChangeFunction
    );
  }
}
|
||||
|
||||
/**
 * A mixin function that adds the `representations` api to an instance
 * of the VhsHandler class.
 *
 * @param {VhsHandler} vhsHandler - An instance of VhsHandler to add the
 * representation API into
 */
const renditionSelectionMixin = function(vhsHandler) {
  const playlists = vhsHandler.playlists;

  // Add a single API-specific function to the VhsHandler instance
  vhsHandler.representations = () => {
    // Guard against being called before the master playlist has loaded.
    if (!playlists || !playlists.master || !playlists.master.playlists) {
      return [];
    }
    return playlists
      .master
      .playlists
      .filter((media) => !isIncompatible(media))
      .map((media) => new Representation(vhsHandler, media, media.id));
  };
};

export default renditionSelectionMixin;
|
||||
36
build/javascript/node_modules/@videojs/http-streaming/src/resolve-url.js
generated
vendored
Normal file
36
build/javascript/node_modules/@videojs/http-streaming/src/resolve-url.js
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
/**
|
||||
* @file resolve-url.js - Handling how URLs are resolved and manipulated
|
||||
*/
|
||||
|
||||
import _resolveUrl from '@videojs/vhs-utils/dist/resolve-url.js';
|
||||
|
||||
export const resolveUrl = _resolveUrl;
|
||||
|
||||
/**
 * Checks whether an xhr request was redirected and returns the correct url
 * depending on the `handleManifestRedirects` option.
 *
 * @api private
 *
 * @param {boolean} handleManifestRedirect - whether redirected urls should be honored
 * @param {string} url - the url being requested
 * @param {XMLHttpRequest} req - xhr request result
 *
 * @return {string} the redirected url when redirect handling is on and the
 *         request was actually redirected, otherwise the original url
 */
export const resolveManifestRedirect = (handleManifestRedirect, url, req) => {
  // To understand how the responseURL below is set and generated:
  // - https://fetch.spec.whatwg.org/#concept-response-url
  // - https://fetch.spec.whatwg.org/#atomic-http-redirect-handling
  const redirectedUrl = req && req.responseURL;
  const wasRedirected = handleManifestRedirect && redirectedUrl && url !== redirectedUrl;

  return wasRedirected ? redirectedUrl : url;
};
|
||||
|
||||
export default resolveUrl;
|
||||
2726
build/javascript/node_modules/@videojs/http-streaming/src/segment-loader.js
generated
vendored
Normal file
2726
build/javascript/node_modules/@videojs/http-streaming/src/segment-loader.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
240
build/javascript/node_modules/@videojs/http-streaming/src/segment-transmuxer.js
generated
vendored
Normal file
240
build/javascript/node_modules/@videojs/http-streaming/src/segment-transmuxer.js
generated
vendored
Normal file
@@ -0,0 +1,240 @@
|
||||
// FIFO of pending work: either transmux option objects or pre-bound action
// functions. Only one job at a time is run against the worker.
const transmuxQueue = [];
// The job currently in flight; null/undefined means the worker is idle.
let currentTransmux;
|
||||
|
||||
/**
 * Handles a 'data' message from the transmux worker: records the segment's
 * caption/metadata info on `transmuxedData` and hands the (re)wrapped media
 * bytes to `callback`.
 *
 * @param {MessageEvent} event - worker message carrying `data.segment`
 * @param {Object} transmuxedData - accumulator whose `buffer` array collects
 *        per-event caption/metadata records
 * @param {Function} callback - invoked with the typed-array result
 */
export const handleData_ = (event, transmuxedData, callback) => {
  const segment = event.data.segment;
  const {
    type,
    initSegment,
    captions,
    captionStreams,
    metadata,
    videoFrameDtsTime,
    videoFramePtsTime
  } = segment;

  transmuxedData.buffer.push({ captions, captionStreams, metadata });

  // right now, boxes will come back from partial transmuxer, data from full
  const boxes = segment.boxes || { data: segment.data };

  // cast the ArrayBuffers back to TypedArray views
  const result = {
    type,
    data: new Uint8Array(boxes.data, boxes.data.byteOffset, boxes.data.byteLength),
    initSegment: new Uint8Array(initSegment.data, initSegment.byteOffset, initSegment.byteLength)
  };

  // only forward the frame timing values when the worker provided them
  if (typeof videoFrameDtsTime !== 'undefined') {
    result.videoFrameDtsTime = videoFrameDtsTime;
  }

  if (typeof videoFramePtsTime !== 'undefined') {
    result.videoFramePtsTime = videoFramePtsTime;
  }

  callback(result);
};
|
||||
|
||||
/**
 * Handles the final 'transmuxed' message for a job: resets the caption/metadata
 * buffer and forwards the accumulator to the done callback.
 *
 * @param {Object} options
 * @param {Object} options.transmuxedData - the accumulator built up by handleData_
 * @param {Function} options.callback - invoked with `transmuxedData`
 */
export const handleDone_ = ({ transmuxedData, callback }) => {
  // Previously we only returned data on data events, not on done events.
  // Clear out the buffer to keep that consistent.
  transmuxedData.buffer = [];

  // all buffers should have been flushed from the muxer, so start processing
  // anything we have received
  callback(transmuxedData);
};
|
||||
|
||||
/**
 * Records GOP info from a worker 'gopInfo' message onto the accumulator.
 *
 * @param {MessageEvent} event - worker message carrying `data.gopInfo`
 * @param {Object} transmuxedData - accumulator to annotate
 */
export const handleGopInfo_ = (event, transmuxedData) => {
  const { gopInfo } = event.data;

  transmuxedData.gopInfo = gopInfo;
};
|
||||
|
||||
/**
 * Runs one transmux job against the worker: installs a message handler that
 * routes each worker event to the matching callback, configures the worker
 * (audio append start, GOP alignment, remux mode), posts the segment bytes as
 * a Transferable, and finally requests a flush. When the worker reports
 * 'transmuxed', the handler is removed and the next queued job is started.
 *
 * @param {Object} options - job description; `transmuxer` is the worker,
 *        `bytes` the segment data, remaining properties are callbacks/flags.
 */
export const processTransmux = ({
  transmuxer,
  bytes,
  audioAppendStart,
  gopsToAlignWith,
  isPartial,
  remux,
  onData,
  onTrackInfo,
  onAudioTimingInfo,
  onVideoTimingInfo,
  onVideoSegmentTimingInfo,
  onId3,
  onCaptions,
  onDone
}) => {
  // per-job accumulator threaded through handleData_/handleGopInfo_/handleDone_
  const transmuxedData = {
    isPartial,
    buffer: []
  };

  const handleMessage = (event) => {
    if (!currentTransmux) {
      // disposed
      return;
    }

    // dispatch each worker action to its callback
    if (event.data.action === 'data') {
      handleData_(event, transmuxedData, onData);
    }
    if (event.data.action === 'trackinfo') {
      onTrackInfo(event.data.trackInfo);
    }
    if (event.data.action === 'gopInfo') {
      handleGopInfo_(event, transmuxedData);
    }
    if (event.data.action === 'audioTimingInfo') {
      onAudioTimingInfo(event.data.audioTimingInfo);
    }
    if (event.data.action === 'videoTimingInfo') {
      onVideoTimingInfo(event.data.videoTimingInfo);
    }
    if (event.data.action === 'videoSegmentTimingInfo') {
      onVideoSegmentTimingInfo(event.data.videoSegmentTimingInfo);
    }
    if (event.data.action === 'id3Frame') {
      onId3([event.data.id3Frame], event.data.id3Frame.dispatchType);
    }
    if (event.data.action === 'caption') {
      onCaptions(event.data.caption);
    }

    // wait for the transmuxed event since we may have audio and video
    if (event.data.type !== 'transmuxed') {
      return;
    }

    // job complete: detach from the worker and report done
    transmuxer.onmessage = null;
    handleDone_({
      transmuxedData,
      callback: onDone
    });

    /* eslint-disable no-use-before-define */
    dequeue();
    /* eslint-enable */
  };

  transmuxer.onmessage = handleMessage;

  if (audioAppendStart) {
    transmuxer.postMessage({
      action: 'setAudioAppendStart',
      appendStart: audioAppendStart
    });
  }

  // allow empty arrays to be passed to clear out GOPs
  if (Array.isArray(gopsToAlignWith)) {
    transmuxer.postMessage({
      action: 'alignGopsWith',
      gopsToAlignWith
    });
  }

  if (typeof remux !== 'undefined') {
    transmuxer.postMessage({
      action: 'setRemux',
      remux
    });
  }

  if (bytes.byteLength) {
    const buffer = bytes instanceof ArrayBuffer ? bytes : bytes.buffer;
    const byteOffset = bytes instanceof ArrayBuffer ? 0 : bytes.byteOffset;

    transmuxer.postMessage(
      {
        action: 'push',
        // Send the typed-array of data as an ArrayBuffer so that
        // it can be sent as a "Transferable" and avoid the costly
        // memory copy
        data: buffer,
        // To recreate the original typed-array, we need information
        // about what portion of the ArrayBuffer it was a view into
        byteOffset,
        byteLength: bytes.byteLength
      },
      [ buffer ]
    );
  }

  // even if we didn't push any bytes, we have to make sure we flush in case we reached
  // the end of the segment
  transmuxer.postMessage({ action: isPartial ? 'partialFlush' : 'flush' });
};
|
||||
|
||||
/**
 * Marks the current job finished and, when the queue is non-empty, starts the
 * next one: queued functions (pre-bound actions) are invoked directly, while
 * queued option objects are fed to processTransmux.
 */
export const dequeue = () => {
  currentTransmux = null;

  if (!transmuxQueue.length) {
    return;
  }

  currentTransmux = transmuxQueue.shift();

  if (typeof currentTransmux === 'function') {
    currentTransmux();
    return;
  }

  processTransmux(currentTransmux);
};
|
||||
|
||||
/**
 * Posts a simple named action (e.g. 'reset') to the worker and immediately
 * moves on to the next queued job (simple actions have no completion event).
 *
 * @param {Worker} transmuxer - the transmux worker
 * @param {string} action - name of the action to post
 */
export const processAction = (transmuxer, action) => {
  transmuxer.postMessage({ action });
  dequeue();
};
|
||||
|
||||
/**
 * Queues a simple named action for the worker. Runs it immediately when the
 * worker is idle, otherwise appends a pre-bound runner to the queue.
 *
 * @param {string} action - name of the action (e.g. 'reset', 'endTimeline')
 * @param {Worker} transmuxer - the transmux worker
 */
export const enqueueAction = (action, transmuxer) => {
  if (currentTransmux) {
    // busy: defer until the in-flight job completes
    transmuxQueue.push(processAction.bind(null, transmuxer, action));
    return;
  }

  currentTransmux = action;
  processAction(transmuxer, action);
};
|
||||
|
||||
/**
 * Queues a 'reset' action for the worker.
 *
 * @param {Worker} transmuxer - the transmux worker
 */
export const reset = (transmuxer) => {
  enqueueAction('reset', transmuxer);
};
|
||||
|
||||
/**
 * Queues an 'endTimeline' action for the worker.
 *
 * @param {Worker} transmuxer - the transmux worker
 */
export const endTimeline = (transmuxer) => {
  enqueueAction('endTimeline', transmuxer);
};
|
||||
|
||||
/**
 * Queues a full transmux job. Runs it immediately when the worker is idle,
 * otherwise appends the job's option object to the queue.
 *
 * @param {Object} options - job description consumed by processTransmux
 */
export const transmux = (options) => {
  if (currentTransmux) {
    // busy: defer until the in-flight job completes
    transmuxQueue.push(options);
    return;
  }

  currentTransmux = options;
  processTransmux(options);
};
|
||||
|
||||
/**
 * Drops all queued and in-flight work by clearing the module-level
 * references; in-flight worker messages are subsequently ignored by
 * processTransmux's handler (it checks `currentTransmux`).
 */
export const dispose = () => {
  // clear out module-level references
  currentTransmux = null;
  transmuxQueue.length = 0;
};
|
||||
|
||||
export default {
|
||||
reset,
|
||||
dispose,
|
||||
endTimeline,
|
||||
transmux
|
||||
};
|
||||
806
build/javascript/node_modules/@videojs/http-streaming/src/source-updater.js
generated
vendored
Normal file
806
build/javascript/node_modules/@videojs/http-streaming/src/source-updater.js
generated
vendored
Normal file
@@ -0,0 +1,806 @@
|
||||
/**
|
||||
* @file source-updater.js
|
||||
*/
|
||||
import videojs from 'video.js';
|
||||
import logger from './util/logger';
|
||||
import noop from './util/noop';
|
||||
import { bufferIntersection } from './ranges.js';
|
||||
import {getMimeForCodec} from '@videojs/vhs-utils/dist/codecs.js';
|
||||
import window from 'global/window';
|
||||
import toTitleCase from './util/to-title-case.js';
|
||||
|
||||
const bufferTypes = [
|
||||
'video',
|
||||
'audio'
|
||||
];
|
||||
|
||||
/**
 * Reports whether the given source buffer type is busy: either the underlying
 * SourceBuffer is mid-update or an async queue entry is pending for it.
 *
 * @param {string} type - 'audio' or 'video'
 * @param {SourceUpdater} sourceUpdater - the owning source updater
 * @return {*} truthy when busy (may be the pending queue entry itself)
 */
const updating = (type, sourceUpdater) => {
  const buffer = sourceUpdater[`${type}Buffer`];

  if (buffer && buffer.updating) {
    return true;
  }

  return sourceUpdater.queuePending[type];
};
|
||||
|
||||
/**
 * Finds the index of the next queue entry for the given source buffer type.
 *
 * @param {string} type - 'audio' or 'video'
 * @param {Array} queue - the source updater's queue
 * @return {?number} the index of the next matching entry, or null when there
 *         is none or a 'mediaSource' entry (which uses both source buffers
 *         and must process first) appears before it
 */
const nextQueueIndexOfType = (type, queue) => {
  const index = queue.findIndex(
    (entry) => entry.type === 'mediaSource' || entry.type === type
  );

  // a media source entry blocks processing so it can go through first
  if (index === -1 || queue[index].type === 'mediaSource') {
    return null;
  }

  return index;
};
|
||||
|
||||
/**
 * Attempts to process the next entry on the source updater's queue for the
 * given type. Media-source-level entries (which need both source buffers)
 * always take priority and block per-buffer entries; synchronous entries
 * chain immediately, while async entries park in `queuePending[type]` until
 * their `updateend` fires.
 *
 * @param {string} type - 'audio', 'video', or 'mediaSource'
 * @param {SourceUpdater} sourceUpdater - the owning source updater
 */
const shiftQueue = (type, sourceUpdater) => {
  if (sourceUpdater.queue.length === 0) {
    return;
  }

  let queueIndex = 0;
  let queueEntry = sourceUpdater.queue[queueIndex];

  if (queueEntry.type === 'mediaSource') {
    if (!sourceUpdater.updating() && sourceUpdater.mediaSource.readyState !== 'closed') {
      sourceUpdater.queue.shift();
      queueEntry.action(sourceUpdater);

      if (queueEntry.doneFn) {
        queueEntry.doneFn();
      }

      // Only specific source buffer actions must wait for async updateend events. Media
      // Source actions process synchronously. Therefore, both audio and video source
      // buffers are now clear to process the next queue entries.
      shiftQueue('audio', sourceUpdater);
      shiftQueue('video', sourceUpdater);
    }

    // Media Source actions require both source buffers, so if the media source action
    // couldn't process yet (because one or both source buffers are busy), block other
    // queue actions until both are available and the media source action can process.
    return;
  }

  if (type === 'mediaSource') {
    // If the queue was shifted by a media source action (this happens when pushing a
    // media source action onto the queue), then it wasn't from an updateend event from an
    // audio or video source buffer, so there's no change from previous state, and no
    // processing should be done.
    return;
  }

  // Media source queue entries don't need to consider whether the source updater is
  // started (i.e., source buffers are created) as they don't need the source buffers, but
  // source buffer queue entries do.
  if (!sourceUpdater.started_ || sourceUpdater.mediaSource.readyState === 'closed' || updating(type, sourceUpdater)) {
    return;
  }

  if (queueEntry.type !== type) {
    queueIndex = nextQueueIndexOfType(type, sourceUpdater.queue);

    if (queueIndex === null) {
      // Either there's no queue entry that uses this source buffer type in the queue, or
      // there's a media source queue entry before the next entry of this type, in which
      // case wait for that action to process first.
      return;
    }

    queueEntry = sourceUpdater.queue[queueIndex];
  }

  sourceUpdater.queue.splice(queueIndex, 1);
  queueEntry.action(type, sourceUpdater);

  if (!queueEntry.doneFn) {
    // synchronous operation, process next entry
    shiftQueue(type, sourceUpdater);
    return;
  }

  // asynchronous operation, so keep a record that this source buffer type is in use
  sourceUpdater.queuePending[type] = queueEntry;
};
|
||||
|
||||
/**
 * Detaches the source updater's listeners from the given source buffer and
 * clears the updater's references to it. No-op when the buffer doesn't exist.
 *
 * @param {string} type - 'audio' or 'video'
 * @param {SourceUpdater} sourceUpdater - the owning source updater
 */
const cleanupBuffer = (type, sourceUpdater) => {
  const buffer = sourceUpdater[`${type}Buffer`];
  const titleType = toTitleCase(type);

  if (!buffer) {
    return;
  }

  // listeners were attached by the addSourceBuffer action
  buffer.removeEventListener('updateend', sourceUpdater[`on${titleType}UpdateEnd_`]);
  buffer.removeEventListener('error', sourceUpdater[`on${titleType}Error_`]);

  sourceUpdater.codecs[type] = null;
  sourceUpdater[`${type}Buffer`] = null;
};
|
||||
|
||||
/**
 * Reports whether `sourceBuffer` is one of `mediaSource`'s source buffers.
 *
 * @param {MediaSource} mediaSource
 * @param {SourceBuffer} sourceBuffer
 * @return {*} truthy when the buffer belongs to the media source
 */
const inSourceBuffers = (mediaSource, sourceBuffer) => {
  if (!mediaSource || !sourceBuffer) {
    // short-circuit like an `&&` chain (may yield a falsy non-boolean)
    return mediaSource && sourceBuffer;
  }

  // SourceBufferList is array-like but has no own indexOf
  return Array.prototype.indexOf.call(mediaSource.sourceBuffers, sourceBuffer) !== -1;
};
|
||||
|
||||
/**
 * Factories for every operation that can be placed on the source updater's
 * queue. Each factory captures its arguments and returns the function the
 * queue later invokes: `(type, sourceUpdater)` for per-source-buffer actions,
 * or `(sourceUpdater)` for media-source-level actions.
 */
const actions = {
  appendBuffer: (bytes, segmentInfo) => (type, sourceUpdater) => {
    const sourceBuffer = sourceUpdater[`${type}Buffer`];

    // can't do anything if the media source / source buffer is null
    // or the media source does not contain this source buffer.
    if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {
      return;
    }

    sourceUpdater.logger_(`Appending segment ${segmentInfo.mediaIndex}'s ${bytes.length} bytes to ${type}Buffer`);

    sourceBuffer.appendBuffer(bytes);
  },
  remove: (start, end) => (type, sourceUpdater) => {
    const sourceBuffer = sourceUpdater[`${type}Buffer`];

    // can't do anything if the media source / source buffer is null
    // or the media source does not contain this source buffer.
    if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {
      return;
    }

    sourceUpdater.logger_(`Removing ${start} to ${end} from ${type}Buffer`);
    sourceBuffer.remove(start, end);
  },
  timestampOffset: (offset) => (type, sourceUpdater) => {
    const sourceBuffer = sourceUpdater[`${type}Buffer`];

    // can't do anything if the media source / source buffer is null
    // or the media source does not contain this source buffer.
    if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {
      return;
    }

    sourceUpdater.logger_(`Setting ${type}timestampOffset to ${offset}`);

    sourceBuffer.timestampOffset = offset;
  },
  // generic hook to run arbitrary code at a serialized point in the queue
  callback: (callback) => (type, sourceUpdater) => {
    callback();
  },
  endOfStream: (error) => (sourceUpdater) => {
    if (sourceUpdater.mediaSource.readyState !== 'open') {
      return;
    }
    sourceUpdater.logger_(`Calling mediaSource endOfStream(${error || ''})`);

    try {
      sourceUpdater.mediaSource.endOfStream(error);
    } catch (e) {
      videojs.log.warn('Failed to call media source endOfStream', e);
    }
  },
  duration: (duration) => (sourceUpdater) => {
    sourceUpdater.logger_(`Setting mediaSource duration to ${duration}`);
    try {
      sourceUpdater.mediaSource.duration = duration;
    } catch (e) {
      videojs.log.warn('Failed to set media source duration', e);
    }
  },
  abort: () => (type, sourceUpdater) => {
    if (sourceUpdater.mediaSource.readyState !== 'open') {
      return;
    }
    const sourceBuffer = sourceUpdater[`${type}Buffer`];

    // can't do anything if the media source / source buffer is null
    // or the media source does not contain this source buffer.
    if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {
      return;
    }

    sourceUpdater.logger_(`calling abort on ${type}Buffer`);
    try {
      sourceBuffer.abort();
    } catch (e) {
      videojs.log.warn(`Failed to abort on ${type}Buffer`, e);
    }
  },
  addSourceBuffer: (type, codec) => (sourceUpdater) => {
    const titleType = toTitleCase(type);
    const mime = getMimeForCodec(codec);

    sourceUpdater.logger_(`Adding ${type}Buffer with codec ${codec} to mediaSource`);

    const sourceBuffer = sourceUpdater.mediaSource.addSourceBuffer(mime);

    // these listeners are detached again by cleanupBuffer
    sourceBuffer.addEventListener('updateend', sourceUpdater[`on${titleType}UpdateEnd_`]);
    sourceBuffer.addEventListener('error', sourceUpdater[`on${titleType}Error_`]);
    sourceUpdater.codecs[type] = codec;
    sourceUpdater[`${type}Buffer`] = sourceBuffer;
  },
  removeSourceBuffer: (type) => (sourceUpdater) => {
    const sourceBuffer = sourceUpdater[`${type}Buffer`];

    cleanupBuffer(type, sourceUpdater);

    // can't do anything if the media source / source buffer is null
    // or the media source does not contain this source buffer.
    if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {
      return;
    }

    // NOTE(review): cleanupBuffer has already nulled codecs[type] above, so
    // the codec reported in this log line is always null -- confirm intended.
    sourceUpdater.logger_(`Removing ${type}Buffer with codec ${sourceUpdater.codecs[type]} from mediaSource`);

    try {
      sourceUpdater.mediaSource.removeSourceBuffer(sourceBuffer);
    } catch (e) {
      videojs.log.warn(`Failed to removeSourceBuffer ${type}Buffer`, e);
    }
  },
  changeType: (codec) => (type, sourceUpdater) => {
    const sourceBuffer = sourceUpdater[`${type}Buffer`];
    const mime = getMimeForCodec(codec);

    // can't do anything if the media source / source buffer is null
    // or the media source does not contain this source buffer.
    if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {
      return;
    }

    // do not update codec if we don't need to.
    if (sourceUpdater.codecs[type] === codec) {
      return;
    }

    sourceUpdater.logger_(`changing ${type}Buffer codec from ${sourceUpdater.codecs[type]} to ${codec}`);

    sourceBuffer.changeType(mime);
    sourceUpdater.codecs[type] = codec;
  }
};
|
||||
|
||||
/**
 * Appends an entry to the source updater's queue and immediately tries to
 * process it (it runs now if nothing for its type is pending).
 *
 * @param {Object} options
 * @param {string} options.type - 'audio', 'video', or 'mediaSource'
 * @param {SourceUpdater} options.sourceUpdater - the owning source updater
 * @param {Function} options.action - the queued operation (see `actions`)
 * @param {Function} [options.doneFn] - completion callback for async actions
 * @param {string} options.name - debugging label for the entry
 */
const pushQueue = ({type, sourceUpdater, action, doneFn, name}) => {
  const entry = { type, action, doneFn, name };

  sourceUpdater.queue.push(entry);
  shiftQueue(type, sourceUpdater);
};
|
||||
|
||||
/**
 * Creates the `updateend` listener for a source buffer type: completes the
 * pending queue entry (invoking its doneFn with any recorded error) and kicks
 * off the next entry for that type.
 *
 * @param {string} type - 'audio' or 'video'
 * @param {SourceUpdater} sourceUpdater - the owning source updater
 * @return {Function} the event listener to attach to the source buffer
 */
const onUpdateend = (type, sourceUpdater) => (e) => {
  // Although there should, in theory, be a pending action for any updateend received,
  // there are some actions that may trigger updateend events without set definitions in
  // the w3c spec. For instance, setting the duration on the media source may trigger
  // updateend events on source buffers. This does not appear to be in the spec. As such,
  // if we encounter an updateend without a corresponding pending action from our queue
  // for that source buffer type, process the next action.
  if (sourceUpdater.queuePending[type]) {
    const doneFn = sourceUpdater.queuePending[type].doneFn;

    sourceUpdater.queuePending[type] = null;

    if (doneFn) {
      // if there's an error, report it
      doneFn(sourceUpdater[`${type}Error_`]);
    }
  }

  shiftQueue(type, sourceUpdater);
};
|
||||
|
||||
/**
 * A queue of callbacks to be serialized and applied when a
 * MediaSource and its associated SourceBuffers are not in the
 * updating state. It is used by the segment loader to update the
 * underlying SourceBuffers when new data is loaded, for instance.
 *
 * @class SourceUpdater
 * @param {MediaSource} mediaSource the MediaSource to create the SourceBuffer from
 */
export default class SourceUpdater extends videojs.EventTarget {
  constructor(mediaSource) {
    super();
    this.mediaSource = mediaSource;
    // process any queued mediaSource actions as soon as the media source opens
    this.sourceopenListener_ = () => shiftQueue('mediaSource', this);
    this.mediaSource.addEventListener('sourceopen', this.sourceopenListener_);
    this.logger_ = logger('SourceUpdater');
    // initial timestamp offset is 0
    this.audioTimestampOffset_ = 0;
    this.videoTimestampOffset_ = 0;
    this.queue = [];
    this.queuePending = {
      audio: null,
      video: null
    };
    // audio appends queued while waiting for the first video append
    this.delayedAudioAppendQueue_ = [];
    this.videoAppendQueued_ = false;
    this.codecs = {};
    this.onVideoUpdateEnd_ = onUpdateend('video', this);
    this.onAudioUpdateEnd_ = onUpdateend('audio', this);
    this.onVideoError_ = (e) => {
      // used for debugging
      this.videoError_ = e;
    };
    this.onAudioError_ = (e) => {
      // used for debugging
      this.audioError_ = e;
    };
    this.started_ = false;
  }

  /**
   * Whether the source buffers have been created yet.
   *
   * @return {boolean}
   *         true once createSourceBuffers has run
   */
  ready() {
    return this.started_;
  }

  /**
   * Create the initial source buffers for the given codecs and signal
   * readiness. No-op if source buffers were already created.
   *
   * @param {Object} codecs
   *        Map of buffer type ('audio'/'video') to codec string
   */
  createSourceBuffers(codecs) {
    if (this.ready()) {
      // already created them before
      return;
    }

    // the intial addOrChangeSourceBuffers will always be
    // two add buffers.
    this.addOrChangeSourceBuffers(codecs);
    this.started_ = true;
    this.trigger('ready');
  }

  /**
   * Add a type of source buffer to the media source.
   *
   * @param {string} type
   *        The type of source buffer to add.
   *
   * @param {string} codec
   *        The codec to add the source buffer with.
   */
  addSourceBuffer(type, codec) {
    pushQueue({
      type: 'mediaSource',
      sourceUpdater: this,
      action: actions.addSourceBuffer(type, codec),
      name: 'addSourceBuffer'
    });
  }

  /**
   * call abort on a source buffer.
   *
   * @param {string} type
   *        The type of source buffer to call abort on.
   */
  abort(type) {
    pushQueue({
      type,
      sourceUpdater: this,
      action: actions.abort(type),
      name: 'abort'
    });
  }

  /**
   * Call removeSourceBuffer and remove a specific type
   * of source buffer on the mediaSource.
   *
   * @param {string} type
   *        The type of source buffer to remove.
   */
  removeSourceBuffer(type) {
    if (!this.canRemoveSourceBuffer()) {
      videojs.log.error('removeSourceBuffer is not supported!');
      return;
    }

    pushQueue({
      type: 'mediaSource',
      sourceUpdater: this,
      action: actions.removeSourceBuffer(type),
      name: 'removeSourceBuffer'
    });
  }

  /**
   * Whether or not the removeSourceBuffer function is supported
   * on the mediaSource.
   *
   * @return {boolean}
   *          if removeSourceBuffer can be called.
   */
  canRemoveSourceBuffer() {
    // IE reports that it supports removeSourceBuffer, but often throws
    // errors when attempting to use the function. So we report that it
    // does not support removeSourceBuffer.
    return !videojs.browser.IE_VERSION && window.MediaSource &&
      window.MediaSource.prototype &&
      typeof window.MediaSource.prototype.removeSourceBuffer === 'function';
  }

  /**
   * Whether or not the changeType function is supported
   * on SourceBuffers in this environment.
   *
   * @return {boolean}
   *          if changeType can be called.
   */
  static canChangeType() {
    return window.SourceBuffer &&
      window.SourceBuffer.prototype &&
      typeof window.SourceBuffer.prototype.changeType === 'function';
  }

  /**
   * Whether or not the changeType function is supported
   * on our SourceBuffers.
   *
   * @return {boolean}
   *         if changeType can be called.
   */
  canChangeType() {
    return this.constructor.canChangeType();
  }

  /**
   * Call the changeType function on a source buffer, given the codec and type.
   *
   * @param {string} type
   *        The type of source buffer to call changeType on.
   *
   * @param {string} codec
   *        The codec string to change type with on the source buffer.
   */
  changeType(type, codec) {
    if (!this.canChangeType()) {
      videojs.log.error('changeType is not supported!');
      return;
    }

    pushQueue({
      type,
      sourceUpdater: this,
      action: actions.changeType(codec),
      name: 'changeType'
    });
  }

  /**
   * Add source buffers with a codec or, if they are already created,
   * call changeType on source buffers using changeType.
   *
   * @param {Object} codecs
   *        Codecs to switch to
   */
  addOrChangeSourceBuffers(codecs) {
    if (!codecs || typeof codecs !== 'object' || Object.keys(codecs).length === 0) {
      throw new Error('Cannot addOrChangeSourceBuffers to undefined codecs');
    }

    Object.keys(codecs).forEach((type) => {
      const codec = codecs[type];

      if (!this.ready()) {
        return this.addSourceBuffer(type, codec);
      }

      if (this.canChangeType()) {
        this.changeType(type, codec);
      }
    });
  }

  /**
   * Queue an update to append an ArrayBuffer.
   *
   * @param {Object} options
   * @param {string} options.type
   *        The buffer type to append to ('audio' or 'video')
   * @param {TypedArray} options.bytes
   *        The bytes to append
   * @param {Object} [options.segmentInfo]
   *        Timing information for the segment being appended
   * @param {Function} doneFn the function to call when done
   * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-appendBuffer-void-ArrayBuffer-data
   */
  appendBuffer(options, doneFn) {
    const {segmentInfo, type, bytes} = options;

    this.processedAppend_ = true;
    // delay audio appends until the first video append so the video buffer
    // establishes the timeline first
    if (type === 'audio' && this.videoBuffer && !this.videoAppendQueued_) {
      this.delayedAudioAppendQueue_.push([options, doneFn]);
      this.logger_(`delayed audio append of ${bytes.length} until video append`);
      return;
    }

    pushQueue({
      type,
      sourceUpdater: this,
      action: actions.appendBuffer(bytes, segmentInfo || {mediaIndex: -1}),
      doneFn,
      name: 'appendBuffer'
    });

    if (type === 'video') {
      this.videoAppendQueued_ = true;
      if (!this.delayedAudioAppendQueue_.length) {
        return;
      }
      // flush any audio appends that were waiting on this first video append
      const queue = this.delayedAudioAppendQueue_.slice();

      this.logger_(`queuing delayed audio ${queue.length} appendBuffers`);

      this.delayedAudioAppendQueue_.length = 0;
      queue.forEach((que) => {
        this.appendBuffer.apply(this, que);
      });
    }
  }

  /**
   * Get the audio buffer's buffered timerange.
   *
   * @return {TimeRange}
   *         The audio buffer's buffered time range
   */
  audioBuffered() {
    // no media source/source buffer or it isn't in the media sources
    // source buffer list
    if (!inSourceBuffers(this.mediaSource, this.audioBuffer)) {
      return videojs.createTimeRange();
    }

    return this.audioBuffer.buffered ? this.audioBuffer.buffered :
      videojs.createTimeRange();
  }

  /**
   * Get the video buffer's buffered timerange.
   *
   * @return {TimeRange}
   *         The video buffer's buffered time range
   */
  videoBuffered() {
    // no media source/source buffer or it isn't in the media sources
    // source buffer list
    if (!inSourceBuffers(this.mediaSource, this.videoBuffer)) {
      return videojs.createTimeRange();
    }
    return this.videoBuffer.buffered ? this.videoBuffer.buffered :
      videojs.createTimeRange();
  }

  /**
   * Get a combined video/audio buffer's buffered timerange.
   *
   * @return {TimeRange}
   *         the combined time range
   */
  buffered() {
    const video = inSourceBuffers(this.mediaSource, this.videoBuffer) ? this.videoBuffer : null;
    const audio = inSourceBuffers(this.mediaSource, this.audioBuffer) ? this.audioBuffer : null;

    if (audio && !video) {
      return this.audioBuffered();
    }

    if (video && !audio) {
      return this.videoBuffered();
    }

    return bufferIntersection(this.audioBuffered(), this.videoBuffered());
  }

  /**
   * Add a callback to the queue that will set duration on the mediaSource.
   *
   * @param {number} duration
   *        The duration to set
   *
   * @param {Function} [doneFn]
   *        function to run after duration has been set.
   */
  setDuration(duration, doneFn = noop) {
    // In order to set the duration on the media source, it's necessary to wait for all
    // source buffers to no longer be updating. "If the updating attribute equals true on
    // any SourceBuffer in sourceBuffers, then throw an InvalidStateError exception and
    // abort these steps." (source: https://www.w3.org/TR/media-source/#attributes).
    pushQueue({
      type: 'mediaSource',
      sourceUpdater: this,
      action: actions.duration(duration),
      name: 'duration',
      doneFn
    });
  }

  /**
   * Add a mediaSource endOfStream call to the queue
   *
   * @param {Error} [error]
   *        Call endOfStream with an error
   *
   * @param {Function} [doneFn]
   *        A function that should be called when the
   *        endOfStream call has finished.
   */
  endOfStream(error = null, doneFn = noop) {
    // endOfStream only accepts a string error argument
    if (typeof error !== 'string') {
      error = undefined;
    }
    // In order to set the duration on the media source, it's necessary to wait for all
    // source buffers to no longer be updating. "If the updating attribute equals true on
    // any SourceBuffer in sourceBuffers, then throw an InvalidStateError exception and
    // abort these steps." (source: https://www.w3.org/TR/media-source/#attributes).
    pushQueue({
      type: 'mediaSource',
      sourceUpdater: this,
      action: actions.endOfStream(error),
      name: 'endOfStream',
      doneFn
    });
  }

  /**
   * Queue an update to remove a time range from the buffer.
   *
   * @param {number} start where to start the removal
   * @param {number} end where to end the removal
   * @param {Function} [done=noop] optional callback to be executed when the remove
   * operation is complete
   * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-remove-void-double-start-unrestricted-double-end
   */
  removeAudio(start, end, done = noop) {
    // nothing buffered means nothing to remove
    if (!this.audioBuffered().length || this.audioBuffered().end(0) === 0) {
      done();
      return;
    }

    pushQueue({
      type: 'audio',
      sourceUpdater: this,
      action: actions.remove(start, end),
      doneFn: done,
      name: 'remove'
    });
  }

  /**
   * Queue an update to remove a time range from the buffer.
   *
   * @param {number} start where to start the removal
   * @param {number} end where to end the removal
   * @param {Function} [done=noop] optional callback to be executed when the remove
   * operation is complete
   * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-remove-void-double-start-unrestricted-double-end
   */
  removeVideo(start, end, done = noop) {
    // nothing buffered means nothing to remove
    if (!this.videoBuffered().length || this.videoBuffered().end(0) === 0) {
      done();
      return;
    }

    pushQueue({
      type: 'video',
      sourceUpdater: this,
      action: actions.remove(start, end),
      doneFn: done,
      name: 'remove'
    });
  }

  /**
   * Whether the underlying sourceBuffer is updating or not
   *
   * @return {boolean} the updating status of the SourceBuffer
   */
  updating() {
    // the audio/video source buffer is updating
    if (updating('audio', this) || updating('video', this)) {
      return true;
    }

    return false;
  }

  /**
   * Set/get the timestampoffset on the audio SourceBuffer
   *
   * @return {number} the timestamp offset
   */
  audioTimestampOffset(offset) {
    if (typeof offset !== 'undefined' &&
        this.audioBuffer &&
        // no point in updating if it's the same
        this.audioTimestampOffset_ !== offset) {
      pushQueue({
        type: 'audio',
        sourceUpdater: this,
        action: actions.timestampOffset(offset),
        name: 'timestampOffset'
      });
      this.audioTimestampOffset_ = offset;
    }
    return this.audioTimestampOffset_;
  }

  /**
   * Set/get the timestampoffset on the video SourceBuffer
   *
   * @return {number} the timestamp offset
   */
  videoTimestampOffset(offset) {
    if (typeof offset !== 'undefined' &&
        this.videoBuffer &&
        // no point in updating if it's the same
        // (fixed: previously compared `this.videoTimestampOffset` — the method
        // itself — to a number, which is always true and queued redundant
        // timestampOffset actions on every call)
        this.videoTimestampOffset_ !== offset) {
      pushQueue({
        type: 'video',
        sourceUpdater: this,
        action: actions.timestampOffset(offset),
        name: 'timestampOffset'
      });
      this.videoTimestampOffset_ = offset;
    }
    return this.videoTimestampOffset_;
  }

  /**
   * Add a function to the queue that will be called
   * when it is its turn to run in the audio queue.
   *
   * @param {Function} callback
   *        The callback to queue.
   */
  audioQueueCallback(callback) {
    if (!this.audioBuffer) {
      return;
    }
    pushQueue({
      type: 'audio',
      sourceUpdater: this,
      action: actions.callback(callback),
      name: 'callback'
    });
  }

  /**
   * Add a function to the queue that will be called
   * when it is its turn to run in the video queue.
   *
   * @param {Function} callback
   *        The callback to queue.
   */
  videoQueueCallback(callback) {
    if (!this.videoBuffer) {
      return;
    }
    pushQueue({
      type: 'video',
      sourceUpdater: this,
      action: actions.callback(callback),
      name: 'callback'
    });
  }

  /**
   * dispose of the source updater and the underlying sourceBuffer
   */
  dispose() {
    this.trigger('dispose');
    bufferTypes.forEach((type) => {
      this.abort(type);
      if (this.canRemoveSourceBuffer()) {
        this.removeSourceBuffer(type);
      } else {
        // no removeSourceBuffer support: queue a cleanup of the buffer instead
        this[`${type}QueueCallback`](() => cleanupBuffer(type, this));
      }
    });

    this.videoAppendQueued_ = false;
    this.delayedAudioAppendQueue_.length = 0;

    if (this.sourceopenListener_) {
      this.mediaSource.removeEventListener('sourceopen', this.sourceopenListener_);
    }

    this.off();
  }
}
|
||||
521
build/javascript/node_modules/@videojs/http-streaming/src/sync-controller.js
generated
vendored
Normal file
521
build/javascript/node_modules/@videojs/http-streaming/src/sync-controller.js
generated
vendored
Normal file
@@ -0,0 +1,521 @@
|
||||
/**
|
||||
* @file sync-controller.js
|
||||
*/
|
||||
|
||||
import {sumDurations} from './playlist';
|
||||
import videojs from 'video.js';
|
||||
import logger from './util/logger';
|
||||
|
||||
// An ordered list of strategies for finding a sync-point: a known mapping
// from display-time to a segment-index in the current playlist. Each
// strategy's `run` returns either a {time, segmentIndex} object or null.
export const syncPointStrategies = [
  // Strategy "VOD": Handle the VOD-case where the sync-point is *always*
  // the equivalence display-time 0 === segment-index 0
  {
    name: 'VOD',
    run: (syncController, playlist, duration, currentTimeline, currentTime) => {
      // a finite duration implies VOD content
      if (duration !== Infinity) {
        const syncPoint = {
          time: 0,
          segmentIndex: 0
        };

        return syncPoint;
      }
      return null;
    }
  },
  // Strategy "ProgramDateTime": We have a program-date-time tag in this playlist
  {
    name: 'ProgramDateTime',
    run: (syncController, playlist, duration, currentTimeline, currentTime) => {
      // requires a previously established datetime -> display-time mapping
      if (!syncController.datetimeToDisplayTime) {
        return null;
      }

      const segments = playlist.segments || [];
      let syncPoint = null;
      let lastDistance = null;

      currentTime = currentTime || 0;

      // walk the segments looking for the one whose mapped start time is
      // nearest to currentTime
      for (let i = 0; i < segments.length; i++) {
        const segment = segments[i];

        if (segment.dateTimeObject) {
          // dateTimeObject is a Date; convert ms to seconds
          const segmentTime = segment.dateTimeObject.getTime() / 1000;
          const segmentStart = segmentTime + syncController.datetimeToDisplayTime;
          const distance = Math.abs(currentTime - segmentStart);

          // Once the distance begins to increase, or if distance is 0, we have passed
          // currentTime and can stop looking for better candidates
          if (lastDistance !== null && (distance === 0 || lastDistance < distance)) {
            break;
          }

          lastDistance = distance;
          syncPoint = {
            time: segmentStart,
            segmentIndex: i
          };
        }
      }
      return syncPoint;
    }
  },
  // Strategy "Segment": We have a known time mapping for a timeline and a
  // segment in the current timeline with timing data
  {
    name: 'Segment',
    run: (syncController, playlist, duration, currentTimeline, currentTime) => {
      const segments = playlist.segments || [];
      let syncPoint = null;
      let lastDistance = null;

      currentTime = currentTime || 0;

      // find the segment in the current timeline with a known start time
      // closest to currentTime
      for (let i = 0; i < segments.length; i++) {
        const segment = segments[i];

        if (segment.timeline === currentTimeline &&
            typeof segment.start !== 'undefined') {
          const distance = Math.abs(currentTime - segment.start);

          // Once the distance begins to increase, we have passed
          // currentTime and can stop looking for better candidates
          if (lastDistance !== null && lastDistance < distance) {
            break;
          }

          // keep this candidate if it is at least as close as the last one
          if (!syncPoint || lastDistance === null || lastDistance >= distance) {
            lastDistance = distance;
            syncPoint = {
              time: segment.start,
              segmentIndex: i
            };
          }

        }
      }
      return syncPoint;
    }
  },
  // Strategy "Discontinuity": We have a discontinuity with a known
  // display-time
  {
    name: 'Discontinuity',
    run: (syncController, playlist, duration, currentTimeline, currentTime) => {
      let syncPoint = null;

      currentTime = currentTime || 0;

      if (playlist.discontinuityStarts && playlist.discontinuityStarts.length) {
        let lastDistance = null;

        // check each discontinuity the sync controller has timing info for
        // and keep the one nearest to currentTime
        for (let i = 0; i < playlist.discontinuityStarts.length; i++) {
          const segmentIndex = playlist.discontinuityStarts[i];
          const discontinuity = playlist.discontinuitySequence + i + 1;
          const discontinuitySync = syncController.discontinuities[discontinuity];

          if (discontinuitySync) {
            const distance = Math.abs(currentTime - discontinuitySync.time);

            // Once the distance begins to increase, we have passed
            // currentTime and can stop looking for better candidates
            if (lastDistance !== null && lastDistance < distance) {
              break;
            }

            if (!syncPoint || lastDistance === null || lastDistance >= distance) {
              lastDistance = distance;
              syncPoint = {
                time: discontinuitySync.time,
                segmentIndex
              };
            }
          }
        }
      }
      return syncPoint;
    }
  },
  // Strategy "Playlist": We have a playlist with a known mapping of
  // segment index to display time
  {
    name: 'Playlist',
    run: (syncController, playlist, duration, currentTimeline, currentTime) => {
      if (playlist.syncInfo) {
        // syncInfo records the display time of a known media sequence number;
        // translate that to an index into the current playlist
        const syncPoint = {
          time: playlist.syncInfo.time,
          segmentIndex: playlist.syncInfo.mediaSequence - playlist.mediaSequence
        };

        return syncPoint;
      }
      return null;
    }
  }
];
|
||||
|
||||
export default class SyncController extends videojs.EventTarget {
|
||||
constructor(options = {}) {
|
||||
super();
|
||||
// ...for synching across variants
|
||||
this.timelines = [];
|
||||
this.discontinuities = [];
|
||||
this.datetimeToDisplayTime = null;
|
||||
|
||||
this.logger_ = logger('SyncController');
|
||||
}
|
||||
|
||||
/**
|
||||
* Find a sync-point for the playlist specified
|
||||
*
|
||||
* A sync-point is defined as a known mapping from display-time to
|
||||
* a segment-index in the current playlist.
|
||||
*
|
||||
* @param {Playlist} playlist
|
||||
* The playlist that needs a sync-point
|
||||
* @param {number} duration
|
||||
* Duration of the MediaSource (Infinite if playing a live source)
|
||||
* @param {number} currentTimeline
|
||||
* The last timeline from which a segment was loaded
|
||||
* @return {Object}
|
||||
* A sync-point object
|
||||
*/
|
||||
getSyncPoint(playlist, duration, currentTimeline, currentTime) {
|
||||
const syncPoints = this.runStrategies_(
|
||||
playlist,
|
||||
duration,
|
||||
currentTimeline,
|
||||
currentTime
|
||||
);
|
||||
|
||||
if (!syncPoints.length) {
|
||||
// Signal that we need to attempt to get a sync-point manually
|
||||
// by fetching a segment in the playlist and constructing
|
||||
// a sync-point from that information
|
||||
return null;
|
||||
}
|
||||
|
||||
// Now find the sync-point that is closest to the currentTime because
|
||||
// that should result in the most accurate guess about which segment
|
||||
// to fetch
|
||||
return this.selectSyncPoint_(syncPoints, { key: 'time', value: currentTime });
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate the amount of time that has expired off the playlist during playback
|
||||
*
|
||||
* @param {Playlist} playlist
|
||||
* Playlist object to calculate expired from
|
||||
* @param {number} duration
|
||||
* Duration of the MediaSource (Infinity if playling a live source)
|
||||
* @return {number|null}
|
||||
* The amount of time that has expired off the playlist during playback. Null
|
||||
* if no sync-points for the playlist can be found.
|
||||
*/
|
||||
getExpiredTime(playlist, duration) {
|
||||
if (!playlist || !playlist.segments) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const syncPoints = this.runStrategies_(
|
||||
playlist,
|
||||
duration,
|
||||
playlist.discontinuitySequence,
|
||||
0
|
||||
);
|
||||
|
||||
// Without sync-points, there is not enough information to determine the expired time
|
||||
if (!syncPoints.length) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const syncPoint = this.selectSyncPoint_(syncPoints, {
|
||||
key: 'segmentIndex',
|
||||
value: 0
|
||||
});
|
||||
|
||||
// If the sync-point is beyond the start of the playlist, we want to subtract the
|
||||
// duration from index 0 to syncPoint.segmentIndex instead of adding.
|
||||
if (syncPoint.segmentIndex > 0) {
|
||||
syncPoint.time *= -1;
|
||||
}
|
||||
|
||||
return Math.abs(syncPoint.time + sumDurations(playlist, syncPoint.segmentIndex, 0));
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs each sync-point strategy and returns a list of sync-points returned by the
|
||||
* strategies
|
||||
*
|
||||
* @private
|
||||
* @param {Playlist} playlist
|
||||
* The playlist that needs a sync-point
|
||||
* @param {number} duration
|
||||
* Duration of the MediaSource (Infinity if playing a live source)
|
||||
* @param {number} currentTimeline
|
||||
* The last timeline from which a segment was loaded
|
||||
* @return {Array}
|
||||
* A list of sync-point objects
|
||||
*/
|
||||
runStrategies_(playlist, duration, currentTimeline, currentTime) {
|
||||
const syncPoints = [];
|
||||
|
||||
// Try to find a sync-point in by utilizing various strategies...
|
||||
for (let i = 0; i < syncPointStrategies.length; i++) {
|
||||
const strategy = syncPointStrategies[i];
|
||||
const syncPoint = strategy.run(
|
||||
this,
|
||||
playlist,
|
||||
duration,
|
||||
currentTimeline,
|
||||
currentTime
|
||||
);
|
||||
|
||||
if (syncPoint) {
|
||||
syncPoint.strategy = strategy.name;
|
||||
syncPoints.push({
|
||||
strategy: strategy.name,
|
||||
syncPoint
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return syncPoints;
|
||||
}
|
||||
|
||||
/**
|
||||
* Selects the sync-point nearest the specified target
|
||||
*
|
||||
* @private
|
||||
* @param {Array} syncPoints
|
||||
* List of sync-points to select from
|
||||
* @param {Object} target
|
||||
* Object specifying the property and value we are targeting
|
||||
* @param {string} target.key
|
||||
* Specifies the property to target. Must be either 'time' or 'segmentIndex'
|
||||
* @param {number} target.value
|
||||
* The value to target for the specified key.
|
||||
* @return {Object}
|
||||
* The sync-point nearest the target
|
||||
*/
|
||||
selectSyncPoint_(syncPoints, target) {
|
||||
let bestSyncPoint = syncPoints[0].syncPoint;
|
||||
let bestDistance = Math.abs(syncPoints[0].syncPoint[target.key] - target.value);
|
||||
let bestStrategy = syncPoints[0].strategy;
|
||||
|
||||
for (let i = 1; i < syncPoints.length; i++) {
|
||||
const newDistance = Math.abs(syncPoints[i].syncPoint[target.key] - target.value);
|
||||
|
||||
if (newDistance < bestDistance) {
|
||||
bestDistance = newDistance;
|
||||
bestSyncPoint = syncPoints[i].syncPoint;
|
||||
bestStrategy = syncPoints[i].strategy;
|
||||
}
|
||||
}
|
||||
|
||||
this.logger_(`syncPoint for [${target.key}: ${target.value}] chosen with strategy` +
|
||||
` [${bestStrategy}]: [time:${bestSyncPoint.time},` +
|
||||
` segmentIndex:${bestSyncPoint.segmentIndex}]`);
|
||||
|
||||
return bestSyncPoint;
|
||||
}
|
||||
|
||||
/**
|
||||
* Save any meta-data present on the segments when segments leave
|
||||
* the live window to the playlist to allow for synchronization at the
|
||||
* playlist level later.
|
||||
*
|
||||
* @param {Playlist} oldPlaylist - The previous active playlist
|
||||
* @param {Playlist} newPlaylist - The updated and most current playlist
|
||||
*/
|
||||
saveExpiredSegmentInfo(oldPlaylist, newPlaylist) {
|
||||
const mediaSequenceDiff = newPlaylist.mediaSequence - oldPlaylist.mediaSequence;
|
||||
|
||||
// When a segment expires from the playlist and it has a start time
|
||||
// save that information as a possible sync-point reference in future
|
||||
for (let i = mediaSequenceDiff - 1; i >= 0; i--) {
|
||||
const lastRemovedSegment = oldPlaylist.segments[i];
|
||||
|
||||
if (lastRemovedSegment && typeof lastRemovedSegment.start !== 'undefined') {
|
||||
newPlaylist.syncInfo = {
|
||||
mediaSequence: oldPlaylist.mediaSequence + i,
|
||||
time: lastRemovedSegment.start
|
||||
};
|
||||
this.logger_(`playlist refresh sync: [time:${newPlaylist.syncInfo.time},` +
|
||||
` mediaSequence: ${newPlaylist.syncInfo.mediaSequence}]`);
|
||||
this.trigger('syncinfoupdate');
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save the mapping from playlist's ProgramDateTime to display. This should
|
||||
* only ever happen once at the start of playback.
|
||||
*
|
||||
* @param {Playlist} playlist - The currently active playlist
|
||||
*/
|
||||
setDateTimeMapping(playlist) {
|
||||
if (!this.datetimeToDisplayTime &&
|
||||
playlist.segments &&
|
||||
playlist.segments.length &&
|
||||
playlist.segments[0].dateTimeObject) {
|
||||
const playlistTimestamp = playlist.segments[0].dateTimeObject.getTime() / 1000;
|
||||
|
||||
this.datetimeToDisplayTime = -playlistTimestamp;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates and saves timeline mappings, playlist sync info, and segment timing values
|
||||
* based on the latest timing information.
|
||||
*
|
||||
* @param {Object} options
|
||||
* Options object
|
||||
* @param {SegmentInfo} options.segmentInfo
|
||||
* The current active request information
|
||||
* @param {boolean} options.shouldSaveTimelineMapping
|
||||
* If there's a timeline change, determines if the timeline mapping should be
|
||||
* saved in timelines.
|
||||
*/
|
||||
saveSegmentTimingInfo({ segmentInfo, shouldSaveTimelineMapping }) {
|
||||
const didCalculateSegmentTimeMapping = this.calculateSegmentTimeMapping_(
|
||||
segmentInfo,
|
||||
segmentInfo.timingInfo,
|
||||
shouldSaveTimelineMapping
|
||||
);
|
||||
|
||||
if (didCalculateSegmentTimeMapping) {
|
||||
this.saveDiscontinuitySyncInfo_(segmentInfo);
|
||||
|
||||
// If the playlist does not have sync information yet, record that information
|
||||
// now with segment timing information
|
||||
if (!segmentInfo.playlist.syncInfo) {
|
||||
segmentInfo.playlist.syncInfo = {
|
||||
mediaSequence: segmentInfo.playlist.mediaSequence + segmentInfo.mediaIndex,
|
||||
time: segmentInfo.segment.start
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
timestampOffsetForTimeline(timeline) {
|
||||
if (typeof this.timelines[timeline] === 'undefined') {
|
||||
return null;
|
||||
}
|
||||
return this.timelines[timeline].time;
|
||||
}
|
||||
|
||||
mappingForTimeline(timeline) {
|
||||
if (typeof this.timelines[timeline] === 'undefined') {
|
||||
return null;
|
||||
}
|
||||
return this.timelines[timeline].mapping;
|
||||
}
|
||||
|
||||
/**
|
||||
* Use the "media time" for a segment to generate a mapping to "display time" and
|
||||
* save that display time to the segment.
|
||||
*
|
||||
* @private
|
||||
* @param {SegmentInfo} segmentInfo
|
||||
* The current active request information
|
||||
* @param {Object} timingInfo
|
||||
* The start and end time of the current segment in "media time"
|
||||
* @param {boolean} shouldSaveTimelineMapping
|
||||
* If there's a timeline change, determines if the timeline mapping should be
|
||||
* saved in timelines.
|
||||
* @return {boolean}
|
||||
* Returns false if segment time mapping could not be calculated
|
||||
*/
|
||||
calculateSegmentTimeMapping_(segmentInfo, timingInfo, shouldSaveTimelineMapping) {
|
||||
const segment = segmentInfo.segment;
|
||||
let mappingObj = this.timelines[segmentInfo.timeline];
|
||||
|
||||
if (segmentInfo.timestampOffset !== null) {
|
||||
mappingObj = {
|
||||
time: segmentInfo.startOfSegment,
|
||||
mapping: segmentInfo.startOfSegment - timingInfo.start
|
||||
};
|
||||
if (shouldSaveTimelineMapping) {
|
||||
this.timelines[segmentInfo.timeline] = mappingObj;
|
||||
this.trigger('timestampoffset');
|
||||
|
||||
this.logger_(`time mapping for timeline ${segmentInfo.timeline}: ` +
|
||||
`[time: ${mappingObj.time}] [mapping: ${mappingObj.mapping}]`);
|
||||
}
|
||||
|
||||
segment.start = segmentInfo.startOfSegment;
|
||||
segment.end = timingInfo.end + mappingObj.mapping;
|
||||
} else if (mappingObj) {
|
||||
segment.start = timingInfo.start + mappingObj.mapping;
|
||||
segment.end = timingInfo.end + mappingObj.mapping;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Each time we have discontinuity in the playlist, attempt to calculate the location
|
||||
* in display of the start of the discontinuity and save that. We also save an accuracy
|
||||
* value so that we save values with the most accuracy (closest to 0.)
|
||||
*
|
||||
* @private
|
||||
* @param {SegmentInfo} segmentInfo - The current active request information
|
||||
*/
|
||||
saveDiscontinuitySyncInfo_(segmentInfo) {
|
||||
const playlist = segmentInfo.playlist;
|
||||
const segment = segmentInfo.segment;
|
||||
|
||||
// If the current segment is a discontinuity then we know exactly where
|
||||
// the start of the range and it's accuracy is 0 (greater accuracy values
|
||||
// mean more approximation)
|
||||
if (segment.discontinuity) {
|
||||
this.discontinuities[segment.timeline] = {
|
||||
time: segment.start,
|
||||
accuracy: 0
|
||||
};
|
||||
} else if (playlist.discontinuityStarts && playlist.discontinuityStarts.length) {
|
||||
// Search for future discontinuities that we can provide better timing
|
||||
// information for and save that information for sync purposes
|
||||
for (let i = 0; i < playlist.discontinuityStarts.length; i++) {
|
||||
const segmentIndex = playlist.discontinuityStarts[i];
|
||||
const discontinuity = playlist.discontinuitySequence + i + 1;
|
||||
const mediaIndexDiff = segmentIndex - segmentInfo.mediaIndex;
|
||||
const accuracy = Math.abs(mediaIndexDiff);
|
||||
|
||||
if (!this.discontinuities[discontinuity] ||
|
||||
this.discontinuities[discontinuity].accuracy > accuracy) {
|
||||
let time;
|
||||
|
||||
if (mediaIndexDiff < 0) {
|
||||
time = segment.start - sumDurations(
|
||||
playlist,
|
||||
segmentInfo.mediaIndex,
|
||||
segmentIndex
|
||||
);
|
||||
} else {
|
||||
time = segment.end + sumDurations(
|
||||
playlist,
|
||||
segmentInfo.mediaIndex + 1,
|
||||
segmentIndex
|
||||
);
|
||||
}
|
||||
|
||||
this.discontinuities[discontinuity] = {
|
||||
time,
|
||||
accuracy
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Emit a final 'dispose' event (so listeners can clean up) and then remove
 * all event listeners from this controller.
 */
dispose() {
  this.trigger('dispose');
  this.off();
}
|
||||
}
|
||||
48
build/javascript/node_modules/@videojs/http-streaming/src/timeline-change-controller.js
generated
vendored
Normal file
48
build/javascript/node_modules/@videojs/http-streaming/src/timeline-change-controller.js
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
import videojs from 'video.js';
|
||||
|
||||
/**
|
||||
* The TimelineChangeController acts as a source for segment loaders to listen for and
|
||||
* keep track of latest and pending timeline changes. This is useful to ensure proper
|
||||
* sync, as each loader may need to make a consideration for what timeline the other
|
||||
* loader is on before making changes which could impact the other loader's media.
|
||||
*
|
||||
* @class TimelineChangeController
|
||||
* @extends videojs.EventTarget
|
||||
*/
|
||||
export default class TimelineChangeController extends videojs.EventTarget {
  constructor() {
    super();

    // per-loader-type ('audio'/'main') records of in-flight and completed
    // timeline changes
    this.pendingTimelineChanges_ = {};
    this.lastTimelineChanges_ = {};
  }

  /**
   * Drop any pending change recorded for a loader type and notify listeners.
   */
  clearPendingTimelineChange(type) {
    this.pendingTimelineChanges_[type] = null;
    this.trigger('pendingtimelinechange');
  }

  /**
   * Record (when `from`/`to` are provided) and return the pending timeline
   * change for a loader type.
   */
  pendingTimelineChange({ type, from, to }) {
    const isValidChange = typeof from === 'number' && typeof to === 'number';

    if (isValidChange) {
      this.pendingTimelineChanges_[type] = { type, from, to };
      this.trigger('pendingtimelinechange');
    }

    return this.pendingTimelineChanges_[type];
  }

  /**
   * Record (when `from`/`to` are provided) and return the most recent
   * completed timeline change for a loader type. Completing a change also
   * clears the corresponding pending entry.
   */
  lastTimelineChange({ type, from, to }) {
    const isValidChange = typeof from === 'number' && typeof to === 'number';

    if (isValidChange) {
      this.lastTimelineChanges_[type] = { type, from, to };
      delete this.pendingTimelineChanges_[type];
      this.trigger('timelinechange');
    }

    return this.lastTimelineChanges_[type];
  }

  dispose() {
    this.trigger('dispose');
    this.pendingTimelineChanges_ = {};
    this.lastTimelineChanges_ = {};
    this.off();
  }
}
|
||||
433
build/javascript/node_modules/@videojs/http-streaming/src/transmuxer-worker.js
generated
vendored
Normal file
433
build/javascript/node_modules/@videojs/http-streaming/src/transmuxer-worker.js
generated
vendored
Normal file
@@ -0,0 +1,433 @@
|
||||
/* global self */
|
||||
/**
|
||||
* @file transmuxer-worker.js
|
||||
*/
|
||||
|
||||
/**
|
||||
* videojs-contrib-media-sources
|
||||
*
|
||||
* Copyright (c) 2015 Brightcove
|
||||
* All rights reserved.
|
||||
*
|
||||
* Handles communication between the browser-world and the mux.js
|
||||
* transmuxer running inside of a WebWorker by exposing a simple
|
||||
* message-based interface to a Transmuxer object.
|
||||
*/
|
||||
|
||||
import {Transmuxer as FullMux} from 'mux.js/lib/mp4/transmuxer';
|
||||
import PartialMux from 'mux.js/lib/partial/transmuxer';
|
||||
import CaptionParser from 'mux.js/lib/mp4/caption-parser';
|
||||
import {
|
||||
secondsToVideoTs,
|
||||
videoTsToSeconds
|
||||
} from 'mux.js/lib/utils/clock';
|
||||
|
||||
/**
 * Map a mux.js stream name to the media type it carries.
 *
 * @param {string} streamString stream name reported by the transmuxer
 * @return {string} 'audio', 'video', or '' when unrecognized
 */
const typeFromStreamString = (streamString) => {
  switch (streamString) {
  case 'AudioSegmentStream':
    return 'audio';
  case 'VideoSegmentStream':
    return 'video';
  default:
    return '';
  }
};
|
||||
|
||||
/**
|
||||
* Re-emits transmuxer events by converting them into messages to the
|
||||
* world outside the worker.
|
||||
*
|
||||
* @param {Object} transmuxer the transmuxer to wire events on
|
||||
* @private
|
||||
*/
|
||||
const wireFullTransmuxerEvents = function(self, transmuxer) {
  // Forward finished segments to the main thread.
  transmuxer.on('data', function(segment) {
    // transfer ownership of the underlying ArrayBuffer
    // instead of doing a copy to save memory
    // ArrayBuffers are transferable but generic TypedArrays are not
    // @link https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers#Passing_data_by_transferring_ownership_(transferable_objects)
    const initArray = segment.initSegment;

    // replace the typed array with a plain descriptor so the buffer itself
    // can be listed as a transferable
    segment.initSegment = {
      data: initArray.buffer,
      byteOffset: initArray.byteOffset,
      byteLength: initArray.byteLength
    };

    const typedArray = segment.data;

    segment.data = typedArray.buffer;
    self.postMessage({
      action: 'data',
      segment,
      byteOffset: typedArray.byteOffset,
      byteLength: typedArray.byteLength
    }, [segment.data]);
  });

  // Signal that the transmuxer finished a full flush.
  transmuxer.on('done', function(data) {
    self.postMessage({ action: 'done' });
  });

  // Forward group-of-pictures timing info (used for append alignment).
  transmuxer.on('gopInfo', function(gopInfo) {
    self.postMessage({
      action: 'gopInfo',
      gopInfo
    });
  });

  // Convert transmuxer clock values (90kHz ticks) to seconds before posting
  // video segment timing info.
  transmuxer.on('videoSegmentTimingInfo', function(timingInfo) {
    const videoSegmentTimingInfo = {
      start: {
        decode: videoTsToSeconds(timingInfo.start.dts),
        presentation: videoTsToSeconds(timingInfo.start.pts)
      },
      end: {
        decode: videoTsToSeconds(timingInfo.end.dts),
        presentation: videoTsToSeconds(timingInfo.end.pts)
      },
      baseMediaDecodeTime: videoTsToSeconds(timingInfo.baseMediaDecodeTime)
    };

    if (timingInfo.prependedContentDuration) {
      videoSegmentTimingInfo.prependedContentDuration = videoTsToSeconds(timingInfo.prependedContentDuration);
    }
    self.postMessage({
      action: 'videoSegmentTimingInfo',
      videoSegmentTimingInfo
    });
  });

  // Forward in-band ID3 metadata frames.
  transmuxer.on('id3Frame', function(id3Frame) {
    self.postMessage({
      action: 'id3Frame',
      id3Frame
    });
  });

  // Forward parsed 608/708 captions.
  transmuxer.on('caption', function(caption) {
    self.postMessage({
      action: 'caption',
      caption
    });
  });

  // Forward detected track information (codecs present, etc).
  transmuxer.on('trackinfo', function(trackInfo) {
    self.postMessage({
      action: 'trackinfo',
      trackInfo
    });
  });

  transmuxer.on('audioTimingInfo', function(audioTimingInfo) {
    // convert to video TS since we prioritize video time over audio
    self.postMessage({
      action: 'audioTimingInfo',
      audioTimingInfo: {
        start: videoTsToSeconds(audioTimingInfo.start),
        end: videoTsToSeconds(audioTimingInfo.end)
      }
    });
  });

  // Forward video timing info, converted from clock ticks to seconds.
  transmuxer.on('videoTimingInfo', function(videoTimingInfo) {
    self.postMessage({
      action: 'videoTimingInfo',
      videoTimingInfo: {
        start: videoTsToSeconds(videoTimingInfo.start),
        end: videoTsToSeconds(videoTimingInfo.end)
      }
    });
  });

};
|
||||
|
||||
/**
 * Re-emits partial-transmuxer events as messages to the world outside the
 * worker, mirroring wireFullTransmuxerEvents but for the incremental
 * (partial-appends) pipeline.
 *
 * @param {Object} self the worker global scope to post messages on
 * @param {Object} transmuxer the partial transmuxer to wire events on
 * @private
 */
const wirePartialTransmuxerEvents = function(self, transmuxer) {
  transmuxer.on('data', function(event) {
    // transfer ownership of the underlying ArrayBuffer
    // instead of doing a copy to save memory
    // ArrayBuffers are transferable but generic TypedArrays are not
    // @link https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers#Passing_data_by_transferring_ownership_(transferable_objects)

    const initSegment = {
      data: event.data.track.initSegment.buffer,
      byteOffset: event.data.track.initSegment.byteOffset,
      byteLength: event.data.track.initSegment.byteLength
    };
    const boxes = {
      data: event.data.boxes.buffer,
      byteOffset: event.data.boxes.byteOffset,
      byteLength: event.data.boxes.byteLength
    };
    const segment = {
      boxes,
      initSegment,
      type: event.type,
      sequence: event.data.sequence
    };

    // pass along frame-level timing when the partial pipeline provides it
    if (typeof event.data.videoFrameDts !== 'undefined') {
      segment.videoFrameDtsTime = videoTsToSeconds(event.data.videoFrameDts);
    }

    if (typeof event.data.videoFramePts !== 'undefined') {
      segment.videoFramePtsTime = videoTsToSeconds(event.data.videoFramePts);
    }

    self.postMessage({
      action: 'data',
      segment
    }, [ segment.boxes.data, segment.initSegment.data ]);
  });

  // Forward in-band ID3 metadata frames.
  transmuxer.on('id3Frame', function(id3Frame) {
    self.postMessage({
      action: 'id3Frame',
      id3Frame
    });
  });

  // Forward parsed 608/708 captions.
  transmuxer.on('caption', function(caption) {
    self.postMessage({
      action: 'caption',
      caption
    });
  });

  // 'done' fires per stream; translate the stream name to a media type.
  transmuxer.on('done', function(data) {
    self.postMessage({
      action: 'done',
      type: typeFromStreamString(data)
    });
  });

  // A partial flush finished for one stream.
  transmuxer.on('partialdone', function(data) {
    self.postMessage({
      action: 'partialdone',
      type: typeFromStreamString(data)
    });
  });

  transmuxer.on('endedsegment', function(data) {
    self.postMessage({
      action: 'endedSegment',
      type: typeFromStreamString(data)
    });
  });

  // Forward detected track information (codecs present, etc).
  transmuxer.on('trackinfo', function(trackInfo) {
    self.postMessage({ action: 'trackinfo', trackInfo });
  });

  transmuxer.on('audioTimingInfo', function(audioTimingInfo) {
    // This can happen if flush is called when no
    // audio has been processed. This should be an
    // unusual case, but if it does occur should not
    // result in valid data being returned
    if (audioTimingInfo.start === null) {
      self.postMessage({
        action: 'audioTimingInfo',
        audioTimingInfo
      });
      return;
    }

    // convert to video TS since we prioritize video time over audio
    const timingInfoInSeconds = {
      start: videoTsToSeconds(audioTimingInfo.start)
    };

    // end may be absent mid-segment in the partial pipeline
    if (audioTimingInfo.end) {
      timingInfoInSeconds.end = videoTsToSeconds(audioTimingInfo.end);
    }

    self.postMessage({
      action: 'audioTimingInfo',
      audioTimingInfo: timingInfoInSeconds
    });
  });

  transmuxer.on('videoTimingInfo', function(videoTimingInfo) {
    const timingInfoInSeconds = {
      start: videoTsToSeconds(videoTimingInfo.start)
    };

    // end may be absent mid-segment in the partial pipeline
    if (videoTimingInfo.end) {
      timingInfoInSeconds.end = videoTsToSeconds(videoTimingInfo.end);
    }

    self.postMessage({
      action: 'videoTimingInfo',
      videoTimingInfo: timingInfoInSeconds
    });
  });
};
|
||||
|
||||
/**
|
||||
* All incoming messages route through this hash. If no function exists
|
||||
* to handle an incoming message, then we ignore the message.
|
||||
*
|
||||
* @class MessageHandlers
|
||||
* @param {Object} options the options to initialize with
|
||||
*/
|
||||
class MessageHandlers {
  /**
   * @param {Object} self the worker global scope, used to post messages
   * @param {Object} options the options to initialize the transmuxer with
   */
  constructor(self, options) {
    this.options = options || {};
    this.self = self;
    this.init();
  }

  /**
   * initialize our web worker and wire all the events.
   */
  init() {
    // re-initialization disposes any previous transmuxer first
    if (this.transmuxer) {
      this.transmuxer.dispose();
    }
    this.transmuxer = this.options.handlePartialData ?
      new PartialMux(this.options) :
      new FullMux(this.options);

    if (this.options.handlePartialData) {
      wirePartialTransmuxerEvents(this.self, this.transmuxer);
    } else {
      wireFullTransmuxerEvents(this.self, this.transmuxer);
    }

  }

  /**
   * Parse CEA-608/708 captions out of an fmp4 segment and post them back,
   * transferring the segment bytes to the main thread.
   *
   * @param {Object} data message payload with data/byteOffset/byteLength,
   *        trackIds and timescales for the caption parser
   */
  pushMp4Captions(data) {
    // the caption parser is created lazily on first use
    if (!this.captionParser) {
      this.captionParser = new CaptionParser();
      this.captionParser.init();
    }
    const segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);
    const parsed = this.captionParser.parse(
      segment,
      data.trackIds,
      data.timescales
    );

    this.self.postMessage({
      action: 'mp4Captions',
      captions: parsed && parsed.captions || [],
      data: segment.buffer
    }, [segment.buffer]);
  }

  /**
   * Reset all caption state tracked by the parser (if one exists).
   */
  clearAllMp4Captions() {
    if (this.captionParser) {
      this.captionParser.clearAllCaptions();
    }
  }

  /**
   * Clear only the captions already parsed, keeping partial-caption state.
   */
  clearParsedMp4Captions() {
    if (this.captionParser) {
      this.captionParser.clearParsedCaptions();
    }
  }

  /**
   * Adds data (a ts segment) to the start of the transmuxer pipeline for
   * processing.
   *
   * @param {ArrayBuffer} data data to push into the muxer
   */
  push(data) {
    // Cast array buffer to correct type for transmuxer
    const segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);

    this.transmuxer.push(segment);
  }

  /**
   * Recreate the transmuxer so that the next segment added via `push`
   * start with a fresh transmuxer.
   */
  reset() {
    this.transmuxer.reset();
  }

  /**
   * Set the value that will be used as the `baseMediaDecodeTime` time for the
   * next segment pushed in. Subsequent segments will have their `baseMediaDecodeTime`
   * set relative to the first based on the PTS values.
   *
   * @param {Object} data used to set the timestamp offset in the muxer
   */
  setTimestampOffset(data) {
    const timestampOffset = data.timestampOffset || 0;

    // seconds -> 90kHz clock ticks, rounded to a whole tick
    this.transmuxer.setBaseMediaDecodeTime(Math.round(secondsToVideoTs(timestampOffset)));
  }

  /**
   * Tell the transmuxer where appended audio begins (in seconds) so it can
   * trim overlapping audio frames.
   */
  setAudioAppendStart(data) {
    this.transmuxer.setAudioAppendStart(Math.ceil(secondsToVideoTs(data.appendStart)));
  }

  /**
   * Toggle whether audio and video are remuxed together into one output.
   */
  setRemux(data) {
    this.transmuxer.setRemux(data.remux);
  }

  /**
   * Forces the pipeline to finish processing the last segment and emit it's
   * results.
   *
   * @param {Object} data event data, not really used
   */
  flush(data) {
    this.transmuxer.flush();
    // transmuxed done action is fired after both audio/video pipelines are flushed
    self.postMessage({
      action: 'done',
      type: 'transmuxed'
    });
  }

  /**
   * Flush whatever partial data the pipeline currently holds (partial-append
   * mode only).
   */
  partialFlush(data) {
    this.transmuxer.partialFlush();
    // transmuxed partialdone action is fired after both audio/video pipelines are flushed
    self.postMessage({
      action: 'partialdone',
      type: 'transmuxed'
    });
  }

  /**
   * Signal the end of the current timeline to both pipelines.
   */
  endTimeline() {
    this.transmuxer.endTimeline();
    // transmuxed endedtimeline action is fired after both audio/video pipelines end their
    // timelines
    self.postMessage({
      action: 'endedtimeline',
      type: 'transmuxed'
    });
  }

  /**
   * Provide the GOP list the next appended segment should align with.
   * The list is copied so the worker owns its version.
   */
  alignGopsWith(data) {
    this.transmuxer.alignGopsWith(data.gopsToAlignWith.slice());
  }
}
|
||||
|
||||
/**
|
||||
* Our web worker interface so that things can talk to mux.js
|
||||
* that will be running in a web worker. the scope is passed to this by
|
||||
* webworkify.
|
||||
*
|
||||
* @param {Object} self the scope for the web worker
|
||||
*/
|
||||
const TransmuxerWorker = function(self) {
  // route every incoming message through the MessageHandlers instance; the
  // handler is (re)created on an explicit 'init' message, or lazily with
  // default options on the first non-init message
  self.onmessage = function(event) {
    if (event.data.action === 'init' && event.data.options) {
      this.messageHandlers = new MessageHandlers(self, event.data.options);
      return;
    }

    if (!this.messageHandlers) {
      this.messageHandlers = new MessageHandlers(self);
    }

    // dispatch by action name; unknown actions are silently ignored
    if (event.data && event.data.action && event.data.action !== 'init') {
      if (this.messageHandlers[event.data.action]) {
        this.messageHandlers[event.data.action](event.data);
      }
    }
  };
};

export default new TransmuxerWorker(self);
|
||||
7610
build/javascript/node_modules/@videojs/http-streaming/src/transmuxer-worker.worker.js
generated
vendored
Normal file
7610
build/javascript/node_modules/@videojs/http-streaming/src/transmuxer-worker.worker.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
102
build/javascript/node_modules/@videojs/http-streaming/src/util/codecs.js
generated
vendored
Normal file
102
build/javascript/node_modules/@videojs/http-streaming/src/util/codecs.js
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
/**
|
||||
* @file - codecs.js - Handles tasks regarding codec strings such as translating them to
|
||||
* codec strings, or translating codec strings into objects that can be examined.
|
||||
*/
|
||||
|
||||
import {
|
||||
translateLegacyCodec,
|
||||
parseCodecs,
|
||||
codecsFromDefault
|
||||
} from '@videojs/vhs-utils/dist/codecs.js';
|
||||
|
||||
/**
|
||||
* Returns a set of codec strings parsed from the playlist or the default
|
||||
* codec strings if no codecs were specified in the playlist
|
||||
*
|
||||
* @param {Playlist} media the current media playlist
|
||||
* @return {Object} an object with the video and audio codecs
|
||||
*/
|
||||
/**
 * Parse the codec strings explicitly declared on a media playlist, if any.
 *
 * @param {Playlist} media the current media playlist
 * @return {Object|undefined} parsed codec info, or undefined when the
 *         playlist declares no CODECS attribute
 */
const getCodecs = function(media) {
  // if the codecs were explicitly specified, use them instead of the defaults
  const { CODECS } = media.attributes || {};

  if (!CODECS) {
    return;
  }

  return parseCodecs(CODECS);
};
|
||||
|
||||
/**
 * Whether the rendition uses multiple audio alternate tracks (MAAT): the
 * master must declare an AUDIO media group matching the rendition's AUDIO
 * attribute.
 *
 * @param {Object} master the master playlist object
 * @param {Object} media the rendition playlist object
 * @return {Object|boolean|undefined} the matching audio group (truthy) or a
 *         falsy value from the short-circuited chain
 */
export const isMaat = (master, media) => {
  const audioGroupId = (media.attributes || {}).AUDIO;

  return master && master.mediaGroups && master.mediaGroups.AUDIO &&
    audioGroupId &&
    master.mediaGroups.AUDIO[audioGroupId];
};
|
||||
|
||||
/**
 * Whether audio and video are muxed into the same segments. Content without
 * multiple audio tracks is considered muxed; otherwise it is muxed only when
 * some audio group entry has neither a URI nor playlists.
 *
 * @param {Object} master the master playlist object
 * @param {Object} media the rendition playlist object
 * @return {boolean} true when audio is muxed with video
 */
export const isMuxed = (master, media) => {
  if (!isMaat(master, media)) {
    return true;
  }

  const mediaAttributes = media.attributes || {};
  const audioGroup = master.mediaGroups.AUDIO[mediaAttributes.AUDIO];

  for (const groupId in audioGroup) {
    const group = audioGroup[groupId];

    // A URI (the case for HLS, which requests audio playlists externally) or
    // listed playlists (the case for DASH, whose manifest already carries the
    // audio details) means the audio is delivered separately, i.e. demuxed.
    // An entry with neither implies muxed content.
    if (!group.uri && !group.playlists) {
      return true;
    }
  }

  return false;
};
|
||||
|
||||
/**
|
||||
* Calculates the codec strings for a working configuration of
|
||||
* SourceBuffers to play variant streams in a master playlist. If
|
||||
* there is no possible working configuration, an empty object will be
|
||||
* returned.
|
||||
*
|
||||
* @param master {Object} the m3u8 object for the master playlist
|
||||
* @param media {Object} the m3u8 object for the variant playlist
|
||||
* @return {Object} the codec strings.
|
||||
*
|
||||
* @private
|
||||
*/
|
||||
/**
 * Calculates the codec strings for a working configuration of
 * SourceBuffers to play variant streams in a master playlist. If
 * there is no possible working configuration, an empty object will be
 * returned.
 *
 * @param {Object} master the m3u8 object for the master playlist
 * @param {Object} media the m3u8 object for the variant playlist
 * @return {Object} the codec strings.
 *
 * @private
 */
export const codecsForPlaylist = function(master, media) {
  const mediaAttributes = media.attributes || {};
  const codecInfo = getCodecs(media) || {};

  // HLS with multiple-audio tracks must always get an audio codec.
  // Put another way, there is no way to have a video-only multiple-audio HLS!
  // Codecs may be specified only on the audio media group playlist rather
  // than the rendition playlist (mostly DASH, where audio and video are
  // always separate and separately specified), so fall back to the group's
  // default codecs.
  if (isMaat(master, media) && !codecInfo.audio && !isMuxed(master, media)) {
    const defaultCodecs = codecsFromDefault(master, mediaAttributes.AUDIO);

    if (defaultCodecs) {
      codecInfo.audio = defaultCodecs.audio;
    }
  }

  const toCodecString = ({ type, details }) => translateLegacyCodec(`${type}${details}`);
  const codecs = {};

  if (codecInfo.video) {
    codecs.video = toCodecString(codecInfo.video);
  }

  if (codecInfo.audio) {
    codecs.audio = toCodecString(codecInfo.audio);
  }

  return codecs;
};
|
||||
|
||||
85
build/javascript/node_modules/@videojs/http-streaming/src/util/container-request.js
generated
vendored
Normal file
85
build/javascript/node_modules/@videojs/http-streaming/src/util/container-request.js
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
import {detectContainerForBytes, getId3Offset} from '@videojs/vhs-utils/dist/containers';
|
||||
import {stringToBytes, concatTypedArrays} from '@videojs/vhs-utils/dist/byte-helpers';
|
||||
import {callbackWrapper} from '../xhr';
|
||||
|
||||
// calls back if the request is readyState DONE
|
||||
// which will only happen if the request is complete.
|
||||
// calls back if the request is readyState DONE
// which will only happen if the request is complete.
const callbackOnCompleted = (request, cb) => {
  if (request.readyState !== 4) {
    return;
  }
  return cb();
};
|
||||
|
||||
/**
 * Request a URI and detect its media container format ('ts', 'mp4', etc.)
 * from the first bytes downloaded, aborting the request as soon as enough
 * bytes have arrived to decide.
 *
 * @param {string} uri the URI to request
 * @param {Function} xhr the xhr factory to issue the request with
 * @param {Function} cb called as cb(err, request, type, bytes)
 * @return {Object} the in-flight request (abortable)
 */
const containerRequest = (uri, xhr, cb) => {
  let bytes = [];
  let id3Offset;
  let finished = false;

  // abort the request and report the result exactly once
  const endRequestAndCallback = function(err, req, type, _bytes) {
    req.abort();
    finished = true;
    return cb(err, req, type, _bytes);
  };

  const progressListener = function(error, request) {
    if (finished) {
      return;
    }
    if (error) {
      return endRequestAndCallback(error, request, '', bytes);
    }

    // grab the new part of content that was just downloaded
    const newPart = request.responseText.substring(
      bytes && bytes.byteLength || 0,
      request.responseText.length
    );

    // add that onto bytes
    bytes = concatTypedArrays(bytes, stringToBytes(newPart, true));
    id3Offset = id3Offset || getId3Offset(bytes);

    // we need at least 10 bytes to determine a type
    // or we need at least two bytes after an id3Offset
    if (bytes.length < 10 || (id3Offset && bytes.length < id3Offset + 2)) {
      return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));
    }

    const type = detectContainerForBytes(bytes);

    // if this looks like a ts segment but we don't have enough data
    // to see the second sync byte, wait until we have enough data
    // before declaring it ts
    if (type === 'ts' && bytes.length < 188) {
      return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));
    }

    // this may be an unsynced ts segment
    // wait for 376 bytes before detecting no container
    if (!type && bytes.length < 376) {
      return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));
    }

    return endRequestAndCallback(null, request, type, bytes);
  };

  const options = {
    uri,
    beforeSend(request) {
      // this forces the browser to pass the bytes to us unprocessed
      request.overrideMimeType('text/plain; charset=x-user-defined');
      // inspect the bytes as they stream in rather than waiting for load
      request.addEventListener('progress', function({total, loaded}) {
        return callbackWrapper(request, null, {statusCode: request.status}, progressListener);
      });
    }
  };

  const request = xhr(options, function(error, response) {
    return callbackWrapper(request, error, response, progressListener);
  });

  return request;
};

export default containerRequest;
|
||||
119
build/javascript/node_modules/@videojs/http-streaming/src/util/gops.js
generated
vendored
Normal file
119
build/javascript/node_modules/@videojs/http-streaming/src/util/gops.js
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
import { ONE_SECOND_IN_TS } from 'mux.js/lib/utils/clock';
|
||||
|
||||
/**
|
||||
* Returns a list of gops in the buffer that have a pts value of 3 seconds or more in
|
||||
* front of current time.
|
||||
*
|
||||
* @param {Array} buffer
|
||||
* The current buffer of gop information
|
||||
* @param {number} currentTime
|
||||
* The current time
|
||||
* @param {Double} mapping
|
||||
* Offset to map display time to stream presentation time
|
||||
* @return {Array}
|
||||
* List of gops considered safe to append over
|
||||
*/
|
||||
/**
 * Returns a list of gops in the buffer that have a pts value of 3 seconds or more in
 * front of current time.
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {number} currentTime
 *        The current time
 * @param {Double} mapping
 *        Offset to map display time to stream presentation time
 * @return {Array}
 *        List of gops considered safe to append over
 */
export const gopsSafeToAlignWith = (buffer, currentTime, mapping) => {
  if (typeof currentTime === 'undefined' || currentTime === null || !buffer.length) {
    return [];
  }

  // pts value for current time + 3 seconds to give a bit more wiggle room
  const thresholdPts = Math.ceil((currentTime - mapping + 3) * ONE_SECOND_IN_TS);

  const firstSafeIndex = buffer.findIndex((gop) => gop.pts > thresholdPts);

  // no gop is far enough ahead of current time to be safe
  if (firstSafeIndex === -1) {
    return [];
  }

  return buffer.slice(firstSafeIndex);
};
|
||||
|
||||
/**
|
||||
* Appends gop information (timing and byteLength) received by the transmuxer for the
|
||||
* gops appended in the last call to appendBuffer
|
||||
*
|
||||
* @param {Array} buffer
|
||||
* The current buffer of gop information
|
||||
* @param {Array} gops
|
||||
* List of new gop information
|
||||
* @param {boolean} replace
|
||||
* If true, replace the buffer with the new gop information. If false, append the
|
||||
* new gop information to the buffer in the right location of time.
|
||||
* @return {Array}
|
||||
* Updated list of gop information
|
||||
*/
|
||||
/**
 * Appends gop information (timing and byteLength) received by the transmuxer for the
 * gops appended in the last call to appendBuffer
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {Array} gops
 *        List of new gop information
 * @param {boolean} replace
 *        If true, replace the buffer with the new gop information. If false, append the
 *        new gop information to the buffer in the right location of time.
 * @return {Array}
 *        Updated list of gop information
 */
export const updateGopBuffer = (buffer, gops, replace) => {
  if (gops.length === 0) {
    return buffer;
  }

  if (replace) {
    // In safe append mode, completely overwrite the gop buffer with the most
    // recently appended data so future alignment only considers gops that are
    // both ahead of current time and in the last segment appended.
    return gops.slice();
  }

  // Drop any buffered gops at or after the first new gop's pts, then append
  // the new gops in their place.
  const newStartPts = gops[0].pts;
  let insertAt = buffer.findIndex((gop) => gop.pts >= newStartPts);

  if (insertAt === -1) {
    insertAt = buffer.length;
  }

  return buffer.slice(0, insertAt).concat(gops);
};
|
||||
|
||||
/**
|
||||
* Removes gop information in buffer that overlaps with provided start and end
|
||||
*
|
||||
* @param {Array} buffer
|
||||
* The current buffer of gop information
|
||||
* @param {Double} start
|
||||
* position to start the remove at
|
||||
* @param {Double} end
|
||||
* position to end the remove at
|
||||
* @param {Double} mapping
|
||||
* Offset to map display time to stream presentation time
|
||||
*/
|
||||
/**
 * Removes gop information in buffer that overlaps with provided start and end
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {Double} start
 *        position to start the remove at
 * @param {Double} end
 *        position to end the remove at
 * @param {Double} mapping
 *        Offset to map display time to stream presentation time
 */
export const removeGopBuffer = (buffer, start, end, mapping) => {
  const startPts = Math.ceil((start - mapping) * ONE_SECOND_IN_TS);
  const endPts = Math.ceil((end - mapping) * ONE_SECOND_IN_TS);
  const updatedBuffer = buffer.slice();

  // walk back from the end to find the last gop at or before the range end
  let removeEnd = buffer.length;

  while (removeEnd--) {
    if (buffer[removeEnd].pts <= endPts) {
      break;
    }
  }

  if (removeEnd === -1) {
    // no removal because end of remove range is before start of buffer
    return updatedBuffer;
  }

  // walk back again to find the last gop at or before the range start
  let removeStart = removeEnd + 1;

  while (removeStart--) {
    if (buffer[removeStart].pts <= startPts) {
      break;
    }
  }

  // clamp remove range start to 0 index
  removeStart = Math.max(removeStart, 0);

  updatedBuffer.splice(removeStart, removeEnd - removeStart + 1);

  return updatedBuffer;
};
|
||||
11
build/javascript/node_modules/@videojs/http-streaming/src/util/logger.js
generated
vendored
Normal file
11
build/javascript/node_modules/@videojs/http-streaming/src/util/logger.js
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
import videojs from 'video.js';
|
||||
|
||||
/**
 * Create a prefixed debug logger for a named VHS component. When videojs
 * debug logging is unavailable, returns an inert function.
 *
 * @param {string} source name of the component doing the logging
 * @return {Function} a logging function
 */
const logger = (source) => {
  if (!videojs.log.debug) {
    return function() {};
  }

  return videojs.log.debug.bind(videojs, 'VHS:', `${source} >`);
};

export default logger;
|
||||
1
build/javascript/node_modules/@videojs/http-streaming/src/util/noop.js
generated
vendored
Normal file
1
build/javascript/node_modules/@videojs/http-streaming/src/util/noop.js
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
// Shared do-nothing function, used as a default/placeholder callback.
export default function noop() {}
|
||||
58
build/javascript/node_modules/@videojs/http-streaming/src/util/segment.js
generated
vendored
Normal file
58
build/javascript/node_modules/@videojs/http-streaming/src/util/segment.js
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
import tsInspector from 'mux.js/lib/tools/ts-inspector.js';
|
||||
import { ONE_SECOND_IN_TS } from 'mux.js/lib/utils/clock';
|
||||
|
||||
/**
|
||||
* Probe an mpeg2-ts segment to determine the start time of the segment in it's
|
||||
* internal "media time," as well as whether it contains video and/or audio.
|
||||
*
|
||||
* @private
|
||||
* @param {Uint8Array} bytes - segment bytes
|
||||
* @return {Object} The start time of the current segment in "media time" as well as
|
||||
* whether it contains video and/or audio
|
||||
*/
|
||||
/**
 * Probe an mpeg2-ts segment to determine the start time of the segment in it's
 * internal "media time," as well as whether it contains video and/or audio.
 *
 * @private
 * @param {Uint8Array} bytes - segment bytes
 * @param {number} baseStartTime - base start time, in seconds, passed to the
 *        inspector as a 90kHz clock value
 * @return {Object} The start time of the current segment in "media time" as well as
 *                  whether it contains video and/or audio
 */
export const probeTsSegment = (bytes, baseStartTime) => {
  const timeInfo = tsInspector.inspect(bytes, baseStartTime * ONE_SECOND_IN_TS);

  if (!timeInfo) {
    return null;
  }

  const { video, audio } = timeInfo;

  // each type's time info comes back as an array of 2 times, start and end
  const result = {
    hasVideo: Boolean(video && video.length === 2),
    hasAudio: Boolean(audio && audio.length === 2)
  };

  if (result.hasVideo) {
    result.videoStart = video[0].ptsTime;
  }
  if (result.hasAudio) {
    result.audioStart = audio[0].ptsTime;
  }

  return result;
};
|
||||
|
||||
/**
|
||||
* Combine all segments into a single Uint8Array
|
||||
*
|
||||
* @param {Object} segmentObj
|
||||
* @return {Uint8Array} concatenated bytes
|
||||
* @private
|
||||
*/
|
||||
/**
 * Combine all segments into a single Uint8Array
 *
 * @param {Object} segmentObj object with a `bytes` total length and a
 *        `segments` list of typed arrays to combine
 * @return {Uint8Array|undefined} concatenated bytes, or undefined when
 *         `bytes` is not set
 * @private
 */
export const concatSegments = (segmentObj) => {
  if (!segmentObj.bytes) {
    return undefined;
  }

  // combine the individual segments into one large typed-array
  const combined = new Uint8Array(segmentObj.bytes);
  let writeOffset = 0;

  for (const segment of segmentObj.segments) {
    combined.set(segment, writeOffset);
    writeOffset += segment.byteLength;
  }

  return combined;
};
|
||||
41
build/javascript/node_modules/@videojs/http-streaming/src/util/shallow-equal.js
generated
vendored
Normal file
41
build/javascript/node_modules/@videojs/http-streaming/src/util/shallow-equal.js
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
const shallowEqual = function(a, b) {
  // a missing operand means "not equal" — even when BOTH are missing
  if (!a || !b) {
    return false;
  }

  // identical references are trivially equal
  if (a === b) {
    return true;
  }

  // sort both key lists so the objects must have exactly the same keys,
  // compared positionally below
  const aKeys = Object.keys(a).sort();
  const bKeys = Object.keys(b).sort();

  // different number of keys, not equal
  if (aKeys.length !== bKeys.length) {
    return false;
  }

  // equal only when every sorted key matches and every value is strictly equal
  return aKeys.every((key, i) => key === bKeys[i] && a[key] === b[key]);
};

export default shallowEqual;
|
||||
9
build/javascript/node_modules/@videojs/http-streaming/src/util/string-to-array-buffer.js
generated
vendored
Normal file
9
build/javascript/node_modules/@videojs/http-streaming/src/util/string-to-array-buffer.js
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
export const stringToArrayBuffer = (string) => {
  // one byte per UTF-16 code unit; values above 255 wrap as usual for
  // Uint8Array element assignment
  const bytes = new Uint8Array(string.length);
  let i = string.length;

  while (i--) {
    bytes[i] = string.charCodeAt(i);
  }

  return bytes.buffer;
};
|
||||
2
build/javascript/node_modules/@videojs/http-streaming/src/util/string.js
generated
vendored
Normal file
2
build/javascript/node_modules/@videojs/http-streaming/src/util/string.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
export const uint8ToUtf8 = (uintArray) => {
  // build a binary string (one char per byte), then reinterpret those bytes
  // as UTF-8 via the classic escape/decodeURIComponent round-trip
  const binaryString = String.fromCharCode(...uintArray);

  return decodeURIComponent(escape(binaryString));
};
|
||||
248
build/javascript/node_modules/@videojs/http-streaming/src/util/text-tracks.js
generated
vendored
Normal file
248
build/javascript/node_modules/@videojs/http-streaming/src/util/text-tracks.js
generated
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
/**
|
||||
* @file text-tracks.js
|
||||
*/
|
||||
import window from 'global/window';
|
||||
import videojs from 'video.js';
|
||||
|
||||
/**
|
||||
* Create captions text tracks on video.js if they do not exist
|
||||
*
|
||||
* @param {Object} inbandTextTracks a reference to current inbandTextTracks
|
||||
* @param {Object} tech the video.js tech
|
||||
* @param {Object} captionStream the caption stream to create
|
||||
* @private
|
||||
*/
|
||||
export const createCaptionsTrackIfNotExists = function(inbandTextTracks, tech, captionStream) {
  // already created or adopted — nothing to do
  if (inbandTextTracks[captionStream]) {
    return;
  }

  tech.trigger({type: 'usage', name: 'vhs-608'});
  tech.trigger({type: 'usage', name: 'hls-608'});

  const existingTrack = tech.textTracks().getTrackById(captionStream);

  if (existingTrack) {
    // Reuse an existing track with a CC# id because this was very likely
    // created by videojs-contrib-hls from information in the m3u8 for us to use
    inbandTextTracks[captionStream] = existingTrack;
    return;
  }

  // Otherwise, create a track with the default `CC#` label and without a language
  inbandTextTracks[captionStream] = tech.addRemoteTextTrack({
    kind: 'captions',
    id: captionStream,
    label: captionStream
  }, false).track;
};
|
||||
|
||||
/**
|
||||
* Add caption text track data to a source handler given an array of captions
|
||||
*
|
||||
* @param {Object}
|
||||
* @param {Object} inbandTextTracks the inband text tracks
|
||||
* @param {number} timestampOffset the timestamp offset of the source buffer
|
||||
* @param {Array} captionArray an array of caption data
|
||||
* @private
|
||||
*/
|
||||
export const addCaptionData = function({
  inbandTextTracks,
  captionArray,
  timestampOffset
}) {
  // nothing to add
  if (!captionArray) {
    return;
  }

  // prefer WebKitDataCue when the platform provides it
  const Cue = window.WebKitDataCue || window.VTTCue;

  for (const caption of captionArray) {
    const trackName = caption.stream;

    // shift caption times by the source buffer's timestamp offset
    inbandTextTracks[trackName].addCue(new Cue(
      caption.startTime + timestampOffset,
      caption.endTime + timestampOffset,
      caption.text
    ));
  }
};
|
||||
|
||||
/**
|
||||
* Define properties on a cue for backwards compatability,
|
||||
* but warn the user that the way that they are using it
|
||||
* is depricated and will be removed at a later date.
|
||||
*
|
||||
* @param {Cue} cue the cue to add the properties on
|
||||
* @private
|
||||
*/
|
||||
const deprecateOldCue = function(cue) {
  // build a getter that logs a deprecation warning, then delegates to the
  // replacement value on cue.value
  const deprecatedGetter = (message, getValue) => ({
    get() {
      videojs.log.warn(message);
      return getValue();
    }
  });

  Object.defineProperties(cue.frame, {
    id: deprecatedGetter(
      'cue.frame.id is deprecated. Use cue.value.key instead.',
      () => cue.value.key
    ),
    value: deprecatedGetter(
      'cue.frame.value is deprecated. Use cue.value.data instead.',
      () => cue.value.data
    ),
    privateData: deprecatedGetter(
      'cue.frame.privateData is deprecated. Use cue.value.data instead.',
      () => cue.value.data
    )
  });
};
|
||||
|
||||
/**
|
||||
* Add metadata text track data to a source handler given an array of metadata
|
||||
*
|
||||
* @param {Object}
|
||||
* @param {Object} inbandTextTracks the inband text tracks
|
||||
* @param {Array} metadataArray an array of meta data
|
||||
* @param {number} timestampOffset the timestamp offset of the source buffer
|
||||
* @param {number} videoDuration the duration of the video
|
||||
* @private
|
||||
*/
|
||||
export const addMetadata = ({
  inbandTextTracks,
  metadataArray,
  timestampOffset,
  videoDuration
}) => {
  // nothing to add
  if (!metadataArray) {
    return;
  }

  // prefer WebKitDataCue when the platform provides it
  const Cue = window.WebKitDataCue || window.VTTCue;
  // destination track; created elsewhere by createMetadataTrackIfNotExists
  const metadataTrack = inbandTextTracks.metadataTrack_;

  if (!metadataTrack) {
    return;
  }

  metadataArray.forEach((metadata) => {
    const time = metadata.cueTime + timestampOffset;

    // if time isn't a finite number between 0 and Infinity, like NaN,
    // ignore this bit of metadata.
    // This likely occurs when you have an non-timed ID3 tag like TIT2,
    // which is the "Title/Songname/Content description" frame
    if (typeof time !== 'number' || window.isNaN(time) || time < 0 || !(time < Infinity)) {
      return;
    }

    metadata.frames.forEach((frame) => {
      // zero-length cue at `time`; endTime is fixed up at the end of this
      // function once all cues are in the track
      const cue = new Cue(
        time,
        time,
        frame.value || frame.url || frame.data || ''
      );

      cue.frame = frame;
      cue.value = frame;
      // expose cue.frame.id/value/privateData as deprecated getters
      deprecateOldCue(cue);

      metadataTrack.addCue(cue);
    });
  });

  if (!metadataTrack.cues || !metadataTrack.cues.length) {
    return;
  }

  // Updating the metadeta cues so that
  // the endTime of each cue is the startTime of the next cue
  // the endTime of last cue is the duration of the video
  const cues = metadataTrack.cues;
  const cuesArray = [];

  // Create a copy of the TextTrackCueList...
  // ...disregarding cues with a falsey value
  for (let i = 0; i < cues.length; i++) {
    if (cues[i]) {
      cuesArray.push(cues[i]);
    }
  }

  // Group cues by their startTime value
  const cuesGroupedByStartTime = cuesArray.reduce((obj, cue) => {
    const timeSlot = obj[cue.startTime] || [];

    timeSlot.push(cue);
    obj[cue.startTime] = timeSlot;

    return obj;
  }, {});

  // Sort startTimes by ascending order
  const sortedStartTimes = Object.keys(cuesGroupedByStartTime)
    .sort((a, b) => Number(a) - Number(b));

  // Map each cue group's endTime to the next group's startTime
  sortedStartTimes.forEach((startTime, idx) => {
    const cueGroup = cuesGroupedByStartTime[startTime];
    // NOTE(review): `|| videoDuration` also triggers when the next start time
    // is 0, not just for the final group — presumably start times here are
    // always positive; confirm
    const nextTime = Number(sortedStartTimes[idx + 1]) || videoDuration;

    // Map each cue's endTime the next group's startTime
    cueGroup.forEach((cue) => {
      cue.endTime = nextTime;
    });
  });
};
|
||||
|
||||
/**
|
||||
* Create metadata text track on video.js if it does not exist
|
||||
*
|
||||
* @param {Object} inbandTextTracks a reference to current inbandTextTracks
|
||||
* @param {string} dispatchType the inband metadata track dispatch type
|
||||
* @param {Object} tech the video.js tech
|
||||
* @private
|
||||
*/
|
||||
export const createMetadataTrackIfNotExists = (inbandTextTracks, dispatchType, tech) => {
  // the metadata track is created at most once
  if (inbandTextTracks.metadataTrack_) {
    return;
  }

  const metadataTrack = tech.addRemoteTextTrack({
    kind: 'metadata',
    label: 'Timed Metadata'
  }, false).track;

  inbandTextTracks.metadataTrack_ = metadataTrack;
  metadataTrack.inBandMetadataTrackDispatchType = dispatchType;
};
|
||||
|
||||
/**
|
||||
* Remove cues from a track on video.js.
|
||||
*
|
||||
* @param {Double} start start of where we should remove the cue
|
||||
* @param {Double} end end of where the we should remove the cue
|
||||
* @param {Object} track the text track to remove the cues from
|
||||
* @private
|
||||
*/
|
||||
export const removeCuesFromTrack = function(start, end, track) {
  // no track, or a track without a cue list — nothing to remove
  if (!track || !track.cues) {
    return;
  }

  // walk backwards so removals don't disturb the indices still to visit
  for (let i = track.cues.length - 1; i >= 0; i--) {
    const cue = track.cues[i];

    // Remove any overlapping cue (fully contained in [start, end])
    if (cue.startTime >= start && cue.endTime <= end) {
      track.removeCue(cue);
    }
  }
};
|
||||
402
build/javascript/node_modules/@videojs/http-streaming/src/util/time.js
generated
vendored
Normal file
402
build/javascript/node_modules/@videojs/http-streaming/src/util/time.js
generated
vendored
Normal file
@@ -0,0 +1,402 @@
|
||||
// TODO handle fmp4 case where the timing info is accurate and doesn't involve transmux
|
||||
|
||||
/**
|
||||
* @file time.js
|
||||
*/
|
||||
|
||||
import Playlist from '../playlist';
|
||||
|
||||
// Add 25% to the segment duration to account for small discrepencies in segment timing.
|
||||
// 25% was arbitrarily chosen, and may need to be refined over time.
|
||||
const SEGMENT_END_FUDGE_PERCENT = 0.25;
|
||||
|
||||
/**
|
||||
* Converts a player time (any time that can be gotten/set from player.currentTime(),
|
||||
* e.g., any time within player.seekable().start(0) to player.seekable().end(0)) to a
|
||||
* program time (any time referencing the real world (e.g., EXT-X-PROGRAM-DATE-TIME)).
|
||||
*
|
||||
* The containing segment is required as the EXT-X-PROGRAM-DATE-TIME serves as an "anchor
|
||||
* point" (a point where we have a mapping from program time to player time, with player
|
||||
* time being the post transmux start of the segment).
|
||||
*
|
||||
* For more details, see [this doc](../../docs/program-time-from-player-time.md).
|
||||
*
|
||||
* @param {number} playerTime the player time
|
||||
* @param {Object} segment the segment which contains the player time
|
||||
* @return {Date} program time
|
||||
*/
|
||||
export const playerTimeToProgramTime = (playerTime, segment) => {
  if (!segment.dateTimeObject) {
    // Can't convert without an "anchor point" for the program time (i.e., a time that can
    // be used to map the start of a segment with a real world time).
    return null;
  }

  const {
    transmuxerPrependedSeconds,
    transmuxedPresentationStart
  } = segment.videoTimingInfo;

  // the start of the content from before old content is prepended
  const startOfSegment = transmuxedPresentationStart + transmuxerPrependedSeconds;
  const secondsIntoSegment = playerTime - startOfSegment;

  return new Date(segment.dateTimeObject.getTime() + secondsIntoSegment * 1000);
};
|
||||
|
||||
export const originalSegmentVideoDuration = (videoTimingInfo) => {
  const {
    transmuxedPresentationEnd,
    transmuxedPresentationStart,
    transmuxerPrependedSeconds
  } = videoTimingInfo;

  // exclude content the transmuxer prepended from a prior segment
  return transmuxedPresentationEnd - transmuxedPresentationStart - transmuxerPrependedSeconds;
};
|
||||
|
||||
/**
|
||||
* Finds a segment that contains the time requested given as an ISO-8601 string. The
|
||||
* returned segment might be an estimate or an accurate match.
|
||||
*
|
||||
* @param {string} programTime The ISO-8601 programTime to find a match for
|
||||
* @param {Object} playlist A playlist object to search within
|
||||
*/
|
||||
export const findSegmentForProgramTime = (programTime, playlist) => {
  // Assumptions:
  //  - verifyProgramDateTimeTags has already been run
  //  - live streams have been started

  let dateTimeObject;

  // NOTE(review): `new Date(string)` does not throw on bad input — it yields
  // an Invalid Date — so this guard may be ineffective; confirm
  try {
    dateTimeObject = new Date(programTime);
  } catch (e) {
    return null;
  }

  if (!playlist || !playlist.segments || playlist.segments.length === 0) {
    return null;
  }

  let segment = playlist.segments[0];

  if (dateTimeObject < segment.dateTimeObject) {
    // Requested time is before stream start.
    return null;
  }

  // scan forward until the next segment would start after the requested time
  for (let i = 0; i < playlist.segments.length - 1; i++) {
    segment = playlist.segments[i];

    const nextSegmentStart = playlist.segments[i + 1].dateTimeObject;

    if (dateTimeObject < nextSegmentStart) {
      break;
    }
  }

  // estimate the end of the stream from the last segment: accurate post-transmux
  // duration when available, otherwise the manifest duration plus a fudge factor
  const lastSegment = playlist.segments[playlist.segments.length - 1];
  const lastSegmentStart = lastSegment.dateTimeObject;
  const lastSegmentDuration = lastSegment.videoTimingInfo ?
    originalSegmentVideoDuration(lastSegment.videoTimingInfo) :
    lastSegment.duration + lastSegment.duration * SEGMENT_END_FUDGE_PERCENT;
  const lastSegmentEnd =
    new Date(lastSegmentStart.getTime() + lastSegmentDuration * 1000);

  if (dateTimeObject > lastSegmentEnd) {
    // Beyond the end of the stream, or our best guess of the end of the stream.
    return null;
  }

  if (dateTimeObject > lastSegmentStart) {
    segment = lastSegment;
  }

  return {
    segment,
    // player-time start of the match: accurate when transmux timing exists,
    // otherwise derived from the playlist durations
    estimatedStart: segment.videoTimingInfo ?
      segment.videoTimingInfo.transmuxedPresentationStart :
      Playlist.duration(
        playlist,
        playlist.mediaSequence + playlist.segments.indexOf(segment)
      ),
    // Although, given that all segments have accurate date time objects, the segment
    // selected should be accurate, unless the video has been transmuxed at some point
    // (determined by the presence of the videoTimingInfo object), the segment's "player
    // time" (the start time in the player) can't be considered accurate.
    type: segment.videoTimingInfo ? 'accurate' : 'estimate'
  };
};
|
||||
|
||||
/**
|
||||
* Finds a segment that contains the given player time(in seconds).
|
||||
*
|
||||
* @param {number} time The player time to find a match for
|
||||
* @param {Object} playlist A playlist object to search within
|
||||
*/
|
||||
export const findSegmentForPlayerTime = (time, playlist) => {
  // Assumptions:
  // - there will always be a segment.duration
  // - we can start from zero
  // - segments are in time order

  if (!playlist || !playlist.segments || playlist.segments.length === 0) {
    return null;
  }

  // running end time of the segment under inspection, in player seconds
  let segmentEnd = 0;
  let segment;

  for (let i = 0; i < playlist.segments.length; i++) {
    segment = playlist.segments[i];

    // videoTimingInfo is set after the segment is downloaded and transmuxed, and
    // should contain the most accurate values we have for the segment's player times.
    //
    // Use the accurate transmuxedPresentationEnd value if it is available, otherwise fall
    // back to an estimate based on the manifest derived (inaccurate) segment.duration, to
    // calculate an end value.
    segmentEnd = segment.videoTimingInfo ?
      segment.videoTimingInfo.transmuxedPresentationEnd : segmentEnd + segment.duration;

    if (time <= segmentEnd) {
      break;
    }
  }

  const lastSegment = playlist.segments[playlist.segments.length - 1];

  if (lastSegment.videoTimingInfo &&
      lastSegment.videoTimingInfo.transmuxedPresentationEnd < time) {
    // The time requested is beyond the stream end.
    return null;
  }

  if (time > segmentEnd) {
    // The time is within or beyond the last segment.
    //
    // Check to see if the time is beyond a reasonable guess of the end of the stream.
    if (time > segmentEnd + (lastSegment.duration * SEGMENT_END_FUDGE_PERCENT)) {
      // Technically, because the duration value is only an estimate, the time may still
      // exist in the last segment, however, there isn't enough information to make even
      // a reasonable estimate.
      return null;
    }

    segment = lastSegment;
  }

  return {
    segment,
    estimatedStart: segment.videoTimingInfo ?
      segment.videoTimingInfo.transmuxedPresentationStart : segmentEnd - segment.duration,
    // Because videoTimingInfo is only set after transmux, it is the only way to get
    // accurate timing values.
    type: segment.videoTimingInfo ? 'accurate' : 'estimate'
  };
};
|
||||
|
||||
/**
|
||||
* Gives the offset of the comparisonTimestamp from the programTime timestamp in seconds.
|
||||
* If the offset returned is positive, the programTime occurs after the
|
||||
* comparisonTimestamp.
|
||||
* If the offset is negative, the programTime occurs before the comparisonTimestamp.
|
||||
*
|
||||
* @param {string} comparisonTimeStamp An ISO-8601 timestamp to compare against
|
||||
* @param {string} programTime The programTime as an ISO-8601 string
|
||||
* @return {number} offset
|
||||
*/
|
||||
export const getOffsetFromTimestamp = (comparisonTimeStamp, programTime) => {
  let segmentDateTime;
  let programDateTime;

  try {
    segmentDateTime = new Date(comparisonTimeStamp);
    programDateTime = new Date(programTime);
  } catch (e) {
    // TODO handle error
  }

  // positive result means programTime occurs after comparisonTimeStamp
  return (programDateTime.getTime() - segmentDateTime.getTime()) / 1000;
};
|
||||
|
||||
/**
|
||||
* Checks that all segments in this playlist have programDateTime tags.
|
||||
*
|
||||
* @param {Object} playlist A playlist object
|
||||
*/
|
||||
export const verifyProgramDateTimeTags = (playlist) => {
  // an empty or missing segment list can't satisfy the requirement
  if (!playlist.segments || playlist.segments.length === 0) {
    return false;
  }

  // every segment must carry a parsed EXT-X-PROGRAM-DATE-TIME anchor
  return playlist.segments.every((segment) => Boolean(segment.dateTimeObject));
};
|
||||
|
||||
/**
|
||||
* Returns the programTime of the media given a playlist and a playerTime.
|
||||
* The playlist must have programDateTime tags for a programDateTime tag to be returned.
|
||||
* If the segments containing the time requested have not been buffered yet, an estimate
|
||||
* may be returned to the callback.
|
||||
*
|
||||
* @param {Object} args
|
||||
* @param {Object} args.playlist A playlist object to search within
|
||||
* @param {number} time A playerTime in seconds
|
||||
* @param {Function} callback(err, programTime)
|
||||
* @return {string} err.message A detailed error message
|
||||
* @return {Object} programTime
|
||||
* @return {number} programTime.mediaSeconds The streamTime in seconds
|
||||
* @return {string} programTime.programDateTime The programTime as an ISO-8601 String
|
||||
*/
|
||||
export const getProgramTime = ({
  playlist,
  time = undefined,
  callback
}) => {

  if (!callback) {
    throw new Error('getProgramTime: callback must be provided');
  }

  if (!playlist || time === undefined) {
    return callback({
      message: 'getProgramTime: playlist and time must be provided'
    });
  }

  // locate the segment whose player-time range contains `time`
  const matchedSegment = findSegmentForPlayerTime(time, playlist);

  if (!matchedSegment) {
    return callback({
      message: 'valid programTime was not found'
    });
  }

  if (matchedSegment.type === 'estimate') {
    // the match hasn't been transmuxed yet, so only an estimated seek target
    // can be offered back to the caller via err.seekTime
    return callback({
      message:
        'Accurate programTime could not be determined.' +
        ' Please seek to e.seekTime and try again',
      seekTime: matchedSegment.estimatedStart
    });
  }

  const programTimeObject = {
    mediaSeconds: time
  };
  // null when the segment has no dateTimeObject anchor; in that case only
  // mediaSeconds is returned
  const programTime = playerTimeToProgramTime(time, matchedSegment.segment);

  if (programTime) {
    programTimeObject.programDateTime = programTime.toISOString();
  }

  return callback(null, programTimeObject);
};
|
||||
|
||||
/**
|
||||
* Seeks in the player to a time that matches the given programTime ISO-8601 string.
|
||||
*
|
||||
* @param {Object} args
|
||||
* @param {string} args.programTime A programTime to seek to as an ISO-8601 String
|
||||
* @param {Object} args.playlist A playlist to look within
|
||||
* @param {number} args.retryCount The number of times to try for an accurate seek. Default is 2.
|
||||
* @param {Function} args.seekTo A method to perform a seek
|
||||
* @param {boolean} args.pauseAfterSeek Whether to end in a paused state after seeking. Default is true.
|
||||
* @param {Object} args.tech The tech to seek on
|
||||
* @param {Function} args.callback(err, newTime) A callback to return the new time to
|
||||
* @return {string} err.message A detailed error message
|
||||
* @return {number} newTime The exact time that was seeked to in seconds
|
||||
*/
|
||||
export const seekToProgramTime = ({
  programTime,
  playlist,
  retryCount = 2,
  seekTo,
  pauseAfterSeek = true,
  tech,
  callback
}) => {

  if (!callback) {
    throw new Error('seekToProgramTime: callback must be provided');
  }

  if (typeof programTime === 'undefined' || !playlist || !seekTo) {
    return callback({
      message: 'seekToProgramTime: programTime, seekTo and playlist must be provided'
    });
  }

  // a live stream that hasn't started yet has nothing buffered to seek within
  if (!playlist.endList && !tech.hasStarted_) {
    return callback({
      message: 'player must be playing a live stream to start buffering'
    });
  }

  // every segment needs an EXT-X-PROGRAM-DATE-TIME anchor for the mapping to work
  if (!verifyProgramDateTimeTags(playlist)) {
    return callback({
      message: 'programDateTime tags must be provided in the manifest ' + playlist.resolvedUri
    });
  }

  const matchedSegment = findSegmentForProgramTime(programTime, playlist);

  // no match
  if (!matchedSegment) {
    return callback({
      message: `${programTime} was not found in the stream`
    });
  }

  const segment = matchedSegment.segment;
  // seconds between the segment's program-date-time and the requested time
  const mediaOffset = getOffsetFromTimestamp(
    segment.dateTimeObject,
    programTime
  );

  if (matchedSegment.type === 'estimate') {
    // we've run out of retries
    if (retryCount === 0) {
      return callback({
        message: `${programTime} is not buffered yet. Try again`
      });
    }

    // seek to the estimate; once the seek completes the segment should be
    // buffered/transmuxed, so retry with one fewer attempt remaining
    seekTo(matchedSegment.estimatedStart + mediaOffset);

    tech.one('seeked', () => {
      seekToProgramTime({
        programTime,
        playlist,
        retryCount: retryCount - 1,
        seekTo,
        pauseAfterSeek,
        tech,
        callback
      });
    });

    return;
  }

  // Since the segment.start value is determined from the buffered end or ending time
  // of the prior segment, the seekToTime doesn't need to account for any transmuxer
  // modifications.
  const seekToTime = segment.start + mediaOffset;
  const seekedCallback = () => {
    return callback(null, tech.currentTime());
  };

  // listen for seeked event
  tech.one('seeked', seekedCallback);
  // pause before seeking as video.js will restore this state
  if (pauseAfterSeek) {
    tech.pause();
  }
  seekTo(seekToTime);
};
|
||||
9
build/javascript/node_modules/@videojs/http-streaming/src/util/to-title-case.js
generated
vendored
Normal file
9
build/javascript/node_modules/@videojs/http-streaming/src/util/to-title-case.js
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
const toTitleCase = function(string) {
  // non-strings pass through untouched; otherwise uppercase the first
  // character matched by /./ (no /g flag, so at most one replacement)
  return typeof string === 'string' ?
    string.replace(/./, (firstChar) => firstChar.toUpperCase()) :
    string;
};

export default toTitleCase;
|
||||
1105
build/javascript/node_modules/@videojs/http-streaming/src/videojs-http-streaming.js
generated
vendored
Normal file
1105
build/javascript/node_modules/@videojs/http-streaming/src/videojs-http-streaming.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
491
build/javascript/node_modules/@videojs/http-streaming/src/vtt-segment-loader.js
generated
vendored
Normal file
491
build/javascript/node_modules/@videojs/http-streaming/src/vtt-segment-loader.js
generated
vendored
Normal file
@@ -0,0 +1,491 @@
|
||||
/**
|
||||
* @file vtt-segment-loader.js
|
||||
*/
|
||||
import SegmentLoader from './segment-loader';
|
||||
import videojs from 'video.js';
|
||||
import window from 'global/window';
|
||||
import { removeCuesFromTrack } from './util/text-tracks';
|
||||
import { initSegmentId } from './bin-utils';
|
||||
import { uint8ToUtf8 } from './util/string';
|
||||
import { REQUEST_ERRORS } from './media-segment-request';
|
||||
import { ONE_SECOND_IN_TS } from 'mux.js/lib/utils/clock';
|
||||
|
||||
// Two WebVTT line terminators (LF LF) as bytes, used to separate the WebVTT
// header from the rest of the file.
const VTT_LINE_TERMINATORS =
  new Uint8Array([...'\n\n'].map((char) => char.charCodeAt(0)));
|
||||
|
||||
/**
|
||||
* An object that manages segment loading and appending.
|
||||
*
|
||||
* @class VTTSegmentLoader
|
||||
* @param {Object} options required and optional options
|
||||
* @extends videojs.EventTarget
|
||||
*/
|
||||
export default class VTTSegmentLoader extends SegmentLoader {
|
||||
constructor(settings, options = {}) {
  super(settings, options);

  // VTT can't handle partial data
  this.handlePartialData_ = false;

  // SegmentLoader requires a MediaSource be specified or it will throw an error;
  // however, VTTSegmentLoader has no need of a media source, so delete the reference
  this.mediaSource_ = null;

  // destination TextTrack; assigned later via track()
  this.subtitlesTrack_ = null;

  this.loaderType_ = 'subtitle';

  this.featuresNativeTextTracks_ = settings.featuresNativeTextTracks;

  // The VTT segment will have its own time mappings. Saving VTT segment timing info in
  // the sync controller leads to improper behavior.
  this.shouldSaveSegmentTimingInfo_ = false;
}
|
||||
|
||||
createTransmuxer_() {
  // don't need to transmux any subtitles, so override the base loader's
  // transmuxer creation with a no-op
  return null;
}
|
||||
|
||||
/**
|
||||
* Indicates which time ranges are buffered
|
||||
*
|
||||
* @return {TimeRange}
|
||||
* TimeRange object representing the current buffered ranges
|
||||
*/
|
||||
buffered_() {
  // with no track or no cues, nothing is buffered
  if (!this.subtitlesTrack_ || !this.subtitlesTrack_.cues.length) {
    return videojs.createTimeRanges();
  }

  const cues = this.subtitlesTrack_.cues;
  const start = cues[0].startTime;
  // note: uses the LAST cue's startTime (not its endTime) as the buffered end
  const end = cues[cues.length - 1].startTime;

  return videojs.createTimeRanges([[start, end]]);
}
|
||||
|
||||
/**
|
||||
* Gets and sets init segment for the provided map
|
||||
*
|
||||
* @param {Object} map
|
||||
* The map object representing the init segment to get or set
|
||||
* @param {boolean=} set
|
||||
* If true, the init segment for the provided map should be saved
|
||||
* @return {Object}
|
||||
* map object for desired init segment
|
||||
*/
|
||||
initSegmentForMap(map, set = false) {
  if (!map) {
    return null;
  }

  // cache key derived from the map's identifying properties
  const id = initSegmentId(map);
  let storedMap = this.initSegments_[id];

  // only store once per id, and only when the raw bytes are available
  if (set && !storedMap && map.bytes) {
    // append WebVTT line terminators to the media initialization segment if it exists
    // to follow the WebVTT spec (https://w3c.github.io/webvtt/#file-structure) that
    // requires two or more WebVTT line terminators between the WebVTT header and the
    // rest of the file
    const combinedByteLength = VTT_LINE_TERMINATORS.byteLength + map.bytes.byteLength;
    const combinedSegment = new Uint8Array(combinedByteLength);

    combinedSegment.set(map.bytes);
    combinedSegment.set(VTT_LINE_TERMINATORS, map.bytes.byteLength);

    this.initSegments_[id] = storedMap = {
      resolvedUri: map.resolvedUri,
      byterange: map.byterange,
      bytes: combinedSegment
    };
  }

  // fall back to the passed-in map when nothing has been stored for this id
  return storedMap || map;
}
|
||||
|
||||
/**
|
||||
* Returns true if all configuration required for loading is present, otherwise false.
|
||||
*
|
||||
* @return {boolean} True if the all configuration is ready for loading
|
||||
* @private
|
||||
*/
|
||||
couldBeginLoading_() {
  // loading needs a playlist, a destination subtitles track, and an
  // unpaused loader (note: may return a falsy non-boolean when the
  // playlist or track is unset)
  return this.playlist_ &&
         this.subtitlesTrack_ &&
         !this.paused();
}
|
||||
|
||||
/**
|
||||
* Once all the starting parameters have been specified, begin
|
||||
* operation. This method should only be invoked from the INIT
|
||||
* state.
|
||||
*
|
||||
* @private
|
||||
*/
|
||||
init_() {
  // leave INIT, clear any previously loaded state, and start watching
  // the buffer for segments to load
  this.state = 'READY';
  this.resetEverything();
  return this.monitorBuffer_();
}
|
||||
|
||||
/**
|
||||
* Set a subtitle track on the segment loader to add subtitles to
|
||||
*
|
||||
* @param {TextTrack=} track
|
||||
* The text track to add loaded subtitles to
|
||||
* @return {TextTrack}
|
||||
* Returns the subtitles track
|
||||
*/
|
||||
track(track) {
  // getter form: no argument, return the current track
  if (typeof track === 'undefined') {
    return this.subtitlesTrack_;
  }

  this.subtitlesTrack_ = track;

  // if we were unpaused but waiting for a sourceUpdater, start
  // buffering now
  if (this.state === 'INIT' && this.couldBeginLoading_()) {
    this.init_();
  }

  return this.subtitlesTrack_;
}
|
||||
|
||||
/**
|
||||
* Remove any data in the source buffer between start and end times
|
||||
*
|
||||
* @param {number} start - the start time of the region to remove from the buffer
|
||||
* @param {number} end - the end time of the region to remove from the buffer
|
||||
*/
|
||||
remove(start, end) {
  // for subtitles, "removing from the buffer" means removing cues from
  // the subtitles track rather than touching a SourceBuffer
  removeCuesFromTrack(start, end, this.subtitlesTrack_);
}
|
||||
|
||||
/**
|
||||
* fill the buffer with segements unless the sourceBuffers are
|
||||
* currently updating
|
||||
*
|
||||
* Note: this function should only ever be called by monitorBuffer_
|
||||
* and never directly
|
||||
*
|
||||
* @private
|
||||
*/
|
||||
fillBuffer_() {
  // establish a sync point (a known mapping between playlist index and
  // player time) if we don't have one yet
  if (!this.syncPoint_) {
    this.syncPoint_ = this.syncController_.getSyncPoint(
      this.playlist_,
      this.duration_(),
      this.currentTimeline_,
      this.currentTime_()
    );
  }

  // see if we need to begin loading immediately
  let segmentInfo = this.checkBuffer_(
    this.buffered_(),
    this.playlist_,
    this.mediaIndex,
    this.hasPlayed_(),
    this.currentTime_(),
    this.syncPoint_
  );

  // don't request segments known to contain no subtitles
  segmentInfo = this.skipEmptySegments_(segmentInfo);

  if (!segmentInfo) {
    return;
  }

  if (this.syncController_.timestampOffsetForTimeline(segmentInfo.timeline) === null) {
    // We don't have the timestamp offset that we need to sync subtitles.
    // Rerun on a timestamp offset or user interaction.
    const checkTimestampOffset = () => {
      this.state = 'READY';
      if (!this.paused()) {
        // if not paused, queue a buffer check as soon as possible
        this.monitorBuffer_();
      }
    };

    this.syncController_.one('timestampoffset', checkTimestampOffset);
    this.state = 'WAITING_ON_TIMELINE';
    return;
  }

  this.loadSegment_(segmentInfo);
}
|
||||
|
||||
/**
|
||||
* Prevents the segment loader from requesting segments we know contain no subtitles
|
||||
* by walking forward until we find the next segment that we don't know whether it is
|
||||
* empty or not.
|
||||
*
|
||||
* @param {Object} segmentInfo
|
||||
* a segment info object that describes the current segment
|
||||
* @return {Object}
|
||||
* a segment info object that describes the current segment
|
||||
*/
|
||||
skipEmptySegments_(segmentInfo) {
|
||||
while (segmentInfo && segmentInfo.segment.empty) {
|
||||
segmentInfo = this.generateSegmentInfo_(
|
||||
segmentInfo.playlist,
|
||||
segmentInfo.mediaIndex + 1,
|
||||
segmentInfo.startOfSegment + segmentInfo.duration,
|
||||
segmentInfo.isSyncRequest
|
||||
);
|
||||
}
|
||||
return segmentInfo;
|
||||
}
|
||||
|
||||
  /**
   * Stops the loader in response to an error: records the error, pauses
   * further loading, and notifies listeners via the 'error' event.
   *
   * @param {Object} error - the error object to expose through error()
   */
  stopForError(error) {
    this.error(error);
    // return to READY so a later retry can restart the state machine
    this.state = 'READY';
    this.pause();
    this.trigger('error');
  }
|
||||
|
||||
  /**
   * Parse a finished (and, if needed, decrypted) segment response and append
   * the resulting cues to the subtitles track.
   *
   * @param {Object} error - error from the segment request, if any
   * @param {Object} simpleSegment - simplified segment object with request stats/bytes
   * @param {Object} result - additional request result data (forwarded on retry)
   * @private
   */
  segmentRequestFinished_(error, simpleSegment, result) {
    // without a track there is nowhere to put cues; drop the response
    if (!this.subtitlesTrack_) {
      this.state = 'READY';
      return;
    }

    this.saveTransferStats_(simpleSegment.stats);

    // the request was aborted
    if (!this.pendingSegment_) {
      this.state = 'READY';
      this.mediaRequestsAborted += 1;
      return;
    }

    if (error) {
      if (error.code === REQUEST_ERRORS.TIMEOUT) {
        this.handleTimeout_();
      }

      // bucket the failure for stats, then stop and surface the error
      if (error.code === REQUEST_ERRORS.ABORTED) {
        this.mediaRequestsAborted += 1;
      } else {
        this.mediaRequestsErrored += 1;
      }

      this.stopForError(error);
      return;
    }

    // although the VTT segment loader bandwidth isn't really used, it's good to
    // maintain functionality between segment loaders
    this.saveBandwidthRelatedStats_(simpleSegment.stats);

    this.state = 'APPENDING';

    // used for tests
    this.trigger('appending');

    const segmentInfo = this.pendingSegment_;
    const segment = segmentInfo.segment;

    // copy init-segment and payload bytes onto the pending segment so the
    // parser (and any retry after vttjs loads) can see them
    if (segment.map) {
      segment.map.bytes = simpleSegment.map.bytes;
    }
    segmentInfo.bytes = simpleSegment.bytes;

    // Make sure that vttjs has loaded, otherwise, wait till it finished loading
    if (typeof window.WebVTT !== 'function' &&
        this.subtitlesTrack_ &&
        this.subtitlesTrack_.tech_) {

      let loadHandler;
      const errorHandler = () => {
        // unhook the sibling handler so only one of the pair ever fires
        this.subtitlesTrack_.tech_.off('vttjsloaded', loadHandler);
        this.stopForError({
          message: 'Error loading vtt.js'
        });
        return;
      };

      loadHandler = () => {
        this.subtitlesTrack_.tech_.off('vttjserror', errorHandler);
        // re-enter this method now that window.WebVTT is available
        this.segmentRequestFinished_(error, simpleSegment, result);
      };

      this.state = 'WAITING_ON_VTTJS';
      this.subtitlesTrack_.tech_.one('vttjsloaded', loadHandler);
      this.subtitlesTrack_.tech_.one('vttjserror', errorHandler);

      return;
    }

    segment.requested = true;

    try {
      this.parseVTTCues_(segmentInfo);
    } catch (e) {
      // parse failures are surfaced like request errors
      this.stopForError({
        message: e.message
      });
      return;
    }

    // shift cue times from local VTT time into player time using the
    // X-TIMESTAMP-MAP header and the sync controller's timeline mapping
    this.updateTimeMapping_(
      segmentInfo,
      this.syncController_.timelines[segmentInfo.timeline],
      this.playlist_
    );

    // derive segment timing info from the cues when possible, otherwise
    // fall back to the playlist-derived estimate
    if (segmentInfo.cues.length) {
      segmentInfo.timingInfo = {
        start: segmentInfo.cues[0].startTime,
        end: segmentInfo.cues[segmentInfo.cues.length - 1].endTime
      };
    } else {
      segmentInfo.timingInfo = {
        start: segmentInfo.startOfSegment,
        end: segmentInfo.startOfSegment + segmentInfo.duration
      };
    }

    // a sync request only exists to establish timing; don't append its cues
    if (segmentInfo.isSyncRequest) {
      this.trigger('syncinfoupdate');
      this.pendingSegment_ = null;
      this.state = 'READY';
      return;
    }

    segmentInfo.byteLength = segmentInfo.bytes.byteLength;

    this.mediaSecondsLoaded += segment.duration;

    segmentInfo.cues.forEach((cue) => {
      // remove any overlapping cues to prevent doubling
      this.remove(cue.startTime, cue.endTime);
      // native tracks need real VTTCue instances; emulated tracks accept
      // the parsed cue objects directly
      this.subtitlesTrack_.addCue(this.featuresNativeTextTracks_ ?
        new window.VTTCue(cue.startTime, cue.endTime, cue.text) :
        cue);
    });

    this.handleAppendsDone_();
  }
|
||||
|
||||
  /**
   * Handles incoming parsed media data.
   *
   * Intentionally a no-op: the VTT loader appends cues directly to a text
   * track in segmentRequestFinished_, so there is no transmuxed data to handle.
   */
  handleData_() {
    // noop as we shouldn't be getting video/audio data captions
    // that we do not support here.
  }
|
||||
  /**
   * Updates the end of a segment's timing info.
   *
   * Intentionally a no-op: cue-based timing info is computed in full inside
   * segmentRequestFinished_, so there is nothing to adjust afterwards.
   */
  updateTimingInfoEnd_() {
    // noop
  }
|
||||
|
||||
/**
|
||||
* Uses the WebVTT parser to parse the segment response
|
||||
*
|
||||
* @param {Object} segmentInfo
|
||||
* a segment info object that describes the current segment
|
||||
* @private
|
||||
*/
|
||||
parseVTTCues_(segmentInfo) {
|
||||
let decoder;
|
||||
let decodeBytesToString = false;
|
||||
|
||||
if (typeof window.TextDecoder === 'function') {
|
||||
decoder = new window.TextDecoder('utf8');
|
||||
} else {
|
||||
decoder = window.WebVTT.StringDecoder();
|
||||
decodeBytesToString = true;
|
||||
}
|
||||
|
||||
const parser = new window.WebVTT.Parser(
|
||||
window,
|
||||
window.vttjs,
|
||||
decoder
|
||||
);
|
||||
|
||||
segmentInfo.cues = [];
|
||||
segmentInfo.timestampmap = { MPEGTS: 0, LOCAL: 0 };
|
||||
|
||||
parser.oncue = segmentInfo.cues.push.bind(segmentInfo.cues);
|
||||
parser.ontimestampmap = (map) => {
|
||||
segmentInfo.timestampmap = map;
|
||||
};
|
||||
parser.onparsingerror = (error) => {
|
||||
videojs.log.warn('Error encountered when parsing cues: ' + error.message);
|
||||
};
|
||||
|
||||
if (segmentInfo.segment.map) {
|
||||
let mapData = segmentInfo.segment.map.bytes;
|
||||
|
||||
if (decodeBytesToString) {
|
||||
mapData = uint8ToUtf8(mapData);
|
||||
}
|
||||
|
||||
parser.parse(mapData);
|
||||
}
|
||||
|
||||
let segmentData = segmentInfo.bytes;
|
||||
|
||||
if (decodeBytesToString) {
|
||||
segmentData = uint8ToUtf8(segmentData);
|
||||
}
|
||||
|
||||
parser.parse(segmentData);
|
||||
parser.flush();
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the start and end times of any cues parsed by the WebVTT parser using
|
||||
* the information parsed from the X-TIMESTAMP-MAP header and a TS to media time mapping
|
||||
* from the SyncController
|
||||
*
|
||||
* @param {Object} segmentInfo
|
||||
* a segment info object that describes the current segment
|
||||
* @param {Object} mappingObj
|
||||
* object containing a mapping from TS to media time
|
||||
* @param {Object} playlist
|
||||
* the playlist object containing the segment
|
||||
* @private
|
||||
*/
|
||||
updateTimeMapping_(segmentInfo, mappingObj, playlist) {
|
||||
const segment = segmentInfo.segment;
|
||||
|
||||
if (!mappingObj) {
|
||||
// If the sync controller does not have a mapping of TS to Media Time for the
|
||||
// timeline, then we don't have enough information to update the cue
|
||||
// start/end times
|
||||
return;
|
||||
}
|
||||
|
||||
if (!segmentInfo.cues.length) {
|
||||
// If there are no cues, we also do not have enough information to figure out
|
||||
// segment timing. Mark that the segment contains no cues so we don't re-request
|
||||
// an empty segment.
|
||||
segment.empty = true;
|
||||
return;
|
||||
}
|
||||
|
||||
const timestampmap = segmentInfo.timestampmap;
|
||||
const diff = (timestampmap.MPEGTS / ONE_SECOND_IN_TS) - timestampmap.LOCAL + mappingObj.mapping;
|
||||
|
||||
segmentInfo.cues.forEach((cue) => {
|
||||
// First convert cue time to TS time using the timestamp-map provided within the vtt
|
||||
cue.startTime += diff;
|
||||
cue.endTime += diff;
|
||||
});
|
||||
|
||||
if (!playlist.syncInfo) {
|
||||
const firstStart = segmentInfo.cues[0].startTime;
|
||||
const lastStart = segmentInfo.cues[segmentInfo.cues.length - 1].startTime;
|
||||
|
||||
playlist.syncInfo = {
|
||||
mediaSequence: playlist.mediaSequence + segmentInfo.mediaIndex,
|
||||
time: Math.min(firstStart, lastStart - segment.duration)
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
127
build/javascript/node_modules/@videojs/http-streaming/src/xhr.js
generated
vendored
Normal file
127
build/javascript/node_modules/@videojs/http-streaming/src/xhr.js
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
/**
|
||||
* @file xhr.js
|
||||
*/
|
||||
|
||||
/**
|
||||
* A wrapper for videojs.xhr that tracks bandwidth.
|
||||
*
|
||||
* @param {Object} options options for the XHR
|
||||
* @param {Function} callback the callback to call when done
|
||||
* @return {Request} the xhr request that is going to be made
|
||||
*/
|
||||
import videojs from 'video.js';
|
||||
|
||||
const {
|
||||
xhr: videojsXHR,
|
||||
mergeOptions
|
||||
} = videojs;
|
||||
|
||||
/**
 * Normalizes the result of a videojs.xhr request and invokes `callback`.
 *
 * Records transfer stats (response time, round trip time, bytes received,
 * bandwidth) on the request, copies response headers, flags timeouts, and
 * converts unexpected HTTP status codes into an Error.
 *
 * @param {Object} request - the xhr request object
 * @param {Object|null} error - the error reported by videojs.xhr, if any
 * @param {Object} response - the response object from videojs.xhr
 * @param {Function} callback - called with (error, request) when done
 */
const callbackWrapper = function(request, error, response, callback) {
  const payload = request.responseType === 'arraybuffer' ?
    request.response :
    request.responseText;

  if (!error && payload) {
    // timing/size stats used downstream for bandwidth estimation
    request.responseTime = Date.now();
    request.roundTripTime = request.responseTime - request.requestTime;
    request.bytesReceived = payload.byteLength || payload.length;

    if (!request.bandwidth) {
      request.bandwidth =
        Math.floor((request.bytesReceived / request.roundTripTime) * 8 * 1000);
    }
  }

  if (response.headers) {
    request.responseHeaders = response.headers;
  }

  // videojs.xhr now uses a specific code on the error
  // object to signal that a request has timed out instead
  // of setting a boolean on the request object
  if (error && error.code === 'ETIMEDOUT') {
    request.timedout = true;
  }

  // videojs.xhr no longer considers status codes outside of 200 and 0
  // (for file uris) to be errors, but the old XHR did, so emulate that
  // behavior. Status 206 may be used in response to byterange requests.
  const acceptableStatus =
    response.statusCode === 200 ||
    response.statusCode === 206 ||
    response.statusCode === 0;

  if (!error && !request.aborted && !acceptableStatus) {
    error = new Error('XHR Failed with a response of: ' +
                      (request && (payload || request.responseText)));
  }

  callback(error, request);
};
|
||||
|
||||
/**
 * Builds an xhr function with a default timeout, support for a
 * user-supplied `beforeRequest` hook, and stat tracking via callbackWrapper.
 *
 * @return {Function} an xhr-compatible request function
 */
const xhrFactory = function() {
  const xhr = function XhrFunction(options, callback) {
    // Add a default timeout
    options = mergeOptions({
      timeout: 45e3
    }, options);

    // Allow an optional user-specified function to modify the option
    // object before we construct the xhr request
    // (a hook set directly on this function wins over the global videojs one)
    const beforeRequest = XhrFunction.beforeRequest || videojs.Vhs.xhr.beforeRequest;

    if (beforeRequest && typeof beforeRequest === 'function') {
      const newOptions = beforeRequest(options);

      if (newOptions) {
        options = newOptions;
      }
    }

    const request = videojsXHR(options, function(error, response) {
      return callbackWrapper(request, error, response, callback);
    });
    const originalAbort = request.abort;

    // wrap abort so aborted requests can be recognized in callbackWrapper
    request.abort = function() {
      request.aborted = true;
      return originalAbort.apply(request, arguments);
    };
    request.uri = options.uri;
    // paired with responseTime in callbackWrapper to compute round trip time
    request.requestTime = Date.now();
    return request;
  };

  return xhr;
};
|
||||
|
||||
/**
 * Turns segment byterange into a string suitable for use in
 * HTTP Range requests
 *
 * @param {Object} byterange - an object with two values defining the start and end
 *                             of a byte-range
 * @return {string} a Range header value, e.g. 'bytes=0-1023'
 */
const byterangeStr = function(byterange) {
  const firstByte = byterange.offset;
  // HTTP Range headers use inclusive ranges, so the last byte is one less
  // than offset + length
  const lastByte = byterange.offset + byterange.length - 1;

  return `bytes=${firstByte}-${lastByte}`;
};
|
||||
|
||||
/**
 * Defines headers for use in the xhr request for a particular segment.
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @return {Object} headers to pass to xhr (a Range header when the segment
 *                  has a byterange, otherwise empty)
 */
const segmentXhrHeaders = function(segment) {
  if (!segment.byterange) {
    return {};
  }

  return { Range: byterangeStr(segment.byterange) };
};
|
||||
|
||||
export {segmentXhrHeaders, callbackWrapper, xhrFactory};
|
||||
|
||||
export default xhrFactory;
|
||||
Reference in New Issue
Block a user