src/controller/timeline-controller.ts

import { Events } from '../events';
import Cea608Parser, { CaptionScreen } from '../utils/cea-608-parser';
import OutputFilter from '../utils/output-filter';
import { parseWebVTT } from '../utils/webvtt-parser';
import {
  sendAddTrackEvent,
  clearCurrentCues,
  addCueToTrack,
  removeCuesInRange,
} from '../utils/texttrack-utils';
import { parseIMSC1, IMSC1_CODEC } from '../utils/imsc1-ttml-parser';
import { appendUint8Array } from '../utils/mp4-tools';
import { PlaylistLevelType } from '../types/loader';
import { Fragment } from '../loader/fragment';
import {
  FragParsingUserdataData,
  FragLoadedData,
  FragDecryptedData,
  MediaAttachingData,
  ManifestLoadedData,
  InitPTSFoundData,
  SubtitleTracksUpdatedData,
  BufferFlushingData,
  FragLoadingData,
} from '../types/events';
import { logger } from '../utils/logger';
import type Hls from '../hls';
import type { ComponentAPI } from '../types/component-api';
import type { HlsConfig } from '../config';
import type { CuesInterface } from '../utils/cues';
import type { MediaPlaylist } from '../types/media-playlist';
import type { VTTCCs } from '../types/vtt';

type TrackProperties = {
  label: string;
  languageCode: string;
  media?: MediaPlaylist;
};

type NonNativeCaptionsTrack = {
  _id?: string;
  label: string;
  kind: string;
  default: boolean;
  closedCaptions?: MediaPlaylist;
  subtitleTrack?: MediaPlaylist;
};

export class TimelineController implements ComponentAPI {
  private hls: Hls;
  private media: HTMLMediaElement | null = null;
  private config: HlsConfig;
  private enabled: boolean = true;
  private Cues: CuesInterface;
  private textTracks: Array<TextTrack> = [];
  private tracks: Array<MediaPlaylist> = [];
  private initPTS: Array<number> = [];
  private timescale: Array<number> = [];
  private unparsedVttFrags: Array<FragLoadedData | FragDecryptedData> = [];
  private captionsTracks: Record<string, TextTrack> = {};
  private nonNativeCaptionsTracks: Record<string, NonNativeCaptionsTrack> = {};
  private cea608Parser1!: Cea608Parser;
  private cea608Parser2!: Cea608Parser;
  private lastSn: number = -1;
  private lastPartIndex: number = -1;
  private prevCC: number = -1;
  private vttCCs: VTTCCs = newVTTCCs();
  private captionsProperties: {
    textTrack1: TrackProperties;
    textTrack2: TrackProperties;
    textTrack3: TrackProperties;
    textTrack4: TrackProperties;
  };

  constructor(hls: Hls) {
    this.hls = hls;
    this.config = hls.config;
    this.Cues = hls.config.cueHandler;

    this.captionsProperties = {
      textTrack1: {
        label: this.config.captionsTextTrack1Label,
        languageCode: this.config.captionsTextTrack1LanguageCode,
      },
      textTrack2: {
        label: this.config.captionsTextTrack2Label,
        languageCode: this.config.captionsTextTrack2LanguageCode,
      },
      textTrack3: {
        label: this.config.captionsTextTrack3Label,
        languageCode: this.config.captionsTextTrack3LanguageCode,
      },
      textTrack4: {
        label: this.config.captionsTextTrack4Label,
        languageCode: this.config.captionsTextTrack4LanguageCode,
      },
    };

    if (this.config.enableCEA708Captions) {
      const channel1 = new OutputFilter(this, 'textTrack1');
      const channel2 = new OutputFilter(this, 'textTrack2');
      const channel3 = new OutputFilter(this, 'textTrack3');
      const channel4 = new OutputFilter(this, 'textTrack4');
      this.cea608Parser1 = new Cea608Parser(1, channel1, channel2);
      this.cea608Parser2 = new Cea608Parser(3, channel3, channel4);
    }

    hls.on(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.on(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.on(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.on(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.on(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.on(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.on(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.on(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.on(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
  }
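
  // The caption label/language pairs above come straight from HlsConfig. A
  // minimal sketch of supplying them at player creation (the values shown are
  // illustrative, not the library defaults):
  //
  //   const hls = new Hls({
  //     enableCEA708Captions: true,
  //     captionsTextTrack1Label: 'English CC',
  //     captionsTextTrack1LanguageCode: 'en',
  //   });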

  public destroy(): void {
    const { hls } = this;
    hls.off(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.off(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.off(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.off(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.off(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.off(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.off(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.off(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.off(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
    // @ts-ignore
    this.hls = this.config = this.cea608Parser1 = this.cea608Parser2 = null;
  }

  public addCues(
    trackName: string,
    startTime: number,
    endTime: number,
    screen: CaptionScreen,
    cueRanges: Array<[number, number]>
  ) {
    // skip cues which overlap more than 50% with previously parsed time ranges
    let merged = false;
    for (let i = cueRanges.length; i--; ) {
      const cueRange = cueRanges[i];
      const overlap = intersection(
        cueRange[0],
        cueRange[1],
        startTime,
        endTime
      );
      if (overlap >= 0) {
        cueRange[0] = Math.min(cueRange[0], startTime);
        cueRange[1] = Math.max(cueRange[1], endTime);
        merged = true;
        if (overlap / (endTime - startTime) > 0.5) {
          return;
        }
      }
    }
    if (!merged) {
      cueRanges.push([startTime, endTime]);
    }

    if (this.config.renderTextTracksNatively) {
      const track = this.captionsTracks[trackName];
      this.Cues.newCue(track, startTime, endTime, screen);
    } else {
      const cues = this.Cues.newCue(null, startTime, endTime, screen);
      this.hls.trigger(Events.CUES_PARSED, {
        type: 'captions',
        cues,
        track: trackName,
      });
    }
  }
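
  // Worked example of the 50% rule above (numbers are illustrative): given an
  // existing range [0, 4], a new cue spanning [3, 5] yields
  // intersection(0, 4, 3, 5) = min(4, 5) - max(0, 3) = 1; the range widens to
  // [0, 5] and, since 1 / (5 - 3) = 0.5 is not greater than 0.5, the cue is
  // still added. A new cue spanning [1, 3] overlaps by 2 / (3 - 1) = 1.0 and
  // is skipped as a duplicate.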

  // Triggered when an initial PTS is found; used for synchronisation of WebVTT.
  private onInitPtsFound(
    event: Events.INIT_PTS_FOUND,
    { frag, id, initPTS, timescale }: InitPTSFoundData
  ) {
    const { unparsedVttFrags } = this;
    if (id === 'main') {
      this.initPTS[frag.cc] = initPTS;
      this.timescale[frag.cc] = timescale;
    }

    // Due to asynchronous processing, initial PTS may arrive later than the first VTT fragments are loaded.
    // Parse any unparsed fragments upon receiving the initial PTS.
    if (unparsedVttFrags.length) {
      this.unparsedVttFrags = [];
      unparsedVttFrags.forEach((frag) => {
        this.onFragLoaded(Events.FRAG_LOADED, frag as FragLoadedData);
      });
    }
  }

  private getExistingTrack(trackName: string): TextTrack | null {
    const { media } = this;
    if (media) {
      for (let i = 0; i < media.textTracks.length; i++) {
        const textTrack = media.textTracks[i];
        if (textTrack[trackName]) {
          return textTrack;
        }
      }
    }
    return null;
  }

  public createCaptionsTrack(trackName: string) {
    if (this.config.renderTextTracksNatively) {
      this.createNativeTrack(trackName);
    } else {
      this.createNonNativeTrack(trackName);
    }
  }

  private createNativeTrack(trackName: string) {
    if (this.captionsTracks[trackName]) {
      return;
    }
    const { captionsProperties, captionsTracks, media } = this;
    const { label, languageCode } = captionsProperties[trackName];
    // Enable reuse of existing text track.
    const existingTrack = this.getExistingTrack(trackName);
    if (!existingTrack) {
      const textTrack = this.createTextTrack('captions', label, languageCode);
      if (textTrack) {
        // Set a special property on the track so we know it's managed by Hls.js
        textTrack[trackName] = true;
        captionsTracks[trackName] = textTrack;
      }
    } else {
      captionsTracks[trackName] = existingTrack;
      clearCurrentCues(captionsTracks[trackName]);
      sendAddTrackEvent(captionsTracks[trackName], media as HTMLMediaElement);
    }
  }

  private createNonNativeTrack(trackName: string) {
    if (this.nonNativeCaptionsTracks[trackName]) {
      return;
    }
    // Create a list of a single track for the provider to consume
    const trackProperties: TrackProperties = this.captionsProperties[trackName];
    if (!trackProperties) {
      return;
    }
    const label = trackProperties.label as string;
    const track = {
      _id: trackName,
      label,
      kind: 'captions',
      default: trackProperties.media ? !!trackProperties.media.default : false,
      closedCaptions: trackProperties.media,
    };
    this.nonNativeCaptionsTracks[trackName] = track;
    this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, { tracks: [track] });
  }
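
  // When renderTextTracksNatively is false, the application renders cues
  // itself. A minimal sketch of consuming the track announced above
  // (registerTrackInCustomUI is a hypothetical app-side helper):
  //
  //   hls.on(Events.NON_NATIVE_TEXT_TRACKS_FOUND, (event, data) => {
  //     data.tracks.forEach((track) => registerTrackInCustomUI(track));
  //   });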

  private createTextTrack(
    kind: TextTrackKind,
    label: string,
    lang?: string
  ): TextTrack | undefined {
    const media = this.media;
    if (!media) {
      return;
    }
    return media.addTextTrack(kind, label, lang);
  }

  private onMediaAttaching(
    event: Events.MEDIA_ATTACHING,
    data: MediaAttachingData
  ) {
    this.media = data.media;
    this._cleanTracks();
  }

  private onMediaDetaching() {
    const { captionsTracks } = this;
    Object.keys(captionsTracks).forEach((trackName) => {
      clearCurrentCues(captionsTracks[trackName]);
      delete captionsTracks[trackName];
    });
    this.nonNativeCaptionsTracks = {};
  }

  private onManifestLoading() {
    this.lastSn = -1; // Detect discontinuity in fragment parsing
    this.lastPartIndex = -1;
    this.prevCC = -1;
    this.vttCCs = newVTTCCs(); // Detect discontinuity in subtitle manifests
    this._cleanTracks();
    this.tracks = [];
    this.captionsTracks = {};
    this.nonNativeCaptionsTracks = {};
    this.textTracks = [];
    this.unparsedVttFrags = this.unparsedVttFrags || [];
    this.initPTS = [];
    this.timescale = [];
    if (this.cea608Parser1 && this.cea608Parser2) {
      this.cea608Parser1.reset();
      this.cea608Parser2.reset();
    }
  }

  private _cleanTracks() {
    // clear outdated subtitles
    const { media } = this;
    if (!media) {
      return;
    }
    const textTracks = media.textTracks;
    if (textTracks) {
      for (let i = 0; i < textTracks.length; i++) {
        clearCurrentCues(textTracks[i]);
      }
    }
  }

  private onSubtitleTracksUpdated(
    event: Events.SUBTITLE_TRACKS_UPDATED,
    data: SubtitleTracksUpdatedData
  ) {
    this.textTracks = [];
    const tracks: Array<MediaPlaylist> = data.subtitleTracks || [];
    const hasIMSC1 = tracks.some((track) => track.textCodec === IMSC1_CODEC);
    if (this.config.enableWebVTT || (hasIMSC1 && this.config.enableIMSC1)) {
      const sameTracks =
        this.tracks && tracks && this.tracks.length === tracks.length;
      this.tracks = tracks || [];

      if (this.config.renderTextTracksNatively) {
        const inUseTracks = this.media ? this.media.textTracks : [];

        this.tracks.forEach((track, index) => {
          let textTrack: TextTrack | undefined;
          if (index < inUseTracks.length) {
            let inUseTrack: TextTrack | null = null;

            for (let i = 0; i < inUseTracks.length; i++) {
              if (canReuseVttTextTrack(inUseTracks[i], track)) {
                inUseTrack = inUseTracks[i];
                break;
              }
            }

            // Reuse tracks with the same label, but do not reuse 608/708 tracks
            if (inUseTrack) {
              textTrack = inUseTrack;
            }
          }
          if (textTrack) {
            clearCurrentCues(textTrack);
          } else {
            const textTrackKind =
              this._captionsOrSubtitlesFromCharacteristics(track);
            textTrack = this.createTextTrack(
              textTrackKind,
              track.name,
              track.lang
            );
            if (textTrack) {
              textTrack.mode = 'disabled';
            }
          }
          if (textTrack) {
            (textTrack as any).groupId = track.groupId;
            this.textTracks.push(textTrack);
          }
        });
      } else if (!sameTracks && this.tracks && this.tracks.length) {
        // Create a list of tracks for the provider to consume
        const tracksList = this.tracks.map((track) => {
          return {
            label: track.name,
            kind: track.type.toLowerCase(),
            default: track.default,
            subtitleTrack: track,
          };
        });
        this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, {
          tracks: tracksList,
        });
      }
    }
  }

  private _captionsOrSubtitlesFromCharacteristics(
    track: MediaPlaylist
  ): TextTrackKind {
    if (track.attrs?.CHARACTERISTICS) {
      const transcribesSpokenDialog = /transcribes-spoken-dialog/gi.test(
        track.attrs.CHARACTERISTICS
      );
      const describesMusicAndSound = /describes-music-and-sound/gi.test(
        track.attrs.CHARACTERISTICS
      );

      if (transcribesSpokenDialog && describesMusicAndSound) {
        return 'captions';
      }
    }

    return 'subtitles';
  }
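
  // Example of a rendition the method above classifies as 'captions': both
  // regexes match the standard Apple accessibility identifiers, e.g. (NAME
  // and URI illustrative)
  //
  //   #EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="English",LANGUAGE="en",
  //     CHARACTERISTICS="public.accessibility.transcribes-spoken-dialog,
  //     public.accessibility.describes-music-and-sound",URI="eng.m3u8"
  //
  // A rendition with only one (or neither) characteristic falls through to
  // 'subtitles'.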

  private onManifestLoaded(
    event: Events.MANIFEST_LOADED,
    data: ManifestLoadedData
  ) {
    if (this.config.enableCEA708Captions && data.captions) {
      data.captions.forEach((captionsTrack) => {
        const instreamIdMatch = /(?:CC|SERVICE)([1-4])/.exec(
          captionsTrack.instreamId as string
        );
        if (!instreamIdMatch) {
          return;
        }
        const trackName = `textTrack${instreamIdMatch[1]}`;
        const trackProperties: TrackProperties =
          this.captionsProperties[trackName];
        if (!trackProperties) {
          return;
        }
        trackProperties.label = captionsTrack.name;
        if (captionsTrack.lang) {
          // optional attribute
          trackProperties.languageCode = captionsTrack.lang;
        }
        trackProperties.media = captionsTrack;
      });
    }
  }
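
  // Example of the manifest data consumed above: a rendition such as
  // (NAME illustrative)
  //
  //   #EXT-X-MEDIA:TYPE=CLOSED-CAPTIONS,GROUP-ID="cc",NAME="English",
  //     LANGUAGE="en",INSTREAM-ID="CC1"
  //
  // matches /(?:CC|SERVICE)([1-4])/ and relabels textTrack1, while
  // INSTREAM-ID="SERVICE3" would map to textTrack3.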

  private closedCaptionsForLevel(frag: Fragment): string | undefined {
    const level = this.hls.levels[frag.level];
    return level?.attrs['CLOSED-CAPTIONS'];
  }

  private onFragLoading(event: Events.FRAG_LOADING, data: FragLoadingData) {
    const { cea608Parser1, cea608Parser2, lastSn, lastPartIndex } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }
    // if this frag isn't contiguous, clear the parser so cues with bad start/end times aren't added to the textTrack
    if (data.frag.type === PlaylistLevelType.MAIN) {
      const sn = data.frag.sn;
      const partIndex = data?.part?.index ?? -1;
      if (
        !(
          sn === lastSn + 1 ||
          (sn === lastSn && partIndex === lastPartIndex + 1)
        )
      ) {
        cea608Parser1.reset();
        cea608Parser2.reset();
      }
      this.lastSn = sn as number;
      this.lastPartIndex = partIndex;
    }
  }
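
  // The contiguity check above in concrete terms (example numbers are
  // illustrative): after a fragment with sn 100 and part 2, either
  // sn 100 / part 3 or any part of sn 101 counts as contiguous; anything else
  // (a seek to sn 105, say) resets both 608 parsers so stale cue timing is
  // discarded.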

  private onFragLoaded(
    event: Events.FRAG_LOADED,
    data: FragDecryptedData | FragLoadedData
  ) {
    const { frag, payload } = data;
    const { initPTS, unparsedVttFrags } = this;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      // If fragment is subtitle type, parse as WebVTT.
      if (payload.byteLength) {
        // We need an initial synchronisation PTS. Store fragments as long as none has arrived.
        if (!Number.isFinite(initPTS[frag.cc])) {
          unparsedVttFrags.push(data);
          if (initPTS.length) {
            // finish unsuccessfully, otherwise the subtitle-stream-controller could be blocked from loading new frags.
            this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
              success: false,
              frag,
              error: new Error('Missing initial subtitle PTS'),
            });
          }
          return;
        }

        const decryptData = frag.decryptdata;
        // fragment after decryption has a stats object
        const decrypted = 'stats' in data;
        // If the subtitles are not encrypted, parse VTTs now. Otherwise, we need to wait.
        if (decryptData == null || !decryptData.encrypted || decrypted) {
          const trackPlaylistMedia = this.tracks[frag.level];
          const vttCCs = this.vttCCs;
          if (!vttCCs[frag.cc]) {
            vttCCs[frag.cc] = {
              start: frag.start,
              prevCC: this.prevCC,
              new: true,
            };
            this.prevCC = frag.cc;
          }
          if (
            trackPlaylistMedia &&
            trackPlaylistMedia.textCodec === IMSC1_CODEC
          ) {
            this._parseIMSC1(frag, payload);
          } else {
            this._parseVTTs(frag, payload, vttCCs);
          }
        }
      } else {
        // In case there is no payload, finish unsuccessfully.
        this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag,
          error: new Error('Empty subtitle payload'),
        });
      }
    }
  }

  private _parseIMSC1(frag: Fragment, payload: ArrayBuffer) {
    const hls = this.hls;
    parseIMSC1(
      payload,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag: frag,
        });
      },
      (error) => {
        logger.log(`Failed to parse IMSC1: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag: frag,
          error,
        });
      }
    );
  }

  private _parseVTTs(frag: Fragment, payload: ArrayBuffer, vttCCs: any) {
    const hls = this.hls;
    // Parse the WebVTT file contents.
    const payloadWebVTT = frag.initSegment?.data
      ? appendUint8Array(frag.initSegment.data, new Uint8Array(payload))
      : payload;
    parseWebVTT(
      payloadWebVTT,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      vttCCs,
      frag.cc,
      frag.start,
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag: frag,
        });
      },
      (error) => {
        this._fallbackToIMSC1(frag, payload);
        // Something went wrong while parsing. Trigger event with success false.
        logger.log(`Failed to parse VTT cue: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag: frag,
          error,
        });
      }
    );
  }

  private _fallbackToIMSC1(frag: Fragment, payload: ArrayBuffer) {
    // If textCodec is unknown, try parsing as IMSC1. Set textCodec based on the result
    const trackPlaylistMedia = this.tracks[frag.level];
    if (!trackPlaylistMedia.textCodec) {
      parseIMSC1(
        payload,
        this.initPTS[frag.cc],
        this.timescale[frag.cc],
        () => {
          trackPlaylistMedia.textCodec = IMSC1_CODEC;
          this._parseIMSC1(frag, payload);
        },
        () => {
          trackPlaylistMedia.textCodec = 'wvtt';
        }
      );
    }
  }

  private _appendCues(cues: VTTCue[], fragLevel: number) {
    const hls = this.hls;
    if (this.config.renderTextTracksNatively) {
      const textTrack = this.textTracks[fragLevel];
      // WebVTTParser.parse is asynchronous. If the currently selected text track's
      // mode is set to "disabled" before parsing completes, `textTrack.cues` will be
      // null and calling its getCueById method would throw. Because we return early
      // when the mode is "disabled", the cues accessed below can never be null.
      if (!textTrack || textTrack.mode === 'disabled') {
        return;
      }
      cues.forEach((cue) => addCueToTrack(textTrack, cue));
    } else {
      const currentTrack = this.tracks[fragLevel];
      if (!currentTrack) {
        return;
      }
      const track = currentTrack.default ? 'default' : 'subtitles' + fragLevel;
      hls.trigger(Events.CUES_PARSED, { type: 'subtitles', cues, track });
    }
  }
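
  // With renderTextTracksNatively disabled, parsed subtitle cues are delivered
  // via CUES_PARSED instead of being added to a TextTrack. A minimal consumer
  // sketch (renderCue is a hypothetical app-side function):
  //
  //   hls.on(Events.CUES_PARSED, (event, data) => {
  //     if (data.type === 'subtitles') {
  //       data.cues.forEach((cue) => renderCue(data.track, cue));
  //     }
  //   });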

  private onFragDecrypted(
    event: Events.FRAG_DECRYPTED,
    data: FragDecryptedData
  ) {
    const { frag } = data;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      if (!Number.isFinite(this.initPTS[frag.cc])) {
        this.unparsedVttFrags.push(data as unknown as FragLoadedData);
        return;
      }
      this.onFragLoaded(Events.FRAG_LOADED, data as unknown as FragLoadedData);
    }
  }

  private onSubtitleTracksCleared() {
    this.tracks = [];
    this.captionsTracks = {};
  }

  private onFragParsingUserdata(
    event: Events.FRAG_PARSING_USERDATA,
    data: FragParsingUserdataData
  ) {
    const { cea608Parser1, cea608Parser2 } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }

    const { frag, samples } = data;
    if (
      frag.type === PlaylistLevelType.MAIN &&
      this.closedCaptionsForLevel(frag) === 'NONE'
    ) {
      return;
    }
    // If the event contains captions (found in the bytes property), push all bytes into the parser immediately
    // It will create the proper timestamps based on the PTS value
    for (let i = 0; i < samples.length; i++) {
      const ccBytes = samples[i].bytes;
      if (ccBytes) {
        const ccdatas = this.extractCea608Data(ccBytes);
        cea608Parser1.addData(samples[i].pts, ccdatas[0]);
        cea608Parser2.addData(samples[i].pts, ccdatas[1]);
      }
    }
  }

  onBufferFlushing(
    event: Events.BUFFER_FLUSHING,
    { startOffset, endOffset, endOffsetSubtitles, type }: BufferFlushingData
  ) {
    const { media } = this;
    if (!media || media.currentTime < endOffset) {
      return;
    }
    // Clear 608 caption cues from the captions TextTracks when the video back buffer is flushed
    // Forward cues are never removed because we could lose streamed 608 content from recent fragments
    if (!type || type === 'video') {
      const { captionsTracks } = this;
      Object.keys(captionsTracks).forEach((trackName) =>
        removeCuesInRange(captionsTracks[trackName], startOffset, endOffset)
      );
    }
    if (this.config.renderTextTracksNatively) {
      // Clear VTT/IMSC1 subtitle cues from the subtitle TextTracks when the back buffer is flushed
      if (startOffset === 0 && endOffsetSubtitles !== undefined) {
        const { textTracks } = this;
        Object.keys(textTracks).forEach((trackName) =>
          removeCuesInRange(
            textTracks[trackName],
            startOffset,
            endOffsetSubtitles
          )
        );
      }
    }
  }

  private extractCea608Data(byteArray: Uint8Array): number[][] {
    const actualCCBytes: number[][] = [[], []];
    const count = byteArray[0] & 0x1f;
    let position = 2;

    for (let j = 0; j < count; j++) {
      const tmpByte = byteArray[position++];
      const ccbyte1 = 0x7f & byteArray[position++];
      const ccbyte2 = 0x7f & byteArray[position++];
      if (ccbyte1 === 0 && ccbyte2 === 0) {
        continue;
      }
      const ccValid = (0x04 & tmpByte) !== 0; // Support all four channels
      if (ccValid) {
        const ccType = 0x03 & tmpByte;
        if (
          0x00 /* CEA608 field1*/ === ccType ||
          0x01 /* CEA608 field2*/ === ccType
        ) {
          // Exclude CEA708 CC data.
          actualCCBytes[ccType].push(ccbyte1);
          actualCCBytes[ccType].push(ccbyte2);
        }
      }
    }
    return actualCCBytes;
  }
}
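
// The byte layout consumed by extractCea608Data follows the CEA-708 cc_data()
// syntax: byte 0 carries cc_count in its low five bits, byte 1 is skipped, and
// each following triplet is (cc_valid/cc_type byte, cc_data_1, cc_data_2).
// A hand-decoded sample (values are illustrative):
//
//   const bytes = Uint8Array.from([
//     0x01,       // cc_count = 1
//     0xff,       // skipped
//     0xfc,       // 0b11111100: cc_valid set, cc_type = 0 (608 field 1)
//     0x94, 0x2c, // byte pair; parity bits masked off, leaving 0x14, 0x2c
//   ]);
//   // extractCea608Data(bytes) -> [[0x14, 0x2c], []]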

function canReuseVttTextTrack(inUseTrack, manifestTrack): boolean {
  return (
    inUseTrack &&
    inUseTrack.label === manifestTrack.name &&
    !(inUseTrack.textTrack1 || inUseTrack.textTrack2)
  );
}

function intersection(x1: number, x2: number, y1: number, y2: number): number {
  return Math.min(x2, y2) - Math.max(x1, y1);
}
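
// intersection() returns the signed overlap length: positive when the ranges
// intersect, zero when they merely touch, negative when they are disjoint.
// For example, intersection(0, 10, 5, 15) === 5 and
// intersection(0, 4, 6, 9) === -2.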

function newVTTCCs(): VTTCCs {
  return {
    ccOffset: 0,
    presentationOffset: 0,
    0: {
      start: 0,
      prevCC: -1,
      new: true,
    },
  };
}
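
// TimelineController is an internal component instantiated by hls.js itself,
// so applications never construct it directly. A minimal sketch of how its
// output surfaces when renderTextTracksNatively is enabled (element id and
// URL are illustrative):
//
//   const video = document.getElementById('video') as HTMLVideoElement;
//   const hls = new Hls();
//   hls.attachMedia(video);
//   hls.loadSource('https://example.com/stream.m3u8');
//   // 608/708 captions and WebVTT subtitles appear as TextTracks:
//   video.textTracks.addEventListener('addtrack', (e) =>
//     console.log(e.track?.label, e.track?.kind)
//   );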