Source: AudioPlayer.js

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

var Guacamole = Guacamole || {};
/**
 * Abstract audio player which accepts, queues and plays back arbitrary audio
 * data. It is up to implementations of this class to provide some means of
 * handling a provided Guacamole.InputStream. Data received along the provided
 * stream is to be played back immediately.
 *
 * @constructor
 */
Guacamole.AudioPlayer = function AudioPlayer() {

    /**
     * Notifies this Guacamole.AudioPlayer that all audio up to the current
     * point in time has been given via the underlying stream, and that any
     * difference in time between queued audio data and the current time can be
     * considered latency.
     */
    this.sync = function sync() {
        // Default implementation - do nothing
    };

};
/**
 * Determines whether the given mimetype is supported by any built-in
 * implementation of Guacamole.AudioPlayer, and thus will be properly handled
 * by Guacamole.AudioPlayer.getInstance().
 *
 * @param {String} mimetype
 *     The mimetype to check.
 *
 * @returns {Boolean}
 *     true if the given mimetype is supported by any built-in
 *     Guacamole.AudioPlayer, false otherwise.
 */
Guacamole.AudioPlayer.isSupportedType = function isSupportedType(mimetype) {

    return Guacamole.RawAudioPlayer.isSupportedType(mimetype);

};
/**
 * Returns a list of all mimetypes supported by any built-in
 * Guacamole.AudioPlayer, in rough order of priority. Beware that only the core
 * mimetypes themselves will be listed. Any mimetype parameters, even required
 * ones, will not be included in the list. For example, "audio/L8" is a
 * supported raw audio mimetype, but it is invalid without additional
 * parameters. Something like "audio/L8;rate=44100" would be valid, however
 * (see https://tools.ietf.org/html/rfc4856).
 *
 * @returns {String[]}
 *     A list of all mimetypes supported by any built-in Guacamole.AudioPlayer,
 *     excluding any parameters.
 */
Guacamole.AudioPlayer.getSupportedTypes = function getSupportedTypes() {

    return Guacamole.RawAudioPlayer.getSupportedTypes();

};
/**
 * Returns an instance of Guacamole.AudioPlayer providing support for the given
 * audio format. If support for the given audio format is not available, null
 * is returned.
 *
 * @param {Guacamole.InputStream} stream
 *     The Guacamole.InputStream to read audio data from.
 *
 * @param {String} mimetype
 *     The mimetype of the audio data in the provided stream.
 *
 * @returns {Guacamole.AudioPlayer}
 *     A Guacamole.AudioPlayer instance supporting the given mimetype and
 *     reading from the given stream, or null if support for the given mimetype
 *     is absent.
 */
Guacamole.AudioPlayer.getInstance = function getInstance(stream, mimetype) {

    // Use raw audio player if possible
    if (Guacamole.RawAudioPlayer.isSupportedType(mimetype))
        return new Guacamole.RawAudioPlayer(stream, mimetype);

    // No support for given mimetype
    return null;

};
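
/*
 * Illustrative usage sketch (added note, not part of the original file): code
 * that receives a Guacamole audio stream might obtain a player as follows. The
 * "stream" and "mimetype" values are assumed to be supplied by the surrounding
 * code, and sync() would typically be called whenever the connection indicates
 * that all pending audio has been sent.
 *
 *     var player = Guacamole.AudioPlayer.getInstance(stream, mimetype);
 *     if (player)
 *         player.sync();
 *     else
 *         console.warn('Unsupported audio mimetype: ' + mimetype);
 */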
/**
 * Implementation of Guacamole.AudioPlayer providing support for raw PCM format
 * audio. This player relies only on the Web Audio API and does not require any
 * browser-level support for its audio formats.
 *
 * @constructor
 * @augments Guacamole.AudioPlayer
 * @param {Guacamole.InputStream} stream
 *     The Guacamole.InputStream to read audio data from.
 *
 * @param {String} mimetype
 *     The mimetype of the audio data in the provided stream, which must be an
 *     "audio/L8" or "audio/L16" mimetype with necessary parameters, such as:
 *     "audio/L16;rate=44100,channels=2".
 */
Guacamole.RawAudioPlayer = function RawAudioPlayer(stream, mimetype) {

    /**
     * The format of audio this player will decode.
     *
     * @private
     * @type {Guacamole.RawAudioFormat}
     */
    var format = Guacamole.RawAudioFormat.parse(mimetype);

    /**
     * An instance of a Web Audio API AudioContext object, or null if the
     * Web Audio API is not supported.
     *
     * @private
     * @type {AudioContext}
     */
    var context = Guacamole.AudioContextFactory.getAudioContext();

    /**
     * The earliest possible time that the next packet could play without
     * overlapping an already-playing packet, in seconds. Note that while this
     * value is in seconds, it is not an integer value and has microsecond
     * resolution.
     *
     * @private
     * @type {Number}
     */
    var nextPacketTime = context.currentTime;
    /**
     * Guacamole.ArrayBufferReader wrapped around the audio input stream
     * provided when this Guacamole.RawAudioPlayer was created.
     *
     * @private
     * @type {Guacamole.ArrayBufferReader}
     */
    var reader = new Guacamole.ArrayBufferReader(stream);
    /**
     * The minimum size of an audio packet split by splitAudioPacket(), in
     * seconds. Audio packets smaller than this will not be split, nor will the
     * split result of a larger packet ever be smaller in size than this
     * minimum.
     *
     * @private
     * @constant
     * @type {Number}
     */
    var MIN_SPLIT_SIZE = 0.02;

    /**
     * The maximum amount of latency to allow between the buffered data stream
     * and the playback position, in seconds. Initially, this is set to
     * roughly one third of a second.
     *
     * @private
     * @type {Number}
     */
    var maxLatency = 0.3;

    /**
     * The type of typed array that will be used to represent each audio packet
     * internally. This will be either Int8Array or Int16Array, depending on
     * whether the raw audio format is 8-bit or 16-bit.
     *
     * @private
     * @constructor
     */
    var SampleArray = (format.bytesPerSample === 1) ? window.Int8Array : window.Int16Array;

    /**
     * The maximum absolute value of any sample within a raw audio packet
     * received by this audio player. This depends only on the size of each
     * sample, and will be 128 for 8-bit audio and 32768 for 16-bit audio.
     *
     * @private
     * @type {Number}
     */
    var maxSampleValue = (format.bytesPerSample === 1) ? 128 : 32768;

    /**
     * The queue of all pending audio packets, as an array of sample arrays.
     * Audio packets which are pending playback will be added to this queue for
     * further manipulation prior to scheduling via the Web Audio API. Once an
     * audio packet leaves this queue and is scheduled via the Web Audio API,
     * no further modifications can be made to that packet.
     *
     * @private
     * @type {SampleArray[]}
     */
    var packetQueue = [];
    /**
     * Given an array of audio packets, returns a single audio packet
     * containing the concatenation of those packets.
     *
     * @private
     * @param {SampleArray[]} packets
     *     The array of audio packets to concatenate.
     *
     * @returns {SampleArray}
     *     A single audio packet containing the concatenation of all given
     *     audio packets. If no packets are provided, this will be undefined.
     */
    var joinAudioPackets = function joinAudioPackets(packets) {

        // Do not bother joining if one or fewer packets are in the queue
        if (packets.length <= 1)
            return packets[0];

        // Determine total sample length of the entire queue
        var totalLength = 0;
        packets.forEach(function addPacketLengths(packet) {
            totalLength += packet.length;
        });

        // Append each packet within queue
        var offset = 0;
        var joined = new SampleArray(totalLength);
        packets.forEach(function appendPacket(packet) {
            joined.set(packet, offset);
            offset += packet.length;
        });

        return joined;

    };
    /**
     * Given a single packet of audio data, splits off an arbitrary length of
     * audio data from the beginning of that packet, returning the split result
     * as an array of two packets. The split location is determined through an
     * algorithm intended to minimize the likelihood of audible clicking between
     * packets. If no such split location is possible, an array containing only
     * the originally-provided audio packet is returned.
     *
     * @private
     * @param {SampleArray} data
     *     The audio packet to split.
     *
     * @returns {SampleArray[]}
     *     An array of audio packets containing the result of splitting the
     *     provided audio packet. If splitting is possible, this array will
     *     contain two packets. If splitting is not possible, this array will
     *     contain only the originally-provided packet.
     */
    var splitAudioPacket = function splitAudioPacket(data) {

        var minValue = Number.MAX_VALUE;
        var optimalSplitLength = data.length;

        // Calculate number of whole samples in the provided audio packet AND
        // in the minimum possible split packet
        var samples = Math.floor(data.length / format.channels);
        var minSplitSamples = Math.floor(format.rate * MIN_SPLIT_SIZE);

        // Calculate the beginning of the "end" of the audio packet
        var start = Math.max(
            format.channels * minSplitSamples,
            format.channels * (samples - minSplitSamples)
        );

        // For all samples at the end of the given packet, find a point where
        // the perceptible volume across all channels is lowest (and thus is
        // the optimal point to split)
        for (var offset = start; offset < data.length; offset += format.channels) {

            // Calculate the sum of all values across all channels (the result
            // will be proportional to the average volume of a sample)
            var totalValue = 0;
            for (var channel = 0; channel < format.channels; channel++) {
                totalValue += Math.abs(data[offset + channel]);
            }

            // If this is the smallest average value thus far, set the split
            // length such that the first packet ends with the current sample
            if (totalValue <= minValue) {
                optimalSplitLength = offset + format.channels;
                minValue = totalValue;
            }

        }

        // If packet is not split, return the supplied packet untouched
        if (optimalSplitLength === data.length)
            return [data];

        // Otherwise, split the packet into two new packets according to the
        // calculated optimal split length
        return [
            new SampleArray(data.buffer.slice(0, optimalSplitLength * format.bytesPerSample)),
            new SampleArray(data.buffer.slice(optimalSplitLength * format.bytesPerSample))
        ];

    };
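
    /*
     * Added illustrative note: with the values above, a 16-bit stereo stream
     * at 44100 Hz gives minSplitSamples = Math.floor(44100 * 0.02) = 882
     * sample frames (1764 array entries). For any packet longer than twice
     * that, splitAudioPacket() only examines the final 882 frames, picks the
     * frame whose summed absolute amplitude across all channels is smallest,
     * and splits immediately after it, so the cut lands at the quietest point
     * near the end of the packet.
     */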
    /**
     * Pushes the given packet of audio data onto the playback queue. Unlike
     * other private functions within Guacamole.RawAudioPlayer, the type of the
     * ArrayBuffer packet of audio data here need not be specific to the type
     * of audio (as with SampleArray). The ArrayBuffer type provided by a
     * Guacamole.ArrayBufferReader, for example, is sufficient. Any necessary
     * conversions will be performed automatically internally.
     *
     * @private
     * @param {ArrayBuffer} data
     *     A raw packet of audio data that should be pushed onto the audio
     *     playback queue.
     */
    var pushAudioPacket = function pushAudioPacket(data) {
        packetQueue.push(new SampleArray(data));
    };

    /**
     * Shifts off and returns a packet of audio data from the beginning of the
     * playback queue. The length of this audio packet is determined
     * dynamically according to the click-reduction algorithm implemented by
     * splitAudioPacket().
     *
     * @private
     * @returns {SampleArray}
     *     A packet of audio data pulled from the beginning of the playback
     *     queue.
     */
    var shiftAudioPacket = function shiftAudioPacket() {

        // Flatten data in packet queue
        var data = joinAudioPackets(packetQueue);
        if (!data)
            return null;

        // Pull an appropriate amount of data from the front of the queue
        packetQueue = splitAudioPacket(data);
        data = packetQueue.shift();

        return data;

    };
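
    /*
     * Added illustrative note: each call to shiftAudioPacket() flattens the
     * entire queue into one packet, splits it at a quiet point near its end,
     * returns the large leading portion for immediate scheduling, and leaves
     * the short trailing remainder (roughly MIN_SPLIT_SIZE seconds or less)
     * queued so it can be joined seamlessly with the next packet received.
     */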
    /**
     * Converts the given audio packet into an AudioBuffer, ready for playback
     * by the Web Audio API. Unlike the raw audio packets received by this
     * audio player, AudioBuffers require floating point samples and are split
     * into isolated planes of channel-specific data.
     *
     * @private
     * @param {SampleArray} data
     *     The raw audio packet that should be converted into a Web Audio API
     *     AudioBuffer.
     *
     * @returns {AudioBuffer}
     *     A new Web Audio API AudioBuffer containing the provided audio data,
     *     converted to the format used by the Web Audio API.
     */
    var toAudioBuffer = function toAudioBuffer(data) {

        // Calculate total number of samples
        var samples = data.length / format.channels;

        // Determine exactly when packet CAN play
        var packetTime = context.currentTime;
        if (nextPacketTime < packetTime)
            nextPacketTime = packetTime;

        // Get audio buffer for specified format
        var audioBuffer = context.createBuffer(format.channels, samples, format.rate);

        // Convert each channel
        for (var channel = 0; channel < format.channels; channel++) {

            var audioData = audioBuffer.getChannelData(channel);

            // Fill audio buffer with data for channel
            var offset = channel;
            for (var i = 0; i < samples; i++) {
                audioData[i] = data[offset] / maxSampleValue;
                offset += format.channels;
            }

        }

        return audioBuffer;

    };
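
    /*
     * Added illustrative note: the conversion above de-interleaves the packet
     * (each channel's data starts at index "channel" and advances by
     * format.channels) and rescales each integer sample into the floating
     * point range expected by the Web Audio API by dividing by maxSampleValue.
     * For 16-bit audio, a sample of 16384 becomes 16384 / 32768 = 0.5, for
     * example.
     */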
    // Defer playback of received audio packets slightly
    reader.ondata = function playReceivedAudio(data) {

        // Push received samples onto queue
        pushAudioPacket(new SampleArray(data));

        // Shift off an arbitrary packet of audio data from the queue (this may
        // be different in size from the packet just pushed)
        var packet = shiftAudioPacket();
        if (!packet)
            return;

        // Determine exactly when packet CAN play
        var packetTime = context.currentTime;
        if (nextPacketTime < packetTime)
            nextPacketTime = packetTime;

        // Set up buffer source
        var source = context.createBufferSource();
        source.connect(context.destination);

        // Use noteOn() instead of start() if necessary
        if (!source.start)
            source.start = source.noteOn;

        // Schedule packet
        source.buffer = toAudioBuffer(packet);
        source.start(nextPacketTime);

        // Update timeline by duration of scheduled packet
        nextPacketTime += packet.length / format.channels / format.rate;

    };
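
    /*
     * Added illustrative note: the duration of each scheduled packet, in
     * seconds, is packet.length / format.channels / format.rate. A stereo
     * packet of 3528 array entries at 44100 Hz therefore lasts
     * 3528 / 2 / 44100 = 0.04 seconds, and nextPacketTime advances by exactly
     * that amount so the following packet is scheduled back-to-back with it.
     */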
    /** @override */
    this.sync = function sync() {

        // Determine the current playback time
        var now = context.currentTime;

        // Reschedule future playback time such that playback latency is
        // bounded within a reasonable latency threshold
        nextPacketTime = Math.min(nextPacketTime, now + maxLatency);

    };
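
    /*
     * Added illustrative note: sync() clamps the scheduling timeline so that
     * no more than maxLatency (0.3) seconds of audio remain queued ahead of
     * the current playback position. If now is 10.0 and nextPacketTime has
     * drifted to 10.5, it is pulled back to 10.3, so subsequently received
     * packets are scheduled sooner and perceived latency stays bounded.
     */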
};

Guacamole.RawAudioPlayer.prototype = new Guacamole.AudioPlayer();

/**
 * Determines whether the given mimetype is supported by
 * Guacamole.RawAudioPlayer.
 *
 * @param {String} mimetype
 *     The mimetype to check.
 *
 * @returns {Boolean}
 *     true if the given mimetype is supported by Guacamole.RawAudioPlayer,
 *     false otherwise.
 */
Guacamole.RawAudioPlayer.isSupportedType = function isSupportedType(mimetype) {

    // No supported types if no Web Audio API
    if (!Guacamole.AudioContextFactory.getAudioContext())
        return false;

    return Guacamole.RawAudioFormat.parse(mimetype) !== null;

};
/**
 * Returns a list of all mimetypes supported by Guacamole.RawAudioPlayer. Only
 * the core mimetypes themselves will be listed. Any mimetype parameters, even
 * required ones, will not be included in the list. For example, "audio/L8" is
 * a raw audio mimetype that may be supported, but it is invalid without
 * additional parameters. Something like "audio/L8;rate=44100" would be valid,
 * however (see https://tools.ietf.org/html/rfc4856).
 *
 * @returns {String[]}
 *     A list of all mimetypes supported by Guacamole.RawAudioPlayer, excluding
 *     any parameters. If the necessary JavaScript APIs for playing raw audio
 *     are absent, this list will be empty.
 */
Guacamole.RawAudioPlayer.getSupportedTypes = function getSupportedTypes() {

    // No supported types if no Web Audio API
    if (!Guacamole.AudioContextFactory.getAudioContext())
        return [];

    // We support 8-bit and 16-bit raw PCM
    return [
        'audio/L8',
        'audio/L16'
    ];

};
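
/*
 * Illustrative usage sketch (added note, not part of the original file): the
 * core mimetypes returned above must be parameterized before they describe a
 * playable stream. An application might advertise fully-specified variants,
 * for example (the rate and channel values here are arbitrary examples, not
 * requirements of this file):
 *
 *     var supported = Guacamole.RawAudioPlayer.getSupportedTypes().map(function(type) {
 *         return type + ';rate=44100,channels=2';
 *     });
 *
 *     // e.g. [ "audio/L8;rate=44100,channels=2", "audio/L16;rate=44100,channels=2" ]
 */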