From 80f1cccde837ad513d03cd33d34e8cead04d8d67 Mon Sep 17 00:00:00 2001 From: Alain Pitiot Date: Mon, 31 May 2021 13:49:13 +0200 Subject: [PATCH] added missing docs --- docs/data_Shelf.js.html | 267 ++++++ docs/module-data.Shelf.html | 412 ++++++++ docs/module-sound.AudioClip.html | 544 +++++++++++ docs/module-sound.Microphone.html | 1441 ++++++++++++++++++++++++++++ docs/module-sound.Transcriber.html | 1395 +++++++++++++++++++++++++++ docs/module-sound.Transcript.html | 171 ++++ docs/sound_AudioClip.js.html | 345 +++++++ docs/sound_Microphone.js.html | 636 ++++++++++++ docs/sound_Transcriber.js.html | 444 +++++++++ src/core/PsychoJS.js | 2 +- 10 files changed, 5656 insertions(+), 1 deletion(-) create mode 100644 docs/data_Shelf.js.html create mode 100644 docs/module-data.Shelf.html create mode 100644 docs/module-sound.AudioClip.html create mode 100644 docs/module-sound.Microphone.html create mode 100644 docs/module-sound.Transcriber.html create mode 100644 docs/module-sound.Transcript.html create mode 100644 docs/sound_AudioClip.js.html create mode 100644 docs/sound_Microphone.js.html create mode 100644 docs/sound_Transcriber.js.html diff --git a/docs/data_Shelf.js.html b/docs/data_Shelf.js.html new file mode 100644 index 0000000..aee3631 --- /dev/null +++ b/docs/data_Shelf.js.html @@ -0,0 +1,267 @@ + + + + + JSDoc: Source: data/Shelf.js + + + + + + + + + + +
+ +

Source: data/Shelf.js

+ + + + + + +
+
+
/**
+ * Shelf handles persistent key/value pairs, which are stored in the shelf collection on the
+ * server, and accesses in a concurrent fashion.
+ *
+ * @author Alain Pitiot
+ * @version 2021.1.4
+ * @copyright (c) 2021 Open Science Tools Ltd. (https://opensciencetools.org)
+ * @license Distributed under the terms of the MIT License
+ */
+
+import {PsychObject} from "../util/PsychObject";
+
+
+/**
+ * <p>Shelf handles persistent key/value pairs, which are stored in the shelf collection on the
+ * server, and accesses in a concurrent fashion</p>
+ *
+ * @name module:data.Shelf
+ * @class
+ * @extends PsychObject
+ * @param {Object} options
+ * @param {module:core.PsychoJS} options.psychoJS - the PsychoJS instance
+ * @param {boolean} [options.autoLog= false] - whether or not to log
+ */
+export class Shelf extends PsychObject
+{
+
+	constructor({
+								psychoJS,
+								autoLog = false
+							} = {})
+	{
+		super(psychoJS);
+
+		this._addAttribute('autoLog', autoLog);
+		this._addAttribute('status', Shelf.Status.READY);
+	}
+
+
+	increment()
+	{/*
+		// prepare a PsychoJS component:
+		this._waitForDownloadComponent = {
+			status: PsychoJS.Status.NOT_STARTED,
+			clock: new Clock(),
+			resources: new Set()
+		};
+
+		const self = this;
+		return () =>
+		{
+			const t = self._waitForDownloadComponent.clock.getTime();
+
+			// start the component:
+			if (t >= 0.0 && self._waitForDownloadComponent.status === PsychoJS.Status.NOT_STARTED)
+			{
+				self._waitForDownloadComponent.tStart = t;
+				self._waitForDownloadComponent.status = PsychoJS.Status.STARTED;
+
+				// if resources is an empty array, we consider all registered resources:
+				if (resources.length === 0)
+				{
+					for (const [name, {status, path, data}] of this._resources)
+					{
+						resources.append({ name, path });
+					}
+				}
+
+				// only download those resources not already downloaded or downloading:
+				const resourcesToDownload = new Set();
+				for (let {name, path} of resources)
+				{
+					// to deal with potential CORS issues, we use the pavlovia.org proxy for resources
+					// not hosted on pavlovia.org:
+					if ( (path.toLowerCase().indexOf('www.') === 0 ||
+						path.toLowerCase().indexOf('http:') === 0 ||
+						path.toLowerCase().indexOf('https:') === 0) &&
+						(path.indexOf('pavlovia.org') === -1) )
+					{
+						path = 'https://devlovia.org/api/v2/proxy/' + path;
+					}
+
+					const pathStatusData = this._resources.get(name);
+
+					// the resource has not been registered yet:
+					if (typeof pathStatusData === 'undefined')
+					{
+						self._resources.set(name, {
+							status: ServerManager.ResourceStatus.REGISTERED,
+							path,
+							data: undefined
+						});
+						self._waitForDownloadComponent.resources.add(name);
+						resourcesToDownload.add(name);
+						self._psychoJS.logger.debug('registered resource:', name, path);
+					}
+
+					// the resource has been registered but is not downloaded yet:
+					else if (typeof pathStatusData.status !== ServerManager.ResourceStatus.DOWNLOADED)
+						// else if (typeof pathStatusData.data === 'undefined')
+					{
+						self._waitForDownloadComponent.resources.add(name);
+					}
+
+				}
+
+				// start the download:
+				self._downloadResources(resourcesToDownload);
+			}
+
+			// check whether all resources have been downloaded:
+			for (const name of self._waitForDownloadComponent.resources)
+			{
+				const pathStatusData = this._resources.get(name);
+
+				// the resource has not been downloaded yet: loop this component
+				if (typeof pathStatusData.status !== ServerManager.ResourceStatus.DOWNLOADED)
+					// if (typeof pathStatusData.data === 'undefined')
+				{
+					return Scheduler.Event.FLIP_REPEAT;
+				}
+			}
+
+			// all resources have been downloaded: move to the next component:
+			self._waitForDownloadComponent.status = PsychoJS.Status.FINISHED;
+			return Scheduler.Event.NEXT;
+		};*/
+	}
+
+
+
+
+	/**
+	 * Increment the integer counter corresponding to the given key by the given amount.
+	 *
+	 * @param {string[]} [key = [] ] key as an array of key components
+	 * @param {number} [increment = 1] increment
+	 * @return {Promise<any>}
+	 */
+	async _increment(key = [], increment = 1)
+	{
+		const response = {
+			origin: 'Shelf.increment',
+			context: 'when incrementing an integer counter'
+		};
+
+		try
+		{
+			this._status = Shelf.Status.BUSY;
+
+			if (!Array.isArray(key) || key.length === 0)
+			{
+				throw 'the key must be a non empty array';
+			}
+
+			// prepare the request:
+			const componentList = key.reduce((list, component) => list + '+' + component, '');
+			const url = this._psychoJS.config.pavlovia.URL + '/api/v2/shelf/' + componentList;
+			const data = { increment };
+
+			// query the server:
+			const response = await fetch(url, {
+				method: 'POST',
+				mode: 'cors', // no-cors, *cors, same-origin
+				cache: 'no-cache', // *default, no-cache, reload, force-cache, only-if-cached
+				credentials: 'same-origin', // include, *same-origin, omit
+				headers: {
+					'Content-Type': 'application/json',
+					'session-token': ''
+				},
+				redirect: 'follow', // manual, *follow, error
+				referrerPolicy: 'no-referrer', // no-referrer, *no-referrer-when-downgrade, origin, origin-when-cross-origin, same-origin, strict-origin, strict-origin-when-cross-origin, unsafe-url
+				body: JSON.stringify(data)
+			});
+
+			// convert the response to json:
+			const document = await response.json();
+
+			// return the updated value:
+			this._status = Shelf.Status.READY;
+			return document['value'];
+		}
+		catch (error)
+		{
+			this._status = Shelf.Status.ERROR;
+			throw {...response, error};
+		}
+	}
+}
+
+
+
+/**
+ * Shelf status
+ *
+ * @name module:data.Shelf#Status
+ * @enum {Symbol}
+ * @readonly
+ * @public
+ */
+Shelf.Status = {
+	/**
+	 * The shelf is ready.
+	 */
+	READY: Symbol.for('READY'),
+
+	/**
+	 * The shelf is busy, e.g. storing or retrieving values.
+	 */
+	BUSY: Symbol.for('BUSY'),
+
+	/**
+	 * The shelf has encountered an error.
+	 */
+	ERROR: Symbol.for('ERROR')
+};
+
+
+
+ + + + +
+ + + +
+ + + + + + + diff --git a/docs/module-data.Shelf.html b/docs/module-data.Shelf.html new file mode 100644 index 0000000..9ba3f78 --- /dev/null +++ b/docs/module-data.Shelf.html @@ -0,0 +1,412 @@ + + + + + JSDoc: Class: Shelf + + + + + + + + + + +
+ +

Class: Shelf

+ + + + + + +
+ +
+ +

+ data.Shelf(options)

+ + +
+ +
+
+ + + + + + +

new Shelf(options)

+ + + + + + +
+

Shelf handles persistent key/value pairs, which are stored in the shelf collection on the +server, and accesses in a concurrent fashion

+
+ + + + + + + + + +
Parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
options + + +Object + + + + +
Properties
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeAttributesDefaultDescription
psychoJS + + +module:core.PsychoJS + + + + + + + + + + + + the PsychoJS instance
autoLog + + +boolean + + + + + + <optional>
+ + + + + +
+ + false + + whether or not to log
+ +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + +
+ + +

Extends

+ + + + +
    +
  • PsychObject
  • +
+ + + + + + + + + + + + + + + +

Members

+ + + +

(readonly) Status :Symbol

+ + + + +
+ Shelf status +
+ + + +
Type:
+
    +
  • + +Symbol + + +
  • +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + +
+ +
+ + + + +
+ + + +
+ + + + + + + \ No newline at end of file diff --git a/docs/module-sound.AudioClip.html b/docs/module-sound.AudioClip.html new file mode 100644 index 0000000..8df4890 --- /dev/null +++ b/docs/module-sound.AudioClip.html @@ -0,0 +1,544 @@ + + + + + JSDoc: Class: AudioClip + + + + + + + + + + +
+ +

Class: AudioClip

+ + + + + + +
+ +
+ +

+ sound.AudioClip(options)

+ + +
+ +
+
+ + + + + + +

new AudioClip(options)

+ + + + + + +
+

AudioClip encapsulate an audio recording.

+
+ + + + + + + + + +
Parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
options + + +Object + + + + +
Properties
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeAttributesDefaultDescription
psychoJS + + +module:core.PsychoJS + + + + + + + + + + + + the PsychoJS instance
name + + +String + + + + + + <optional>
+ + + + + +
+ + 'audioclip' + + the name used when logging messages
format + + +string + + + + + + + + + + + + the format for the audio file
sampleRateHz + + +number + + + + + + + + + + + + the sampling rate
data + + +Blob + + + + + + + + + + + + the audio data, in the given format, at the given sampling rate
autoLog + + +boolean + + + + + + <optional>
+ + + + + +
+ + false + + whether or not to log
+ +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +

Members

+ + + +

(readonly) Engine :Symbol

+ + + + +
+ Recognition engines. +
+ + + +
Type:
+
    +
  • + +Symbol + + +
  • +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + +
+ +
+ + + + +
+ + + +
+ + + + + + + \ No newline at end of file diff --git a/docs/module-sound.Microphone.html b/docs/module-sound.Microphone.html new file mode 100644 index 0000000..96ed23b --- /dev/null +++ b/docs/module-sound.Microphone.html @@ -0,0 +1,1441 @@ + + + + + JSDoc: Class: Microphone + + + + + + + + + + +
+ +

Class: Microphone

+ + + + + + +
+ +
+ +

+ sound.Microphone(options)

+ + +
+ +
+
+ + + + + + +

new Microphone(options)

+ + + + + + +
+

This manager handles the recording of audio signal.

+
+ + + + + + + + + +
Parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
options + + +Object + + + + +
Properties
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeAttributesDefaultDescription
psychoJS + + +module:core.PsychoJS + + + + + + + + + + + + the PsychoJS instance
name + + +String + + + + + + + + + + + + the name used when logging messages
format + + +string + + + + + + <optional>
+ + + + + +
+ + 'audio/webm;codecs=opus' + + the format for the audio file
sampleRateHz + + +number + + + + + + <optional>
+ + + + + +
+ + 48000 + + the audio sampling rate, in Hz
clock + + +Clock + + + + + + <optional>
+ + + + + +
+ + an optional clock
autoLog + + +boolean + + + + + + <optional>
+ + + + + +
+ + false + + whether or not to log
+ +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +

Members

+ + + +

flush

+ + + + +
+ Submit a request to flush the recording. +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + +

pause

+ + + + +
+ Submit a request to pause the recording. +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + +

resume

+ + + + +
+ Submit a request to resume the recording. + +

resume has no effect if the recording was not previously paused.

+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + +

start

+ + + + +
+ Submit a request to start the recording. + +

Note that it typically takes 50ms-200ms for the recording to actually starts once +a request to start has been submitted.

+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + +

stop

+ + + + +
+ Submit a request to stop the recording. +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + +

Methods

+ + + + + + + +

(protected) _onChange()

+ + + + + + +
+ Callback for changes to the recording settings. + +

Changes to the settings require the recording to stop and be re-started.

+
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

(protected) _prepareRecording()

+ + + + + + +
+ Prepare the recording. +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

download(filename)

+ + + + + + +
+ Offer the audio recording to the participant as a sound file to download. +
+ + + + + + + + + +
Parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
filename + + +string + + + + the filename
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

getRecording(tag, flushopt)

+ + + + + + +
+ Get the current audio recording as an AudioClip in the given format. +
+ + + + + + + + + +
Parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeAttributesDefaultDescription
tag + + +string + + + + + + + + + + + + an optional tag for the audio clip
flush + + +boolean + + + + + + <optional>
+ + + + + +
+ + false + + whether or not to first flush the recording
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

upload(tag)

+ + + + + + +
+ Upload the audio recording to the pavlovia server. +
+ + + + + + + + + +
Parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
tag + + +string + + + + an optional tag for the audio file
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + + +
+ + + + + + + \ No newline at end of file diff --git a/docs/module-sound.Transcriber.html b/docs/module-sound.Transcriber.html new file mode 100644 index 0000000..2231e9f --- /dev/null +++ b/docs/module-sound.Transcriber.html @@ -0,0 +1,1395 @@ + + + + + JSDoc: Class: Transcriber + + + + + + + + + + +
+ +

Class: Transcriber

+ + + + + + +
+ +
+ +

+ sound.Transcriber(options)

+ + +
+ +
+
+ + + + + + +

new Transcriber(options)

+ + + + + + +
+

This manager handles the transcription of speech into text.

+
+ + + + + + + + + +
Parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
options + + +Object + + + + +
Properties
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeAttributesDefaultDescription
psychoJS + + +module:core.PsychoJS + + + + + + + + + + + + the PsychoJS instance
name + + +String + + + + + + + + + + + + the name used when logging messages
bufferSize + + +number + + + + + + <optional>
+ + + + + +
+ + 10000 + + the maximum size of the circular transcript buffer
continuous + + +Array.<String> + + + + + + <optional>
+ + + + + +
+ + true + + whether or not to continuously recognise
lang + + +Array.<String> + + + + + + <optional>
+ + + + + +
+ + 'en-US' + + the spoken language
interimResults + + +Array.<String> + + + + + + <optional>
+ + + + + +
+ + false + + whether or not to make interim results available
maxAlternatives + + +Array.<String> + + + + + + <optional>
+ + + + + +
+ + 1 + + the maximum number of recognition alternatives
tokens + + +Array.<String> + + + + + + <optional>
+ + + + + +
+ + [] + + the tokens to be recognised. This is experimental technology, not available in all browser.
clock + + +Clock + + + + + + <optional>
+ + + + + +
+ + an optional clock
autoLog + + +boolean + + + + + + <optional>
+ + + + + +
+ + false + + whether or not to log
+ +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
To Do:
+
+
    +
  • deal with alternatives, interim results, and recognition errors
  • +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +

Methods

+ + + + + + + +

(protected) _onChange()

+ + + + + + +
+ Callback for changes to the recognition settings. + +

Changes to the recognition settings require the recognition to stop and be re-started.

+
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

(protected) _prepareTranscription()

+ + + + + + +
+ Prepare the transcription. +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

clearTranscripts()

+ + + + + + +
+ Clear all transcripts and resets the circular buffers. +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

getTranscripts(options) → {Array.<Transcript>}

+ + + + + + +
+ Get the list of transcripts still in the buffer, i.e. those that have not been +previously cleared by calls to getTranscripts with clear = true. +
+ + + + + + + + + +
Parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
options + + +Object + + + + +
Properties
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeAttributesDefaultDescription
transcriptList + + +Array.<string> + + + + + + <optional>
+ + + + + +
+ + [] + + the list of transcripts texts to consider. If transcriptList is empty, we consider all transcripts.
clear + + +boolean + + + + + + <optional>
+ + + + + +
+ + false + + whether or not to keep in the buffer the transcripts for a subsequent call to getTranscripts. If a keyList has been given and clear = true, we only remove from the buffer those keys in keyList
+ +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + +
Returns:
+ + +
+ the list of transcripts still in the buffer +
+ + + +
+
+ Type +
+
+ +Array.<Transcript> + + +
+
+ + + + + + + + + + + + + +

start() → {Promise}

+ + + + + + +
+ Start the transcription. +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + +
Returns:
+ + +
+ promise fulfilled when the transcription actually started +
+ + + +
+
+ Type +
+
+ +Promise + + +
+
+ + + + + + + + + + + + + +

stop() → {Promise}

+ + + + + + +
+ Stop the transcription. +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + +
Returns:
+ + +
+ promise fulfilled when the speech recognition actually stopped +
+ + + +
+
+ Type +
+
+ +Promise + + +
+
+ + + + + + + + + + + + + +
+ +
+ + + + +
+ + + +
+ + + + + + + \ No newline at end of file diff --git a/docs/module-sound.Transcript.html b/docs/module-sound.Transcript.html new file mode 100644 index 0000000..3462a8b --- /dev/null +++ b/docs/module-sound.Transcript.html @@ -0,0 +1,171 @@ + + + + + JSDoc: Class: Transcript + + + + + + + + + + +
+ +

Class: Transcript

+ + + + + + +
+ +
+ +

+ sound.Transcript()

+ + +
+ +
+
+ + + + + + +

new Transcript()

+ + + + + + +
+ Transcript returned by the transcriber +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Source:
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + + +
+ + + + + + + \ No newline at end of file diff --git a/docs/sound_AudioClip.js.html b/docs/sound_AudioClip.js.html new file mode 100644 index 0000000..a375291 --- /dev/null +++ b/docs/sound_AudioClip.js.html @@ -0,0 +1,345 @@ + + + + + JSDoc: Source: sound/AudioClip.js + + + + + + + + + + +
+ +

Source: sound/AudioClip.js

+ + + + + + +
+
+
/**
+ * AudioClip encapsulate an audio recording.
+ *
+ * @author Alain Pitiot and Sotiri Bakagiannis
+ * @version 2021.x
+ * @copyright (c) 2021 Open Science Tools Ltd. (https://opensciencetools.org)
+ * @license Distributed under the terms of the MIT License
+ */
+
+import {PsychObject} from "../util/PsychObject";
+import {PsychoJS} from "../core/PsychoJS";
+
+
+/**
+ * <p>AudioClip encapsulate an audio recording.</p>
+ *
+ * @name module:sound.AudioClip
+ * @class
+ * @param {Object} options
+ * @param {module:core.PsychoJS} options.psychoJS - the PsychoJS instance
+ * @param {String} [options.name= 'audioclip'] - the name used when logging messages
+ * @param {string} options.format the format for the audio file
+ * @param {number} options.sampleRateHz - the sampling rate
+ * @param {Blob} options.data - the audio data, in the given format, at the given sampling rate
+ * @param {boolean} [options.autoLog= false] - whether or not to log
+ */
+export class AudioClip extends PsychObject
+{
+
+	constructor({psychoJS, name, sampleRateHz, format, data, autoLog} = {})
+	{
+		super(psychoJS);
+
+		this._addAttribute('name', name, 'audioclip');
+		this._addAttribute('format', format);
+		this._addAttribute('sampleRateHz', sampleRateHz);
+		this._addAttribute('data', data);
+		this._addAttribute('autoLog', false, autoLog);
+		this._addAttribute('status', AudioClip.Status.CREATED);
+
+		if (this._autoLog)
+		{
+			this._psychoJS.experimentLogger.exp(`Created ${this.name} = ${this.toString()}`);
+		}
+
+		// decode the blob into an audio buffer:
+		this._decodeAudio();
+	}
+
+
+	play()
+	{
+		// TODO check that the audio buffer is ready
+
+		// play the audio buffer:
+		const source = this._audioContext.createBufferSource();
+		source.buffer = this._audioBuffer;
+		source.connect(this._audioContext.destination);
+		source.start();
+	}
+
+
+	upload()
+	{
+		// TODO
+	}
+
+
+	/**
+	 * Transcribe the audio clip.
+	 *
+	 * ref: https://cloud.google.com/speech-to-text/docs/reference/rest/v1/speech/recognize
+	 *
+	 * @param {Object} options
+	 * @param engine
+	 * @param {String} options.languageCode - the BCP-47 language code for the recognition,
+	 * 	e.g. 'en-gb'
+	 * @return {Promise<void>}
+	 */
+	async transcribe({engine, languageCode} = {})
+	{
+		// wait for the decoding to complete:
+		await this._decodeAudio();
+
+		return new Promise(async (resolve, reject) =>
+		{
+			// convert the Float32 PCM audio data to UInt16:
+			const buffer = new ArrayBuffer(this._audioData.length * 2);
+			const uint16View = new Uint16Array(buffer);
+			for (let t = 0; t < this._audioData.length; ++t)
+			{
+				uint16View[t] = (this._audioData[t] < 0)
+					? this._audioData[t] * 0x8000
+					: this._audioData[t] * 0x7FFF;
+			}
+
+			// encode it to base64:
+			const base64Data = this._base64ArrayBuffer(new Uint8Array(buffer));
+
+			// query the Google speech-to-text service:
+			const body = {
+				config: {
+					encoding: 'LINEAR16',
+					sampleRateHertz: this._sampleRateHz,
+					languageCode
+				},
+				audio: {
+					content: base64Data
+				},
+			};
+
+			// TODO get the key from the designer's pavlovia account
+			const GOOGLE_API_KEY = 'AIzaSyDngTi-pJcVrm_Kr2yTKV8OYLtfRN180gY';
+			const url = `https://speech.googleapis.com/v1/speech:recognize?key=${GOOGLE_API_KEY}`;
+
+			const response = await fetch(url, {
+				method: 'POST',
+				headers: {
+					'Content-Type': 'application/json',
+				},
+				body: JSON.stringify(body)
+			});
+
+			// convert the response to json:
+			const results = await response.json();
+			console.log('>>>> ', results);
+
+			resolve(results[0].alternatives[0]);
+		});
+	}
+
+
+	_decodeAudio()
+	{
+		// if the audio clip is ready, the PCM audio data is available in _audioData, a Float32Array:
+		if (this._status === AudioClip.Status.READY)
+		{
+			return;
+		}
+
+
+		// if we are already decoding, wait until the process completed:
+		if (this._status === AudioClip.Status.DECODING)
+		{
+			const self = this;
+			return new Promise(function (resolve, reject)
+			{
+				self._decodingCallbacks.push(resolve);
+				// TODO
+				// self._errorCallback = reject;
+			}.bind(this));
+		}
+
+
+		// otherwise, start decoding the input formatted audio data:
+		this._status = AudioClip.Status.DECODING;
+		this._audioData = null;
+		this._decodingCallbacks = [];
+
+		this._audioContext = new (window.AudioContext || window.webkitAudioContext)({
+			sampleRate: this._sampleRateHz
+		});
+
+		const reader = new window.FileReader();
+		reader.onloadend = async () =>
+		{
+			// decode the ArrayBuffer containing the formatted audio data (e.g. webm)
+			// into an audio buffer:
+			this._audioBuffer = await this._audioContext.decodeAudioData(reader.result);
+
+			// get the Float32Array containing the PCM data:
+			this._audioData = this._audioBuffer.getChannelData(0);
+
+			// we are now ready to translate and play:
+			this._status = AudioClip.Status.READY;
+
+			// resolve all the promises waiting for the decoding to complete:
+			for (const callback of this._decodingCallbacks)
+			{
+				callback();
+			}
+		};
+
+		reader.onerror = (error) =>
+		{
+			// TODO
+		}
+
+		reader.readAsArrayBuffer(this._data);
+	}
+
+
+	/**
+	 * Convert an array buffer to a base64 string.
+	 *
+	 * @note this is only very lightly adapted from the folowing post of @Grantlyk:
+	 * https://gist.github.com/jonleighton/958841#gistcomment-1953137
+	 *
+	 * the following only works for small buffers:
+	 * const dataAsString = String.fromCharCode.apply(null, new Uint8Array(buffer));
+	 * base64Data = window.btoa(dataAsString);
+	 *
+	 * @param arrayBuffer
+	 * @return {string} the base64 encoded input buffer
+	 */
+	_base64ArrayBuffer(arrayBuffer)
+	{
+	let base64 = '';
+	const encodings = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+
+	const bytes = new Uint8Array(arrayBuffer);
+	const byteLength = bytes.byteLength;
+	const byteRemainder = byteLength % 3;
+	const mainLength = byteLength - byteRemainder;
+
+	let a;
+	let b;
+	let c;
+	let d;
+	let chunk;
+
+	// Main loop deals with bytes in chunks of 3
+	for (let i = 0; i < mainLength; i += 3) {
+		// Combine the three bytes into a single integer
+		chunk = (bytes[i] << 16) | (bytes[i + 1] << 8) | bytes[i + 2];
+
+		// Use bitmasks to extract 6-bit segments from the triplet
+		a = (chunk & 16515072) >> 18; // 16515072 = (2^6 - 1) << 18
+		b = (chunk & 258048) >> 12; // 258048   = (2^6 - 1) << 12
+		c = (chunk & 4032) >> 6; // 4032     = (2^6 - 1) << 6
+		d = chunk & 63;        // 63       = 2^6 - 1
+
+		// Convert the raw binary segments to the appropriate ASCII encoding
+		base64 += encodings[a] + encodings[b] + encodings[c] + encodings[d];
+	}
+
+	// Deal with the remaining bytes and padding
+	if (byteRemainder === 1) {
+		chunk = bytes[mainLength];
+
+		a = (chunk & 252) >> 2; // 252 = (2^6 - 1) << 2
+
+		// Set the 4 least significant bits to zero
+		b = (chunk & 3) << 4; // 3   = 2^2 - 1
+
+		base64 += `${encodings[a]}${encodings[b]}==`;
+	} else if (byteRemainder === 2) {
+		chunk = (bytes[mainLength] << 8) | bytes[mainLength + 1];
+
+		a = (chunk & 64512) >> 10; // 64512 = (2^6 - 1) << 10
+		b = (chunk & 1008) >> 4; // 1008  = (2^6 - 1) << 4
+
+		// Set the 2 least significant bits to zero
+		c = (chunk & 15) << 2; // 15    = 2^4 - 1
+
+		base64 += `${encodings[a]}${encodings[b]}${encodings[c]}=`;
+	}
+
+	return base64;
+}
+
+}
+
+
+/**
+ * Recognition engines.
+ *
+ * @name module:sound.AudioClip#Engine
+ * @enum {Symbol}
+ * @readonly
+ * @public
+ */
+AudioClip.Engine = {
+	/**
+	 * Google Cloud Speech-to-Text.
+	 */
+	GOOGLE: Symbol.for('GOOGLE')
+};
+
+
+/**
+ * AudioClip status.
+ *
+ * @enum {Symbol}
+ * @readonly
+ * @public
+ */
+AudioClip.Status = {
+	CREATED: Symbol.for('CREATED'),
+
+	DECODING: Symbol.for('DECODING'),
+
+	READY: Symbol.for('READY')
+};
+
+
+
+ + + + +
+ + + +
+ + + + + + + diff --git a/docs/sound_Microphone.js.html b/docs/sound_Microphone.js.html new file mode 100644 index 0000000..20b9035 --- /dev/null +++ b/docs/sound_Microphone.js.html @@ -0,0 +1,636 @@ + + + + + JSDoc: Source: sound/Microphone.js + + + + + + + + + + +
+ +

Source: sound/Microphone.js

+ + + + + + +
+
+
/**
+ * Manager handling the recording of audio signal.
+ *
+ * @author Alain Pitiot and Sotiri Bakagiannis
+ * @version 2021.x
+ * @copyright (c) 2021 Open Science Tools Ltd. (https://opensciencetools.org)
+ * @license Distributed under the terms of the MIT License
+ */
+
+import {Clock} from "../util/Clock";
+import {PsychObject} from "../util/PsychObject";
+import {PsychoJS} from "../core/PsychoJS";
+import * as util from '../util/Util';
+import {ExperimentHandler} from "../data/ExperimentHandler";
+import {AudioClip} from "./AudioClip";
+
/**
 * <p>This manager handles the recording of audio signal.</p>
 *
 * @name module:sound.Microphone
 * @class
 * @extends PsychObject
 * @param {Object} options
 * @param {module:core.PsychoJS} options.psychoJS - the PsychoJS instance
 * @param {String} options.name - the name used when logging messages
 * @param {string} [options.format='audio/webm;codecs=opus'] - the MIME type (format) for the audio file
 * @param {number} [options.sampleRateHz= 48000] - the audio sampling rate, in Hz
 * @param {Clock} [options.clock= undefined] - an optional clock
 * @param {boolean} [options.autoLog= false] - whether or not to log
 */
export class Microphone extends PsychObject
{

	constructor({psychoJS, name, format, sampleRateHz, clock, autoLog} = {})
	{
		super(psychoJS);

		this._addAttribute('name', name, 'microphone');
		this._addAttribute('format', format, 'audio/webm;codecs=opus', this._onChange);
		this._addAttribute('sampleRateHz', sampleRateHz, 48000, this._onChange);
		this._addAttribute('clock', clock, new Clock());
		// note: _addAttribute expects (name, value, defaultValue): autoLog is the value,
		// false is the default (the original call had these two arguments swapped)
		this._addAttribute('autoLog', autoLog, false);
		this._addAttribute('status', PsychoJS.Status.NOT_STARTED);

		// prepare the recording:
		this._prepareRecording();

		if (this._autoLog)
		{
			this._psychoJS.experimentLogger.exp(`Created ${this.name} = ${this.toString()}`);
		}
	}


	/**
	 * Submit a request to start the recording.
	 *
	 * <p>Note that it typically takes 50ms-200ms for the recording to actually start once
	 * a request to start has been submitted.</p>
	 *
	 * @name module:sound.Microphone#start
	 * @public
	 * @return {Promise} promise fulfilled when the recording actually starts
	 */
	start()
	{
		// if the microphone is currently paused, a call to start resumes it
		// with a new recording:
		// (the original test was inverted, "!== PAUSED", which meant start() never
		// actually started a recording: it always detoured into resume(), a no-op
		// unless the status is PAUSED)
		if (this._status === PsychoJS.Status.PAUSED)
		{
			return this.resume({clear: true});
		}

		if (this._status !== PsychoJS.Status.STARTED)
		{
			this._psychoJS.logger.debug('request to start audio recording');

			try
			{
				if (!this._recorder)
				{
					throw 'the recorder has not been created yet, possibly because the participant has not given the authorisation to record audio';
				}

				this._recorder.start();

				// return a promise, which will be satisfied when the recording actually starts, which
				// is also when the reset of the clock and the change of status takes place
				const self = this;
				return new Promise((resolve, reject) =>
				{
					self._startCallback = resolve;
					self._errorCallback = reject;
				});
			}
			catch (error)
			{
				this._psychoJS.logger.error('unable to start the audio recording: ' + JSON.stringify(error));
				this._status = PsychoJS.Status.ERROR;

				throw {
					origin: 'Microphone.start',
					context: 'when starting the audio recording for microphone: ' + this._name,
					error
				};
			}
		}
	}


	/**
	 * Submit a request to stop the recording.
	 *
	 * @name module:sound.Microphone#stop
	 * @public
	 * @param {Object} options
	 * @param {string} [options.filename] the name of the file to which the audio recording will be
	 * 	saved
	 * @return {Promise} promise fulfilled when the recording actually stopped, and the recorded
	 * 	data was made available
	 */
	stop({filename} = {})
	{
		if (this._status === PsychoJS.Status.STARTED || this._status === PsychoJS.Status.PAUSED)
		{
			this._psychoJS.logger.debug('request to stop audio recording');

			// remember the stop options: they are processed in the recorder's onstop callback
			this._stopOptions = {
				filename
			};

			// note: calling the stop method of the MediaRecorder will first raise a dataavailable event,
			// and then a stop event
			// ref: https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/stop
			this._recorder.stop();

			// return a promise, which will be satisfied when the recording actually stops and the data
			// has been made available:
			const self = this;
			return new Promise((resolve, reject) =>
			{
				self._stopCallback = resolve;
				self._errorCallback = reject;
			});
		}
	}


	/**
	 * Submit a request to pause the recording.
	 *
	 * @name module:sound.Microphone#pause
	 * @public
	 * @return {Promise} promise fulfilled when the recording actually paused
	 */
	pause()
	{
		if (this._status === PsychoJS.Status.STARTED)
		{
			this._psychoJS.logger.debug('request to pause audio recording');

			try
			{
				if (!this._recorder)
				{
					throw 'the recorder has not been created yet, possibly because the participant has not given the authorisation to record audio';
				}

				// note: calling the pause method of the MediaRecorder raises a pause event
				this._recorder.pause();

				// return a promise, which will be satisfied when the recording actually pauses:
				const self = this;
				return new Promise((resolve, reject) =>
				{
					self._pauseCallback = resolve;
					self._errorCallback = reject;
				});
			}
			catch (error)
			{
				// use "this": "self" was scoped to the try block and is not visible here
				this._psychoJS.logger.error('unable to pause the audio recording: ' + JSON.stringify(error));
				this._status = PsychoJS.Status.ERROR;

				throw {
					origin: 'Microphone.pause',
					context: 'when pausing the audio recording for microphone: ' + this._name,
					error
				};
			}
		}
	}


	/**
	 * Submit a request to resume the recording.
	 *
	 * <p>resume has no effect if the recording was not previously paused.</p>
	 *
	 * @name module:sound.Microphone#resume
	 * @param {Object} options
	 * @param {boolean} [options.clear= false] whether or not to empty the audio buffer before
	 * 	resuming the recording
	 * @return {Promise} promise fulfilled when the recording actually resumed
	 */
	resume({clear = false} = {})
	{
		if (this._status === PsychoJS.Status.PAUSED)
		{
			this._psychoJS.logger.debug('request to resume audio recording');

			try
			{
				if (!this._recorder)
				{
					throw 'the recorder has not been created yet, possibly because the participant has not given the authorisation to record audio';
				}

				// empty the audio buffer, if needed:
				if (clear)
				{
					this._audioBuffer = [];
				}

				this._recorder.resume();

				// return a promise, which will be satisfied when the recording actually resumes:
				const self = this;
				return new Promise((resolve, reject) =>
				{
					self._resumeCallback = resolve;
					self._errorCallback = reject;
				});
			}
			catch (error)
			{
				// use "this": "self" was scoped to the try block and is not visible here
				this._psychoJS.logger.error('unable to resume the audio recording: ' + JSON.stringify(error));
				this._status = PsychoJS.Status.ERROR;

				throw {
					origin: 'Microphone.resume',
					context: 'when resuming the audio recording for microphone: ' + this._name,
					error
				};
			}
		}
	}


	/**
	 * Submit a request to flush the recording.
	 *
	 * @name module:sound.Microphone#flush
	 * @public
	 * @return {Promise} promise fulfilled when the data has actually been made available
	 */
	flush()
	{
		if (this._status === PsychoJS.Status.STARTED || this._status === PsychoJS.Status.PAUSED)
		{
			this._psychoJS.logger.debug('request to flush audio recording');

			// note: calling the requestData method of the MediaRecorder will raise a
			// dataavailable event
			// ref: https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/requestData
			this._recorder.requestData();

			// return a promise, which will be satisfied when the data has been made available:
			const self = this;
			return new Promise((resolve, reject) =>
			{
				self._dataAvailableCallback = resolve;
				self._errorCallback = reject;
			});
		}
	}


	/**
	 * Offer the audio recording to the participant as a sound file to download.
	 *
	 * @name module:sound.Microphone#download
	 * @function
	 * @public
	 * @param {string} [filename='audio.webm'] the filename
	 */
	download(filename = 'audio.webm')
	{
		const audioBlob = new Blob(this._audioBuffer);

		const anchor = document.createElement('a');
		anchor.href = window.URL.createObjectURL(audioBlob);
		anchor.download = filename;
		document.body.appendChild(anchor);
		anchor.click();
		document.body.removeChild(anchor);

		// release the object URL to avoid leaking the blob for the lifetime of the document:
		window.URL.revokeObjectURL(anchor.href);
	}


	/**
	 * Upload the audio recording to the pavlovia server.
	 *
	 * <p>If the experiment is not running on the server (e.g. it runs locally, or in piloting
	 * mode), the recording is instead offered to the participant as a file download.</p>
	 *
	 * @name module:sound.Microphone#upload
	 * @function
	 * @public
	 * @param {Object} options
	 * @param {string} [options.tag] an optional tag for the audio file
	 * @return {Promise} promise fulfilled when the audio recording has been uploaded
	 */
	async upload({tag} = {})
	{
		// default tag: the name of this Microphone object
		if (typeof tag === 'undefined')
		{
			tag = this._name;
		}

		// add a format-dependent audio extension to the tag:
		tag += util.extensionFromMimeType(this._format);

		// if the audio recording cannot be uploaded, e.g. the experiment is running locally, or
		// if it is piloting mode, then we offer the audio recording as a file for download:
		if (this._psychoJS.getEnvironment() !== ExperimentHandler.Environment.SERVER ||
			this._psychoJS.config.experiment.status !== 'RUNNING' ||
			this._psychoJS._serverMsg.has('__pilotToken'))
		{
			return this.download(tag);
		}

		// upload the blob:
		const audioBlob = new Blob(this._audioBuffer);
		return this._psychoJS.serverManager.uploadAudio(audioBlob, tag);
	}


	/**
	 * Get the current audio recording as an AudioClip in the given format.
	 *
	 * @name module:sound.Microphone#getRecording
	 * @function
	 * @public
	 * @param {Object} options
	 * @param {string} [options.tag] an optional tag for the audio clip
	 * @param {boolean} [options.flush=false] whether or not to first flush the recording
	 * @return {Promise<AudioClip>} the current audio recording
	 */
	async getRecording({tag, flush = false} = {})
	{
		// default tag: the name of this Microphone object
		if (typeof tag === 'undefined')
		{
			tag = this._name;
		}

		// NOTE(review): the flush option is currently not acted upon — presumably a call
		// to "await this.flush()" is intended here; confirm before relying on it
		const audioClip = new AudioClip({
			psychoJS: this._psychoJS,
			name: tag,
			format: this._format,
			sampleRateHz: this._sampleRateHz,
			data: new Blob(this._audioBuffer)
		});

		return audioClip;
	}


	/**
	 * Callback for changes to the recording settings.
	 *
	 * <p>Changes to the settings require the recording to stop and be re-started.</p>
	 *
	 * @name module:sound.Microphone#_onChange
	 * @function
	 * @protected
	 */
	_onChange()
	{
		if (this._status === PsychoJS.Status.STARTED)
		{
			this.stop();
		}

		this._prepareRecording();

		this.start();
	}


	/**
	 * Prepare the recording.
	 *
	 * <p>Requests access to the participant's microphone, creates the MediaRecorder, and
	 * wires up its callbacks (start/pause/resume/dataavailable/stop/error).</p>
	 *
	 * @name module:sound.Microphone#_prepareRecording
	 * @function
	 * @protected
	 */
	async _prepareRecording()
	{
		// empty the audio buffer:
		this._audioBuffer = [];
		this._recorder = null;

		// create a new audio recorder:
		const stream = await navigator.mediaDevices.getUserMedia({
			audio: {
				advanced: [
					{
						channelCount: 1,
						sampleRate: this._sampleRateHz
					}
				]
			}
		});

		// check that the specified format is supported, use default if it is not:
		let options;
		if (typeof this._format === 'string' && MediaRecorder.isTypeSupported(this._format))
		{
			// note: the MediaRecorder option key is "mimeType", not "type" — with "type"
			// the requested format was silently ignored
			// ref: https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/MediaRecorder
			options = {mimeType: this._format};
		}
		else
		{
			this._psychoJS.logger.warn(`The specified audio format, ${this._format}, is not supported by this browser, using the default format instead`);
		}

		this._recorder = new MediaRecorder(stream, options);

		// setup the callbacks:
		const self = this;

		// called upon Microphone.start(), at which point the audio data starts being gathered
		// into a blob:
		this._recorder.onstart = () =>
		{
			self._audioBuffer.length = 0;
			self._clock.reset();
			self._status = PsychoJS.Status.STARTED;
			self._psychoJS.logger.debug('audio recording started');

			// resolve the Microphone.start promise:
			if (self._startCallback)
			{
				self._startCallback({
					time: self._psychoJS.monotonicClock.getTime()
				});
			}
		};

		// called upon Microphone.pause():
		this._recorder.onpause = () =>
		{
			self._status = PsychoJS.Status.PAUSED;
			self._psychoJS.logger.debug('audio recording paused');

			// resolve the Microphone.pause promise:
			if (self._pauseCallback)
			{
				self._pauseCallback({
					time: self._psychoJS.monotonicClock.getTime()
				});
			}
		};

		// called upon Microphone.resume():
		this._recorder.onresume = () =>
		{
			self._status = PsychoJS.Status.STARTED;
			self._psychoJS.logger.debug('audio recording resumed');

			// resolve the Microphone.resume promise:
			if (self._resumeCallback)
			{
				self._resumeCallback({
					time: self._psychoJS.monotonicClock.getTime()
				});
			}
		};

		// called when audio data is available, typically upon Microphone.stop() or Microphone.flush():
		this._recorder.ondataavailable = (event) =>
		{
			const data = event.data;

			// add data to the buffer:
			self._audioBuffer.push(data);
			self._psychoJS.logger.debug('audio data added to the buffer');

			// resolve the data available promise, if needed:
			if (self._dataAvailableCallback)
			{
				self._dataAvailableCallback({
					time: self._psychoJS.monotonicClock.getTime()
				});
			}
		};

		// called upon Microphone.stop(), after data has been made available:
		this._recorder.onstop = () =>
		{
			self._psychoJS.logger.debug('audio recording stopped');
			self._status = PsychoJS.Status.STOPPED;

			// resolve the Microphone.stop promise:
			if (self._stopCallback)
			{
				self._stopCallback({
					time: self._psychoJS.monotonicClock.getTime()
				});
			}

			// treat stop options if there are any:

			// download to a file, immediately offered to the participant:
			if (typeof self._stopOptions.filename === 'string')
			{
				self.download(self._stopOptions.filename);
			}
		};

		// called upon recording errors:
		this._recorder.onerror = (event) =>
		{
			// TODO resolve/reject the pending promises as appropriate
			self._psychoJS.logger.error('audio recording error: ' + JSON.stringify(event));
			self._status = PsychoJS.Status.ERROR;
		};
	}

}
+
+
+
+
+
+ + + + +
+ + + +
+ + + + + + + diff --git a/docs/sound_Transcriber.js.html b/docs/sound_Transcriber.js.html new file mode 100644 index 0000000..be1baa2 --- /dev/null +++ b/docs/sound_Transcriber.js.html @@ -0,0 +1,444 @@ + + + + + JSDoc: Source: sound/Transcriber.js + + + + + + + + + + +
+ +

Source: sound/Transcriber.js

+ + + + + + +
+
+
/**
+ * Manager handling the transcription of Speech into Text.
+ *
+ * @author Sotiri Bakagiannis and Alain Pitiot
+ * @version 2021.1.4
+ * @copyright (c) 2021 Open Science Tools Ltd. (https://opensciencetools.org)
+ * @license Distributed under the terms of the MIT License
+ */
+
+import {Clock} from "../util/Clock";
+import {PsychObject} from "../util/PsychObject";
+import {PsychoJS} from "../core/PsychoJS";
+
+
/**
 * Transcript returned by the transcriber.
 *
 * @name module:sound.Transcript
 * @class
 * @param {module:sound.Transcriber} transcriber - the transcriber that produced this transcript
 * @param {string} [text=''] - the recognised text
 * @param {number} [confidence=0.0] - the confidence in the recognition
 */
export class Transcript
{
	constructor(transcriber, text = '', confidence = 0.0)
	{
		// copy the recognised text and confidence, and read the timing information
		// (speech onset/offset and recognition time, all relative to the transcriber's
		// clock) off the transcriber:
		Object.assign(this, {
			text,
			confidence,
			speechStart: transcriber._speechStart,
			speechEnd: transcriber._speechEnd,
			time: transcriber._recognitionTime
		});
	}
}
+
+
/**
 * <p>This manager handles the transcription of speech into text.</p>
 *
 * @name module:sound.Transcriber
 * @class
 * @extends PsychObject
 * @param {Object} options
 * @param {module:core.PsychoJS} options.psychoJS - the PsychoJS instance
 * @param {String} options.name - the name used when logging messages
 * @param {number} [options.bufferSize= 10000] - the maximum size of the circular transcript buffer
 * @param {boolean} [options.continuous= true] - whether or not to continuously recognise
 * @param {String} [options.lang= 'en-US'] - the spoken language
 * @param {boolean} [options.interimResults= false] - whether or not to make interim results available
 * @param {number} [options.maxAlternatives= 1] - the maximum number of recognition alternatives
 * @param {String[]} [options.tokens= [] ] - the tokens to be recognised. This is experimental technology, not available in all browser.
 * @param {Clock} [options.clock= undefined] - an optional clock
 * @param {boolean} [options.autoLog= false] - whether or not to log
 *
 * @todo deal with alternatives, interim results, and recognition errors
 */
export class Transcriber extends PsychObject
{

	constructor({psychoJS, name, bufferSize, continuous, lang, interimResults, maxAlternatives, tokens, clock, autoLog} = {})
	{
		super(psychoJS);

		this._addAttribute('name', name, 'transcriber');
		this._addAttribute('bufferSize', bufferSize, 10000);
		this._addAttribute('continuous', continuous, true, this._onChange);
		this._addAttribute('lang', lang, 'en-US', this._onChange);
		this._addAttribute('interimResults', interimResults, false, this._onChange);
		this._addAttribute('maxAlternatives', maxAlternatives, 1, this._onChange);
		this._addAttribute('tokens', tokens, [], this._onChange);
		this._addAttribute('clock', clock, new Clock());
		// note: _addAttribute expects (name, value, defaultValue): autoLog is the value,
		// false is the default (the original call had these two arguments swapped)
		this._addAttribute('autoLog', autoLog, false);
		this._addAttribute('status', PsychoJS.Status.NOT_STARTED);

		// prepare the transcription:
		this._prepareTranscription();

		if (this._autoLog)
		{
			this._psychoJS.experimentLogger.exp(`Created ${this.name} = ${this.toString()}`);
		}
	}


	/**
	 * Start the transcription.
	 *
	 * @name module:sound.Transcriber#start
	 * @function
	 * @public
	 * @return {Promise} promise fulfilled when the transcription actually started
	 */
	start()
	{
		if (this._status !== PsychoJS.Status.STARTED)
		{
			this._psychoJS.logger.debug('request to start speech to text transcription');

			try
			{
				if (!this._recognition)
				{
					throw 'the speech recognition has not been initialised yet, possibly because the participant has not given the authorisation to record audio';
				}

				this._recognition.start();

				// return a promise, which will be satisfied when the transcription actually starts,
				// which is also when the reset of the clock and the change of status takes place
				const self = this;
				return new Promise((resolve, reject) =>
				{
					self._startCallback = resolve;
					self._errorCallback = reject;
				});
			}
			catch (error)
			{
				// TODO Strangely, start sometimes fails with the message that the recognition has already started. It is most probably a bug in the implementation of the Web Speech API. We need to catch this particular error and no throw on this occasion

				this._psychoJS.logger.error('unable to start the speech to text transcription: ' + JSON.stringify(error));
				this._status = PsychoJS.Status.ERROR;

				throw {
					origin: 'Transcriber.start',
					context: 'when starting the speech to text transcription with transcriber: ' + this._name,
					error
				};
			}
		}
	}


	/**
	 * Stop the transcription.
	 *
	 * @name module:sound.Transcriber#stop
	 * @function
	 * @public
	 * @return {Promise} promise fulfilled when the speech recognition actually stopped
	 */
	stop()
	{
		if (this._status === PsychoJS.Status.STARTED)
		{
			this._psychoJS.logger.debug('request to stop speech to text transcription');

			this._recognition.stop();

			// return a promise, which will be satisfied when the recognition actually stops:
			const self = this;
			return new Promise((resolve, reject) =>
			{
				self._stopCallback = resolve;
				self._errorCallback = reject;
			});
		}
	}


	/**
	 * Get the list of transcripts still in the buffer, i.e. those that have not been
	 * previously cleared by calls to getTranscripts with clear = true.
	 *
	 * @name module:sound.Transcriber#getTranscripts
	 * @function
	 * @public
	 * @param {Object} options
	 * @param {string[]} [options.transcriptList= []]] - the list of transcripts texts to consider. If transcriptList is empty, we consider all transcripts.
	 * @param {boolean} [options.clear= true] - whether or not to keep in the buffer the transcripts for a subsequent call to getTranscripts. If a transcriptList has been given and clear = true, we only remove from the buffer those transcripts in transcriptList
	 * @return {Transcript[]} the list of transcripts still in the buffer
	 */
	getTranscripts({
									 transcriptList = [],
									 clear = true
								 } = {})
	{
		// if nothing in the buffer, return immediately:
		if (this._bufferLength === 0)
		{
			return [];
		}

		// iterate over the buffer, from start to end, and discard the null transcripts (i.e. those
		// previously cleared):
		const filteredTranscripts = [];
		const bufferWrap = (this._bufferLength === this._bufferSize);
		let i = bufferWrap ? this._bufferIndex : -1;
		do
		{
			i = (i + 1) % this._bufferSize;

			const transcript = this._circularBuffer[i];
			if (transcript)
			{
				// if the transcriptList is empty or the transcript text is in the transcriptList:
				if (transcriptList.length === 0 || transcriptList.includes(transcript.text))
				{
					filteredTranscripts.push(transcript);

					if (clear)
					{
						this._circularBuffer[i] = null;
					}
				}
			}
		} while (i !== this._bufferIndex);

		return filteredTranscripts;
	}


	/**
	 * Clear all transcripts and resets the circular buffers.
	 *
	 * @name module:sound.Transcriber#clearTranscripts
	 * @function
	 * @public
	 */
	clearTranscripts()
	{
		// circular buffer of transcripts:
		this._circularBuffer = new Array(this._bufferSize);
		this._bufferLength = 0;
		this._bufferIndex = -1;
	}


	/**
	 * Callback for changes to the recognition settings.
	 *
	 * <p>Changes to the recognition settings require the recognition to stop and be re-started.</p>
	 *
	 * @name module:sound.Transcriber#_onChange
	 * @function
	 * @protected
	 */
	_onChange()
	{
		if (this._status === PsychoJS.Status.STARTED)
		{
			this.stop();
		}

		this._prepareTranscription();

		this.start();
	}


	/**
	 * Prepare the transcription.
	 *
	 * <p>Creates the SpeechRecognition object, applies the recognition settings and the
	 * optional token grammar, and wires up the recognition callbacks.</p>
	 *
	 * @name module:sound.Transcriber#_prepareTranscription
	 * @function
	 * @protected
	 */
	_prepareTranscription()
	{
		// setup the circular buffer of transcripts:
		this.clearTranscripts();

		// recognition settings:
		const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
		this._recognition = new SpeechRecognition();
		this._recognition.continuous = this._continuous;
		this._recognition.lang = this._lang;
		this._recognition.interimResults = this._interimResults;
		this._recognition.maxAlternatives = this._maxAlternatives;

		// grammar list with tokens added:
		if (Array.isArray(this._tokens) && this._tokens.length > 0)
		{
			const SpeechGrammarList = window.SpeechGrammarList || window.webkitSpeechGrammarList;

			// note: we accept JSGF encoded strings, and relative weight indicator between 0.0 and 1.0
			// ref: https://www.w3.org/TR/jsgf/
			const name = 'NULL';
			const grammar = `#JSGF V1.0; grammar ${name}; public <${name}> = ${this._tokens.join('|')};`;
			const grammarList = new SpeechGrammarList();
			grammarList.addFromString(grammar, 1);
			this._recognition.grammars = grammarList;
		}

		// setup the callbacks:
		const self = this;

		// called when the start of a speech is detected:
		this._recognition.onspeechstart = () =>
		{
			this._currentSpeechStart = this._clock.getTime();
			self._psychoJS.logger.debug('speech started');
		};

		// called when the end of a speech is detected:
		this._recognition.onspeechend = () =>
		{
			this._currentSpeechEnd = this._clock.getTime();
			self._psychoJS.logger.debug('speech ended');
		};

		// called when the recognition actually started:
		this._recognition.onstart = () =>
		{
			this._clock.reset();
			this._status = PsychoJS.Status.STARTED;
			self._psychoJS.logger.debug('speech recognition started');

			// resolve the Transcriber.start promise, if need be:
			// (test the callback for existence rather than invoking it — the original
			// "self._startCallback()" called a possibly undefined callback and, when it
			// was defined, resolved the promise twice)
			if (self._startCallback)
			{
				self._startCallback({
					time: self._psychoJS.monotonicClock.getTime()
				});
			}
		};

		// called whenever stop() or abort() are called:
		this._recognition.onend = () =>
		{
			this._status = PsychoJS.Status.STOPPED;
			self._psychoJS.logger.debug('speech recognition ended');

			// resolve the Transcriber.stop promise, if need be:
			if (self._stopCallback)
			{
				self._stopCallback({
					time: self._psychoJS.monotonicClock.getTime()
				});
			}
		};

		// called whenever a new result is available:
		this._recognition.onresult = (event) =>
		{
			this._recognitionTime = this._clock.getTime();

			// do not process the results if the Recogniser is not STARTED:
			if (self._status !== PsychoJS.Status.STARTED)
			{
				return;
			}

			// in continuous recognition mode, we need to get the result at resultIndex,
			// otherwise we pick the first result
			const resultIndex = (self._continuous) ? event.resultIndex : 0;

			// TODO at the moment we consider only the first alternative:
			const alternativeIndex = 0;

			const results = event.results;
			const text = results[resultIndex][alternativeIndex].transcript;
			const confidence = results[resultIndex][alternativeIndex].confidence;

			// create a new transcript:
			const transcript = new Transcript(self, text, confidence);

			// insert it in the circular transcript buffer:
			self._bufferIndex = (self._bufferIndex + 1) % self._bufferSize;
			self._bufferLength = Math.min(self._bufferLength + 1, self._bufferSize);
			self._circularBuffer[self._bufferIndex] = transcript;

			self._psychoJS.logger.debug('speech recognition transcript: ', JSON.stringify(transcript));
		};

		// called upon recognition errors:
		this._recognition.onerror = (event) =>
		{
			// lack of speech is not an error:
			if (event.error === 'no-speech')
			{
				return;
			}

			self._psychoJS.logger.error('speech recognition error: ', JSON.stringify(event));
			self._status = PsychoJS.Status.ERROR;
		};
	}

}
+
+
+
+
+
+ + + + +
+ + + +
+ + + + + + + diff --git a/src/core/PsychoJS.js b/src/core/PsychoJS.js index 62725fc..60572d5 100644 --- a/src/core/PsychoJS.js +++ b/src/core/PsychoJS.js @@ -733,7 +733,7 @@ export class PsychoJS source: source, lineno: lineno, colno: colno, - error: error.stack + error: error })); self._gui.dialog({"error": error});