-
Notifications
You must be signed in to change notification settings - Fork 31
/
microphone.ts
92 lines (85 loc) · 3.51 KB
/
microphone.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
/// <reference path="api.d.ts" />
/// <reference path="typings/MediaStream.d.ts" />
/// <reference path="ring_buffer.ts" />
// Minimal ambient declarations for Web Audio API members that are missing
// from the project's bundled typings (this file pre-dates full lib.dom
// coverage). They merge with the browser-provided AudioContext at runtime.
interface MediaStreamAudioSourceNode extends AudioNode {}
interface AudioContext {
// Wraps a getUserMedia MediaStream as an audio-graph source node.
createMediaStreamSource(strm: MediaStream): MediaStreamAudioSourceNode;
}
/**
 * IAudioReader implementation that captures PCM audio from the user's
 * microphone via getUserMedia and a ScriptProcessorNode, accumulating
 * interleaved Float32 samples in a ring buffer until read() drains them.
 */
class MicrophoneReader implements IAudioReader {
    // True while a read() promise is outstanding and not yet resolved.
    in_flight: boolean;
    private context: AudioContext;
    private src_node: MediaStreamAudioSourceNode;
    private proc_node: ScriptProcessorNode;
    private ringbuf: RingBuffer;
    private read_unit: number;

    /**
     * Opens the default microphone and starts capturing.
     * @param buffer_samples_per_ch number of samples per channel a single read() returns
     * @param params unused; kept for IAudioReader interface compatibility
     * @returns resolves with the stream's sampling rate and channel count;
     *          rejects when getUserMedia is denied or unavailable
     */
    open(buffer_samples_per_ch: number, params: any): Promise<IAudioInfo> {
        this.context = new AudioContext();
        return new Promise<IAudioInfo>((resolve, reject) => {
            var callback = (strm) => {
                this.src_node = this.context.createMediaStreamSource(strm);
                // Capacity: 8 read units of interleaved samples.
                this.ringbuf = new RingBuffer(new Float32Array(buffer_samples_per_ch * this.src_node.channelCount * 8));
                // BUGFIX: the second argument is numberOfInputChannels; the
                // original passed 1, which collapsed multi-channel (e.g. stereo)
                // input to a single channel before _onaudioprocess ever saw it.
                this.proc_node = this.context.createScriptProcessor(0, this.src_node.channelCount, this.src_node.channelCount);
                this.proc_node.onaudioprocess = (ev: AudioProcessingEvent) => {
                    this._onaudioprocess(ev);
                };
                this.src_node.connect(this.proc_node);
                // A ScriptProcessorNode must be connected to a destination or
                // many browsers never fire onaudioprocess.
                this.proc_node.connect(this.context.destination);
                this.read_unit = buffer_samples_per_ch * this.src_node.channelCount;
                resolve({
                    // NOTE(review): this reports half the real sample rate even
                    // though no resampling happens here — looks wrong, but
                    // consumers may depend on it; confirm before changing.
                    sampling_rate: this.context.sampleRate / 2,
                    num_of_channels: this.src_node.channelCount,
                });
            };
            if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
                // Modern promise-based API.
                navigator.mediaDevices.getUserMedia({
                    audio: true,
                    video: false
                }).then(callback, reject);
            } else {
                // Legacy callback-based API with vendor prefixes.
                navigator.getUserMedia = (navigator.getUserMedia ||
                                          navigator.webkitGetUserMedia ||
                                          navigator.mozGetUserMedia ||
                                          navigator.msGetUserMedia);
                navigator.getUserMedia({
                    audio: true,
                    video: false,
                }, callback, reject);
            }
        });
    }

    // Interleaves the planar per-channel input into [s0c0, s0c1, s1c0, ...]
    // and appends it to the ring buffer. Overflow behavior is whatever
    // RingBuffer.append does — presumably drops oldest data; TODO confirm.
    private _onaudioprocess(ev: AudioProcessingEvent) {
        var num_of_ch = ev.inputBuffer.numberOfChannels;
        var samples_per_ch = ev.inputBuffer.getChannelData(0).length;
        var data = new Float32Array(num_of_ch * samples_per_ch);
        for (var i = 0; i < num_of_ch; ++i) {
            var ch = ev.inputBuffer.getChannelData(i);
            for (var j = 0; j < samples_per_ch; ++j)
                data[j * num_of_ch + i] = ch[j];
        }
        this.ringbuf.append(data);
    }

    /**
     * Reads up to one unit (read_unit samples) of interleaved audio.
     * Polls the ring buffer every 10 ms until at least one sample is
     * available, then resolves with however much was read.
     */
    read(): Promise<IAudioBuffer> {
        this.in_flight = true;
        return new Promise<IAudioBuffer>((resolve, reject) => {
            var buf = new Float32Array(this.read_unit);
            var func = () => {
                var size = this.ringbuf.read_some(buf);
                if (size == 0) {
                    // Nothing buffered yet — retry shortly.
                    window.setTimeout(() => {
                        func();
                    }, 10);
                    return;
                }
                this.in_flight = false;
                resolve({
                    timestamp: 0,
                    samples: buf.subarray(0, size),
                    transferable: true,
                });
            };
            func();
        });
    }

    /**
     * Stops capturing and releases audio-graph resources.
     * BUGFIX: the original body was empty, leaving the processor and source
     * nodes connected and the AudioContext open — the microphone stayed
     * captured (and onaudioprocess kept firing) until page unload.
     */
    close() {
        if (this.proc_node) {
            this.proc_node.onaudioprocess = null;
            this.proc_node.disconnect();
        }
        if (this.src_node)
            this.src_node.disconnect();
        if (this.context)
            this.context.close();
    }
}