[room-controller] room.voice.nav() toggles AudioManager.launch() and AudioManager.kill()

[lib.audio-manager] added microphone voice filters + added debugger (every 10s) + added a local gain for each source read from the WS [toworkon] + can listen without microphone permissions
This commit is contained in:
xdrm-brackets 2018-04-06 12:43:49 +02:00
parent 10bd316093
commit 854adb244d
2 changed files with 204 additions and 66 deletions

View File

@ -5,124 +5,229 @@ export default class AudioManager{
constructor(){
/* (1) Initialise our AudioNodes
---------------------------------------------------------*/
/* (1) Build Audio Context */
this.ctx = new (window.AudioContext || window.webkitAudioContext)();
this.gain = this.ctx.createGain();
this.ctx = new (window.AudioContext || window.webkitAudioContext)();
/* (3) Create input (typically recorder) */
/* (2) Create the MASTER gain */
this.master = this.ctx.createGain();
/* (3) Initialise input (typically bound from recorder) */
this.input = null;
/* (4) Create network I/O controllers (WebSocket) */
this.network = {
out: this.ctx.createScriptProcessor(AudioManager.BUFFER_SIZE, 1, 1),
in: null // will contain NETWORK:IN source node
};
/* (5) Bind network routines */
this.network.out.onaudioprocess = this.send.bind(this);
/* (5) Specify node chains */
this.node = {
input: [ this.ctx.createAnalyser() ], // INPUT connects to it
netout: [] // INPUT chains through it until NETOUT
};
/* (6) Create output + bind gain */
/* (4) Shortcut our output */
this.output = this.ctx.destination;
/* (7) Initialise websocket */
/* (5) Connect MASTER gain to output */
this.master.connect(this.output);
/* (2) Initialise processing attributes
---------------------------------------------------------*/
/* (1) Container for our recorder */
this.recorder = null;
/* (2) Initialise filters */
this.filters = {
voice_clarity: this.ctx.createBiquadFilter(),
voice_fullness: this.ctx.createBiquadFilter(),
voice_presence: this.ctx.createBiquadFilter(),
voice_sss: this.ctx.createBiquadFilter()
};
/* (3) Set up our filters' parameters */
this.setUpFilters();
/* (4) Create network I/O controller (WebSocket) */
this.network = {
out: this.ctx.createScriptProcessor(AudioManager.BUFFER_SIZE, 1, 1)
};
/* (5) Bind network controller to send() function */
this.network.out.onaudioprocess = this.send.bind(this);
/* (6) Initialise websocket */
this.ws = null;
/* (9) Debug data */
this.dbg = {
interval: 10, // debug every ... second
def: {
packets_received: 0,
packets_sent: 0,
kB_received: 0,
kB_sent: 0
},
data: {
packets_received: 0,
packets_sent: 0,
kB_received: 0,
kB_sent: 0
}
};
setInterval(function(){
console.group('debug');
for( let k in this.data ){
console.log(`${this.data[k]} ${k}`)
this.data[k] = this.def[k]
}
console.groupEnd('debug');
}.bind(this.dbg), this.dbg.interval*1000);
}
/* (1) Binds an input stream
/* (2) Setup filters
*
---------------------------------------------------------*/
bind(){
setUpFilters(){
let current_node = null;
/* (1) Bind INPUT ------> NETWORK:OUT circuit
/* (1) Setup filter parameters
---------------------------------------------------------*/
current_node = this.input;
/* (1) Setup EQ#1 -> voice clarity */
this.filters.voice_clarity.type = 'peaking';
this.filters.voice_clarity.frequency.value = 3000;
this.filters.voice_clarity.Q.value = .8;
this.filters.voice_clarity.gain.value = 2;
/* (1) Connect INPUT to input list */
for( let node of this.node.input )
current_node.connect(node);
/* (2) Setup EQ#2 -> voice fullness */
this.filters.voice_fullness.type = 'peaking';
this.filters.voice_fullness.frequency.value = 200;
this.filters.voice_fullness.Q.value = .8;
this.filters.voice_fullness.gain.value = 2;
/* (2) Chain INPUT to input chain */
for( let node of this.node.netout ){
current_node.connect(node);
current_node = node;
}
/* (3) Setup EQ#3 -> reduce voice presence */
this.filters.voice_presence.type = 'peaking';
this.filters.voice_presence.frequency.value = 5000;
this.filters.voice_presence.Q.value = .8;
this.filters.voice_presence.gain.value = -2;
/* (3) Finally connect to NETWORK:OUT */
current_node.connect(this.network.out);
/* (4) Setup EQ#4 -> reduce 'sss' metallic sound */
this.filters.voice_sss.type = 'peaking';
this.filters.voice_sss.frequency.value = 7000;
this.filters.voice_sss.Q.value = .8;
this.filters.voice_sss.gain.value = -8;
/* (2) Bind NETWORK:IN ------> OUTPUT circuit
/* (2) Connect filters
---------------------------------------------------------*/
// WILL BE DONE ON receive()
/* (1) Connect clarity to fullness */
this.filters.voice_clarity.connect( this.filters.voice_fullness );
/* (2) Connect fullness to presence reduction */
this.filters.voice_fullness.connect( this.filters.voice_presence );
/* (3) Connect presence reduction to 'ss' removal */
this.filters.voice_presence.connect( this.filters.voice_sss );
/* (4) Connect last filter to MASTER gain */
this.filters.voice_sss.connect(this.master);
/* (1) Finally connect to OUTPUT */
// current_node.connect(this.gain);
this.gain.connect(this.output);
}
/* (3) Filter toggle
*
* @unlink<boolean> Whether to unlink filters (directly bind to output)
*
---------------------------------------------------------*/
linkFilters(unlink=false){
/* (1) Disconnect all by default */
this.input.disconnect();
/* (2) Get first filter */
let first_filter = this.filters.voice_clarity;
/* (3) If unlink -> connect directly to MASTER gain */
if( unlink === true )
return this.input.connect(this.master);
/* (4) If linking -> connect input to filter stack */
this.input.connect(first_filter);
}
/* (2) Binds an input stream
*
---------------------------------------------------------*/
bindRecorderStream(_stream){
/* (1) Bind audio stream */
/* (1) Bind audio stream
---------------------------------------------------------*/
this.input = this.ctx.createMediaStreamSource(_stream);
/* (2) By default: link through filters to output
---------------------------------------------------------*/
this.linkFilters();
}
/* (3) Sharing process implementation
/* (3) Send chunks (Float32Array)
*
---------------------------------------------------------*/
send(_audioprocess){
/*DEBUG*///console.warn('time of', 16*2048/8, 'bytes in ', new Date().getTime()-window.timer);
/*DEBUG*///window.timer = new Date().getTime();
let buf32 = new Float32Array(AudioManager.BUFFER_SIZE);
// _audioprocess.inputBuffer.copyFromChannel(buf32, 0);
_audioprocess.inputBuffer.copyFromChannel(buf32, 0);
let buf16 = this.f32toi16(buf32);
this.ws.send(buf16);
this.dbg.data.packets_sent++;
this.dbg.data.kB_sent += buf16.length * 16. / 8 / 1024;
}
/* (4) Play received chunks (Int16Array)
*
---------------------------------------------------------*/
receive(_buffer){
/* (1) Convert to Float32Array */
let buf32 = this.i16tof32(_buffer);
/* (2) Create source node */
this.network.in = new AudioBufferSourceNode(this.ctx, {
playbackRate: 1.1
});
let source = this.ctx.createBufferSource();
/* (3) Create buffer and dump data */
let input_buffer = this.ctx.createBuffer(1, AudioManager.BUFFER_SIZE, this.ctx.sampleRate);
input_buffer.getChannelData(0).set(buf32);
/* (4) Pass buffer to source node */
this.network.in.buffer = input_buffer;
/* (4) Bind buffer to source node */
source.buffer = input_buffer;
/* (5) Connect and play audio */
this.network.in.connect(this.gain);
this.network.in.start(0);
/* (5) Create a dedicated *muted* gain */
let gain = this.ctx.createGain();
/* (6) source -> gain -> MASTER + play() */
source.connect(gain);
gain.connect(this.master);
/* (7) Start playing */
source.start(this.ctx.currentTime);
this.dbg.data.packets_received++;
this.dbg.data.kB_received += _buffer.length * 16. / 8 / 1024;
}
@ -203,6 +308,11 @@ export default class AudioManager{
}.bind(this);
/* (3) Debug */
this.ws.onopen = () => console.warn('[audio] websocket connected');
this.ws.onclose = () => console.warn('[audio] websocket closed');
}
@ -215,32 +325,49 @@ export default class AudioManager{
/* (1) Start websocket */
this.wsconnect(wsAddress);
window.recorder = null;
if( navigator.mediaDevices && navigator.mediaDevices.getUserMedia ){
navigator.mediaDevices.getUserMedia({ audio: true })
.then( stream => {
recorder = new MediaRecorder(stream);
this.recorder = new MediaRecorder(stream);
this.bindRecorderStream(stream);
this.bind();
recorder.onstart = () => console.warn('start');
recorder.onstop = () => {
recorder.stream.getTracks().map( t => t.stop() );
this.recorder.onstart = () => console.warn('[audio] recording');
this.recorder.onstop = () => {
this.recorder.stream.getTracks().map( t => t.stop() );
this.recorder = null;
console.warn('[audio] stopped recording');
};
// start recording
recorder.start();
this.recorder.start();
})
.catch( e => console.warn('error getting audio stream', e) );
.catch( e => console.warn('[audio] microphone permission issue', e) );
}else
console.warn('getUserMedia() not supported');
console.warn('[audio] microphone not supported');
}
/* (x) Shut down microphone + kill all
*
---------------------------------------------------------*/
kill(){
/* (1) Close websocket */
this.ws.close();
/* (2) Stop recording */
this.recorder.stop();
/* (3) Volume 0 */
this.master.gain.setValueAtTime(0, this.ctx.currentTime);
}
}

View File

@ -50,6 +50,17 @@ export default class RoomController{
if( type === 'text' && window.csock instanceof wscd )
csock.send({ buffer: { rid: room.id } });
/* (6) If 'voice' room -> launch audio */
if( type === 'voice' ){
if( typeof this[type].current === 'number' )
AudioManager.launch();
else
AudioManager.kill();
}
/* (6) Update buffer */
this._buffer[type] = {};
for( let r of this[type].list )