Recording and Downsampling Audio to a 16K Sample Rate in JS

Background

Reviewing for the postgraduate entrance exam is stressful, so I'm writing a bit of code to unwind. Also, I'd never touched audio encoding before…

Getting Started

Goal

This involves speech recognition from JS; for recording I use Recorder.js. The recognition platforms only accept audio at a 16K/8K sample rate, while JS recording defaults to a 44.1K sample rate, so the audio has to be downsampled.

Options

  1. ffmpeg.js (the JS port of ffmpeg): after a quick look it seems a bit cumbersome to use and the library is large, so I set it aside for now.
  2. Downsample manually.

Manual Downsampling

I referred to http://www.cnblogs.com/blqw/p/3782420.html, but its goal isn't exactly the same as mine, so I reworked it on that basis.
The changes focus on two functions: interleave and encodeWAV.
First, declare a newSampleRate near the top:

var newSampleRate = 16000;

Original version

// Original version
function interleave(inputL, inputR) {
  var length = inputL.length + inputR.length;
  var result = new Float32Array(length);

  var index = 0,
    inputIndex = 0;

  while (index < length) {
    result[index++] = inputL[inputIndex];
    result[index++] = inputR[inputIndex];
    inputIndex++;
  }
  return result;
}
function encodeWAV(samples) {
  var buffer = new ArrayBuffer(44 + samples.length * 2);
  var view = new DataView(buffer);

  /* RIFF identifier */
  writeString(view, 0, 'RIFF');
  /* RIFF chunk length */
  view.setUint32(4, 36 + samples.length * 2, true);
  /* RIFF type */
  writeString(view, 8, 'WAVE');
  /* format chunk identifier */
  writeString(view, 12, 'fmt ');
  /* format chunk length */
  view.setUint32(16, 16, true);
  /* sample format (raw) */
  view.setUint16(20, 1, true);
  /* channel count */
  view.setUint16(22, numChannels, true);
  /* sample rate */
  view.setUint32(24, sampleRate, true);
  /* byte rate (sample rate * block align) */
  view.setUint32(28, sampleRate * 4, true);
  /* block align (channel count * bytes per sample) */
  view.setUint16(32, numChannels * 2, true);
  /* bits per sample */
  view.setUint16(34, 16, true);
  /* data chunk identifier */
  writeString(view, 36, 'data');
  /* data chunk length */
  view.setUint32(40, samples.length * 2, true);

  floatTo16BitPCM(view, 44, samples);

  return view;
}

Modified mono version (most speech recognition services only support mono)

// Modified: mono
function interleave(inputL) {
  // Decimation ratio: source rate / target rate
  var compression = 44100 / newSampleRate;
  var length = inputL.length / compression;
  console.log('length:' + length);
  var result = new Float32Array(length);

  var index = 0,
    inputIndex = 0;

  // Keep one input sample out of every `compression`
  while (index < length) {
    result[index++] = inputL[parseInt(inputIndex)];
    inputIndex += compression;
  }
  return result;
}
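Picking one sample out of every `compression` like this is simple, but with no low-pass filtering it can alias, which may account for part of the roughness mentioned at the end. As a rough alternative (my own sketch, not code from the referenced post), averaging each window is slightly gentler:

// Sketch: average each window instead of dropping samples (my addition)
function downsampleAveraging(inputL) {
  var compression = 44100 / newSampleRate;
  var length = Math.floor(inputL.length / compression);
  var result = new Float32Array(length);

  for (var i = 0; i < length; i++) {
    // Average all input samples that fall into this output slot
    var start = Math.floor(i * compression);
    var end = Math.min(Math.floor((i + 1) * compression), inputL.length);
    var sum = 0;
    for (var j = start; j < end; j++) {
      sum += inputL[j];
    }
    result[i] = sum / (end - start);
  }
  return result;
}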
// Modified: mono version
function encodeWAV(samples) {
  var buffer = new ArrayBuffer(44 + samples.length * 2);
  var view = new DataView(buffer);

  numChannels = 1; // force mono

  // 16-bit PCM (must match floatTo16BitPCM below); block align = channels * bytes per sample
  let bitPerSample = 16,
    blockAlign = numChannels * (bitPerSample / 8);

  /* RIFF identifier */
  writeString(view, 0, 'RIFF');
  /* RIFF chunk length */
  view.setUint32(4, 36 + samples.length * 2, true);
  /* RIFF type */
  writeString(view, 8, 'WAVE');
  /* format chunk identifier */
  writeString(view, 12, 'fmt ');
  /* format chunk length */
  view.setUint32(16, 16, true);
  /* sample format (raw) */
  view.setUint16(20, 1, true);
  /* channel count */
  view.setUint16(22, numChannels, true);
  /* sample rate */
  view.setUint32(24, newSampleRate, true);
  /* byte rate (sample rate * block align) */
  view.setUint32(28, newSampleRate * blockAlign, true);
  /* block align (channel count * bytes per sample) */
  view.setUint16(32, blockAlign, true);
  /* bits per sample */
  view.setUint16(34, bitPerSample, true);
  /* data chunk identifier */
  writeString(view, 36, 'data');
  /* data chunk length */
  view.setUint32(40, samples.length * 2, true);

  floatTo16BitPCM(view, 44, samples);

  return view;
}
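For context, this is roughly how the two functions fit together in the worker's export path. It is only a sketch: mergeBuffers, recBuffersL, recLength, and self.postMessage are the names used by the Recorder.js worker that the referenced post builds on, so check them against your own copy.

// Sketch: wiring the mono path together (worker-side; names assumed from the Recorder.js worker)
function exportMonoWAV(type) {
  var bufferL = mergeBuffers(recBuffersL, recLength); // raw 44.1K mono samples
  var downsampled = interleave(bufferL);              // decimate to 16K
  var dataview = encodeWAV(downsampled);              // WAV header + 16-bit PCM
  var audioBlob = new Blob([dataview], { type: type });
  self.postMessage(audioBlob);
}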

Modified stereo version

// Modified: stereo version
function interleave(inputL, inputR) {
  // Decimation ratio: source rate / target rate
  var compression = 44100 / newSampleRate;
  var length = (inputL.length + inputR.length) / compression;
  console.log('length:' + length);
  var result = new Float32Array(length);

  var index = 0,
    inputIndex = 0;

  // Write an L/R pair, then skip ahead `compression` input frames
  while (index < length) {
    result[index++] = inputL[parseInt(inputIndex)];
    result[index++] = inputR[parseInt(inputIndex)];
    inputIndex += compression;
  }
  return result;
}
// Modified: stereo version
function encodeWAV(samples) {
  var buffer = new ArrayBuffer(44 + samples.length * 2);
  var view = new DataView(buffer);

  numChannels = 2; // force stereo

  // 16-bit PCM; block align = channels * bytes per sample
  let bitPerSample = 16,
    blockAlign = numChannels * (bitPerSample / 8);

  /* RIFF identifier */
  writeString(view, 0, 'RIFF');
  /* RIFF chunk length */
  view.setUint32(4, 36 + samples.length * 2, true);
  /* RIFF type */
  writeString(view, 8, 'WAVE');
  /* format chunk identifier */
  writeString(view, 12, 'fmt ');
  /* format chunk length */
  view.setUint32(16, 16, true);
  /* sample format (raw) */
  view.setUint16(20, 1, true);
  /* channel count */
  view.setUint16(22, numChannels, true);
  /* sample rate */
  view.setUint32(24, newSampleRate, true);
  /* byte rate (sample rate * block align) */
  view.setUint32(28, newSampleRate * blockAlign, true);
  /* block align (channel count * bytes per sample) */
  view.setUint16(32, blockAlign, true);
  /* bits per sample */
  view.setUint16(34, bitPerSample, true);
  /* data chunk identifier */
  writeString(view, 36, 'data');
  /* data chunk length */
  view.setUint32(40, samples.length * 2, true);

  floatTo16BitPCM(view, 44, samples);

  return view;
}

That said, the final result still seems to have some flaws… no guarantee it works… I'll dig into it further when I have time.
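One likely contributor, though I haven't verified it: 44100 is hardcoded as the source rate above, but the browser's AudioContext can actually run at 48000 or another rate, and Recorder.js passes the real rate into its worker at init. Deriving the ratio from that value should be safer (a sketch, assuming your worker keeps the rate in a sampleRate variable like the referenced code does):

// Sketch: use the actual recording rate instead of assuming 44.1K
var compression = sampleRate / newSampleRate;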
