Notes on H5 Audio Recording in Practice (Preact)
Contents
Getting PCM Data
Processing PCM Data
Float32 to Int16
ArrayBuffer to Base64
Playing PCM Files
Resampling
PCM to MP3
PCM to WAV
Short-Time Energy Calculation
Optimizing Performance with Web Workers
Audio Storage (IndexedDB)
Enabling WebRTC in WebView
Getting PCM Data
Check out the DEMO, preferably in a regular browser or in WeChat; WebRTC inside a WebView needs extra configuration before the permission request will be granted (see the last section).
https://github.com/deepkolos/pc-pcm-wave
Sample code:
const mediaStream = await window.navigator.mediaDevices.getUserMedia({
  audio: {
    // sampleRate: 44100, // sample rate; has no effect, resample manually instead
    channelCount: 1, // number of channels
    // echoCancellation: true, // echo cancellation
    // noiseSuppression: true, // noise suppression; works well in practice
  },
})
const audioContext = new window.AudioContext()
const inputSampleRate = audioContext.sampleRate
const mediaNode = audioContext.createMediaStreamSource(mediaStream)
if (!audioContext.createScriptProcessor) {
  audioContext.createScriptProcessor = audioContext.createJavaScriptNode
}
// create a ScriptProcessorNode
const jsNode = audioContext.createScriptProcessor(4096, 1, 1)
jsNode.connect(audioContext.destination)
jsNode.onaudioprocess = (e) => {
  // e.inputBuffer.getChannelData(0) (left channel)
  // for stereo, get the right channel via e.inputBuffer.getChannelData(1)
}
mediaNode.connect(jsNode)
The flow in brief:
start=>start: Start
getUserMedia=>operation: Get MediaStream
audioContext=>operation: Create AudioContext
scriptNode=>operation: Create scriptNode and attach it to the AudioContext
onaudioprocess=>operation: Set onaudioprocess and process the data
end=>end: End
start->getUserMedia->audioContext->scriptNode->onaudioprocess->end
To stop recording, just disconnect the nodes attached to the AudioContext; merging the stored per-frame buffers then yields the PCM data.
jsNode.disconnect()
mediaNode.disconnect()
jsNode.onaudioprocess = null
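
Putting the two steps together, a minimal sketch (audioBuffers and stopAndGetPcm are illustrative names of my own; mergeArray is defined in the PCM-to-WAV section below):
const audioBuffers = []
jsNode.onaudioprocess = e => {
  // store a copy of each frame while recording
  audioBuffers.push(e.inputBuffer.getChannelData(0).slice(0))
}

function stopAndGetPcm() {
  jsNode.disconnect()
  mediaNode.disconnect()
  jsNode.onaudioprocess = null
  return mergeArray(audioBuffers) // the whole recording as one Float32Array
}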
Processing PCM Data
The PCM data obtained via WebRTC is in Float32 format. For a two-channel recording, the channels also have to be merged:
const leftDataList = [];
const rightDataList = [];
function onAudioProcess(event) {
  // one frame of PCM audio data
  let audioBuffer = event.inputBuffer;
  leftDataList.push(audioBuffer.getChannelData(0).slice(0));
  rightDataList.push(audioBuffer.getChannelData(1).slice(0));
}

// interleave the left- and right-channel samples
function interleaveLeftAndRight(left, right) {
  let totalLength = left.length + right.length;
  let data = new Float32Array(totalLength);
  for (let i = 0; i < left.length; i++) {
    let k = i * 2;
    data[k] = left[i];
    data[k + 1] = right[i];
  }
  return data;
}

Float32 to Int16

const float32 = new Float32Array(1)
const int16 = Int16Array.from(
  float32.map(x => (x > 0 ? x * 0x7fff : x * 0x8000)),
)
ArrayBuffer to Base64
Note: browsers also have a btoa() function that produces Base64, but its argument must be a string; a buffer argument is first run through toString() and then encoded, and playing the decoded result with ffplay sounds quite harsh.
The base64-arraybuffer package does the job:
import { encode } from 'base64-arraybuffer'

const float32 = new Float32Array(1)
const int16 = Int16Array.from(
  float32.map(x => (x > 0 ? x * 0x7fff : x * 0x8000)),
)
console.log(encode(int16.buffer))
To verify the Base64 is correct, convert it back into an Int16 PCM file under Node and play that with ffplay to check that the audio sounds right.
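
A minimal Node sketch of that check, assuming the Base64 was saved to a file named audio.base64 (both file names are arbitrary):
// decode.js — rebuild a playable PCM file from the dumped Base64
const fs = require('fs')

const base64String = fs.readFileSync('audio.base64', 'utf8').trim()
fs.writeFileSync('test.pcm', Buffer.from(base64String, 'base64'))
// then: ffplay -f s16le -ar 16000 -ac 1 test.pcm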
Playing PCM Files
# mono, sample rate 16000, Int16
ffplay -f s16le -ar 16k -ac 1 test.pcm
# stereo, sample rate 48000, Float32
ffplay -f f32le -ar 48000 -ac 2 test.pcm
Resampling / Adjusting the Sample Rate
Although getUserMedia accepts a sampleRate constraint, it has no effect even in the latest Chrome, so resampling has to be done by hand.
const mediaStream = await window.navigator.mediaDevices.getUserMedia({
  audio: {
    // sampleRate: 44100, // sample rate; setting it has no effect
    channelCount: 1, // number of channels
    // echoCancellation: true, // echo cancellation
    // noiseSuppression: true, // noise suppression; works well in practice
  },
})
The wave-resampler package takes care of it:
import { resample } from 'wave-resampler'

const inputSampleRate = 44100
const outputSampleRate = 16000
const resampledBuffers = resample(
  // the merged array of per-frame buffers from onAudioProcess
  mergeArray(audioBuffers),
  inputSampleRate,
  outputSampleRate,
)
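
As a rough sanity check (my own, not from the library docs), the output length should scale by the rate ratio:
// e.g. 44100 -> 16000 shrinks the sample count by outputSampleRate / inputSampleRate
console.log(
  resampledBuffers.length,
  Math.round(mergeArray(audioBuffers).length * (outputSampleRate / inputSampleRate)),
)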
PCM to MP3
import { Mp3Encoder } from 'lamejs'

const kbps = 128 // target bitrate
const mp3Data = []
const sampleBlockSize = 576 * 10 // working block size, a multiple of 576
const mp3Encoder = new Mp3Encoder(1, outputSampleRate, kbps)
const samples = float32ToInt16(
  audioBuffers,
  inputSampleRate,
  outputSampleRate,
)

let remaining = samples.length
for (let i = 0; remaining > 0; i += sampleBlockSize) {
  const left = samples.subarray(i, i + sampleBlockSize)
  const mp3buf = mp3Encoder.encodeBuffer(left)
  if (mp3buf.length > 0) mp3Data.push(new Int8Array(mp3buf))
  remaining -= sampleBlockSize
}
mp3Data.push(new Int8Array(mp3Encoder.flush()))
console.log(mp3Data)

// helper
function float32ToInt16(audioBuffers, inputSampleRate, outputSampleRate) {
  const float32 = resample(
    // the merged array of per-frame buffers from onAudioProcess
    mergeArray(audioBuffers),
    inputSampleRate,
    outputSampleRate,
  )
  const int16 = Int16Array.from(
    float32.map(x => (x > 0 ? x * 0x7fff : x * 0x8000)),
  )
  return int16
}
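
mp3Data ends up as an array of Int8Array chunks; a minimal sketch of wrapping them in a Blob for playback (plain browser APIs, nothing lamejs-specific):
// assemble the encoded chunks into a playable Blob
const mp3Blob = new Blob(mp3Data, { type: 'audio/mp3' })
new Audio(URL.createObjectURL(mp3Blob)).play()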
lamejs handles the encoding, but it is fairly large (160+ KB); if you don't need to persist the audio, the WAV format is an option:
> ls -alh
-rwxrwxrwx 1 root root 95K Apr 22 12:45 12s.mp3*
-rwxrwxrwx 1 root root 1.1M Apr 22 12:44 12s.wav*
-rwxrwxrwx 1 root root 235K Apr 22 12:41 30s.mp3*
-rwxrwxrwx 1 root root 2.6M Apr 22 12:40 30s.wav*
-rwxrwxrwx 1 root root 63K Apr 22 12:49 8s.mp3*
-rwxrwxrwx 1 root root 689K Apr 22 12:48 8s.wav*
PCM to WAV
function mergeArray(list) {
  const length = list.length * list[0].length
  const data = new Float32Array(length)
  let offset = 0
  for (let i = 0; i < list.length; i++) {
    data.set(list[i], offset)
    offset += list[i].length
  }
  return data
}

function writeUTFBytes(view, offset, string) {
  var lng = string.length
  for (let i = 0; i < lng; i++) {
    view.setUint8(offset + i, string.charCodeAt(i))
  }
}

function createWavBuffer(audioData, sampleRate = 44100, channels = 1) {
  const WAV_HEAD_SIZE = 44
  const buffer = new ArrayBuffer(audioData.length * 2 + WAV_HEAD_SIZE)
  // a DataView is needed to manipulate the buffer
  const view = new DataView(buffer)
  // write the WAV header
  // RIFF chunk descriptor/identifier
  writeUTFBytes(view, 0, 'RIFF')
  // RIFF chunk length (total size minus the 8-byte RIFF header)
  view.setUint32(4, 36 + audioData.length * 2, true)
  // RIFF type
  writeUTFBytes(view, 8, 'WAVE')
  // format chunk identifier ('fmt' padded with a space to 4 bytes)
  // FMT sub-chunk
  writeUTFBytes(view, 12, 'fmt ')
  // format chunk length
  view.setUint32(16, 16, true)
  // sample format (raw PCM)
  view.setUint16(20, 1, true)
  // channel count
  view.setUint16(22, channels, true)
  // sample rate
  view.setUint32(24, sampleRate, true)
  // byte rate (sample rate * block align)
  view.setUint32(28, sampleRate * channels * 2, true)
  // block align (channel count * bytes per sample)
  view.setUint16(32, channels * 2, true)
  // bits per sample
  view.setUint16(34, 16, true)
  // data sub-chunk
  // data chunk identifier
  writeUTFBytes(view, 36, 'data')
  // data chunk length
  view.setUint32(40, audioData.length * 2, true)
  // write the PCM samples
  let index = 44
  const volume = 1
  const { length } = audioData
  for (let i = 0; i < length; i++) {
    view.setInt16(index, audioData[i] * (0x7fff * volume), true)
    index += 2
  }
  return buffer
}

// expects the merged array of per-frame buffers from onAudioProcess
createWavBuffer(mergeArray(audioBuffers))

WAV is essentially PCM plus a header describing the audio.

Simple Short-Time Energy Calculation

function shortTimeEnergy(audioData) {
  let sum = 0
  const energy = []
  const { length } = audioData
  for (let i = 0; i < length; i++) {
    sum += audioData[i] ** 2
    if ((i + 1) % 256 === 0) {
      energy.push(sum)
      sum = 0
    } else if (i === length - 1) {
      energy.push(sum)
    }
  }
  return energy
}

Because the results vary widely with each device's recording gain, and the raw values are large, a simple ratio is used to tell voice from noise. Check out the DEMO.

const NoiseVoiceWatershedWave = 2.3
const energy = shortTimeEnergy(e.inputBuffer.getChannelData(0).slice(0))
const avg = energy.reduce((a, b) => a + b) / energy.length
const nextState = Math.max(...energy) / avg > NoiseVoiceWatershedWave ? 'voice' : 'noise'
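
A sketch of how this decision might run live inside onaudioprocess (setSpeaking is a hypothetical UI callback, e.g. a Preact state setter; shortTimeEnergy and NoiseVoiceWatershedWave come from the snippet above):
jsNode.onaudioprocess = e => {
  const frame = e.inputBuffer.getChannelData(0).slice(0)
  audioBuffers.push(frame) // keep collecting PCM as before
  const energy = shortTimeEnergy(frame)
  const avg = energy.reduce((a, b) => a + b) / energy.length
  // the ratio of peak block energy to the average marks voiced frames
  setSpeaking(Math.max(...energy) / avg > NoiseVoiceWatershedWave)
}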
Optimizing Performance with Web Workers
Audio data is bulky, so the processing can be moved into a Web Worker to keep the UI thread from janking.
In a Webpack project, Web Workers are straightforward: just install worker-loader.
preact.config.js
export default (config, env, helpers) => {
  config.module.rules.push({
    test: /\.worker\.js$/,
    use: { loader: 'worker-loader', options: { inline: true } },
  })
}
recorder.worker.js
self.addEventListener('message', event => {
  console.log(event.data)
  // convert to MP3 / Base64 / WAV, etc.
  const output = ''
  self.postMessage(output)
})
Using the worker:
async function toMP3(audioBuffers, inputSampleRate, outputSampleRate = 16000) {
  const { default: Worker } = await import('./recorder.worker')
  const worker = new Worker()
  // minimal usage; a real project can create the worker when the recorder
  // is instantiated, and use several instances if concurrency is needed
  return new Promise(resolve => {
    worker.postMessage({
      audioBuffers: audioBuffers,
      inputSampleRate: inputSampleRate,
      outputSampleRate: outputSampleRate,
      type: 'mp3',
    })
    worker.onmessage = event => resolve(event.data)
  })
}
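
A caveat on message passing worth noting (my addition): postMessage structured-clones the frame arrays, which copies them. Passing the underlying ArrayBuffers as a transfer list avoids the copy, at the cost of the frames becoming unusable on the main thread afterwards:
worker.postMessage(
  { audioBuffers, inputSampleRate, outputSampleRate, type: 'mp3' },
  audioBuffers.map(frame => frame.buffer), // transfer instead of copying
)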
Audio Storage
Browsers offer two persistent stores, LocalStorage and IndexedDB. LocalStorage is the more common one but only stores strings, whereas IndexedDB can store Blobs directly, so IndexedDB is the better fit; with LocalStorage the audio would have to be Base64-encoded, making it even larger.
To avoid eating too much of the user's space, MP3 is the format chosen for storage (see the size comparison in the PCM-to-MP3 section above).
A simple IndexedDB wrapper follows; if you are comfortable with backend work, an ORM library can make reads and writes more convenient.
const indexedDB =
  window.indexedDB ||
  window.webkitIndexedDB ||
  window.mozIndexedDB ||
  window.OIndexedDB ||
  window.msIndexedDB
const IDBTransaction =
  window.IDBTransaction ||
  window.webkitIDBTransaction ||
  window.OIDBTransaction ||
  window.msIDBTransaction
const readWriteMode =
  typeof IDBTransaction.READ_WRITE === 'undefined'
    ? 'readwrite'
    : IDBTransaction.READ_WRITE

const dbVersion = 1
const storeDefault = 'mp3'

let dbLink

function initDB(store) {
  return new Promise((resolve, reject) => {
    if (dbLink) return resolve(dbLink)

    // create/open the database
    const request = indexedDB.open('audio', dbVersion)
    request.onsuccess = event => {
      const db = request.result
      db.onerror = event => {
        reject(event)
      }
      if (db.version === dbVersion) resolve((dbLink = db))
    }
    request.onerror = event => {
      reject(event)
    }
    // runs when the database is first created or the version changes
    request.onupgradeneeded = event => {
      dbLink = event.target.result
      const { transaction } = event.target
      if (!dbLink.objectStoreNames.contains(store)) {
        dbLink.createObjectStore(store)
      }
      transaction.oncomplete = event => {
        // now the store is available to be populated
        resolve(dbLink)
      }
    }
  })
}

export const writeIDB = async (name, blob, store = storeDefault) => {
  const db = await initDB(store)
  const transaction = db.transaction([store], readWriteMode)
  const objStore = transaction.objectStore(store)
  return new Promise((resolve, reject) => {
    const request = objStore.put(blob, name)
    request.onsuccess = event => resolve(event)
    request.onerror = event => reject(event)
    transaction.commit && transaction.commit()
  })
}

export const readIDB = async (name, store = storeDefault) => {
  const db = await initDB(store)
  const transaction = db.transaction([store], readWriteMode)
  const objStore = transaction.objectStore(store)
  return new Promise((resolve, reject) => {
    const request = objStore.get(name)
    request.onsuccess = event => resolve(event.target.result)
    request.onerror = event => reject(event)
    transaction.commit && transaction.commit()
  })
}

export const clearIDB = async (store = storeDefault) => {
  const db = await initDB(store)
  const transaction = db.transaction([store], readWriteMode)
  const objStore = transaction.objectStore(store)
  return new Promise((resolve, reject) => {
    const request = objStore.clear()
    request.onsuccess = event => resolve(event)
    request.onerror = event => reject(event)
    transaction.commit && transaction.commit()
  })
}
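
A quick usage sketch of the wrapper (the key name is illustrative; mp3Blob is the Blob assembled in the PCM-to-MP3 section):
// store a recording, read it back, and play it
await writeIDB('recording-1', mp3Blob)
const stored = await readIDB('recording-1')
new Audio(URL.createObjectURL(stored)).play()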
Enabling WebRTC in WebView
See WebView WebRTC not working:
webView.setWebChromeClient(new WebChromeClient() {
    @TargetApi(Build.VERSION_CODES.LOLLIPOP)
    @Override
    public void onPermissionRequest(final PermissionRequest request) {
        request.grant(request.getResources());
    }
});
Closing
The above is a short summary of my own recording project; I hope it helps anyone who needs to record audio on the web. A later post will cover the wave effect used in the DEMO.
References
https://juejin.im/post/5b8bf7e3e51d4538c210c6b0
https://blog.addpipe.com/recording-audio-in-the-browser-using-pure-html5-and-minimal-javascript/
https://stackoverflow.com/questions/38917751/webview-webrtc-not-working
https://juejin.im/post/5b7939a46fb9a01a15728272
https://www.cnblogs.com/xingshansi/p/6815217.html
https://blog.csdn.net/qq_39516859/article/details/81084469
https://www.cnblogs.com/CoderTian/p/6657844.html
https://blog.csdn.net/qcyfred/article/details/53006847