interface CrunkerConstructorOptions {
  sampleRate: number
  concurrentNetworkRequests: number
}

type CrunkerInputTypes = string | File | Blob | undefined

export default class Crunker {
  private readonly _sampleRate: number
  private readonly _concurrentNetworkRequests: number
  private readonly _context: AudioContext

  constructor({ sampleRate, concurrentNetworkRequests = 200 }: Partial<CrunkerConstructorOptions> = {}) {
    this._context = this._createContext(sampleRate)
    sampleRate ||= this._context.sampleRate
    this._sampleRate = sampleRate
    this._concurrentNetworkRequests = concurrentNetworkRequests
  }

  private _createContext(sampleRate = 22050): AudioContext {
    // Fall back to vendor-prefixed constructors for older browsers.
    window.AudioContext = window.AudioContext || (window as any).webkitAudioContext || (window as any).mozAudioContext
    return new AudioContext({ sampleRate })
  }
  /**
   * Decode inputs (URLs, Files, or Blobs) into AudioBuffers, issuing network
   * requests in batches of `concurrentNetworkRequests`.
   */
  async fetchAudio(...filepaths: CrunkerInputTypes[]): Promise<(AudioBuffer | undefined)[]> {
    const buffers: (AudioBuffer | undefined)[] = []
    const groups = Math.ceil(filepaths.length / this._concurrentNetworkRequests)
    for (let i = 0; i < groups; i++) {
      const group = filepaths.slice(i * this._concurrentNetworkRequests, (i + 1) * this._concurrentNetworkRequests)
      buffers.push(...(await this._fetchAudio(...group)))
    }
    return buffers
  }
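
  // Example (hypothetical call site): inputs may be mixed, and `undefined`
  // slots pass straight through, so results stay positionally aligned:
  //   const [bgm, upload] = await crunker.fetchAudio("/audio/bgm.mp3", fileInput.files?.[0])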
  private async _fetchAudio(...filepaths: CrunkerInputTypes[]): Promise<(AudioBuffer | undefined)[]> {
    return await Promise.all(
      filepaths.map(async filepath => {
        if (!filepath) {
          return undefined
        }
        let buffer: ArrayBuffer
        if (filepath instanceof File || filepath instanceof Blob) {
          buffer = await filepath.arrayBuffer()
        } else {
          buffer = await fetch(filepath).then(response => {
            if (response.headers.has("Content-Type") && !response.headers.get("Content-Type")!.includes("audio/")) {
              console.warn(
                `Crunker: Attempted to fetch an audio file, but its MIME type is \`${
                  response.headers.get("Content-Type")!.split(";")[0]
                }\`. We'll try and continue anyway. (file: "${filepath}")`
              )
            }
            return response.arrayBuffer()
          })
        }
        // Gotcha: older Safari versions don't support the promise-based
        // decodeAudioData, so use the legacy callback signature instead.
        return await new Promise<AudioBuffer>((resolve, reject) => {
          this._context.decodeAudioData(buffer, resolve, reject)
        })
      })
    )
  }
  /**
   * Merge buffers into a single buffer, starting each one at its matching
   * time (in seconds).
   */
  mergeAudioBuffers(buffers: AudioBuffer[], times: number[]): AudioBuffer {
    if (buffers.length !== times.length) {
      throw new Error("buffers and times must have the same length")
    }
    // Size the output to fit the latest-ending buffer including its start
    // offset; sizing by the longest duration alone would silently drop
    // samples written past the end.
    const totalDuration = Math.max(...buffers.map((buffer, index) => times[index] + buffer.duration))
    const output = this._context.createBuffer(this._maxNumberOfChannels(buffers), Math.ceil(this._sampleRate * totalDuration), this._sampleRate)
    buffers.forEach((buffer, index) => {
      const offsetNum = Math.round(times[index] * this._sampleRate) // start offset in samples
      for (let channelNumber = 0; channelNumber < output.numberOfChannels; channelNumber++) {
        const outputData = output.getChannelData(channelNumber)
        // A source may be mono; in that case reuse channel 0 for every output channel.
        const bufferData = buffer.getChannelData(buffer.numberOfChannels < 2 ? 0 : channelNumber)
        for (let i = bufferData.length - 1; i >= 0; i--) {
          // Summed samples can leave [-1, 1] and clip audibly, so clamp the result.
          const combinedValue = outputData[i + offsetNum] + bufferData[i]
          outputData[i + offsetNum] = Math.max(-1, Math.min(1, combinedValue))
        }
      }
    })
    return output
  }
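
  // Example (hypothetical call site): mergeAudioBuffers([music, voice], [0, 2.5])
  // lays `voice` over `music` starting at 2.5 s. Buffers decoded through
  // fetchAudio() are resampled to this context's rate, so the offsets line up.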
  /**
   * Encode a buffer as 16-bit PCM WAV and wrap it in an <audio> element.
   */
  exportAudioElement(buffer: AudioBuffer, type = "audio/wav"): HTMLAudioElement {
    const recorded = this._interleave(buffer)
    const dataview = this._writeHeaders(recorded, buffer.numberOfChannels, buffer.sampleRate)
    const audioBlob = new Blob([dataview], { type })
    return this._renderAudioElement(audioBlob)
  }
  /**
   * Measure the leading silence of a buffer, in seconds.
   */
  calculateSilenceDuration(buffer: AudioBuffer) {
    const threshold = 0.01 // samples with an absolute value below this count as silence
    const sampleRate = buffer.sampleRate
    const channelData = buffer.getChannelData(0) // only inspect the first channel
    let silenceDuration = 0
    for (let i = 0; i < channelData.length; i++) {
      if (Math.abs(channelData[i]) > threshold) {
        break
      }
      silenceDuration++
    }
    // convert the sample count to seconds
    silenceDuration = silenceDuration / sampleRate
    return silenceDuration
  }
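
  // Example (hypothetical call site): to make audible content land exactly on
  // a cue point, schedule the clip earlier by its measured leading silence:
  //   const offset = Math.max(0, cueTime - crunker.calculateSilenceDuration(clip))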
  private _maxNumberOfChannels(buffers: AudioBuffer[]): number {
    return Math.max(...buffers.map(buffer => buffer.numberOfChannels))
  }

  private _maxDuration(buffers: AudioBuffer[]): number {
    return Math.max(...buffers.map(buffer => buffer.duration))
  }
  /**
   * Interleave planar channel data into a single [L, R, L, R, ...] stream,
   * the sample layout WAV expects.
   */
  private _interleave(input: AudioBuffer): Float32Array {
    if (input.numberOfChannels === 1) {
      return input.getChannelData(0)
    }
    const channels = []
    for (let i = 0; i < input.numberOfChannels; i++) {
      channels.push(input.getChannelData(i))
    }
    const length = channels.reduce((prev, channelData) => prev + channelData.length, 0)
    const result = new Float32Array(length)
    let index = 0
    let inputIndex = 0
    while (index < length) {
      channels.forEach(channelData => {
        result[index++] = channelData[inputIndex]
      })
      inputIndex++
    }
    return result
  }
  private _renderAudioElement(blob: Blob): HTMLAudioElement {
    const audio = document.createElement("audio")
    audio.src = this._renderURL(blob)
    audio.load()
    return audio
  }

  private _renderURL(blob: Blob): string {
    return (window.URL || window.webkitURL).createObjectURL(blob)
  }
  /**
   * Prepend a standard 44-byte RIFF/WAVE header (16-bit PCM) to the samples.
   */
  private _writeHeaders(buffer: Float32Array, numOfChannels: number, sampleRate: number): DataView {
    const bitDepth = 16
    const bytesPerSample = bitDepth / 8
    const sampleSize = numOfChannels * bytesPerSample // bytes per sample frame
    const fileHeaderSize = 8
    const chunkHeaderSize = 36
    const chunkDataSize = buffer.length * bytesPerSample
    const chunkTotalSize = chunkHeaderSize + chunkDataSize
    const arrayBuffer = new ArrayBuffer(fileHeaderSize + chunkTotalSize)
    const view = new DataView(arrayBuffer)
    this._writeString(view, 0, "RIFF")
    view.setUint32(4, chunkTotalSize, true) // file size minus the 8-byte RIFF header
    this._writeString(view, 8, "WAVE")
    this._writeString(view, 12, "fmt ")
    view.setUint32(16, 16, true) // fmt chunk size: 16 for PCM
    view.setUint16(20, 1, true) // audio format: 1 = uncompressed PCM
    view.setUint16(22, numOfChannels, true)
    view.setUint32(24, sampleRate, true)
    view.setUint32(28, sampleRate * sampleSize, true) // byte rate
    view.setUint16(32, sampleSize, true) // block align
    view.setUint16(34, bitDepth, true)
    this._writeString(view, 36, "data")
    view.setUint32(40, chunkDataSize, true)
    return this._floatTo16BitPCM(view, buffer, fileHeaderSize + chunkHeaderSize)
  }
  private _floatTo16BitPCM(dataview: DataView, buffer: Float32Array, offset: number): DataView {
    // Clamp each float to [-1, 1], then scale it to a signed 16-bit integer.
    for (let i = 0; i < buffer.length; i++, offset += 2) {
      const tmp = Math.max(-1, Math.min(1, buffer[i]))
      dataview.setInt16(offset, tmp < 0 ? tmp * 0x8000 : tmp * 0x7fff, true)
    }
    return dataview
  }
  private _writeString(dataview: DataView, offset: number, header: string): void {
    for (let i = 0; i < header.length; i++) {
      dataview.setUint8(offset + i, header.charCodeAt(i))
    }
  }
}
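
// Usage sketch, not part of the class above: the file URLs are placeholders
// and `demo` is a hypothetical helper. It fetches two clips, overlays the
// second starting 1.5 s in, and attaches the merged result to the page as a
// WAV-backed <audio> element.
export async function demo(): Promise<void> {
  const crunker = new Crunker({ sampleRate: 44100 })
  const [music, voice] = await crunker.fetchAudio("/audio/music.mp3", "/audio/voice.mp3")
  if (!music || !voice) throw new Error("failed to decode an input")
  const merged = crunker.mergeAudioBuffers([music, voice], [0, 1.5])
  document.body.appendChild(crunker.exportAudioElement(merged, "audio/wav"))
}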