@@ -0,0 +1,204 @@
+interface CrunkerConstructorOptions {
+  sampleRate: number
+  concurrentNetworkRequests: number
+}
+
+type CrunkerInputTypes = string | File | Blob
+
+export default class Crunker {
+  private readonly _sampleRate: number
+  private readonly _concurrentNetworkRequests: number
+  private readonly _context: AudioContext
+
+  constructor({ sampleRate, concurrentNetworkRequests = 200 }: Partial<CrunkerConstructorOptions> = {}) {
+    this._context = this._createContext(sampleRate)
+    // Fall back to the context's actual sample rate when none was provided.
+    sampleRate ||= this._context.sampleRate
+    this._sampleRate = sampleRate
+    this._concurrentNetworkRequests = concurrentNetworkRequests
+  }
+  private _createContext(sampleRate = 44_100): AudioContext {
+    // Shim vendor-prefixed constructors for older WebKit/Gecko browsers.
+    window.AudioContext = window.AudioContext || (window as any).webkitAudioContext || (window as any).mozAudioContext
+    return new AudioContext({ sampleRate })
+  }
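+  // Example (illustrative): `new Crunker({ sampleRate: 48000 })` forces 48 kHz
+  // processing, while `new Crunker()` adopts the context's default rate.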
+  /**
+   * Fetches one or more audio inputs (URL, File, or Blob) and decodes them
+   * into AudioBuffers, limiting the number of concurrent network requests.
+   */
+  async fetchAudio(...filepaths: CrunkerInputTypes[]): Promise<AudioBuffer[]> {
+    const buffers: AudioBuffer[] = []
+    const groups = Math.ceil(filepaths.length / this._concurrentNetworkRequests)
+    for (let i = 0; i < groups; i++) {
+      const group = filepaths.slice(i * this._concurrentNetworkRequests, (i + 1) * this._concurrentNetworkRequests)
+      buffers.push(...(await this._fetchAudio(...group)))
+    }
+    return buffers
+  }
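+  // Example (illustrative; file names are hypothetical): with
+  // concurrentNetworkRequests = 2, the following resolves in two sequential
+  // batches of two parallel requests, preserving input order:
+  //   const buffers = await crunker.fetchAudio("a.mp3", "b.mp3", "c.mp3", "d.mp3")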
+  private async _fetchAudio(...filepaths: CrunkerInputTypes[]): Promise<AudioBuffer[]> {
+    return await Promise.all(
+      filepaths.map(async filepath => {
+        let buffer: ArrayBuffer
+        if (filepath instanceof File || filepath instanceof Blob) {
+          buffer = await filepath.arrayBuffer()
+        } else {
+          buffer = await fetch(filepath).then(response => {
+            if (response.headers.has("Content-Type") && !response.headers.get("Content-Type")!.includes("audio/")) {
+              console.warn(
+                `Crunker: Attempted to fetch an audio file, but its MIME type is \`${
+                  response.headers.get("Content-Type")!.split(";")[0]
+                }\`. We'll try and continue anyway. (file: "${filepath}")`
+              )
+            }
+            return response.arrayBuffer()
+          })
+        }
+        /* Gotcha: older Safari versions do not support the promise-based
+           decodeAudioData, so use the legacy callback form here. */
+        return await new Promise<AudioBuffer>((resolve, reject) => {
+          this._context.decodeAudioData(
+            buffer,
+            decodedBuffer => resolve(decodedBuffer),
+            err => reject(err)
+          )
+        })
+      })
+    )
+  }
+  /**
+   * Merges the given buffers into one, starting each at its corresponding
+   * time offset (in seconds).
+   */
+  mergeAudioBuffers(buffers: AudioBuffer[], times: number[]): AudioBuffer {
+    if (buffers.length !== times.length) {
+      throw new Error("Crunker: the number of buffers must match the number of start times")
+    }
+    // The output must be long enough to hold the latest-ending buffer
+    // (start offset plus duration), not just the longest single buffer;
+    // otherwise offset buffers would be silently truncated.
+    const outputDuration = Math.max(...buffers.map((buffer, index) => times[index] + buffer.duration))
+    const output = this._context.createBuffer(this._maxNumberOfChannels(buffers), Math.ceil(this._sampleRate * outputDuration), this._sampleRate)
+    buffers.forEach((buffer, index) => {
+      const offsetNum = Math.round(times[index] * this._sampleRate) // start offset in samples
+      for (let channelNumber = 0; channelNumber < buffer.numberOfChannels; channelNumber++) {
+        const outputData = output.getChannelData(channelNumber)
+        const bufferData = buffer.getChannelData(channelNumber)
+        for (let i = bufferData.length - 1; i >= 0; i--) {
+          outputData[i + offsetNum] += bufferData[i]
+          // Summing can push samples above 1 or below -1, which clips
+          // audibly, so clamp the mixed sample into [-1, 1].
+          if (outputData[i + offsetNum] > 1) {
+            outputData[i + offsetNum] = 1
+          }
+          if (outputData[i + offsetNum] < -1) {
+            outputData[i + offsetNum] = -1
+          }
+        }
+      }
+    })
+
+    return output
+  }
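+  // Example (illustrative): merging two 1-second buffers with times [0, 0.5]
+  // produces a 1.5-second output; the second buffer starts at sample index
+  // Math.round(0.5 * sampleRate), and the overlapping half second is summed
+  // sample-by-sample, then clamped to [-1, 1].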
+  /**
+   * Exports a buffer as an HTMLAudioElement. The payload is WAV-encoded PCM
+   * regardless of the MIME type passed in `type`.
+   */
+  exportAudioElement(buffer: AudioBuffer, type = "audio/wav"): HTMLAudioElement {
+    const recorded = this._interleave(buffer)
+    const dataview = this._writeHeaders(recorded, buffer.numberOfChannels, buffer.sampleRate)
+    const audioBlob = new Blob([dataview], { type })
+    return this._renderAudioElement(audioBlob)
+  }
+  /**
+   * Calculates the duration (in seconds) of the leading silence in a buffer.
+   */
+  calculateSilenceDuration(buffer: AudioBuffer) {
+    const threshold = 0.01 // amplitudes below this are treated as silence
+    const sampleRate = buffer.sampleRate
+    const channelData = buffer.getChannelData(0) // only inspect the first channel
+    let silenceDuration = 0
+    for (let i = 0; i < channelData.length; i++) {
+      if (Math.abs(channelData[i]) > threshold) {
+        break
+      }
+      silenceDuration++
+    }
+    // Convert the sample count to seconds.
+    silenceDuration = silenceDuration / sampleRate
+    return silenceDuration
+  }
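+  // Example (illustrative): a clip whose first 0.75 s stays below the 0.01
+  // amplitude threshold returns 0.75, which callers can use to compensate
+  // for leading silence before merging.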
+  /**
+   * Converts an AudioBuffer to a WAV-encoded Blob.
+   */
+  audioBuffToBlob(buffer: AudioBuffer, type = "audio/wav") {
+    const recorded = this._interleave(buffer)
+    const dataview = this._writeHeaders(recorded, buffer.numberOfChannels, buffer.sampleRate)
+    return new Blob([dataview], { type })
+  }
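+  // Example (illustrative): the Blob can be offered as a download via an
+  // object URL:
+  //   const url = URL.createObjectURL(crunker.audioBuffToBlob(merged))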
+  private _maxNumberOfChannels(buffers: AudioBuffer[]): number {
+    return Math.max(...buffers.map(buffer => buffer.numberOfChannels))
+  }
+  private _maxDuration(buffers: AudioBuffer[]): number {
+    return Math.max(...buffers.map(buffer => buffer.duration))
+  }
+  // Interleaves planar channel data, e.g. stereo [L0, L1, ...] and [R0, R1, ...]
+  // become [L0, R0, L1, R1, ...] as required by WAV PCM.
+  private _interleave(input: AudioBuffer): Float32Array {
+    if (input.numberOfChannels === 1) {
+      return input.getChannelData(0)
+    }
+    const channels: Float32Array[] = []
+    for (let i = 0; i < input.numberOfChannels; i++) {
+      channels.push(input.getChannelData(i))
+    }
+    const length = channels.reduce((prev, channelData) => prev + channelData.length, 0)
+    const result = new Float32Array(length)
+    let index = 0
+    let inputIndex = 0
+    while (index < length) {
+      channels.forEach(channelData => {
+        result[index++] = channelData[inputIndex]
+      })
+      inputIndex++
+    }
+    return result
+  }
+  private _renderAudioElement(blob: Blob): HTMLAudioElement {
+    const audio = document.createElement("audio")
+    audio.src = this._renderURL(blob)
+    audio.load()
+    return audio
+  }
+  private _renderURL(blob: Blob): string {
+    return (window.URL || (window as any).webkitURL).createObjectURL(blob)
+  }
+  private _writeHeaders(buffer: Float32Array, numOfChannels: number, sampleRate: number): DataView {
+    const bitDepth = 16
+    const bytesPerSample = bitDepth / 8
+    const sampleSize = numOfChannels * bytesPerSample // block align
+    const fileHeaderSize = 8
+    const chunkHeaderSize = 36
+    const chunkDataSize = buffer.length * bytesPerSample
+    const chunkTotalSize = chunkHeaderSize + chunkDataSize
+    const arrayBuffer = new ArrayBuffer(fileHeaderSize + chunkTotalSize)
+    const view = new DataView(arrayBuffer)
+    this._writeString(view, 0, "RIFF")
+    view.setUint32(4, chunkTotalSize, true) // file size minus the 8-byte RIFF header
+    this._writeString(view, 8, "WAVE")
+    this._writeString(view, 12, "fmt ")
+    view.setUint32(16, 16, true) // fmt chunk size
+    view.setUint16(20, 1, true) // audio format: 1 = uncompressed PCM
+    view.setUint16(22, numOfChannels, true)
+    view.setUint32(24, sampleRate, true)
+    view.setUint32(28, sampleRate * sampleSize, true) // byte rate
+    view.setUint16(32, sampleSize, true) // block align
+    view.setUint16(34, bitDepth, true)
+    this._writeString(view, 36, "data")
+    view.setUint32(40, chunkDataSize, true)
+    return this._floatTo16BitPCM(view, buffer, fileHeaderSize + chunkHeaderSize)
+  }
+  private _floatTo16BitPCM(dataview: DataView, buffer: Float32Array, offset: number): DataView {
+    for (let i = 0; i < buffer.length; i++, offset += 2) {
+      // Clamp to [-1, 1], then scale to the signed 16-bit range.
+      const tmp = Math.max(-1, Math.min(1, buffer[i]))
+      dataview.setInt16(offset, tmp < 0 ? tmp * 0x8000 : tmp * 0x7fff, true)
+    }
+    return dataview
+  }
+  private _writeString(dataview: DataView, offset: number, header: string): void {
+    for (let i = 0; i < header.length; i++) {
+      dataview.setUint8(offset + i, header.charCodeAt(i))
+    }
+  }
+}
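+
+// Usage sketch (illustrative; file names are hypothetical, and browsers may
+// require a user gesture before an AudioContext is allowed to start):
+//   const crunker = new Crunker({ sampleRate: 44100 })
+//   const [a, b] = await crunker.fetchAudio("a.mp3", "b.mp3")
+//   const merged = crunker.mergeAudioBuffers([a, b], [0, 1.5]) // b starts at 1.5 s
+//   document.body.appendChild(crunker.exportAudioElement(merged))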