|
@@ -0,0 +1,725 @@
|
|
|
+package com.cooleshow.metronome.Utils;
|
|
|
+
|
|
|
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.media.PlaybackParams;
import android.os.Build;
import android.util.Log;

import com.cooleshow.base.utils.ConvertUtils;
import com.cooleshow.base.utils.LOG;
import com.cooleshow.base.utils.Utils;
import com.cooleshow.metronome.constants.MetronomeType;
import com.cooleshow.metronome.constants.QuarterNoteRhythmType;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.Arrays;

import io.reactivex.rxjava3.android.schedulers.AndroidSchedulers;
import io.reactivex.rxjava3.annotations.NonNull;
import io.reactivex.rxjava3.core.Observable;
import io.reactivex.rxjava3.core.ObservableEmitter;
import io.reactivex.rxjava3.core.ObservableOnSubscribe;
import io.reactivex.rxjava3.core.Observer;
import io.reactivex.rxjava3.disposables.Disposable;
import io.reactivex.rxjava3.schedulers.Schedulers;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Author by pq, Date on 2022/10/26.
|
|
|
+ */
|
|
|
+public class AudioTrackManager2 {
|
|
|
+ private static final int DEFAULT_STREAM_TYPE = AudioManager.STREAM_MUSIC;
|
|
|
+ private static final int DEFAULT_PLAY_MODE = AudioTrack.MODE_STREAM;
|
|
|
+ private static final int DEFAULT_CHANNEL = AudioFormat.CHANNEL_OUT_MONO;
|
|
|
+
|
|
|
+
|
|
|
+ AudioTrack mAudioTrack;
|
|
|
+
|
|
|
+ /**
|
|
|
+ * 总长度
|
|
|
+ **/
|
|
|
+ int length;
|
|
|
+ /**
|
|
|
+ * 是否循环播放
|
|
|
+ */
|
|
|
+ private boolean ISPLAYSOUND = false;
|
|
|
+ private volatile static AudioTrackManager2 mInstance;
|
|
|
+
|
|
|
+ private float[] mWeghts;
|
|
|
+
|
|
|
+ private long totalDuration = -1;
|
|
|
+ private int minBufferSize;
|
|
|
+ private byte[][] mAllAudioData;
|
|
|
+
|
|
|
+ public static AudioTrackManager2 getInstance() {
|
|
|
+ if (mInstance == null) {
|
|
|
+ synchronized (AudioTrackManager2.class) {
|
|
|
+ if (mInstance == null) {
|
|
|
+ mInstance = new AudioTrackManager2();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return mInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void init() {
|
|
|
+ minBufferSize = AudioTrack.getMinBufferSize(RATE, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
|
|
|
+ LOG.i("bufferSize:" + minBufferSize);
|
|
|
+ mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, RATE,
|
|
|
+ AudioFormat.CHANNEL_OUT_STEREO, // CHANNEL_CONFIGURATION_MONO,
|
|
|
+ AudioFormat.ENCODING_PCM_16BIT, minBufferSize, AudioTrack.MODE_STREAM);
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final int RATE = 44100;
|
|
|
+ public static final float MAX_VOLUME = 1f;
|
|
|
+
|
|
|
+
|
|
|
+ public void setWeight(int pos, float value) {
|
|
|
+ if (mWeghts == null) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ if (value < 0f || value > 1.0f) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ if (pos < mWeghts.length) {
|
|
|
+ LOG.i("setWeight:" + pos + "--value:" + value);
|
|
|
+ mWeghts[pos] = value * MAX_VOLUME;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public void setOnlyPlay(int pos) {
|
|
|
+ if (mWeghts == null) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ if (pos < mWeghts.length) {
|
|
|
+ for (int i = 0; i < mWeghts.length; i++) {
|
|
|
+ mWeghts[i] = i == pos ? MAX_VOLUME : 0;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public void setPlaySpeed(float value) {
|
|
|
+ // 设置播放速度
|
|
|
+ if (value < 0.5f) {
|
|
|
+ value = 0.5f;
|
|
|
+ }
|
|
|
+ if (value > 2.5f) {
|
|
|
+ value = 2.5f;
|
|
|
+ }
|
|
|
+ if (mAudioTrack != null) {
|
|
|
+ PlaybackParams playbackParams = null;
|
|
|
+// int i = mAudioTrack.setPlaybackRate((int) (value * RATE));
|
|
|
+// LOG.i("setPlaybackRate:"+i);
|
|
|
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
|
|
|
+ playbackParams = mAudioTrack.getPlaybackParams();
|
|
|
+ playbackParams.setSpeed(value);
|
|
|
+ LOG.i("setPlaybackRate:" + value);
|
|
|
+ mAudioTrack.setPlaybackParams(playbackParams);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public void pause() {
|
|
|
+ if (mAudioTrack != null) {
|
|
|
+ mAudioTrack.pause();
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public void initAudio(int[] musicRes) {
|
|
|
+ if (musicRes == null || musicRes.length == 0) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ mAllAudioData = parseFile2(musicRes);
|
|
|
+ }
|
|
|
+
|
|
|
+ private QuarterNoteRhythmType currentRhythmType;
|
|
|
+ private MetronomeType currentBeatType;
|
|
|
+ private int currentSpeed;
|
|
|
+ private float currentNoteRate;
|
|
|
+
|
|
|
+ public void setCurrentNoteRate(int playSpeed, float currentNoteRate) {
|
|
|
+ this.currentSpeed = playSpeed;
|
|
|
+ this.currentNoteRate = currentNoteRate;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void playBeat(QuarterNoteRhythmType rhythmType, MetronomeType metronomeType) {
|
|
|
+ if (ISPLAYSOUND) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ this.currentRhythmType = rhythmType;
|
|
|
+ this.currentBeatType = metronomeType;
|
|
|
+ Observable.create(new ObservableOnSubscribe<String>() {
|
|
|
+ @Override
|
|
|
+ public void subscribe(@NonNull ObservableEmitter<String> emitter) throws Throwable {
|
|
|
+ ISPLAYSOUND = true;
|
|
|
+ mAudioTrack.play();
|
|
|
+ int playPos = 0;
|
|
|
+ while (ISPLAYSOUND) {
|
|
|
+ byte[] audiodata;
|
|
|
+ if (isTickOrTock(playPos)) {
|
|
|
+ audiodata = mAllAudioData[0];
|
|
|
+ } else {
|
|
|
+ audiodata = mAllAudioData[1];
|
|
|
+ }
|
|
|
+ audiodata = countBySpeed(playPos, audiodata);
|
|
|
+ mAudioTrack.write(audiodata, 0, audiodata.length);
|
|
|
+ playPos++;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }).subscribeOn(Schedulers.newThread())
|
|
|
+ .observeOn(AndroidSchedulers.mainThread())
|
|
|
+ .subscribe(new Observer<String>() {
|
|
|
+ @Override
|
|
|
+ public void onSubscribe(@NonNull Disposable d) {
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onNext(@NonNull String s) {
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onError(@NonNull Throwable e) {
|
|
|
+ e.printStackTrace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onComplete() {
|
|
|
+
|
|
|
+ }
|
|
|
+ });
|
|
|
+ }
|
|
|
+
|
|
|
+ private byte[] countBySpeed(int playPos, byte[] audiodata) {
|
|
|
+ int duration = (int) (1 / (currentSpeed / 60d) * 1000 * currentNoteRate * getRhythmParams(playPos));
|
|
|
+ LOG.i("当前拍时值:" + duration);
|
|
|
+ //totalDuration = byteLength / ((long) rate * channelConfig * audioFormat) * 1000;
|
|
|
+ //byteLength / ((long) rate * channelConfig * audioFormat) * 1000;
|
|
|
+ float d = duration / 1000f;
|
|
|
+ int exceptAudioLength = (int) (d * (RATE * 2 * AudioFormat.ENCODING_PCM_16BIT));
|
|
|
+ byte[] lastLengthFromExcept = getLastLengthFromExcept(audiodata, exceptAudioLength);
|
|
|
+ LOG.i("lastLengthFromExcept:" + lastLengthFromExcept.length);
|
|
|
+ return lastLengthFromExcept;
|
|
|
+ }
|
|
|
+
|
|
|
+ private float getRhythmParams(int playPosition) {
|
|
|
+ float v = QuarterNoteRhythmType.countSingleNoteDuration(currentRhythmType, playPosition);
|
|
|
+ LOG.i("getRhythmParams:" + v);
|
|
|
+ return v;
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
+ private boolean isTickOrTock(int playPosition) {
|
|
|
+ if (currentRhythmType != QuarterNoteRhythmType.METRONOME_1_TYPE) {
|
|
|
+ return playPosition % currentRhythmType.getNoteCount() == 0;
|
|
|
+ }
|
|
|
+ if (currentBeatType.getValue() == MetronomeType.METRONOME_04_TYPE.getValue()) {
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ int beatValue = currentBeatType.getBeatValue();
|
|
|
+ return playPosition % beatValue == 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void play2(String[] filePaths) {
|
|
|
+ if (ISPLAYSOUND) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ Observable.create(new ObservableOnSubscribe<String>() {
|
|
|
+ @Override
|
|
|
+ public void subscribe(@NonNull ObservableEmitter<String> emitter) throws Throwable {
|
|
|
+ mWeghts = new float[filePaths.length];
|
|
|
+ mAllAudioData = parseFile(filePaths);
|
|
|
+ WeightAudioMixer weightAudioMixer = new WeightAudioMixer(mWeghts);
|
|
|
+ ISPLAYSOUND = true;
|
|
|
+ mAudioTrack.play();
|
|
|
+ byte[][] allAudioData2 = new byte[filePaths.length][];
|
|
|
+ int bytesRead = 0;
|
|
|
+ while (ISPLAYSOUND) {
|
|
|
+ int result = 0;
|
|
|
+ for (int i = 0; i < mAllAudioData.length; i++) {
|
|
|
+ byte[] buffer = new byte[minBufferSize];
|
|
|
+ result = splitByteArray(mAllAudioData[i], bytesRead, buffer);
|
|
|
+ allAudioData2[i] = buffer;
|
|
|
+ }
|
|
|
+ bytesRead = result != -1 ? ++result : -1;
|
|
|
+ LOG.i("bytesRead:" + bytesRead);
|
|
|
+ byte[] audiodata = weightAudioMixer.mixRawAudioBytes(allAudioData2);
|
|
|
+ mAudioTrack.write(audiodata, 0, minBufferSize);
|
|
|
+ if (bytesRead == -1) {
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }).subscribeOn(Schedulers.newThread())
|
|
|
+ .observeOn(AndroidSchedulers.mainThread())
|
|
|
+ .subscribe(new Observer<String>() {
|
|
|
+ @Override
|
|
|
+ public void onSubscribe(@NonNull Disposable d) {
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onNext(@NonNull String s) {
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onError(@NonNull Throwable e) {
|
|
|
+ e.printStackTrace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onComplete() {
|
|
|
+
|
|
|
+ }
|
|
|
+ });
|
|
|
+ }
|
|
|
+
|
|
|
+ @androidx.annotation.NonNull
|
|
|
+ private byte[][] parseFile(String[] filePaths) {
|
|
|
+ byte[][] allAudioData = new byte[filePaths.length][];
|
|
|
+ LOG.i("filePaths:" + filePaths.length);
|
|
|
+ for (int i = 0; i < filePaths.length; i++) {
|
|
|
+ mWeghts[i] = MAX_VOLUME;
|
|
|
+ String filePath = filePaths[i];
|
|
|
+ byte[] bytes = readAudioDataFromFile(filePath);
|
|
|
+ LOG.i("readAudioDataFromFile:" + bytes.length);
|
|
|
+ allAudioData[i] = bytes;
|
|
|
+ }
|
|
|
+ makeLengthConsistent(allAudioData);
|
|
|
+ int byteLength = allAudioData[0].length;
|
|
|
+ countPlayTotalTime(RATE, 2, AudioFormat.ENCODING_PCM_16BIT, byteLength);
|
|
|
+ return allAudioData;
|
|
|
+ }
|
|
|
+
|
|
|
+ private byte[][] parseFile2(int[] resIds) {
|
|
|
+ byte[][] allAudioData = new byte[resIds.length][];
|
|
|
+ LOG.i("allAudioData:" + allAudioData.length);
|
|
|
+ for (int i = 0; i < resIds.length; i++) {
|
|
|
+// mWeghts[i] = MAX_VOLUME;
|
|
|
+ byte[] bytes = readAudioDataFromRawFile(resIds[i]);
|
|
|
+ LOG.i("readAudioDataFromFile:" + bytes.length);
|
|
|
+ allAudioData[i] = bytes;
|
|
|
+ }
|
|
|
+// makeLengthConsistent(allAudioData);
|
|
|
+ int byteLength = allAudioData[0].length;
|
|
|
+ countPlayTotalTime(RATE, 2, AudioFormat.ENCODING_PCM_16BIT, byteLength);
|
|
|
+ return allAudioData;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void countPlayTotalTime(int rate, int channelConfig, int audioFormat, int byteLength) {
|
|
|
+ //一帧音频的大小(字节) = 通道数 x 采样个数 x 采样位数
|
|
|
+ LOG.i("countPlayTotalTime: rate:" + rate + "-channelConfig:" + channelConfig + "-audioFormat:" + audioFormat + "-byteLength:" + byteLength);
|
|
|
+ totalDuration = byteLength / ((long) rate * channelConfig * audioFormat) * 1000;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int splitByteArray(byte[] source, int startIndex, byte[] destination) {
|
|
|
+ if (startIndex * destination.length >= source.length) {
|
|
|
+ return -1;
|
|
|
+ }
|
|
|
+ int remainByteLength = source.length - startIndex * destination.length;
|
|
|
+ boolean isLast = remainByteLength <= destination.length;
|
|
|
+ if (isLast) {
|
|
|
+ System.arraycopy(source, startIndex * destination.length, destination, 0, remainByteLength);
|
|
|
+ } else {
|
|
|
+ System.arraycopy(source, startIndex * destination.length, destination, 0, destination.length);
|
|
|
+ }
|
|
|
+ if (isLast) {
|
|
|
+ return -1;
|
|
|
+ }
|
|
|
+ return startIndex;
|
|
|
+ }
|
|
|
+
|
|
|
+ public long getTotalDuration() {
|
|
|
+ return totalDuration;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void seekPercent(float percent) {
|
|
|
+ if (getTotalDuration() == -1) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ if (percent < 0 || percent > 1.0f) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ long totalDuration = getTotalDuration();
|
|
|
+ LOG.i("seek percent:" + percent);
|
|
|
+ LOG.i("seek percent:" + totalDuration);
|
|
|
+ int posTime = (int) (totalDuration * percent);
|
|
|
+ LOG.i("seek posTime:" + posTime);
|
|
|
+ int seekToFrame = (int) (RATE * (posTime * 1.0f / 1000)); // 转换为对应的采样帧
|
|
|
+ LOG.i("seek seekToFrame:" + seekToFrame);
|
|
|
+ seekTo(seekToFrame);
|
|
|
+ }
|
|
|
+
|
|
|
+ private void seekTo(int seekToFrame) {
|
|
|
+ if (mAudioTrack != null) {
|
|
|
+ if (!isPlaying()) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ LOG.i("seekTo:" + seekToFrame);
|
|
|
+
|
|
|
+ int currentFrame = mAudioTrack.getPlaybackHeadPosition(); // 获取当前播放头位置
|
|
|
+ int targetFrame = currentFrame + seekToFrame; // 计算目标播放头位置
|
|
|
+ mAudioTrack.setPlaybackHeadPosition(targetFrame);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private boolean isPlaying() {
|
|
|
+ if (mAudioTrack != null) {
|
|
|
+ return mAudioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING;
|
|
|
+ }
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+
|
|
|
+ public long getPlayProgress() {
|
|
|
+ if (mAudioTrack != null) {
|
|
|
+ int currentFrame = mAudioTrack.getPlaybackHeadPosition();
|
|
|
+ LOG.i("currentFrame:" + currentFrame);
|
|
|
+
|
|
|
+ int rate = RATE;
|
|
|
+ if (rate > 0) {
|
|
|
+ float playTime = currentFrame * 1.0f / rate;
|
|
|
+ long currentPlayTimeMs = (long) (1000 * playTime);
|
|
|
+ LOG.i("currentPlayTimeMs=" + currentPlayTimeMs);
|
|
|
+ return currentPlayTimeMs;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return -1;
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * Play beep.
|
|
|
+ *
|
|
|
+ * @param
|
|
|
+ * @param
|
|
|
+ */
|
|
|
+ public void play(String[] filePaths) {
|
|
|
+ Observable.create(new ObservableOnSubscribe<String>() {
|
|
|
+ @Override
|
|
|
+ public void subscribe(@NonNull ObservableEmitter<String> emitter) throws Throwable {
|
|
|
+ int bufferSize = AudioTrack.getMinBufferSize(RATE, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
|
|
|
+ float[] weghts = new float[filePaths.length];
|
|
|
+ byte[][] allAudioData = new byte[filePaths.length][];
|
|
|
+ for (int i = 0; i < filePaths.length; i++) {
|
|
|
+ weghts[i] = 1f;
|
|
|
+ String filePath = filePaths[i];
|
|
|
+ byte[] bytes = readAudioDataFromFile(filePath);
|
|
|
+ allAudioData[i] = bytes;
|
|
|
+ }
|
|
|
+ WeightAudioMixer weightAudioMixer = new WeightAudioMixer(weghts);
|
|
|
+ makeLengthConsistent(allAudioData);
|
|
|
+ byte[] audiodata = weightAudioMixer.mixRawAudioBytes(allAudioData);
|
|
|
+ mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, RATE,
|
|
|
+ AudioFormat.CHANNEL_OUT_STEREO, // CHANNEL_CONFIGURATION_MONO,
|
|
|
+ AudioFormat.ENCODING_PCM_16BIT, audiodata.length, AudioTrack.MODE_STATIC);
|
|
|
+ mAudioTrack.write(audiodata, 0, audiodata.length);
|
|
|
+ setPlaySpeed(2.0f);
|
|
|
+ mAudioTrack.play();
|
|
|
+ }
|
|
|
+ }).subscribeOn(Schedulers.newThread())
|
|
|
+ .observeOn(AndroidSchedulers.mainThread())
|
|
|
+ .subscribe(new Observer<String>() {
|
|
|
+ @Override
|
|
|
+ public void onSubscribe(@NonNull Disposable d) {
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onNext(@NonNull String s) {
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onError(@NonNull Throwable e) {
|
|
|
+ e.printStackTrace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void onComplete() {
|
|
|
+
|
|
|
+ }
|
|
|
+ });
|
|
|
+ ISPLAYSOUND = true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public byte[] getLastLengthFromExcept(byte[] originalAudio, int exceptLength) {
|
|
|
+ LOG.i("originalAudio:" + originalAudio.length);
|
|
|
+ LOG.i("exceptLength:" + exceptLength);
|
|
|
+ if (originalAudio.length == exceptLength) {
|
|
|
+ return originalAudio;
|
|
|
+ }
|
|
|
+ byte[] paddedBlock = new byte[exceptLength];
|
|
|
+ if (originalAudio.length > exceptLength) {
|
|
|
+ //裁剪
|
|
|
+ System.arraycopy(originalAudio, 0, paddedBlock, 0, paddedBlock.length);
|
|
|
+ } else {
|
|
|
+ //补齐
|
|
|
+ System.arraycopy(originalAudio, 0, paddedBlock, 0, originalAudio.length);
|
|
|
+ }
|
|
|
+ return paddedBlock;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void makeLengthConsistent(byte[][] dataBlocks) {
|
|
|
+ // 找出最长的数据块长度
|
|
|
+ int maxLength = 0;
|
|
|
+ for (byte[] block : dataBlocks) {
|
|
|
+ if (block.length > maxLength) {
|
|
|
+ maxLength = block.length;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // 填充数据块,使它们的长度一致
|
|
|
+ for (int i = 0; i < dataBlocks.length; i++) {
|
|
|
+ byte[] block = dataBlocks[i];
|
|
|
+ if (block.length < maxLength) {
|
|
|
+ byte[] paddedBlock = new byte[maxLength];
|
|
|
+ System.arraycopy(block, 0, paddedBlock, 0, block.length);
|
|
|
+ dataBlocks[i] = paddedBlock;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * 叠加合成器
|
|
|
+ *
|
|
|
+ * @author Darcy
|
|
|
+ */
|
|
|
+ private static class AddAudioMixer extends MultiAudioMixer {
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public byte[] mixRawAudioBytes(byte[][] bMulRoadAudioes) {
|
|
|
+
|
|
|
+ if (bMulRoadAudioes == null || bMulRoadAudioes.length == 0)
|
|
|
+ return null;
|
|
|
+
|
|
|
+ byte[] realMixAudio = bMulRoadAudioes[0];
|
|
|
+
|
|
|
+ if (bMulRoadAudioes.length == 1)
|
|
|
+ return realMixAudio;
|
|
|
+
|
|
|
+// for (int rw = 0; rw < bMulRoadAudioes.length; ++rw) {
|
|
|
+// if (bMulRoadAudioes[rw].length != realMixAudio.length) {
|
|
|
+// Log.e("app", "column of the road of audio + " + rw + " is diffrent.");
|
|
|
+// return null;
|
|
|
+// }
|
|
|
+// }
|
|
|
+
|
|
|
+ //row 代表参与合成的音频数量
|
|
|
+ //column 代表一段音频的采样点数,这里所有参与合成的音频的采样点数都是相同的
|
|
|
+ int row = bMulRoadAudioes.length;
|
|
|
+ int coloum = realMixAudio.length / 2;
|
|
|
+ LOG.i("pq", "row:" + row);
|
|
|
+ LOG.i("pq", "coloum:" + coloum);
|
|
|
+ short[][] sMulRoadAudioes = new short[row][coloum];
|
|
|
+
|
|
|
+ //PCM音频16位的存储是大端存储方式,即低位在前,高位在后,例如(X1Y1, X2Y2, X3Y3)数据,它代表的采样点数值就是((Y1 * 256 + X1), (Y2 * 256 + X2), (Y3 * 256 + X3))
|
|
|
+ for (int r = 0; r < row; ++r) {
|
|
|
+ for (int c = 0; c < coloum; ++c) {
|
|
|
+ sMulRoadAudioes[r][c] = (short) ((bMulRoadAudioes[r][c * 2] & 0xff) | (bMulRoadAudioes[r][c * 2 + 1] & 0xff) << 8);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ short[] sMixAudio = new short[coloum];
|
|
|
+ int mixVal;
|
|
|
+ int sr = 0;
|
|
|
+ for (int sc = 0; sc < coloum; ++sc) {
|
|
|
+ mixVal = 0;
|
|
|
+ sr = 0;
|
|
|
+ //这里采取累加法
|
|
|
+ for (; sr < row; ++sr) {
|
|
|
+ mixVal += sMulRoadAudioes[sr][sc];
|
|
|
+ }
|
|
|
+ //最终值不能大于short最大值,因此可能出现溢出
|
|
|
+ sMixAudio[sc] = (short) (mixVal);
|
|
|
+ }
|
|
|
+
|
|
|
+ //short值转为大端存储的双字节序列
|
|
|
+ for (sr = 0; sr < coloum; ++sr) {
|
|
|
+ realMixAudio[sr * 2] = (byte) (sMixAudio[sr] & 0x00FF);
|
|
|
+ realMixAudio[sr * 2 + 1] = (byte) ((sMixAudio[sr] & 0xFF00) >> 8);
|
|
|
+ }
|
|
|
+
|
|
|
+ return realMixAudio;
|
|
|
+ }
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
+ /**
|
|
|
+ * 求平均值合成器
|
|
|
+ *
|
|
|
+ * @author Darcy
|
|
|
+ */
|
|
|
+ private static class AverageAudioMixer extends MultiAudioMixer {
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public byte[] mixRawAudioBytes(byte[][] bMulRoadAudioes) {
|
|
|
+
|
|
|
+ if (bMulRoadAudioes == null || bMulRoadAudioes.length == 0)
|
|
|
+ return null;
|
|
|
+
|
|
|
+ byte[] realMixAudio = bMulRoadAudioes[0];
|
|
|
+
|
|
|
+ if (bMulRoadAudioes.length == 1)
|
|
|
+ return realMixAudio;
|
|
|
+
|
|
|
+ for (int rw = 0; rw < bMulRoadAudioes.length; ++rw) {
|
|
|
+ if (bMulRoadAudioes[rw].length != realMixAudio.length) {
|
|
|
+ Log.e("app", "column of the road of audio + " + rw + " is diffrent.");
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ int row = bMulRoadAudioes.length;
|
|
|
+ int coloum = realMixAudio.length / 2;
|
|
|
+ short[][] sMulRoadAudioes = new short[row][coloum];
|
|
|
+
|
|
|
+ for (int r = 0; r < row; ++r) {
|
|
|
+ for (int c = 0; c < coloum; ++c) {
|
|
|
+ sMulRoadAudioes[r][c] = (short) ((bMulRoadAudioes[r][c * 2] & 0xff) | (bMulRoadAudioes[r][c * 2 + 1] & 0xff) << 8);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ short[] sMixAudio = new short[coloum];
|
|
|
+ int mixVal;
|
|
|
+ int sr = 0;
|
|
|
+ for (int sc = 0; sc < coloum; ++sc) {
|
|
|
+ mixVal = 0;
|
|
|
+ sr = 0;
|
|
|
+ for (; sr < row; ++sr) {
|
|
|
+ mixVal += sMulRoadAudioes[sr][sc];
|
|
|
+ }
|
|
|
+ sMixAudio[sc] = (short) (mixVal / row);
|
|
|
+ }
|
|
|
+
|
|
|
+ for (sr = 0; sr < coloum; ++sr) {
|
|
|
+ realMixAudio[sr * 2] = (byte) (sMixAudio[sr] & 0x00FF);
|
|
|
+ realMixAudio[sr * 2 + 1] = (byte) ((sMixAudio[sr] & 0xFF00) >> 8);
|
|
|
+ }
|
|
|
+
|
|
|
+ return realMixAudio;
|
|
|
+ }
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * 权重求值合成器
|
|
|
+ *
|
|
|
+ * @author Darcy
|
|
|
+ */
|
|
|
+ private static class WeightAudioMixer extends MultiAudioMixer {
|
|
|
+ private float[] weights;
|
|
|
+
|
|
|
+ public WeightAudioMixer(float[] weights) {
|
|
|
+ this.weights = weights;
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public byte[] mixRawAudioBytes(byte[][] bMulRoadAudioes) {
|
|
|
+
|
|
|
+ if (bMulRoadAudioes == null || bMulRoadAudioes.length == 0) {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ LOG.i("bMulRoadAudioes length:" + bMulRoadAudioes.length);
|
|
|
+ if (weights == null || weights.length != bMulRoadAudioes.length) {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+
|
|
|
+ byte[] realMixAudio = bMulRoadAudioes[0];
|
|
|
+
|
|
|
+ if (bMulRoadAudioes.length == 1)
|
|
|
+ return realMixAudio;
|
|
|
+
|
|
|
+ for (int rw = 0; rw < bMulRoadAudioes.length; ++rw) {
|
|
|
+ LOG.i("mixRawAudioBytes:" + bMulRoadAudioes[rw].length);
|
|
|
+ if (bMulRoadAudioes[rw].length != realMixAudio.length) {
|
|
|
+ Log.e("app", "column of the road of audio + " + rw + " is diffrent.");
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ int row = bMulRoadAudioes.length;
|
|
|
+ int coloum = realMixAudio.length / 2;
|
|
|
+ short[][] sMulRoadAudioes = new short[row][coloum];
|
|
|
+
|
|
|
+ for (int r = 0; r < row; ++r) {
|
|
|
+ for (int c = 0; c < coloum; ++c) {
|
|
|
+ sMulRoadAudioes[r][c] = (short) ((bMulRoadAudioes[r][c * 2] & 0xff) | (bMulRoadAudioes[r][c * 2 + 1] & 0xff) << 8);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ short[] sMixAudio = new short[coloum];
|
|
|
+ int mixVal;
|
|
|
+ int sr = 0;
|
|
|
+ for (int sc = 0; sc < coloum; ++sc) {
|
|
|
+ mixVal = 0;
|
|
|
+ sr = 0;
|
|
|
+ for (; sr < row; ++sr) {
|
|
|
+ mixVal += sMulRoadAudioes[sr][sc] * weights[sr];
|
|
|
+ }
|
|
|
+// sMixAudio[sc] = (short) (mixVal);
|
|
|
+ sMixAudio[sc] = (short) (mixVal / row);
|
|
|
+ }
|
|
|
+
|
|
|
+ for (sr = 0; sr < coloum; ++sr) {
|
|
|
+ realMixAudio[sr * 2] = (byte) (sMixAudio[sr] & 0x00FF);
|
|
|
+ realMixAudio[sr * 2 + 1] = (byte) ((sMixAudio[sr] & 0xFF00) >> 8);
|
|
|
+ }
|
|
|
+
|
|
|
+ return realMixAudio;
|
|
|
+ }
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ private byte[] readAudioDataFromRawFile(int resId) {
|
|
|
+ try {
|
|
|
+ InputStream inputStream = Utils.getApp().getResources().openRawResource(resId);
|
|
|
+ ByteArrayOutputStream byteArrayOutputStream = ConvertUtils.input2OutputStream(inputStream);
|
|
|
+ byte[] audiodata = byteArrayOutputStream.toByteArray();
|
|
|
+ byteArrayOutputStream.close();
|
|
|
+ return audiodata;
|
|
|
+ } catch (Exception e) {
|
|
|
+ e.printStackTrace();
|
|
|
+ }
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+
|
|
|
+ private byte[] readAudioDataFromFile(String filePath) {
|
|
|
+ try {
|
|
|
+ File file = new File(filePath);
|
|
|
+ FileInputStream fileInputStream = new FileInputStream(file);
|
|
|
+ ByteArrayOutputStream byteArrayOutputStream = ConvertUtils.input2OutputStream(fileInputStream);
|
|
|
+ byte[] audiodata = byteArrayOutputStream.toByteArray();
|
|
|
+ byteArrayOutputStream.close();
|
|
|
+ return audiodata;
|
|
|
+ } catch (Exception e) {
|
|
|
+ e.printStackTrace();
|
|
|
+ }
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void stop() {
|
|
|
+ ISPLAYSOUND = false;
|
|
|
+ if (mAudioTrack != null) {
|
|
|
+ if (mAudioTrack.getState() == AudioRecord.STATE_INITIALIZED) {
|
|
|
+ mAudioTrack.pause();
|
|
|
+ mAudioTrack.flush();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public void release() {
|
|
|
+ if (mAllAudioData != null) {
|
|
|
+ mAllAudioData = null;
|
|
|
+ }
|
|
|
+ if (mAudioTrack != null) {
|
|
|
+ mAudioTrack.release();
|
|
|
+ mAudioTrack = null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+}
|