|
@@ -16,12 +16,9 @@ import javax.sound.sampled.AudioFormat;
|
|
|
import org.slf4j.Logger;
|
|
|
import org.slf4j.LoggerFactory;
|
|
|
|
|
|
-import be.tarsos.dsp.AudioEvent;
|
|
|
-import be.tarsos.dsp.pitch.PitchDetectionHandler;
|
|
|
-import be.tarsos.dsp.pitch.PitchDetectionResult;
|
|
|
-
|
|
|
import com.yonge.audio.analysis.Signals;
|
|
|
import com.yonge.audio.analysis.detector.YINPitchDetector;
|
|
|
+import com.yonge.audio.utils.ArrayUtil;
|
|
|
import com.yonge.nettty.dto.NoteAnalysis.NoteErrorType;
|
|
|
import com.yonge.nettty.entity.MusicXmlBasicInfo;
|
|
|
import com.yonge.nettty.entity.MusicXmlNote;
|
|
@@ -31,16 +28,18 @@ import com.yonge.netty.server.processor.WaveformWriter;
|
|
|
/**
|
|
|
* 用户通道上下文
|
|
|
*/
|
|
|
-public class UserChannelContext implements PitchDetectionHandler {
|
|
|
+public class UserChannelContext {
|
|
|
|
|
|
private final static Logger LOGGER = LoggerFactory.getLogger(UserChannelContext.class);
|
|
|
|
|
|
- private final double offsetMS = 300;
|
|
|
+ private final double offsetMS = 350;
|
|
|
|
|
|
private Long recordId;
|
|
|
|
|
|
private Integer subjectId;
|
|
|
|
|
|
+ private int beatByteLength;
|
|
|
+
|
|
|
// 曲目与musicxml对应关系
|
|
|
private ConcurrentHashMap<Integer, MusicXmlBasicInfo> songMusicXmlMap = new ConcurrentHashMap<Integer, MusicXmlBasicInfo>();
|
|
|
|
|
@@ -66,8 +65,23 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
|
|
|
private HardLevelEnum hardLevel = HardLevelEnum.ADVANCED;
|
|
|
|
|
|
- public void init(){
|
|
|
-
|
|
|
+	public void init(String heardLevel, int subjectId, int beatDuration) {
|
|
|
+		this.subjectId = subjectId;
|
|
|
+		this.beatByteLength = WaveformWriter.SAMPLE_RATE * WaveformWriter.BITS_PER_SAMPLE / 8 * beatDuration / 1000;
|
|
|
+ //hardLevel = HardLevelEnum.valueOf(heardLevel);
|
|
|
+ }
|
|
|
+
|
|
|
+ public byte[] skipHeader(byte[] datas) {
|
|
|
+ if (beatByteLength > 0) {
|
|
|
+ if (datas.length <= beatByteLength) {
|
|
|
+ beatByteLength -= datas.length;
|
|
|
+ return new byte[0];
|
|
|
+ }
|
|
|
+ byte[] data = ArrayUtil.extractByte(datas, beatByteLength, datas.length - 1);
|
|
|
+ beatByteLength = 0;
|
|
|
+ return data;
|
|
|
+ }
|
|
|
+ return datas;
|
|
|
}
|
|
|
|
|
|
public Long getRecordId() {
|
|
@@ -110,6 +124,10 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
return doneNoteAnalysisList;
|
|
|
}
|
|
|
|
|
|
+ public int getBeatByteLength() {
|
|
|
+ return beatByteLength;
|
|
|
+ }
|
|
|
+
|
|
|
public void resetUserInfo() {
|
|
|
waveFileProcessor = null;
|
|
|
processingNote = new NoteAnalysis(0,0,-1);
|
|
@@ -275,7 +293,7 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
if(Math.abs(chunkAnalysisList.get(chunkAnalysisList.size() - 1).getFrequency() - lastChunkAnalysis.getFrequency()) > hardLevel.getFrequencyOffset()){
|
|
|
lastChunkAnalysis.setFrequency(-1);
|
|
|
}
|
|
|
- if(chunkAnalysisList.get(chunkAnalysisList.size() - 1).getAmplitude() < lastChunkAnalysis.getAmplitude()){
|
|
|
+ if(chunkAnalysisList.get(chunkAnalysisList.size() - 1).getAmplitude() + 2 < lastChunkAnalysis.getAmplitude()){
|
|
|
lastChunkAnalysis.setPeak(true);
|
|
|
}
|
|
|
|
|
@@ -307,7 +325,7 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
}
|
|
|
} else {
|
|
|
if (subjectId == 23) {
|
|
|
- tempo = computeTempoWithAmplitude(chunkAnalysisList, lastChunkAnalysis);
|
|
|
+ tempo = computeTempoWithAmplitude2(chunkAnalysisList, lastChunkAnalysis);
|
|
|
} else {
|
|
|
tempo = computeTempoWithFrequency(chunkAnalysisList, lastChunkAnalysis);
|
|
|
}
|
|
@@ -361,123 +379,6 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
|
|
|
}
|
|
|
|
|
|
- public void handle2(float[] samples, AudioFormat audioFormat){
|
|
|
-
|
|
|
- //FrequencyDetector frequencyDetector = new FrequencyDetector(samples, audioFormat.getSampleRate(), false);
|
|
|
- YINPitchDetector frequencyDetector = new YINPitchDetector(samples.length/2 , audioFormat.getSampleRate());
|
|
|
- int frequency = (int)frequencyDetector.getFrequency(samples);
|
|
|
-
|
|
|
- int splDb = (int)Signals.soundPressureLevel(samples);
|
|
|
-
|
|
|
- int power = (int)Signals.power(samples);
|
|
|
-
|
|
|
- int energy = (int)Signals.energy(samples);
|
|
|
-
|
|
|
- LOGGER.info("Frequency:{} SplDb:{} Power:{}", frequency, splDb, power);
|
|
|
-
|
|
|
- double durationTime = 1000 * (samples.length * 2) / audioFormat.getSampleRate() / (audioFormat.getSampleSizeInBits() / 8);
|
|
|
-
|
|
|
- double startTime = 0;
|
|
|
-
|
|
|
- if(chunkAnalysisList.size() > 0){
|
|
|
- ChunkAnalysis lastestChunk = chunkAnalysisList.get(chunkAnalysisList.size() - 1);
|
|
|
- startTime = lastestChunk.getEndTime();
|
|
|
-
|
|
|
- if(Math.abs((lastestChunk.getFrequency() - frequency)) > 10 || Math.abs(lastestChunk.getPower() - power) > 0.1){
|
|
|
-
|
|
|
- double avgFrequency = chunkAnalysisList.stream().collect(Collectors.averagingDouble(ChunkAnalysis::getFrequency));
|
|
|
- NoteAnalysis noteAnalysis = new NoteAnalysis(chunkAnalysisList.get(0).getStartTime(), startTime, (int)avgFrequency);
|
|
|
- doneNoteAnalysisList.add(noteAnalysis);//添加演奏的一个音符
|
|
|
-
|
|
|
- //重置
|
|
|
- chunkAnalysisList.clear();
|
|
|
-
|
|
|
- //判断是否需要评分
|
|
|
- MusicXmlSection musicXmlSection = getCurrentMusicSection(null, 0);
|
|
|
- if(musicXmlSection != null){
|
|
|
- if(musicXmlSection.getDuration() < startTime){
|
|
|
-
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- chunkAnalysisList.add(new ChunkAnalysis(startTime, startTime + durationTime, frequency, splDb, power, energy));
|
|
|
- }
|
|
|
-
|
|
|
- @Override
|
|
|
- public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent audioEvent) {
|
|
|
-
|
|
|
- double durationTime = 1000 * (audioEvent.getFloatBuffer().length) / audioEvent.getSampleRate() / 2;
|
|
|
-
|
|
|
- float pitch = pitchDetectionResult.getPitch();
|
|
|
-
|
|
|
- LOGGER.info("pitch:{} timeStamp:{} endTimeStamp:{} durationTime:{}", pitch, audioEvent.getTimeStamp(), audioEvent.getEndTimeStamp(), durationTime);
|
|
|
-
|
|
|
- // 获取当前音符信息
|
|
|
- MusicXmlNote musicXmlNote = getCurrentMusicNote(null,null);
|
|
|
-
|
|
|
- if (musicXmlNote == null) {
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- //取出当前处理中的音符信息
|
|
|
- NoteAnalysis noteAnalysis = getProcessingNote();
|
|
|
- if(noteAnalysis == null){
|
|
|
- noteAnalysis = new NoteAnalysis(musicXmlNote.getMusicalNotesIndex(),musicXmlNote.getMeasureIndex(),(int)musicXmlNote.getFrequency());
|
|
|
- }
|
|
|
-
|
|
|
- double noteDurationTime = noteAnalysis.getDurationTime() + durationTime;
|
|
|
- noteAnalysis.setDurationTime(noteDurationTime);
|
|
|
-
|
|
|
- if(pitch != -1){
|
|
|
- //noteAnalysis.getChunkFrequencyList().add((double) pitch);
|
|
|
- }
|
|
|
-
|
|
|
- setProcessingNote(noteAnalysis);
|
|
|
-
|
|
|
- if(noteAnalysis.getMusicalNotesIndex() <= getTotalMusicNoteIndex(null) && noteDurationTime >= musicXmlNote.getDuration()){
|
|
|
-
|
|
|
- //noteAnalysis.setPlayFrequency(noteAnalysis.getChunkFrequencyList().stream().mapToDouble(t -> t).sum()/noteAnalysis.getChunkFrequencyList().size());
|
|
|
-
|
|
|
- LOGGER.info("当前音符下标[{}] 预计频率:{} 实际频率:{} 持续时间:{}", noteAnalysis.getMusicalNotesIndex() , musicXmlNote.getFrequency(), noteAnalysis.getPlayFrequency(), noteAnalysis.getDurationTime());
|
|
|
-
|
|
|
- // 准备处理下一个音符
|
|
|
- int nextNoteIndex = musicXmlNote.getMusicalNotesIndex() + 1;
|
|
|
- NoteAnalysis nextNoteAnalysis = new NoteAnalysis(nextNoteIndex, getMusicSectionIndex(null, nextNoteIndex), (int)getCurrentMusicNote(null,
|
|
|
- nextNoteIndex).getFrequency());
|
|
|
- setProcessingNote(nextNoteAnalysis);
|
|
|
- }
|
|
|
-
|
|
|
-
|
|
|
- /*// 获取字节流
|
|
|
- float[] bufferBytes = audioEvent.getFloatBuffer();
|
|
|
-
|
|
|
- // 粘合音符数据
|
|
|
- float[] totalNoteBytes = ArrayUtil.mergeFloat(getHandlerBufferBytes(), bufferBytes);
|
|
|
- setHandlerBufferBytes(totalNoteBytes);
|
|
|
-
|
|
|
-
|
|
|
- // 计算当前音符的数据长度 公式:数据量(字节/秒)= 采样频率(Hz)× (采样位数(bit)/ 8) × 声道数
|
|
|
- int length = (int) (44100 * (16 / 8) * 1 * musicXmlNote.getDuration() / 1000);
|
|
|
-
|
|
|
- if (noteAnalysis.getIndex() <= getTotalMusicNoteIndexNum(null) && totalNoteBytes.length >= length) {
|
|
|
- // 处理当前音符
|
|
|
- float[] noteFloatData = new float[length];
|
|
|
- System.arraycopy(totalNoteBytes, 0, noteFloatData, 0, length);
|
|
|
- // 剩余未处理的数据
|
|
|
- setHandlerBufferBytes(ArrayUtil.extractFloat(totalNoteBytes, length - 1, totalNoteBytes.length - 1));
|
|
|
-
|
|
|
- // 获取频率数据
|
|
|
- float npitch = getPitch(noteFloatData, audioEvent.getBufferSize());
|
|
|
-
|
|
|
- LOGGER.info("第{}个音符的样本频率:{} 实际频率:{}", noteAnalysis.getIndex(), musicXmlNote.getFrequency(), npitch);
|
|
|
-
|
|
|
- // 准备处理下一个音符
|
|
|
- setProcessingNote(noteAnalysis = new NoteAnalysis(musicXmlNote.getMusicalNotesIndex() + 1,musicXmlNote.getMeasureIndex()));
|
|
|
- }*/
|
|
|
- }
|
|
|
|
|
|
public int evaluateForSection(int sectionIndex, int subjectId){
|
|
|
|
|
@@ -555,6 +456,7 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
}
|
|
|
return result;
|
|
|
}
|
|
|
+
|
|
|
|
|
|
public void evaluateForNote(NoteAnalysis noteAnalysis) {
|
|
|
|
|
@@ -635,7 +537,6 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
.intValue());
|
|
|
}
|
|
|
}
|
|
|
-
|
|
|
private int computeFrequency(List<ChunkAnalysis> chunkAnalysisList, ChunkAnalysis lastChunkAnalysis, int offsetRange) {
|
|
|
|
|
|
List<ChunkAnalysis> chunkList = new ArrayList<ChunkAnalysis>(chunkAnalysisList);
|
|
@@ -665,7 +566,7 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
return -1;
|
|
|
}
|
|
|
|
|
|
- if(tenutoSize * 100 / chunkAnalysisList.size() >= 50){
|
|
|
+ if(tenutoSize * 100 / chunkAnalysisList.size() > 50){
|
|
|
return lastChunkAnalysis.getFrequency();
|
|
|
}
|
|
|
|
|
@@ -716,7 +617,7 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
|
|
|
return frequency;
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
private boolean computeTempoWithFrequency(List<ChunkAnalysis> chunkAnalysisList, ChunkAnalysis lastChunkAnalysis){
|
|
|
|
|
|
List<ChunkAnalysis> chunkList = new ArrayList<ChunkAnalysis>(chunkAnalysisList);
|
|
@@ -782,6 +683,58 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
return tempo;
|
|
|
}
|
|
|
|
|
|
+ private boolean computeTempoWithAmplitude2(List<ChunkAnalysis> chunkAnalysisList, ChunkAnalysis lastChunkAnalysis) {
|
|
|
+
|
|
|
+ List<Integer> chunkAmplitudeList = chunkAnalysisList.stream().map(ChunkAnalysis::getAmplitude).collect(Collectors.toList());
|
|
|
+
|
|
|
+ if (chunkAmplitudeList.size() <= 3) {
|
|
|
+			return chunkAmplitudeList.stream().anyMatch(t -> t.floatValue() > hardLevel.getAmplitudeThreshold());
|
|
|
+ }
|
|
|
+
|
|
|
+ chunkAmplitudeList.add(0, lastChunkAnalysis.getAmplitude());
|
|
|
+
|
|
|
+ // 检测是否有多个波峰
|
|
|
+ boolean tempo = false;
|
|
|
+ boolean isContinue = true;
|
|
|
+ int firstPeakIndex = -1;
|
|
|
+ int peakSize = 0;
|
|
|
+ for (int i = 1; i < chunkAmplitudeList.size(); i++) {
|
|
|
+ if (chunkAmplitudeList.get(i) > hardLevel.getAmplitudeThreshold() && chunkAmplitudeList.get(i) > chunkAmplitudeList.get(i - 1) + 2) {
|
|
|
+ tempo = true;
|
|
|
+ if(firstPeakIndex == -1){
|
|
|
+ firstPeakIndex = i;
|
|
|
+ peakSize++;
|
|
|
+ }
|
|
|
+ if (isContinue == false) {
|
|
|
+ tempo = false;
|
|
|
+ peakSize++;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if (tempo == true) {
|
|
|
+ isContinue = false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if(peakSize == 0){
|
|
|
+ tempo = lastChunkAnalysis.isPeak();
|
|
|
+ }else if(peakSize == 1){
|
|
|
+ tempo = true;
|
|
|
+ }else{
|
|
|
+ tempo = false;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (tempo) {
|
|
|
+ // 判断进入时间点
|
|
|
+			if((firstPeakIndex - 1) * 100 / chunkAmplitudeList.size() > hardLevel.getTempoOffsetOfPercent()){
|
|
|
+ tempo = false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return tempo;
|
|
|
+ }
|
|
|
+
|
|
|
private boolean computeTempoWithAmplitude(List<ChunkAnalysis> chunkAnalysisList, ChunkAnalysis lastChunkAnalysis) {
|
|
|
|
|
|
boolean tempo = false;
|
|
@@ -809,11 +762,13 @@ public class UserChannelContext implements PitchDetectionHandler {
|
|
|
}
|
|
|
}
|
|
|
} else {
|
|
|
- if (chunkAmplitudeList.get(i - 1) + 2 < chunkAmplitudeList.get(i) && chunkAmplitudeList.get(i) >= chunkAmplitudeList.get(i + 1)) {
|
|
|
- peakSize++;
|
|
|
- if (minPeakIndex == -1 || minPeakIndex > i) {
|
|
|
- minPeakIndex = i;
|
|
|
- }
|
|
|
+ if (chunkAmplitudeList.get(i - 1) < chunkAmplitudeList.get(i) && chunkAmplitudeList.get(i) >= chunkAmplitudeList.get(i + 1)) {
|
|
|
+ //if(Math.abs(chunkAmplitudeList.get(i - 1) - chunkAmplitudeList.get(i)) > 2 || Math.abs(chunkAmplitudeList.get(i) - chunkAmplitudeList.get(i + 1)) > 2){
|
|
|
+ peakSize++;
|
|
|
+ if (minPeakIndex == -1 || minPeakIndex > i) {
|
|
|
+ minPeakIndex = i;
|
|
|
+ }
|
|
|
+ //}
|
|
|
}
|
|
|
}
|
|
|
}
|