yonge 3 года назад
Родитель
Commit
18162afb44

+ 13 - 36
audio-analysis/src/main/java/com/yonge/netty/dto/UserChannelContext.java

@@ -21,7 +21,6 @@ import be.tarsos.dsp.pitch.FastYin;
 
 import com.yonge.audio.analysis.Signals;
 import com.yonge.audio.analysis.detector.YINPitchDetector;
-import com.yonge.audio.utils.ArrayUtil;
 import com.yonge.netty.dto.NoteAnalysis.NoteErrorType;
 import com.yonge.netty.entity.MusicXmlBasicInfo;
 import com.yonge.netty.entity.MusicXmlNote;
@@ -40,10 +39,6 @@ public class UserChannelContext {
 	
 	private FastYin detector;
 	
-	private float sampleRate;
-	
-	private int bitsPerSample;
-	
 	private String user;
 	
 	private double standardFrequecy = 442;
@@ -60,8 +55,6 @@ public class UserChannelContext {
 	
 	private float beatDuration;
 	
-	private int beatByteLength;
-	
 	private boolean delayProcessed;
 	
 	// 曲目与musicxml对应关系
@@ -122,32 +115,14 @@ public class UserChannelContext {
 		return result;
 	}
 	
-	public void init(String platform, String heardLevel, int subjectId, float beatDuration,float sampleRate, int bitsPerSample, int bufferSize) {
+	public void init(String platform, String heardLevel, int subjectId, float beatDuration,float sampleRate, int bufferSize) {
 		this.platform = platform;
 		this.subjectId = subjectId;
 		this.beatDuration = beatDuration;
-		this.beatByteLength = (int) (sampleRate * bitsPerSample / 8 * beatDuration / 1000);
 		hardLevel = HardLevelEnum.valueOf(heardLevel);
 		if(detector == null){
 			detector = new FastYin(sampleRate, bufferSize);
 		}
-		this.sampleRate = sampleRate;
-		this.bitsPerSample = bitsPerSample;
-	}
-	
-	public byte[] skipMetronome(byte[] datas) {
-		if (beatByteLength > 0) {
-			if (datas.length <= beatByteLength) {
-				beatByteLength -= datas.length;
-				return new byte[0];
-			}
-			/*if(beatByteLength % 2 != 0){
-				beatByteLength++;
-			}*/
-			datas = ArrayUtil.extractByte(datas, beatByteLength, datas.length - 1);
-			beatByteLength = 0;
-		}
-		return datas;
 	}
 	
 	public void setUser(String user) {
@@ -219,7 +194,6 @@ public class UserChannelContext {
 	}
 
 	public void resetUserInfo() {
-		beatByteLength = (int) (sampleRate * bitsPerSample / 8 * beatDuration / 1000);
 		waveFileProcessor = null;
 		processingNote = new NoteAnalysis(0,0,-1);
 		evaluatingSectionIndex = new AtomicInteger(0);
@@ -230,6 +204,7 @@ public class UserChannelContext {
 		recordId = null;
 		playTime = 0;
 		delayProcessed = false;
+		offsetMS = 0;
 		dynamicOffset = 0;
 		handlerSwitch = false;
 	}
@@ -723,7 +698,7 @@ public class UserChannelContext {
 		
 		ChunkAnalysis firstChunkAnalysis = chunkAnalysisList.get(0);
 		
-		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) == Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
+		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
 
 		ChunkAnalysis lastChunkAnalysis = null;
 		if (chunkAnalysisOptional.isPresent()) {
@@ -756,11 +731,13 @@ public class UserChannelContext {
 						firstPeakIndex = i;
 						noteFrequencyRange = new NoteFrequencyRange(standardFrequecy, chunkAnalysis.getFrequency());
 					} else if (noteFrequencyRange.getMinFrequency() > chunkAnalysis.getFrequency()
-							|| chunkAnalysis.getFrequency() > noteFrequencyRange.getMaxFrequency()) {
-						// 判断是否是同一个音
-						tempo = false;
-						LOGGER.info("节奏错误原因:不是同一个音[{}]:{}-{}", chunkAnalysis.getFrequency(), noteFrequencyRange.getMinFrequency(), noteFrequencyRange.getMaxFrequency());
-						break;
+							|| chunkAnalysis.getFrequency() > noteFrequencyRange.getMaxFrequency()) {// 判断是否是同一个音
+						//是否是低八度
+						if(!(noteFrequencyRange.getMinFrequency() < chunkAnalysis.getFrequency() * 2 && chunkAnalysis.getFrequency() * 2 < noteFrequencyRange.getMaxFrequency())){
+							tempo = false;
+							LOGGER.info("节奏错误原因:不是同一个音[{}]:{}-{}", chunkAnalysis.getFrequency(), noteFrequencyRange.getMinFrequency(), noteFrequencyRange.getMaxFrequency());
+							break;
+						}
 					}
 					if (isContinue == false) {
 						if ((i + 1) / chunkAnalysisList.size() < hardLevel.getIntegrityRange()) {
@@ -790,7 +767,7 @@ public class UserChannelContext {
 				if(firstChunkAnalysis.getFrequency() > 100 && lastChunkAnalysis.getFrequency() > 100){
 					tempo = new NoteFrequencyRange(standardFrequecy, firstChunkAnalysis.getFrequency()).equals(new NoteFrequencyRange(standardFrequecy, lastChunkAnalysis.getFrequency())) == false;
 					if(tempo == false){
-						LOGGER.info("节奏错误原因:上一个音延续下来导致的");
+						LOGGER.info("节奏错误原因:上一个音[{}]延续下来导致的", lastChunkAnalysis.getFrequency());
 					}
 				}
 			}
@@ -826,7 +803,7 @@ public class UserChannelContext {
 			return chunkAnalysisList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).count() <= 0;
 		}
 		
-		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) == Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
+		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
 
 		ChunkAnalysis lastChunkAnalysis = null;
 		if (chunkAnalysisOptional.isPresent()) {
@@ -848,7 +825,7 @@ public class UserChannelContext {
 		int firstPeakIndex = -1;
 		int peakSize = 0;
 		for (int i = 1; i < chunkAmplitudeList.size(); i++) {
-			if (chunkAmplitudeList.get(i) > hardLevel.getAmplitudeThreshold() && chunkAmplitudeList.get(i) > chunkAmplitudeList.get(i - 1) + 1) {
+			if (chunkAmplitudeList.get(i) > hardLevel.getAmplitudeThreshold() && chunkAmplitudeList.get(i) > chunkAmplitudeList.get(i - 1) + 2) {
 				tempo = true;
 				if(firstPeakIndex == -1){
 					firstPeakIndex = i;

+ 4 - 3
audio-analysis/src/main/java/com/yonge/netty/server/service/AudioCompareHandler.java

@@ -122,7 +122,7 @@ public class AudioCompareHandler implements MessageHandler {
 
 			channelContext.getSongMusicXmlMap().put(musicXmlBasicInfo.getExamSongId(), musicXmlBasicInfo);
 			channelContext.init(musicXmlBasicInfo.getPlatform(), musicXmlBasicInfo.getHeardLevel(), musicXmlBasicInfo.getSubjectId(),
-					musicXmlBasicInfo.getBeatLength(), audioFormat.getSampleRate(), audioFormat.getSampleSizeInBits(), bufferSize / 2);
+					musicXmlBasicInfo.getBeatLength(), audioFormat.getSampleRate(), bufferSize / 2);
 			channelContext.setUser(user);
 			
 			userChannelContextService.register(channel, channelContext);
@@ -300,10 +300,11 @@ public class AudioCompareHandler implements MessageHandler {
 			
 			if(totalLength > beatByteLength){
 				channelContext.setChannelBufferBytes(ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), beatByteLength, totalLength - 1));
+				
+				LOGGER.info("--------Length:{}  Times[{} + {}]:{}--------", waveFileProcessor.getFile().length() - channelContext.getChannelBufferBytes().length, channelContext.getOffsetMS() , channelContext.getBeatDuration(),(waveFileProcessor.getFile().length() - channelContext.getChannelBufferBytes().length) * 1000 /audioFormat.getSampleRate()/2);
+				
 				channelContext.setOffsetMS(0);
 				channelContext.setBeatDuration(0);
-				
-				LOGGER.info("--------Length:{}  Times:{}--------", waveFileProcessor.getFile().length() - channelContext.getChannelBufferBytes().length,(waveFileProcessor.getFile().length() - channelContext.getChannelBufferBytes().length) * 1000 /audioFormat.getSampleRate()/2);
 			}else{
 				return false;
 			}