
fix: distinguish rhythm and pitch errors in more detail
liujunchi 3 years ago
parent commit
6a001d823c

+ 51 - 17
audio-analysis/src/main/java/com/yonge/Main.java

@@ -12,16 +12,18 @@ import be.tarsos.dsp.pitch.PitchProcessor;
 import com.yonge.audio.analysis.AudioFloatConverter;
 import com.yonge.audio.analysis.detector.YINPitchDetector;
 import com.yonge.audio.utils.ArrayUtil;
+import com.yonge.netty.server.processor.WaveformWriter;
 import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.ArrayUtils;
 
 import javax.sound.sampled.AudioFormat;
 import javax.sound.sampled.AudioInputStream;
 import javax.sound.sampled.AudioSystem;
 import javax.sound.sampled.UnsupportedAudioFileException;
-import java.io.FileNotFoundException;
-import java.io.IOException;
+import java.io.*;
 import java.net.URL;
 import java.util.Arrays;
+import java.util.Date;
 
 /**
  * Description
@@ -44,41 +46,72 @@ public class Main {
     public static void main(String[] args){
         try{
             float sampleRate = 44100;
-            int audioBufferSize = 2048 * 2;
+            int audioBufferSize = 1024 *2;
             int bufferOverlap = 0;
             AudioFloatConverter converter = AudioFloatConverter.getConverter(audioFormat);
             //Create an AudioInputStream from my .wav file
             URL soundURL = Main.class.getResource("/WAV.wav");
             AudioInputStream stream = AudioSystem.getAudioInputStream(soundURL);
-            final MFCC mfccProcessor = new MFCC(audioBufferSize, stream.getFormat().getSampleRate(),
-                                                amountOfCepstrumCoef, amountOfMelFilters, lowerFilterFreq, upperFilterFreq);
+            // final MFCC mfccProcessor = new MFCC(audioBufferSize, stream.getFormat().getSampleRate(),
+            //                                     amountOfCepstrumCoef, amountOfMelFilters, lowerFilterFreq, upperFilterFreq);
 
-            FastYin detector = new FastYin(sampleRate, audioBufferSize );
+            FastYin detector = new FastYin(sampleRate, audioBufferSize *2);
             byte[] bytes = IOUtils.toByteArray(stream);
             AudioFormat format = stream.getFormat();
 
             int b = 0;
             int frequency = 0;
-            while (bytes.length > 2048 *4) {
+            File file = new File("D:\\project\\cooleshow\\audio-analysis\\target\\wav1.wav");
 
-                byte[] bufferData = ArrayUtil.extractByte(bytes, 0, 2048*4 - 1);
+            WaveformWriter waveFileProcessor = new WaveformWriter(file.getAbsolutePath());
+            byte[] bytes1 = new byte[0];
+            // for (int i = 0; i < bytes.length; i++) {
+            //     if (i%2 ==1) {
+            //         System.out.println(bytes[i] + "----------" + bytes[i-1]);
+            //     }
+            // }
+            while (bytes.length > audioBufferSize *2) {
+                byte[] bufferData = ArrayUtil.extractByte(bytes, 0, audioBufferSize*2 - 1);
+
+                bytes1 = ArrayUtil.mergeByte(bytes1, bufferData);
+                byte[] bytes2 = new byte[bytes1.length *2];
+                for (int i = 0; i < bytes1.length; i =i+2) {
 
-                float[] sampleFloats = new float[1024*4];
+                    bytes2[(i+1) *2]  = bytes2[i*2] = bytes1[i];
+                    bytes2[(i+1) *2 +1] = bytes2[i*2 + 1] = bytes1[i +1];
 
-                converter.toFloatArray(bufferData, sampleFloats);
-                int playFrequency = (int)detector.getPitch(sampleFloats).getPitch();
-                if (playFrequency != -1) {
-                    System.out.println("play frequency is " + playFrequency);
                 }
+                // byte ff = bytes1[bytes1.length -1];
+                // for (int start = 0, end = bytes1.length - 2; start < end; start++, end--) {
+                //     byte temp = bytes1[end];
+                //     bytes1[end] = bytes1[start];
+                //     bytes1[start] = temp;
+                // }
+                // bytes1[bytes1.length -1] = ff;
+                //
+                // bytes1 = ArrayUtil.mergeByte(bufferData, bytes1);
+                if (bytes2.length == audioBufferSize *4) {
+                    waveFileProcessor.process(bytes2);
 
 
-                YINPitchDetector frequencyDetector = new YINPitchDetector(sampleFloats.length, audioFormat.getSampleRate());
+                    float[] sampleFloats = new float[audioBufferSize *2];
+
+                    converter.toFloatArray(bytes2, sampleFloats);
+                    int playFrequency = (int) detector.getPitch(sampleFloats).getPitch();
+                    if (playFrequency != -1) {
+                        System.out.println("play frequency is " + playFrequency);
+                    }
+                    bytes1 = new byte[0];
+                }
 
-                playFrequency = (int) frequencyDetector.getFrequency(sampleFloats);
 
-                System.out.println("frequencyDetector play frequency is " + playFrequency);
+                // YINPitchDetector frequencyDetector = new YINPitchDetector(sampleFloats.length, audioFormat.getSampleRate());
+                //
+                // playFrequency = (int) frequencyDetector.getFrequency(sampleFloats);
+                //
+                // System.out.println("frequencyDetector play frequency is " + playFrequency);
                 // ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), bufferSize, totalLength - 1)
-                bytes  = ArrayUtil.extractByte(bytes, 2048*4, bytes.length - 1);
+                bytes  = ArrayUtil.extractByte(bytes, audioBufferSize, bytes.length - 1);
                 // if (b == 1) {
                 //     frequency += playFrequency;
                 //     System.out.println("play frequency is " +frequency/2);
@@ -89,6 +122,7 @@ public class Main {
                 //     b ++;
                 // }
             }
+            waveFileProcessor.processingFinished();
 
 
             //Convert into TarsosDSP API
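
Note on Main.java: the new loop writes every 16-bit PCM frame twice into bytes2, stretching the waveform to twice its duration, so a pitch detected on the doubled buffer comes out an octave lower, which lines up with the low-frequency experiment commented out in UserChannelContext below. Each iteration also extracts audioBufferSize * 2 bytes but advances the stream by only audioBufferSize, so successive windows overlap by half. A standalone sketch of the frame-duplication step (duplicateFrames is an illustrative name, not part of this repo; assumes 16-bit little-endian mono PCM):

    // Duplicate every 16-bit PCM frame: [f1 f2 ...] -> [f1 f1 f2 f2 ...].
    // Mirrors the bytes1 -> bytes2 loop in the hunk above.
    static byte[] duplicateFrames(byte[] input) {
        byte[] out = new byte[input.length * 2];
        for (int i = 0; i < input.length; i += 2) {
            out[i * 2]     = out[(i + 1) * 2]     = input[i];     // low byte, written twice
            out[i * 2 + 1] = out[(i + 1) * 2 + 1] = input[i + 1]; // high byte, written twice
        }
        return out;
    }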

+ 8 - 4
audio-analysis/src/main/java/com/yonge/netty/dto/NoteAnalysis.java

@@ -5,7 +5,11 @@ import com.yonge.toolset.base.enums.BaseEnum;
 public class NoteAnalysis {
 
 	public enum NoteErrorType implements BaseEnum<String, NoteErrorType> {
-		RIGHT("演奏正确"), CADENCE_WRONG("节奏错误"), INTONATION_WRONG("音准错误"), INTEGRITY_WRONG("完整度不足"), NOT_PLAY("未演奏");
+		RIGHT("演奏正确"), CADENCE_WRONG("节奏错误"), INTONATION_WRONG("音准错误"), INTEGRITY_WRONG("完整度不足"), NOT_PLAY("未演奏"),
+
+		CADENCE_FAST("节奏过快"),CADENCE_SLOW("节奏过慢"),
+		INTONATION_HIGH("音准过高"),INTONATION_LOW("音准过低"),
+		;
 
 		private String msg;
 
@@ -37,7 +41,7 @@ public class NoteAnalysis {
 
 	private int playFrequency = -1;
 
-	private boolean tempo = true;
+	private int tempo = 0;
 
 	private NoteErrorType noteErrorType = NoteErrorType.RIGHT;
 
@@ -116,11 +120,11 @@ public class NoteAnalysis {
 		this.frequency = frequency;
 	}
 
-	public boolean isTempo() {
+	public int getTempo() {
 		return tempo;
 	}
 
-	public void setTempo(boolean tempo) {
+	public void setTempo(int tempo) {
 		this.tempo = tempo;
 	}
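
Note on NoteAnalysis.java: tempo changes from a boolean to an int code. Per the comment added in UserChannelContext below, the encoding is 0 = correct, 1 = wrong, 2 = too slow, 3 = too fast. The commit uses bare literals; named constants make the codes readable (illustrative sketch, not in the repo):

    // Tempo result codes consumed by NoteAnalysis.setTempo(int) in this commit.
    public static final int TEMPO_OK    = 0; // rhythm correct
    public static final int TEMPO_WRONG = 1; // generic rhythm error
    public static final int TEMPO_SLOW  = 2; // onset too late (maps to CADENCE_SLOW)
    public static final int TEMPO_FAST  = 3; // maps to CADENCE_FAST; not yet assigned anywhere in this commit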
 

+ 4 - 3
audio-analysis/src/main/java/com/yonge/netty/dto/NotePlayResult.java

@@ -2,7 +2,8 @@ package com.yonge.netty.dto;
 
 public class NotePlayResult {
 
-	private boolean status;
+	// 0: correct 1: pitch too low 2: pitch too high
+	private int status;
 	
 	private double migrationRate;
 	
@@ -10,11 +11,11 @@ public class NotePlayResult {
 		// TODO Auto-generated constructor stub
 	}
 
-	public boolean getStatus() {
+	public int getStatus() {
 		return status;
 	}
 
-	public void setStatus(boolean status) {
+	public void setStatus(int status) {
 		this.status = status;
 	}
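
Note on NotePlayResult.java: status likewise becomes an int code, documented by the new comment as 0 = correct, 1 = pitch too low, 2 = pitch too high. The mapping to the new enum values lives in UserChannelContext.setMusicalErrorStatus below, which returns null for status 0; that is safe only because both call sites check status != 0 first. A null-free variant (illustrative sketch, not in the commit):

    // Map NotePlayResult.status to a NoteErrorType without a null branch.
    static NoteErrorType toIntonationError(int status) {
        switch (status) {
            case 1:  return NoteErrorType.INTONATION_LOW;  // pitch too low
            case 2:  return NoteErrorType.INTONATION_HIGH; // pitch too high
            default: return NoteErrorType.RIGHT;           // status 0: in tune
        }
    }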
 

+ 108 - 63
audio-analysis/src/main/java/com/yonge/netty/dto/UserChannelContext.java

@@ -15,8 +15,7 @@ import java.util.stream.Collectors;
 
 import javax.sound.sampled.AudioFormat;
 
-import com.yonge.audio.utils.ArrayUtil;
-import org.apache.commons.lang.ArrayUtils;
+import com.yonge.audio.analysis.AudioFloatConverter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,6 +28,7 @@ import com.yonge.netty.entity.MusicXmlBasicInfo;
 import com.yonge.netty.entity.MusicXmlNote;
 import com.yonge.netty.entity.MusicXmlSection;
 import com.yonge.netty.server.processor.WaveformWriter;
+import org.springframework.util.CollectionUtils;
 
 /**
  * User channel context
@@ -94,20 +94,22 @@ public class UserChannelContext {
 
 		NotePlayResult result = new NotePlayResult();
 
-		boolean status = false;
+		int status;
 		double migrationRate = 0;
 
 		if (Math.round(xmlNote.getFrequency()) == Math.round(playFrequency)) {
-			status = true;
+			status = 0;
 			migrationRate = 0;
 		} else {
 			NoteFrequencyRange noteFrequencyRange = new NoteFrequencyRange(standardFrequecy, xmlNote.getFrequency());
 
-			if (noteFrequencyRange.getMinFrequency() > playFrequency || playFrequency > noteFrequencyRange.getMaxFrequency()) {
-				status = false;
+			if (noteFrequencyRange.getMinFrequency() > playFrequency ) {
+				status = 1;
+			} else if( playFrequency > noteFrequencyRange.getMaxFrequency()){
+				status = 2;
 			} else {
 
-				status = true;
+				status = 0;
 
 				if (Math.round(playFrequency) < Math.round(xmlNote.getFrequency())) {
 					double min = Math.abs(xmlNote.getFrequency() - noteFrequencyRange.getMinFrequency()) / 2;
@@ -327,11 +329,14 @@ public class UserChannelContext {
 		return evaluatingSectionIndex;
 	}
 
-	public void handle(float[] samples, AudioFormat audioFormat){
+	public void handle(byte[] bytes, AudioFormat audioFormat, int bufferSize, AudioFloatConverter converter){
 		
 		//YINPitchDetector frequencyDetector = new YINPitchDetector(samples.length , audioFormat.getSampleRate());
 		//int playFrequency = (int) frequencyDetector.getFrequency(samples);
 
+		float[] samples = new float[bufferSize / 2];
+		converter.toFloatArray(bytes, samples);
+
 		double durationTime = 1000 * (samples.length * 2) / audioFormat.getSampleRate() / (audioFormat.getSampleSizeInBits() / 8);
 
 		playTime += durationTime;
@@ -344,22 +349,34 @@ public class UserChannelContext {
 		}
 		int playFrequency = -1;
 
-		if(!percussionList.contains(subjectId)){
-			if (musicXmlNote.getFrequency() < MIN_FREQUECY) {
-				float[] floats = ArrayUtils.addAll(samples, samples);
-				float[] lowSamples = ArrayUtils.addAll(floats,floats);
-
-				playFrequency = (int) lowDetector.getPitch(lowSamples).getPitch();
-
-			} else {
-				playFrequency = (int) detector.getPitch(samples).getPitch();
-			}
-			if (playFrequency != -1) {
-				LOGGER.info("频率 {}", playFrequency);
-			}
+		// test: duplicating the samples for low-frequency detection ---> failed
+		// if(!percussionList.contains(subjectId)){
+		// 	if (musicXmlNote.getFrequency() < MIN_FREQUECY) {
+		// 		// byte[] floats = ArrayUtils.addAll(bytes, bytes);
+		// 		// byte[] floatsSamples = ArrayUtils.addAll(floats, floats);
+		// 		//
+		// 		// float[] lowSamples = new float[floatsSamples.length / 2];
+		// 		// converter.toFloatArray(floatsSamples, lowSamples);
+		// 		//
+		// 		// 	playFrequency = (int) lowDetector.getPitch(lowSamples).getPitch();
+		// 			if (playFrequency != -1) {
+		// 				// LOGGER.info("频率1 {}", playFrequency);
+		// 			}
+		// 			// } else {
+		// 			playFrequency = (int) detector.getPitch(samples).getPitch();
+		// 			// }
+		// 			if (playFrequency != -1) {
+		// 				LOGGER.info("频率2 {}", playFrequency);
+		// 			}
+		// 	}
+		//
+		// }
 
+		if(!percussionList.contains(subjectId)){
+			playFrequency = (int) detector.getPitch(samples).getPitch();
+			LOGGER.info("频率 {}", playFrequency);
 		}
-		
+
 		int splDb = (int) Signals.soundPressureLevel(samples);
 		int power = (int) Signals.power(samples);
 		int amplitude = (int) Signals.norm(samples);
@@ -413,7 +430,8 @@ public class UserChannelContext {
 				}
 				
 				//Judge rhythm (uninterrupted pitch across the note's duration means the rhythm is correct)
-				boolean tempo = true;
+				// tempo: 0 = correct, 1 = wrong, 2 = too slow, 3 = too fast
+				int tempo = 0;
 				if (percussionList.contains(subjectId)) {
 					noteAnalysis.setPlayFrequency(-1);
 					tempo = computeTempoWithAmplitude2(musicXmlNote);
@@ -427,7 +445,7 @@ public class UserChannelContext {
 				evaluateForNote(musicXmlNote, noteAnalysis);//score the current note
 
 				LOGGER.debug("当前音符下标[{}] 预计频率:{} 实际频率:{} 节奏:{}", noteAnalysis.getMusicalNotesIndex(), musicXmlNote.getFrequency(), noteAnalysis.getPlayFrequency(),
-						noteAnalysis.isTempo());
+						noteAnalysis.getTempo());
 				
 				doneNoteAnalysisList.add(noteAnalysis);
 				
@@ -553,15 +571,17 @@ public class UserChannelContext {
 		double correctedEndTime = correctedStartTime + musicXmlNote.getDuration();
 		
 		chunkAnalysisList = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getStartTime()) >= Double.doubleToLongBits(correctedStartTime) && Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(correctedEndTime)).collect(Collectors.toList());
-		
-		double durationTime = chunkAnalysisList.get(chunkAnalysisList.size() - 1).getEndTime() - chunkAnalysisList.get(0).getStartTime();
-		
+		double durationTime;
+		if (CollectionUtils.isEmpty(chunkAnalysisList)) {
+			durationTime = 0;
+		}else durationTime = chunkAnalysisList.get(chunkAnalysisList.size() - 1).getEndTime() - chunkAnalysisList.get(0).getStartTime();
+
 		double playDurationTime = 0;
 		
 		if (percussionList.contains(subjectId)) {
-			if (noteAnalysis.getFrequency() == -1) {// rest
-				if (!noteAnalysis.isTempo()) {
-					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
+			if (noteAnalysis.getFrequency() == -1) { // rest
+				if (noteAnalysis.getTempo() != 0 ) {
+					noteAnalysis.setMusicalErrorType(setMusicalErrorTempo(noteAnalysis.getTempo()));
 				} else {
 					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
 				}
@@ -569,8 +589,8 @@ public class UserChannelContext {
 				int beatTimes = (int) chunkAnalysisList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).count();
 				if(beatTimes == 0){
 					noteAnalysis.setMusicalErrorType(NoteErrorType.NOT_PLAY);
-				}else if (!noteAnalysis.isTempo()) {
-					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
+				}else if (noteAnalysis.getTempo() != 0) {
+					noteAnalysis.setMusicalErrorType(setMusicalErrorTempo(noteAnalysis.getTempo()));
 				} else {
 					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
 				}
@@ -583,12 +603,12 @@ public class UserChannelContext {
 
 				playDurationTime = chunkAnalysisList.stream().filter(t -> t.getFrequency() <= MIN_FREQUECY).mapToDouble(t -> t.getDurationTime()).sum();
 
-				if (!noteAnalysis.isTempo()) {
-					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
+				if (noteAnalysis.getTempo() != 0) {
+					noteAnalysis.setMusicalErrorType(setMusicalErrorTempo(noteAnalysis.getTempo()));
 				} else if (playDurationTime * 100 / durationTime < hardLevel.getIntegrityRange()) {
 					noteAnalysis.setMusicalErrorType(NoteErrorType.INTEGRITY_WRONG);
-				} else if (notePlayResult.getStatus() == false) {
-					noteAnalysis.setMusicalErrorType(NoteErrorType.INTONATION_WRONG);
+				} else if (notePlayResult.getStatus() != 0) {
+					noteAnalysis.setMusicalErrorType(setMusicalErrorStatus(notePlayResult.getStatus()));
 				} else {
 					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
 				}
@@ -602,10 +622,10 @@ public class UserChannelContext {
 				} else if (playDurationTime * 100 / durationTime < hardLevel.getIntegrityRange()) {
 					noteAnalysis.setMusicalErrorType(NoteErrorType.INTEGRITY_WRONG);
 					LOGGER.debug("完整度不足:{}", playDurationTime * 100 / durationTime);
-				} else if (!noteAnalysis.isTempo()) {
-					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
-				} else if (notePlayResult.getStatus() == false) {
-					noteAnalysis.setMusicalErrorType(NoteErrorType.INTONATION_WRONG);
+				} else if (noteAnalysis.getTempo() != 0) {
+					noteAnalysis.setMusicalErrorType(setMusicalErrorTempo(noteAnalysis.getTempo()));
+				} else if (notePlayResult.getStatus() != 0) {
+					noteAnalysis.setMusicalErrorType(setMusicalErrorStatus(notePlayResult.getStatus()));
 				} else {
 					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
 				}
@@ -629,7 +649,7 @@ public class UserChannelContext {
 			intonationScore = 0;
 		} else {
 
-			if (noteAnalysis.isTempo()) {
+			if (noteAnalysis.getTempo() == 0) {
 				tempoScore = 100;
 				noteAnalysis.setTempoScore(tempoScore);
 			}
@@ -648,7 +668,31 @@ public class UserChannelContext {
 					.intValue());
 		}
 	}
-	
+
+	// set the pitch (intonation) error type
+	private NoteErrorType setMusicalErrorStatus(int status) {
+		if (status == 1) {
+			return NoteErrorType.INTONATION_LOW;
+		} else if (status ==2) {
+			return NoteErrorType.INTONATION_HIGH;
+		}
+		return null;
+	}
+
+	// set the rhythm (tempo) error type
+	private NoteErrorType setMusicalErrorTempo(int tempo) {
+		if (tempo == 1) {
+			return NoteErrorType.CADENCE_WRONG;
+		} else if (tempo == 2) {
+			return NoteErrorType.CADENCE_SLOW;
+		} else if (tempo ==3) {
+
+			return NoteErrorType.CADENCE_FAST;
+		} else {
+			return NoteErrorType.RIGHT;
+		}
+	}
+
 	private int computeFrequency(MusicXmlNote musicXmlNote) {
 		
 		double floatingRange = musicXmlNote.getDuration() * hardLevel.getTempoEffectiveRange(musicXmlNote.getDenominator()) / 100;
@@ -699,7 +743,7 @@ public class UserChannelContext {
 	 * @param musicXmlNote
 	 * @return
 	 */
-	private boolean computeTempoWithFrequency(MusicXmlNote musicXmlNote){
+	private int computeTempoWithFrequency(MusicXmlNote musicXmlNote){
 		
 		double floatingRange = musicXmlNote.getDuration() * hardLevel.getTempoEffectiveRange(musicXmlNote.getDenominator()) / 100;
 		
@@ -721,11 +765,11 @@ public class UserChannelContext {
 		List<ChunkAnalysis> chunkList = chunkAnalysisList.subList(startIndex, elementSize + startIndex);
 		
 		if(chunkList == null || chunkList.size() == 0){
-			return false;
+			return 1;
 		}
 		
 		if (musicXmlNote.getFrequency() == -1) {// rest
-			return chunkList.stream().filter(t -> t.getFrequency() > MIN_FREQUECY).count() <= 1;
+			return chunkList.stream().filter(t -> t.getFrequency() > MIN_FREQUECY).count() <= 1? 0:1;
 		}
 
 		ChunkAnalysis firstChunkAnalysis = chunkAnalysisList.get(0);
@@ -761,7 +805,7 @@ public class UserChannelContext {
 		
 		NoteFrequencyRange noteFrequencyRange = null;
 		ChunkAnalysis chunkAnalysis = null;
-		boolean tempo = true;
+		int tempo = 0;
 		//boolean isContinue = true;
 		//int unplayedSize = 0;
 		int firstPeakIndex = -1;
@@ -803,7 +847,7 @@ public class UserChannelContext {
 		}
 		
 		if (maxTimes * 100 / totalTimes < hardLevel.getIntegrityRange()) {
-			tempo = false;
+			tempo = 1;
 			LOGGER.debug("节奏错误原因:信号分堆后的最大数量不足指定的完成比例");
 		}
 		
@@ -846,16 +890,17 @@ public class UserChannelContext {
 		}
 		*/
 		
-		if (tempo) {
+		if (tempo == 0) {
 			// check the onset time
 			if(firstPeakIndex * 100 /chunkList.size() > hardLevel.getTempoEffectiveRange(musicXmlNote.getDenominator())){
-				tempo = false;
+				// tempo too slow
+				tempo = 2;
 				LOGGER.debug("节奏错误原因:进入时间点太晚");
 			}else{
 				//check whether the sound carried over from the previous note
 				if(firstChunkAnalysis.getFrequency() > MIN_FREQUECY && lastChunkAnalysis.getFrequency() > MIN_FREQUECY){
-					tempo = new NoteFrequencyRange(standardFrequecy, firstChunkAnalysis.getFrequency()).equals(new NoteFrequencyRange(standardFrequecy, lastChunkAnalysis.getFrequency())) == false;
-					if(tempo == false){
+					tempo = new NoteFrequencyRange(standardFrequecy, firstChunkAnalysis.getFrequency()).equals(new NoteFrequencyRange(standardFrequecy, lastChunkAnalysis.getFrequency())) == false?0:1;
+					if(tempo == 1){
 						LOGGER.debug("节奏错误原因:上一个音[{}]延续下来导致的", lastChunkAnalysis.getFrequency());
 					}
 				}
@@ -865,7 +910,7 @@ public class UserChannelContext {
 		return tempo;
 	}
 	
-	private boolean computeTempoWithAmplitude2(MusicXmlNote musicXmlNote) {
+	private int computeTempoWithAmplitude2(MusicXmlNote musicXmlNote) {
 
 		double floatingRange = musicXmlNote.getDuration() * hardLevel.getTempoEffectiveRange(musicXmlNote.getDenominator()) / 100;
 		
@@ -886,7 +931,7 @@ public class UserChannelContext {
 		List<ChunkAnalysis> chunkList = chunkAnalysisList.subList(0, elementSize);
 		
 		if(chunkList == null || chunkList.size() == 0){
-			return false;
+			return 1;
 		}
 		
 		ChunkAnalysis firstChunkAnalysis = chunkAnalysisList.get(0);
@@ -897,7 +942,7 @@ public class UserChannelContext {
 		if (musicXmlNote.getFrequency() == -1) {// rest
 			
 			LOGGER.debug("--Amplitude:{}  Denominator:{}",chunkList.stream().map(t -> t).collect(Collectors.toList()), musicXmlNote.getDenominator());
-			return chunkList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).count() <= 0;
+			return chunkList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).count() <= 0 ? 0:1;
 		}
 		
 		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) < Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
@@ -917,7 +962,7 @@ public class UserChannelContext {
 		LOGGER.debug("--Amplitude:{}  Denominator:{}",chunkAmplitudeList.stream().map(t -> t).collect(Collectors.toList()), musicXmlNote.getDenominator());
 		
 		// check whether there are multiple peaks
-		boolean tempo = false;
+		int tempo = 1;
 		boolean isContinue = true;
 		int firstPeakIndex = -1;
 		int peakSize = 0;
@@ -926,36 +971,36 @@ public class UserChannelContext {
 				continue;
 			}
 			if (chunkAmplitudeList.get(i) > hardLevel.getAmplitudeThreshold() && chunkAmplitudeList.get(i) > chunkAmplitudeList.get(i - 1)) {
-				tempo = true;
+				tempo = 0;
 				if(firstPeakIndex == -1){
 					firstPeakIndex = i;
 					peakSize++;
 				}
 				if (isContinue == false) {
-					tempo = false;
+					tempo = 1;
 					peakSize++;
 					break;
 				}
 			} else {
-				if (tempo == true) {
+				if (tempo == 0) {
 					isContinue = false;
 				}
 			}
 		}
 		
 		if(peakSize == 0){
-			tempo = lastChunkAnalysis.isPeak();
+			tempo = lastChunkAnalysis.isPeak() ?0:1;
 		}else if(peakSize == 1){
-			tempo = true;
+			tempo = 0;
 		}else{
-			tempo = false;
+			tempo = 1;
 		}
 		
-		if (tempo) {
+		if (tempo == 0) {
 			// check the onset time
 			if((firstPeakIndex - 1) * 100 /chunkAmplitudeList.size() > hardLevel.getTempoEffectiveRange(musicXmlNote.getDenominator()) * 2){
 				LOGGER.debug("超过范围:{}", (firstPeakIndex - 1) * 100 /chunkAmplitudeList.size());
-				tempo = false;
+				tempo = 1;
 			}
 		}
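
Note on UserChannelContext.java: the new empty-list guard sets durationTime to 0, but playDurationTime * 100 / durationTime further down is a double division, so a zero duration yields NaN (or Infinity) and the integrity check silently evaluates to false instead of throwing. A defensive variant (illustrative sketch, not in the commit):

    // Guard both the empty list and the zero-duration case before dividing.
    double durationTime = chunkAnalysisList.isEmpty() ? 0
            : chunkAnalysisList.get(chunkAnalysisList.size() - 1).getEndTime()
              - chunkAnalysisList.get(0).getStartTime();
    double integrity = durationTime > 0
            ? playDurationTime * 100 / durationTime
            : 0; // treat an empty window as 0% integrity rather than NaN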
 		

+ 4 - 3
audio-analysis/src/main/java/com/yonge/netty/server/service/AudioCompareHandler.java

@@ -16,6 +16,7 @@ import java.util.stream.Collectors;
 
 import javax.sound.sampled.AudioFormat;
 
+import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -118,7 +119,7 @@ public class AudioCompareHandler implements MessageHandler {
 		case "musicXml": // 同步music xml信息
 			
 			musicXmlBasicInfo = JSONObject.toJavaObject(dataObj, MusicXmlBasicInfo.class);
-			
+
 			userChannelContextService.remove(channel);
 
 			channelContext = new UserChannelContext();
@@ -289,7 +290,7 @@ public class AudioCompareHandler implements MessageHandler {
 			waveFileProcessor = new WaveformWriter(file.getAbsolutePath());
 			channelContext.setWaveFileProcessor(waveFileProcessor);
 		}
-		waveFileProcessor.process(datas);
+		waveFileProcessor.process(ArrayUtils.addAll(datas, datas));
 		
 		/*datas = channelContext.skipMetronome(datas);
 
@@ -340,7 +341,7 @@ public class AudioCompareHandler implements MessageHandler {
 
 			converter.toFloatArray(bufferData, sampleFloats);
 
-			channelContext.handle(sampleFloats, audioFormat);
+			channelContext.handle(bufferData, audioFormat,bufferSize,converter);
 
 			MusicXmlBasicInfo musicXmlBasicInfo = channelContext.getMusicXmlBasicInfo(null);
 			int sectionIndex = channelContext.getEvaluatingSectionIndex().get();
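
Note on AudioCompareHandler.java: ArrayUtils.addAll(datas, datas) appends the whole chunk to itself before it is written to the WAV file, a different transform from the per-frame duplication in Main.java (comparison below is illustrative, with f1, f2, ... as 16-bit frames):

    // Whole-buffer repeat (this commit, AudioCompareHandler):
    //   [f1 f2 f3] -> [f1 f2 f3 f1 f2 f3]
    byte[] repeated = ArrayUtils.addAll(datas, datas);

    // Per-frame duplication (Main.java above):
    //   [f1 f2 f3] -> [f1 f1 f2 f2 f3 f3]

Also note that handle() now takes the raw byte buffer plus the AudioFloatConverter and converts to floats internally (bufferSize / 2 samples for 16-bit PCM), while the caller still fills sampleFloats via converter.toFloatArray(bufferData, sampleFloats), so the same chunk is now converted twice.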

+ 1 - 1
audio-analysis/src/main/resources/logback-spring.xml

@@ -27,7 +27,7 @@
 		</encoder>
 	</appender>
 
-	<logger name="com.yonge" level="info" />
+	<logger name="com.yonge" level="debug" />
 
 	<!-- dev environment: log to console -->
 	<springProfile name="local">

+ 1 - 0
cooleshow-user/user-admin/src/main/java/com/yonge/cooleshow/admin/controller/EmployeeController.java

@@ -11,6 +11,7 @@ import com.yonge.toolset.mybatis.support.PageUtil;
 import com.yonge.cooleshow.biz.dal.vo.EmployeeVo;
 import com.yonge.cooleshow.common.entity.HttpResponseResult;
 import com.yonge.toolset.base.page.PageInfo;
+import com.yonge.toolset.utils.http.HttpUtil;
 import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiImplicitParam;
 import io.swagger.annotations.ApiImplicitParams;