Browse Source

Merge branch 'vip_price_827'

yonge cách đây 3 năm
mục cha
commit
c749902807

+ 4 - 3
audio-analysis/src/main/java/com/yonge/netty/dto/HardLevelEnum.java

@@ -8,20 +8,21 @@ public enum HardLevelEnum implements BaseEnum<String, HardLevelEnum> {
 	 * 节奏有效范围(1分音符), 节奏有效范围(2分音符), 节奏有效范围(4分音符), 节奏有效范围(8分音符), 节奏有效范围(16分音符), 节奏有效范围(32分音符)<br>
 	 * 完成度范围, 未演奏的范围
 	 */
-	BEGINNER("入门级", 3, 5, 5, 5, 10, 10, 13, 15, 60, 10), 
+	//BEGINNER("入门级", 3, 5, 5, 5, 10, 10, 13, 15, 60, 10), 
+	BEGINNER("入门级", 3, 5, 10, 10, 15, 15, 22, 22, 75, 25), 
 	/**
 	 * 进阶级, 振幅阈值, 频率阈值 <br>
 	 * 节奏有效范围(1分音符), 节奏有效范围(2分音符), 节奏有效范围(4分音符), 节奏有效范围(8分音符), 节奏有效范围(16分音符), 节奏有效范围(32分音符)<br>
 	 * 完成度范围, 未演奏的范围
 	 */
-	ADVANCED("进阶级", 3, 5, 8, 8, 15, 15, 30, 30, 80, 10),
+	ADVANCED("进阶级", 3, 5, 8, 8, 12, 12, 20, 20, 85, 15),
 	//ADVANCED("进阶级", 3, 5, 50, 50, 50, 50, 50, 5, 80, 10),
 	/**
 	 * 大师级, 振幅阈值, 频率阈值 <br>
 	 * 节奏有效范围(1分音符), 节奏有效范围(2分音符), 节奏有效范围(4分音符), 节奏有效范围(8分音符), 节奏有效范围(16分音符), 节奏有效范围(32分音符)<br>
 	 * 完成度范围, 未演奏的范围
 	 */
-	PERFORMER("大师级", 3, 3, 5, 5, 10, 10, 25, 25, 90, 20);
+	PERFORMER("大师级", 3, 3, 5, 5, 10, 10, 13, 15, 95, 10);
 
 	private String msg;
 

+ 5 - 0
audio-analysis/src/main/java/com/yonge/netty/dto/NoteFrequencyRange.java

@@ -69,5 +69,10 @@ public class NoteFrequencyRange {
 		}
 		return false;
 	}
+	
+	public static void main(String[] args) {
+		NoteFrequencyRange nfr = new NoteFrequencyRange(442,442);
+		System.out.println(nfr.getMinFrequency() + "-"+ nfr.getMaxFrequency());
+	}
 
 }

+ 66 - 55
audio-analysis/src/main/java/com/yonge/netty/dto/UserChannelContext.java

@@ -21,7 +21,6 @@ import be.tarsos.dsp.pitch.FastYin;
 
 import com.yonge.audio.analysis.Signals;
 import com.yonge.audio.analysis.detector.YINPitchDetector;
-import com.yonge.audio.utils.ArrayUtil;
 import com.yonge.netty.dto.NoteAnalysis.NoteErrorType;
 import com.yonge.netty.entity.MusicXmlBasicInfo;
 import com.yonge.netty.entity.MusicXmlNote;
@@ -38,11 +37,17 @@ public class UserChannelContext {
 	//打击乐
 	private final static List<Integer> percussionList = Arrays.asList(23, 113);
 	
+	private final static int MIN_FREQUECY = 100;
+	
+	private final static int MAX_FREQUECY = 2000;
+	
+	private FastYin detector;
+	
 	private String user;
 	
 	private double standardFrequecy = 442;
 	
-	private int offsetMS;
+	private float offsetMS;
 	
 	private double dynamicOffset;
 	
@@ -52,9 +57,7 @@ public class UserChannelContext {
 	
 	private Integer subjectId;
 	
-	private int beatDuration;
-	
-	private int beatByteLength;
+	private float beatDuration;
 	
 	private boolean delayProcessed;
 	
@@ -77,8 +80,6 @@ public class UserChannelContext {
 	
 	private double playTime;
 	
-	private double receivedTime;
-	
 	private HardLevelEnum hardLevel = HardLevelEnum.ADVANCED;
 	
 	private boolean handlerSwitch;
@@ -118,27 +119,14 @@ public class UserChannelContext {
 		return result;
 	}
 	
-	public void init(String platform, String heardLevel, int subjectId, int beatDuration) {
+	public void init(String platform, String heardLevel, int subjectId, float beatDuration,float sampleRate, int bufferSize) {
 		this.platform = platform;
 		this.subjectId = subjectId;
 		this.beatDuration = beatDuration;
-		this.beatByteLength = WaveformWriter.SAMPLE_RATE * WaveformWriter.BITS_PER_SAMPLE / 8 * beatDuration / 1000;
 		hardLevel = HardLevelEnum.valueOf(heardLevel);
-	}
-	
-	public byte[] skipMetronome(byte[] datas) {
-		if (beatByteLength > 0) {
-			if (datas.length <= beatByteLength) {
-				beatByteLength -= datas.length;
-				return new byte[0];
-			}
-			if(beatByteLength % 2 != 0){
-				beatByteLength++;
-			}
-			datas = ArrayUtil.extractByte(datas, beatByteLength, datas.length - 1);
-			beatByteLength = 0;
+		if(detector == null){
+			detector = new FastYin(sampleRate, bufferSize);
 		}
-		return datas;
 	}
 	
 	public void setUser(String user) {
@@ -161,14 +149,22 @@ public class UserChannelContext {
 		this.handlerSwitch = handlerSwitch;
 	}
 
-	public int getOffsetMS() {
+	public float getOffsetMS() {
 		return offsetMS;
 	}
 
-	public void setOffsetMS(int offsetMS) {
+	public void setOffsetMS(float offsetMS) {
 		this.offsetMS = offsetMS;
 	}
 
+	public float getBeatDuration() {
+		return beatDuration;
+	}
+
+	public void setBeatDuration(float beatDuration) {
+		this.beatDuration = beatDuration;
+	}
+
 	public HardLevelEnum getHardLevel() {
 		return hardLevel;
 	}
@@ -202,7 +198,6 @@ public class UserChannelContext {
 	}
 
 	public void resetUserInfo() {
-		beatByteLength = WaveformWriter.SAMPLE_RATE * WaveformWriter.BITS_PER_SAMPLE / 8 * beatDuration / 1000;
 		waveFileProcessor = null;
 		processingNote = new NoteAnalysis(0,0,-1);
 		evaluatingSectionIndex = new AtomicInteger(0);
@@ -212,8 +207,8 @@ public class UserChannelContext {
 		totalChunkAnalysisList = new ArrayList<ChunkAnalysis>();
 		recordId = null;
 		playTime = 0;
-		receivedTime = 0;
 		delayProcessed = false;
+		offsetMS = 0;
 		dynamicOffset = 0;
 		handlerSwitch = false;
 	}
@@ -330,10 +325,11 @@ public class UserChannelContext {
 		//YINPitchDetector frequencyDetector = new YINPitchDetector(samples.length , audioFormat.getSampleRate());
 		//int playFrequency = (int) frequencyDetector.getFrequency(samples);
 		
-		FastYin detector = new FastYin(audioFormat.getSampleRate(), samples.length);
 		int playFrequency = -1;
 		if(!percussionList.contains(subjectId)){
 			playFrequency = (int)detector.getPitch(samples).getPitch();
+		}else{
+			LOGGER.info("subjectId:{}", subjectId);
 		}
 		
 		int splDb = (int) Signals.soundPressureLevel(samples);
@@ -343,12 +339,6 @@ public class UserChannelContext {
 		
 		double durationTime = 1000 * (samples.length * 2) / audioFormat.getSampleRate() / (audioFormat.getSampleSizeInBits() / 8);
 		
-		receivedTime += durationTime;
-		
-		if(receivedTime < offsetMS){
-			return;
-		}
-		
 		playTime += durationTime;
 		
 		// 获取当前音符信息
@@ -383,7 +373,7 @@ public class UserChannelContext {
 			if(percussionList.contains(subjectId)){
 				flag = chunkAnalysis.getAmplitude() > hardLevel.getAmplitudeThreshold();
 			}else{
-				flag = chunkAnalysis.getFrequency() > 100;
+				flag = chunkAnalysis.getFrequency() > MIN_FREQUECY && chunkAnalysis.getFrequency() < MAX_FREQUECY;
 			}
 			
 			if(delayProcessed == false && flag){
@@ -566,7 +556,7 @@ public class UserChannelContext {
 			
 			if (noteAnalysis.getFrequency() == -1) {// 休止符
 
-				playDurationTime = chunkAnalysisList.stream().filter(t -> t.getFrequency() <= 100).mapToDouble(t -> t.getDurationTime()).sum();
+				playDurationTime = chunkAnalysisList.stream().filter(t -> t.getFrequency() <= MIN_FREQUECY).mapToDouble(t -> t.getDurationTime()).sum();
 
 				if (!noteAnalysis.isTempo()) {
 					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
@@ -578,7 +568,7 @@ public class UserChannelContext {
 					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
 				}
 			} else {
-				playDurationTime = chunkAnalysisList.stream().filter(t -> t.getFrequency() > 100 && t.getFrequency() < 2000)
+				playDurationTime = chunkAnalysisList.stream().filter(t -> t.getFrequency() > MIN_FREQUECY && t.getFrequency() < MAX_FREQUECY)
 						.mapToDouble(t -> t.getDurationTime()).sum();
 
 				if (playDurationTime * 100 / durationTime < hardLevel.getNotPlayRange()) {
@@ -641,6 +631,8 @@ public class UserChannelContext {
 		double endTime = musicXmlNote.getTimeStamp() + dynamicOffset + floatingRange;
 		double startTime = musicXmlNote.getTimeStamp() + dynamicOffset - floatingRange;
 		
+		LOGGER.info("------------TimeStamp:{}  Duration:{}  floatingRange:{}  StartTime:{}  EndTime:{}------------", musicXmlNote.getTimeStamp(), musicXmlNote.getDuration(), floatingRange, startTime, endTime);
+		
 		List<ChunkAnalysis> chunkAnalysisList = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getStartTime()) >= Double.doubleToLongBits(startTime) && Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(endTime)).collect(Collectors.toList());
 		
 		double correctedStartTime = queryFirstNoteStartTime(chunkAnalysisList, musicXmlNote);
@@ -648,6 +640,8 @@ public class UserChannelContext {
 		
 		chunkAnalysisList = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getStartTime()) >= Double.doubleToLongBits(correctedStartTime) && Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(correctedEndTime)).collect(Collectors.toList());
 
+		LOGGER.info("------------ correctedStartTime:{}  correctedEndTime:{}------------", correctedStartTime, correctedEndTime);
+		
 		//根据完整度取部分有效信号
 		int elementSize = chunkAnalysisList.size() * hardLevel.getIntegrityRange() / 100;
 		chunkAnalysisList = chunkAnalysisList.subList(0, elementSize);
@@ -662,7 +656,7 @@ public class UserChannelContext {
 		
 		List<ChunkAnalysis> chunkList = new ArrayList<ChunkAnalysis>(chunkAnalysisList);
 		
-		List<Integer> chunkFrequencyList = chunkList.stream().map(t -> t.getFrequency()).filter(t -> t.doubleValue() > 100 && t.doubleValue() < 2000)
+		List<Integer> chunkFrequencyList = chunkList.stream().map(t -> t.getFrequency()).filter(t -> t.doubleValue() > MIN_FREQUECY && t.doubleValue() < MAX_FREQUECY)
 				.collect(Collectors.toList());
 		
 		if (chunkFrequencyList.size() == 0) {
@@ -703,12 +697,12 @@ public class UserChannelContext {
 		}
 		
 		if (musicXmlNote.getFrequency() == -1) {// 休止符
-			return chunkList.stream().filter(t -> t.getFrequency() > 100).count() <= 1;
+			return chunkList.stream().filter(t -> t.getFrequency() > MIN_FREQUECY).count() <= 1;
 		}
 		
 		ChunkAnalysis firstChunkAnalysis = chunkAnalysisList.get(0);
 		
-		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) == Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
+		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
 
 		ChunkAnalysis lastChunkAnalysis = null;
 		if (chunkAnalysisOptional.isPresent()) {
@@ -734,18 +728,21 @@ public class UserChannelContext {
 		for (int i = 0; i < chunkList.size(); i++) {
 			chunkAnalysis = chunkList.get(i);
 			if (chunkAnalysis != null) {
-				if (chunkAnalysis.getFrequency() > 100) {
+				if (chunkAnalysis.getFrequency() > MIN_FREQUECY) {
 					
 					tempo = true;
 					if (firstPeakIndex == -1) {
 						firstPeakIndex = i;
 						noteFrequencyRange = new NoteFrequencyRange(standardFrequecy, chunkAnalysis.getFrequency());
 					} else if (noteFrequencyRange.getMinFrequency() > chunkAnalysis.getFrequency()
-							|| chunkAnalysis.getFrequency() > noteFrequencyRange.getMaxFrequency()) {
-						// 判断是否是同一个音
-						tempo = false;
-						LOGGER.info("节奏错误原因:不是同一个音[{}]:{}-{}", chunkAnalysis.getFrequency(), noteFrequencyRange.getMinFrequency(), noteFrequencyRange.getMaxFrequency());
-						break;
+							|| chunkAnalysis.getFrequency() > noteFrequencyRange.getMaxFrequency()) {// 判断是否是同一个音
+						//是否是低八度或高八度
+						if(!((noteFrequencyRange.getMinFrequency() < chunkAnalysis.getFrequency() * 2 && chunkAnalysis.getFrequency() * 2 < noteFrequencyRange.getMaxFrequency())
+								|| (noteFrequencyRange.getMinFrequency() < chunkAnalysis.getFrequency() / 2 && chunkAnalysis.getFrequency() / 2 < noteFrequencyRange.getMaxFrequency()))){
+							tempo = false;
+							LOGGER.info("节奏错误原因:不是同一个音[{}]:{}-{}", chunkAnalysis.getFrequency(), noteFrequencyRange.getMinFrequency(), noteFrequencyRange.getMaxFrequency());
+							break;
+						}
 					}
 					if (isContinue == false) {
 						if ((i + 1) / chunkAnalysisList.size() < hardLevel.getIntegrityRange()) {
@@ -772,10 +769,10 @@ public class UserChannelContext {
 				LOGGER.info("节奏错误原因:进入时间点太晚");
 			}else{
 				//判断是否与上一个音延续下来的
-				if(firstChunkAnalysis.getFrequency() > 100 && lastChunkAnalysis.getFrequency() > 100){
+				if(firstChunkAnalysis.getFrequency() > MIN_FREQUECY && lastChunkAnalysis.getFrequency() > MIN_FREQUECY){
 					tempo = new NoteFrequencyRange(standardFrequecy, firstChunkAnalysis.getFrequency()).equals(new NoteFrequencyRange(standardFrequecy, lastChunkAnalysis.getFrequency())) == false;
 					if(tempo == false){
-						LOGGER.info("节奏错误原因:上一个音延续下来导致的");
+						LOGGER.info("节奏错误原因:上一个音[{}]延续下来导致的", lastChunkAnalysis.getFrequency());
 					}
 				}
 			}
@@ -791,6 +788,8 @@ public class UserChannelContext {
 		double endTime = musicXmlNote.getTimeStamp() + dynamicOffset + floatingRange;
 		double startTime = musicXmlNote.getTimeStamp() + dynamicOffset - floatingRange;
 		
+		LOGGER.info("------------TimeStamp:{}  floatingRange:{}  StartTime:{}  EndTime:{}------------", musicXmlNote.getTimeStamp(), floatingRange, startTime, endTime);
+		
 		List<ChunkAnalysis> chunkAnalysisList = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getStartTime()) >= Double.doubleToLongBits(startTime) && Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(endTime)).collect(Collectors.toList());
 		
 		double correctedStartTime = queryFirstNoteStartTime(chunkAnalysisList, musicXmlNote);
@@ -798,20 +797,24 @@ public class UserChannelContext {
 		
 		chunkAnalysisList = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getStartTime()) >= Double.doubleToLongBits(correctedStartTime) && Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(correctedEndTime)).collect(Collectors.toList());
 		
-		if(chunkAnalysisList == null || chunkAnalysisList.size() == 0){
+		//根据完整度取部分有效信号
+		int elementSize = chunkAnalysisList.size() * (100 - hardLevel.getTempoEffectiveRange(musicXmlNote.getDenominator())) / 100;
+		List<ChunkAnalysis> chunkList = chunkAnalysisList.subList(0, elementSize);
+		
+		if(chunkList == null || chunkList.size() == 0){
 			return false;
 		}
 		
 		ChunkAnalysis firstChunkAnalysis = chunkAnalysisList.get(0);
 		
-		LOGGER.info("-------startTime:{}  endTime:{}------", firstChunkAnalysis.getStartTime(), chunkAnalysisList.get(chunkAnalysisList.size() - 1)
+		LOGGER.info("-------startTime:{}  endTime:{}------", firstChunkAnalysis.getStartTime(), chunkList.get(chunkList.size() - 1)
 				.getEndTime());
 
 		if (musicXmlNote.getFrequency() == -1) {// 休止符
-			return chunkAnalysisList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).count() <= 0;
+			return chunkList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).count() <= 0;
 		}
 		
-		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) == Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
+		Optional<ChunkAnalysis> chunkAnalysisOptional = totalChunkAnalysisList.stream().filter(t -> Double.doubleToLongBits(t.getEndTime()) <= Double.doubleToLongBits(firstChunkAnalysis.getStartTime())).findFirst();
 
 		ChunkAnalysis lastChunkAnalysis = null;
 		if (chunkAnalysisOptional.isPresent()) {
@@ -821,7 +824,7 @@ public class UserChannelContext {
 			lastChunkAnalysis = new ChunkAnalysis(0, 0, -1, 0, 0, 0);
 		}
 		
-		List<Integer> chunkAmplitudeList = chunkAnalysisList.stream().map(ChunkAnalysis::getAmplitude).collect(Collectors.toList());
+		List<Integer> chunkAmplitudeList = chunkList.stream().map(ChunkAnalysis::getAmplitude).collect(Collectors.toList());
 
 		chunkAmplitudeList.add(0, lastChunkAnalysis.getAmplitude());
 		
@@ -833,7 +836,7 @@ public class UserChannelContext {
 		int firstPeakIndex = -1;
 		int peakSize = 0;
 		for (int i = 1; i < chunkAmplitudeList.size(); i++) {
-			if (chunkAmplitudeList.get(i) > hardLevel.getAmplitudeThreshold() && chunkAmplitudeList.get(i) > chunkAmplitudeList.get(i - 1) + 1) {
+			if (chunkAmplitudeList.get(i) > hardLevel.getAmplitudeThreshold() && chunkAmplitudeList.get(i) > chunkAmplitudeList.get(i - 1) + 2) {
 				tempo = true;
 				if(firstPeakIndex == -1){
 					firstPeakIndex = i;
@@ -873,14 +876,17 @@ public class UserChannelContext {
 	private double queryFirstNoteStartTime(List<ChunkAnalysis> chunkAnalysisList, MusicXmlNote musicXmlNote) {
 		
 		if(chunkAnalysisList == null || chunkAnalysisList.size() == 0){
+			LOGGER.info("找不到数据,correctedStartTime:{}", musicXmlNote.getTimeStamp() + dynamicOffset);
 			return musicXmlNote.getTimeStamp() + dynamicOffset;
 		}
 		
 		if (percussionList.contains(subjectId)) {
 			Optional<ChunkAnalysis> optional = chunkAnalysisList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).findFirst();
 			if(optional.isPresent()){
+				LOGGER.info("范围内查询到信号,correctedStartTime:{}", optional.get().getStartTime());
 				return optional.get().getStartTime();
 			}else{
+				LOGGER.info("范围内未查询到信号,correctedStartTime:{}", musicXmlNote.getTimeStamp() + dynamicOffset);
 				return musicXmlNote.getTimeStamp() + dynamicOffset;
 			}
 		}
@@ -889,10 +895,12 @@ public class UserChannelContext {
 		if(musicXmlNote.getMusicalNotesIndex() > 0){
 			MusicXmlNote preMusicXmlNote = getCurrentMusicNote(null, musicXmlNote.getMusicalNotesIndex() - 1);
 			if((int)preMusicXmlNote.getFrequency() == (int)musicXmlNote.getFrequency()){
-				Optional<ChunkAnalysis> optional = chunkAnalysisList.stream().filter(t -> t.getFrequency() <= 100).findFirst();
+				Optional<ChunkAnalysis> optional = chunkAnalysisList.stream().filter(t -> t.getFrequency() <= MIN_FREQUECY).findFirst();
 				if(optional.isPresent()){
+					LOGGER.info("与上一个音同音,有断开,correctedStartTime:{}", optional.get().getStartTime());
 					return optional.get().getEndTime();
 				}else{
+					LOGGER.info("与上一个音同音,未断开,correctedStartTime:{}", musicXmlNote.getTimeStamp() + dynamicOffset);
 					return musicXmlNote.getTimeStamp() + dynamicOffset;
 				}
 			}
@@ -905,9 +913,12 @@ public class UserChannelContext {
 		for (ChunkAnalysis ca : chunkAnalysisList) {
 			noteFrequencyRange = new NoteFrequencyRange(standardFrequecy, ca.getFrequency());
 			if (standardNote.equals(noteFrequencyRange)) {
+				LOGGER.info("范围内查询到信号,correctedStartTime:{}", ca.getStartTime());
 				return ca.getStartTime();
 			}
 		}
+		
+		LOGGER.info("范围内未查询到信号,correctedStartTime:{}", musicXmlNote.getTimeStamp() + dynamicOffset);
 
 		//return chunkAnalysisList.get(chunkAnalysisList.size() - 1).getEndTime();
 		return musicXmlNote.getTimeStamp() + dynamicOffset;

+ 2 - 2
audio-analysis/src/main/java/com/yonge/netty/entity/MusicXmlBasicInfo.java

@@ -29,7 +29,7 @@ public class MusicXmlBasicInfo {
 
 	private String uuid;
 	
-	private int beatLength;
+	private float beatLength;
 
 	private List<MusicXmlNote> musicXmlInfos = new ArrayList<MusicXmlNote>();
 
@@ -115,7 +115,7 @@ public class MusicXmlBasicInfo {
 		this.uuid = uuid;
 	}
 
-	public int getBeatLength() {
+	public float getBeatLength() {
 		return beatLength;
 	}
 

+ 17 - 9
audio-analysis/src/main/java/com/yonge/netty/server/service/AudioCompareHandler.java

@@ -79,7 +79,7 @@ public class AudioCompareHandler implements MessageHandler {
 	/**
 	 * @describe 采样大小
 	 */
-	private int bufferSize = 1024 * 4;
+	private int bufferSize = 1024 * 2;
 
 	private boolean signed = true;
 
@@ -121,7 +121,8 @@ public class AudioCompareHandler implements MessageHandler {
 			channelContext.setHandlerSwitch(false);
 
 			channelContext.getSongMusicXmlMap().put(musicXmlBasicInfo.getExamSongId(), musicXmlBasicInfo);
-			channelContext.init(musicXmlBasicInfo.getPlatform(), musicXmlBasicInfo.getHeardLevel(), musicXmlBasicInfo.getSubjectId(), musicXmlBasicInfo.getBeatLength());
+			channelContext.init(musicXmlBasicInfo.getPlatform(), musicXmlBasicInfo.getHeardLevel(), musicXmlBasicInfo.getSubjectId(),
+					musicXmlBasicInfo.getBeatLength(), audioFormat.getSampleRate(), bufferSize / 2);
 			channelContext.setUser(user);
 			
 			userChannelContextService.register(channel, channelContext);
@@ -188,6 +189,8 @@ public class AudioCompareHandler implements MessageHandler {
 						sysMusicCompareRecord.setIntegrity(new BigDecimal(scoreMap.get("integrity")));
 						sysMusicCompareRecord.setCadence(new BigDecimal(scoreMap.get("cadence")));
 						sysMusicCompareRecord.setPlayTime(scoreMap.get("playTime") / 1000);
+						
+						LOGGER.info("Score:{} Intonation:{} Integrity:{} Cadence:{}", sysMusicCompareRecord.getScore(),sysMusicCompareRecord.getIntonation(),sysMusicCompareRecord.getIntegrity(),sysMusicCompareRecord.getCadence());
 					}
 					sysMusicCompareRecord.setFeature(FeatureType.CLOUD_STUDY_EVALUATION);
 
@@ -278,11 +281,11 @@ public class AudioCompareHandler implements MessageHandler {
 		}
 		waveFileProcessor.process(datas);
 		
-		datas = channelContext.skipMetronome(datas);
+		/*datas = channelContext.skipMetronome(datas);
 
 		if (datas.length == 0) {
 			return false;
-		}
+		}*/
 
 		channelContext.setChannelBufferBytes(ArrayUtil.mergeByte(channelContext.getChannelBufferBytes(), datas));
 
@@ -292,21 +295,26 @@ public class AudioCompareHandler implements MessageHandler {
 			return false;
 		}
 		
-		if (channelContext.getOffsetMS() > 0) {
-			int beatByteLength = WaveformWriter.SAMPLE_RATE * WaveformWriter.BITS_PER_SAMPLE / 8 * channelContext.getOffsetMS() / 1000;
+		if (channelContext.getOffsetMS() + channelContext.getBeatDuration() > 0) {
+			int beatByteLength = (int) (audioFormat.getSampleRate() * audioFormat.getSampleSizeInBits() / 8 * (channelContext.getOffsetMS() + channelContext.getBeatDuration()) / 1000);
 			
 			if(totalLength > beatByteLength){
+				if(beatByteLength % 2 != 0){
+					LOGGER.info("**************奇数*****************");
+					beatByteLength--;
+				}
 				channelContext.setChannelBufferBytes(ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), beatByteLength, totalLength - 1));
+				
+				LOGGER.info("--------Length:{}  Times[{} + {}]:{}--------", waveFileProcessor.getFile().length() - channelContext.getChannelBufferBytes().length, channelContext.getOffsetMS() , channelContext.getBeatDuration(),(waveFileProcessor.getFile().length() - channelContext.getChannelBufferBytes().length) * 1000 /audioFormat.getSampleRate()/2);
+				
 				channelContext.setOffsetMS(0);
+				channelContext.setBeatDuration(0);
 			}else{
 				return false;
 			}
 		}
 		
 		totalLength = channelContext.getChannelBufferBytes().length;
-		if(totalLength % 2 != 0){
-			totalLength--;
-		}
 		
 
 		while (totalLength >= bufferSize) {