Browse Source

Merge remote-tracking branch 'origin/master'

zouxuan 4 years ago
parent
commit
52a25fc0f4
45 changed files with 4105 additions and 718 deletions
  1. 5 0
      audio-analysis/pom.xml
  2. 1084 0
      audio-analysis/src/main/java/com/yonge/audio/analysis/AudioFloatConverter.java
  3. 158 0
      audio-analysis/src/main/java/com/yonge/audio/analysis/Complex.java
  4. 167 0
      audio-analysis/src/main/java/com/yonge/audio/analysis/FFT.java
  5. 141 0
      audio-analysis/src/main/java/com/yonge/audio/analysis/Signals.java
  6. 52 0
      audio-analysis/src/main/java/com/yonge/audio/analysis/detector/FrequencyDetector.java
  7. 223 0
      audio-analysis/src/main/java/com/yonge/audio/analysis/detector/YINPitchDetector.java
  8. 8 8
      audio-analysis/src/main/java/com/yonge/audio/utils/ArrayUtil.java
  9. 100 0
      audio-analysis/src/main/java/com/yonge/nettty/dto/ChunkAnalysis.java
  10. 67 0
      audio-analysis/src/main/java/com/yonge/nettty/dto/HardLevelEnum.java
  11. 190 0
      audio-analysis/src/main/java/com/yonge/nettty/dto/NoteAnalysis.java
  12. 78 0
      audio-analysis/src/main/java/com/yonge/nettty/dto/SectionAnalysis.java
  13. 655 108
      audio-analysis/src/main/java/com/yonge/nettty/dto/UserChannelContext.java
  14. 67 0
      audio-analysis/src/main/java/com/yonge/nettty/dto/WebSocketResponse.java
  15. 44 0
      audio-analysis/src/main/java/com/yonge/nettty/entity/MusicXmlBasicInfo.java
  16. 7 7
      audio-analysis/src/main/java/com/yonge/nettty/entity/MusicXmlNote.java
  17. 50 0
      audio-analysis/src/main/java/com/yonge/nettty/entity/MusicXmlSection.java
  18. 0 70
      audio-analysis/src/main/java/com/yonge/nettty/entity/NoteAnalysis.java
  19. 7 5
      audio-analysis/src/main/java/com/yonge/netty/server/NettyServer.java
  20. 0 61
      audio-analysis/src/main/java/com/yonge/netty/server/NioAudioInputStream.java
  21. 9 0
      audio-analysis/src/main/java/com/yonge/netty/server/handler/ChannelContextConstants.java
  22. 6 3
      audio-analysis/src/main/java/com/yonge/netty/server/handler/NettyChannelManager.java
  23. 0 2
      audio-analysis/src/main/java/com/yonge/netty/server/handler/NettyServerHandler.java
  24. 10 0
      audio-analysis/src/main/java/com/yonge/netty/server/handler/message/BinaryMessageHandler.java
  25. 192 0
      audio-analysis/src/main/java/com/yonge/netty/server/handler/message/BinaryWebSocketFrameHandler.java
  26. 5 0
      audio-analysis/src/main/java/com/yonge/netty/server/handler/message/TextMessageHandler.java
  27. 228 0
      audio-analysis/src/main/java/com/yonge/netty/server/handler/message/TextWebSocketHandler.java
  28. 0 147
      audio-analysis/src/main/java/com/yonge/netty/server/messagehandler/BinaryWebSocketFrameHandler.java
  29. 0 112
      audio-analysis/src/main/java/com/yonge/netty/server/messagehandler/TextWebSocketHandler.java
  30. 27 8
      audio-analysis/src/main/java/com/yonge/netty/server/processor/WaveformWriter.java
  31. 135 0
      audio-analysis/src/main/java/com/yonge/netty/server/service/CompareHandler.java
  32. 84 0
      audio-analysis/src/main/java/com/yonge/netty/server/service/PitchDetectionHandler.java
  33. 5 1
      audio-analysis/src/main/resources/application-template.yml
  34. 16 0
      audio-analysis/src/main/resources/bootstrap-dev.properties
  35. 16 0
      audio-analysis/src/main/resources/bootstrap-prod.properties
  36. 16 0
      audio-analysis/src/main/resources/bootstrap-test.properties
  37. 27 96
      mec-biz/src/main/java/com/ym/mec/biz/dal/dto/SoundCompareHelper.java
  38. 7 0
      mec-biz/src/main/java/com/ym/mec/biz/service/SysMusicCompareRecordService.java
  39. 131 88
      mec-biz/src/main/java/com/ym/mec/biz/service/impl/SoundCompareHandler.java
  40. 13 0
      mec-biz/src/main/java/com/ym/mec/biz/service/impl/SysMusicCompareRecordServiceImpl.java
  41. 2 2
      mec-thirdparty/src/main/java/com/ym/mec/thirdparty/eseal/provider/TsignPlugin.java
  42. 9 0
      mec-thirdparty/src/main/java/com/ym/mec/thirdparty/storage/StoragePlugin.java
  43. 5 0
      mec-thirdparty/src/main/java/com/ym/mec/thirdparty/storage/StoragePluginContext.java
  44. 28 0
      mec-thirdparty/src/main/java/com/ym/mec/thirdparty/storage/provider/AliyunOssStoragePlugin.java
  45. 31 0
      mec-thirdparty/src/main/java/com/ym/mec/thirdparty/storage/provider/KS3StoragePlugin.java

+ 5 - 0
audio-analysis/pom.xml

@@ -23,6 +23,11 @@
 		</dependency>
 
 		<dependency>
+			<groupId>org.springframework.cloud</groupId>
+			<artifactId>spring-cloud-starter-alibaba-nacos-config</artifactId>
+		</dependency>
+
+		<dependency>
 			<groupId>de.codecentric</groupId>
 			<artifactId>spring-boot-admin-starter-client</artifactId>
 		</dependency>

+ 1084 - 0
audio-analysis/src/main/java/com/yonge/audio/analysis/AudioFloatConverter.java

@@ -0,0 +1,1084 @@
+/*
+*      _______                       _____   _____ _____  
+*     |__   __|                     |  __ \ / ____|  __ \ 
+*        | | __ _ _ __ ___  ___  ___| |  | | (___ | |__) |
+*        | |/ _` | '__/ __|/ _ \/ __| |  | |\___ \|  ___/ 
+*        | | (_| | |  \__ \ (_) \__ \ |__| |____) | |     
+*        |_|\__,_|_|  |___/\___/|___/_____/|_____/|_|     
+*                                                         
+* -------------------------------------------------------------
+*
+* TarsosDSP is developed by Joren Six at IPEM, University Ghent
+*  
+* -------------------------------------------------------------
+*
+*  Info: http://0110.be/tag/TarsosDSP
+*  Github: https://github.com/JorenSix/TarsosDSP
+*  Releases: http://0110.be/releases/TarsosDSP/
+*  
+*  TarsosDSP includes modified source code by various authors,
+*  for credits and info, see README.
+* 
+*/
+
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Sun designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Sun in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+package com.yonge.audio.analysis;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.DoubleBuffer;
+import java.nio.FloatBuffer;
+
+import javax.sound.sampled.AudioFormat;
+import javax.sound.sampled.AudioFormat.Encoding;
+
+
+/**
+ * This class is used to convert between 8,16,24,32,32+ bit signed/unsigned
+ * big/little-endian fixed/floating point byte buffers and float buffers.
+ * 
+ * @author Karl Helgason
+ */
+public abstract class AudioFloatConverter {
+
+    public static final Encoding PCM_FLOAT = new Encoding("PCM_FLOAT");
+    
+    /***************************************************************************
+     * 
+     * LSB Filter, used to filter the least significant byte in sample arrays.
+     * 
+     * Is used to filter out data in the LSB byte when SampleSizeInBits is not
+     * divisible by 8.
+     * 
+     **************************************************************************/
+
+    private static class AudioFloatLSBFilter extends AudioFloatConverter {
+
+        private AudioFloatConverter converter;
+
+        final private int offset;
+
+        final private int stepsize;
+
+        final private byte mask;
+
+        private byte[] mask_buffer;
+
+        public AudioFloatLSBFilter(AudioFloatConverter converter,
+        		AudioFormat format) {
+            int bits = format.getSampleSizeInBits();
+            boolean bigEndian = format.isBigEndian();
+            this.converter = converter;
+            stepsize = (bits + 7) / 8;
+            offset = bigEndian ? (stepsize - 1) : 0;
+            int lsb_bits = bits % 8;
+            if (lsb_bits == 0)
+                mask = (byte) 0x00;
+            else if (lsb_bits == 1)
+                mask = (byte) 0x80;
+            else if (lsb_bits == 2)
+                mask = (byte) 0xC0;
+            else if (lsb_bits == 3)
+                mask = (byte) 0xE0;
+            else if (lsb_bits == 4)
+                mask = (byte) 0xF0;
+            else if (lsb_bits == 5)
+                mask = (byte) 0xF8;
+            else if (lsb_bits == 6)
+                mask = (byte) 0xFC;
+            else if (lsb_bits == 7)
+                mask = (byte) 0xFE;
+            else
+                mask = (byte) 0xFF;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            byte[] ret = converter.toByteArray(in_buff, in_offset, in_len,
+                    out_buff, out_offset);
+
+            int out_offset_end = in_len * stepsize;
+            for (int i = out_offset + offset; i < out_offset_end; i += stepsize) {
+                out_buff[i] = (byte) (out_buff[i] & mask);
+            }
+
+            return ret;
+        }
+
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            if (mask_buffer == null || mask_buffer.length < in_buff.length)
+                mask_buffer = new byte[in_buff.length];
+            System.arraycopy(in_buff, 0, mask_buffer, 0, in_buff.length);
+            int in_offset_end = out_len * stepsize;
+            for (int i = in_offset + offset; i < in_offset_end; i += stepsize) {
+                mask_buffer[i] = (byte) (mask_buffer[i] & mask);
+            }
+            float[] ret = converter.toFloatArray(mask_buffer, in_offset,
+                    out_buff, out_offset, out_len);
+            return ret;
+        }
+
+    }
+
+    /***************************************************************************
+     * 
+     * 64 bit float, little/big-endian
+     * 
+     **************************************************************************/
+
+    // PCM 64 bit float, little-endian
+    private static class AudioFloatConversion64L extends AudioFloatConverter {
+        ByteBuffer bytebuffer = null;
+
+        DoubleBuffer floatbuffer = null;
+
+        double[] double_buff = null;
+
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int in_len = out_len * 8;
+            if (bytebuffer == null || bytebuffer.capacity() < in_len) {
+                bytebuffer = ByteBuffer.allocate(in_len).order(
+                        ByteOrder.LITTLE_ENDIAN);
+                floatbuffer = bytebuffer.asDoubleBuffer();
+            }
+            bytebuffer.position(0);
+            floatbuffer.position(0);
+            bytebuffer.put(in_buff, in_offset, in_len);
+            if (double_buff == null
+                    || double_buff.length < out_len + out_offset)
+                double_buff = new double[out_len + out_offset];
+            floatbuffer.get(double_buff, out_offset, out_len);
+            int out_offset_end = out_offset + out_len;
+            for (int i = out_offset; i < out_offset_end; i++) {
+                out_buff[i] = (float) double_buff[i];
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int out_len = in_len * 8;
+            if (bytebuffer == null || bytebuffer.capacity() < out_len) {
+                bytebuffer = ByteBuffer.allocate(out_len).order(
+                        ByteOrder.LITTLE_ENDIAN);
+                floatbuffer = bytebuffer.asDoubleBuffer();
+            }
+            floatbuffer.position(0);
+            bytebuffer.position(0);
+            if (double_buff == null || double_buff.length < in_offset + in_len)
+                double_buff = new double[in_offset + in_len];
+            int in_offset_end = in_offset + in_len;
+            for (int i = in_offset; i < in_offset_end; i++) {
+                double_buff[i] = in_buff[i];
+            }
+            floatbuffer.put(double_buff, in_offset, in_len);
+            bytebuffer.get(out_buff, out_offset, out_len);
+            return out_buff;
+        }
+    }
+
+    // PCM 64 bit float, big-endian
+    private static class AudioFloatConversion64B extends AudioFloatConverter {
+        ByteBuffer bytebuffer = null;
+
+        DoubleBuffer floatbuffer = null;
+
+        double[] double_buff = null;
+
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int in_len = out_len * 8;
+            if (bytebuffer == null || bytebuffer.capacity() < in_len) {
+                bytebuffer = ByteBuffer.allocate(in_len).order(
+                        ByteOrder.BIG_ENDIAN);
+                floatbuffer = bytebuffer.asDoubleBuffer();
+            }
+            bytebuffer.position(0);
+            floatbuffer.position(0);
+            bytebuffer.put(in_buff, in_offset, in_len);
+            if (double_buff == null
+                    || double_buff.length < out_len + out_offset)
+                double_buff = new double[out_len + out_offset];
+            floatbuffer.get(double_buff, out_offset, out_len);
+            int out_offset_end = out_offset + out_len;
+            for (int i = out_offset; i < out_offset_end; i++) {
+                out_buff[i] = (float) double_buff[i];
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int out_len = in_len * 8;
+            if (bytebuffer == null || bytebuffer.capacity() < out_len) {
+                bytebuffer = ByteBuffer.allocate(out_len).order(
+                        ByteOrder.BIG_ENDIAN);
+                floatbuffer = bytebuffer.asDoubleBuffer();
+            }
+            floatbuffer.position(0);
+            bytebuffer.position(0);
+            if (double_buff == null || double_buff.length < in_offset + in_len)
+                double_buff = new double[in_offset + in_len];
+            int in_offset_end = in_offset + in_len;
+            for (int i = in_offset; i < in_offset_end; i++) {
+                double_buff[i] = in_buff[i];
+            }
+            floatbuffer.put(double_buff, in_offset, in_len);
+            bytebuffer.get(out_buff, out_offset, out_len);
+            return out_buff;
+        }
+    }
+
+    /***************************************************************************
+     * 
+     * 32 bit float, little/big-endian
+     * 
+     **************************************************************************/
+
+    // PCM 32 bit float, little-endian
+    private static class AudioFloatConversion32L extends AudioFloatConverter {
+        ByteBuffer bytebuffer = null;
+
+        FloatBuffer floatbuffer = null;
+
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int in_len = out_len * 4;
+            if (bytebuffer == null || bytebuffer.capacity() < in_len) {
+                bytebuffer = ByteBuffer.allocate(in_len).order(
+                        ByteOrder.LITTLE_ENDIAN);
+                floatbuffer = bytebuffer.asFloatBuffer();
+            }
+            bytebuffer.position(0);
+            floatbuffer.position(0);
+            bytebuffer.put(in_buff, in_offset, in_len);
+            floatbuffer.get(out_buff, out_offset, out_len);
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int out_len = in_len * 4;
+            if (bytebuffer == null || bytebuffer.capacity() < out_len) {
+                bytebuffer = ByteBuffer.allocate(out_len).order(
+                        ByteOrder.LITTLE_ENDIAN);
+                floatbuffer = bytebuffer.asFloatBuffer();
+            }
+            floatbuffer.position(0);
+            bytebuffer.position(0);
+            floatbuffer.put(in_buff, in_offset, in_len);
+            bytebuffer.get(out_buff, out_offset, out_len);
+            return out_buff;
+        }
+    }
+
+    // PCM 32 bit float, big-endian
+    private static class AudioFloatConversion32B extends AudioFloatConverter {
+        ByteBuffer bytebuffer = null;
+
+        FloatBuffer floatbuffer = null;
+
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int in_len = out_len * 4;
+            if (bytebuffer == null || bytebuffer.capacity() < in_len) {
+                bytebuffer = ByteBuffer.allocate(in_len).order(
+                        ByteOrder.BIG_ENDIAN);
+                floatbuffer = bytebuffer.asFloatBuffer();
+            }
+            bytebuffer.position(0);
+            floatbuffer.position(0);
+            bytebuffer.put(in_buff, in_offset, in_len);
+            floatbuffer.get(out_buff, out_offset, out_len);
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int out_len = in_len * 4;
+            if (bytebuffer == null || bytebuffer.capacity() < out_len) {
+                bytebuffer = ByteBuffer.allocate(out_len).order(
+                        ByteOrder.BIG_ENDIAN);
+                floatbuffer = bytebuffer.asFloatBuffer();
+            }
+            floatbuffer.position(0);
+            bytebuffer.position(0);
+            floatbuffer.put(in_buff, in_offset, in_len);
+            bytebuffer.get(out_buff, out_offset, out_len);
+            return out_buff;
+        }
+    }
+
+    /***************************************************************************
+     * 
+     * 8 bit signed/unsigned
+     * 
+     **************************************************************************/
+
+    // PCM 8 bit, signed
+    private static class AudioFloatConversion8S extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++)
+                out_buff[ox++] = in_buff[ix++] * (1.0f / 127.0f);
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++)
+                out_buff[ox++] = (byte) (in_buff[ix++] * 127.0f);
+            return out_buff;
+        }
+    }
+
+    // PCM 8 bit, unsigned
+    private static class AudioFloatConversion8U extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++)
+                out_buff[ox++] = ((in_buff[ix++] & 0xFF) - 127)
+                        * (1.0f / 127.0f);
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++)
+                out_buff[ox++] = (byte) (127 + in_buff[ix++] * 127.0f);
+            return out_buff;
+        }
+    }
+
+    /***************************************************************************
+     * 
+     * 16 bit signed/unsigned, little/big-endian
+     * 
+     **************************************************************************/
+
+    // PCM 16 bit, signed, little-endian
+    private static class AudioFloatConversion16SL extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int len = out_offset + out_len;
+            for (int ox = out_offset; ox < len; ox++) {
+                out_buff[ox] = ((short) ((in_buff[ix++] & 0xFF) | 
+                           (in_buff[ix++] << 8))) * (1.0f / 32767.0f);
+            }
+
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ox = out_offset;
+            int len = in_offset + in_len;
+            for (int ix = in_offset; ix < len; ix++) {
+                int x = (int) (in_buff[ix] * 32767.0);
+                out_buff[ox++] = (byte) x;
+                out_buff[ox++] = (byte) (x >>> 8);
+            }
+            return out_buff;
+        }
+    }
+
+    // PCM 16 bit, signed, big-endian
+    private static class AudioFloatConversion16SB extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                out_buff[ox++] = ((short) ((in_buff[ix++] << 8) | 
+                        (in_buff[ix++] & 0xFF))) * (1.0f / 32767.0f);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * 32767.0);
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) x;
+            }
+            return out_buff;
+        }
+    }
+
+    // PCM 16 bit, unsigned, little-endian
+    private static class AudioFloatConversion16UL extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8);
+                out_buff[ox++] = (x - 32767) * (1.0f / 32767.0f);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = 32767 + (int) (in_buff[ix++] * 32767.0);
+                out_buff[ox++] = (byte) x;
+                out_buff[ox++] = (byte) (x >>> 8);
+            }
+            return out_buff;
+        }
+    }
+
+    // PCM 16 bit, unsigned, big-endian
+    private static class AudioFloatConversion16UB extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF);
+                out_buff[ox++] = (x - 32767) * (1.0f / 32767.0f);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = 32767 + (int) (in_buff[ix++] * 32767.0);
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) x;
+            }
+            return out_buff;
+        }
+    }
+
+    /***************************************************************************
+     * 
+     * 24 bit signed/unsigned, little/big-endian
+     * 
+     **************************************************************************/
+
+    // PCM 24 bit, signed, little-endian
+    private static class AudioFloatConversion24SL extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8)
+                        | ((in_buff[ix++] & 0xFF) << 16);
+                if (x > 0x7FFFFF)
+                    x -= 0x1000000;
+                out_buff[ox++] = x * (1.0f / (float)0x7FFFFF);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * (float)0x7FFFFF);
+                if (x < 0)
+                    x += 0x1000000;
+                out_buff[ox++] = (byte) x;
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) (x >>> 16);
+            }
+            return out_buff;
+        }
+    }
+
+    // PCM 24 bit, signed, big-endian
+    private static class AudioFloatConversion24SB extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = ((in_buff[ix++] & 0xFF) << 16)
+                        | ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF);
+                if (x > 0x7FFFFF)
+                    x -= 0x1000000;
+                out_buff[ox++] = x * (1.0f / (float)0x7FFFFF);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * (float)0x7FFFFF);
+                if (x < 0)
+                    x += 0x1000000;
+                out_buff[ox++] = (byte) (x >>> 16);
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) x;
+            }
+            return out_buff;
+        }
+    }
+
+    // PCM 24 bit, unsigned, little-endian
+    private static class AudioFloatConversion24UL extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8)
+                        | ((in_buff[ix++] & 0xFF) << 16);
+                x -= 0x7FFFFF;
+                out_buff[ox++] = x * (1.0f / (float)0x7FFFFF);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * (float)0x7FFFFF);
+                x += 0x7FFFFF;
+                out_buff[ox++] = (byte) x;
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) (x >>> 16);
+            }
+            return out_buff;
+        }
+    }
+
+    // PCM 24 bit, unsigned, big-endian
+    private static class AudioFloatConversion24UB extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = ((in_buff[ix++] & 0xFF) << 16)
+                        | ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF);
+                x -= 0x7FFFFF;
+                out_buff[ox++] = x * (1.0f / (float)0x7FFFFF);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * (float)0x7FFFFF);
+                x += 0x7FFFFF;
+                out_buff[ox++] = (byte) (x >>> 16);
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) x;
+            }
+            return out_buff;
+        }
+    }
+
+    /***************************************************************************
+     * 
+     * 32 bit signed/unsigned, little/big-endian
+     * 
+     **************************************************************************/
+
+    // PCM 32 bit, signed, little-endian
+    private static class AudioFloatConversion32SL extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8) |
+                        ((in_buff[ix++] & 0xFF) << 16) |
+                        ((in_buff[ix++] & 0xFF) << 24);
+                out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF);
+                out_buff[ox++] = (byte) x;
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) (x >>> 16);
+                out_buff[ox++] = (byte) (x >>> 24);
+            }
+            return out_buff;
+        }
+    }
+
+    // PCM 32 bit, signed, big-endian
+    private static class AudioFloatConversion32SB extends AudioFloatConverter {
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = ((in_buff[ix++] & 0xFF) << 24) |
+                        ((in_buff[ix++] & 0xFF) << 16) |
+                        ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF);
+                out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF);
+                out_buff[ox++] = (byte) (x >>> 24);
+                out_buff[ox++] = (byte) (x >>> 16);
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) x;
+            }
+            return out_buff;
+        }
+    }
+
    // PCM 32 bit, unsigned, little-endian
    private static class AudioFloatConversion32UL extends AudioFloatConverter {
        // Decode: read 4 little-endian bytes, then subtract the unsigned bias
        // 0x7FFFFFFF (int arithmetic wraps as intended) so the midpoint of
        // the unsigned range maps near 0.0, and scale to [-1, 1].
        public float[] toFloatArray(byte[] in_buff, int in_offset,
                float[] out_buff, int out_offset, int out_len) {
            int ix = in_offset;
            int ox = out_offset;
            for (int i = 0; i < out_len; i++) {
                int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8) |
                        ((in_buff[ix++] & 0xFF) << 16) | 
                        ((in_buff[ix++] & 0xFF) << 24);
                x -= 0x7FFFFFFF;
                out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF);
            }
            return out_buff;
        }

        // Encode: scale to a signed int, re-apply the 0x7FFFFFFF bias, and
        // emit the 4 bytes least-significant first.
        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
                byte[] out_buff, int out_offset) {
            int ix = in_offset;
            int ox = out_offset;
            for (int i = 0; i < in_len; i++) {
                int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF);
                x += 0x7FFFFFFF;
                out_buff[ox++] = (byte) x;
                out_buff[ox++] = (byte) (x >>> 8);
                out_buff[ox++] = (byte) (x >>> 16);
                out_buff[ox++] = (byte) (x >>> 24);
            }
            return out_buff;
        }
    }
+
+    // PCM 32 bit, unsigned, big-endian
+    private static class AudioFloatConversion32UB extends AudioFloatConverter {
+
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = ((in_buff[ix++] & 0xFF) << 24) |
+                        ((in_buff[ix++] & 0xFF) << 16) |
+                        ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF);
+                x -= 0x7FFFFFFF;
+                out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF);
+                x += 0x7FFFFFFF;
+                out_buff[ox++] = (byte) (x >>> 24);
+                out_buff[ox++] = (byte) (x >>> 16);
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) x;
+            }
+            return out_buff;
+        }
+    }
+
+    /***************************************************************************
+     * 
+     * 32+ bit signed/unsigned, little/big-endian
+     * 
+     **************************************************************************/
+
    // PCM 32+ bit, signed, little-endian
    private static class AudioFloatConversion32xSL extends AudioFloatConverter {

        // Number of extra low-order bytes per frame beyond the 4 that are
        // actually converted (e.g. 1 for 40-bit samples).
        final int xbytes;

        public AudioFloatConversion32xSL(int xbytes) {
            this.xbytes = xbytes;
        }

        // Decode: in little-endian layout the extra low-order bytes come
        // first, so skip xbytes, then read the 4 most-significant bytes.
        public float[] toFloatArray(byte[] in_buff, int in_offset,
                float[] out_buff, int out_offset, int out_len) {
            int ix = in_offset;
            int ox = out_offset;
            for (int i = 0; i < out_len; i++) {
                ix += xbytes;
                int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8)
                        | ((in_buff[ix++] & 0xFF) << 16)
                        | ((in_buff[ix++] & 0xFF) << 24);
                out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF);
            }
            return out_buff;
        }

        // Encode: write xbytes of zero padding (lost precision), then the 4
        // significant bytes least-significant first.
        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
                byte[] out_buff, int out_offset) {
            int ix = in_offset;
            int ox = out_offset;
            for (int i = 0; i < in_len; i++) {
                int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF);
                for (int j = 0; j < xbytes; j++) {
                    out_buff[ox++] = 0;
                }
                out_buff[ox++] = (byte) x;
                out_buff[ox++] = (byte) (x >>> 8);
                out_buff[ox++] = (byte) (x >>> 16);
                out_buff[ox++] = (byte) (x >>> 24);
            }
            return out_buff;
        }
    }
+
+    // PCM 32+ bit, signed, big-endian
+    private static class AudioFloatConversion32xSB extends AudioFloatConverter {
+
+        final int xbytes;
+
+        public AudioFloatConversion32xSB(int xbytes) {
+            this.xbytes = xbytes;
+        }
+
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = ((in_buff[ix++] & 0xFF) << 24)
+                        | ((in_buff[ix++] & 0xFF) << 16)
+                        | ((in_buff[ix++] & 0xFF) << 8)
+                        | (in_buff[ix++] & 0xFF);
+                ix += xbytes;
+                out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF);
+                out_buff[ox++] = (byte) (x >>> 24);
+                out_buff[ox++] = (byte) (x >>> 16);
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) x;
+                for (int j = 0; j < xbytes; j++) {
+                    out_buff[ox++] = 0;
+                }
+            }
+            return out_buff;
+        }
+    }
+
    // PCM 32+ bit, unsigned, little-endian
    private static class AudioFloatConversion32xUL extends AudioFloatConverter {

        // Extra low-order bytes per frame beyond the 4 that are converted.
        final int xbytes;

        public AudioFloatConversion32xUL(int xbytes) {
            this.xbytes = xbytes;
        }

        // Decode: skip the xbytes of low-order padding, read the 4
        // most-significant bytes little-endian, remove the unsigned bias
        // 0x7FFFFFFF and scale to [-1, 1].
        public float[] toFloatArray(byte[] in_buff, int in_offset,
                float[] out_buff, int out_offset, int out_len) {
            int ix = in_offset;
            int ox = out_offset;
            for (int i = 0; i < out_len; i++) {
                ix += xbytes;
                int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8)
                        | ((in_buff[ix++] & 0xFF) << 16)
                        | ((in_buff[ix++] & 0xFF) << 24);
                x -= 0x7FFFFFFF;
                out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF);
            }
            return out_buff;
        }

        // Encode: scale, re-apply the unsigned bias, write xbytes of zero
        // padding and then the 4 significant bytes least-significant first.
        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
                byte[] out_buff, int out_offset) {
            int ix = in_offset;
            int ox = out_offset;
            for (int i = 0; i < in_len; i++) {
                int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF);
                x += 0x7FFFFFFF;
                for (int j = 0; j < xbytes; j++) {
                    out_buff[ox++] = 0;
                }
                out_buff[ox++] = (byte) x;
                out_buff[ox++] = (byte) (x >>> 8);
                out_buff[ox++] = (byte) (x >>> 16);
                out_buff[ox++] = (byte) (x >>> 24);
            }
            return out_buff;
        }
    }
+
+    // PCM 32+ bit, unsigned, big-endian
+    private static class AudioFloatConversion32xUB extends AudioFloatConverter {
+
+        final int xbytes;
+
+        public AudioFloatConversion32xUB(int xbytes) {
+            this.xbytes = xbytes;
+        }
+
+        public float[] toFloatArray(byte[] in_buff, int in_offset,
+                float[] out_buff, int out_offset, int out_len) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < out_len; i++) {
+                int x = ((in_buff[ix++] & 0xFF) << 24) |
+                        ((in_buff[ix++] & 0xFF) << 16) |
+                        ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF);
+                ix += xbytes;
+                x -= 2147483647;
+                out_buff[ox++] = x * (1.0f / 2147483647.0f);
+            }
+            return out_buff;
+        }
+
+        public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
+                byte[] out_buff, int out_offset) {
+            int ix = in_offset;
+            int ox = out_offset;
+            for (int i = 0; i < in_len; i++) {
+                int x = (int) (in_buff[ix++] * 2147483647.0);
+                x += 2147483647;
+                out_buff[ox++] = (byte) (x >>> 24);
+                out_buff[ox++] = (byte) (x >>> 16);
+                out_buff[ox++] = (byte) (x >>> 8);
+                out_buff[ox++] = (byte) x;
+                for (int j = 0; j < xbytes; j++) {
+                    out_buff[ox++] = 0;
+                }
+            }
+            return out_buff;
+        }
+    }
+
    /**
     * Returns a converter suited to the given format, or null when the
     * format is unsupported: zero frame size, a frame size inconsistent with
     * sampleSizeInBits * channels, or an encoding with no matching
     * conversion. For signed/unsigned PCM whose bit depth is not a multiple
     * of 8, the converter is wrapped in an AudioFloatLSBFilter.
     */
    public static AudioFloatConverter getConverter(AudioFormat format) {
        AudioFloatConverter conv = null;
        if (format.getFrameSize() == 0)
            return null;
        // Frame size must equal bytes-per-sample times the channel count.
        if (format.getFrameSize() != 
                ((format.getSampleSizeInBits() + 7) / 8) * format.getChannels()) {
            return null;
        }
        if (format.getEncoding().equals(Encoding.PCM_SIGNED)) {
            if (format.isBigEndian()) {
                // Pick the narrowest converter whose range covers the depth.
                if (format.getSampleSizeInBits() <= 8) {
                    conv = new AudioFloatConversion8S();
                } else if (format.getSampleSizeInBits() > 8 &&
                      format.getSampleSizeInBits() <= 16) {
                    conv = new AudioFloatConversion16SB();
                } else if (format.getSampleSizeInBits() > 16 &&
                      format.getSampleSizeInBits() <= 24) {
                    conv = new AudioFloatConversion24SB();
                } else if (format.getSampleSizeInBits() > 24 &&
                      format.getSampleSizeInBits() <= 32) {
                    conv = new AudioFloatConversion32SB();
                } else if (format.getSampleSizeInBits() > 32) {
                    // 32x converters take the count of padding bytes past 4.
                    conv = new AudioFloatConversion32xSB(((format
                            .getSampleSizeInBits() + 7) / 8) - 4);
                } 
            } else {
                if (format.getSampleSizeInBits() <= 8) {
                    conv = new AudioFloatConversion8S();
                } else if (format.getSampleSizeInBits() > 8 &&
                         format.getSampleSizeInBits() <= 16) {
                    conv = new AudioFloatConversion16SL();
                } else if (format.getSampleSizeInBits() > 16 &&
                         format.getSampleSizeInBits() <= 24) {
                    conv = new AudioFloatConversion24SL();
                } else if (format.getSampleSizeInBits() > 24 &&
                         format.getSampleSizeInBits() <= 32) {
                    conv = new AudioFloatConversion32SL();
                } else if (format.getSampleSizeInBits() > 32) {
                    conv = new AudioFloatConversion32xSL(((format
                            .getSampleSizeInBits() + 7) / 8) - 4);
                }
            }
        } else if (format.getEncoding().equals(Encoding.PCM_UNSIGNED)) {
            if (format.isBigEndian()) {
                if (format.getSampleSizeInBits() <= 8) {
                    conv = new AudioFloatConversion8U();
                } else if (format.getSampleSizeInBits() > 8 &&
                        format.getSampleSizeInBits() <= 16) {
                    conv = new AudioFloatConversion16UB();
                } else if (format.getSampleSizeInBits() > 16 &&
                        format.getSampleSizeInBits() <= 24) {
                    conv = new AudioFloatConversion24UB();
                } else if (format.getSampleSizeInBits() > 24 &&
                        format.getSampleSizeInBits() <= 32) {
                    conv = new AudioFloatConversion32UB();
                } else if (format.getSampleSizeInBits() > 32) {
                    conv = new AudioFloatConversion32xUB(((
                            format.getSampleSizeInBits() + 7) / 8) - 4);
                }
            } else {
                if (format.getSampleSizeInBits() <= 8) {
                    conv = new AudioFloatConversion8U();
                } else if (format.getSampleSizeInBits() > 8 &&
                        format.getSampleSizeInBits() <= 16) {
                    conv = new AudioFloatConversion16UL();
                } else if (format.getSampleSizeInBits() > 16 &&
                        format.getSampleSizeInBits() <= 24) {
                    conv = new AudioFloatConversion24UL();
                } else if (format.getSampleSizeInBits() > 24 &&
                        format.getSampleSizeInBits() <= 32) {
                    conv = new AudioFloatConversion32UL();
                } else if (format.getSampleSizeInBits() > 32) {
                    conv = new AudioFloatConversion32xUL(((
                            format.getSampleSizeInBits() + 7) / 8) - 4);
                }
            }
        } else if (format.getEncoding().equals(PCM_FLOAT)) {
            // IEEE float PCM supports exactly 32- or 64-bit samples.
            if (format.getSampleSizeInBits() == 32) {
                if (format.isBigEndian())
                    conv = new AudioFloatConversion32B();
                else
                    conv = new AudioFloatConversion32L();
            } else if (format.getSampleSizeInBits() == 64) {
                if (format.isBigEndian()) 
                    conv = new AudioFloatConversion64B();
                else 
                    conv = new AudioFloatConversion64L();                
            }

        }

        // Non-byte-aligned integer PCM: mask the unused low-order bits.
        if ((format.getEncoding().equals(Encoding.PCM_SIGNED) || 
                format.getEncoding().equals(Encoding.PCM_UNSIGNED)) && 
                (format.getSampleSizeInBits() % 8 != 0)) {
            conv = new AudioFloatLSBFilter(conv, format);
        }

        if (conv != null)
            conv.format = format;
        return conv;
    }
+
    // Format this converter was built for; assigned by getConverter().
    private AudioFormat format;

    /** Returns the AudioFormat this converter handles. */
    public AudioFormat getFormat() {
        return format;
    }

    /**
     * Converts encoded sample bytes to normalized floats in [-1, 1].
     *
     * @param in_buff    encoded input bytes
     * @param in_offset  byte offset to start reading at
     * @param out_buff   destination float array (also returned)
     * @param out_offset index to start writing at
     * @param out_len    number of samples to convert
     * @return out_buff
     */
    public abstract float[] toFloatArray(byte[] in_buff, int in_offset,
            float[] out_buff, int out_offset, int out_len);

    // Convenience overload: read from the start of in_buff.
    public float[] toFloatArray(byte[] in_buff, float[] out_buff,
            int out_offset, int out_len) {
        return toFloatArray(in_buff, 0, out_buff, out_offset, out_len);
    }

    // Convenience overload: write from the start of out_buff.
    public float[] toFloatArray(byte[] in_buff, int in_offset,
            float[] out_buff, int out_len) {
        return toFloatArray(in_buff, in_offset, out_buff, 0, out_len);
    }

    // Convenience overload: zero offsets on both buffers.
    public float[] toFloatArray(byte[] in_buff, float[] out_buff, int out_len) {
        return toFloatArray(in_buff, 0, out_buff, 0, out_len);
    }

    // Convenience overload: convert as many samples as out_buff holds.
    public float[] toFloatArray(byte[] in_buff, float[] out_buff) {
        return toFloatArray(in_buff, 0, out_buff, 0, out_buff.length);
    }

    /**
     * Converts normalized floats in [-1, 1] to encoded sample bytes.
     *
     * @param in_buff    source samples
     * @param in_offset  index to start reading at
     * @param in_len     number of samples to convert
     * @param out_buff   destination byte array (also returned)
     * @param out_offset byte offset to start writing at
     * @return out_buff
     */
    public abstract byte[] toByteArray(float[] in_buff, int in_offset,
            int in_len, byte[] out_buff, int out_offset);

    // Convenience overload: read from the start of in_buff.
    public byte[] toByteArray(float[] in_buff, int in_len, byte[] out_buff,
            int out_offset) {
        return toByteArray(in_buff, 0, in_len, out_buff, out_offset);
    }

    // Convenience overload: write from the start of out_buff.
    public byte[] toByteArray(float[] in_buff, int in_offset, int in_len,
            byte[] out_buff) {
        return toByteArray(in_buff, in_offset, in_len, out_buff, 0);
    }

    // Convenience overload: zero offsets on both buffers.
    public byte[] toByteArray(float[] in_buff, int in_len, byte[] out_buff) {
        return toByteArray(in_buff, 0, in_len, out_buff, 0);
    }

    // Convenience overload: convert every sample in in_buff.
    public byte[] toByteArray(float[] in_buff, byte[] out_buff) {
        return toByteArray(in_buff, 0, in_buff.length, out_buff, 0);
    }
+
+}

+ 158 - 0
audio-analysis/src/main/java/com/yonge/audio/analysis/Complex.java

@@ -0,0 +1,158 @@
+package com.yonge.audio.analysis;
+
+/*************************************************************************
+ * Compilation: javac Complex.java Execution: java Complex
+ * 
+ * Data type for complex numbers.
+ * 
+ * The data type is "immutable" so once you create and initialize a Complex
+ * object, you cannot change it. The "final" keyword when declaring re and im
+ * enforces this rule, making it a compile-time error to change the .re or .im
+ * fields after they've been initialized.
+ * 
+ * % java Complex a = 5.0 + 6.0i b = -3.0 + 4.0i Re(a) = 5.0 Im(a) = 6.0 b + a =
+ * 2.0 + 10.0i a - b = 8.0 + 2.0i a * b = -39.0 + 2.0i b * a = -39.0 + 2.0i a /
+ * b = 0.36 - 1.52i (a / b) * b = 5.0 + 6.0i conj(a) = 5.0 - 6.0i |a| =
+ * 7.810249675906654 tan(a) = -6.685231390246571E-6 + 1.0000103108981198i
+ * 
+ *************************************************************************/
+
+public class Complex {
+	private final double re; // the real part
+	private final double im; // the imaginary part
+
+	// create a new object with the given real and imaginary parts
+	public Complex(double real, double imag) {
+		re = real;
+		im = imag;
+	}
+
+	// return a string representation of the invoking Complex object
+	public String toString() {
+		if (im == 0)
+			return re + "";
+		if (re == 0)
+			return im + "i";
+		if (im < 0)
+			return re + " - " + (-im) + "i";
+		return re + " + " + im + "i";
+	}
+
+	// return abs/modulus/magnitude and angle/phase/argument
+	public double abs() {
+		return Math.hypot(re, im);
+	} // Math.sqrt(re*re + im*im)
+
+	public double phase() {
+		return Math.atan2(im, re);
+	} // between -pi and pi
+
+	// return a new Complex object whose value is (this + b)
+	public Complex plus(Complex b) {
+		Complex a = this; // invoking object
+		double real = a.re + b.re;
+		double imag = a.im + b.im;
+		return new Complex(real, imag);
+	}
+
+	// return a new Complex object whose value is (this - b)
+	public Complex minus(Complex b) {
+		Complex a = this;
+		double real = a.re - b.re;
+		double imag = a.im - b.im;
+		return new Complex(real, imag);
+	}
+
+	// return a new Complex object whose value is (this * b)
+	public Complex times(Complex b) {
+		Complex a = this;
+		double real = a.re * b.re - a.im * b.im;
+		double imag = a.re * b.im + a.im * b.re;
+		return new Complex(real, imag);
+	}
+
+	// scalar multiplication
+	// return a new object whose value is (this * alpha)
+	public Complex times(double alpha) {
+		return new Complex(alpha * re, alpha * im);
+	}
+
+	// return a new Complex object whose value is the conjugate of this
+	public Complex conjugate() {
+		return new Complex(re, -im);
+	}
+
+	// return a new Complex object whose value is the reciprocal of this
+	public Complex reciprocal() {
+		double scale = re * re + im * im;
+		return new Complex(re / scale, -im / scale);
+	}
+
+	// return the real or imaginary part
+	public double re() {
+		return re;
+	}
+
+	public double im() {
+		return im;
+	}
+
+	// return a / b
+	public Complex divides(Complex b) {
+		Complex a = this;
+		return a.times(b.reciprocal());
+	}
+
+	// return a new Complex object whose value is the complex exponential of
+	// this
+	public Complex exp() {
+		return new Complex(Math.exp(re) * Math.cos(im), Math.exp(re)
+				* Math.sin(im));
+	}
+
+	// return a new Complex object whose value is the complex sine of this
+	public Complex sin() {
+		return new Complex(Math.sin(re) * Math.cosh(im), Math.cos(re)
+				* Math.sinh(im));
+	}
+
+	// return a new Complex object whose value is the complex cosine of this
+	public Complex cos() {
+		return new Complex(Math.cos(re) * Math.cosh(im), -Math.sin(re)
+				* Math.sinh(im));
+	}
+
+	// return a new Complex object whose value is the complex tangent of this
+	public Complex tan() {
+		return sin().divides(cos());
+	}
+
+	// a static version of plus
+	public static Complex plus(Complex a, Complex b) {
+		double real = a.re + b.re;
+		double imag = a.im + b.im;
+		Complex sum = new Complex(real, imag);
+		return sum;
+	}
+
+	
+	public static void main(String[] args) {
+		Complex a = new Complex(5.0, 0.0);
+		Complex b = new Complex(-3.0, 4.0);
+
+		System.out.println("a            = " + a);
+		System.out.println("b            = " + b);
+		System.out.println("Re(a)        = " + a.re());
+		System.out.println("Im(a)        = " + a.im());
+		System.out.println("b + a        = " + b.plus(a));
+		System.out.println("a - b        = " + a.minus(b));
+		System.out.println("a * b        = " + a.times(b));
+		System.out.println("b * a        = " + b.times(a));
+		System.out.println("a / b        = " + a.divides(b));
+		System.out.println("(a / b) * b  = " + a.divides(b).times(b));
+		System.out.println("conj(a)      = " + a.conjugate());
+		System.out.println("|a|          = " + a.abs());
+		System.out.println("tan(a)       = " + a.tan());
+	}
+
+}

+ 167 - 0
audio-analysis/src/main/java/com/yonge/audio/analysis/FFT.java

@@ -0,0 +1,167 @@
+package com.yonge.audio.analysis;
+
+
+/*************************************************************************
+ * Compilation: javac FFT.java Execution: java FFT N Dependencies: Complex.java
+ * 
+ * Compute the FFT and inverse FFT of a length N complex sequence. Bare bones
+ * implementation that runs in O(N log N) time. Our goal is to optimize the
+ * clarity of the code, rather than performance.
+ * 
+ * Limitations ----------- - assumes N is a power of 2
+ * 
+ * - not the most memory efficient algorithm (because it uses an object type for
+ * representing complex numbers and because it re-allocates memory for the
+ * subarray, instead of doing in-place or reusing a single temporary array)
+ * 
+ *************************************************************************/
+
public class FFT {

	// Compute the FFT of x[], assuming its length is a power of 2.
	// Recursive radix-2 Cooley-Tukey; allocates O(N log N) temporaries
	// (clarity over performance, per the header comment).
	public static Complex[] fft(Complex[] x) {
		int N = x.length;

		// base case
		if (N == 1)
			return new Complex[] { x[0] };

		// Radix-2 split requires an even length. Note this only rejects a
		// non-power-of-2 N once some recursion level hits an odd length;
		// N == 0 would fail this check's intent (callers must not pass it).
		if (N % 2 != 0) {
			throw new RuntimeException("N is not a power of 2");
		}

		// fft of even-indexed terms
		Complex[] even = new Complex[N / 2];
		for (int k = 0; k < N / 2; k++) {
			even[k] = x[2 * k];
		}
		Complex[] q = fft(even);

		// fft of odd-indexed terms; 'even' can be reused because fft()
		// returned a freshly allocated result array above
		Complex[] odd = even; // reuse the array
		for (int k = 0; k < N / 2; k++) {
			odd[k] = x[2 * k + 1];
		}
		Complex[] r = fft(odd);

		// butterfly combine with twiddle factors w_k = e^(-2*pi*i*k/N)
		Complex[] y = new Complex[N];
		for (int k = 0; k < N / 2; k++) {
			double kth = -2 * k * Math.PI / N;
			Complex wk = new Complex(Math.cos(kth), Math.sin(kth));
			y[k] = q[k].plus(wk.times(r[k]));
			y[k + N / 2] = q[k].minus(wk.times(r[k]));
		}
		return y;
	}

	// Compute the inverse FFT of x[], assuming its length is a power of 2,
	// via the conjugate trick: ifft(x) = conj(fft(conj(x))) / N.
	public static Complex[] ifft(Complex[] x) {
		int N = x.length;
		Complex[] y = new Complex[N];

		// take conjugate
		for (int i = 0; i < N; i++) {
			y[i] = x[i].conjugate();
		}

		// compute forward FFT
		y = fft(y);

		// take conjugate again
		for (int i = 0; i < N; i++) {
			y[i] = y[i].conjugate();
		}

		// divide by N
		for (int i = 0; i < N; i++) {
			y[i] = y[i].times(1.0 / N);
		}

		return y;

	}

	// Compute the circular convolution of x and y (equal power-of-2
	// lengths required) by point-wise multiplying their spectra.
	public static Complex[] cconvolve(Complex[] x, Complex[] y) {

		// should probably pad x and y with 0s so that they have same length
		// and are powers of 2
		if (x.length != y.length) {
			throw new RuntimeException("Dimensions don't agree");
		}

		int N = x.length;

		// compute FFT of each sequence
		Complex[] a = fft(x);
		Complex[] b = fft(y);

		// point-wise multiply
		Complex[] c = new Complex[N];
		for (int i = 0; i < N; i++) {
			c[i] = a[i].times(b[i]);
		}

		// compute inverse FFT
		return ifft(c);
	}

	// Compute the linear convolution of x and y by zero-padding both to
	// twice their length and taking the circular convolution.
	public static Complex[] convolve(Complex[] x, Complex[] y) {
		Complex ZERO = new Complex(0, 0);

		Complex[] a = new Complex[2 * x.length];
		for (int i = 0; i < x.length; i++)
			a[i] = x[i];
		for (int i = x.length; i < 2 * x.length; i++)
			a[i] = ZERO;

		Complex[] b = new Complex[2 * y.length];
		for (int i = 0; i < y.length; i++)
			b[i] = y[i];
		for (int i = y.length; i < 2 * y.length; i++)
			b[i] = ZERO;

		return cconvolve(a, b);
	}

	// Print an array of Complex numbers to standard output, one per line.
	public static void show(Complex[] x, String title) {
		System.out.println(title);
		System.out.println("-------------------");
		for (int i = 0; i < x.length; i++) {
			System.out.println(x[i]);
		}
		System.out.println();
	}

	/*********************************************************************
	 * Test client and sample execution
	 * 
	 * % java FFT 4 x ------------------- -0.03480425839330703
	 * 0.07910192950176387 0.7233322451735928 0.1659819820667019
	 * 
	 * y = fft(x) ------------------- 0.9336118983487516 -0.7581365035668999 +
	 * 0.08688005256493803i 0.44344407521182005 -0.7581365035668999 -
	 * 0.08688005256493803i
	 * 
	 * z = ifft(y) ------------------- -0.03480425839330703 0.07910192950176387
	 * + 2.6599344570851287E-18i 0.7233322451735928 0.1659819820667019 -
	 * 2.6599344570851287E-18i
	 * 
	 * c = cconvolve(x, x) ------------------- 0.5506798633981853
	 * 0.23461407150576394 - 4.033186818023279E-18i -0.016542951108772352
	 * 0.10288019294318276 + 4.033186818023279E-18i
	 * 
	 * d = convolve(x, x) ------------------- 0.001211336402308083 -
	 * 3.122502256758253E-17i -0.005506167987577068 - 5.058885073636224E-17i
	 * -0.044092969479563274 + 2.1934338938072244E-18i 0.10288019294318276 -
	 * 3.6147323062478115E-17i 0.5494685269958772 + 3.122502256758253E-17i
	 * 0.240120239493341 + 4.655566391833896E-17i 0.02755001837079092 -
	 * 2.1934338938072244E-18i 4.01805098805014E-17i
	 * 
	 *********************************************************************/

}

+ 141 - 0
audio-analysis/src/main/java/com/yonge/audio/analysis/Signals.java

@@ -0,0 +1,141 @@
+package com.yonge.audio.analysis;
+
+import javax.sound.sampled.AudioFormat;
+import javax.sound.sampled.AudioSystem;
+import javax.sound.sampled.DataLine;
+import javax.sound.sampled.LineUnavailableException;
+import javax.sound.sampled.TargetDataLine;
+
public class Signals {
	/** Arithmetic mean of the samples (NaN for an empty array). */
	public static float mean(float[] signal) {
		float mean = 0;
		for (int i = 0; i < signal.length; i++)
			mean += signal[i];
		mean /= signal.length;
		return mean;
	}

	/** Total energy: sum of squared samples. */
	public static float energy(float[] signal) {
		float totalEnergy = 0;
		for (int i = 0; i < signal.length; i++)
			totalEnergy += Math.pow(signal[i], 2);
		return totalEnergy;
	}

	/** Average power: energy divided by sample count. */
	public static float power(float[] signal) {
		return energy(signal) / signal.length;
	}

	/** Euclidean (L2) norm of the signal. */
	public static float norm(float[] signal) {
		return (float) Math.sqrt(energy(signal));
	}

	/** Smallest sample value (+Infinity for an empty array). */
	public static float minimum(float[] signal) {
		float min = Float.POSITIVE_INFINITY;
		for (int i = 0; i < signal.length; i++)
			min = Math.min(min, signal[i]);
		return min;
	}

	/** Largest sample value (-Infinity for an empty array). */
	public static float maximum(float[] signal) {
		float max = Float.NEGATIVE_INFINITY;
		for (int i = 0; i < signal.length; i++)
			max = Math.max(max, signal[i]);
		return max;
	}

	/**
	 * Scales the signal in place, clamping results to the signed 16-bit
	 * range [-32768, 32767] (samples are treated as 16-bit PCM values).
	 */
	public static void scale(float[] signal, float scale) {
		for (int i = 0; i < signal.length; i++) {
			signal[i] *= scale;
			if (signal[i] > 32767) {
				signal[i] = 32767;
			} else if (signal[i] < -32768) {
				signal[i] = -32768;
			}
		}
	}

	public static float rms(float[] samples) {
		// Root-mean-square (RMS) level: sqrt of the average power.
		return (float) Math.sqrt(power(samples));

	}

	/**
	 * Sound pressure level in dB relative to full scale. Note: returns
	 * -Infinity for an all-zero buffer (log10 of 0).
	 */
	public static double soundPressureLevel(float[] samples) {

		double rms = rms(samples);
		// Compute the Sound Pressure Level from the RMS value.
		return (20.0 * Math.log10(rms));
	}

	/** Decibel estimate clamped to [0, 127]; returns 0 for silence. */
	public static int decibels(float[] samples) {
		// Decibel range of the returned value.
		double minDecibels = 0, db = 0, maxDecibels = 127;

		double rms = rms(samples);

		if (rms > 0) {
			// 20 uPa RMS is the common "zero" reference sound pressure in
			// air, generally taken as the threshold of human hearing.
			db = 20 * Math.log10(rms / 0.00002);

			if (db > maxDecibels) {
				db = maxDecibels;
			} else if (db < minDecibels) {
				db = minDecibels;
			}
		}

		return (int) db;
	}

	// Demo: reads from the microphone forever, printing level metrics and
	// the FFT-peak frequency for each buffer. Never returns; the
	// TargetDataLine is intentionally left open for the life of the process.
	public static void main(String[] args) throws LineUnavailableException {

		float sampleRate = 44100;

		// 16-bit signed mono PCM, little-endian.
		AudioFormat audioFormat = new AudioFormat(sampleRate, 16, 1, true, false);

		DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, audioFormat);

		TargetDataLine targetDataLine = (TargetDataLine) AudioSystem.getLine(dataLineInfo);

		targetDataLine.open(audioFormat);
		targetDataLine.start();

		AudioFloatConverter converter = AudioFloatConverter.getConverter(audioFormat);

		byte[] buffer = new byte[1024 * 8];

		while (true) {
			targetDataLine.read(buffer, 0, buffer.length);

			// 2 bytes per 16-bit sample.
			float[] sampleFloats = new float[buffer.length / 2];
			converter.toFloatArray(buffer, sampleFloats);

			// Compute the Sound Pressure Level.
			double splDb = soundPressureLevel(sampleFloats);

			int db = decibels(sampleFloats);

			Complex[] complex = new Complex[sampleFloats.length];

			for (int i = 0; i < sampleFloats.length; i++) {
				complex[i] = new Complex(sampleFloats[i], 0);
			}
			Complex[] result = FFT.fft(complex);

			// Peak-pick the first half of the spectrum (up to Nyquist).
			double maxMagnitude = result[0].abs();
			int maxIndex = 0;

			for (int i = 1; i < result.length / 2; i++) {
				if (maxMagnitude < result[i].abs()) {
					maxMagnitude = result[i].abs();
					maxIndex = i;
				}
			}

			// Convert the peak bin index to a frequency in Hz.
			double f = maxIndex * sampleRate / result.length;

			System.out.println("db:" + db + "  energy:" + energy(sampleFloats) + "	power:" + power(sampleFloats) + "  rms:" + rms(sampleFloats) + "	splDb: "
					+ splDb + "	frequency: " + f);
		}
	}
}

+ 52 - 0
audio-analysis/src/main/java/com/yonge/audio/analysis/detector/FrequencyDetector.java

@@ -0,0 +1,52 @@
+package com.yonge.audio.analysis.detector;
+
+import com.yonge.audio.analysis.Complex;
+import com.yonge.audio.analysis.FFT;
+
+public class FrequencyDetector {
+
+	private float[] samples;
+
+	private boolean isUseHanmingWindow;
+
+	private float sampleRate;
+
+	public FrequencyDetector(float[] samples, float sampleRate, boolean isUseHanmingWindow) {
+		this.samples = samples;
+		this.sampleRate = sampleRate;
+		this.isUseHanmingWindow = isUseHanmingWindow;
+	}
+
+	public double getFrequency() {
+
+		if (isUseHanmingWindow) {
+			// 加汉明窗
+			hamming(samples);
+		}
+
+		Complex[] complex = new Complex[samples.length];
+
+		for (int i = 0; i < samples.length; i++) {
+			complex[i] = new Complex(samples[i], 0);
+		}
+		Complex[] result = FFT.fft(complex);
+
+		double maxMagnitude = result[0].abs();
+		int maxIndex = 0;
+
+		for (int i = 1; i < result.length / 2; i++) {
+			if (maxMagnitude < result[i].abs()) {
+				maxMagnitude = result[i].abs();
+				maxIndex = i;
+			}
+		}
+
+		return maxIndex * sampleRate / result.length;
+	}
+
+	private void hamming(float[] samples) {
+		for (int i = 0; i < samples.length; i++) {
+			samples[i] *= (0.54f - 0.46f * Math.cos((2 * Math.PI) * i / (samples.length - 1)));
+		}
+	}
+}

+ 223 - 0
audio-analysis/src/main/java/com/yonge/audio/analysis/detector/YINPitchDetector.java

@@ -0,0 +1,223 @@
+package com.yonge.audio.analysis.detector;
+
/**
 * A pitch detector that uses the YIN algorithm to determine the fundamental
 * frequency of the provided waveform data. YIN is similar to the
 * auto-correlation function used for pitch detection but adds additional steps
 * to improve accuracy; each step lowers the error rate further. This
 * implementation was inspired by
 * <a href="https://github.com/JorenSix/TarsosDSP/blob/master/src/core/be/tarsos/dsp/pitch/Yin.java">TarsosDSP</a>
 * and
 * <a href="http://recherche.ircam.fr/equipes/pcm/cheveign/ps/2002_JASA_YIN_proof.pdf">the YIN paper</a>.
 * The six steps in the YIN algorithm are (according to the paper):
 * <ol>
 * <li>Auto-correlation Method</li>
 * <li>Difference Function</li>
 * <li>Cumulative Mean Normalized Difference Function</li>
 * <li>Absolute Threshold</li>
 * <li>Parabolic Interpolation</li>
 * <li>Best Local Estimate</li>
 * </ol>
 * The first two steps are combined here into a single difference-function step.
 */
public class YINPitchDetector {
    // According to the YIN paper, the threshold should be between 0.10 and 0.15.
    private static final float ABSOLUTE_THRESHOLD = 0.125f;

    /**
     * C-1 = 16.35 / 2 Hz — reference frequency for {@link #hertzToAbsoluteCent(double)}.
     */
    private static final double REF_FREQ = 8.17579892;

    /**
     * Cached log(2) used by the cent conversion.
     */
    private static final double LOG_TWO = Math.log(2.0);

    /** Sampling rate of the waveforms passed to {@link #getFrequency(float[])}, in Hz. */
    private final double sampleRate;

    /** Difference-function working buffer; half the size of the analysed wave buffer. */
    private final float[] resultBuffer;

    /**
     * @param bufferSize length of the wave arrays that will be analysed
     * @param sampleRate sampling rate of those waves, in Hz
     */
    public YINPitchDetector(int bufferSize, float sampleRate) {
        this.sampleRate = sampleRate;
        this.resultBuffer = new float[bufferSize / 2];
    }

    /**
     * The reference frequency is configured. The default reference frequency is
     * 16.35Hz. This is C0 on a piano keyboard with A4 tuned to 440 Hz. This
     * means that 0 cents is C0; 1200 is C1; 2400 is C2; ... also -1200 cents is
     * C-1
     *
     * @param hertzValue
     *            The pitch in Hertz.
     * @return The value in absolute cents using the configured reference
     *         frequency; 0.0 for non-positive input.
     */
    public static double hertzToAbsoluteCent(final double hertzValue) {
        double pitchInAbsCent = 0.0;
        if (hertzValue > 0) {
            pitchInAbsCent = 1200 * Math.log(hertzValue / REF_FREQ) / LOG_TWO;
        }
        return pitchInAbsCent;
    }

    /**
     * Estimates the fundamental frequency (Hz) of the given waveform.
     *
     * @param wave waveform samples; must be at least as long as the buffer size
     *             passed at construction (the difference step reads up to index
     *             {@code bufferSize - 2})
     * @return the estimated frequency, sampleRate / period
     */
    public double getFrequency(float[] wave) {
        int tau;

        // First, perform the functions to normalize the wave data

        // The first and second steps in the YIN algorithm
        autoCorrelationDifference(wave);

        // The third step in the YIN algorithm
        cumulativeMeanNormalizedDifference();

        // Then perform the functions to retrieve the tau (the approximate period)

        // The fourth step in the YIN algorithm
        tau = absoluteThreshold();

        // The fifth step in the YIN algorithm
        float betterTau = parabolicInterpolation(tau);

        // TODO implement the sixth and final step of the YIN algorithm
        // (it isn't implemented in the Tarsos DSP project but is briefly explained in the YIN
        // paper).

        // The fundamental frequency (note frequency) is the sampling rate divided by the tau
        // (index within the resulting buffer array that marks the period).
        // Frequency = 1 / Period, so with respect to the sampling rate,
        // Frequency = Sample Rate / Period.
        return sampleRate / betterTau;
    }

    /**
     * Performs the first and second step of the YIN Algorithm on the provided array buffer values.
     * This is a "combination" of the AutoCorrelation Method and the Difference Function:
     * d(tau) = sum over i of (x(i) - x(i + tau))^2, accumulated into
     * {@link #resultBuffer}, which the subsequent steps of the algorithm use.
     *
     * @param wave The waveform data to perform the AutoCorrelation Difference function on.
     */
    private void autoCorrelationDifference(final float[] wave) {
        // Note this algorithm is currently slow (O(n^2)). Should look for any possible optimizations.
        int length = resultBuffer.length;
        int i, j;

        // Bug fix: clear the accumulator first. The buffer is reused across calls,
        // so without this a second getFrequency() call would fold stale values from
        // the previous waveform into the sums and return a wrong pitch.
        for (j = 0; j < length; j++) {
            resultBuffer[j] = 0;
        }

        for (j = 1; j < length; j++) {
            for (i = 0; i < length; i++) {
                // d sub t (tau) = (x(i) - x(i + tau))^2, accumulated over the buffer
                resultBuffer[j] += Math.pow((wave[i] - wave[i + j]), 2);
            }
        }
    }

    /**
     * Performs the third step in the YIN Algorithm on the {@link #resultBuffer}: each value is
     * normalized by the running mean of all values up to it. The result of this function yields
     * an even lower error rate (about 1.69% from 1.95%). The {@link #resultBuffer} is updated
     * in place.
     */
    private void cumulativeMeanNormalizedDifference() {
        // newValue = oldValue / (runningSum / tau)
        // == oldValue * (tau / runningSum)

        // Here index i plays the role of "tau" in the equation
        int i;
        int length = resultBuffer.length;
        float runningSum = 0;

        // By definition the first value of the normalized difference is one
        resultBuffer[0] = 1;

        for (i = 1; i < length; i++) {
            // The sum of this value plus all the previous values in the buffer array
            runningSum += resultBuffer[i];

            // Normalize by the running mean (note: i / runningSum is float division)
            resultBuffer[i] *= i / runningSum;
        }
    }

    /**
     * Performs step four of the YIN Algorithm on the {@link #resultBuffer}: finds the first dip
     * below {@link #ABSOLUTE_THRESHOLD}, then follows it down to its local minimum. Using a
     * threshold instead of the global minimum avoids latching onto false dips in natural sound
     * samples (error rate drops to about 0.78% from about 1.69%).
     *
     * @return The tau indicating the approximate period.
     */
    private int absoluteThreshold() {
        int tau;
        int length = resultBuffer.length;

        // The normalized difference starts at 1, so begin scanning at the third value
        for (tau = 2; tau < length; tau++) {
            // Once below the threshold, keep advancing while the curve still descends,
            // so we land on the bottom of this dip.
            if (resultBuffer[tau] < ABSOLUTE_THRESHOLD) {
                while (tau + 1 < length && resultBuffer[tau + 1] < resultBuffer[tau]) {
                    tau++;
                }

                // We have the approximate tau value, so break the loop
                break;
            }
        }

        // Some implementations signal "no tau found" with -1; this implementation
        // just clamps to the last index instead.
        tau = tau >= length ? length - 1 : tau;

        return tau;
    }

    /**
     * Step five: refines tau by fitting a parabola through the point and its two neighbours and
     * taking the parabola's vertex. Especially helps higher frequencies; error rate declines
     * slightly (about 0.78% to about 0.77%).
     *
     * @param currentTau the integer tau from {@link #absoluteThreshold()}
     * @return the interpolated (fractional) tau
     */
    private float parabolicInterpolation(final int currentTau) {
        // Neighbouring points to fit the parabola between, clamped to the buffer
        int x0 = currentTau < 1 ? currentTau : currentTau - 1;
        int x2 = currentTau + 1 < resultBuffer.length ? currentTau + 1 : currentTau;

        // Finds the better tau estimate
        float betterTau;

        if (x0 == currentTau) {
            // At the left edge: pick whichever of the two remaining points is lower
            if (resultBuffer[currentTau] <= resultBuffer[x2]) {
                betterTau = currentTau;
            } else {
                betterTau = x2;
            }
        } else if (x2 == currentTau) {
            // At the right edge: pick whichever of the two remaining points is lower
            if (resultBuffer[currentTau] <= resultBuffer[x0]) {
                betterTau = currentTau;
            } else {
                betterTau = x0;
            }
        } else {
            // Fit the parabola between the first point, current tau, and the last point
            // to find a better tau estimate.
            float s0 = resultBuffer[x0];
            float s1 = resultBuffer[currentTau];
            float s2 = resultBuffer[x2];

            // Bug fix: a degenerate (flat) parabola makes the denominator zero, which
            // previously produced an Infinity/NaN tau and a NaN or zero frequency.
            float denominator = 2 * (2 * s1 - s2 - s0);
            if (denominator == 0) {
                betterTau = currentTau;
            } else {
                betterTau = currentTau + (s2 - s0) / denominator;
            }
        }

        return betterTau;
    }
}

+ 8 - 8
audio-analysis/src/main/java/com/yonge/audio/utils/ArrayUtil.java

@@ -27,7 +27,7 @@ public class ArrayUtil {
 	}
 
 	/**
-	 * 根据指定的起始、结束为止提取数组中的数据,并返回
+	 * 根据指定的起始、结束为止提取数组中的数据(起止都包含),并返回
 	 * @param src
 	 * @param startIndex
 	 * @param endIndex
@@ -39,8 +39,8 @@ public class ArrayUtil {
 			throw new RuntimeException("结束索引[" + endIndex + "]不能小于起始索引[" + startIndex + "]");
 		}
 
-		byte[] target = new byte[endIndex - startIndex];
-		System.arraycopy(src, startIndex, target, 0, endIndex - startIndex);
+		byte[] target = new byte[endIndex - startIndex + 1];
+		System.arraycopy(src, startIndex, target, 0, target.length);
 
 		return target;
 	}
@@ -70,7 +70,7 @@ public class ArrayUtil {
 	}
 
 	/**
-	 * 根据指定的起始、结束为止提取数组中的数据,并返回
+	 * 根据指定的起始、结束为止提取数组中的数据(起止都包含),并返回
 	 * @param src
 	 * @param startIndex
 	 * @param endIndex
@@ -81,16 +81,16 @@ public class ArrayUtil {
 			throw new RuntimeException("结束索引[" + endIndex + "]不能小于起始索引[" + startIndex + "]");
 		}
 
-		float[] target = new float[endIndex - startIndex];
-		System.arraycopy(src, startIndex, target, 0, endIndex - startIndex);
+		float[] target = new float[endIndex - startIndex + 1];
+		System.arraycopy(src, startIndex, target, 0, target.length);
 
 		return target;
 	}
 
 	public static void main(String[] args) {
 		byte[] b1 = { 1, 2, 3, 4, 5 };
-		byte[] b2 = { 3, 2, 1 };
-		byte[] r = mergeByte(b1, b2);
+		//byte[] b2 = { 3, 2, 1 };
+		byte[] r = extractByte(b1, 0, 4);
 		for (int i = 0; i < r.length; i++) {
 			System.out.println(r[i]);
 		}

+ 100 - 0
audio-analysis/src/main/java/com/yonge/nettty/dto/ChunkAnalysis.java

@@ -0,0 +1,100 @@
+package com.yonge.nettty.dto;
+
/**
 * Per-chunk audio analysis snapshot: the timing of one audio chunk plus the
 * frequency, sound-pressure level, power and amplitude measured in it.
 */
public class ChunkAnalysis {

	// Chunk start time (ms)
	private double startTime;

	// Chunk end time (ms)
	private double endTime;

	// endTime - startTime (ms); derived only by the timing constructor
	private double durationTime;

	// Detected frequency (Hz)
	private int frequency;

	// Sound pressure level (dB)
	private int splDb;

	// Signal power
	private int power;

	// Signal amplitude
	private int amplitude;

	// Whether this chunk was flagged as an amplitude peak
	private boolean isPeak;

	/**
	 * Builds a fully-timed chunk; the duration is derived from the two timestamps.
	 */
	public ChunkAnalysis(double startTime, double endTime, int frequency, int splDb, int power, int amplitude) {
		this.startTime = startTime;
		this.endTime = endTime;
		this.durationTime = endTime - startTime;
		this.frequency = frequency;
		this.splDb = splDb;
		this.power = power;
		this.amplitude = amplitude;
	}

	/**
	 * Builds a measurement-only chunk; all timing fields remain zero.
	 */
	public ChunkAnalysis(int frequency, int splDb, int power) {
		this.frequency = frequency;
		this.splDb = splDb;
		this.power = power;
	}

	public double getStartTime() {
		return startTime;
	}

	public void setStartTime(double startTime) {
		this.startTime = startTime;
	}

	public double getEndTime() {
		return endTime;
	}

	public void setEndTime(double endTime) {
		this.endTime = endTime;
	}

	public double getDurationTime() {
		return durationTime;
	}

	public void setDurationTime(double durationTime) {
		this.durationTime = durationTime;
	}

	public int getFrequency() {
		return frequency;
	}

	public void setFrequency(int frequency) {
		this.frequency = frequency;
	}

	public int getSplDb() {
		return splDb;
	}

	public void setSplDb(int splDb) {
		this.splDb = splDb;
	}

	public int getPower() {
		return power;
	}

	public void setPower(int power) {
		this.power = power;
	}

	public int getAmplitude() {
		return amplitude;
	}

	public void setAmplitude(int amplitude) {
		this.amplitude = amplitude;
	}

	public boolean isPeak() {
		return isPeak;
	}

	public void setPeak(boolean isPeak) {
		this.isPeak = isPeak;
	}
}

+ 67 - 0
audio-analysis/src/main/java/com/yonge/nettty/dto/HardLevelEnum.java

@@ -0,0 +1,67 @@
+package com.yonge.nettty.dto;
+
+import com.ym.mec.common.enums.BaseEnum;
+
/**
 * Evaluation difficulty level and the tolerance thresholds attached to it.
 * NOTE(review): all three levels currently carry identical threshold values —
 * presumably placeholders to be tuned per level; confirm the intended numbers.
 */
public enum HardLevelEnum implements BaseEnum<String, HardLevelEnum> {
	BEGINNER("入门级", 5, 5, 50, 60, 10), ADVANCED("进阶级", 5, 5, 50, 60, 10), PERFORMER("大师级", 5, 5, 50, 60, 10);

	// Display name of the difficulty level
	private String msg;

	// Amplitude threshold
	private int amplitudeThreshold;

	// Allowed frequency offset (original comment had a typo: "频率法制")
	private int frequencyOffset;

	// Tempo offset percentage; tempo counts as correct within this range
	private int tempoOffsetOfPercent;

	// Completeness (integrity) range
	private int integrityRange;

	// Range treated as "not played"
	private int notPlayRange;

	/**
	 * @param msg display name of the level
	 * @param amplitudeThreshold amplitude threshold
	 * @param frequencyOffset allowed frequency offset
	 * @param tempoOffsetOfPercent tempo offset percentage (tempo is correct within this range)
	 * @param integrityRange completeness range
	 * @param notPlayRange not-played range
	 */
	HardLevelEnum(String msg, int amplitudeThreshold, int frequencyOffset, int tempoOffsetOfPercent, int integrityRange, int notPlayRange) {
		this.msg = msg;
		this.amplitudeThreshold = amplitudeThreshold;
		this.frequencyOffset = frequencyOffset;
		this.tempoOffsetOfPercent = tempoOffsetOfPercent;
		this.integrityRange = integrityRange;
		this.notPlayRange = notPlayRange;
	}

	public String getMsg() {
		return msg;
	}

	public int getAmplitudeThreshold() {
		return amplitudeThreshold;
	}

	public int getFrequencyOffset() {
		return frequencyOffset;
	}

	public int getTempoOffsetOfPercent() {
		return tempoOffsetOfPercent;
	}

	public int getIntegrityRange() {
		return integrityRange;
	}

	public int getNotPlayRange() {
		return notPlayRange;
	}

	@Override
	public String getCode() {
		return this.name();
	}

}

+ 190 - 0
audio-analysis/src/main/java/com/yonge/nettty/dto/NoteAnalysis.java

@@ -0,0 +1,190 @@
+package com.yonge.nettty.dto;
+
+import com.ym.mec.common.enums.BaseEnum;
+
/**
 * Evaluation result for a single musical note: the expected note data from the
 * score plus what was actually detected during playing, with per-dimension scores.
 */
public class NoteAnalysis {

	/** Verdict for a single note's evaluation. */
	public enum NoteErrorType implements BaseEnum<String, NoteErrorType> {
		RIGHT("演奏正确"), CADENCE_WRONG("节奏错误"), INTONATION_WRONG("音准错误"), INTEGRITY_WRONG("完整度不足"), NOT_PLAY("未演奏");

		// Human-readable description (Chinese): played correctly / tempo wrong /
		// intonation wrong / insufficient integrity / not played
		private String msg;

		NoteErrorType(String msg) {
			this.msg = msg;
		}

		public String getMsg() {
			return msg;
		}

		@Override
		public String getCode() {
			return this.name();
		}
	}

	// Note index within the whole score (musicalNotesIndex)
	private int index;

	// Index of the measure (section) this note belongs to
	private int sectionIndex;

	// Detected start time of the played note, ms
	private double startTime;

	// Detected end time of the played note, ms
	private double endTime;
	
	// Expected duration from the score, ms
	private double standardDurationTime;

	// Actual measured duration, ms
	private double durationTime;

	// Expected frequency from the score, Hz
	private int frequency;

	// Detected frequency, Hz; -1 means nothing detected yet
	private int playFrequency = -1;

	// Whether the note was played in tempo
	private boolean tempo = true;

	// Verdict; defaults to RIGHT until analysis says otherwise
	private NoteErrorType noteErrorType = NoteErrorType.RIGHT;

	// Overall score for this note
	private int score;
	
	// Intonation (pitch accuracy) score
	private int intonationScore;
	
	// Tempo (rhythm) score
	private int tempoScore;
	
	// Integrity (completeness) score
	private int integrityScore;

	// Whether this note is excluded from evaluation
	private boolean ignore;
	
	/**
	 * Builds an analysis slot for an expected note taken from the score.
	 */
	public NoteAnalysis(int index, int sectionIndex, int frequency, double standardDurationTime) {
		this.standardDurationTime = standardDurationTime;
		this.index = index;
		this.sectionIndex = sectionIndex;
		this.frequency = frequency;
	}

	/**
	 * Builds an analysis record from detected playing data; duration is derived
	 * from the two timestamps.
	 */
	public NoteAnalysis(double startTime, double endTime, int playFrequency) {
		this.startTime = startTime;
		this.endTime = endTime;
		this.durationTime = endTime - startTime;
		this.playFrequency = playFrequency;
	}

	public int getMusicalNotesIndex() {
		return index;
	}

	public void setMusicalNotesIndex(int index) {
		this.index = index;
	}

	public double getStartTime() {
		return startTime;
	}

	public void setStartTime(double startTime) {
		this.startTime = startTime;
	}

	public double getEndTime() {
		return endTime;
	}

	public void setEndTime(double endTime) {
		this.endTime = endTime;
	}

	public double getDurationTime() {
		return durationTime;
	}

	public void setDurationTime(double durationTime) {
		this.durationTime = durationTime;
	}

	public double getStandardDurationTime() {
		return standardDurationTime;
	}

	public void setStandardDurationTime(double standardDurationTime) {
		this.standardDurationTime = standardDurationTime;
	}

	// NOTE(review): the field and setter are int but the getter widens to double —
	// intentional for serialization? Confirm; otherwise align the types.
	public double getPlayFrequency() {
		return playFrequency;
	}

	public void setPlayFrequency(int playFrequency) {
		this.playFrequency = playFrequency;
	}

	public int getFrequency() {
		return frequency;
	}

	public void setFrequency(int frequency) {
		this.frequency = frequency;
	}

	public boolean isTempo() {
		return tempo;
	}

	public void setTempo(boolean tempo) {
		this.tempo = tempo;
	}

	public int getSectionIndex() {
		return sectionIndex;
	}

	public void setSectionIndex(int sectionIndex) {
		this.sectionIndex = sectionIndex;
	}

	public boolean isIgnore() {
		return ignore;
	}

	public void setIgnore(boolean ignore) {
		this.ignore = ignore;
	}

	public NoteErrorType getMusicalErrorType() {
		return noteErrorType;
	}

	public void setMusicalErrorType(NoteErrorType noteErrorType) {
		this.noteErrorType = noteErrorType;
	}

	public int getScore() {
		return score;
	}

	public void setScore(int score) {
		this.score = score;
	}

	public int getIntonationScore() {
		return intonationScore;
	}

	public void setIntonationScore(int intonationScore) {
		this.intonationScore = intonationScore;
	}

	public int getTempoScore() {
		return tempoScore;
	}

	public void setTempoScore(int tempoScore) {
		this.tempoScore = tempoScore;
	}

	public int getIntegrityScore() {
		return integrityScore;
	}

	public void setIntegrityScore(int integrityScore) {
		this.integrityScore = integrityScore;
	}

}

+ 78 - 0
audio-analysis/src/main/java/com/yonge/nettty/dto/SectionAnalysis.java

@@ -0,0 +1,78 @@
+package com.yonge.nettty.dto;
+
+import org.apache.commons.lang3.builder.ToStringBuilder;
+
+public class SectionAnalysis {
+
+	// 小节下标
+	private int measureIndex;
+
+	// 音符数
+	private int noteNum;
+
+	// 持续时长
+	private double durationTime;
+
+	// 得分
+	private float score;
+	
+	private boolean isIngore;
+	
+	public SectionAnalysis() {
+		// TODO Auto-generated constructor stub
+	}
+
+	public SectionAnalysis(int index, int noteNum, float durationTime, float score, boolean isIngore) {
+		this.measureIndex = index;
+		this.noteNum = noteNum;
+		this.durationTime = durationTime;
+		this.score = score;
+		this.isIngore = isIngore;
+	}
+
+	public int getIndex() {
+		return measureIndex;
+	}
+
+	public void setIndex(int measureIndex) {
+		this.measureIndex = measureIndex;
+	}
+
+	public int getNoteNum() {
+		return noteNum;
+	}
+
+	public void setNoteNum(int noteNum) {
+		this.noteNum = noteNum;
+	}
+
+	public double getDurationTime() {
+		return durationTime;
+	}
+
+	public void setDurationTime(double durationTime) {
+		this.durationTime = durationTime;
+	}
+
+	public float getScore() {
+		return score;
+	}
+
+	public void setScore(float score) {
+		this.score = score;
+	}
+	
+	public boolean isIngore() {
+		return isIngore;
+	}
+
+	public void setIsIngore(boolean isIngore) {
+		this.isIngore = isIngore;
+	}
+
+	@Override
+	public String toString() {
+		return ToStringBuilder.reflectionToString(this);
+	}
+
+}

+ 655 - 108
audio-analysis/src/main/java/com/yonge/nettty/dto/UserChannelContext.java

@@ -1,51 +1,104 @@
 package com.yonge.nettty.dto;
 
+import java.math.BigDecimal;
+import java.util.ArrayList;
 import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
+import javax.sound.sampled.AudioFormat;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import be.tarsos.dsp.AudioEvent;
-import be.tarsos.dsp.pitch.PitchDetectionHandler;
-import be.tarsos.dsp.pitch.PitchDetectionResult;
-import be.tarsos.dsp.pitch.PitchDetector;
-import be.tarsos.dsp.pitch.PitchProcessor;
-
+import com.yonge.audio.analysis.Signals;
+import com.yonge.audio.analysis.detector.YINPitchDetector;
 import com.yonge.audio.utils.ArrayUtil;
+import com.yonge.nettty.dto.NoteAnalysis.NoteErrorType;
 import com.yonge.nettty.entity.MusicXmlBasicInfo;
 import com.yonge.nettty.entity.MusicXmlNote;
-import com.yonge.nettty.entity.NoteAnalysis;
+import com.yonge.nettty.entity.MusicXmlSection;
 import com.yonge.netty.server.processor.WaveformWriter;
 
 /**
  * 用户通道上下文
  */
-public class UserChannelContext implements PitchDetectionHandler {
+public class UserChannelContext {
 	
 	private final static Logger LOGGER = LoggerFactory.getLogger(UserChannelContext.class);
-
+	
+	private final double offsetMS = 350;
+	
+	private Long recordId;
+	
+	private Integer subjectId;
+	
+	private int beatDuration;
+	
+	private int beatByteLength;
+	
 	// 曲目与musicxml对应关系
 	private ConcurrentHashMap<Integer, MusicXmlBasicInfo> songMusicXmlMap = new ConcurrentHashMap<Integer, MusicXmlBasicInfo>();
 
 	private WaveformWriter waveFileProcessor;
 
-	private NoteAnalysis processingNote = new NoteAnalysis(0, 0);
+	private NoteAnalysis processingNote = new NoteAnalysis(0, 0, -1);
+	
+	private AtomicInteger evaluatingSectionIndex = new AtomicInteger(0);
+	
+	private List<NoteAnalysis> doneNoteAnalysisList = new ArrayList<NoteAnalysis>();
+	
+	private List<SectionAnalysis> doneSectionAnalysisList = new ArrayList<SectionAnalysis>();
+	
+	private List<ChunkAnalysis> chunkAnalysisList = new ArrayList<ChunkAnalysis>();
 	
 	private byte[] channelBufferBytes = new byte[0];
 	
-	private float[] handlerBufferBytes = new float[0];
-
-	private PitchDetector pitchDetector = PitchProcessor.PitchEstimationAlgorithm.FFT_YIN.getDetector(44100, 1024 * 4);
+	private double playTime;
 	
-	public ConcurrentHashMap<Integer, MusicXmlBasicInfo> getSongMusicXmlMap() {
-		return songMusicXmlMap;
+	private double receivedTime;
+	
+	private List<ChunkAnalysis> lastChunkAnalysisList = new ArrayList<ChunkAnalysis>();
+	
+	private HardLevelEnum hardLevel = HardLevelEnum.ADVANCED;
+	
+	public void init(String heardLevel, int subjectId, int beatDuration) {
+		this.subjectId = subjectId;
+		this.beatDuration = beatDuration;
+		this.beatByteLength = WaveformWriter.SAMPLE_RATE * WaveformWriter.BITS_PER_SAMPLE / 8 * beatDuration / 1000;
+		hardLevel = HardLevelEnum.valueOf(heardLevel);
+	}
+	
+	public byte[] skipHeader(byte[] datas) {
+		if (beatByteLength > 0) {
+			if (datas.length <= beatByteLength) {
+				beatByteLength -= datas.length;
+				return new byte[0];
+			}
+			if(beatByteLength % 2 != 0){
+				beatByteLength++;
+			}
+			datas = ArrayUtil.extractByte(datas, beatByteLength, datas.length - 1);
+			beatByteLength = 0;
+		}
+		return datas;
+	}
+	
+	public Long getRecordId() {
+		return recordId;
+	}
+
+	public void setRecordId(Long recordId) {
+		this.recordId = recordId;
 	}
 
-	public void setSongMusicXmlMap(ConcurrentHashMap<Integer, MusicXmlBasicInfo> songMusicXmlMap) {
-		this.songMusicXmlMap = songMusicXmlMap;
+	public ConcurrentHashMap<Integer, MusicXmlBasicInfo> getSongMusicXmlMap() {
+		return songMusicXmlMap;
 	}
 
 	public WaveformWriter getWaveFileProcessor() {
@@ -63,43 +116,67 @@ public class UserChannelContext implements PitchDetectionHandler {
 	public void setProcessingNote(NoteAnalysis processingNote) {
 		this.processingNote = processingNote;
 	}
+	
+	public List<SectionAnalysis> getDoneSectionAnalysisList() {
+		return doneSectionAnalysisList;
+	}
 
-	public void resetUserInfo() {
+	public List<NoteAnalysis> getDoneNoteAnalysisList() {
+		return doneNoteAnalysisList;
+	}
 
+	public void resetUserInfo() {
+		beatByteLength = WaveformWriter.SAMPLE_RATE * WaveformWriter.BITS_PER_SAMPLE / 8 * beatDuration / 1000;
 		waveFileProcessor = null;
-		processingNote = new NoteAnalysis(0,0);
+		processingNote = new NoteAnalysis(0,0,-1);
+		evaluatingSectionIndex = new AtomicInteger(0);
 		channelBufferBytes = new byte[0];
-		handlerBufferBytes = new float[0];
+		doneNoteAnalysisList = new ArrayList<NoteAnalysis>();
+		doneSectionAnalysisList = new ArrayList<SectionAnalysis>();
+		chunkAnalysisList = new ArrayList<ChunkAnalysis>();
+		recordId = null;
+		playTime = 0;
+		receivedTime = 0;
+		lastChunkAnalysisList = new ArrayList<ChunkAnalysis>();
 	}
-
-	public MusicXmlNote getCurrentMusicNote(Integer songId) {
-		if (songMusicXmlMap.size() == 0) {
-			return null;
-		}
+	
+	public MusicXmlBasicInfo getMusicXmlBasicInfo(Integer songId){
 		MusicXmlBasicInfo musicXmlBasicInfo = null;
 		if (songId == null) {
 			musicXmlBasicInfo = songMusicXmlMap.values().stream().findFirst().get();
 		} else {
 			musicXmlBasicInfo = songMusicXmlMap.get(songId);
 		}
+		return musicXmlBasicInfo;
+	}
+	
+	public MusicXmlSection getCurrentMusicSection(Integer songId, int sectionIndex){
+		MusicXmlBasicInfo musicXmlBasicInfo = getMusicXmlBasicInfo(songId);
+		return musicXmlBasicInfo.getMusicXmlSectionMap().get(sectionIndex);
+	}
+
+	public MusicXmlNote getCurrentMusicNote(Integer songId, Integer noteIndex) {
+		if (songMusicXmlMap.size() == 0) {
+			return null;
+		}
+		if(noteIndex == null){
+			noteIndex = processingNote.getMusicalNotesIndex();
+		}
+		final int index = noteIndex;
+		MusicXmlBasicInfo musicXmlBasicInfo = getMusicXmlBasicInfo(songId);
 
-		if (musicXmlBasicInfo != null && processingNote.getIndex() <= getTotalMusicNoteIndexNum(null)) {
-			return musicXmlBasicInfo.getMusicXmlInfos().stream().filter(t -> t.getMusicalNotesIndex() == processingNote.getIndex()).findFirst().get();
+		if (musicXmlBasicInfo != null && index <= getTotalMusicNoteIndex(null)) {
+			return musicXmlBasicInfo.getMusicXmlInfos().stream().filter(t -> t.getMusicalNotesIndex() == index).findFirst().get();
 		}
 
 		return null;
 	}
 
-	public int getTotalMusicNoteIndexNum(Integer songId) {
+	public int getTotalMusicNoteIndex(Integer songId) {
 		if (songMusicXmlMap.size() == 0) {
 			return -1;
 		}
-		MusicXmlBasicInfo musicXmlBasicInfo = null;
-		if (songId == null) {
-			musicXmlBasicInfo = songMusicXmlMap.values().stream().findFirst().get();
-		} else {
-			musicXmlBasicInfo = songMusicXmlMap.get(songId);
-		}
+		MusicXmlBasicInfo musicXmlBasicInfo = getMusicXmlBasicInfo(songId);
 
 		if (musicXmlBasicInfo != null) {
 			return musicXmlBasicInfo.getMusicXmlInfos().stream().map(t -> t.getMusicalNotesIndex()).distinct().max(Integer::compareTo).get();
@@ -108,43 +185,55 @@ public class UserChannelContext implements PitchDetectionHandler {
 		return -1;
 	}
 
-	public List<MusicXmlNote> getCurrentMusicSection(Integer songId) {
+	public List<MusicXmlNote> getCurrentMusicSection(Integer songId, Integer sectionIndex) {
 		if (songMusicXmlMap.size() == 0) {
 			return null;
 		}
-		MusicXmlBasicInfo musicXmlBasicInfo = null;
-		if (songId == null) {
-			musicXmlBasicInfo = songMusicXmlMap.values().stream().findFirst().get();
-		} else {
-			musicXmlBasicInfo = songMusicXmlMap.get(songId);
+		if(sectionIndex == null){
+			sectionIndex = processingNote.getSectionIndex();
 		}
+		final int index = sectionIndex;
+		MusicXmlBasicInfo musicXmlBasicInfo = getMusicXmlBasicInfo(songId);
 
 		if (musicXmlBasicInfo != null) {
-			return musicXmlBasicInfo.getMusicXmlInfos().stream().filter(t -> t.getMusicalNotesIndex() == processingNote.getSectionIndex())
+			return musicXmlBasicInfo.getMusicXmlInfos().stream().filter(t -> t.getMusicalNotesIndex() == index)
 					.sorted(Comparator.comparing(MusicXmlNote::getMusicalNotesIndex)).collect(Collectors.toList());
 		}
 
 		return null;
 	}
 
-	public int getTotalMusicSectionIndexNum(Integer songId) {
+	public int getTotalMusicSectionSize(Integer songId) {
 		if (songMusicXmlMap.size() == 0) {
 			return -1;
 		}
-		MusicXmlBasicInfo musicXmlBasicInfo = null;
-		if (songId == null) {
-			musicXmlBasicInfo = songMusicXmlMap.values().stream().findFirst().get();
-		} else {
-			musicXmlBasicInfo = songMusicXmlMap.get(songId);
-		}
+		MusicXmlBasicInfo musicXmlBasicInfo = getMusicXmlBasicInfo(songId);
 
 		if (musicXmlBasicInfo != null) {
-			return musicXmlBasicInfo.getMusicXmlInfos().stream().map(t -> t.getMeasureIndex()).distinct().max(Integer::compareTo).get();
+			return (int) musicXmlBasicInfo.getMusicXmlInfos().stream().map(t -> t.getMeasureIndex()).distinct().count();
 		}
 
 		return -1;
 	}
+	
+	public int getMusicSectionIndex(Integer songId, int musicXmlNoteIndex) {
+		if (songMusicXmlMap.size() == 0) {
+			return -1;
+		}
+		
+		if(getTotalMusicNoteIndex(null) < musicXmlNoteIndex){
+			return -1;
+		}
+		
+		MusicXmlBasicInfo musicXmlBasicInfo = getMusicXmlBasicInfo(songId);
+
+		if (musicXmlBasicInfo != null) {
+			return musicXmlBasicInfo.getMusicXmlInfos().stream().filter(t -> t.getMusicalNotesIndex() == musicXmlNoteIndex).findFirst().get().getMeasureIndex();
+		}
 
+		return -1;
+	}
+	
 	public byte[] getChannelBufferBytes() {
 		return channelBufferBytes;
 	}
@@ -153,25 +242,31 @@ public class UserChannelContext implements PitchDetectionHandler {
 		this.channelBufferBytes = channelBufferBytes;
 	}
 
-	public float[] getHandlerBufferBytes() {
-		return handlerBufferBytes;
+	public AtomicInteger getEvaluatingSectionIndex() {
+		return evaluatingSectionIndex;
 	}
 
-	public void setHandlerBufferBytes(float[] handlerBufferBytes) {
-		this.handlerBufferBytes = handlerBufferBytes;
-	}
+	public void handle(float[] samples, AudioFormat audioFormat){
+		
+		YINPitchDetector frequencyDetector = new YINPitchDetector(samples.length , audioFormat.getSampleRate());
 
-	@Override
-	public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent audioEvent) {
+		int playFrequency = (int) frequencyDetector.getFrequency(samples);
+		int splDb = (int) Signals.soundPressureLevel(samples);
+		int power = (int) Signals.power(samples);
+		int amplitude = (int) Signals.norm(samples);
 		
-		double durationTime = 1000 * (audioEvent.getFloatBuffer().length) / audioEvent.getSampleRate() / 2;
+		double durationTime = 1000 * (samples.length * 2) / audioFormat.getSampleRate() / (audioFormat.getSampleSizeInBits() / 8);
 		
-		float pitch = pitchDetectionResult.getPitch();
+		receivedTime += durationTime;
 		
-		//LOGGER.info("pitch:{} timeStamp:{} endTimeStamp:{} durationTime:{}", pitch, audioEvent.getTimeStamp(), audioEvent.getEndTimeStamp(), durationTime);
+		if(receivedTime < offsetMS){
+			return;
+		}
+		
+		playTime += durationTime;
 		
 		// 获取当前音符信息
-		MusicXmlNote musicXmlNote = getCurrentMusicNote(null);
+		MusicXmlNote musicXmlNote = getCurrentMusicNote(null,null);
 
 		if (musicXmlNote == null) {
 			return;
@@ -179,81 +274,533 @@ public class UserChannelContext implements PitchDetectionHandler {
 		
 		//取出当前处理中的音符信息
 		NoteAnalysis noteAnalysis = getProcessingNote();
-		if(noteAnalysis == null){
-			noteAnalysis = new NoteAnalysis(musicXmlNote.getMusicalNotesIndex(),musicXmlNote.getMeasureIndex());
+		if(noteAnalysis == null || noteAnalysis.getDurationTime() == 0) {
+			noteAnalysis = new NoteAnalysis(musicXmlNote.getMusicalNotesIndex(), musicXmlNote.getMeasureIndex(), (int)musicXmlNote.getFrequency(), musicXmlNote.getDuration());
 		}
 		
-		double noteDurationTime = noteAnalysis.getDurationTime() + durationTime;
-		noteAnalysis.setDurationTime(noteDurationTime);
+		evaluatingSectionIndex.set(noteAnalysis.getSectionIndex());
 		
-		if(pitch != -1){
-			noteAnalysis.setChunks(noteAnalysis.getChunks() + 1);
-			noteAnalysis.setTotalPitch(noteAnalysis.getTotalPitch() + pitch);
+		if (noteAnalysis.getMusicalNotesIndex() >= 0 && noteAnalysis.getMusicalNotesIndex() <= getTotalMusicNoteIndex(null)) {
+
+			if (playTime >= (musicXmlNote.getDuration() + musicXmlNote.getTimeStamp())) {
+
+				LOGGER.info("------ Frequency:{}  splDb:{}  Power:{}  amplitude:{} time:{}------", playFrequency, splDb, power, amplitude, playTime);
+				
+				ChunkAnalysis lastChunkAnalysis = new ChunkAnalysis(playTime - durationTime, playTime, playFrequency, splDb, power, amplitude);
+				if(Math.abs(chunkAnalysisList.get(chunkAnalysisList.size() - 1).getFrequency() - lastChunkAnalysis.getFrequency()) > hardLevel.getFrequencyOffset()){
+					lastChunkAnalysis.setFrequency(-1);
+				}
+				if(chunkAnalysisList.get(chunkAnalysisList.size() - 1).getAmplitude() + 2 < lastChunkAnalysis.getAmplitude()){
+					lastChunkAnalysis.setPeak(true);
+				}
+				
+				//每个音符最后一个块
+				lastChunkAnalysisList.add(lastChunkAnalysis);
+				if(noteAnalysis.getMusicalNotesIndex() > 0){
+					lastChunkAnalysis = lastChunkAnalysisList.get(noteAnalysis.getMusicalNotesIndex() - 1);
+				}else{
+					lastChunkAnalysis = new ChunkAnalysis(0, 0, -1, 0, 0, 0);
+				}
+
+				if (musicXmlNote.getDontEvaluating()) {
+					noteAnalysis.setIgnore(true);
+				}
+				
+				if(chunkAnalysisList.size() == 0){// 延音线
+					
+				}
+				
+				noteAnalysis.setPlayFrequency(computeFrequency(chunkAnalysisList, lastChunkAnalysis, hardLevel.getFrequencyOffset()));
+				
+				//判断节奏(音符持续时间内有不间断的音高,就节奏正确)
+				boolean tempo = true;
+				if (subjectId == 23) {
+					if (musicXmlNote.getFrequency() == -1) {// 休止符
+						tempo = chunkAnalysisList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).count() <= 0;
+					}else{
+						tempo = computeTempoWithAmplitude2(chunkAnalysisList, lastChunkAnalysis);
+					}
+				}else{
+					if (musicXmlNote.getFrequency() == -1) {// 休止符
+						tempo = chunkAnalysisList.stream().filter(t -> t.getFrequency() > 100).count() <= 1;
+					}else{
+						tempo = computeTempoWithFrequency(chunkAnalysisList, lastChunkAnalysis);
+					}
+				}
+				
+				noteAnalysis.setDurationTime(chunkAnalysisList.stream().mapToDouble(t -> t.getDurationTime()).sum());
+				
+				noteAnalysis.setTempo(tempo);
+				
+				evaluateForNote(noteAnalysis);
+
+				LOGGER.info("当前音符下标[{}] 预计频率:{} 实际频率:{} 节奏:{}", noteAnalysis.getMusicalNotesIndex(), musicXmlNote.getFrequency(), noteAnalysis.getPlayFrequency(),
+						noteAnalysis.isTempo());
+				
+				doneNoteAnalysisList.add(noteAnalysis);
+				
+				//lastChunkAnalysis = chunkAnalysisList.get(chunkAnalysisList.size() - 1);
+				
+				chunkAnalysisList.clear();
+
+				// 准备处理下一个音符
+				int nextNoteIndex = musicXmlNote.getMusicalNotesIndex() + 1;
+				float nextNoteFrequence = -1;
+				double standDuration = 0;
+				MusicXmlNote nextMusicXmlNote = getCurrentMusicNote(null, nextNoteIndex);
+				if(nextMusicXmlNote != null){
+					nextNoteFrequence = nextMusicXmlNote.getFrequency();
+					standDuration = nextMusicXmlNote.getDuration();
+				}
+				
+				NoteAnalysis nextNoteAnalysis = new NoteAnalysis(nextNoteIndex, getMusicSectionIndex(null, nextNoteIndex), (int)nextNoteFrequence, standDuration);
+
+				noteAnalysis = nextNoteAnalysis;
+
+			} else {
+				
+				/*double skip = 0;
+				if (firstNoteIndexPerSectionList.contains(noteAnalysis.getMusicalNotesIndex())) {
+					skip = offsetMSOfSection;
+				}*/
+				//skip = noteAnalysis.getStandardDurationTime() * 0.2;
+				
+				LOGGER.info("Frequency:{}  splDb:{}  Power:{}  amplitude:{}", playFrequency, splDb, power, amplitude);
+				
+				chunkAnalysisList.add(new ChunkAnalysis(playTime - durationTime, playTime, playFrequency, splDb, power, amplitude));
+				
+			}
+
+			setProcessingNote(noteAnalysis);
 		}
 		
-		setProcessingNote(noteAnalysis);
+	}
+	
+
+	public int evaluateForSection(int sectionIndex, int subjectId){
+
+		int score = -1;
+		if(doneSectionAnalysisList.size() >= getTotalMusicSectionSize(null)){
+			return score;
+		}
 		
-		if(noteAnalysis.getIndex() <= getTotalMusicNoteIndexNum(null) && noteDurationTime >= musicXmlNote.getDuration()){
-			
-			noteAnalysis.setAvgPitch(noteAnalysis.getTotalPitch()/noteAnalysis.getChunks());
-			
-			LOGGER.info("当前音符下标[{}] 预计频率:{} 实际频率:{} 持续时间:{}", noteAnalysis.getIndex() , musicXmlNote.getFrequency(), noteAnalysis.getAvgPitch(), noteAnalysis.getDurationTime());
+		//取出当前小节的所有音符
+		List<NoteAnalysis> noteAnalysisList = doneNoteAnalysisList.stream().filter(t -> t.getSectionIndex() == sectionIndex).collect(Collectors.toList());
+		
+		long ignoreSize = noteAnalysisList.stream().filter(t -> t.isIgnore()).count();
+
+		SectionAnalysis sectionAnalysis = new SectionAnalysis();
+		sectionAnalysis.setIndex(sectionIndex);
+		sectionAnalysis.setNoteNum(noteAnalysisList.size());
+		sectionAnalysis.setIsIngore(ignoreSize == noteAnalysisList.size());
+		
+		//判断是否需要评分
+		MusicXmlSection musicXmlSection = getCurrentMusicSection(null, sectionIndex);
+		if(noteAnalysisList.size() == musicXmlSection.getNoteNum()){
+			//取出需要评测的音符
+			List<NoteAnalysis>  noteList = noteAnalysisList.stream().filter(t -> t.isIgnore() == false).collect(Collectors.toList());
 			
-			// 准备处理下一个音符
-			setProcessingNote(noteAnalysis = new NoteAnalysis(musicXmlNote.getMusicalNotesIndex() + 1,musicXmlNote.getMeasureIndex()));
+			if(noteList != null && noteList.size() > 0){
+				score = noteList.stream().mapToInt(t -> t.getScore()).sum() / noteList.size();
+			}
+			sectionAnalysis.setDurationTime(noteAnalysisList.stream().mapToDouble(t -> t.getDurationTime()).sum());
+			sectionAnalysis.setScore(score);
+
+			LOGGER.info("小节评分:{}",sectionAnalysis);
+			doneSectionAnalysisList.add(sectionAnalysis);
 		}
 		
+		return score;
+	}
+	
+	public Map<String, Integer> evaluateForMusic() {
+
+		Map<String, Integer> result = new HashMap<String, Integer>();
+		
+		result.put("playTime", (int) doneNoteAnalysisList.stream().mapToDouble(t -> t.getDurationTime()).sum());
+		
+		// 取出需要评测的音符
+		List<NoteAnalysis> noteAnalysisList = doneNoteAnalysisList.stream().filter(t -> t.isIgnore() == false).collect(Collectors.toList());
+
+		if (noteAnalysisList != null && noteAnalysisList.size() > 0) {
+			int intonationScore = 0;
+			int tempoScore = 0;
+			int integrityScore = 0;
+			int socre = 0;
+
+			for (NoteAnalysis note : noteAnalysisList) {
+				intonationScore += note.getIntonationScore();
+				tempoScore += note.getTempoScore();
+				integrityScore += note.getIntegrityScore();
+				socre += note.getScore();
+			}
+
+			tempoScore = tempoScore / noteAnalysisList.size();
+			intonationScore = intonationScore / noteAnalysisList.size();
+			integrityScore = integrityScore / noteAnalysisList.size();
+
+			result.put("cadence", tempoScore);
+			result.put("intonation", intonationScore);
+			result.put("integrity", integrityScore);
+	        result.put("recordId", recordId.intValue());
 
-		/*// 获取字节流
-		float[] bufferBytes = audioEvent.getFloatBuffer();
+			int score = socre / noteAnalysisList.size();
 
-		// 粘合音符数据
-		float[] totalNoteBytes = ArrayUtil.mergeFloat(getHandlerBufferBytes(), bufferBytes);
-		setHandlerBufferBytes(totalNoteBytes);
+			// 平均得分
+			if (getMusicXmlBasicInfo(null).getSubjectId() == 23) {
+				score = tempoScore;
+			}
+			result.put("score", score);
+		}
+		return result;
+	}
+	
+
+	public void evaluateForNote(NoteAnalysis noteAnalysis) {
+
+		double playDurationTime = 0;
 		
+		if (subjectId == 23) {
+			if (noteAnalysis.getFrequency() == -1) {// 休止符
+				if (!noteAnalysis.isTempo()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
+				} else {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
+				}
+			}else{
+				int beatTimes = (int) chunkAnalysisList.stream().filter(t -> t.getAmplitude() > hardLevel.getAmplitudeThreshold()).count();
+				
+				if(beatTimes == 0){
+					noteAnalysis.setMusicalErrorType(NoteErrorType.NOT_PLAY);
+				}else if (!noteAnalysis.isTempo()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
+				} else {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
+				}
+			}
+		} else {
+			if (noteAnalysis.getFrequency() == -1) {// 休止符
+
+				playDurationTime = chunkAnalysisList.stream().filter(t -> t.getFrequency() <= 100).mapToDouble(t -> t.getDurationTime()).sum();
+
+				if (!noteAnalysis.isTempo()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
+				} else if (playDurationTime * 100 / noteAnalysis.getDurationTime() < hardLevel.getIntegrityRange()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.INTEGRITY_WRONG);
+				} else if (Math.abs(noteAnalysis.getFrequency() - noteAnalysis.getPlayFrequency()) > hardLevel.getFrequencyOffset()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.INTONATION_WRONG);
+				} else {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
+				}
+			} else {
+				playDurationTime = chunkAnalysisList.stream().filter(t -> t.getFrequency() > 100 && t.getFrequency() < 2000)
+						.mapToDouble(t -> t.getDurationTime()).sum();
+
+				if (playDurationTime * 100 / noteAnalysis.getDurationTime() < hardLevel.getNotPlayRange()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.NOT_PLAY);
+				} else if (playDurationTime * 100 / noteAnalysis.getDurationTime() < hardLevel.getIntegrityRange()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.INTEGRITY_WRONG);
+				} else if (!noteAnalysis.isTempo()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.CADENCE_WRONG);
+				} else if (Math.abs(noteAnalysis.getFrequency() - noteAnalysis.getPlayFrequency()) > hardLevel.getFrequencyOffset()) {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.INTONATION_WRONG);
+				} else {
+					noteAnalysis.setMusicalErrorType(NoteErrorType.RIGHT);
+				}
+			}
+		}
 
-		// 计算当前音符的数据长度 公式:数据量(字节/秒)= 采样频率(Hz)× (采样位数(bit)/ 8) × 声道数
-		int length = (int) (44100 * (16 / 8) * 1 * musicXmlNote.getDuration() / 1000);
+		// 计算音分
+		int tempoScore = 0;
+		int integrityScore = 0;
+		int intonationScore = 100 - new BigDecimal(Math.abs(YINPitchDetector.hertzToAbsoluteCent(noteAnalysis.getPlayFrequency())
+				- YINPitchDetector.hertzToAbsoluteCent(noteAnalysis.getFrequency()))).multiply(new BigDecimal(10)).divide(new BigDecimal(17), BigDecimal.ROUND_UP)
+				.setScale(0, BigDecimal.ROUND_UP).intValue();
+		if (intonationScore < 0) {
+			intonationScore = 0;
+		} else if (intonationScore > 100) {
+			intonationScore = 100;
+		}
 
-		if (noteAnalysis.getIndex() <= getTotalMusicNoteIndexNum(null) && totalNoteBytes.length >= length) {
-			// 处理当前音符
-			float[] noteFloatData = new float[length];
-			System.arraycopy(totalNoteBytes, 0, noteFloatData, 0, length);
-			// 剩余未处理的数据
-			setHandlerBufferBytes(ArrayUtil.extractFloat(totalNoteBytes, length - 1, totalNoteBytes.length - 1));
+		if (noteAnalysis.getMusicalErrorType() == NoteErrorType.NOT_PLAY) {
+			intonationScore = 0;
+		} else {
 
-			// 获取频率数据
-			float npitch = getPitch(noteFloatData, audioEvent.getBufferSize());
+			if (noteAnalysis.isTempo()) {
+				tempoScore = 100;
+				noteAnalysis.setTempoScore(tempoScore);
+			}
+
+			double durationPercent = playDurationTime / noteAnalysis.getDurationTime();
+			if (durationPercent >= 0.7) {
+				integrityScore = 100;
+			} else if (durationPercent < 0.7 && durationPercent >= 0.5) {
+				integrityScore = 50;
+			}
+			noteAnalysis.setIntegrityScore(integrityScore);
+		}
+		noteAnalysis.setIntonationScore(intonationScore);
+		if (subjectId == 23) {
+			noteAnalysis.setScore(tempoScore);
+		} else {
+			noteAnalysis.setScore(new BigDecimal(intonationScore + tempoScore + integrityScore).divide(new BigDecimal(3), 2).setScale(0, BigDecimal.ROUND_UP)
+					.intValue());
+		}
+	}
+	private int computeFrequency(List<ChunkAnalysis> chunkAnalysisList, ChunkAnalysis lastChunkAnalysis, int offsetRange) {
+		
+		List<ChunkAnalysis> chunkList = new ArrayList<ChunkAnalysis>(chunkAnalysisList);
+		
+		int tenutoSize = 0;
+		// 剔除上一个音延续下来的信号
+		if (lastChunkAnalysis != null) {
+			int lastFrequency = lastChunkAnalysis.getFrequency();
+			Iterator<ChunkAnalysis> iterable = chunkList.iterator();
+			while (iterable.hasNext()) {
+				if (Math.abs(lastFrequency - iterable.next().getFrequency()) > offsetRange) {
+					break;
+				}
+				iterable.remove();
+				tenutoSize++;
+			}
 
-			LOGGER.info("第{}个音符的样本频率:{} 实际频率:{}", noteAnalysis.getIndex(), musicXmlNote.getFrequency(), npitch);
+			if (chunkList.size() == 0) {
+				return lastFrequency < 100 ? -1 : lastFrequency;
+			}
+		}
 
-			// 准备处理下一个音符
-			setProcessingNote(noteAnalysis = new NoteAnalysis(musicXmlNote.getMusicalNotesIndex() + 1,musicXmlNote.getMeasureIndex()));
-		}*/
+		List<Integer> chunkFrequencyList = chunkList.stream().map(t -> t.getFrequency()).filter(t -> t.doubleValue() > 100 && t.doubleValue() < 2000)
+				.collect(Collectors.toList());
+		
+		if (chunkFrequencyList.size() == 0) {
+			return -1;
+		}
+		
+		if(tenutoSize * 100 / chunkAnalysisList.size() > 50){
+			return lastChunkAnalysis.getFrequency();
+		}
+		
+		// 排序
+		chunkFrequencyList = chunkFrequencyList.stream().sorted().collect(Collectors.toList());
+		
+		int tempFrequency = chunkFrequencyList.get(0), totalFrequency = chunkFrequencyList.get(0);
+
+		int maxChunkSize = 0;
+		int frequency = chunkFrequencyList.get(0);
+		int chunkSize = 1;
+		int avgFrequency = chunkFrequencyList.get(0);
+		for (int i = 1; i < chunkFrequencyList.size(); i++) {
+			tempFrequency = chunkFrequencyList.get(i);
+
+			if (Math.abs(avgFrequency - tempFrequency) > offsetRange) {
+
+				avgFrequency = totalFrequency / chunkSize;
+
+				if (maxChunkSize < chunkSize) {
+					maxChunkSize = chunkSize;
+					frequency = avgFrequency;
+				}
+
+				chunkSize = 1;
+				avgFrequency = tempFrequency;
+				totalFrequency = tempFrequency;
+			} else {
+				chunkSize++;
+				totalFrequency += tempFrequency;
+			}
+
+			if (i == chunkFrequencyList.size() - 1) {
+				if (maxChunkSize <= chunkSize) {
+					maxChunkSize = chunkSize;
+					frequency = avgFrequency;
+				}
+			}
+		}
+
+		if (chunkFrequencyList.size() < 3) {
+			frequency = (int)(chunkFrequencyList.stream().collect(Collectors.summingDouble(t -> t)) / chunkFrequencyList.size());
+		}
+		
+		if(frequency < 100){
+			frequency = -1;
+		}
+
+		return frequency;
 	}
+	
+	private boolean computeTempoWithFrequency(List<ChunkAnalysis> chunkAnalysisList, ChunkAnalysis lastChunkAnalysis){
+		
+		List<ChunkAnalysis> chunkList = new ArrayList<ChunkAnalysis>(chunkAnalysisList);
+		
+		// 剔除上一个音延续下来的信号
+		if (lastChunkAnalysis != null) {
+			double lastFrequency = lastChunkAnalysis.getFrequency();
+			Iterator<ChunkAnalysis> iterable = chunkList.iterator();
+			while (iterable.hasNext()) {
+				if (Math.abs(lastFrequency - iterable.next().getFrequency()) > hardLevel.getFrequencyOffset()) {
+					break;
+				}
+				iterable.remove();
+			}
+		}
+		
+		if(chunkList.size() == 0){
+			return false;
+		}
+		
+		ChunkAnalysis chunkAnalysis = null;
+		boolean tempo = false;
+		boolean isContinue = true;
+		int unplayedSize = 0;
+		int firstPeakIndex = -1;
+		for (int i = 0; i < chunkList.size(); i++) {
+			chunkAnalysis = chunkList.get(i);
+			if (chunkAnalysis != null) {
+				if (chunkAnalysis.getFrequency() > 100) {
+					tempo = true;
+					if(firstPeakIndex == -1){
+						firstPeakIndex = i;
+					}
+					if (isContinue == false) {
+						if (chunkAnalysisList.size() < 5) {
+							if (unplayedSize > 0) {
+								tempo = false;
+								break;
+							}
+						} else {
+							if ((unplayedSize * 100 / chunkAnalysisList.size()) > hardLevel.getNotPlayRange() || unplayedSize > 1) {
+								tempo = false;
+								break;
+							}
+						}
+					}
+				} else {
+					if (tempo == true) {
+						isContinue = false;
+						unplayedSize++;
+					}
+				}
+			}
+		}
+		
+		if (tempo) {
+			// 判断进入时间点
+			if((chunkAnalysisList.size() - chunkList.size() + firstPeakIndex) * 100 /chunkAnalysisList.size() > hardLevel.getTempoOffsetOfPercent()){
+				tempo = false;
+			}
+		}
+		
+		return tempo;
+	}
+	
+	private boolean computeTempoWithAmplitude2(List<ChunkAnalysis> chunkAnalysisList, ChunkAnalysis lastChunkAnalysis) {
 
-	private float getPitch(float[] audioBuffer, int bufferSize) {
+		List<Integer> chunkAmplitudeList = chunkAnalysisList.stream().map(ChunkAnalysis::getAmplitude).collect(Collectors.toList());
 
-		int blankNum = audioBuffer.length % bufferSize;
-		float[] zeroBytes = new float[blankNum];
+		if (chunkAmplitudeList.size() <= 3) {
+			return chunkAmplitudeList.stream().filter(t -> t.floatValue() > hardLevel.getAmplitudeThreshold()).count() > 0;
+		}
+		
+		chunkAmplitudeList.add(0, lastChunkAnalysis.getAmplitude());
+		
+		// 检测是否有多个波峰
+		boolean tempo = false;
+		boolean isContinue = true;
+		int firstPeakIndex = -1;
+		int peakSize = 0;
+		for (int i = 1; i < chunkAmplitudeList.size(); i++) {
+			if (chunkAmplitudeList.get(i) > hardLevel.getAmplitudeThreshold() && chunkAmplitudeList.get(i) > chunkAmplitudeList.get(i - 1) + 2) {
+				tempo = true;
+				if(firstPeakIndex == -1){
+					firstPeakIndex = i;
+					peakSize++;
+				}
+				if (isContinue == false) {
+					tempo = false;
+					peakSize++;
+					break;
+				}
+			} else {
+				if (tempo == true) {
+					isContinue = false;
+				}
+			}
+		}
+		
+		if(peakSize == 0){
+			tempo = lastChunkAnalysis.isPeak();
+		}else if(peakSize == 1){
+			tempo = true;
+		}else{
+			tempo = false;
+		}
+		
+		if (tempo) {
+			// 判断进入时间点
+			if((firstPeakIndex - 1) * 100 /chunkAmplitudeList.size() > hardLevel.getTempoOffsetOfPercent()){
+				tempo = false;
+			}
+		}
+		
+		return tempo;
+	}
+	
+	private boolean computeTempoWithAmplitude(List<ChunkAnalysis> chunkAnalysisList, ChunkAnalysis lastChunkAnalysis) {
 
-		audioBuffer = ArrayUtil.mergeFloat(audioBuffer, zeroBytes);
+		boolean tempo = false;
 
-		int times = audioBuffer.length / bufferSize;
+		List<Integer> chunkAmplitudeList = chunkAnalysisList.stream().map(ChunkAnalysis::getAmplitude).collect(Collectors.toList());
 
-		float totalPitch = 0f;
+		if (chunkAmplitudeList.size() < 3) {
+			return chunkAmplitudeList.stream().filter(t -> t.floatValue() > hardLevel.getAmplitudeThreshold()).count() > 0;
+		}
+		
+		chunkAmplitudeList.add(0, lastChunkAnalysis.getAmplitude());
 
-		float[] bufferByte = new float[bufferSize];
-		for (int i = 0; i < times; i++) {
-			bufferByte = ArrayUtil.extractFloat(audioBuffer, i * bufferSize, (i + 1) * bufferSize);
-			float pitch = pitchDetector.getPitch(bufferByte).getPitch();
-			if(pitch == -1){
+		// 检测是否有多个波峰
+		int peakSize = 0;
+		int minPeakIndex = -1;
+		for (int i = 1; i < chunkAmplitudeList.size(); i++) {
+			if (chunkAmplitudeList.get(i) < hardLevel.getAmplitudeThreshold()) {
 				continue;
 			}
-			totalPitch += pitch;
+			if (i == chunkAmplitudeList.size() - 1) {
+				if (chunkAmplitudeList.get(i) > chunkAmplitudeList.get(i - 1)) {
+					peakSize++;
+					if (minPeakIndex == -1 || minPeakIndex > i) {
+						minPeakIndex = i;
+					}
+				}
+			} else {
+				if (chunkAmplitudeList.get(i - 1) < chunkAmplitudeList.get(i) && chunkAmplitudeList.get(i) >= chunkAmplitudeList.get(i + 1)) {
+					//if(Math.abs(chunkAmplitudeList.get(i - 1) - chunkAmplitudeList.get(i)) > 2 || Math.abs(chunkAmplitudeList.get(i) - chunkAmplitudeList.get(i + 1)) > 2){
+						peakSize++;
+						if (minPeakIndex == -1 || minPeakIndex > i) {
+							minPeakIndex = i;
+						}
+					//}
+				}
+			}
 		}
 
-		return totalPitch / times;
-	}
+		if (peakSize == 1) {
+			if (lastChunkAnalysis.isPeak() == false) {
+				tempo = true;
+			}
+		} else if (peakSize == 0) {
+			if (lastChunkAnalysis.isPeak()) {
+				tempo = true;
+			}
+		}
 
+		// 检测是否延迟进入
+		if (tempo == true) {
+			if (minPeakIndex * 100 / chunkAmplitudeList.size() > hardLevel.getTempoOffsetOfPercent() && chunkAmplitudeList.size() > 3) {
+				tempo = false;
+			}
+		}
+
+		return tempo;
+	}
+	
 }

+ 67 - 0
audio-analysis/src/main/java/com/yonge/nettty/dto/WebSocketResponse.java

@@ -0,0 +1,67 @@
+package com.yonge.nettty.dto;
+
+import org.springframework.http.HttpStatus;
+
+public class WebSocketResponse<T> {
+
+	private Head header = new Head();
+
+	private T body;
+
+	public WebSocketResponse(Head header, T body) {
+		this.header = header;
+		this.body = body;
+	}
+
+	public WebSocketResponse(String command, T body) {
+		this.header = new Head(command, HttpStatus.OK.value());
+		this.body = body;
+	}
+
+	public Head getHeader() {
+		return header;
+	}
+
+	public void setHeader(Head header) {
+		this.header = header;
+	}
+
+	public T getBody() {
+		return body;
+	}
+
+	public void setBody(T body) {
+		this.body = body;
+	}
+
+	public static class Head {
+		private int status = HttpStatus.OK.value();
+		private String commond = "";
+
+		public Head() {
+
+		}
+
+		public Head(String commond, int status) {
+			this.commond = commond;
+			this.status = status;
+		}
+
+		public int getStatus() {
+			return status;
+		}
+
+		public void setStatus(int status) {
+			this.status = status;
+		}
+
+		public String getCommond() {
+			return commond;
+		}
+
+		public void setCommond(String commond) {
+			this.commond = commond;
+		}
+
+	}
+}

+ 44 - 0
audio-analysis/src/main/java/com/yonge/nettty/entity/MusicXmlBasicInfo.java

@@ -1,7 +1,11 @@
 package com.yonge.nettty.entity;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.stream.Collectors;
 
 public class MusicXmlBasicInfo {
 
@@ -24,9 +28,13 @@ public class MusicXmlBasicInfo {
 	private String heardLevel;
 
 	private String uuid;
+	
+	private int beatLength;
 
 	private List<MusicXmlNote> musicXmlInfos = new ArrayList<MusicXmlNote>();
 
+	private Map<Integer, MusicXmlSection> musicXmlSectionMap = new HashMap<Integer, MusicXmlSection>();
+
 	public Integer getId() {
 		return id;
 	}
@@ -107,6 +115,14 @@ public class MusicXmlBasicInfo {
 		this.uuid = uuid;
 	}
 
+	public int getBeatLength() {
+		return beatLength;
+	}
+
+	public void setBeatLength(int beatLength) {
+		this.beatLength = beatLength;
+	}
+
 	public List<MusicXmlNote> getMusicXmlInfos() {
 		return musicXmlInfos;
 	}
@@ -114,4 +130,32 @@ public class MusicXmlBasicInfo {
 	public void setMusicXmlInfos(List<MusicXmlNote> musicXmlInfos) {
 		this.musicXmlInfos = musicXmlInfos;
 	}
+
+	public Map<Integer, MusicXmlSection> getMusicXmlSectionMap() {
+
+		if (musicXmlSectionMap.size() == 0) {
+			Map<Integer, List<MusicXmlNote>> map = musicXmlInfos.stream().collect(Collectors.groupingBy(MusicXmlNote::getMeasureIndex));
+
+			List<MusicXmlNote> noteList = null;
+			MusicXmlSection section = null;
+			for (Entry<Integer, List<MusicXmlNote>> entry : map.entrySet()) {
+				noteList = entry.getValue();
+
+				section = new MusicXmlSection();
+
+				section.setStartTime(noteList.stream().map(t -> t.getTimeStamp()).distinct().min(Double::compareTo).get());
+				section.setDuration(noteList.stream().mapToDouble(t -> t.getDuration()).sum());
+				section.setNoteNum(noteList.size());
+				section.setIndex(entry.getKey());
+
+				musicXmlSectionMap.put(entry.getKey(), section);
+			}
+		}
+
+		return musicXmlSectionMap;
+	}
+
+	public void setMusicXmlSectionMap(Map<Integer, MusicXmlSection> musicXmlSectionMap) {
+		this.musicXmlSectionMap = musicXmlSectionMap;
+	}
 }

+ 7 - 7
audio-analysis/src/main/java/com/yonge/nettty/entity/MusicXmlNote.java

@@ -6,10 +6,10 @@ package com.yonge.nettty.entity;
 public class MusicXmlNote {
 
 	// 音符起始时间戳(第一个音符是0ms)
-	private int timeStamp;
+	private double timeStamp;
 
 	// 当前音符持续的播放时间(ms)
-	private int duration;
+	private double duration;
 
 	// 当前音符的频率
 	private float frequency;
@@ -26,19 +26,19 @@ public class MusicXmlNote {
 	// 当前音符在整个曲谱中的下标(从0开始)
 	private int musicalNotesIndex;
 
-	public int getTimeStamp() {
+	public double getTimeStamp() {
 		return timeStamp;
 	}
 
-	public void setTimeStamp(int timeStamp) {
+	public void setTimeStamp(double timeStamp) {
 		this.timeStamp = timeStamp;
 	}
 
-	public int getDuration() {
+	public double getDuration() {
 		return duration;
 	}
 
-	public void setDuration(int duration) {
+	public void setDuration(double duration) {
 		this.duration = duration;
 	}
 
@@ -66,7 +66,7 @@ public class MusicXmlNote {
 		this.measureIndex = measureIndex;
 	}
 
-	public boolean isDontEvaluating() {
+	public boolean getDontEvaluating() {
 		return dontEvaluating;
 	}
 

+ 50 - 0
audio-analysis/src/main/java/com/yonge/nettty/entity/MusicXmlSection.java

@@ -0,0 +1,50 @@
+package com.yonge.nettty.entity;
+
+/**
+ * 小节信息
+ */
+public class MusicXmlSection {
+
+	private double startTime;
+
+	// 当前小节持续的播放时间(ms)
+	private double duration;
+
+	// 音符数量
+	private int noteNum;
+
+	private int index;
+
+	public double getDuration() {
+		return duration;
+	}
+
+	public void setDuration(double duration) {
+		this.duration = duration;
+	}
+
+	public int getNoteNum() {
+		return noteNum;
+	}
+
+	public void setNoteNum(int noteNum) {
+		this.noteNum = noteNum;
+	}
+
+	public double getStartTime() {
+		return startTime;
+	}
+
+	public void setStartTime(double startTime) {
+		this.startTime = startTime;
+	}
+
+	public int getIndex() {
+		return index;
+	}
+
+	public void setIndex(int index) {
+		this.index = index;
+	}
+
+}

+ 0 - 70
audio-analysis/src/main/java/com/yonge/nettty/entity/NoteAnalysis.java

@@ -1,70 +0,0 @@
-package com.yonge.nettty.entity;
-
-public class NoteAnalysis {
-
-	private int index;
-
-	private double durationTime;
-
-	private double avgPitch;
-
-	private int sectionIndex;
-
-	private double totalPitch;
-
-	private int chunks;
-
-	public NoteAnalysis(int index, int sectionIndex) {
-		this.index = index;
-		this.sectionIndex = sectionIndex;
-	}
-
-	public int getIndex() {
-		return index;
-	}
-
-	public void setIndex(int index) {
-		this.index = index;
-	}
-
-	public double getDurationTime() {
-		return durationTime;
-	}
-
-	public void setDurationTime(double durationTime) {
-		this.durationTime = durationTime;
-	}
-
-	public double getAvgPitch() {
-		return avgPitch;
-	}
-
-	public void setAvgPitch(double avgPitch) {
-		this.avgPitch = avgPitch;
-	}
-
-	public int getSectionIndex() {
-		return sectionIndex;
-	}
-
-	public void setSectionIndex(int sectionIndex) {
-		this.sectionIndex = sectionIndex;
-	}
-
-	public double getTotalPitch() {
-		return totalPitch;
-	}
-
-	public void setTotalPitch(double totalPitch) {
-		this.totalPitch = totalPitch;
-	}
-
-	public int getChunks() {
-		return chunks;
-	}
-
-	public void setChunks(int chunks) {
-		this.chunks = chunks;
-	}
-
-}

+ 7 - 5
audio-analysis/src/main/java/com/yonge/netty/server/NettyServer.java

@@ -23,11 +23,12 @@ import javax.annotation.PreDestroy;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
 import org.springframework.context.annotation.Configuration;
 
 import com.yonge.netty.server.handler.NettyServerHandler;
-import com.yonge.netty.server.messagehandler.BinaryWebSocketFrameHandler;
-import com.yonge.netty.server.messagehandler.TextWebSocketHandler;
+import com.yonge.netty.server.handler.message.BinaryWebSocketFrameHandler;
+import com.yonge.netty.server.handler.message.TextWebSocketHandler;
 
 @Configuration
 public class NettyServer {
@@ -42,12 +43,13 @@ public class NettyServer {
 	/**
 	 * 端口号
 	 */
-	private int port = 8080;
+	@Value("${netty.server.port}")
+	private int port;
 
 	/**
 	 * webSocket路径
 	 */
-	private String webSocketPath = "/audioEvaluate";
+	private String webSocketPath = "/audioAnalysis";
 
 	private EventLoopGroup bossGroup = new NioEventLoopGroup();
 
@@ -103,7 +105,7 @@ public class NettyServer {
 				/*
 				 * 说明: 1、对应webSocket,它的数据是以帧(frame)的形式传递 2、浏览器请求时 ws://localhost:58080/xxx 表示请求的uri 3、核心功能是将http协议升级为ws协议,保持长连接
 				 */
-				channelPipeline.addLast(new WebSocketServerProtocolHandler(webSocketPath, WEBSOCKET_PROTOCOL, true, 65536 * 10, false, true));
+				channelPipeline.addLast(new WebSocketServerProtocolHandler(webSocketPath, WEBSOCKET_PROTOCOL, true, 65536 * 1000, false, true));
 
 				// 自定义的handler,处理业务逻辑
 				channelPipeline.addLast(nettyServerHandler);

+ 0 - 61
audio-analysis/src/main/java/com/yonge/netty/server/NioAudioInputStream.java

@@ -1,61 +0,0 @@
-package com.yonge.netty.server;
-
-import be.tarsos.dsp.io.TarsosDSPAudioFormat;
-import be.tarsos.dsp.io.TarsosDSPAudioInputStream;
-import org.springframework.stereotype.Component;
-
-import javax.sound.sampled.AudioFormat;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-
-/**
- * @Author Joburgess
- * @Date 2021/8/4 0004
- */
-public class NioAudioInputStream implements TarsosDSPAudioInputStream {
-
-    private RandomAccessFile randomAccessFile;
-    private AudioFormat format;
-
-    private long position = 44;
-
-    public NioAudioInputStream() {
-    }
-
-    public NioAudioInputStream(RandomAccessFile randomAccessFile, AudioFormat audioFormat) {
-        this.randomAccessFile = randomAccessFile;
-        this.format = audioFormat;
-    }
-
-    @Override
-    public long skip(long bytesToSkip) throws IOException {
-        return randomAccessFile.skipBytes((int) bytesToSkip);
-    }
-
-    @Override
-    public int read(byte[] b, int off, int len) throws IOException {
-        randomAccessFile.seek(position);
-        int read = randomAccessFile.read(b, off, len);
-        if(read>0){
-            position += read;
-        }
-        return read;
-    }
-
-    @Override
-    public void close() throws IOException {
-        randomAccessFile.close();
-    }
-
-    @Override
-    public TarsosDSPAudioFormat getFormat() {
-        boolean isSigned = format.getEncoding() == AudioFormat.Encoding.PCM_SIGNED;
-        TarsosDSPAudioFormat tarsosDSPFormat = new TarsosDSPAudioFormat(format.getSampleRate(), format.getSampleSizeInBits(), format.getChannels(), isSigned, format.isBigEndian());
-        return tarsosDSPFormat;
-    }
-
-    @Override
-    public long getFrameLength() {
-        return 0;
-    }
-}

+ 9 - 0
audio-analysis/src/main/java/com/yonge/netty/server/handler/ChannelContextConstants.java

@@ -0,0 +1,9 @@
+package com.yonge.netty.server.handler;
+
+import io.netty.util.AttributeKey;
+
+public class ChannelContextConstants {
+
+	public static final AttributeKey<String> CHANNEL_ATTR_KEY_ACTION = AttributeKey.newInstance("action");
+	
+}

+ 6 - 3
audio-analysis/src/main/java/com/yonge/netty/server/NettyChannelManager.java → audio-analysis/src/main/java/com/yonge/netty/server/handler/NettyChannelManager.java

@@ -1,7 +1,8 @@
-package com.yonge.netty.server;
+package com.yonge.netty.server.handler;
 
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelId;
+import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;
 import io.netty.util.AttributeKey;
 
 import java.util.concurrent.ConcurrentHashMap;
@@ -11,6 +12,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.stereotype.Component;
 
+import com.ym.mec.util.json.JsonUtil;
+
 @Component
 public class NettyChannelManager {
 
@@ -98,7 +101,7 @@ public class NettyChannelManager {
 	 * @param user 用户
 	 * @param message 消息体
 	 */
-	public void send(String user, Object message) {
+	public void sendTextMessage(String user, Object message) {
 		// 获得用户对应的 Channel
 		Channel channel = userChannels.get(user);
 		if (channel == null) {
@@ -110,7 +113,7 @@ public class NettyChannelManager {
 			return;
 		}
 		// 发送消息
-		channel.writeAndFlush(message);
+		channel.writeAndFlush(new TextWebSocketFrame(JsonUtil.toJSONString(message)));
 	}
 
 	/**

+ 0 - 2
audio-analysis/src/main/java/com/yonge/netty/server/handler/NettyServerHandler.java

@@ -14,8 +14,6 @@ import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.security.oauth2.common.OAuth2AccessToken;
 import org.springframework.stereotype.Component;
 
-import com.yonge.netty.server.NettyChannelManager;
-
 @Component
 @ChannelHandler.Sharable
 public class NettyServerHandler extends ChannelInboundHandlerAdapter {

+ 10 - 0
audio-analysis/src/main/java/com/yonge/netty/server/handler/message/BinaryMessageHandler.java

@@ -0,0 +1,10 @@
package com.yonge.netty.server.handler.message;

import io.netty.channel.Channel;

/**
 * Strategy interface for processing the payload of a binary WebSocket frame.
 * Implementations are discovered from the Spring context and selected by the
 * action string previously stored on the channel.
 */
public interface BinaryMessageHandler {
	
	/**
	 * @return the action name this handler is registered for
	 *         (e.g. "PITCH_DETECTION", "SOUND_COMPARE")
	 */
	String getAction();

	/**
	 * Process one binary payload for the given user/channel.
	 *
	 * @param user the user id bound to the channel
	 * @param channel the Netty channel the frame arrived on
	 * @param bytes the raw frame payload
	 * @return true if the payload was fully processed, false if it was skipped
	 */
	boolean handler(String user, Channel channel, byte[] bytes);
}

+ 192 - 0
audio-analysis/src/main/java/com/yonge/netty/server/handler/message/BinaryWebSocketFrameHandler.java

@@ -0,0 +1,192 @@
package com.yonge.netty.server.handler.message;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame;

import java.io.File;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

import javax.sound.sampled.AudioFormat;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.stereotype.Component;

import com.yonge.audio.analysis.AudioFloatConverter;
import com.yonge.audio.utils.ArrayUtil;
import com.yonge.nettty.dto.UserChannelContext;
import com.yonge.nettty.dto.WebSocketResponse;
import com.yonge.nettty.entity.MusicXmlBasicInfo;
import com.yonge.netty.server.handler.ChannelContextConstants;
import com.yonge.netty.server.handler.NettyChannelManager;
import com.yonge.netty.server.processor.WaveformWriter;
import com.yonge.netty.server.service.UserChannelContextService;

/**
 * Routes binary WebSocket frames (raw PCM audio) according to the action string
 * previously stored on the channel by the text handler.
 *
 * <p>NOTE(review): the "SOUND_COMPARE" branch duplicates the logic of
 * {@code CompareHandler} (registered under the same action) but with a different
 * recording directory; consider delegating to the registered handler instead —
 * confirm which path is intended before unifying.
 */
@Component
@ChannelHandler.Sharable
public class BinaryWebSocketFrameHandler extends SimpleChannelInboundHandler<BinaryWebSocketFrame> implements ApplicationContextAware, InitializingBean {

	private final static Logger LOGGER = LoggerFactory.getLogger(BinaryWebSocketFrameHandler.class);

	@Autowired
	private NettyChannelManager nettyChannelManager;

	@Autowired
	private UserChannelContextService userChannelContextService;

	private ApplicationContext applicationContext;

	// action -> handler, built once from the Spring context in afterPropertiesSet()
	private Map<String, BinaryMessageHandler> handlerMap;

	/**
	 * @describe 采样率
	 */
	private float sampleRate = 44100;

	/**
	 * 每个采样大小(Bit)
	 */
	private int bitsPerSample = 16;

	/**
	 * 通道数
	 */
	private int channels = 1;

	/**
	 * @describe 采样大小
	 */
	private int bufferSize = 1024 * 4;

	private boolean signed = true;

	private boolean bigEndian = false;

	private AudioFormat audioFormat = new AudioFormat(sampleRate, bitsPerSample, channels, signed, bigEndian);

	private AudioFloatConverter converter = AudioFloatConverter.getConverter(audioFormat);

	private String tmpFileDir = "/mdata/soundCompare/";

	// DateTimeFormatter is immutable and thread-safe, unlike SimpleDateFormat which
	// must not be shared by a @Sharable singleton handler. The original pattern
	// "yyMMddHHmmSS" used SS (milliseconds); corrected to ss (seconds).
	private static final DateTimeFormatter FILE_NAME_FORMATTER = DateTimeFormatter.ofPattern("yyMMddHHmmss");

	@Override
	protected void channelRead0(ChannelHandlerContext ctx, BinaryWebSocketFrame frame) throws Exception {

		Channel channel = ctx.channel();

		// retain() balances the release() in the finally block; SimpleChannelInboundHandler
		// releases the frame itself after this method returns.
		ByteBuf buf = frame.content().retain();

		try {
			byte[] datas = ByteBufUtil.getBytes(buf);

			String user = nettyChannelManager.getUser(channel);

			String action = channel.attr(ChannelContextConstants.CHANNEL_ATTR_KEY_ACTION).get();

			if (action == null) {
				// 客户端还未通过文本消息设置action,无法路由该二进制消息
				LOGGER.warn("通道[{}]未设置action,丢弃二进制消息", channel.id());
				return;
			}

			if (handlerMap == null) {
				// afterPropertiesSet尚未执行;原实现在记录日志后仍继续访问handlerMap导致空指针
				LOGGER.error("消息处理器没有初始化");
				return;
			}

			BinaryMessageHandler handler = handlerMap.get(action);

			switch (action) {
			case "PITCH_DETECTION":
				if (handler == null) {
					LOGGER.error("没有找到action[{}]对应的消息处理器", action);
				} else {
					handler.handler(user, channel, datas);
				}
				break;
			case "SOUND_COMPARE":

				UserChannelContext channelContext = userChannelContextService.getChannelContext(channel);

				if (channelContext == null) {
					return;
				}

				// 写录音文件
				WaveformWriter waveFileProcessor = channelContext.getWaveFileProcessor();
				if (waveFileProcessor == null) {
					File file = new File(tmpFileDir + user + "_" + FILE_NAME_FORMATTER.format(LocalDateTime.now()) + ".wav");
					waveFileProcessor = new WaveformWriter(file.getAbsolutePath());
					channelContext.setWaveFileProcessor(waveFileProcessor);
				}
				waveFileProcessor.process(datas);

				// 跳过wav文件头,只保留PCM数据
				datas = channelContext.skipHeader(datas);

				if (datas.length == 0) {
					return;
				}

				channelContext.setChannelBufferBytes(ArrayUtil.mergeByte(channelContext.getChannelBufferBytes(), datas));

				int totalLength = channelContext.getChannelBufferBytes().length;

				// 每累积满bufferSize字节处理一个窗口
				while (totalLength >= bufferSize) {
					byte[] bufferData = ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), 0, bufferSize - 1);

					if (bufferSize != totalLength) {
						channelContext.setChannelBufferBytes(ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), bufferSize, totalLength - 1));
					} else {
						channelContext.setChannelBufferBytes(new byte[0]);
					}

					// 16bit采样:2字节对应1个float样本
					float[] sampleFloats = new float[bufferSize / 2];

					converter.toFloatArray(bufferData, sampleFloats);

					channelContext.handle(sampleFloats, audioFormat);

					MusicXmlBasicInfo musicXmlBasicInfo = channelContext.getMusicXmlBasicInfo(null);

					if (musicXmlBasicInfo != null) {
						int sectionIndex = channelContext.getEvaluatingSectionIndex().get();

						// 评分;仅当score >= 0时推送(负值的具体含义由evaluateForSection定义,待确认)
						int score = channelContext.evaluateForSection(sectionIndex, musicXmlBasicInfo.getSubjectId());
						if (score >= 0) {

							Map<String, Object> params = new HashMap<String, Object>();
							params.put("score", score);
							params.put("measureIndex", sectionIndex);

							WebSocketResponse<Map<String, Object>> resp = new WebSocketResponse<Map<String, Object>>("measureScore", params);

							nettyChannelManager.sendTextMessage(user, resp);
						}
					} else {
						// 尚未同步曲谱信息,原实现此处会抛空指针
						LOGGER.warn("用户[{}]尚未同步曲谱信息,跳过评分", user);
					}

					totalLength = channelContext.getChannelBufferBytes().length;
				}
				break;

			default:
				break;
			}

		} finally {
			buf.release();
		}
	}

	@Override
	public void afterPropertiesSet() throws Exception {
		// 收集所有BinaryMessageHandler实现,按action建立路由表
		handlerMap = applicationContext.getBeansOfType(BinaryMessageHandler.class).values().stream()
				.collect(Collectors.toMap(BinaryMessageHandler::getAction, t -> t));
	}

	@Override
	public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
		this.applicationContext = applicationContext;
	}

}

+ 5 - 0
audio-analysis/src/main/java/com/yonge/netty/server/handler/message/TextMessageHandler.java

@@ -0,0 +1,5 @@
package com.yonge.netty.server.handler.message;

/**
 * Marker interface for handlers of text WebSocket messages.
 *
 * <p>NOTE(review): currently declares no methods; presumably an action-dispatch
 * contract mirroring {@code BinaryMessageHandler} is intended — confirm before use.
 */
public interface TextMessageHandler {

}

+ 228 - 0
audio-analysis/src/main/java/com/yonge/netty/server/handler/message/TextWebSocketHandler.java

@@ -0,0 +1,228 @@
package com.yonge.netty.server.handler.message;

import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;

import java.math.BigDecimal;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.stream.Collectors;

import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.JSONPath;
import com.ym.mec.biz.dal.entity.SysMusicCompareRecord;
import com.ym.mec.biz.dal.enums.DeviceTypeEnum;
import com.ym.mec.biz.dal.enums.FeatureType;
import com.ym.mec.biz.service.SysMusicCompareRecordService;
import com.ym.mec.thirdparty.storage.StoragePluginContext;
import com.ym.mec.thirdparty.storage.provider.KS3StoragePlugin;
import com.ym.mec.util.upload.UploadUtil;
import com.yonge.nettty.dto.SectionAnalysis;
import com.yonge.nettty.dto.UserChannelContext;
import com.yonge.nettty.dto.WebSocketResponse;
import com.yonge.nettty.entity.MusicXmlBasicInfo;
import com.yonge.nettty.entity.MusicXmlNote;
import com.yonge.netty.server.handler.ChannelContextConstants;
import com.yonge.netty.server.handler.NettyChannelManager;
import com.yonge.netty.server.processor.WaveformWriter;
import com.yonge.netty.server.service.UserChannelContextService;

/**
 * Handles JSON control messages of the evaluation protocol. The header carries a
 * type ("PITCH_DETECTION" / "SOUND_COMPARE") stored on the channel for binary-frame
 * routing, and a command: musicXml / recordStart / recordEnd / recordCancel /
 * proxyMessage / videoUpload.
 */
@Component
@ChannelHandler.Sharable
public class TextWebSocketHandler extends SimpleChannelInboundHandler<TextWebSocketFrame> {

	private static final Logger LOGGER = LoggerFactory.getLogger(TextWebSocketHandler.class);

	@Autowired
	private SysMusicCompareRecordService sysMusicCompareRecordService;

	@Autowired
	private StoragePluginContext storagePluginContext;

	@Autowired
	private UserChannelContextService userChannelContextService;

	@Autowired
	private NettyChannelManager nettyChannelManager;

	@Override
	protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame frame) throws Exception {

		Channel channel = ctx.channel();

		String jsonMsg = frame.text();

		LOGGER.info("接收到客户端的消息内容:{}", jsonMsg);

		String type = (String) JSONPath.extract(jsonMsg, "$.header.type");

		// 记录当前通道的业务类型,供二进制消息处理器路由使用
		// (isNotBlank等价于原来对单个参数的isNoneBlank调用)
		if (StringUtils.isNotBlank(type)) {
			channel.attr(ChannelContextConstants.CHANNEL_ATTR_KEY_ACTION).set(type);
		}

		if (StringUtils.equals(type, "PITCH_DETECTION")) {// 校音

			return;
		} else if (StringUtils.equals(type, "SOUND_COMPARE")) {// 评测
			// NOTE: 协议字段名即为"commond"(拼写如此),与客户端约定保持一致
			String command = (String) JSONPath.extract(jsonMsg, "$.header.commond");

			if (command == null) {
				// 原实现在switch(command)处会抛空指针
				LOGGER.warn("缺少commond指令,忽略消息");
				return;
			}

			JSONObject dataObj = (JSONObject) JSONPath.extract(jsonMsg, "$.body");

			UserChannelContext channelContext = userChannelContextService.getChannelContext(channel);

			MusicXmlBasicInfo musicXmlBasicInfo = null;

			switch (command) {
			case "musicXml": // 同步music xml信息

				musicXmlBasicInfo = JSONObject.toJavaObject(dataObj, MusicXmlBasicInfo.class);

				userChannelContextService.remove(channel);

				if (channelContext == null) {
					channelContext = new UserChannelContext();
				}

				channelContext.getSongMusicXmlMap().put(musicXmlBasicInfo.getExamSongId(), musicXmlBasicInfo);
				channelContext.init(musicXmlBasicInfo.getHeardLevel(), musicXmlBasicInfo.getSubjectId(), musicXmlBasicInfo.getBeatLength());

				userChannelContextService.register(channel, channelContext);

				break;
			case "recordStart": // 开始评测

				if (channelContext == null) {
					// 必须先通过musicXml指令初始化上下文;原实现此处会抛空指针
					LOGGER.warn("通道上下文不存在,请先发送musicXml指令");
					return;
				}

				// 清空缓存信息
				channelContext.resetUserInfo();

				musicXmlBasicInfo = channelContext.getMusicXmlBasicInfo(null);

				if (musicXmlBasicInfo != null) {
					Date date = new Date();
					SysMusicCompareRecord sysMusicCompareRecord = new SysMusicCompareRecord(FeatureType.CLOUD_STUDY_EVALUATION);
					sysMusicCompareRecord.setCreateTime(date);
					sysMusicCompareRecord.setUserId(Integer.parseInt(nettyChannelManager.getUser(channel)));
					sysMusicCompareRecord.setSysMusicScoreId(musicXmlBasicInfo.getExamSongId());
					sysMusicCompareRecord.setBehaviorId(musicXmlBasicInfo.getBehaviorId());
					sysMusicCompareRecord.setDeviceType(DeviceTypeEnum.valueOf(musicXmlBasicInfo.getPlatform()));
					sysMusicCompareRecord.setSpeed(musicXmlBasicInfo.getSpeed());

					// 曲谱总时长 = 最后一个音符的起始时间 + 时值(毫秒换算为秒)
					MusicXmlNote musicXmlNote = musicXmlBasicInfo.getMusicXmlInfos().stream().max(Comparator.comparing(MusicXmlNote::getTimeStamp)).get();
					sysMusicCompareRecord.setSourceTime((float) ((musicXmlNote.getTimeStamp() + musicXmlNote.getDuration()) / 1000));
					sysMusicCompareRecordService.insert(sysMusicCompareRecord);
					channelContext.setRecordId(sysMusicCompareRecord.getId());
				}
				break;
			case "recordEnd": // 结束评测
			case "recordCancel": // 取消评测
				if (channelContext == null) {
					return;
				}

				WaveformWriter waveFileProcessor = channelContext.getWaveFileProcessor();
				if (waveFileProcessor != null) {
					// 补写wav文件头
					waveFileProcessor.processingFinished();
				}

				if (StringUtils.equals(command, "recordEnd")) {
					// 生成评测报告
					Map<String, Object> params = new HashMap<String, Object>();

					Map<String, Integer> scoreMap = channelContext.evaluateForMusic();
					for (Entry<String, Integer> entry : scoreMap.entrySet()) {
						params.put(entry.getKey(), entry.getValue());
					}

					// 保存评测结果
					Long recordId = channelContext.getRecordId();
					SysMusicCompareRecord sysMusicCompareRecord = sysMusicCompareRecordService.get(recordId);
					if (sysMusicCompareRecord != null) {
						musicXmlBasicInfo = channelContext.getMusicXmlBasicInfo(null);

						if (scoreMap.size() > 1) {
							sysMusicCompareRecord.setScore(new BigDecimal(scoreMap.get("score")));
							sysMusicCompareRecord.setIntonation(new BigDecimal(scoreMap.get("intonation")));
							sysMusicCompareRecord.setIntegrity(new BigDecimal(scoreMap.get("integrity")));
							sysMusicCompareRecord.setCadence(new BigDecimal(scoreMap.get("cadence")));
							sysMusicCompareRecord.setPlayTime(scoreMap.get("playTime") / 1000);
						}
						sysMusicCompareRecord.setFeature(FeatureType.CLOUD_STUDY_EVALUATION);

						String url = null;
						if (waveFileProcessor != null) {
							try {
								String folder = UploadUtil.getFileFloder();
								url = storagePluginContext.asyncUploadFile(KS3StoragePlugin.PLUGIN_NAME, "soundCompare/" + folder, waveFileProcessor.getFile(), true);
							} catch (Exception e) {
								// 异常对象作为最后一个参数,保留完整堆栈(原写法"{}"会吞掉堆栈)
								LOGGER.error("录音文件上传失败", e);
							}
						}
						sysMusicCompareRecord.setRecordFilePath(url);

						Map<String, Object> scoreData = new HashMap<>();
						List<SectionAnalysis> sectionAnalysisList = channelContext.getDoneSectionAnalysisList();
						sectionAnalysisList = sectionAnalysisList.stream().filter(t -> t.isIngore() == false).collect(Collectors.toList());
						scoreData.put("userMeasureScore", sectionAnalysisList.stream().collect(Collectors.toMap(SectionAnalysis::getIndex, t -> t)));

						Map<String, Object> musicalNotesPlayStats = new HashMap<>();
						if (musicXmlBasicInfo != null) {
							musicalNotesPlayStats.put("detailId", musicXmlBasicInfo.getDetailId());
							musicalNotesPlayStats.put("examSongId", musicXmlBasicInfo.getExamSongId());
							musicalNotesPlayStats.put("xmlUrl", musicXmlBasicInfo.getXmlUrl());
						}

						musicalNotesPlayStats.put("notesData", channelContext.getDoneNoteAnalysisList().stream().filter(t -> t.isIgnore() == false).collect(Collectors.toList()));
						scoreData.put("musicalNotesPlayStats", musicalNotesPlayStats);
						sysMusicCompareRecord.setScoreData(JSON.toJSONString(scoreData));

						sysMusicCompareRecordService.saveMusicCompareData(sysMusicCompareRecord);
					}

					WebSocketResponse<Map<String, Object>> resp = new WebSocketResponse<Map<String, Object>>("overall", params);

					nettyChannelManager.sendTextMessage(nettyChannelManager.getUser(channel), resp);
				}

				// 清空缓存信息
				channelContext.resetUserInfo();

				break;
			case "proxyMessage": // 透传消息,暂未实现

				break;
			case "videoUpload": // 上传音频
				SysMusicCompareRecord musicCompareRecord = null;
				if (dataObj != null && dataObj.containsKey("recordId")) {
					musicCompareRecord = sysMusicCompareRecordService.get(dataObj.getLong("recordId"));
				}
				if (musicCompareRecord == null) {
					// 原实现的else分支在记录不存在时会抛空指针
					LOGGER.warn("videoUpload未找到对应的评测记录:{}", dataObj);
					break;
				}
				if (dataObj.containsKey("filePath")) {
					musicCompareRecord.setVideoFilePath(dataObj.getString("filePath"));
				} else {
					// 未提供视频地址时,退回使用录音文件地址
					musicCompareRecord.setVideoFilePath(musicCompareRecord.getRecordFilePath());
				}
				sysMusicCompareRecordService.update(musicCompareRecord);

				break;

			default:
				// 非法请求
				break;
			}
		}
	}

}

+ 0 - 147
audio-analysis/src/main/java/com/yonge/netty/server/messagehandler/BinaryWebSocketFrameHandler.java

@@ -1,147 +0,0 @@
-package com.yonge.netty.server.messagehandler;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufUtil;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandler;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
-import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame;
-
-import java.io.File;
-
-import javax.sound.sampled.AudioFormat;
-
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.stereotype.Component;
-
-import be.tarsos.dsp.AudioDispatcher;
-import be.tarsos.dsp.io.TarsosDSPAudioFloatConverter;
-import be.tarsos.dsp.io.TarsosDSPAudioFormat;
-import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;
-import be.tarsos.dsp.pitch.PitchDetector;
-import be.tarsos.dsp.pitch.PitchProcessor;
-import be.tarsos.dsp.pitch.PitchProcessor.PitchEstimationAlgorithm;
-
-import com.yonge.audio.utils.ArrayUtil;
-import com.yonge.nettty.dto.UserChannelContext;
-import com.yonge.netty.server.NettyChannelManager;
-import com.yonge.netty.server.processor.WaveformWriter;
-import com.yonge.netty.server.service.UserChannelContextService;
-
-@Component
-@ChannelHandler.Sharable
-public class BinaryWebSocketFrameHandler extends SimpleChannelInboundHandler<BinaryWebSocketFrame> {
-
-	@Autowired
-	private NettyChannelManager nettyChannelManager;
-
-	@Autowired
-	private UserChannelContextService userChannelContextService;
-
-	/**
-	 * @describe 采样率
-	 */
-	private float sampleRate = 44100;
-
-	/**
-	 * 每个采样大小(Bit)
-	 */
-	private int bitsPerSample = 16;
-
-	/**
-	 * 通道数
-	 */
-	private int channels = 1;
-
-	/**
-	 * @describe 采样大小
-	 */
-	private int bufferSize = 1024 * 4;
-	/**
-	 * @describe 帧覆盖大小
-	 */
-	private int overlap = 0;
-
-	private boolean signed = true;
-
-	private boolean bigEndian = false;
-
-	private AudioFormat audioFormat = new AudioFormat(sampleRate, bitsPerSample, channels, signed, bigEndian);
-
-	private TarsosDSPAudioFloatConverter converter = TarsosDSPAudioFloatConverter.getConverter(new TarsosDSPAudioFormat(sampleRate, bitsPerSample, channels,
-			signed, bigEndian));
-
-	private PitchEstimationAlgorithm algorithm = PitchProcessor.PitchEstimationAlgorithm.FFT_YIN;
-
-	/**
-	 * @describe 有效分贝大小
-	 */
-	private int validDb = 35;
-	/**
-	 * @describe 有效频率
-	 */
-	private int validFrequency = 20;
-	/**
-	 * @describe 音准前后音分误差范围
-	 */
-	private int intonationCentsRange = 3;
-	/**
-	 * @describe 节奏有效阈值
-	 */
-	private float cadenceValidDuty = 0.09f;
-	/**
-	 * @describe 完整性有效频率误差范围
-	 */
-	private int integrityFrequencyRange = 30;
-
-	private String tmpFileDir = "e:/soundRecords/";
-	
-	@Override
-	protected void channelRead0(ChannelHandlerContext ctx, BinaryWebSocketFrame frame) throws Exception {
-
-		Channel channel = ctx.channel();
-
-		ByteBuf buf = frame.content().retain();
-
-		try {
-			byte[] datas = ByteBufUtil.getBytes(buf);
-
-			String user = nettyChannelManager.getUser(channel);
-
-			UserChannelContext channelContext = userChannelContextService.getChannelContext(channel);
-
-			if (channelContext == null) {
-				return;
-			}
-
-			// 写录音文件
-			WaveformWriter waveFileProcessor = channelContext.getWaveFileProcessor();
-			if (waveFileProcessor == null) {
-				File file = new File(tmpFileDir + user + "_" + System.currentTimeMillis() + ".wav");
-				waveFileProcessor = new WaveformWriter(file.getAbsolutePath());
-				channelContext.setWaveFileProcessor(waveFileProcessor);
-			}
-			waveFileProcessor.process(datas);
-			
-			channelContext.setChannelBufferBytes(ArrayUtil.mergeByte(channelContext.getChannelBufferBytes(), datas));
-			
-			int totalLength = channelContext.getChannelBufferBytes().length;
-			
-			while(totalLength >= bufferSize){
-				byte[] bufferData = ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), 0, bufferSize);
-				channelContext.setChannelBufferBytes(ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), bufferSize - 1, totalLength - 1));
-				
-				totalLength = channelContext.getChannelBufferBytes().length;
-
-				AudioDispatcher dispatcher = AudioDispatcherFactory.fromByteArray(bufferData, audioFormat, bufferSize, overlap);
-				dispatcher.addAudioProcessor(new PitchProcessor(algorithm, sampleRate, bufferSize, channelContext));
-				dispatcher.run();
-			}
-
-		} finally {
-			buf.release();
-		}
-	}
-
-}

+ 0 - 112
audio-analysis/src/main/java/com/yonge/netty/server/messagehandler/TextWebSocketHandler.java

@@ -1,112 +0,0 @@
-package com.yonge.netty.server.messagehandler;
-
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandler;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
-import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;
-
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.stereotype.Component;
-
-import com.alibaba.fastjson.JSONObject;
-import com.alibaba.fastjson.JSONPath;
-import com.ym.mec.biz.dal.entity.SysMusicCompareRecord;
-import com.ym.mec.biz.dal.enums.FeatureType;
-import com.ym.mec.biz.service.SysMusicCompareRecordService;
-import com.yonge.nettty.dto.UserChannelContext;
-import com.yonge.nettty.entity.MusicXmlBasicInfo;
-import com.yonge.netty.server.processor.WaveformWriter;
-import com.yonge.netty.server.service.UserChannelContextService;
-
-@Component
-@ChannelHandler.Sharable
-public class TextWebSocketHandler extends SimpleChannelInboundHandler<TextWebSocketFrame> {
-
-	private static final Logger LOGGER = LoggerFactory.getLogger(TextWebSocketHandler.class);
-
-	@Autowired
-	private SysMusicCompareRecordService sysMusicCompareRecordService;
-
-	@Autowired
-	private UserChannelContextService userChannelContextService;
-
-	@Override
-	protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame frame) throws Exception {
-
-		Channel channel = ctx.channel();
-
-		String jsonMsg = frame.text();
-		String commond = (String) JSONPath.extract(jsonMsg, "$.header.commond");
-
-		JSONObject dataObj = (JSONObject) JSONPath.extract(jsonMsg, "$.body");
-
-		LOGGER.info("接收到客户端的指令[{}]:{}", commond, dataObj);
-
-		UserChannelContext channelContext = userChannelContextService.getChannelContext(channel);
-
-		switch (commond) {
-		case "musicXml": // 同步music xml信息
-
-			MusicXmlBasicInfo musicXmlBasicInfo = JSONObject.toJavaObject(dataObj, MusicXmlBasicInfo.class);
-
-			userChannelContextService.remove(channel);
-
-			if (channelContext == null) {
-				channelContext = new UserChannelContext();
-			}
-
-			channelContext.getSongMusicXmlMap().put(musicXmlBasicInfo.getExamSongId(), musicXmlBasicInfo);
-
-			userChannelContextService.register(channel, channelContext);
-
-			break;
-		case "recordStart": // 开始评测
-
-			SysMusicCompareRecord sysMusicCompareRecord = new SysMusicCompareRecord(FeatureType.CLOUD_STUDY_EVALUATION);
-			sysMusicCompareRecordService.insert(sysMusicCompareRecord);
-			
-			//清空缓存信息
-			channelContext.resetUserInfo();
-
-			break;
-		case "recordEnd": // 结束评测
-		case "recordCancel": // 取消评测
-			if (channelContext == null) {
-				return;
-			}
-
-			WaveformWriter waveFileProcessor = channelContext.getWaveFileProcessor();
-			if (waveFileProcessor != null) {
-				// 写文件头
-				waveFileProcessor.processingFinished();
-			}
-			
-			//清空缓存信息
-			channelContext.resetUserInfo();
-
-			if (StringUtils.equals(commond, "recordEnd")) {
-				// 生成评测报告
-			}
-
-			break;
-		case "proxyMessage": // ???
-
-			break;
-		case "videoUpload": // 上传音频
-
-			break;
-		case "checkSound": // 校音
-
-			break;
-
-		default:
-			// 非法请求
-			break;
-		}
-	}
-
-}

+ 27 - 8
audio-analysis/src/main/java/com/yonge/netty/server/processor/WaveformWriter.java

@@ -24,6 +24,7 @@
 package com.yonge.netty.server.processor;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 
@@ -37,19 +38,19 @@ import be.tarsos.dsp.writer.WaveHeader;
  */
 public class WaveformWriter {
 
+	private static final Logger LOGGER = LoggerFactory.getLogger(WaveformWriter.class);
+
 	private RandomAccessFile randomAccessFile;
 
 	private final String fileName;
 
-	private short channelNum = 1;
+	public static final short CHANNEL_NUM = 1;
 
-	private int sampleRate = 44100;
+	public static final int SAMPLE_RATE = 44100;
 
-	private short bitsPerSample = 16;
+	public static final short BITS_PER_SAMPLE = 16;
 
-	private static final int HEADER_LENGTH = 44;
-
-	private static final Logger LOGGER = LoggerFactory.getLogger(WaveformWriter.class);
+	public static final int HEADER_LENGTH = 44;
 
 	public WaveformWriter(String fileName) {
 
@@ -78,8 +79,9 @@ public class WaveformWriter {
 
 	public void processingFinished() {
 		try {
-			WaveHeader waveHeader = new WaveHeader(WaveHeader.FORMAT_PCM, channelNum, sampleRate, bitsPerSample, (int) randomAccessFile.length() - HEADER_LENGTH);// 16
-																																					// is
+			WaveHeader waveHeader = new WaveHeader(WaveHeader.FORMAT_PCM, CHANNEL_NUM, SAMPLE_RATE, BITS_PER_SAMPLE, (int) randomAccessFile.length()
+					- HEADER_LENGTH);
+
 			ByteArrayOutputStream header = new ByteArrayOutputStream();
 			waveHeader.write(header);
 			randomAccessFile.seek(0);
@@ -90,4 +92,21 @@ public class WaveformWriter {
 			e.printStackTrace();
 		}
 	}
+
+	public File getFile() {
+		return new File(fileName);
+	}
+
+	public long getFileLength(boolean isSubHeadLength) {
+		try {
+			if (isSubHeadLength) {
+				return randomAccessFile.length() - HEADER_LENGTH;
+			}
+			return randomAccessFile.length();
+		} catch (IOException e) {
+			LOGGER.error("读取WAV文件出现异常[{}]:{}", fileName, e.getMessage());
+			e.printStackTrace();
+		}
+		return 0;
+	}
 }

+ 135 - 0
audio-analysis/src/main/java/com/yonge/netty/server/service/CompareHandler.java

@@ -0,0 +1,135 @@
package com.yonge.netty.server.service;

import io.netty.channel.Channel;

import java.io.File;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.HashMap;
import java.util.Map;

import javax.sound.sampled.AudioFormat;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import com.yonge.audio.analysis.AudioFloatConverter;
import com.yonge.audio.utils.ArrayUtil;
import com.yonge.nettty.dto.UserChannelContext;
import com.yonge.nettty.dto.WebSocketResponse;
import com.yonge.nettty.entity.MusicXmlBasicInfo;
import com.yonge.netty.server.handler.NettyChannelManager;
import com.yonge.netty.server.handler.message.BinaryMessageHandler;
import com.yonge.netty.server.processor.WaveformWriter;

/**
 * 处理"SOUND_COMPARE"动作的二进制音频数据:写录音文件、缓冲PCM数据、
 * 按固定窗口交给UserChannelContext分析,并在小节评分可用时推送给客户端。
 */
@Component
public class CompareHandler implements BinaryMessageHandler {

	private static final Logger LOGGER = LoggerFactory.getLogger(CompareHandler.class);

	@Autowired
	private UserChannelContextService userChannelContextService;

	@Autowired
	private NettyChannelManager nettyChannelManager;

	/**
	 * @describe 采样率
	 */
	private float sampleRate = 44100;

	/**
	 * 每个采样大小(Bit)
	 */
	private int bitsPerSample = 16;

	/**
	 * 通道数
	 */
	private int channels = 1;

	/**
	 * @describe 采样大小
	 */
	private int bufferSize = 1024 * 4;

	private boolean signed = true;

	private boolean bigEndian = false;

	private AudioFormat audioFormat = new AudioFormat(sampleRate, bitsPerSample, channels, signed, bigEndian);

	private AudioFloatConverter converter = AudioFloatConverter.getConverter(audioFormat);

	// NOTE(review): Windows本地路径,与BinaryWebSocketFrameHandler中的
	// "/mdata/soundCompare/"不一致,需确认部署环境后统一
	private String tmpFileDir = "e:/soundRecords/";

	// DateTimeFormatter线程安全,可在单例@Component中共享(SimpleDateFormat不行);
	// 原模式"yyMMddHHmmSS"中SS为毫秒,此处修正为秒(ss)
	private static final DateTimeFormatter FILE_NAME_FORMATTER = DateTimeFormatter.ofPattern("yyMMddHHmmss");

	@Override
	public String getAction() {
		return "SOUND_COMPARE";
	}

	/**
	 * 处理一段二进制音频数据。
	 *
	 * @param user 通道绑定的用户标识(用于录音文件命名与消息推送)
	 * @param channel 数据来源通道
	 * @param datas 原始音频字节
	 * @return 数据被处理返回true;上下文缺失或无有效数据返回false
	 */
	@Override
	public boolean handler(String user, Channel channel, byte[] datas) {
		UserChannelContext channelContext = userChannelContextService.getChannelContext(channel);

		if (channelContext == null) {
			return false;
		}

		// 写录音文件
		WaveformWriter waveFileProcessor = channelContext.getWaveFileProcessor();
		if (waveFileProcessor == null) {
			File file = new File(tmpFileDir + user + "_" + FILE_NAME_FORMATTER.format(LocalDateTime.now()) + ".wav");
			waveFileProcessor = new WaveformWriter(file.getAbsolutePath());
			channelContext.setWaveFileProcessor(waveFileProcessor);
		}
		waveFileProcessor.process(datas);

		// 跳过wav文件头,只保留PCM数据
		datas = channelContext.skipHeader(datas);

		if (datas.length == 0) {
			return false;
		}

		channelContext.setChannelBufferBytes(ArrayUtil.mergeByte(channelContext.getChannelBufferBytes(), datas));

		int totalLength = channelContext.getChannelBufferBytes().length;

		// 每累积满bufferSize字节处理一个窗口
		while (totalLength >= bufferSize) {
			byte[] bufferData = ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), 0, bufferSize - 1);

			if (bufferSize != totalLength) {
				channelContext.setChannelBufferBytes(ArrayUtil.extractByte(channelContext.getChannelBufferBytes(), bufferSize, totalLength - 1));
			} else {
				channelContext.setChannelBufferBytes(new byte[0]);
			}

			// 16bit采样:2字节对应1个float样本
			float[] sampleFloats = new float[bufferSize / 2];

			converter.toFloatArray(bufferData, sampleFloats);

			channelContext.handle(sampleFloats, audioFormat);

			MusicXmlBasicInfo musicXmlBasicInfo = channelContext.getMusicXmlBasicInfo(null);

			if (musicXmlBasicInfo != null) {
				int sectionIndex = channelContext.getEvaluatingSectionIndex().get();

				// 评分;仅当score >= 0时推送(负值的具体含义由evaluateForSection定义,待确认)
				int score = channelContext.evaluateForSection(sectionIndex, musicXmlBasicInfo.getSubjectId());
				if (score >= 0) {

					Map<String, Object> params = new HashMap<String, Object>();
					params.put("score", score);
					params.put("measureIndex", sectionIndex);

					WebSocketResponse<Map<String, Object>> resp = new WebSocketResponse<Map<String, Object>>("measureScore", params);

					nettyChannelManager.sendTextMessage(user, resp);
				}
			} else {
				// 尚未同步曲谱信息,原实现此处会抛空指针
				LOGGER.warn("用户[{}]尚未同步曲谱信息,跳过评分", user);
			}

			totalLength = channelContext.getChannelBufferBytes().length;
		}

		return true;
	}

}

+ 84 - 0
audio-analysis/src/main/java/com/yonge/netty/server/service/PitchDetectionHandler.java

@@ -0,0 +1,84 @@
+package com.yonge.netty.server.service;
+
+import io.netty.channel.Channel;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.sound.sampled.AudioFormat;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+import com.yonge.audio.analysis.AudioFloatConverter;
+import com.yonge.audio.analysis.detector.YINPitchDetector;
+import com.yonge.nettty.dto.WebSocketResponse;
+import com.yonge.netty.server.handler.NettyChannelManager;
+import com.yonge.netty.server.handler.message.BinaryMessageHandler;
+
+@Component
+public class PitchDetectionHandler implements BinaryMessageHandler {
+	
+	private final static Logger LOGGER = LoggerFactory.getLogger(PitchDetectionHandler.class);
+
+	/**
+	 * @describe 采样率
+	 */
+	private float sampleRate = 44100;
+
+	/**
+	 * 每个采样大小(Bit)
+	 */
+	private int bitsPerSample = 16;
+
+	/**
+	 * 通道数
+	 */
+	private int channels = 1;
+	
+	private boolean signed = true;
+
+	private boolean bigEndian = false;
+
+	private AudioFormat audioFormat = new AudioFormat(sampleRate, bitsPerSample, channels, signed, bigEndian);
+
+	private AudioFloatConverter converter = AudioFloatConverter.getConverter(audioFormat);
+	
+	@Autowired
+	private NettyChannelManager nettyChannelManager;
+	
+	@Override
+	public String getAction() {
+		return "PITCH_DETECTION";
+	}
+
+	@Override
+	public boolean handler(String userId, Channel channel, byte[] bytes) {
+
+		float[] samples = new float[bytes.length / 2];
+
+		if (samples.length == 0) {
+			return false;
+		}
+
+		converter.toFloatArray(bytes, samples);
+
+		YINPitchDetector frequencyDetector = new YINPitchDetector(samples.length, audioFormat.getSampleRate());
+
+		int playFrequency = (int) frequencyDetector.getFrequency(samples);
+		
+		LOGGER.info("校音频率:{}", playFrequency);
+
+		Map<String, Object> params = new HashMap<String, Object>();
+		params.put("frequency", playFrequency);
+
+		WebSocketResponse<Map<String, Object>> resp = new WebSocketResponse<Map<String, Object>>("checking", params);
+
+		nettyChannelManager.sendTextMessage(userId, resp);
+
+		return true;
+	}
+	
+}

+ 5 - 1
audio-analysis/src/main/resources/application.yml → audio-analysis/src/main/resources/application-template.yml

@@ -12,6 +12,10 @@ server:
       request-attributes-enabled: false
       rotate: true
       suffix: .log
+      
+netty:
+  server:
+    port: 8090
 
 eureka:
   client:
@@ -26,7 +30,7 @@ spring:
     
   datasource:
     name: test
-    url: jdbc:mysql://47.114.1.200:3306/mec_dev?useUnicode=true&characterEncoding=UTF8&serverTimezone=Asia/Shanghai
+    url: jdbc:mysql://47.114.1.200:3306/mec_test?useUnicode=true&characterEncoding=UTF8&serverTimezone=Asia/Shanghai&allowMultiQueries=true
     username: mec_dev
     password: dayaDataOnline@2019
     # 使用druid数据源

+ 16 - 0
audio-analysis/src/main/resources/bootstrap-dev.properties

@@ -0,0 +1,16 @@
+#\u6307\u5b9a\u5f00\u53d1\u73af\u5883
+#spring.profiles.active=dev
+#\u670d\u52a1\u5668\u5730\u5740
+spring.cloud.nacos.config.server-addr=47.114.1.200:8848
+#\u9ed8\u8ba4\u4e3aPublic\u547d\u540d\u7a7a\u95f4,\u53ef\u4ee5\u7701\u7565\u4e0d\u5199
+spring.cloud.nacos.config.namespace=a5c10b43-0c4d-4e3b-a0ad-9af651cfe89c
+#\u6307\u5b9a\u914d\u7f6e\u7fa4\u7ec4 --\u5982\u679c\u662fPublic\u547d\u540d\u7a7a\u95f4 \u5219\u53ef\u4ee5\u7701\u7565\u7fa4\u7ec4\u914d\u7f6e
+spring.cloud.nacos.config.group=DEFAULT_GROUP
+#\u6587\u4ef6\u540d -- \u5982\u679c\u6ca1\u6709\u914d\u7f6e\u5219\u9ed8\u8ba4\u4e3a ${spring.application.name}
+spring.cloud.nacos.config.prefix=audio-analysis
+#\u6307\u5b9a\u6587\u4ef6\u540e\u7f00
+spring.cloud.nacos.config.file-extension=yaml
+#\u662f\u5426\u52a8\u6001\u5237\u65b0
+spring.cloud.nacos.config.refresh.enabled=true
+#\u662f\u5426\u542f\u7528nacos\u914d\u7f6e\u4e2d\u5fc3
+spring.cloud.nacos.config.enabled=true

+ 16 - 0
audio-analysis/src/main/resources/bootstrap-prod.properties

@@ -0,0 +1,16 @@
+#\u6307\u5b9a\u5f00\u53d1\u73af\u5883
+#spring.profiles.active=dev
+#\u670d\u52a1\u5668\u5730\u5740
+spring.cloud.nacos.config.server-addr=47.96.80.97:8848
+#\u9ed8\u8ba4\u4e3aPublic\u547d\u540d\u7a7a\u95f4,\u53ef\u4ee5\u7701\u7565\u4e0d\u5199
+spring.cloud.nacos.config.namespace=f40a7594-4bd0-4bc6-8397-9353c6d2e63a
+#\u6307\u5b9a\u914d\u7f6e\u7fa4\u7ec4 --\u5982\u679c\u662fPublic\u547d\u540d\u7a7a\u95f4 \u5219\u53ef\u4ee5\u7701\u7565\u7fa4\u7ec4\u914d\u7f6e
+spring.cloud.nacos.config.group=DEFAULT_GROUP
+#\u6587\u4ef6\u540d -- \u5982\u679c\u6ca1\u6709\u914d\u7f6e\u5219\u9ed8\u8ba4\u4e3a ${spring.application.name}
+spring.cloud.nacos.config.prefix=audio-analysis
+#\u6307\u5b9a\u6587\u4ef6\u540e\u7f00
+spring.cloud.nacos.config.file-extension=yaml
+#\u662f\u5426\u52a8\u6001\u5237\u65b0
+spring.cloud.nacos.config.refresh.enabled=true
+#\u662f\u5426\u542f\u7528nacos\u914d\u7f6e\u4e2d\u5fc3
+spring.cloud.nacos.config.enabled=true

+ 16 - 0
audio-analysis/src/main/resources/bootstrap-test.properties

@@ -0,0 +1,16 @@
+#\u6307\u5b9a\u5f00\u53d1\u73af\u5883
+#spring.profiles.active=dev
+#\u670d\u52a1\u5668\u5730\u5740
+spring.cloud.nacos.config.server-addr=47.114.1.200:8848
+#\u9ed8\u8ba4\u4e3aPublic\u547d\u540d\u7a7a\u95f4,\u53ef\u4ee5\u7701\u7565\u4e0d\u5199
+spring.cloud.nacos.config.namespace=46f06363-b9d6-46f0-9cd7-7b33dcf26bb0
+#\u6307\u5b9a\u914d\u7f6e\u7fa4\u7ec4 --\u5982\u679c\u662fPublic\u547d\u540d\u7a7a\u95f4 \u5219\u53ef\u4ee5\u7701\u7565\u7fa4\u7ec4\u914d\u7f6e
+spring.cloud.nacos.config.group=DEFAULT_GROUP
+#\u6587\u4ef6\u540d -- \u5982\u679c\u6ca1\u6709\u914d\u7f6e\u5219\u9ed8\u8ba4\u4e3a ${spring.application.name}
+spring.cloud.nacos.config.prefix=audio-analysis
+#\u6307\u5b9a\u6587\u4ef6\u540e\u7f00
+spring.cloud.nacos.config.file-extension=yaml
+#\u662f\u5426\u52a8\u6001\u5237\u65b0
+spring.cloud.nacos.config.refresh.enabled=true
+#\u662f\u5426\u542f\u7528nacos\u914d\u7f6e\u4e2d\u5fc3
+spring.cloud.nacos.config.enabled=true

+ 27 - 96
mec-biz/src/main/java/com/ym/mec/biz/dal/dto/SoundCompareHelper.java

@@ -1,11 +1,9 @@
 package com.ym.mec.biz.dal.dto;
 
 import be.tarsos.dsp.AudioEvent;
-import be.tarsos.dsp.AudioRunDispatcher;
 import be.tarsos.dsp.SilenceDetector;
 import be.tarsos.dsp.pitch.PitchDetectionHandler;
 import be.tarsos.dsp.pitch.PitchDetectionResult;
-import com.alibaba.fastjson.JSON;
 import com.ym.mec.biz.dal.enums.DeviceTypeEnum;
 import com.ym.mec.biz.dal.enums.HeardLevelEnum;
 import com.ym.mec.biz.service.impl.SoundCompareHandler;
@@ -19,7 +17,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
 
 /**
  * @Author Joburgess
@@ -65,8 +62,8 @@ public class SoundCompareHelper implements PitchDetectionHandler {
     @ApiModelProperty(value = "小节结束时间字典")
     private Map<Integer, MusicPitchDetailDto> measureEndTime = new HashMap<>();
 
-//    @ApiModelProperty(value = "录音音频信息")
-//    private List<MusicPitchDetailDto> recordMeasurePithInfo = new ArrayList<>();
+    @ApiModelProperty(value = "录音音频信息")
+    private List<MusicPitchDetailDto> recordMeasurePithInfo = new ArrayList<>();
 
     @ApiModelProperty(value = "小节分数记录")
     private Map<String, BigDecimal> userScoreMap = new HashMap<>();
@@ -84,10 +81,6 @@ public class SoundCompareHelper implements PitchDetectionHandler {
 
     private List<MusicPitchDetailDto> musicXmlInfos;
 
-    private long firstMeasureStartBytes = 0;
-
-    private long recordBytes = 0;
-
     private String clientId;
 
     private Integer detailId;
@@ -96,30 +89,11 @@ public class SoundCompareHelper implements PitchDetectionHandler {
 
     private String xmlUrl;
 
-    /** 演奏音频数据中频率变更明显持续数量 */
-    private int obviousChangeNum = 0;
-
-    /** 演奏音频中但前小节所有频率数据 */
-    private List<MusicPitchDetailDto> currPitchInfos = new ArrayList<>();
-    private List<MusicPitchDetailDto> currTmpPitchInfos = new ArrayList<>();
-
-    private List<MusicPitchDetailDto> recordMeasurePitchInfos = new ArrayList<>();
-
     /**
      * @describe 分贝检测器
      */
     public SilenceDetector silenceDetector = new SilenceDetector();
 
-    private AudioRunDispatcher audioRunDispatcher;
-
-    public AudioRunDispatcher getAudioRunDispatcher() {
-        return audioRunDispatcher;
-    }
-
-    public void setAudioRunDispatcher(AudioRunDispatcher audioRunDispatcher) {
-        this.audioRunDispatcher = audioRunDispatcher;
-    }
-
     public List<MusicPitchDetailDto> getMusicXmlInfos() {
         return musicXmlInfos;
     }
@@ -288,28 +262,20 @@ public class SoundCompareHelper implements PitchDetectionHandler {
         this.musicalNotesPlayStats = musicalNotesPlayStats;
     }
 
-    public Map<String, BigDecimal> getUserScoreMap() {
-        return userScoreMap;
-    }
-
-    public void setUserScoreMap(Map<String, BigDecimal> userScoreMap) {
-        this.userScoreMap = userScoreMap;
+    public List<MusicPitchDetailDto> getRecordMeasurePithInfo() {
+        return recordMeasurePithInfo;
     }
 
-    public long getFirstMeasureStartBytes() {
-        return firstMeasureStartBytes;
+    public void setRecordMeasurePithInfo(List<MusicPitchDetailDto> recordMeasurePithInfo) {
+        this.recordMeasurePithInfo = recordMeasurePithInfo;
     }
 
-    public void setFirstMeasureStartBytes(long firstMeasureStartBytes) {
-        this.firstMeasureStartBytes = firstMeasureStartBytes;
-    }
-
-    public long getRecordBytes() {
-        return recordBytes;
+    public Map<String, BigDecimal> getUserScoreMap() {
+        return userScoreMap;
     }
 
-    public void setRecordBytes(long recordBytes) {
-        this.recordBytes = recordBytes;
+    public void setUserScoreMap(Map<String, BigDecimal> userScoreMap) {
+        this.userScoreMap = userScoreMap;
     }
 
     public Map<Integer, Map<String, Object>> getUserMeasureScoreMap() {
@@ -320,64 +286,29 @@ public class SoundCompareHelper implements PitchDetectionHandler {
         this.userMeasureScoreMap = userMeasureScoreMap;
     }
 
-    public List<MusicPitchDetailDto> getRecordMeasurePitchInfos() {
-        return recordMeasurePitchInfos;
-    }
-
-    public void setRecordMeasurePitchInfos(List<MusicPitchDetailDto> recordMeasurePitchInfos) {
-        this.recordMeasurePitchInfos = recordMeasurePitchInfos;
-    }
-
     @Override
     public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent audioEvent) {
         int timeStamp = (int) (measureStartTime + audioEvent.getTimeStamp()*1000);
         float pitch = pitchDetectionResult.getPitch();
-        int decibel = (int) (100 - Math.abs(silenceDetector.currentSPL()));
-
-        byte byteOne = audioEvent.getByteBuffer()[0];
-        byte byteTwo = audioEvent.getByteBuffer()[1];
-
-        double amplitude = Math.abs(((byteOne&0x000000FF)|(byteTwo<<8)));
-
-        if(decibel <= SoundCompareHandler.soundCompareConfig.validDb){
-            pitch = -1;
-            decibel = 0;
-        }
-
-        SoundCompareHandler.LOGGER.info("时间:{}, 振幅:{}, 频率:{}, 分贝:{}", timeStamp, amplitude, pitch, decibel);
-
-        if(currPitchInfos.size()>0&&(Math.abs(currPitchInfos.get(currPitchInfos.size()-1).getFrequency()-pitch)>10||Math.abs(currPitchInfos.get(currPitchInfos.size()-1).getDecibel()-decibel)>10)){
-            Double avgPitch = currPitchInfos.stream().collect(Collectors.averagingDouble(MusicPitchDetailDto::getFrequency));
-            Double avgDb = currPitchInfos.stream().collect(Collectors.averagingDouble(MusicPitchDetailDto::getDecibel));
-
-            MusicPitchDetailDto measureDetail = new MusicPitchDetailDto(currPitchInfos.get(0).getTimeStamp(), avgPitch.floatValue(), avgDb);
-            measureDetail.setEndTimeStamp(timeStamp);
-            measureDetail.setDuration(measureDetail.getEndTimeStamp()-measureDetail.getTimeStamp());
-
-            if(measureDetail.getDuration()>25&&(avgPitch>0||avgDb>SoundCompareHandler.soundCompareConfig.validDb)){
-                recordMeasurePitchInfos.add(measureDetail);
-            }
-
-            currPitchInfos.clear();
-
-            //初始化偏移时间
-            if(offsetTime == -1 && recordMeasurePitchInfos.size() == 1){
-                offsetTime = recordMeasurePitchInfos.get(0).getTimeStamp();
-                for (MusicPitchDetailDto musicXmlInfo : musicXmlInfos) {
-                    if(!musicXmlInfo.getDontEvaluating()){
-                        if(offsetTime > musicXmlInfo.getTimeStamp()) {
-                            offsetTime = offsetTime - musicXmlInfo.getTimeStamp();
-                        }
-                        break;
-                    }
-                }
-                musicXmlInfos.forEach(e->e.setTimeStamp(e.getTimeStamp()+offsetTime));
-                for (Map.Entry<Integer, MusicPitchDetailDto> musicPitchDetailDtoEntry : measureEndTime.entrySet()) {
-                    musicPitchDetailDtoEntry.getValue().setTimeStamp(musicPitchDetailDtoEntry.getValue().getTimeStamp() + offsetTime);
-                    musicPitchDetailDtoEntry.getValue().setEndTimeStamp(musicPitchDetailDtoEntry.getValue().getEndTimeStamp() + offsetTime);
+        if(offsetTime == -1 && !DeviceTypeEnum.IOS.equals(deviceType) && pitch>0){
+            int preTimeStamp = CollectionUtils.isEmpty(recordMeasurePithInfo)?0:recordMeasurePithInfo.get(recordMeasurePithInfo.size()-1).getTimeStamp();
+            offsetTime = timeStamp - (timeStamp - preTimeStamp)/2;
+            for (MusicPitchDetailDto musicXmlInfo : musicXmlInfos) {
+                if(!musicXmlInfo.getDontEvaluating()){
+                    if(offsetTime > musicXmlInfo.getTimeStamp())
+                        offsetTime = offsetTime - musicXmlInfo.getTimeStamp();
+                    break;
                 }
             }
+            for (Map.Entry<Integer, MusicPitchDetailDto> musicPitchDetailDtoEntry : measureEndTime.entrySet()) {
+                musicPitchDetailDtoEntry.getValue().setTimeStamp(musicPitchDetailDtoEntry.getValue().getTimeStamp() + offsetTime);
+                musicPitchDetailDtoEntry.getValue().setEndTimeStamp(musicPitchDetailDtoEntry.getValue().getEndTimeStamp() + offsetTime);
+            }
+        }
+        if(silenceDetector.currentSPL()< SoundCompareHandler.soundCompareConfig.validDb){
+            pitch = -1;
         }
-        currPitchInfos.add(new MusicPitchDetailDto(timeStamp, pitch, decibel));
+//            LOGGER.info("时间:{}, 频率:{}, 分贝:{}", timeStamp, pitch, silenceDetector.currentSPL());
+        recordMeasurePithInfo.add(new MusicPitchDetailDto(timeStamp, pitch, silenceDetector.currentSPL()));
     }
 }

+ 7 - 0
mec-biz/src/main/java/com/ym/mec/biz/service/SysMusicCompareRecordService.java

@@ -24,6 +24,13 @@ public interface SysMusicCompareRecordService extends BaseService<Long, SysMusic
     void saveMusicCompareData(String phone, SoundCompareHelper soundCompareInfo);
 
     /**
+     * @describe 保存用户评测记录
+     * @param sysMusicCompareRecord
+     * @return void
+     */
+    void saveMusicCompareData(SysMusicCompareRecord sysMusicCompareRecord);
+
+    /**
      * @describe 用户最后一次评测数据
      * @author Joburgess
      * @date 2021/8/23 0023

+ 131 - 88
mec-biz/src/main/java/com/ym/mec/biz/service/impl/SoundCompareHandler.java

@@ -1,14 +1,11 @@
 package com.ym.mec.biz.service.impl;
 
 import be.tarsos.dsp.AudioDispatcher;
-import be.tarsos.dsp.AudioRunDispatcher;
 import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;
 import be.tarsos.dsp.pitch.PitchProcessor;
 import be.tarsos.dsp.util.PitchConverter;
 import com.alibaba.fastjson.JSON;
 import com.alibaba.fastjson.JSONObject;
-import com.alibaba.fastjson.serializer.SerializerFeature;
-import com.ym.mec.biz.dal.config.NioAudioInputStream;
 import com.ym.mec.biz.dal.config.SoundCompareConfig;
 import com.ym.mec.biz.dal.dao.SysMusicScoreAccompanimentDao;
 import com.ym.mec.biz.dal.dto.*;
@@ -34,6 +31,7 @@ import org.springframework.web.socket.WebSocketSession;
 
 import javax.sound.sampled.UnsupportedAudioFileException;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.math.BigDecimal;
@@ -52,7 +50,7 @@ import static com.ym.mec.biz.service.SoundSocketService.VIDEO_UPDATE;
 @Service
 public class SoundCompareHandler implements WebSocketEventHandler {
 
-    public static final Logger LOGGER = LoggerFactory.getLogger(SoundCompareHandler.class);
+    private final Logger LOGGER = LoggerFactory.getLogger(SoundCompareHandler.class);
 
     private BigDecimal oneHundred = new BigDecimal(100);
 
@@ -101,9 +99,6 @@ public class SoundCompareHandler implements WebSocketEventHandler {
                 userSoundInfoMap.get(phone).setMusicXmlInfos(musicXmlInfos);
                 musicXmlInfos = musicXmlInfos.stream().filter(m->!m.getDontEvaluating()).collect(Collectors.toList());
                 userSoundInfoMap.get(phone).setMusicScoreId(bodyObject.getInteger("id"));
-                if(bodyObject.containsKey("beatLength")){
-                    userSoundInfoMap.get(phone).setFirstMeasureStartBytes((long) (bodyObject.getLong("beatLength")/1000f*(soundCompareConfig.audioFormat.getFrameSize()*soundCompareConfig.audioFormat.getFrameRate())));
-                }
                 if(bodyObject.containsKey("platform")){
                     userSoundInfoMap.get(phone).setDeviceType(DeviceTypeEnum.valueOf(bodyObject.getString("platform")));
                 }
@@ -153,15 +148,8 @@ public class SoundCompareHandler implements WebSocketEventHandler {
                     File file = new File(tmpDir+phone + "_"+ userSoundInfoMap.get(phone).getMusicScoreId() +"_"+ LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMddHHmmss")) +".wav");
                     userSoundInfoMap.get(phone).setFile(file);
                     userSoundInfoMap.get(phone).setAccessFile(new RandomAccessFile(file, "rw"));
-                    userSoundInfoMap.get(phone).getAccessFile().seek(44);
                     userSoundInfoMap.get(phone).setRecordFilePath(file.getAbsolutePath());
-
-                    AudioRunDispatcher dispatcher = new AudioRunDispatcher(new NioAudioInputStream(userSoundInfoMap.get(phone).getAccessFile(), soundCompareConfig.audioFormat), soundCompareConfig.simpleSize, 0);
-                    dispatcher.addAudioProcessor(userSoundInfoMap.get(phone).silenceDetector);
-                    dispatcher.addAudioProcessor(new PitchProcessor(soundCompareConfig.algo, soundCompareConfig.simpleRate, soundCompareConfig.simpleSize, userSoundInfoMap.get(phone)));
-//                    new Thread(dispatcher, phone).start();
-                    userSoundInfoMap.get(phone).setAudioRunDispatcher(dispatcher);
-                } catch (IOException e) {
+                } catch (FileNotFoundException e) {
                     throw new BizException("文件创建失败:", e);
                 }
                 break;
@@ -190,10 +178,10 @@ public class SoundCompareHandler implements WebSocketEventHandler {
                 createHeader(phone, false);
                 break;
             case SoundSocketService.PROXY_MESSAGE:
-//                if(DeviceTypeEnum.IOS.equals(userSoundInfoMap.get(phone).getDeviceType())&&bodyObject.containsKey(SoundSocketService.OFFSET_TIME)){
-//                    int offsetTime = bodyObject.getIntValue(SoundSocketService.OFFSET_TIME);
-//                    calOffsetTime(phone, offsetTime);
-//                }
+                if(DeviceTypeEnum.IOS.equals(userSoundInfoMap.get(phone).getDeviceType())&&bodyObject.containsKey(SoundSocketService.OFFSET_TIME)){
+                    int offsetTime = bodyObject.getIntValue(SoundSocketService.OFFSET_TIME);
+                    calOffsetTime(phone, offsetTime);
+                }
                 break;
             case VIDEO_UPDATE:
                 SysMusicCompareRecord update = null;
@@ -219,34 +207,24 @@ public class SoundCompareHandler implements WebSocketEventHandler {
         if(!userSoundInfoMap.containsKey(phone)){
             return;
         }
-        //读取波形数据
-//        for (int i = 0; i < message.getPayload().array().length; i+=2) {
-//            System.out.println((double) ((message.getPayload().array()[i]&0x000000FF)|(message.getPayload().array()[i+1]<<8))/32767);
-//        }
-        userSoundInfoMap.get(phone).setRecordBytes(userSoundInfoMap.get(phone).getRecordBytes()+message.getPayloadLength());
-        if(userSoundInfoMap.get(phone).getRecordBytes()<userSoundInfoMap.get(phone).getFirstMeasureStartBytes()){
-            return;
-        }
         try {
             if(Objects.nonNull(userSoundInfoMap.get(phone).getAccessFile())){
-                userSoundInfoMap.get(phone).getAccessFile().seek(userSoundInfoMap.get(phone).getAccessFile().length());
                 userSoundInfoMap.get(phone).getAccessFile().write(message.getPayload().array());
-            }else{
-                return;
             }
 
-            userSoundInfoMap.get(phone).getAudioRunDispatcher().run();
+            AudioDispatcher dispatcher = AudioDispatcherFactory.fromByteArray(message.getPayload().array(), soundCompareConfig.audioFormat, soundCompareConfig.simpleSize, soundCompareConfig.overlap);
 
-//            AudioDispatcher dispatcher = AudioDispatcherFactory.fromByteArray(message.getPayload().array(), soundCompareConfig.audioFormat, soundCompareConfig.simpleSize, soundCompareConfig.overlap);
-//            dispatcher.addAudioProcessor(userSoundInfoMap.get(phone).silenceDetector);
-//            dispatcher.addAudioProcessor(new PitchProcessor(soundCompareConfig.algo, soundCompareConfig.simpleRate, soundCompareConfig.simpleSize, userSoundInfoMap.get(phone)));
-//            dispatcher.run();
+            dispatcher.addAudioProcessor(userSoundInfoMap.get(phone).silenceDetector);
+            dispatcher.addAudioProcessor(new PitchProcessor(soundCompareConfig.algo, soundCompareConfig.simpleRate, soundCompareConfig.simpleSize, userSoundInfoMap.get(phone)));
+            dispatcher.run();
+            if(Objects.isNull(userSoundInfoMap.get(phone).getAccessFile())){
+                return;
+            }
 
             double recordTime = userSoundInfoMap.get(phone).getAccessFile().length()/(soundCompareConfig.audioFormat.getFrameSize()*soundCompareConfig.audioFormat.getFrameRate())*1000;
-//            userSoundInfoMap.get(phone).setMeasureStartTime(recordTime);
-
+            userSoundInfoMap.get(phone).setMeasureStartTime(recordTime);
             for (Map.Entry<Integer, MusicPitchDetailDto> userMeasureEndTimeMapEntry : userSoundInfoMap.get(phone).getMeasureEndTime().entrySet()) {
-                if(recordTime>(userMeasureEndTimeMapEntry.getValue().getEndTimeStamp()+100)){
+                if(recordTime>(userMeasureEndTimeMapEntry.getValue().getEndTimeStamp())){
                     if(userMeasureEndTimeMapEntry.getValue().getDontEvaluating()){
                         continue;
                     }else{
@@ -256,7 +234,7 @@ public class SoundCompareHandler implements WebSocketEventHandler {
                     break;
                 }
             }
-        } catch (IOException e) {
+        } catch (UnsupportedAudioFileException | IOException e) {
             throw new BizException("{}评分异常:{}", phone, e);
         }
     }
@@ -283,7 +261,6 @@ public class SoundCompareHandler implements WebSocketEventHandler {
             musicPitchDetailDtoEntry.getValue().setTimeStamp(musicPitchDetailDtoEntry.getValue().getTimeStamp() + offsetTime);
             musicPitchDetailDtoEntry.getValue().setEndTimeStamp(musicPitchDetailDtoEntry.getValue().getEndTimeStamp() + offsetTime);
         }
-        userSoundInfoMap.get(phone).getMusicXmlInfos().forEach(e->e.setTimeStamp(e.getTimeStamp()+offsetTime));
     }
 
     /**
@@ -304,7 +281,6 @@ public class SoundCompareHandler implements WebSocketEventHandler {
                 randomAccessFile.seek(0);
                 randomAccessFile.write(WavHeader.getWaveHeader(randomAccessFile.length(), (long) soundCompareConfig.audioFormat.getFrameRate(), soundCompareConfig.audioFormat.getSampleSizeInBits()));
                 randomAccessFile.close();
-                userSoundInfoMap.get(phone).getAudioRunDispatcher().stop();
             } catch (IOException e) {
                 e.printStackTrace();
             }
@@ -371,32 +347,111 @@ public class SoundCompareHandler implements WebSocketEventHandler {
             for (int i = 0; i < userSoundInfoMap.get(phone).getMeasureXmlInfoMap().get(measureIndex).size(); i++) {
                 MusicPitchDetailDto musicXmlInfo = userSoundInfoMap.get(phone).getMeasureXmlInfoMap().get(measureIndex).get(i);
 
-                int startTimeStamp = musicXmlInfo.getTimeStamp();
-                int endTimeStamp = musicXmlInfo.getTimeStamp() + musicXmlInfo.getDuration();
+                int ot5 = (int) (musicXmlInfo.getDuration()*0.1);
+                int startTimeStamp = musicXmlInfo.getTimeStamp() + userSoundInfoMap.get(phone).getOffsetTime() + ot5;
+                int endTimeStamp = musicXmlInfo.getTimeStamp()  + userSoundInfoMap.get(phone).getOffsetTime() + musicXmlInfo.getDuration() - ot5;
+
+                //时间范围内有效节奏数量
+                float cadenceValidNum = 0;
+                //时间范围内有效音频数量
+                float integrityValidNum = 0;
+                //时间范围内匹配次数
+                float compareNum = 0;
 
-                int ot5 = (int) (musicXmlInfo.getDuration()*0.22<70?70:musicXmlInfo.getDuration()*0.22);
-                int rightTimeRange = ot5>200?200:ot5;
+                List<MusicPitchDetailDto> measureSoundPitchInfos = new ArrayList<>();
 
-                List<MusicPitchDetailDto> recordPitchs = userSoundInfoMap.get(phone).getRecordMeasurePitchInfos().stream().filter(m -> m.getTimeStamp()>=startTimeStamp-rightTimeRange && m.getTimeStamp() < endTimeStamp-rightTimeRange).collect(Collectors.toList());
+                for (int j = 0; j < userSoundInfoMap.get(phone).getRecordMeasurePithInfo().size(); j++) {
+                    MusicPitchDetailDto recordInfo = userSoundInfoMap.get(phone).getRecordMeasurePithInfo().get(j);
+                    //如果在时间范围之外直接跳过
+                    if(recordInfo.getTimeStamp()<startTimeStamp||recordInfo.getTimeStamp()>endTimeStamp){
+                        continue;
+                    }
+                    measureSoundPitchInfos.add(recordInfo);
+                    compareNum++;
+                    //如果在最低有效频率以下则跳过
+                    if(recordInfo.getFrequency()<soundCompareConfig.validFrequency&&musicXmlInfo.getFrequency()!=-1){
+                        continue;
+                    }
+                    cadenceValidNum++;
+                    //如果频率差值在节奏误差范围内
+//                    if(Math.abs(recordInfo.getFrequency()-musicXmlInfo.getFrequency())<=soundCompareConfig.integrityFrequencyRange){
+//                        integrityValidNum++;
+//                    }
+                }
+
+                //非正常频率次数
+                int errPitchNum = 0;
+                //分贝变化次数
+                int decibelChangeNum = 0;
+
+                if(CollectionUtils.isEmpty(measureSoundPitchInfos)){
+                    userSoundInfoMap.get(phone).getMusicalNotePitchMap().put(musicXmlInfo.getMusicalNotesIndex(), (float) 0);
+                }else{
+                    Map<Integer, Long> collect = measureSoundPitchInfos.stream().map(pitch -> (int)pitch.getFrequency()).collect(Collectors.groupingBy(Integer::intValue, Collectors.counting()));
+                    //出现次数最多的频率
+                    Integer pitch = collect.entrySet().stream().max(Comparator.comparing(e -> e.getValue())).get().getKey();
+                    //当前频率
+                    double cf = -1;
+                    //频率持续数量
+                    int fnum = 0;
+                    //是否演奏中
+                    boolean ing = false;
+                    //当前分贝
+                    double cd = 0;
+                    //分贝变化方向,-1变小,1变大
+                    int dcd = -1;
+                    //分贝持续数量
+                    int dnum = 0;
+                    for (MusicPitchDetailDto musicalNotesPitch : measureSoundPitchInfos) {
+                        //计算频率断层次数
+                        if (Math.abs(musicalNotesPitch.getFrequency() - cf) > 20){
+                            fnum ++;
+                        }
+                        if (fnum>=5){
+                            cf = musicalNotesPitch.getFrequency();
+                            fnum = 0;
+                            if (cf != -1){
+                                errPitchNum ++;
+                                ing = true;
+                                cd = musicalNotesPitch.getDecibel();
+                            }
+                        }
+                        //计算声音大小断层次数
+                        if(ing && Math.abs(musicalNotesPitch.getDecibel() - cd) > 10){
+                            dnum ++;
+                        }
+                        if (dnum > 2){
+                            int tdcd = cd > musicalNotesPitch.getDecibel() ? -1 : 1;
+                            cd = musicalNotesPitch.getDecibel();
+                            dnum = 0;
+                            if (tdcd != dcd) {
+                                decibelChangeNum++;
+                            }
+                            dcd = tdcd;
+                        }
+                    }
+                    userSoundInfoMap.get(phone).getMusicalNotePitchMap().put(musicXmlInfo.getMusicalNotesIndex(), (float) pitch);
+                }
 
                 boolean cadenceRight = false;
                 boolean intonationRight = false;
                 boolean integrityRight = false;
 
-                float integrityDuty = 0;
-                if(recordPitchs.size()>0){
-                    integrityDuty = recordPitchs.get(0).getDuration()/(float)musicXmlInfo.getDuration();
+                //有效节奏占比
+                float cadenceDuty = cadenceValidNum/compareNum;
+                //如果频率出现断层或这个音量出现断层,则当前音符节奏无效
+                if(errPitchNum>=2 || decibelChangeNum>1){
+                    cadenceDuty = 0;
                 }
-                integrityDuty = scoreMapping(integrityDuty, userSoundInfoMap.get(phone).getHeardLevel().getIntegrityRange(), 1);
                 //节奏
-                if(recordPitchs.size()==1){
+                if(cadenceDuty>=userSoundInfoMap.get(phone).getHeardLevel().getCadenceRange()){
                     cadenceNum++;
                     cadenceRight = true;
                 }
                 //音准、完成度
-                if (cadenceRight){
+                if (cadenceRight && !CollectionUtils.isEmpty(measureSoundPitchInfos)){
                     //音准
-                    float avgPitch = recordPitchs.get(0).getFrequency();
+                    Double avgPitch = measureSoundPitchInfos.stream().filter(pitch -> Math.abs((pitch.getFrequency()-musicXmlInfo.getFrequency()))<15).collect(Collectors.averagingDouble(pitch -> pitch.getFrequency()));
                     //音分
                     double recordCents = 0;
                     if (avgPitch > 0){
@@ -406,40 +461,29 @@ public class SoundCompareHandler implements WebSocketEventHandler {
                     if(musicXmlInfo.getFrequency()>0){
                         cents =  PitchConverter.hertzToAbsoluteCent(musicXmlInfo.getFrequency());
                     }
-                    double score = 100 - Math.round(Math.abs(cents - recordCents)) + 3;
+                    double score = 100 - Math.round(Math.abs(cents - recordCents)) + userSoundInfoMap.get(phone).getHeardLevel().getIntonationCentsRange();
                     if (score < 0){
                         score = 0;
                     }else if(score > 100){
                         score = 100;
                     }
-
-                    score = Math.pow(score/100f, userSoundInfoMap.get(phone).getHeardLevel().getIntonationCentsRange())*100;
-
-                    if(Objects.nonNull(userSoundInfoMap.get(phone).getSubjectId())&&userSoundInfoMap.get(phone).getSubjectId()==23){
-                        score = 100;
-                        integrityDuty = 1;
-                    }
-
                     intonationScore += score;
-                    musicXmlInfo.setAvgFrequency(avgPitch);
+                    musicXmlInfo.setAvgFrequency(avgPitch.floatValue());
                     intonationRight = score>70;
 
-                    integrityScore += integrityDuty;
-                    integrityRight = integrityDuty>0.7;
+                    if(score>0){
+                        integrityValidNum = measureSoundPitchInfos.stream().filter(pitch -> Math.abs((pitch.getFrequency()-musicXmlInfo.getFrequency()))<15).count();
+                    }else{
+                        integrityValidNum = 0;
+                    }
                 }
-
-                //如果当前音符不需要演奏
-                if (musicXmlInfo.getFrequency()<0&&recordPitchs.size()<=0){
-                    cadenceNum++;
-                    cadenceRight = true;
-
-                    intonationScore += 100;
-                    musicXmlInfo.setAvgFrequency(-1);
-                    intonationRight = true;
-
-                    integrityScore += 1;
-                    integrityRight = true;
+                //完成度
+                if(integrityValidNum > compareNum){
+                    integrityValidNum = compareNum;
                 }
+                float integrityDuty = integrityValidNum/compareNum;
+                integrityScore += integrityDuty;
+                integrityRight = integrityDuty>0.7;
 
                 if(!cadenceRight){
                     userSoundInfoMap.get(phone).getMusicalNotesPlayStats().add(new MusicalNotesPlayStatDto(musicXmlInfo.getMusicalNotesIndex(), MusicalErrorTypeEnum.CADENCE_WRONG));
@@ -484,11 +528,6 @@ public class SoundCompareHandler implements WebSocketEventHandler {
         createPushInfo(phone, "measureScore", measureIndex, intonation, cadence, integrity);
     }
 
-    private float scoreMapping(float score, float divisor, float maxValue){
-        score = score*divisor;
-        return score>maxValue?maxValue:score;
-    }
-
     /**
      * @describe 计算最终评分
      * @author Joburgess
@@ -504,15 +543,15 @@ public class SoundCompareHandler implements WebSocketEventHandler {
         BigDecimal integrity = BigDecimal.ZERO;
 
         if(currentCompareNum>0){
-            intonation = userSoundInfoMap.get(phone).getUserScoreMap().get("intonation").divide(new BigDecimal(currentCompareNum), 0, BigDecimal.ROUND_UP);
-            cadence = userSoundInfoMap.get(phone).getUserScoreMap().get("cadence").divide(new BigDecimal(currentCompareNum), 0, BigDecimal.ROUND_UP);
-            integrity = userSoundInfoMap.get(phone).getUserScoreMap().get("integrity").divide(new BigDecimal(currentCompareNum), 0, BigDecimal.ROUND_UP);
+            intonation = userSoundInfoMap.get(phone).getUserScoreMap().get("intonation").divide(new BigDecimal(currentCompareNum), 0, BigDecimal.ROUND_DOWN);
+            cadence = userSoundInfoMap.get(phone).getUserScoreMap().get("cadence").divide(new BigDecimal(currentCompareNum), 0, BigDecimal.ROUND_DOWN);
+            integrity = userSoundInfoMap.get(phone).getUserScoreMap().get("integrity").divide(new BigDecimal(currentCompareNum), 0, BigDecimal.ROUND_DOWN);
         }
 
         //计算分数并推送
         createPushInfo(phone, "overall", -1, intonation, cadence, integrity);
 
-        LOGGER.info("评分数据:{}", JSON.toJSONString(userSoundInfoMap.get(phone), SerializerFeature.DisableCircularReferenceDetect));
+        LOGGER.info("评分数据:{}", JSON.toJSONString(userSoundInfoMap.get(phone)));
     }
 
     /**
@@ -531,8 +570,12 @@ public class SoundCompareHandler implements WebSocketEventHandler {
         WebSocketInfo webSocketInfo = new WebSocketInfo();
         webSocketInfo.setHeader(new WebSocketInfo.Head(command));
         Map<String, Object> result = new HashMap<>(5);
-        BigDecimal score  = intonation.add(cadence).add(integrity).divide(new BigDecimal(3), CommonConstants.DECIMAL_PLACE, BigDecimal.ROUND_DOWN).setScale(0, BigDecimal.ROUND_UP);
-
+        //打击乐只看节奏分
+        BigDecimal score = cadence;
+        //非打击乐总分为平均分
+        if(Objects.isNull(userSoundInfoMap.get(phone).getSubjectId())||userSoundInfoMap.get(phone).getSubjectId()!=23){
+            score = intonation.add(cadence).add(integrity).divide(new BigDecimal(3), CommonConstants.DECIMAL_PLACE, BigDecimal.ROUND_DOWN).setScale(0, BigDecimal.ROUND_UP);
+        }
         result.put("score", score);
         result.put("intonation", intonation);
         result.put("cadence", cadence);

+ 13 - 0
mec-biz/src/main/java/com/ym/mec/biz/service/impl/SysMusicCompareRecordServiceImpl.java

@@ -29,6 +29,7 @@ import com.ym.mec.util.date.DateUtil;
 import org.apache.commons.lang3.StringUtils;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;
+import org.springframework.transaction.annotation.Transactional;
 import org.springframework.util.CollectionUtils;
 
 import java.math.BigDecimal;
@@ -131,6 +132,18 @@ public class SysMusicCompareRecordServiceImpl extends BaseServiceImpl<Long, SysM
 	}
 
 	@Override
+	@Transactional
+	public void saveMusicCompareData(SysMusicCompareRecord sysMusicCompareRecord) {
+		
+		Integer userId = sysMusicCompareRecord.getUserId();
+		
+		sysMusicCompareRecordDao.update(sysMusicCompareRecord);
+		studentDao.addStudentCloudStudySequenceDays(userId);
+		sysMusicCompareWeekDataService
+				.updateUserWeekTrainData(userId, LocalDate.now().with(DateUtil.weekFields.dayOfWeek(), DayOfWeek.MONDAY.getValue()));
+	}
+
+	@Override
 	public Object getLastEvaluationMusicalNotesPlayStats(Integer userId, Long recordId) {
 		SysMusicCompareRecord userLastEvaluationData;
 		if(Objects.nonNull(recordId)){

+ 2 - 2
mec-thirdparty/src/main/java/com/ym/mec/thirdparty/eseal/provider/TsignPlugin.java

@@ -58,12 +58,12 @@ public class TsignPlugin implements ESealPlugin, InitializingBean, DisposableBea
         projectconfig.setItsmApiUrl(apisUrl);
         Result result = ServiceClientManager.registClient(projectconfig, null, null);
         if (result.getErrCode() != 0) {
-            throw new ThirdpartyException("e签宝客户端注册失败:{}", result.getMsg());
+            //throw new ThirdpartyException("e签宝客户端注册失败:{}", result.getMsg());
         }
 
         serviceClient = ServiceClientManager.get(projectId);
         if (serviceClient == null) {
-            throw new ThirdpartyException("获取e签宝客户端失败");
+            //throw new ThirdpartyException("获取e签宝客户端失败");
         }
     }
 

+ 9 - 0
mec-thirdparty/src/main/java/com/ym/mec/thirdparty/storage/StoragePlugin.java

@@ -16,6 +16,15 @@ public interface StoragePlugin {
 	String uploadFile(String folderName, File file);
 
 	/**
+	 * 上传文件
+	 * @param folderName 文件夹
+	 * @param file 需要上传的文件
+	 * @param delLocalFile 删除本地文件
+	 * @return 返回文件路径
+	 */
+	String asyncUploadFile(String folderName, File file, boolean delLocalFile);
+
+	/**
 	 * 下载文件
 	 * @param folderName 文件夹
 	 * @param fileName 文件名称

+ 5 - 0
mec-thirdparty/src/main/java/com/ym/mec/thirdparty/storage/StoragePluginContext.java

@@ -24,6 +24,11 @@ public class StoragePluginContext {
 		StoragePlugin StoragePlugin = getStoragePlugin(storagePluginName);
 		return StoragePlugin.uploadFile(folderName, file);
 	}
+	
+	public String asyncUploadFile(String storagePluginName, String folderName, File file, boolean delLocalFile){
+		StoragePlugin StoragePlugin = getStoragePlugin(storagePluginName);
+		return StoragePlugin.asyncUploadFile(folderName, file, delLocalFile);
+	}
 
 	private StoragePlugin getStoragePlugin(String storagePluginName) {
 		StoragePlugin storagePlugin = mapper.get(storagePluginName);

+ 28 - 0
mec-thirdparty/src/main/java/com/ym/mec/thirdparty/storage/provider/AliyunOssStoragePlugin.java

@@ -3,6 +3,7 @@ package com.ym.mec.thirdparty.storage.provider;
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.poi.util.IOUtils;
 import org.springframework.beans.factory.DisposableBean;
 import org.springframework.beans.factory.InitializingBean;
@@ -80,6 +81,33 @@ public class AliyunOssStoragePlugin implements StoragePlugin, InitializingBean,
 	}
 
 	@Override
+	public String asyncUploadFile(String folderName, File file, boolean delLocalFile) {
+		if (!file.exists()) {
+			throw new ThirdpartyException("需要上传的文件[{}]不存在", file.getAbsolutePath());
+		}
+
+		if (folderName.endsWith("/")) {
+			folderName = folderName.substring(0, folderName.length() - 1);
+		}
+		
+		final String dir = folderName;
+		
+		Thread thread = new Thread(new Runnable() {
+			
+			@Override
+			public void run() {
+				ossClient.putObject(bucketName, dir + "/" + file.getName(), file);
+				if(delLocalFile){
+					FileUtils.deleteQuietly(file);
+				}
+			}
+		});
+		thread.start();
+
+		return "https://" + bucketName + "." + endpoint + "/" + folderName + "/" + file.getName();
+	}
+
+	@Override
 	public byte[] getFile(String folderName, String fileName) throws IOException {
 		OSSObject ossObject = ossClient.getObject(bucketName, folderName + "/" + fileName);
 		try {

+ 31 - 0
mec-thirdparty/src/main/java/com/ym/mec/thirdparty/storage/provider/KS3StoragePlugin.java

@@ -3,6 +3,7 @@ package com.ym.mec.thirdparty.storage.provider;
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.poi.util.IOUtils;
 import org.springframework.beans.factory.DisposableBean;
 import org.springframework.beans.factory.InitializingBean;
@@ -93,6 +94,36 @@ public class KS3StoragePlugin implements StoragePlugin, InitializingBean, Dispos
 	}
 
 	@Override
+	public String asyncUploadFile(String folderName, File file, boolean delLocalFile) {
+		if (!file.exists()) {
+			throw new ThirdpartyException("需要上传的文件[{}]不存在", file.getAbsolutePath());
+		}
+
+		if (folderName.endsWith("/")) {
+			folderName = folderName.substring(0, folderName.length() - 1);
+		}
+
+		PutObjectRequest request = new PutObjectRequest(bucketName, folderName + "/" + file.getName(), file);
+
+		// 上传一个公开文件
+		request.setCannedAcl(CannedAccessControlList.PublicRead);
+		
+		Thread thread = new Thread(new Runnable() {
+			
+			@Override
+			public void run() {
+				client.putObject(request);
+				if(delLocalFile){
+					FileUtils.deleteQuietly(file);
+				}
+			}
+		});
+		thread.start();
+
+		return "https://" + bucketName + "." + endpoint + "/" + folderName + "/" + file.getName();
+	}
+
+	@Override
 	public byte[] getFile(String folderName, String fileName) throws IOException {
 		GetObjectRequest request = new GetObjectRequest(bucketName, folderName + "/" + fileName);
 		GetObjectResult result = client.getObject(request);