从麦克风流,添加效果并保存到wav文件使用tarsos android库

问题描述:

注:我使用的是android studio,目前我使用最新的tarsos audio library,它应该与android兼容,并且实际上已经成功添加该库到我的android studio项目。我以前尝试过使用JTransforms和Minim库,但没有运气。 编辑8/23/17:找到并修复了一些错误,重新发布了当前代码,仍然没有取得进展,实际问题总结如下:从麦克风流,添加效果并保存到wav文件使用tarsos android库

摘要:在第5个代码块中,我发布了第15行的注释我需要知道如何让这条线路工作,而不是抛出一个编译错误

我想要做的是从麦克风录制,并在录制时使用tarsos库中的dsp BandPass过滤器并输出结果传输到.wav文件。我可以通过使用android.media导入将麦克风成功传输到this tutorial之后的.wav文件,但这不允许我添加BandPass过滤器,并且使用tarsos导入函数不允许使用save .wav方法that tutorial has,我知道我错过了某些东西和/或做错了什么,但是我一直在使用Google搜索近一周,并且没有找到工作解决方案,我只找到了指向java文件的链接这是在图书馆内,这是没有帮助的,因为我找不到教程如何正确使用它们。我究竟做错了什么?下面是tarsos方法,我尝试使用相关代码:

相关进口和“全球”变量

import android.media.AudioRecord; 
import android.media.MediaRecorder; 
import android.media.AudioFormat; 
import android.media.AudioTrack; 

import be.tarsos.dsp.AudioDispatcher; 
import be.tarsos.dsp.AudioProcessor; 
import be.tarsos.dsp.filters.BandPass; 
import be.tarsos.dsp.io.android.AudioDispatcherFactory; 

//start the class 

AudioRecord alteredRecord = null; 
AudioDispatcher dispatcher; 
float freqChange; 
float tollerance; 
private static final int RECORDER_BPP = 16; 
private static final String AUDIO_RECORDER_FOLDER = "Crowd_Speech"; 
private static final String AUDIO_RECORDER_TEMP_FILE = "record_temp.raw"; 
private static final int RECORDER_SAMPLERATE = 44100; 
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_MONO; 
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT; 
private int bufferSize = 1024; 
private Thread recordingThread = null; 

//set the min buffer size in onCreate event 
bufferSize = AudioRecord.getMinBufferSize(RECORDER_SAMPLERATE, 
RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING)*4; 

这将启动一个onClick方法中的MIC录音,并通过注释/取消注释2“跑步”的变量值我可以过滤或不过滤器(机器人或tarsos函数)时的startRecording方法被调用之间进行切换的一个

if(crowdFilter && running==0 && set==0){//crowd speech mode, start talking 
    Icons(2,""); 
    running=4;//start recording from mic, apply bandpass filter and save as wave file using TARSOS import 
    //running=5;//start recording from mic, no filter, save as wav file using android media import 
    freqChange = Globals.minFr[Globals.curUser]; 
    tollerance = 40; 
    set=1; 
    startRecording(); 
} 

开始记录方法:

一个onClick方法

if(crowdFilter && (running==4 || running==5) && set==0) {//crowd speech finished talking 
    Icons(1, ""); 
    stopRecording(); 
    set = 1; 
} 

内部

private void startRecording() { 

    if (running == 5) {//start recording from mic, no filter, save as wav file using android media library 
     alteredRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, RECORDER_SAMPLERATE, RECORDER_CHANNELS,RECORDER_AUDIO_ENCODING, bufferSize); 
      alteredRecord.startRecording(); 
      isRecording = true; 
      recordingThread = new Thread(new Runnable() { 
       @Override 
       public void run() { 
        writeAudioDataToFile(); 
       } 
      }, "Crowd_Speech Thread"); 
      recordingThread.start(); 
    } 

    if (running == 4) {//start recording from mic, apply bandpass filter and save as wave file using TARSOS library 
     dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(RECORDER_SAMPLERATE, bufferSize, 0); 
     AudioProcessor p = new BandPass(freqChange, tollerance, RECORDER_SAMPLERATE); 
     dispatcher.addAudioProcessor(p); 
      isRecording = true; 
      dispatcher.run(); 
      recordingThread = new Thread(new Runnable() { 
       @Override 
       public void run() { 
        writeAudioDataToFile(); 
       } 
      }, "Crowd_Speech Thread"); 
      recordingThread.start(); 
    } 
} 

停止记录按钮这两种情况都很好,直到此时,如果(施加tarsos DSP滤波器)运行== 4程序崩溃。如果我使用运行== 5(android.media方式没有过滤器),其余的工作正常并保存文件,但没有应用BandPass效果。如果我试着用tarsos dispatcher = AudioDispatcherFactory ...(比如dispatcher = new AudioRecord ...)替换changedRecord = new AudioRecord ...,它们是不兼容的,甚至不会考虑编译。 (这就是为什么在下面的方法行15注释掉)

private void writeAudioDataToFile(){ 
    byte data[] = new byte[bufferSize]; 
    String filename = getTempFilename(); 
    FileOutputStream os = null; 
    try { 
     os = new FileOutputStream(filename); 
    } catch (FileNotFoundException e) { 
     e.printStackTrace(); 
    } 
    int read = 0; 
    if(null != os){ 
     while(isRecording){ 
      if(running==4) 
      { 
       //read = dispatcher.(data, 0, bufferSize); 
      } 
      if(running==5) 
      { 
       read = alteredRecord.read(data, 0, bufferSize); 
      } 
      if(AudioRecord.ERROR_INVALID_OPERATION != read){ 
       try { 
        os.write(data); 
       } catch (IOException e) { 
        e.printStackTrace(); 
       } 
      } 
     } 
     try { 
      os.close(); 
     } catch (IOException e) { 
      e.printStackTrace(); 
     } 
    } 
} 

private void stopRecording(){ 
    if(null != alteredRecord) { 
     isRecording = false; 
     int i = alteredRecord.getState(); 
     if (i == 1) { 
      running = 0; 
      alteredRecord.stop(); 
      alteredRecord.release(); 
      alteredRecord = null; 
      recordingThread = null; 
     } 
    } 
     if(null !=dispatcher){ 
      isRecording = false; 
      running = 0; 
      dispatcher.stop(); 
      recordingThread = null; 
     } 
    copyWaveFile(getTempFilename(),getFilename()); 
    deleteTempFile(); 
} 

private void deleteTempFile() { 
    File file = new File(getTempFilename()); 
    file.delete(); 
} 

private void copyWaveFile(String inFilename,String outFilename){ 
    FileInputStream in = null; 
    FileOutputStream out = null; 
    long totalAudioLen = 0; 
    long totalDataLen = totalAudioLen + 36; 
    long longSampleRate = RECORDER_SAMPLERATE; 
    int channels = 1; 
    long byteRate = RECORDER_BPP * RECORDER_SAMPLERATE * channels/8; 
    byte[] data = new byte[bufferSize]; 
    try { 
     in = new FileInputStream(inFilename); 
     out = new FileOutputStream(outFilename); 
     totalAudioLen = in.getChannel().size(); 
     totalDataLen = totalAudioLen + 36; 
     WriteWaveFileHeader(out, totalAudioLen, totalDataLen, 
       longSampleRate, channels, byteRate); 
     while(in.read(data) != -1){ 
      out.write(data); 
     } 
     in.close(); 
     out.close(); 
    } catch (FileNotFoundException e) { 
     e.printStackTrace(); 
    } catch (IOException e) { 
     e.printStackTrace(); 
    } 
} 

private void WriteWaveFileHeader(
     FileOutputStream out, long totalAudioLen, 
     long totalDataLen, long longSampleRate, int channels, 
     long byteRate) throws IOException { 
    byte[] header = new byte[44]; 
    header[0] = 'R';header[1] = 'I'; header[2] = 'F';header[3] = 'F';// RIFF/WAVE header 
    header[4] = (byte) (totalDataLen & 0xff); 
    header[5] = (byte) ((totalDataLen >> 8) & 0xff); 
    header[6] = (byte) ((totalDataLen >> 16) & 0xff); 
    header[7] = (byte) ((totalDataLen >> 24) & 0xff); 
    header[8] = 'W';header[9] = 'A';header[10] = 'V';header[11] = 'E';header[12] = 'f';header[13] = 'm';header[14] = 't';header[15] = ' ';// 'fmt ' chunk 
    header[16] = 16;header[17] = 0;header[18] = 0;header[19] = 0;// 4 bytes: size of 'fmt ' chunk 
    header[20] = 1;header[21] = 0;header[22] = (byte) channels;header[23] = 0;// format = 1 
    header[24] = (byte) (longSampleRate & 0xff);header[25] = (byte) ((longSampleRate >> 8) & 0xff);header[26] = (byte) ((longSampleRate >> 16) & 0xff); 
    header[27] = (byte) ((longSampleRate >> 24) & 0xff);header[28] = (byte) (byteRate & 0xff);header[29] = (byte) ((byteRate >> 8) & 0xff); 
    header[30] = (byte) ((byteRate >> 16) & 0xff); header[31] = (byte) ((byteRate >> 24) & 0xff); 
    header[32] = (byte) (2 * 16/8);header[33] = 0;// block align 
    header[34] = RECORDER_BPP;header[35] = 0;header[36] = 'd';header[37] = 'a';header[38] = 't';header[39] = 'a'; 
    header[40] = (byte) (totalAudioLen & 0xff);header[41] = (byte) ((totalAudioLen >> 8) & 0xff);header[42] = (byte) ((totalAudioLen >> 16) & 0xff); 
    header[43] = (byte) ((totalAudioLen >> 24) & 0xff);// bits per sample 
    out.write(header, 0, 44); 
} 

解决,你需要使用作家的功能,而不是与任何所需要的方法懒得从android.media进口保存WAV文件函数,这是从我开始记录的方法改变的工作代码段:

if (running == 4) {//start recording from mic, apply bandpass filter and save as wave file using TARSOS library 
     dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(RECORDER_SAMPLERATE, bufferSize, 0); 
     AudioProcessor p = new BandPass(freqChange, tollerance, RECORDER_SAMPLERATE); 
     dispatcher.addAudioProcessor(p); 
     isRecording = true; 
     // Output 
     RandomAccessFile outputFile = new RandomAccessFile(getFilename(), "rw"); 
     TarsosDSPAudioFormat outputFormat = new TarsosDSPAudioFormat(44100, 16, 1, true, false); 
     WriterProcessor writer = new WriterProcessor(outputFormat, outputFile); 
     dispatcher.addAudioProcessor(writer); 
     recordingThread = new Thread(new Runnable() { 
      @Override 
      public void run() { 
       dispatcher.run(); 
      } 
     }, "Crowd_Speech Thread"); 
     recordingThread.start(); 
    }