2016-08-05 70 views
0

我在Android应用程序上使用Microsoft Cognitive Services - 语音识别。代码在我的主要活动中时一切正常,但是当我想将对应于语音识别的部分移动到新类时,它会引发错误。Microsoft认知语音服务 - Android

下面是代码示例:

在主要活动

int m_waitSeconds = 0; 
MicrophoneRecognitionClient micClient = null; 
FinalResponseStatus isReceivedResponse = FinalResponseStatus.NotReceived; 
Boolean speechIntent = false; 
SpeechRecognitionMode speechMode = SpeechRecognitionMode.ShortPhrase; 

public enum FinalResponseStatus { NotReceived, OK } 

/** 
* Gets the primary subscription key 
*/ 
public String getPrimaryKey() { 
    return this.getString(R.string.primaryKey); 
} 

/** 
* Gets the secondary subscription key 
*/ 
public String getSecondaryKey() { 
    return this.getString(R.string.secondaryKey); 
} 

/** 
* Gets the LUIS application identifier. 
* @return The LUIS application identifier. 
*/ 
private String getLuisAppId() { 
    return this.getString(R.string.luisAppID); 
} 

/** 
* Gets the LUIS subscription identifier. 
* @return The LUIS subscription identifier. 
*/ 
private String getLuisSubscriptionID() { 
    return this.getString(R.string.luisSubscriptionID); 
} 

/** 
* Gets the default locale. 
* @return The default locale. 
*/ 
private String getDefaultLocale() { 
    return "en-us"; 
} 


/** 
* Handles the Click event of the _startButton control. 
*/ 
private void StartButton_Click(View v) { 

    this.m_waitSeconds = this.speechMode == SpeechRecognitionMode.ShortPhrase ? 20 : 200; 

    if (this.micClient == null) { 
     if (this.speechIntent) { 
      this.WriteLine("--- Start microphone dictation with Intent detection ----"); 

      this.micClient = SpeechRecognitionServiceFactory.createMicrophoneClientWithIntent(
          this, 
          this.getDefaultLocale(), 
          this, 
          this.getPrimaryKey(), 
          this.getSecondaryKey(), 
          this.getLuisAppId(), 
          this.getLuisSubscriptionID()); 
     } 
     else 
     { 
      this.micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(
        this, 
        this.speechMode, 
        this.getDefaultLocale(), 
        this, 
        this.getPrimaryKey(), 
        this.getSecondaryKey()); 
     } 
    } 

    this.micClient.startMicAndRecognition(); 
} 

public void onFinalResponseReceived(final RecognitionResult response) { 
    boolean isFinalDicationMessage = this.speechMode == SpeechRecognitionMode.LongDictation && 
      (response.RecognitionStatus == RecognitionStatus.EndOfDictation || 
        response.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout); 
    if (null != this.micClient && ((this.speechMode == SpeechRecognitionMode.ShortPhrase) || isFinalDicationMessage)) { 
     // we got the final result, so it we can end the mic reco. No need to do this 
     // for dataReco, since we already called endAudio() on it as soon as we were done 
     // sending all the data. 
     this.micClient.endMicAndRecognition(); 
    } 

    if (isFinalDicationMessage) { 
     this.isReceivedResponse = FinalResponseStatus.OK; 
    } 

    Confidence cMax = Confidence.Low; 
    int iMax = 0; 
    if (!isFinalDicationMessage && response.Results.length != 0) { 
     for (int i = 0; i < response.Results.length; i++) { 
      //get the text with highest confidence: 
      if(response.Results[i].Confidence.getValue() > cMax.getValue()){ 
       cMax = response.Results[i].Confidence; 
       iMax = i; 
      } 
     } 
     this.WriteLine(response.Results[iMax].DisplayText); 
    } 

} 



/** 
* Called when a final response is received and its intent is parsed 
*/ 

public void onIntentReceived(final String payload) { 
    this.WriteLine("--- Intent received by onIntentReceived() ---"); 
    this.WriteLine(payload); 
    this.WriteLine(); 
} 

public void onPartialResponseReceived(final String response) { 
    this.WriteLine("--- Partial result received by onPartialResponseReceived() ---"); 
    this.WriteLine(response); 
    this.WriteLine(); 
} 

public void onError(final int errorCode, final String response) { 
    this.WriteLine("--- Error received by onError() ---"); 
    this.WriteLine("Error code: " + SpeechClientStatus.fromInt(errorCode) + " " + errorCode); 
    this.WriteLine("Error text: " + response); 
    this.WriteLine(); 
} 

/** 
* Called when the microphone status has changed. 
* @param recording The current recording state 
*/ 
public void onAudioEvent(boolean recording) { 
    if (!recording) { 
     this.micClient.endMicAndRecognition(); 
    } 
} 

/** 
* Writes the line. 
*/ 
private void WriteLine() { 
    this.WriteLine(""); 
} 

/** 
* Writes the line. 
* @param text The line to write. 
*/ 
private void WriteLine(String text) { 
    System.out.println(text); 
} 

在一个单独的类

public class SpeechRecognition implements ISpeechRecognitionServerEvents 
{ 
int m_waitSeconds = 0; 
private MicrophoneRecognitionClient micClient = null; 
private FinalResponseStatus isReceivedResponse =FinalResponseStatus.NotReceived; 

private Boolean speechIntent = false; 
private SpeechRecognitionMode speechMode=SpeechRecognitionMode.ShortPhrase; 

public enum FinalResponseStatus { NotReceived, OK } 

/** 
* Gets the primary subscription key 
*/ 
public String getPrimaryKey() { 
    return Integer.toString(R.string.primaryKey); 
} 
/** 
* Gets the secondary subscription key 
*/ 
public String getSecondaryKey() { 
    return Integer.toString(R.string.secondaryKey); 
} 

/** 
* Gets the LUIS application identifier. 
* @return The LUIS application identifier. 
*/ 
private String getLuisAppId() { 
    return Integer.toString(R.string.luisAppID); 
} 

/** 
* Gets the LUIS subscription identifier. 
* @return The LUIS subscription identifier. 
*/ 
private String getLuisSubscriptionID() { 
    return Integer.toString(R.string.luisSubscriptionID); 
} 

/** 
* Gets the default locale. 
* @return The default locale. 
*/ 
private String getDefaultLocale() { 
    return "en-us"; 
} 

public void startSpeechRecognition() { 

    this.m_waitSeconds = this.speechMode == SpeechRecognitionMode.ShortPhrase ? 20 : 200; 

    if (this.micClient == null) { 
     if (this.speechIntent) { 
      this.micClient = SpeechRecognitionServiceFactory.createMicrophoneClientWithIntent(
          this.getDefaultLocale(), 
          this, 
          this.getPrimaryKey(), 
          this.getSecondaryKey(), 
          this.getLuisAppId(), 
          this.getLuisSubscriptionID()); 
     } 
     else 
     { 
      this.micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(
        this.speechMode, 
        this.getDefaultLocale(), 
        this, 
        this.getPrimaryKey(), 
        this.getSecondaryKey()); 

     } 
    } 

    this.micClient.startMicAndRecognition(); 


} 

public void endSpeechRecognition(){ 
    this.micClient.endMicAndRecognition(); 
} 


public void onFinalResponseReceived(final RecognitionResult response) { 
    boolean isFinalDicationMessage = this.speechMode == SpeechRecognitionMode.LongDictation && 
      (response.RecognitionStatus == RecognitionStatus.EndOfDictation || 
        response.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout); 
    if (null != this.micClient && ((this.speechMode == SpeechRecognitionMode.ShortPhrase) || isFinalDicationMessage)) { 
     // we got the final result, so it we can end the mic reco. No need to do this 
     // for dataReco, since we already called endAudio() on it as soon as we were done 
     // sending all the data. 
     this.micClient.endMicAndRecognition(); 
    } 

    if (isFinalDicationMessage) { 
     this.isReceivedResponse = FinalResponseStatus.OK; 
    } 

    Confidence cMax = Confidence.Low; 
    int iMax = 0; 
    if (!isFinalDicationMessage && response.Results.length != 0) { 
     for (int i = 0; i < response.Results.length; i++) { 
      //get the text with highest confidence: 
      if(response.Results[i].Confidence.getValue() > cMax.getValue()){ 
       cMax = response.Results[i].Confidence; 
       iMax = i; 
      } 
     } 
     System.out.println("Action to take: " + response.Results[iMax].DisplayText); 
    } 

} 

/** 
* Called when a final response is received and its intent is parsed 
*/ 
public void onIntentReceived(final String payload) { 
    System.out.println("--- Intent received by onIntentReceived() ---"); 
    System.out.println(payload); 
} 

public void onPartialResponseReceived(final String response) { 
    System.out.println("--- Partial result received by onPartialResponseReceived() ---"); 
    System.out.println(response); 
} 

public void onError(final int errorCode, final String response) { 
    System.err.println("--- Error received by onError() ---"); 
    System.err.println("Error code: " + SpeechClientStatus.fromInt(errorCode) + " " + errorCode); 
    System.err.println("Error text: " + response); 
} 

/** 
* Called when the microphone status has changed. 
* @param recording The current recording state 
*/ 
public void onAudioEvent(boolean recording) { 
    System.out.println("--- Microphone status change received by onAudioEvent() ---"); 
    System.out.println("********* Microphone status: " + recording + " *********"); 
    if (recording) { 
     System.out.println("Please start speaking."); 
    } 

    if (!recording) { 
     this.micClient.endMicAndRecognition(); 
    } 
} 

}

可以清除当我调用this.micClient.startMicAndRecognition()后,它会在onError函数中给我这个错误。 :

Error code: LoginFailed -1910505470 
+0

你有意在新类中使用不同的'createMicrophoneClient'构造函数吗?您是否在原始类中测试过这些构造函数? – brandall

+0

是的,我故意使用不同的构造函数,因为它不需要Activity作为参数。无论如何,我在主要活动中尝试了两个构造函数,并且它们工作得很好。 当我尝试创建一个MicClient时,当我开始录制时弹出错误,构造函数返回一个micClient。 @brandall – Najj

+0

好的。那么剩下的只是一个线程问题?尝试从UI线程运行代码,然后测试后台线程。确保你不是从不同的线程调用构造函数和'startSpeechRecognition'。如果这是您的意图,请将'MicrophoneRecognitionClient'设置为'volatile'。 – brandall

回答

0

我解决了这个问题。 我得到一个错误的主键在这行代码:

public String getPrimaryKey() { 
    return Integer.toString(R.string.primaryKey); 
} 

这是因为主键太长由处理器为一个整数然后转化为String读取。我必须通过getString(R.string.primaryKey)从主要活动获得主键,然后将其作为参数传递给SpeechRecognition类的构造函数。