ASP.NET Web API: calling a non-Task asynchronous method

In my Web API project I have an [HttpPost] method, public HttpResponseMessage saveFiles() {}, which saves some audio files to the server. After saving the files I need to call a method of the Microsoft.Speech server API. The method is asynchronous, but it returns void:

public void RecognizeAsync(RecognizeMode mode); 

I want to wait until this method has finished, and only then return a response to the client with all the information I have collected. I can't use await here, because the method returns void, so I implemented an event: public event RecognitionFinishedHandler RecognitionFinished;

The event is raised when the recognition finishes.
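The usual bridge for this kind of event-completed ("EAP-style") API is a TaskCompletionSource. Here is a minimal sketch, not from the original post: it assumes Microsoft.Speech's SpeechRecognitionEngine raises a RecognizeCompleted event when RecognizeAsync finishes (as its System.Speech counterpart does), and the RecognizeAsTask name is illustrative:

// requires Microsoft.Speech.Recognition and System.Threading.Tasks
public static Task RecognizeAsTask(SpeechRecognitionEngine engine, RecognizeMode mode)
{
    var tcs = new TaskCompletionSource<object>();
    EventHandler<RecognizeCompletedEventArgs> handler = null;
    handler = (s, e) =>
    {
        engine.RecognizeCompleted -= handler; // unsubscribe so the helper is one-shot
        if (e.Error != null) tcs.TrySetException(e.Error);
        else if (e.Cancelled) tcs.TrySetCanceled();
        else tcs.TrySetResult(null);
    };
    engine.RecognizeCompleted += handler;
    engine.RecognizeAsync(mode);
    return tcs.Task;
}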

- EDIT - I wrapped the event in a task, but I think I'm doing something wrong, because I can't get the RecognizeAsync call to actually do its work. It seems the recognition never runs now. Here is my code:

The class containing the speech recognition functionality:

public delegate void RecognitionFinishedHandler(object sender);

public class SpeechActions
{
    public event RecognitionFinishedHandler RecognitionFinished;
    private SpeechRecognitionEngine sre;
    public Dictionary<string, List<TimeSpan>> timeTags; // the times of each tag: "tag": [00:00, 00:23, ...]

    public SpeechActions()
    {
        sre = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
        sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
        sre.AudioStateChanged += new EventHandler<AudioStateChangedEventArgs>(sre_AudioStateChanged);
    }

    /// <summary>
    /// Calculates the tags' appearances in a voice-over wav file.
    /// </summary>
    /// <param name="path">The path to the voice-over wav file.</param>
    public void CalcTagsAppearancesInVO(string path, string[] tags, TimeSpan voLength)
    {
        timeTags = new Dictionary<string, List<TimeSpan>>();
        sre.SetInputToWaveFile(path);

        // load one grammar per tag so only the tags themselves are recognized
        foreach (string tag in tags)
        {
            GrammarBuilder gb = new GrammarBuilder(tag);
            gb.Culture = new System.Globalization.CultureInfo("en-US");
            Grammar g = new Grammar(gb);
            sre.LoadGrammar(g);
        }

        sre.RecognizeAsync(RecognizeMode.Multiple);
    }

    void sre_AudioStateChanged(object sender, AudioStateChangedEventArgs e)
    {
        // the audio state changes to Stopped once the wave file has been consumed
        if (e.AudioState == AudioState.Stopped)
        {
            sre.RecognizeAsyncStop();
            if (RecognitionFinished != null)
            {
                RecognitionFinished(this);
            }
        }
    }

    void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
    {
        string word = e.Result.Text;
        TimeSpan time = e.Result.Audio.AudioPosition;
        if (!timeTags.ContainsKey(word))
        {
            timeTags.Add(word, new List<TimeSpan>());
        }

        // add the found time
        timeTags[word].Add(time);
    }
}

And the controller method that calls it, plus the event handler wiring:

[HttpPost]
public HttpResponseMessage saveFiles()
{
    if (HttpContext.Current.Request.Files.AllKeys.Any())
    {
        string originalFolder = HttpContext.Current.Server.MapPath("~/files/original/");
        string lowFolder = HttpContext.Current.Server.MapPath("~/files/low/");
        string audioFolder = HttpContext.Current.Server.MapPath("~/files/audio/");
        string voiceoverPath = Path.Combine(originalFolder, Path.GetFileName(HttpContext.Current.Request.Files["voiceover"].FileName));
        string outputFile = HttpContext.Current.Server.MapPath("~/files/output/") + "result.mp4";
        string voiceoverWavPath = Path.Combine(audioFolder, "voiceover.wav");
        var voiceoverInfo = Resource.From(voiceoverWavPath).LoadMetadata().Streams.OfType<AudioStream>().ElementAt(0).Info;
        DirectoryInfo di = new DirectoryInfo(originalFolder);

        // speech recognition
        // get tags from video filenames
        string sTags = "";
        di = new DirectoryInfo(HttpContext.Current.Server.MapPath("~/files/low/"));

        foreach (var item in di.EnumerateFiles())
        {
            string filename = item.Name.Substring(0, item.Name.LastIndexOf("."));
            if (item.Name.ToLower().Contains("thumbs") || filename == "voiceover")
            {
                continue;
            }
            sTags += filename + ",";
        }
        if (sTags.Length > 0) // remove last ','
        {
            sTags = sTags.Substring(0, sTags.Length - 1);
        }
        string[] tags = sTags.Split(new char[] { ',' });

        // HERE STARTS THE PROBLEMATIC PART! ----------------------------------------------------
        var task = GetSpeechActionsCalculated(voiceoverWavPath, tags, voiceoverInfo.Duration);

        // now return the times to the client
        var finalTimes = GetFinalTimes(HttpContext.Current.Server.MapPath("~/files/low/"), task.Result.timeTags);
        var goodResponse = Request.CreateResponse(HttpStatusCode.OK, finalTimes);
        return goodResponse;
    }
    return Request.CreateResponse(HttpStatusCode.OK, "no files");
}

private Task<SpeechActions> GetSpeechActionsCalculated(string voPath, string[] tags, TimeSpan voLength)
{
    var tcs = new TaskCompletionSource<SpeechActions>();
    SpeechActions sa = new SpeechActions();
    sa.RecognitionFinished += (s) =>
    {
        tcs.TrySetResult((SpeechActions)s);
    };
    sa.CalcTagsAppearancesInVO(voPath, tags, voLength);

    return tcs.Task;
}

_[Speech sample in MSDN](https://msdn.microsoft.com/en-us/library/ms554584(v=vs.110).aspx)_ – MickyD 2015-03-31 10:52:08


Or a [TAP wrapper over EAP](https://msdn.microsoft.com/en-us/library/hh873178(v=vs.110).aspx#EAP) – 2015-03-31 12:20:41


See also: [a reusable pattern to convert an event into a task](http://stackoverflow.com/q/22783741/1768303) – Noseratio 2015-03-31 21:18:23
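For reference, a hedged sketch of the reusable event-to-task pattern those links describe (the EventTaskHelpers name and shape are illustrative, not taken from the linked posts):

public static class EventTaskHelpers
{
    // Converts a one-shot EventHandler<TArgs> event into an awaitable Task.
    // Subscribe/unsubscribe are passed as delegates because C# events cannot
    // be handed around as first-class values.
    public static Task<TArgs> FromEvent<TArgs>(
        Action<EventHandler<TArgs>> subscribe,
        Action<EventHandler<TArgs>> unsubscribe)
    {
        var tcs = new TaskCompletionSource<TArgs>();
        EventHandler<TArgs> handler = null;
        handler = (sender, args) =>
        {
            unsubscribe(handler); // detach so the publisher does not keep the handler alive
            tcs.TrySetResult(args);
        };
        subscribe(handler);
        return tcs.Task;
    }
}

// usage (illustrative):
// var completedArgs = await EventTaskHelpers.FromEvent<RecognizeCompletedEventArgs>(
//     h => sre.RecognizeCompleted += h,
//     h => sre.RecognizeCompleted -= h);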

Answer


You're almost there with your edit; you just need to make the action async and await the task instead of blocking on its Result:

[HttpPost]
public async Task<HttpResponseMessage> saveFiles()
{
    if (HttpContext.Current.Request.Files.AllKeys.Any())
    {
        ...

        string[] tags = sTags.Split(new char[] { ',' });

        var speechActions = await GetSpeechActionsCalculated(voiceoverWavPath, tags, voiceoverInfo.Duration);

        // now return the times to the client
        var finalTimes = GetFinalTimes(HttpContext.Current.Server.MapPath("~/files/low/"), speechActions.timeTags);
        var goodResponse = Request.CreateResponse(HttpStatusCode.OK, finalTimes);
        return goodResponse;
    }
    return Request.CreateResponse(HttpStatusCode.OK, "no files");
}

Thanks a lot @stephencleary, it works now. I'm still facing some issues with the Speech API, but at least I can wait for the speech recognition to finish. – 2015-04-04 14:53:36