Project Oxford Speech Recognition API

I downloaded a sample application from the Project Oxford website and have been trying to get it to work. The code works when a WAV file is given as input, but when I try to make it work with the microphone, it does not. I have already tried the following:

  • Running Visual Studio in Admin mode

  • Checking the microphone privacy settings

  • Modifying the code in various ways to get it working

  • The primary/secondary key should be fine, since recognition works with the WAV files but not with the microphone.

Below is the modified sample code:

using MicrosoftProjectOxford;
using System;
using System.Configuration;
using System.Diagnostics;
using System.IO;
using System.Windows;
namespace MicrosoftProjectOxfordExample
{
    /// <summary>
    /// Interaction logic for MainWindow.xaml
    /// </summary>
    public partial class MainWindow : Window
    {
        string m_primaryOrSecondaryKey = ConfigurationManager.AppSettings["primaryKey"];
        private const string LongWaveFile = @"batman.wav";
        private const string ShortWaveFile = @"whatstheweatherlike.wav";
        private string m_filename = null;
        private SpeechRecognitionMode m_recoMode = SpeechRecognitionMode.ShortPhrase;
        private bool m_isMicrophoneReco = true;
        private DataRecognitionClient m_dataClient;
        private MicrophoneRecognitionClient m_micClient;
        /// <summary>
        /// Initializes a new instance of the <see cref="MainWindow"/> class.
        /// </summary>
        public MainWindow()
        {
            InitializeComponent();
            Initialize();
        }
        private void Initialize()
        {
            if (m_isMicrophoneReco) {
                m_micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(m_recoMode, "en-us", m_primaryOrSecondaryKey);
                // Event handlers for speech recognition results
                m_micClient.OnResponseReceived += this.OnResponseReceivedHandler;
                m_micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
                m_micClient.OnConversationError += this.OnConversationErrorHandler;
                m_micClient.OnMicrophoneStatus += this.OnMicrophoneStatus;
            }
            else {
                m_dataClient = SpeechRecognitionServiceFactory.CreateDataClient(m_recoMode, "en-us", m_primaryOrSecondaryKey);

                // Event handlers for speech recognition results
                m_dataClient.OnResponseReceived += this.OnResponseReceivedHandler;
                m_dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
                m_dataClient.OnConversationError += this.OnConversationErrorHandler;
            }
        }
        /// <summary>
        /// Handles the Click event of the button1 control.
        /// </summary>
        /// <param name="sender">The source of the event.</param>
        /// <param name="e">The <see cref="RoutedEventArgs"/> instance containing the event data.</param>
        private void button1_Click(object sender, RoutedEventArgs e)
        {
            this.editText1.Text = String.Empty;
            this.button1.IsEnabled = false;
            m_micClient.StartMicAndRecognition();
        }
        private void button3_Click(object sender, RoutedEventArgs e)
        {
            int waitSeconds = 15;
            bool isReceivedResponse = m_micClient.WaitForFinalResponse(waitSeconds * 1000);
            // We are done sending audio.  Final recognition results will arrive in OnResponseReceived event call.
            m_micClient.EndMicAndRecognition();
            this.WriteLine("App Done.");

        }
        /// <summary>
        /// Handles the Click event of the button2 control.
        /// </summary>
        /// <param name="sender">The source of the event.</param>
        /// <param name="e">The <see cref="RoutedEventArgs"/> instance containing the event data.</param>
        private void button2_Click(object sender, RoutedEventArgs e)
        {
            bool isReceivedResponse = false;
            int waitSeconds = (m_recoMode == SpeechRecognitionMode.LongDictation) ? 200 : 15;
            // sleep until the final result in OnResponseReceived event call, or waitSeconds, whichever is smaller.
            if (null != m_dataClient)
            {
                isReceivedResponse = m_dataClient.WaitForFinalResponse(waitSeconds * 1000);
                this.WriteLine("App Done.");
                this.button1.IsEnabled = false;
                m_dataClient.Dispose();
                m_dataClient = null;
            }
            if (null != m_micClient)
            {
                isReceivedResponse = m_micClient.WaitForFinalResponse(waitSeconds * 1000);
                // We are done sending audio.  Final recognition results will arrive in OnResponseReceived event call.
                m_micClient.EndMicAndRecognition();
                this.WriteLine("App Done.");
                this.button1.IsEnabled = false;
                m_micClient.Dispose();
                m_micClient = null;
            }
            if (!isReceivedResponse)
            {
                this.button1.IsEnabled = false;
                this.WriteLine("{0}: Timed out waiting for conversation response after {1} ms",
                               DateTime.UtcNow, waitSeconds * 1000);
            }
        }
        /// <summary>
        /// Writes the line.
        /// </summary>
        void WriteLine()
        {
            this.WriteLine(string.Empty);
        }
        /// <summary>
        /// Writes the line.
        /// </summary>
        /// <param name="format">The format.</param>
        /// <param name="args">The arguments.</param>
        void WriteLine(string format, params object [] args)
        {
            var formattedStr = string.Format(format, args);
            Trace.WriteLine(formattedStr);
            this.Dispatcher.Invoke(() => {
                this.editText1.Text += (formattedStr + "\n");
            });
        }
        /// <summary>
        ///     Called when a final response is received; 
        /// </summary>
        void OnResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
        {
            bool isFinalDictationMessage = m_recoMode == SpeechRecognitionMode.LongDictation &&
                                          (e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation ||
                                           e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout);
            if ((m_recoMode == SpeechRecognitionMode.ShortPhrase) || isFinalDictationMessage) {
                this.Dispatcher.Invoke((Action)(() => {
                    this.button1.IsEnabled = true;
                }));                
            }
            if (m_isMicrophoneReco && ((m_recoMode == SpeechRecognitionMode.ShortPhrase) || isFinalDictationMessage)) {
                // We got the final result, so we can end the mic reco. No need to do this
                // for dataReco, since we already called endAudio() on it as soon as we were done
                // sending all the data.
                m_micClient.EndMicAndRecognition();
            }
            if (!isFinalDictationMessage)
            {
                this.WriteLine("********* Final NBEST Results *********");
                for (int i = 0; i < e.PhraseResponse.Results.Length; i++)
                {
                    this.WriteLine("[{0}] Confidence={1} Text='"{2}'"",
                                   i, e.PhraseResponse.Results[i].Confidence,
                                   e.PhraseResponse.Results[i].DisplayText);
                }
                this.WriteLine();
            }
        }
        /// <summary>
        ///     Called when a final response is received and its intent is parsed 
        /// </summary>
        void OnIntentHandler(object sender, SpeechIntentEventArgs e)
        {
            this.WriteLine("********* Final Intent *********");
            this.WriteLine("{0}", e.Payload);
            this.WriteLine();
        }
        /// <summary>
        ///     Called when a partial response is received.
        /// </summary>
        void OnPartialResponseReceivedHandler(object sender, PartialSpeechResponseEventArgs e)
        {
            this.WriteLine("********* Partial Result *********");
            this.WriteLine("{0}", e.PartialResult);
            this.WriteLine();
        }
        /// <summary>
        ///     Called when an error is received.
        /// </summary>
        void OnConversationErrorHandler(object sender, SpeechErrorEventArgs e)
        {
            this.Dispatcher.Invoke((Action) (() => {
                this.button1.IsEnabled = true;
            }));
            this.WriteLine("********* Error Detected *********");
            this.WriteLine("{0}", e.SpeechErrorCode.ToString());
            this.WriteLine("{0}", e.SpeechErrorText);
            this.WriteLine();
        }
        /// <summary>
        ///     Called when the microphone status has changed.
        /// </summary>
        void OnMicrophoneStatus(object sender, MicrophoneEventArgs e)
        {
            //if (!e.Recording) {
            //    m_micClient.EndMicAndRecognition();
            //    this.Dispatcher.Invoke((Action) (() => {
            //        this.button1.IsEnabled = true;
            //    }));
            //}
            this.WriteLine("********* Microphone status: {0} *********", e.Recording);
            this.WriteLine();
        }

    }
}
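For completeness: the code reads the subscription key via ConfigurationManager.AppSettings["primaryKey"], so the project's App.config needs a matching appSettings entry. A minimal sketch (the value is a placeholder for your own subscription key):

<configuration>
  <appSettings>
    <!-- placeholder: replace with your own Project Oxford subscription key -->
    <add key="primaryKey" value="YOUR_SUBSCRIPTION_KEY" />
  </appSettings>
</configuration>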

I had the same problem. I finally got it working by disabling every recording device in the Windows Sound panel except my microphone.
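If you want to see which capture devices Windows exposes (and which one sits at device index 0, the usual default) before disabling anything, here is a minimal sketch using the NAudio NuGet package — an assumption on my part, it is not part of the Project Oxford SDK:

using System;
using NAudio.Wave;

class ListCaptureDevices
{
    static void Main()
    {
        // Enumerate the recording (capture) devices visible to the WaveIn API.
        // If device 0 is not your microphone, audio may be captured from the
        // wrong input even though the microphone itself works fine.
        for (int i = 0; i < WaveIn.DeviceCount; i++)
        {
            WaveInCapabilities caps = WaveIn.GetCapabilities(i);
            Console.WriteLine("Device {0}: {1} ({2} channels)", i, caps.ProductName, caps.Channels);
        }
    }
}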

I had a similar issue: I had to make sure my microphone's output was mono, not stereo. Also make sure to check the format of the audio files you are using. Straight from the Project Oxford API documentation:

The audio file format must meet the following requirements:

  • Container: WAV
  • Encoding: PCM
  • Rate: 16 kHz
  • Sample format: 16-bit
  • Channels: mono
https://dev.projectoxford.ai/docs/services/563309b6778daf02acc0a508/operations/5645c523778daf217c292592
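To verify a file against these requirements before sending it, here is a small sketch that reads the header fields with a plain BinaryReader. It assumes a canonical layout where the "fmt " chunk directly follows the RIFF header, which holds for typical WAV files; files with extra chunks would need a proper chunk scanner.

using System;
using System.IO;

class CheckWavFormat
{
    static void Main(string[] args)
    {
        using (var reader = new BinaryReader(File.OpenRead(args[0])))
        {
            // Canonical WAV header offsets:
            // 20: audio format (1 = PCM), 22: channel count,
            // 24: sample rate, 34: bits per sample.
            reader.BaseStream.Seek(20, SeekOrigin.Begin);
            short audioFormat = reader.ReadInt16();   // 1 = PCM
            short channels = reader.ReadInt16();      // 1 = mono expected
            int sampleRate = reader.ReadInt32();      // 16000 expected
            reader.BaseStream.Seek(34, SeekOrigin.Begin);
            short bitsPerSample = reader.ReadInt16(); // 16 expected

            bool ok = audioFormat == 1 && channels == 1 &&
                      sampleRate == 16000 && bitsPerSample == 16;
            Console.WriteLine("PCM={0} Channels={1} Rate={2} Bits={3} -> {4}",
                              audioFormat, channels, sampleRate, bitsPerSample,
                              ok ? "OK" : "does not meet the requirements");
        }
    }
}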