Speech recognition engine stops recognizing when the computer is locked

I am trying to create a speech recognition program that needs to run on a locked Windows computer, as part of a home automation project. But it seems that the speech recognition engine stops recognizing while the computer is locked (and resumes when the computer is unlocked).

My current test program looks like this:

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Microsoft.Speech.Recognition;
using System.Globalization;
namespace WindowsFormsApplication1
{
    public partial class Form1 : Form
    {
        SpeechRecognitionEngine sre;
        public Form1()
        {
            InitializeComponent();
            // Create a recognizer for US English
            CultureInfo ci = new CultureInfo("en-us");
            sre = new SpeechRecognitionEngine(ci);
            // Use the default audio device; this input stops delivering
            // audio while the workstation is locked
            sre.SetInputToDefaultAudioDevice();
            // Load a single test phrase and recognize continuously
            GrammarBuilder gb = new GrammarBuilder("Hello");
            sre.LoadGrammarAsync(new Grammar(gb));
            sre.SpeechRecognized += sre_SpeechRecognized;
            sre.RecognizeAsync(RecognizeMode.Multiple);
        }
        void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Log each recognized phrase with a timestamp
            listBox1.Items.Add(DateTime.Now.ToString() + " " + e.Result.Text);
        }
    }
}

I am wondering whether it is possible to change the input of the speech recognition engine (perhaps using the SetInputToAudioStream or SetInputToWaveStream methods) to a live audio stream from the microphone, and whether that would work around the problem, since the microphone does not seem to be turned off when the computer is locked (I have tried recording with it).

Unfortunately, I have not yet found a way to get a live stream of the microphone input.
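
For reference, these are the shapes of the two stream-based input methods on SpeechRecognitionEngine that I had in mind (SetInputToAudioStream takes raw audio plus an explicit format descriptor, while SetInputToWaveStream expects a stream containing waveform-audio (WAV) data):

// Stream-based input methods on Microsoft.Speech.Recognition.SpeechRecognitionEngine:
// raw audio plus an explicit format descriptor...
public void SetInputToAudioStream(Stream audioSource, SpeechAudioFormatInfo audioFormat);
// ...or a stream containing waveform-audio (WAV) data
public void SetInputToWaveStream(Stream audioSource);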

I have since found a solution, using NAudio (http://naudio.codeplex.com/) and the SpeechStreamer class from this StackOverflow answer (https://stackoverflow.com/a/11813276/2950065).

The updated test program, which keeps recognizing while the computer is locked, looks like this:

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Microsoft.Speech.Recognition;
using System.Globalization;
using NAudio.Wave;
using System.IO;
using System.IO.Pipes;
namespace WindowsFormsApplication1
{
    public partial class Form1 : Form
    {
        SpeechRecognitionEngine sre;
        WaveIn wi;
        SpeechStreamer ss;
        public Form1()
        {
            InitializeComponent();
            // Use a function callback instead of NAudio's default window-based
            // callback, so capture does not depend on a window message pump
            WaveCallbackInfo callbackInfo = WaveCallbackInfo.FunctionCallback();
            wi = new WaveIn(callbackInfo);
            // SpeechStreamer (from the linked answer) buffers the captured
            // audio and never reports end-of-stream to the recognizer
            ss = new SpeechStreamer(100000);
            wi.DataAvailable += wi_DataAvailable;
            wi.StartRecording();
            CultureInfo ci = new CultureInfo("en-us");
            sre = new SpeechRecognitionEngine(ci);
            // The default format for WaveIn is 8000 samples/sec, 16 bit, 1 channel
            Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo safi =
                new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(
                    8000,
                    Microsoft.Speech.AudioFormat.AudioBitsPerSample.Sixteen,
                    Microsoft.Speech.AudioFormat.AudioChannel.Mono);
            // Read recognition input from the NAudio-fed stream instead of the default device
            sre.SetInputToAudioStream(ss, safi);
            GrammarBuilder gb = new GrammarBuilder("Hello");
            sre.LoadGrammarAsync(new Grammar(gb));
            sre.SpeechRecognized += sre_SpeechRecognized;
            sre.RecognizeAsync(RecognizeMode.Multiple);
        }
        void wi_DataAvailable(object sender, WaveInEventArgs e)
        {
            // Forward each captured buffer to the stream the recognizer reads from
            ss.Write(e.Buffer, 0, e.BytesRecorded);
        }
        void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            listBox1.Items.Add(DateTime.Now.ToString() + " " + e.Result.Text);
        }
    }
}
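
Note that SpeechStreamer is not part of NAudio or Microsoft.Speech; it comes from the linked StackOverflow answer. The key idea is a Stream whose Read blocks until more audio arrives instead of ever returning 0, so the recognition engine never sees end-of-stream. A minimal sketch of that idea might look like the following (a simplification; the class name and details here are illustrative, not the exact class from the answer):

using System;
using System.IO;
using System.Threading;

// Illustrative simplification of the SpeechStreamer idea from
// https://stackoverflow.com/a/11813276/2950065: a circular-buffer Stream
// whose Read blocks waiting for data instead of returning 0.
class NeverEndingSpeechStream : Stream
{
    private readonly byte[] _buffer;
    private readonly object _lock = new object();
    private readonly AutoResetEvent _dataAvailable = new AutoResetEvent(false);
    private int _readPos;
    private int _writePos;
    private int _count;

    public NeverEndingSpeechStream(int capacity)
    {
        _buffer = new byte[capacity];
    }

    public override int Read(byte[] buffer, int offset, int count)
    {
        int read = 0;
        while (read < count)
        {
            lock (_lock)
            {
                while (_count > 0 && read < count)
                {
                    buffer[offset + read] = _buffer[_readPos];
                    _readPos = (_readPos + 1) % _buffer.Length;
                    _count--;
                    read++;
                }
            }
            if (read < count)
            {
                // Block until WaveIn delivers more audio; never return 0,
                // so the engine keeps recognizing indefinitely
                _dataAvailable.WaitOne();
            }
        }
        return read;
    }

    public override void Write(byte[] buffer, int offset, int count)
    {
        lock (_lock)
        {
            for (int i = 0; i < count; i++)
            {
                _buffer[_writePos] = buffer[offset + i];
                _writePos = (_writePos + 1) % _buffer.Length;
                if (_count == _buffer.Length)
                    _readPos = (_readPos + 1) % _buffer.Length; // full: overwrite oldest
                else
                    _count++;
            }
        }
        _dataAvailable.Set();
    }

    public override bool CanRead { get { return true; } }
    public override bool CanWrite { get { return true; } }
    public override bool CanSeek { get { return false; } }
    public override long Length { get { return -1; } }
    public override long Position { get { return 0; } set { } }
    public override void Flush() { }
    public override long Seek(long offset, SeekOrigin origin) { return 0; }
    public override void SetLength(long value) { }
}

With WaveCallbackInfo.FunctionCallback() driving WaveIn and the recognizer reading from this kind of stream instead of the default audio device, recognition keeps running while the workstation is locked.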