Transcribing audio from streaming input

This section demonstrates how to transcribe streaming audio, such as the input from a microphone, to text.

Streaming speech recognition lets you stream audio to Cloud Speech-to-Text and receive streaming speech recognition results in real time as the audio is processed. See also the audio limits for streaming speech recognition requests. Streaming speech recognition is available only via gRPC.
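
In every client library shown on this page, the first StreamingRecognizeRequest on the stream carries only the streaming configuration, and each subsequent request carries only audio data. The following is a minimal Python sketch of that ordering, assuming the same google-cloud-speech client used in the samples below; the file path and chunk size are illustrative placeholders, not values from this page.

# Minimal sketch of the streaming request ordering, assuming the
# google-cloud-speech Python client used in the samples below.
# The file path and chunk size are illustrative assumptions.
import io

from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types

client = speech.SpeechClient()

config = types.RecognitionConfig(
    encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=16000,
    language_code='en-US')
streaming_config = types.StreamingRecognitionConfig(config=config)


def audio_requests(path, chunk_size=32 * 1024):
    # Each request after the configuration carries only audio_content;
    # the client library sends streaming_config as the first message.
    with io.open(path, 'rb') as audio_file:
        while True:
            chunk = audio_file.read(chunk_size)
            if not chunk:
                return
            yield types.StreamingRecognizeRequest(audio_content=chunk)


# Responses arrive as the audio is processed.
responses = client.streaming_recognize(
    streaming_config, audio_requests('/path/to/audio.raw'))

for response in responses:
    for result in response.results:
        print(result.alternatives[0].transcript)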

Performing streaming speech recognition on a local file

Speech-to-Text v1 is officially released and is generally available from the https://speech.googleapis.com/v1/speech endpoint. The client libraries are released as alpha and may be changed in backward-incompatible ways. The client libraries are not currently recommended for production use.

These samples require that you have set up gcloud and have created and activated a service account. For information about setting up gcloud, and creating and activating a service account, see the quickstart.

Here is an example of performing streaming speech recognition on a local audio file:

C#

static async Task<object> StreamingRecognizeAsync(string filePath)
{
    var speech = SpeechClient.Create();
    var streamingCall = speech.StreamingRecognize();
    // Write the initial request with the config.
    await streamingCall.WriteAsync(
        new StreamingRecognizeRequest()
        {
            StreamingConfig = new StreamingRecognitionConfig()
            {
                Config = new RecognitionConfig()
                {
                    Encoding =
                    RecognitionConfig.Types.AudioEncoding.Linear16,
                    SampleRateHertz = 16000,
                    LanguageCode = "en",
                },
                InterimResults = true,
            }
        });
    // Print responses as they arrive.
    Task printResponses = Task.Run(async () =>
    {
        while (await streamingCall.ResponseStream.MoveNext(
            default(CancellationToken)))
        {
            foreach (var result in streamingCall.ResponseStream
                .Current.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
        }
    });
    // Stream the file content to the API. Write two 32 KB chunks
    // per second.
    using (FileStream fileStream = new FileStream(filePath, FileMode.Open))
    {
        var buffer = new byte[32 * 1024];
        int bytesRead;
        while ((bytesRead = await fileStream.ReadAsync(
            buffer, 0, buffer.Length)) > 0)
        {
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
                {
                    AudioContent = Google.Protobuf.ByteString
                    .CopyFrom(buffer, 0, bytesRead),
                });
            await Task.Delay(500);
        }
    }
    await streamingCall.WriteCompleteAsync();
    await printResponses;
    return 0;
}

Java

/**
 * Performs streaming speech recognition on raw PCM audio data.
 *
 * @param fileName the path to a PCM audio file to transcribe.
 */
public static void streamingRecognizeFile(String fileName) throws Exception {
  Path path = Paths.get(fileName);
  byte[] data = Files.readAllBytes(path);

  // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
  try (SpeechClient speech = SpeechClient.create()) {

    // Configure request with local raw PCM audio
    RecognitionConfig recConfig =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .setModel("default")
            .build();
    StreamingRecognitionConfig config =
        StreamingRecognitionConfig.newBuilder().setConfig(recConfig).build();

    class ResponseApiStreamingObserver<T> implements ApiStreamObserver<T> {
      private final SettableFuture<List<T>> future = SettableFuture.create();
      private final List<T> messages = new java.util.ArrayList<T>();

      @Override
      public void onNext(T message) {
        messages.add(message);
      }

      @Override
      public void onError(Throwable t) {
        future.setException(t);
      }

      @Override
      public void onCompleted() {
        future.set(messages);
      }

      // Returns the SettableFuture object to get received messages / exceptions.
      public SettableFuture<List<T>> future() {
        return future;
      }
    }

    ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver =
        new ResponseApiStreamingObserver<>();

    BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse> callable =
        speech.streamingRecognizeCallable();

    ApiStreamObserver<StreamingRecognizeRequest> requestObserver =
        callable.bidiStreamingCall(responseObserver);

    // The first request must **only** contain the audio configuration:
    requestObserver.onNext(
        StreamingRecognizeRequest.newBuilder().setStreamingConfig(config).build());

    // Subsequent requests must **only** contain the audio data.
    requestObserver.onNext(
        StreamingRecognizeRequest.newBuilder()
            .setAudioContent(ByteString.copyFrom(data))
            .build());

    // Mark transmission as completed after sending the data.
    requestObserver.onCompleted();

    List<StreamingRecognizeResponse> responses = responseObserver.future().get();

    for (StreamingRecognizeResponse response : responses) {
      // For streaming recognize, the results list has one is_final result (if available) followed
      // by a number of in-progress results (if interim_results is true) for subsequent utterances.
      // Just print the first result here.
      StreamingRecognitionResult result = response.getResultsList().get(0);
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcript : %s\n", alternative.getTranscript());
    }
  }
}

Node.js

const fs = require('fs');

// Imports the Google Cloud client library
const speech = require('@google-cloud/speech');

// Creates a client
const client = new speech.SpeechClient();

/**
 * TODO(developer): Uncomment the following lines before running the sample.
 */
// const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
// const encoding = 'Encoding of the audio file, e.g. LINEAR16';
// const sampleRateHertz = 16000;
// const languageCode = 'BCP-47 language code, e.g. en-US';

const request = {
  config: {
    encoding: encoding,
    sampleRateHertz: sampleRateHertz,
    languageCode: languageCode,
  },
  interimResults: false, // If you want interim results, set this to true
};

// Stream the audio to the Google Cloud Speech API
const recognizeStream = client
  .streamingRecognize(request)
  .on('error', console.error)
  .on('data', data => {
    console.log(
      `Transcription: ${data.results[0].alternatives[0].transcript}`
    );
  });

// Stream an audio file from disk to the Speech API, e.g. "./resources/audio.raw"
fs.createReadStream(filename).pipe(recognizeStream);

PHP

use Google\Cloud\Speech\V1\SpeechClient;
use Google\Cloud\Speech\V1\RecognitionConfig;
use Google\Cloud\Speech\V1\StreamingRecognitionConfig;
use Google\Cloud\Speech\V1\StreamingRecognizeRequest;
use Google\Cloud\Speech\V1\RecognitionConfig\AudioEncoding;

/**
 * Transcribe an audio file using Google Cloud Speech API
 * Example:
 * ```
 * streaming_recognize('/path/to/audiofile.wav');
 * ```.
 *
 * @param string $audioFile path to an audio file.
 */
function streaming_recognize($audioFile)
{
    // change these variables
    $encoding = AudioEncoding::LINEAR16;
    $sampleRateHertz = 32000;
    $languageCode = 'en-US';

    if (!extension_loaded('grpc')) {
        throw new \Exception('Install the grpc extension ' .
            '(pecl install grpc)');
    }

    $speechClient = new SpeechClient();
    try {
        $config = (new RecognitionConfig())
            ->setEncoding($encoding)
            ->setSampleRateHertz($sampleRateHertz)
            ->setLanguageCode($languageCode);

        $strmConfig = new StreamingRecognitionConfig();
        $strmConfig->setConfig($config);

        $strmReq = new StreamingRecognizeRequest();
        $strmReq->setStreamingConfig($strmConfig);

        $strm = $speechClient->streamingRecognize();
        $strm->write($strmReq);

        $strmReq = new StreamingRecognizeRequest();
        $content = file_get_contents($audioFile);
        $strmReq->setAudioContent($content);
        $strm->write($strmReq);

        foreach ($strm->closeWriteAndReadAll() as $response) {
            foreach ($response->getResults() as $result) {
                foreach ($result->getAlternatives() as $alt) {
                    printf("Transcription: %s\n", $alt->getTranscript());
                }
            }
        }
    } finally {
        $speechClient->close();
    }
}

Python

def transcribe_streaming(stream_file):
    """Streams transcription of the given audio file."""
    import io

    from google.cloud import speech
    from google.cloud.speech import enums
    from google.cloud.speech import types
    client = speech.SpeechClient()

    with io.open(stream_file, 'rb') as audio_file:
        content = audio_file.read()

    # In practice, stream should be a generator yielding chunks of audio data.
    stream = [content]
    requests = (types.StreamingRecognizeRequest(audio_content=chunk)
                for chunk in stream)

    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code='en-US')
    streaming_config = types.StreamingRecognitionConfig(config=config)

    # streaming_recognize returns a generator.
    responses = client.streaming_recognize(streaming_config, requests)

    for response in responses:
        # Once the transcription has settled, the first result will contain the
        # is_final result. The other results will be for subsequent portions of
        # the audio.
        for result in response.results:
            print('Finished: {}'.format(result.is_final))
            print('Stability: {}'.format(result.stability))
            alternatives = result.alternatives
            # The alternatives are ordered from most likely to least.
            for alternative in alternatives:
                print('Confidence: {}'.format(alternative.confidence))
                print(u'Transcript: {}'.format(alternative.transcript))

Ruby

# audio_file_path = "Path to file on which to perform speech recognition"

require "google/cloud/speech"

speech = Google::Cloud::Speech.new

audio_content  = File.binread audio_file_path
bytes_total    = audio_content.size
bytes_sent     = 0
chunk_size     = 32000

streaming_config = {config: {encoding:                :LINEAR16,
                             sample_rate_hertz:       16000,
                             language_code:           "en-US",
                             enable_word_time_offsets: true     },
                    interim_results: true}

stream = speech.streaming_recognize streaming_config

# Simulated streaming from a microphone
# Stream bytes...
while bytes_sent < bytes_total do
  stream.send audio_content[bytes_sent, chunk_size]
  bytes_sent += chunk_size
  sleep 1
end

puts "Stopped passing"
stream.stop

# Wait until processing is complete...
stream.wait_until_complete!

results = stream.results

alternatives = results.first.alternatives
alternatives.each do |alternative|
  puts "Transcript: #{alternative.transcript}"
end

While you can stream a local audio file to the Speech-to-Text API, it is recommended that you perform synchronous or asynchronous speech recognition to get batch-mode results.
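
A minimal Python sketch of the recommended batch approach follows, assuming the same google-cloud-speech client used in the samples above; the file path is an illustrative placeholder. A synchronous recognize call returns all results in a single response, and long_running_recognize can be used for the asynchronous variant.

# Minimal sketch of synchronous (batch) recognition on a local file,
# assuming the same google-cloud-speech Python client used above.
# The file path is an illustrative assumption.
import io

from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types

client = speech.SpeechClient()

with io.open('/path/to/audio.raw', 'rb') as audio_file:
    content = audio_file.read()

audio = types.RecognitionAudio(content=content)
config = types.RecognitionConfig(
    encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=16000,
    language_code='en-US')

# recognize blocks until the whole file has been processed;
# long_running_recognize returns an operation for the asynchronous case.
response = client.recognize(config, audio)

for result in response.results:
    print('Transcript: {}'.format(result.alternatives[0].transcript))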

Performing streaming speech recognition on an audio stream

Speech-to-Text v1 is officially released and is generally available from the https://speech.googleapis.com/v1/speech endpoint. The client libraries are released as alpha and may be changed in backward-incompatible ways. The client libraries are not currently recommended for production use.

These samples require that you have set up gcloud and have created and activated a service account. For information about setting up gcloud, and creating and activating a service account, see the quickstart.

Speech-to-Text can also perform recognition on streaming, real-time audio.

Here is an example of performing streaming speech recognition on an audio stream received from a microphone:

C#

static async Task<object> StreamingMicRecognizeAsync(int seconds)
{
    if (NAudio.Wave.WaveIn.DeviceCount < 1)
    {
        Console.WriteLine("No microphone!");
        return -1;
    }
    var speech = SpeechClient.Create();
    var streamingCall = speech.StreamingRecognize();
    // Write the initial request with the config.
    await streamingCall.WriteAsync(
        new StreamingRecognizeRequest()
        {
            StreamingConfig = new StreamingRecognitionConfig()
            {
                Config = new RecognitionConfig()
                {
                    Encoding =
                    RecognitionConfig.Types.AudioEncoding.Linear16,
                    SampleRateHertz = 16000,
                    LanguageCode = "en",
                },
                InterimResults = true,
            }
        });
    // Print responses as they arrive.
    Task printResponses = Task.Run(async () =>
    {
        while (await streamingCall.ResponseStream.MoveNext(
            default(CancellationToken)))
        {
            foreach (var result in streamingCall.ResponseStream
                .Current.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
        }
    });
    // Read from the microphone and stream to API.
    object writeLock = new object();
    bool writeMore = true;
    var waveIn = new NAudio.Wave.WaveInEvent();
    waveIn.DeviceNumber = 0;
    waveIn.WaveFormat = new NAudio.Wave.WaveFormat(16000, 1);
    waveIn.DataAvailable +=
        (object sender, NAudio.Wave.WaveInEventArgs args) =>
        {
            lock (writeLock)
            {
                if (!writeMore) return;
                streamingCall.WriteAsync(
                    new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                            .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
            }
        };
    waveIn.StartRecording();
    Console.WriteLine("Speak now.");
    await Task.Delay(TimeSpan.FromSeconds(seconds));
    // Stop recording and shut down.
    waveIn.StopRecording();
    lock (writeLock) writeMore = false;
    await streamingCall.WriteCompleteAsync();
    await printResponses;
    return 0;
}

Go

client, err := speech.NewClient(ctx)
if err != nil {
	log.Fatal(err)
}
stream, err := client.StreamingRecognize(ctx)
if err != nil {
	log.Fatal(err)
}
// Send the initial configuration message.
if err := stream.Send(&speechpb.StreamingRecognizeRequest{
	StreamingRequest: &speechpb.StreamingRecognizeRequest_StreamingConfig{
		StreamingConfig: &speechpb.StreamingRecognitionConfig{
			Config: &speechpb.RecognitionConfig{
				Encoding:        speechpb.RecognitionConfig_LINEAR16,
				SampleRateHertz: 16000,
				LanguageCode:    "en-US",
			},
		},
	},
}); err != nil {
	log.Fatal(err)
}

go func() {
	// Pipe stdin to the API.
	buf := make([]byte, 1024)
	for {
		n, err := os.Stdin.Read(buf)
		if n > 0 {
			if err := stream.Send(&speechpb.StreamingRecognizeRequest{
				StreamingRequest: &speechpb.StreamingRecognizeRequest_AudioContent{
					AudioContent: buf[:n],
				},
			}); err != nil {
				log.Printf("Could not send audio: %v", err)
			}
		}
		if err == io.EOF {
			// Nothing else to pipe, close the stream.
			if err := stream.CloseSend(); err != nil {
				log.Fatalf("Could not close stream: %v", err)
			}
			return
		}
		if err != nil {
			log.Printf("Could not read from stdin: %v", err)
			continue
		}
	}
}()

for {
	resp, err := stream.Recv()
	if err == io.EOF {
		break
	}
	if err != nil {
		log.Fatalf("Cannot stream results: %v", err)
	}
	if err := resp.Error; err != nil {
		// Workaround while the API doesn't give a more informative error.
		if err.Code == 3 || err.Code == 11 {
			log.Print("WARNING: Speech recognition request exceeded limit of 60 seconds.")
		}
		log.Fatalf("Could not recognize: %v", err)
	}
	for _, result := range resp.Results {
		fmt.Printf("Result: %+v\n", result)
	}
}

Java

/** Performs microphone streaming speech recognition with a duration of 1 minute. */
public static void streamingMicRecognize() throws Exception {

  ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
  try (SpeechClient client = SpeechClient.create()) {

    responseObserver =
        new ResponseObserver<StreamingRecognizeResponse>() {
          ArrayList<StreamingRecognizeResponse> responses = new ArrayList<>();

          public void onStart(StreamController controller) {}

          public void onResponse(StreamingRecognizeResponse response) {
            responses.add(response);
          }

          public void onComplete() {
            for (StreamingRecognizeResponse response : responses) {
              StreamingRecognitionResult result = response.getResultsList().get(0);
              SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
              System.out.printf("Transcript : %s\n", alternative.getTranscript());
            }
          }

          public void onError(Throwable t) {
            System.out.println(t);
          }
        };

    ClientStream<StreamingRecognizeRequest> clientStream =
        client.streamingRecognizeCallable().splitCall(responseObserver);

    RecognitionConfig recognitionConfig =
        RecognitionConfig.newBuilder()
            .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    StreamingRecognitionConfig streamingRecognitionConfig =
        StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();

    StreamingRecognizeRequest request =
        StreamingRecognizeRequest.newBuilder()
            .setStreamingConfig(streamingRecognitionConfig)
            .build(); // The first request in a streaming call has to be a config

    clientStream.send(request);
    // SampleRate:16000Hz, SampleSizeInBits: 16, Number of channels: 1, Signed: true,
    // bigEndian: false
    AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
    DataLine.Info targetInfo =
        new Info(
            TargetDataLine.class,
            audioFormat); // Set the system information to read from the microphone audio stream

    if (!AudioSystem.isLineSupported(targetInfo)) {
      System.out.println("Microphone not supported");
      System.exit(0);
    }
    // Target data line captures the audio stream the microphone produces.
    TargetDataLine targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
    targetDataLine.open(audioFormat);
    targetDataLine.start();
    System.out.println("Start speaking");
    long startTime = System.currentTimeMillis();
    // Audio Input Stream
    AudioInputStream audio = new AudioInputStream(targetDataLine);
    while (true) {
      long estimatedTime = System.currentTimeMillis() - startTime;
      byte[] data = new byte[6400];
      audio.read(data);
      if (estimatedTime > 60000) { // 60 seconds
        System.out.println("Stop speaking.");
        targetDataLine.stop();
        targetDataLine.close();
        break;
      }
      request =
          StreamingRecognizeRequest.newBuilder()
              .setAudioContent(ByteString.copyFrom(data))
              .build();
      clientStream.send(request);
    }
  } catch (Exception e) {
    System.out.println(e);
  }
  responseObserver.onComplete();
}

Node.js

This sample requires that you install SoX and that it is available in your $PATH.

  • For Mac OS: brew install sox
  • For most Linux distributions: sudo apt-get install sox libsox-fmt-all
  • For Windows: download the binaries

For details on installing and creating a Speech-to-Text client, refer to the Speech-to-Text client libraries.

const record = require('node-record-lpcm16');

// Imports the Google Cloud client library
const speech = require('@google-cloud/speech');

// Creates a client
const client = new speech.SpeechClient();

/**
 * TODO(developer): Uncomment the following lines before running the sample.
 */
// const encoding = 'Encoding of the audio file, e.g. LINEAR16';
// const sampleRateHertz = 16000;
// const languageCode = 'BCP-47 language code, e.g. en-US';

const request = {
  config: {
    encoding: encoding,
    sampleRateHertz: sampleRateHertz,
    languageCode: languageCode,
  },
  interimResults: false, // If you want interim results, set this to true
};

// Create a recognize stream
const recognizeStream = client
  .streamingRecognize(request)
  .on('error', console.error)
  .on('data', data =>
    process.stdout.write(
      data.results[0] && data.results[0].alternatives[0]
        ? `Transcription: ${data.results[0].alternatives[0].transcript}\n`
        : `\n\nReached transcription time limit, press Ctrl+C\n`
    )
  );

// Start recording and send the microphone input to the Speech API
record
  .start({
    sampleRateHertz: sampleRateHertz,
    threshold: 0,
    // Other options, see https://www.npmjs.com/package/node-record-lpcm16#options
    verbose: false,
    recordProgram: 'rec', // Try also "arecord" or "sox"
    silence: '10.0',
  })
  .on('error', console.error)
  .pipe(recognizeStream);

console.log('Listening, press Ctrl+C to stop.');

Python

from __future__ import division

import re
import sys

from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
import pyaudio
from six.moves import queue

# Audio recording parameters
RATE = 16000
CHUNK = int(RATE / 10)  # 100ms

class MicrophoneStream(object):
    """Opens a recording stream as a generator yielding the audio chunks."""
    def __init__(self, rate, chunk):
        self._rate = rate
        self._chunk = chunk

        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        self.closed = True

    def __enter__(self):
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            # The API currently only supports 1-channel (mono) audio
            # https://goo.gl/z757pE
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )

        self.closed = False

        return self

    def __exit__(self, type, value, traceback):
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)
        self._audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """Continuously collect data from the audio stream, into the buffer."""
        self._buff.put(in_data)
        return None, pyaudio.paContinue

    def generator(self):
        while not self.closed:
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None, indicating the
            # end of the audio stream.
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]

            # Now consume whatever other data's still buffered.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break

            yield b''.join(data)

def listen_print_loop(responses):
    """Iterates through server responses and prints them.

    The responses passed is a generator that will block until a response
    is provided by the server.

    Each response may contain multiple results, and each result may contain
    multiple alternatives; for details, see https://goo.gl/tjCPAU.  Here we
    print only the transcription for the top alternative of the top result.

    In this case, responses are provided for interim results as well. If the
    response is an interim one, print a line feed at the end of it, to allow
    the next result to overwrite it, until the response is a final one. For the
    final one, print a newline to preserve the finalized transcription.
    """
    num_chars_printed = 0
    for response in responses:
        if not response.results:
            continue

        # The `results` list is consecutive. For streaming, we only care about
        # the first result being considered, since once it's `is_final`, it
        # moves on to considering the next utterance.
        result = response.results[0]
        if not result.alternatives:
            continue

        # Display the transcription of the top alternative.
        transcript = result.alternatives[0].transcript

        # Display interim results, but with a carriage return at the end of the
        # line, so subsequent lines will overwrite them.
        #
        # If the previous result was longer than this one, we need to print
        # some extra spaces to overwrite the previous result
        overwrite_chars = ' ' * (num_chars_printed - len(transcript))

        if not result.is_final:
            sys.stdout.write(transcript + overwrite_chars + '\r')
            sys.stdout.flush()

            num_chars_printed = len(transcript)

        else:
            print(transcript + overwrite_chars)

            # Exit recognition if any of the transcribed phrases could be
            # one of our keywords.
            if re.search(r'\b(exit|quit)\b', transcript, re.I):
                print('Exiting..')
                break

            num_chars_printed = 0

def main():
    # See http://g.co/cloud/speech/docs/languages
    # for a list of supported languages.
    language_code = 'en-US'  # a BCP-47 language tag

    client = speech.SpeechClient()
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=RATE,
        language_code=language_code)
    streaming_config = types.StreamingRecognitionConfig(
        config=config,
        interim_results=True)

    with MicrophoneStream(RATE, CHUNK) as stream:
        audio_generator = stream.generator()
        requests = (types.StreamingRecognizeRequest(audio_content=content)
                    for content in audio_generator)

        responses = client.streaming_recognize(streaming_config, requests)

        # Now, put the transcription responses to use.
        listen_print_loop(responses)

if __name__ == '__main__':
    main()
