Transcribing audio from streaming input

This section shows how to transcribe streaming audio, such as input captured from a microphone, to text.

Streaming speech recognition lets you stream audio to Speech-to-Text and receive streaming recognition results in real time as the audio is processed. For more information on streaming speech recognition requests, see the audio limits. Streaming speech recognition is available only via gRPC.
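
Regardless of the client language, every streaming session follows the same pattern: the first message on the stream carries only the recognition configuration, every subsequent message carries only raw audio, and responses arrive on the same call while audio is still being sent. The following is a minimal sketch of that flow, using the same Python client-library surface as the full Python samples below; audio_chunks is a hypothetical iterable of raw LINEAR16 byte strings:

    from google.cloud import speech
    from google.cloud.speech import enums
    from google.cloud.speech import types

    client = speech.SpeechClient()

    # The configuration is supplied once, up front; the client library sends it
    # as the initial StreamingRecognizeRequest on the gRPC stream.
    streaming_config = types.StreamingRecognitionConfig(
        config=types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=16000,
            language_code='en-US'),
        interim_results=True)

    # Every later request carries audio only. `audio_chunks` is assumed to be
    # an iterable of raw audio byte strings (e.g. read from a microphone).
    requests = (types.StreamingRecognizeRequest(audio_content=chunk)
                for chunk in audio_chunks)

    # Responses are yielded while audio is still streaming; interim results
    # have is_final == False until an utterance settles.
    for response in client.streaming_recognize(streaming_config, requests):
        for result in response.results:
            print(result.is_final, result.alternatives[0].transcript)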

Performing streaming speech recognition on a local file

Version 1 of Speech-to-Text has been released and is generally available via the https://speech.googleapis.com/v1/speech endpoint. The client libraries have been released as alpha versions and will likely be changed in backward-incompatible ways. The client libraries are currently not recommended for production use.

These examples require that you have set up gcloud and have created and activated a service account. For information about setting up gcloud, as well as creating and activating a service account, see the quickstart.

Here is an example of performing streaming speech recognition on a local file:

C#

    static async Task<object> StreamingRecognizeAsync(string filePath)
    {
        var speech = SpeechClient.Create();
        var streamingCall = speech.StreamingRecognize();
        // Write the initial request with the config.
        await streamingCall.WriteAsync(
            new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                        RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode = "en",
                    },
                    InterimResults = true,
                }
            });
        // Print responses as they arrive.
        Task printResponses = Task.Run(async () =>
        {
            var responseStream = streamingCall.GetResponseStream();
            while (await responseStream.MoveNextAsync())
            {
                StreamingRecognizeResponse response = responseStream.Current;
                foreach (StreamingRecognitionResult result in response.Results)
                {
                    foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                    {
                        Console.WriteLine(alternative.Transcript);
                    }
                }
            }
        });
        // Stream the file content to the API, writing two 32 KB chunks per second.
        using (FileStream fileStream = new FileStream(filePath, FileMode.Open))
        {
            var buffer = new byte[32 * 1024];
            int bytesRead;
            while ((bytesRead = await fileStream.ReadAsync(
                buffer, 0, buffer.Length)) > 0)
            {
                await streamingCall.WriteAsync(
                    new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                        .CopyFrom(buffer, 0, bytesRead),
                    });
                await Task.Delay(500);
            }
        }
        await streamingCall.WriteCompleteAsync();
        await printResponses;
        return 0;
    }

Go

    import (
    	"context"
    	"flag"
    	"fmt"
    	"io"
    	"log"
    	"os"
    	"path/filepath"

    	speech "cloud.google.com/go/speech/apiv1"
    	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
    )

    func main() {
    	flag.Usage = func() {
    		fmt.Fprintf(os.Stderr, "Usage: %s <AUDIOFILE>\n", filepath.Base(os.Args[0]))
    		fmt.Fprintf(os.Stderr, "<AUDIOFILE> must be a path to a local audio file. Audio file must be a 16-bit signed little-endian encoded with a sample rate of 16000.\n")

    	}
    	flag.Parse()
    	if len(flag.Args()) != 1 {
    		log.Fatal("Please pass path to your local audio file as a command line argument")
    	}
    	audioFile := flag.Arg(0)

    	ctx := context.Background()

    	client, err := speech.NewClient(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	stream, err := client.StreamingRecognize(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Send the initial configuration message.
    	if err := stream.Send(&speechpb.StreamingRecognizeRequest{
    		StreamingRequest: &speechpb.StreamingRecognizeRequest_StreamingConfig{
    			StreamingConfig: &speechpb.StreamingRecognitionConfig{
    				Config: &speechpb.RecognitionConfig{
    					Encoding:        speechpb.RecognitionConfig_LINEAR16,
    					SampleRateHertz: 16000,
    					LanguageCode:    "en-US",
    				},
    			},
    		},
    	}); err != nil {
    		log.Fatal(err)
    	}

    	f, err := os.Open(audioFile)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	go func() {
    		buf := make([]byte, 1024)
    		for {
    			n, err := f.Read(buf)
    			if n > 0 {
    				if err := stream.Send(&speechpb.StreamingRecognizeRequest{
    					StreamingRequest: &speechpb.StreamingRecognizeRequest_AudioContent{
    						AudioContent: buf[:n],
    					},
    				}); err != nil {
    					log.Printf("Could not send audio: %v", err)
    				}
    			}
    			if err == io.EOF {
    				// Nothing else to pipe, close the stream.
    				if err := stream.CloseSend(); err != nil {
    					log.Fatalf("Could not close stream: %v", err)
    				}
    				return
    			}
    			if err != nil {
    				log.Printf("Could not read from %s: %v", audioFile, err)
    				continue
    			}
    		}
    	}()

    	for {
    		resp, err := stream.Recv()
    		if err == io.EOF {
    			break
    		}
    		if err != nil {
    			log.Fatalf("Cannot stream results: %v", err)
    		}
    		if err := resp.Error; err != nil {
    			log.Fatalf("Could not recognize: %v", err)
    		}
    		for _, result := range resp.Results {
    			fmt.Printf("Result: %+v\n", result)
    		}
    	}
    }
    

Java

    /**
     * Performs streaming speech recognition on raw PCM audio data.
     *
     * @param fileName the path to a PCM audio file to transcribe.
     */
    public static void streamingRecognizeFile(String fileName) throws Exception {
      Path path = Paths.get(fileName);
      byte[] data = Files.readAllBytes(path);

      // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
      try (SpeechClient speech = SpeechClient.create()) {

        // Configure request with local raw PCM audio
        RecognitionConfig recConfig =
            RecognitionConfig.newBuilder()
                .setEncoding(AudioEncoding.LINEAR16)
                .setLanguageCode("en-US")
                .setSampleRateHertz(16000)
                .setModel("default")
                .build();
        StreamingRecognitionConfig config =
            StreamingRecognitionConfig.newBuilder().setConfig(recConfig).build();

        class ResponseApiStreamingObserver<T> implements ApiStreamObserver<T> {
          private final SettableFuture<List<T>> future = SettableFuture.create();
          private final List<T> messages = new java.util.ArrayList<T>();

          @Override
          public void onNext(T message) {
            messages.add(message);
          }

          @Override
          public void onError(Throwable t) {
            future.setException(t);
          }

          @Override
          public void onCompleted() {
            future.set(messages);
          }

          // Returns the SettableFuture object to get received messages / exceptions.
          public SettableFuture<List<T>> future() {
            return future;
          }
        }

        ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver =
            new ResponseApiStreamingObserver<>();

        BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse> callable =
            speech.streamingRecognizeCallable();

        ApiStreamObserver<StreamingRecognizeRequest> requestObserver =
            callable.bidiStreamingCall(responseObserver);

        // The first request must **only** contain the audio configuration:
        requestObserver.onNext(
            StreamingRecognizeRequest.newBuilder().setStreamingConfig(config).build());

        // Subsequent requests must **only** contain the audio data.
        requestObserver.onNext(
            StreamingRecognizeRequest.newBuilder()
                .setAudioContent(ByteString.copyFrom(data))
                .build());

        // Mark transmission as completed after sending the data.
        requestObserver.onCompleted();

        List<StreamingRecognizeResponse> responses = responseObserver.future().get();

        for (StreamingRecognizeResponse response : responses) {
          // For streaming recognize, the results list has one is_final result (if available) followed
          // by a number of in-progress results (if interim_results is true) for subsequent utterances.
          // Just print the first result here.
          StreamingRecognitionResult result = response.getResultsList().get(0);
          // There can be several alternative transcripts for a given chunk of speech. Just use the
          // first (most likely) one here.
          SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
          System.out.printf("Transcript : %s\n", alternative.getTranscript());
        }
      }
    }

Node.js

    const fs = require('fs');

    // Imports the Google Cloud client library
    const speech = require('@google-cloud/speech');

    // Creates a client
    const client = new speech.SpeechClient();

    /**
     * TODO(developer): Uncomment the following lines before running the sample.
     */
    // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
    // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
    // const sampleRateHertz = 16000;
    // const languageCode = 'BCP-47 language code, e.g. en-US';

    const request = {
      config: {
        encoding: encoding,
        sampleRateHertz: sampleRateHertz,
        languageCode: languageCode,
      },
      interimResults: false, // If you want interim results, set this to true
    };

    // Stream the audio to the Google Cloud Speech API
    const recognizeStream = client
      .streamingRecognize(request)
      .on('error', console.error)
      .on('data', data => {
        console.log(
          `Transcription: ${data.results[0].alternatives[0].transcript}`
        );
      });

    // Stream an audio file from disk to the Speech API, e.g. "./resources/audio.raw"
    fs.createReadStream(filename).pipe(recognizeStream);

PHP

    use Google\Cloud\Speech\V1\SpeechClient;
    use Google\Cloud\Speech\V1\RecognitionConfig;
    use Google\Cloud\Speech\V1\StreamingRecognitionConfig;
    use Google\Cloud\Speech\V1\StreamingRecognizeRequest;
    use Google\Cloud\Speech\V1\RecognitionConfig\AudioEncoding;

    /** Uncomment and populate these variables in your code */
    // $audioFile = 'path to an audio file';

    // change these variables if necessary
    $encoding = AudioEncoding::LINEAR16;
    $sampleRateHertz = 32000;
    $languageCode = 'en-US';

    // the gRPC extension is required for streaming
    if (!extension_loaded('grpc')) {
        throw new \Exception('Install the grpc extension (pecl install grpc)');
    }

    $speechClient = new SpeechClient();
    try {
        $config = (new RecognitionConfig())
            ->setEncoding($encoding)
            ->setSampleRateHertz($sampleRateHertz)
            ->setLanguageCode($languageCode);

        $strmConfig = new StreamingRecognitionConfig();
        $strmConfig->setConfig($config);

        $strmReq = new StreamingRecognizeRequest();
        $strmReq->setStreamingConfig($strmConfig);

        $strm = $speechClient->streamingRecognize();
        $strm->write($strmReq);

        $strmReq = new StreamingRecognizeRequest();
        $content = file_get_contents($audioFile);
        $strmReq->setAudioContent($content);
        $strm->write($strmReq);

        foreach ($strm->closeWriteAndReadAll() as $response) {
            foreach ($response->getResults() as $result) {
                foreach ($result->getAlternatives() as $alt) {
                    printf("Transcription: %s\n", $alt->getTranscript());
                }
            }
        }
    } finally {
        $speechClient->close();
    }

Python

    def transcribe_streaming(stream_file):
        """Streams transcription of the given audio file."""
        import io
        from google.cloud import speech
        from google.cloud.speech import enums
        from google.cloud.speech import types
        client = speech.SpeechClient()

        with io.open(stream_file, 'rb') as audio_file:
            content = audio_file.read()

        # In practice, stream should be a generator yielding chunks of audio data.
        stream = [content]
        requests = (types.StreamingRecognizeRequest(audio_content=chunk)
                    for chunk in stream)

        config = types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=16000,
            language_code='en-US')
        streaming_config = types.StreamingRecognitionConfig(config=config)

        # streaming_recognize returns a generator.
        responses = client.streaming_recognize(streaming_config, requests)

        for response in responses:
            # Once the transcription has settled, the first result will contain the
            # is_final result. The other results will be for subsequent portions of
            # the audio.
            for result in response.results:
                print('Finished: {}'.format(result.is_final))
                print('Stability: {}'.format(result.stability))
                alternatives = result.alternatives
                # The alternatives are ordered from most likely to least.
                for alternative in alternatives:
                    print('Confidence: {}'.format(alternative.confidence))
                    print(u'Transcript: {}'.format(alternative.transcript))

Ruby

    # audio_file_path = "Path to file on which to perform speech recognition"

    require "google/cloud/speech"

    speech = Google::Cloud::Speech.new

    audio_content  = File.binread audio_file_path
    bytes_total    = audio_content.size
    bytes_sent     = 0
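    # 32,000 bytes of 16-bit, 16 kHz mono audio is one second of playback, so
    # sending one chunk per second below simulates real-time streaming.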
    chunk_size     = 32_000

    streaming_config = { config:          { encoding:                 :LINEAR16,
                                            sample_rate_hertz:        16_000,
                                            language_code:            "en-US",
                                            enable_word_time_offsets: true },
                         interim_results: true }

    stream = speech.streaming_recognize streaming_config

    # Simulated streaming from a microphone
    # Stream bytes...
    while bytes_sent < bytes_total
      stream.send audio_content[bytes_sent, chunk_size]
      bytes_sent += chunk_size
      sleep 1
    end

    puts "Stopped passing"
    stream.stop

    # Wait until processing is complete...
    stream.wait_until_complete!

    results = stream.results

    alternatives = results.first.alternatives
    alternatives.each do |alternative|
      puts "Transcript: #{alternative.transcript}"
    end

Although you can stream a local audio file to the Speech-to-Text API, synchronous or asynchronous speech recognition is recommended for batch-mode results.
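
For comparison, a synchronous request replaces the bidirectional stream with a single call that returns once all of the audio has been processed. Here is a sketch of that alternative using the same Python client as in the samples above (the file path is a placeholder; synchronous recognition is intended for short audio, and longer files should use asynchronous recognition):

    import io

    from google.cloud import speech
    from google.cloud.speech import enums
    from google.cloud.speech import types

    client = speech.SpeechClient()

    # Read the entire file up front; there is no streaming session to manage.
    with io.open('/path/to/audio.raw', 'rb') as audio_file:
        content = audio_file.read()

    audio = types.RecognitionAudio(content=content)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code='en-US')

    # A single request yields a single response with the final results.
    response = client.recognize(config, audio)
    for result in response.results:
        print(u'Transcript: {}'.format(result.alternatives[0].transcript))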

Performing streaming speech recognition on an audio stream

Version 1 of Speech-to-Text has been released and is generally available via the https://speech.googleapis.com/v1/speech endpoint. The client libraries have been released as alpha versions and will likely be changed in backward-incompatible ways. The client libraries are currently not recommended for production use.

These examples require that you have set up gcloud and have created and activated a service account. For information about setting up gcloud, as well as creating and activating a service account, see the quickstart.

Speech-to-Text can also perform recognition on audio that is streamed in real time.

Here is an example of performing streaming speech recognition on an audio stream received from a microphone:

C#

    static async Task<object> StreamingMicRecognizeAsync(int seconds)
    {
        var speech = SpeechClient.Create();
        var streamingCall = speech.StreamingRecognize();
        // Write the initial request with the config.
        await streamingCall.WriteAsync(
            new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                        RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode = "en",
                    },
                    InterimResults = true,
                }
            });
        // Print responses as they arrive.
        Task printResponses = Task.Run(async () =>
        {
            var responseStream = streamingCall.GetResponseStream();
            while (await responseStream.MoveNextAsync())
            {
                StreamingRecognizeResponse response = responseStream.Current;
                foreach (StreamingRecognitionResult result in response.Results)
                {
                    foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                    {
                        Console.WriteLine(alternative.Transcript);
                    }
                }
            }
        });
        // Read from the microphone and stream to API.
        object writeLock = new object();
        bool writeMore = true;
        var waveIn = new NAudio.Wave.WaveInEvent();
        waveIn.DeviceNumber = 0;
        waveIn.WaveFormat = new NAudio.Wave.WaveFormat(16000, 1);
        waveIn.DataAvailable +=
            (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }

                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                        {
                            AudioContent = Google.Protobuf.ByteString
                                .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                        }).Wait();
                }
            };
        waveIn.StartRecording();
        Console.WriteLine("Speak now.");
        await Task.Delay(TimeSpan.FromSeconds(seconds));
        // Stop recording and shut down.
        waveIn.StopRecording();
        lock (writeLock)
        {
            writeMore = false;
        }

        await streamingCall.WriteCompleteAsync();
        await printResponses;
        return 0;
    }

Go

    import (
    	"context"
    	"fmt"
    	"io"
    	"log"
    	"os"

    	speech "cloud.google.com/go/speech/apiv1"
    	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
    )

    func main() {
    	ctx := context.Background()

    	client, err := speech.NewClient(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	stream, err := client.StreamingRecognize(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Send the initial configuration message.
    	if err := stream.Send(&speechpb.StreamingRecognizeRequest{
    		StreamingRequest: &speechpb.StreamingRecognizeRequest_StreamingConfig{
    			StreamingConfig: &speechpb.StreamingRecognitionConfig{
    				Config: &speechpb.RecognitionConfig{
    					Encoding:        speechpb.RecognitionConfig_LINEAR16,
    					SampleRateHertz: 16000,
    					LanguageCode:    "en-US",
    				},
    			},
    		},
    	}); err != nil {
    		log.Fatal(err)
    	}

    	go func() {
    		// Pipe stdin to the API.
    		buf := make([]byte, 1024)
    		for {
    			n, err := os.Stdin.Read(buf)
    			if n > 0 {
    				if err := stream.Send(&speechpb.StreamingRecognizeRequest{
    					StreamingRequest: &speechpb.StreamingRecognizeRequest_AudioContent{
    						AudioContent: buf[:n],
    					},
    				}); err != nil {
    					log.Printf("Could not send audio: %v", err)
    				}
    			}
    			if err == io.EOF {
    				// Nothing else to pipe, close the stream.
    				if err := stream.CloseSend(); err != nil {
    					log.Fatalf("Could not close stream: %v", err)
    				}
    				return
    			}
    			if err != nil {
    				log.Printf("Could not read from stdin: %v", err)
    				continue
    			}
    		}
    	}()

    	for {
    		resp, err := stream.Recv()
    		if err == io.EOF {
    			break
    		}
    		if err != nil {
    			log.Fatalf("Cannot stream results: %v", err)
    		}
    		if err := resp.Error; err != nil {
    			// Workaround while the API doesn't give a more informative error.
    			if err.Code == 3 || err.Code == 11 {
    				log.Print("WARNING: Speech recognition request exceeded limit of 60 seconds.")
    			}
    			log.Fatalf("Could not recognize: %v", err)
    		}
    		for _, result := range resp.Results {
    			fmt.Printf("Result: %+v\n", result)
    		}
    	}
    }
    

Java

    /** Performs microphone streaming speech recognition with a duration of 1 minute. */
    public static void streamingMicRecognize() throws Exception {

      ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
      try (SpeechClient client = SpeechClient.create()) {

        responseObserver =
            new ResponseObserver<StreamingRecognizeResponse>() {
              ArrayList<StreamingRecognizeResponse> responses = new ArrayList<>();

              public void onStart(StreamController controller) {}

              public void onResponse(StreamingRecognizeResponse response) {
                responses.add(response);
              }

              public void onComplete() {
                for (StreamingRecognizeResponse response : responses) {
                  StreamingRecognitionResult result = response.getResultsList().get(0);
                  SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
                  System.out.printf("Transcript : %s\n", alternative.getTranscript());
                }
              }

              public void onError(Throwable t) {
                System.out.println(t);
              }
            };

        ClientStream<StreamingRecognizeRequest> clientStream =
            client.streamingRecognizeCallable().splitCall(responseObserver);

        RecognitionConfig recognitionConfig =
            RecognitionConfig.newBuilder()
                .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
                .setLanguageCode("en-US")
                .setSampleRateHertz(16000)
                .build();
        StreamingRecognitionConfig streamingRecognitionConfig =
            StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();

        StreamingRecognizeRequest request =
            StreamingRecognizeRequest.newBuilder()
                .setStreamingConfig(streamingRecognitionConfig)
                .build(); // The first request in a streaming call has to be a config

        clientStream.send(request);
        // SampleRate:16000Hz, SampleSizeInBits: 16, Number of channels: 1, Signed: true,
        // bigEndian: false
        AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
        DataLine.Info targetInfo =
            new Info(
                TargetDataLine.class,
                audioFormat); // Set the system information to read from the microphone audio stream

        if (!AudioSystem.isLineSupported(targetInfo)) {
          System.out.println("Microphone not supported");
          System.exit(0);
        }
        // Target data line captures the audio stream the microphone produces.
        TargetDataLine targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
        targetDataLine.open(audioFormat);
        targetDataLine.start();
        System.out.println("Start speaking");
        long startTime = System.currentTimeMillis();
        // Audio Input Stream
        AudioInputStream audio = new AudioInputStream(targetDataLine);
        while (true) {
          long estimatedTime = System.currentTimeMillis() - startTime;
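          // 6400 bytes of 16-bit, 16 kHz mono audio corresponds to 200 ms per read.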
          byte[] data = new byte[6400];
          audio.read(data);
          if (estimatedTime > 60000) { // 60 seconds
            System.out.println("Stop speaking.");
            targetDataLine.stop();
            targetDataLine.close();
            break;
          }
          request =
              StreamingRecognizeRequest.newBuilder()
                  .setAudioContent(ByteString.copyFrom(data))
                  .build();
          clientStream.send(request);
        }
      } catch (Exception e) {
        System.out.println(e);
      }
      responseObserver.onComplete();
    }

Node.js

This sample requires that SoX is installed and available in your $PATH.

For more information about installing and creating a Speech-to-Text client, see Speech-to-Text Client Libraries.

    const recorder = require('node-record-lpcm16');

    // Imports the Google Cloud client library
    const speech = require('@google-cloud/speech');

    // Creates a client
    const client = new speech.SpeechClient();

    /**
     * TODO(developer): Uncomment the following lines before running the sample.
     */
    // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
    // const sampleRateHertz = 16000;
    // const languageCode = 'BCP-47 language code, e.g. en-US';

    const request = {
      config: {
        encoding: encoding,
        sampleRateHertz: sampleRateHertz,
        languageCode: languageCode,
      },
      interimResults: false, // If you want interim results, set this to true
    };

    // Create a recognize stream
    const recognizeStream = client
      .streamingRecognize(request)
      .on('error', console.error)
      .on('data', data =>
        process.stdout.write(
          data.results[0] && data.results[0].alternatives[0]
            ? `Transcription: ${data.results[0].alternatives[0].transcript}\n`
            : '\n\nReached transcription time limit, press Ctrl+C\n'
        )
      );

    // Start recording and send the microphone input to the Speech API.
    // Ensure SoX is installed, see https://www.npmjs.com/package/node-record-lpcm16#dependencies
    recorder
      .record({
        sampleRateHertz: sampleRateHertz,
        threshold: 0,
        // Other options, see https://www.npmjs.com/package/node-record-lpcm16#options
        verbose: false,
        recordProgram: 'rec', // Try also "arecord" or "sox"
        silence: '10.0',
      })
      .stream()
      .on('error', console.error)
      .pipe(recognizeStream);

    console.log('Listening, press Ctrl+C to stop.');

Python

    from __future__ import division

    import re
    import sys

    from google.cloud import speech
    from google.cloud.speech import enums
    from google.cloud.speech import types
    import pyaudio
    from six.moves import queue

    # Audio recording parameters
    RATE = 16000
    CHUNK = int(RATE / 10)  # 100ms

    class MicrophoneStream(object):
        """Opens a recording stream as a generator yielding the audio chunks."""
        def __init__(self, rate, chunk):
            self._rate = rate
            self._chunk = chunk

            # Create a thread-safe buffer of audio data
            self._buff = queue.Queue()
            self.closed = True

        def __enter__(self):
            self._audio_interface = pyaudio.PyAudio()
            self._audio_stream = self._audio_interface.open(
                format=pyaudio.paInt16,
                # The API currently only supports 1-channel (mono) audio
                # https://goo.gl/z757pE
                channels=1, rate=self._rate,
                input=True, frames_per_buffer=self._chunk,
                # Run the audio stream asynchronously to fill the buffer object.
                # This is necessary so that the input device's buffer doesn't
                # overflow while the calling thread makes network requests, etc.
                stream_callback=self._fill_buffer,
            )

            self.closed = False

            return self

        def __exit__(self, type, value, traceback):
            self._audio_stream.stop_stream()
            self._audio_stream.close()
            self.closed = True
            # Signal the generator to terminate so that the client's
            # streaming_recognize method will not block the process termination.
            self._buff.put(None)
            self._audio_interface.terminate()

        def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
            """Continuously collect data from the audio stream, into the buffer."""
            self._buff.put(in_data)
            return None, pyaudio.paContinue

        def generator(self):
            while not self.closed:
                # Use a blocking get() to ensure there's at least one chunk of
                # data, and stop iteration if the chunk is None, indicating the
                # end of the audio stream.
                chunk = self._buff.get()
                if chunk is None:
                    return
                data = [chunk]

                # Now consume whatever other data's still buffered.
                while True:
                    try:
                        chunk = self._buff.get(block=False)
                        if chunk is None:
                            return
                        data.append(chunk)
                    except queue.Empty:
                        break

                yield b''.join(data)

    def listen_print_loop(responses):
        """Iterates through server responses and prints them.

        The responses passed is a generator that will block until a response
        is provided by the server.

        Each response may contain multiple results, and each result may contain
        multiple alternatives; for details, see https://goo.gl/tjCPAU.  Here we
        print only the transcription for the top alternative of the top result.

        In this case, responses are provided for interim results as well. If the
        response is an interim one, print a carriage return at the end of it, to allow
        the next result to overwrite it, until the response is a final one. For the
        final one, print a newline to preserve the finalized transcription.
        """
        num_chars_printed = 0
        for response in responses:
            if not response.results:
                continue

            # The `results` list is consecutive. For streaming, we only care about
            # the first result being considered, since once it's `is_final`, it
            # moves on to considering the next utterance.
            result = response.results[0]
            if not result.alternatives:
                continue

            # Display the transcription of the top alternative.
            transcript = result.alternatives[0].transcript

            # Display interim results, but with a carriage return at the end of the
            # line, so subsequent lines will overwrite them.
            #
            # If the previous result was longer than this one, we need to print
            # some extra spaces to overwrite the previous result
            overwrite_chars = ' ' * (num_chars_printed - len(transcript))

            if not result.is_final:
                sys.stdout.write(transcript + overwrite_chars + '\r')
                sys.stdout.flush()

                num_chars_printed = len(transcript)

            else:
                print(transcript + overwrite_chars)

                # Exit recognition if any of the transcribed phrases could be
                # one of our keywords.
                if re.search(r'\b(exit|quit)\b', transcript, re.I):
                    print('Exiting..')
                    break

                num_chars_printed = 0

    def main():
        # See http://g.co/cloud/speech/docs/languages
        # for a list of supported languages.
        language_code = 'en-US'  # a BCP-47 language tag

        client = speech.SpeechClient()
        config = types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=RATE,
            language_code=language_code)
        streaming_config = types.StreamingRecognitionConfig(
            config=config,
            interim_results=True)

        with MicrophoneStream(RATE, CHUNK) as stream:
            audio_generator = stream.generator()
            requests = (types.StreamingRecognizeRequest(audio_content=content)
                        for content in audio_generator)

            responses = client.streaming_recognize(streaming_config, requests)

            # Now, put the transcription responses to use.
            listen_print_loop(responses)

    if __name__ == '__main__':
        main()

More information