Transcribe a local multilingual audio file (beta)

Transcribe a local audio file that includes more than one language.

Documentation pages that include this code sample

To view the code sample used in context, see the following documentation:

Code sample


 * Please include the following imports to run this sample.
 * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
 * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
 * import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
 * import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
 * import com.google.cloud.speech.v1p1beta1.SpeechClient;
 * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
 * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
 * import com.google.protobuf.ByteString;
 * import java.nio.file.Files;
 * import java.nio.file.Path;
 * import java.nio.file.Paths;
 * import java.util.Arrays;
 * import java.util.List;

public static void sampleRecognize() {
  // TODO(developer): Replace these variables before running the sample.
  String localFilePath = "resources/brooklyn_bridge.flac";

 * Transcribe a short audio file with language detected from a list of possible languages
 * @param localFilePath Path to local audio file, e.g. /path/audio.wav
public static void sampleRecognize(String localFilePath) {
  try (SpeechClient speechClient = SpeechClient.create()) {

    // The language of the supplied audio. Even though additional languages are
    // provided by alternative_language_codes, a primary language is still required.
    String languageCode = "fr";

    // Specify up to 3 additional languages as possible alternative languages
    // of the supplied audio.
    String alternativeLanguageCodesElement = "es";
    String alternativeLanguageCodesElement2 = "en";
    List<String> alternativeLanguageCodes =
        Arrays.asList(alternativeLanguageCodesElement, alternativeLanguageCodesElement2);
    RecognitionConfig config =
    Path path = Paths.get(localFilePath);
    byte[] data = Files.readAllBytes(path);
    ByteString content = ByteString.copyFrom(data);
    RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
    RecognizeRequest request =
    RecognizeResponse response = speechClient.recognize(request);
    for (SpeechRecognitionResult result : response.getResultsList()) {
      // The languageCode which was detected as the most likely being spoken in the audio
      System.out.printf("Detected language: %s\n", result.getLanguageCode());
      // First alternative is the most probable result
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcript: %s\n", alternative.getTranscript());
  } catch (Exception exception) {
    System.err.println("Failed to create the client due to: " + exception);


const fs = require('fs');

// Imports the Google Cloud client library (v1p1beta1 is required for
// alternativeLanguageCodes support)
const speech = require('@google-cloud/speech').v1p1beta1;

// Creates a client
const client = new speech.SpeechClient();

/**
 * Transcribes a local audio file that may contain more than one language.
 * Prints the joined transcription of all results to stdout.
 */
async function transcribeMultiLanguage() {
  /**
   * TODO(developer): Uncomment the following lines before running the sample.
   */
  // const fileName = 'Local path to audio file, e.g. /path/to/audio.raw';

  const config = {
    encoding: 'LINEAR16',
    sampleRateHertz: 44100,
    // A primary language is still required even with alternatives supplied.
    languageCode: 'en-US',
    alternativeLanguageCodes: ['es-ES', 'en-US'],
  };

  const audio = {
    // The API expects inline audio content as base64.
    content: fs.readFileSync(fileName).toString('base64'),
  };

  const request = {
    config: config,
    audio: audio,
  };

  const [response] = await client.recognize(request);
  // First alternative of each result is the most probable transcript.
  const transcription = response.results
    .map(result => result.alternatives[0].transcript)
    .join('\n');
  console.log(`Transcription: ${transcription}`);
}

transcribeMultiLanguage().catch(console.error);


# Transcribe a local audio file that may contain more than one language.
# v1p1beta1 is required for alternative_language_codes support.
from google.cloud import speech_v1p1beta1 as speech

client = speech.SpeechClient()

speech_file = "resources/multi.wav"
# A primary language is still required even with an alternative supplied.
first_lang = "en-US"
second_lang = "es"

with open(speech_file, "rb") as audio_file:
    content = audio_file.read()

audio = speech.RecognitionAudio(content=content)

config = speech.RecognitionConfig(
    # NOTE(review): encoding/sample-rate fields were lost in extraction;
    # values below match the published sample — confirm against resources/multi.wav.
    encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=44100,
    language_code=first_lang,
    alternative_language_codes=[second_lang],
)

print("Waiting for operation to complete...")
response = client.recognize(config=config, audio=audio)

for i, result in enumerate(response.results):
    # First alternative is the most probable result.
    alternative = result.alternatives[0]
    print("-" * 20)
    print(u"First alternative of result {}: {}".format(i, alternative))
    print(u"Transcript: {}".format(alternative.transcript))

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.