Transcribe a local audio file that contains multiple languages.
For detailed documentation that includes this code sample, see the Speech-to-Text documentation.
Code sample
Java
import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
import com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding;
import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
import com.google.cloud.speech.v1p1beta1.SpeechClient;
import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
import com.google.protobuf.ByteString;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;

/**
 * Transcribe a local audio file with multi-language recognition
 *
 * @param fileName the path to the audio file
 */
public static void transcribeMultiLanguage(String fileName) throws Exception {
  Path path = Paths.get(fileName);
  // Get the contents of the local audio file
  byte[] content = Files.readAllBytes(path);

  try (SpeechClient speechClient = SpeechClient.create()) {
    RecognitionAudio recognitionAudio =
        RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build();
    ArrayList<String> languageList = new ArrayList<>();
    languageList.add("es-ES");
    languageList.add("en-US");

    // Configure request to enable multiple languages
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setSampleRateHertz(16000)
            .setLanguageCode("ja-JP")
            .addAllAlternativeLanguageCodes(languageList)
            .build();

    // Perform the transcription request
    RecognizeResponse recognizeResponse = speechClient.recognize(config, recognitionAudio);

    // Print out the results
    for (SpeechRecognitionResult result : recognizeResponse.getResultsList()) {
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternatives(0);
      System.out.format("Transcript : %s\n\n", alternative.getTranscript());
    }
  }
}
Node.js
const fs = require('fs');

// Imports the Google Cloud client library
const speech = require('@google-cloud/speech').v1p1beta1;

async function main() {
  // Creates a client
  const client = new speech.SpeechClient();

  /**
   * TODO(developer): Uncomment the following lines before running the sample.
   */
  // const fileName = 'Local path to audio file, e.g. /path/to/audio.raw';

  const config = {
    encoding: 'LINEAR16',
    sampleRateHertz: 44100,
    languageCode: 'en-US',
    alternativeLanguageCodes: ['es-ES', 'en-US'],
  };

  const audio = {
    content: fs.readFileSync(fileName).toString('base64'),
  };

  const request = {
    config: config,
    audio: audio,
  };

  // The awaited call must run inside an async function in a CommonJS module
  const [response] = await client.recognize(request);
  const transcription = response.results
    .map(result => result.alternatives[0].transcript)
    .join('\n');
  console.log(`Transcription: ${transcription}`);
}

main().catch(console.error);
Python
from google.cloud import speech_v1p1beta1 as speech

client = speech.SpeechClient()

speech_file = "resources/multi.wav"
first_lang = "en-US"
second_lang = "es"

# Read the local audio file into memory
with open(speech_file, "rb") as audio_file:
    content = audio_file.read()

audio = speech.RecognitionAudio(content=content)

# Configure the request with a primary language and an alternative language
config = speech.RecognitionConfig(
    encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=44100,
    audio_channel_count=2,
    language_code=first_lang,
    alternative_language_codes=[second_lang],
)

print("Waiting for operation to complete...")
response = client.recognize(config=config, audio=audio)

# Print the most likely transcript for each result
for i, result in enumerate(response.results):
    alternative = result.alternatives[0]
    print("-" * 20)
    print(u"First alternative of result {}: {}".format(i, alternative))
    print(u"Transcript: {}".format(alternative.transcript))
What's next
To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.