The quickstart provides basic samples that show how to detect intent. This guide provides additional samples for advanced scenarios.
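For reference, the basic text-input case from the quickstart reduces to the following minimal Python sketch (the project and agent values are placeholders; regional agents also need a regional api_endpoint, as the samples below show):

import uuid

from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import session


def detect_intent_text_sketch():
    # Placeholder values; replace with your own project, location, and agent.
    agent = "projects/YOUR-PROJECT-ID/locations/global/agents/YOUR-AGENT-ID"
    session_path = f"{agent}/sessions/{uuid.uuid4()}"
    client = SessionsClient()
    query_input = session.QueryInput(
        text=session.TextInput(text="Hello"), language_code="en"
    )
    request = session.DetectIntentRequest(session=session_path, query_input=query_input)
    response = client.detect_intent(request=request)
    print(response.query_result.response_messages)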
Detect intent with audio input
The following sample shows how to use audio input to detect intent.
Java
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.ApiException;
import com.google.cloud.dialogflow.cx.v3.AudioEncoding;
import com.google.cloud.dialogflow.cx.v3.AudioInput;
import com.google.cloud.dialogflow.cx.v3.DetectIntentRequest;
import com.google.cloud.dialogflow.cx.v3.DetectIntentResponse;
import com.google.cloud.dialogflow.cx.v3.InputAudioConfig;
import com.google.cloud.dialogflow.cx.v3.QueryInput;
import com.google.cloud.dialogflow.cx.v3.QueryResult;
import com.google.cloud.dialogflow.cx.v3.SessionName;
import com.google.cloud.dialogflow.cx.v3.SessionsClient;
import com.google.cloud.dialogflow.cx.v3.SessionsSettings;
import com.google.protobuf.ByteString;
import java.io.FileInputStream;
import java.io.IOException;
public class DetectIntentAudioInput {
  // Dialogflow API Detect Intent sample with audio input.
public static void main(String[] args) throws IOException, ApiException {
    // TODO(developer): Replace these values with your own values.
String projectId = "my-project-id";
String locationId = "global";
String agentId = "my-agent-id";
String audioFileName = "resources/book_a_room.wav";
int sampleRateHertz = 16000;
/*
* A session ID is a string of at most 36 bytes in size.
* Your system is responsible for generating unique session IDs.
* They can be random numbers, hashed end-user identifiers,
* or any other values that are convenient for you to generate.
*/
String sessionId = "my-UUID";
String languageCode = "en";
detectIntent(
projectId, locationId, agentId, audioFileName, sampleRateHertz, sessionId, languageCode);
}
public static void detectIntent(
String projectId,
String locationId,
String agentId,
String audioFileName,
int sampleRateHertz,
String sessionId,
String languageCode)
throws IOException, ApiException {
SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
if (locationId.equals("global")) {
sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
} else {
sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
}
SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();
// Instantiates a client by setting the session name.
// Format:`projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
// Note: close() needs to be called on the SessionsClient object to clean up resources
// such as threads. In the example below, try-with-resources is used,
// which automatically calls close().
try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
SessionName session =
SessionName.ofProjectLocationAgentSessionName(projectId, locationId, agentId, sessionId);
// TODO : Uncomment if you want to print session path
// System.out.println("Session Path: " + session.toString());
InputAudioConfig inputAudioConfig =
InputAudioConfig.newBuilder()
.setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
.setSampleRateHertz(sampleRateHertz)
.build();
      try (FileInputStream audioStream = new FileInputStream(audioFileName)) {
        // Read the entire audio file into memory. For long audio inputs, prefer the
        // streaming detect intent API, which sends the audio in chunks.
        AudioInput audioInput =
            AudioInput.newBuilder()
                .setAudio(ByteString.readFrom(audioStream))
                .setConfig(inputAudioConfig)
                .build();
QueryInput queryInput =
QueryInput.newBuilder()
.setAudio(audioInput)
                .setLanguageCode(languageCode)
.build();
DetectIntentRequest request =
DetectIntentRequest.newBuilder()
.setSession(session.toString())
.setQueryInput(queryInput)
.build();
// Performs the detect intent request.
DetectIntentResponse response = sessionsClient.detectIntent(request);
// Display the query result.
QueryResult queryResult = response.getQueryResult();
        System.out.println("====================");
        System.out.format(
            "Query Transcript: %s (intent detection confidence: %f)\n",
            queryResult.getTranscript(), queryResult.getIntentDetectionConfidence());
}
}
}
}
Node.js
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
// const projectId = 'my-project';
// const location = 'global';
// const agentId = 'my-agent';
// const audioFileName = '/path/to/audio.raw';
// const encoding = 'AUDIO_ENCODING_LINEAR_16';
// const sampleRateHertz = 16000;
// const languageCode = 'en'
// Imports the Dialogflow CX API library
const {SessionsClient} = require('@google-cloud/dialogflow-cx');
/**
* Example for regional endpoint:
* const location = 'us-central1'
* const client = new SessionsClient({apiEndpoint: 'us-central1-dialogflow.googleapis.com'})
*/
const client = new SessionsClient();
const fs = require('fs');
const util = require('util');
async function detectIntentAudio() {
const sessionId = Math.random().toString(36).substring(7);
const sessionPath = client.projectLocationAgentSessionPath(
projectId,
location,
agentId,
sessionId
);
console.info(sessionPath);
// Read the content of the audio file and send it as part of the request.
const readFile = util.promisify(fs.readFile);
const inputAudio = await readFile(audioFileName);
const request = {
session: sessionPath,
queryInput: {
audio: {
config: {
audioEncoding: encoding,
sampleRateHertz: sampleRateHertz,
},
audio: inputAudio,
},
languageCode,
},
};
const [response] = await client.detectIntent(request);
console.log(`User Query: ${response.queryResult.transcript}`);
for (const message of response.queryResult.responseMessages) {
if (message.text) {
console.log(`Agent Response: ${message.text.text}`);
}
}
if (response.queryResult.match.intent) {
console.log(
`Matched Intent: ${response.queryResult.match.intent.displayName}`
);
}
console.log(
`Current Page: ${response.queryResult.currentPage.displayName}`
);
}
detectIntentAudio();
Python
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import uuid

from google.cloud.dialogflowcx_v3.services.agents import AgentsClient
from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import audio_config
from google.cloud.dialogflowcx_v3.types import session


def run_sample():
# TODO(developer): Replace these values when running the function
project_id = "YOUR-PROJECT-ID"
# For more information about regionalization see https://cloud.google.com/dialogflow/cx/docs/how/region
location_id = "YOUR-LOCATION-ID"
# For more info on agents see https://cloud.google.com/dialogflow/cx/docs/concept/agent
agent_id = "YOUR-AGENT-ID"
agent = f"projects/{project_id}/locations/{location_id}/agents/{agent_id}"
# For more information on sessions see https://cloud.google.com/dialogflow/cx/docs/concept/session
session_id = str(uuid.uuid4())
audio_file_path = "YOUR-AUDIO-FILE-PATH"
# For more supported languages see https://cloud.google.com/dialogflow/es/docs/reference/language
language_code = "en-us"
detect_intent_audio(agent, session_id, audio_file_path, language_code)
def detect_intent_audio(agent, session_id, audio_file_path, language_code):
"""Returns the result of detect intent with an audio file as input.
Using the same `session_id` between requests allows continuation
of the conversation."""
session_path = f"{agent}/sessions/{session_id}"
print(f"Session path: {session_path}\n")
client_options = None
agent_components = AgentsClient.parse_agent_path(agent)
location_id = agent_components["location"]
if location_id != "global":
api_endpoint = f"{location_id}-dialogflow.googleapis.com:443"
print(f"API Endpoint: {api_endpoint}\n")
client_options = {"api_endpoint": api_endpoint}
session_client = SessionsClient(client_options=client_options)
    input_audio_config = audio_config.InputAudioConfig(
        audio_encoding=audio_config.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
        # Note: hardcoded for simplicity; this must match the sample rate
        # of the input audio file.
        sample_rate_hertz=24000,
    )
with open(audio_file_path, "rb") as audio_file:
input_audio = audio_file.read()
audio_input = session.AudioInput(config=input_audio_config, audio=input_audio)
query_input = session.QueryInput(audio=audio_input, language_code=language_code)
request = session.DetectIntentRequest(session=session_path, query_input=query_input)
response = session_client.detect_intent(request=request)
print("=" * 20)
print(f"Query text: {response.query_result.transcript}")
response_messages = [
" ".join(msg.text.text) for msg in response.query_result.response_messages
]
print(f"Response text: {' '.join(response_messages)}\n")
Detect intent with event invocation
The following sample shows how to detect intent with event invocation.
Java
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.ApiException;
import com.google.cloud.dialogflow.cx.v3.DetectIntentRequest;
import com.google.cloud.dialogflow.cx.v3.DetectIntentResponse;
import com.google.cloud.dialogflow.cx.v3.EventInput;
import com.google.cloud.dialogflow.cx.v3.QueryInput;
import com.google.cloud.dialogflow.cx.v3.QueryResult;
import com.google.cloud.dialogflow.cx.v3.SessionName;
import com.google.cloud.dialogflow.cx.v3.SessionsClient;
import com.google.cloud.dialogflow.cx.v3.SessionsSettings;
import java.io.IOException;
public class DetectIntentEventInput {
  // Dialogflow API Detect Intent sample with event input.
public static void main(String[] args) throws IOException, ApiException {
String projectId = "my-project-id";
String locationId = "global";
String agentId = "my-agent-id";
String sessionId = "my-UUID";
String event = "my-event-id";
String languageCode = "en";
detectIntent(projectId, locationId, agentId, sessionId, event, languageCode);
}
public static void detectIntent(
String projectId,
String locationId,
String agentId,
String sessionId,
String event,
String languageCode)
throws IOException, ApiException {
SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
if (locationId.equals("global")) {
sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
} else {
sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
}
SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();
// Instantiates a client by setting the session name.
// Format:`projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
// Note: close() needs to be called on the SessionsClient object to clean up resources
// such as threads. In the example below, try-with-resources is used,
// which automatically calls close().
try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
SessionName session =
SessionName.ofProjectLocationAgentSessionName(projectId, locationId, agentId, sessionId);
// TODO : Uncomment if you want to print session path
// System.out.println("Session Path: " + session.toString());
EventInput.Builder eventInput = EventInput.newBuilder().setEvent(event);
      // Build the query with the EventInput and language code.
QueryInput queryInput =
QueryInput.newBuilder().setEvent(eventInput).setLanguageCode(languageCode).build();
// Build the DetectIntentRequest with the SessionName and QueryInput.
DetectIntentRequest request =
DetectIntentRequest.newBuilder()
.setSession(session.toString())
.setQueryInput(queryInput)
.build();
// Performs the detect intent request.
DetectIntentResponse response = sessionsClient.detectIntent(request);
// Display the query result.
QueryResult queryResult = response.getQueryResult();
      System.out.println("====================");
      System.out.format("Triggering Event: %s \n", queryResult.getTriggerEvent());
}
}
}
Node.js
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
/**
* Required. The name of the session this query is sent to.
* Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
* ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
* ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
* If `Environment ID` is not specified, we assume default 'draft'
* environment.
* It's up to the API caller to choose an appropriate `Session ID`. It can be
* a random number or some type of session identifiers (preferably hashed).
* The length of the `Session ID` must not exceed 36 characters.
* For more information, see the sessions
* guide (https://cloud.google.com/dialogflow/cx/docs/concept/session).
* Note: Always use agent versions for production traffic.
* See Versions and
* environments (https://cloud.google.com/dialogflow/cx/docs/concept/version).
*/
/**
* Optional. The parameters of this query.
*/
// const queryParams = {}
/**
* Required. The input specification. See https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3beta1/ConversationTurn#QueryInput for information about query inputs.
*/
// const event = 'name-of-event-to-trigger';
// Imports the Dialogflow CX library
const {SessionsClient} = require('@google-cloud/dialogflow-cx');
/**
* Example for regional endpoint:
* const location = 'us-central1'
* const client = new SessionsClient({apiEndpoint: 'us-central1-dialogflow.googleapis.com'})
*/
// Instantiates a client
const cxClient = new SessionsClient();
async function detectIntentWithEventInput() {
const sessionId = Math.random().toString(36).substring(7);
const sessionPath = cxClient.projectLocationAgentSessionPath(
projectId,
location,
agentId,
sessionId
);
// Construct detect intent request
const request = {
session: sessionPath,
queryInput: {
event: {
event: event,
},
languageCode,
},
};
// Send request and receive response
const [response] = await cxClient.detectIntent(request);
console.log(`Event Name: ${event}`);
// Response message from the triggered event
console.log('Agent Response: \n');
console.log(response.queryResult.responseMessages[0].text.text[0]);
}
detectIntentWithEventInput();
Python
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import uuid
from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import session
def run_sample():
    # TODO(developer): Update these values when running the function
    project_id = "YOUR-PROJECT-ID"
    location = "global"
    agent_id = "YOUR-AGENT-ID"
    event = "sys.no-match-default"
    language_code = "en-us"
detect_intent_with_event_input(
project_id,
location,
agent_id,
event,
language_code,
)
def detect_intent_with_event_input(
project_id,
location,
agent_id,
event,
language_code,
):
"""Detects intent using EventInput"""
client_options = None
if location != "global":
api_endpoint = f"{location}-dialogflow.googleapis.com:443"
print(f"API Endpoint: {api_endpoint}\n")
client_options = {"api_endpoint": api_endpoint}
session_client = SessionsClient(client_options=client_options)
session_id = str(uuid.uuid4())
session_path = session_client.session_path(
project=project_id,
location=location,
agent=agent_id,
session=session_id,
)
# Construct detect intent request:
    event_input = session.EventInput(event=event)
    query_input = session.QueryInput(event=event_input, language_code=language_code)
request = session.DetectIntentRequest(
session=session_path,
query_input=query_input,
)
response = session_client.detect_intent(request=request)
response_text = response.query_result.response_messages[0].text.text[0]
print(f"Response: {response_text}")
return response_text
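Event input can be combined with query parameters, for example to seed session parameters in the same request that triggers the event. A sketch building on the sample above; the parameter key and value are illustrative, not part of the original sample:

from google.cloud.dialogflowcx_v3.types import session


def detect_intent_event_with_params(session_client, session_path, event, language_code):
    # Seed a session parameter alongside the event; the key/value are illustrative.
    query_params = session.QueryParameters(parameters={"customer_tier": "gold"})
    query_input = session.QueryInput(
        event=session.EventInput(event=event), language_code=language_code
    )
    request = session.DetectIntentRequest(
        session=session_path,
        query_input=query_input,
        query_params=query_params,
    )
    return session_client.detect_intent(request=request)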
Detect intent with a caller-triggered intent match
The following sample shows how to detect intent with a caller-triggered intent match.
Java
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.ApiException;
import com.google.cloud.dialogflow.cx.v3.DetectIntentRequest;
import com.google.cloud.dialogflow.cx.v3.DetectIntentResponse;
import com.google.cloud.dialogflow.cx.v3.IntentInput;
import com.google.cloud.dialogflow.cx.v3.QueryInput;
import com.google.cloud.dialogflow.cx.v3.QueryResult;
import com.google.cloud.dialogflow.cx.v3.SessionName;
import com.google.cloud.dialogflow.cx.v3.SessionsClient;
import com.google.cloud.dialogflow.cx.v3.SessionsSettings;
import java.io.IOException;
public class DetectIntentIntentInput {
  // Dialogflow API Detect Intent sample with intent input.
public static void main(String[] args) throws IOException, ApiException {
String projectId = "my-project-id";
String locationId = "global";
String agentId = "my-agent-id";
String sessionId = "my-UUID";
String intent = "my-intent-id";
String languageCode = "en";
detectIntent(projectId, locationId, agentId, sessionId, intent, languageCode);
}
public static void detectIntent(
String projectId,
String locationId,
String agentId,
String sessionId,
String intent,
String languageCode)
throws IOException, ApiException {
SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
if (locationId.equals("global")) {
sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
} else {
sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
}
SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();
// Instantiates a client by setting the session name.
// Format:`projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
// Note: close() needs to be called on the SessionsClient object to clean up resources
// such as threads. In the example below, try-with-resources is used,
// which automatically calls close().
try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
SessionName session =
SessionName.ofProjectLocationAgentSessionName(projectId, locationId, agentId, sessionId);
// TODO : Uncomment if you want to print session path
// System.out.println("Session Path: " + session.toString());
IntentInput.Builder intentInput = IntentInput.newBuilder().setIntent(intent);
      // Build the query with the IntentInput and language code.
QueryInput queryInput =
QueryInput.newBuilder().setIntent(intentInput).setLanguageCode(languageCode).build();
// Build the DetectIntentRequest with the SessionName and QueryInput.
DetectIntentRequest request =
DetectIntentRequest.newBuilder()
.setSession(session.toString())
.setQueryInput(queryInput)
.build();
// Performs the detect intent request.
DetectIntentResponse response = sessionsClient.detectIntent(request);
// Display the query result.
QueryResult queryResult = response.getQueryResult();
System.out.println("====================");
System.out.format(
"Detected Intent: %s (confidence: %f)\n",
queryResult.getIntent().getDisplayName(), queryResult.getIntentDetectionConfidence());
}
}
}
Node.js
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
/**
* const projectId = 'your-project-id';
* const location = 'location';
* const agentId = 'your-agent-id';
* const languageCode = 'your-language-code';
*/
/**
* The input specification. See https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3beta1/ConversationTurn#QueryInput for information about query inputs.
*/
// const intentId = 'unique-identifier-of-the-intent-to-trigger';
// Imports the Dialogflow CX library
const {
SessionsClient,
IntentsClient,
} = require('@google-cloud/dialogflow-cx');
/**
* Example for regional endpoint:
* const location = 'us-central1'
* const client = new SessionsClient({apiEndpoint: 'us-central1-dialogflow.googleapis.com'})
*/
// Instantiates a Sessions client
const sessionsClient = new SessionsClient();
// Instantiates an Intents client
const intentsClient = new IntentsClient();
async function detectIntentWithIntentInput() {
const sessionId = Math.random().toString(36).substring(7);
// Creates session path
const sessionPath = sessionsClient.projectLocationAgentSessionPath(
projectId,
location,
agentId,
sessionId
);
// Creates intent path. Format: projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>
const intentPath = intentsClient.intentPath(
projectId,
location,
agentId,
intentId
);
// Construct detectIntent request
const request = {
session: sessionPath,
queryInput: {
intent: {
intent: intentPath,
},
languageCode,
},
};
// Send request and receive response
const [response] = await sessionsClient.detectIntent(request);
// Display the name of the detected intent
console.log('Intent Name: \n');
console.log(response.queryResult.intent.displayName);
// Agent responds with fulfillment message of the detected intent
console.log('Agent Response: \n');
console.log(response.queryResult.responseMessages[0].text.text[0]);
}
detectIntentWithIntentInput();
Python
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import uuid
from google.cloud.dialogflowcx_v3.services.intents import IntentsClient
from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import session
def run_sample():
# TODO(developer): Update these values when running the function
project_id = "YOUR-PROJECT-ID"
location = "YOUR-LOCATION-ID"
agent_id = "YOUR-AGENT-ID"
intent_id = "YOUR-INTENT-ID"
language_code = "en-us"
detect_intent_with_intent_input(
project_id,
location,
agent_id,
intent_id,
language_code,
)
def detect_intent_with_intent_input(
project_id,
location,
agent_id,
intent_id,
language_code,
):
"""Returns the result of detect intent with sentiment analysis"""
client_options = None
if location != "global":
api_endpoint = f"{location}-dialogflow.googleapis.com:443"
print(f"API Endpoint: {api_endpoint}\n")
client_options = {"api_endpoint": api_endpoint}
session_client = SessionsClient(client_options=client_options)
session_id = str(uuid.uuid4())
intents_client = IntentsClient()
session_path = session_client.session_path(
project=project_id,
location=location,
agent=agent_id,
session=session_id,
)
intent_path = intents_client.intent_path(
project=project_id,
location=location,
agent=agent_id,
intent=intent_id,
)
intent = session.IntentInput(intent=intent_path)
query_input = session.QueryInput(intent=intent, language_code=language_code)
request = session.DetectIntentRequest(
session=session_path,
query_input=query_input,
)
response = session_client.detect_intent(request=request)
response_text = []
for response_message in response.query_result.response_messages:
response_text.append(response_message.text.text)
print(response_message.text.text)
return response_text
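Intent input requires the intent ID, not its display name. If you only have the display name, you can resolve it by listing the agent's intents. A sketch under that assumption (for a regional agent, construct IntentsClient with the same client_options as the sessions client):

def find_intent_by_display_name(intents_client, agent, display_name):
    # agent format: projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>
    for intent in intents_client.list_intents(parent=agent):
        if intent.display_name == display_name:
            # The full resource name, usable directly as IntentInput.intent.
            return intent.name
    return None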
Detect intent with sentiment analysis enabled
The following sample shows how to detect intent with sentiment analysis enabled.
Java
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.ApiException;
import com.google.cloud.dialogflow.cx.v3.DetectIntentRequest;
import com.google.cloud.dialogflow.cx.v3.DetectIntentResponse;
import com.google.cloud.dialogflow.cx.v3.QueryInput;
import com.google.cloud.dialogflow.cx.v3.QueryParameters;
import com.google.cloud.dialogflow.cx.v3.QueryResult;
import com.google.cloud.dialogflow.cx.v3.SessionName;
import com.google.cloud.dialogflow.cx.v3.SessionsClient;
import com.google.cloud.dialogflow.cx.v3.SessionsSettings;
import com.google.cloud.dialogflow.cx.v3.TextInput;
import com.google.common.collect.Maps;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class DetectIntentSentimentAnalysis {
public static void main(String[] args) throws IOException, ApiException {
String projectId = "my-project-id";
String locationId = "global";
String agentId = "my-agent-id";
String sessionId = "my-UUID";
List<String> texts = new ArrayList<>(List.of("my-list", "of-texts"));
String languageCode = "en";
detectIntent(projectId, locationId, agentId, sessionId, texts, languageCode);
}
  // Dialogflow API Detect Intent sample with sentiment analysis.
public static Map<String, QueryResult> detectIntent(
String projectId,
String locationId,
String agentId,
String sessionId,
List<String> texts,
String languageCode)
throws IOException, ApiException {
SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
if (locationId.equals("global")) {
sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
} else {
sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
}
SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();
Map<String, QueryResult> queryResults = Maps.newHashMap();
// Instantiates a client by setting the session name.
// Format:`projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
// Note: close() needs to be called on the SessionsClient object to clean up resources
// such as threads. In the example below, try-with-resources is used,
// which automatically calls close().
try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
SessionName session =
SessionName.ofProjectLocationAgentSessionName(projectId, locationId, agentId, sessionId);
// TODO : Uncomment if you want to print session path
// System.out.println("Session Path: " + session.toString());
// Detect intents for each text input.
for (String text : texts) {
        // Set the text for the query.
        TextInput.Builder textInput = TextInput.newBuilder().setText(text);
        // Build the query with the TextInput and language code.
QueryInput queryInput =
QueryInput.newBuilder().setText(textInput).setLanguageCode(languageCode).build();
// Build the query parameters to analyze the sentiment of the query.
QueryParameters queryParameters =
QueryParameters.newBuilder().setAnalyzeQueryTextSentiment(true).build();
// Build the DetectIntentRequest with the SessionName, QueryInput, and QueryParameters.
DetectIntentRequest request =
DetectIntentRequest.newBuilder()
.setSession(session.toString())
.setQueryInput(queryInput)
.setQueryParams(queryParameters)
.build();
// Performs the detect intent request.
DetectIntentResponse response = sessionsClient.detectIntent(request);
// Display the query result.
QueryResult queryResult = response.getQueryResult();
// TODO : Uncomment if you want to print queryResult
// System.out.println("====================");
// SentimentAnalysisResult sentimentAnalysisResult =
// queryResult.getSentimentAnalysisResult();
// Float score = sentimentAnalysisResult.getScore();
queryResults.put(text, queryResult);
}
}
return queryResults;
}
}
Node.js
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
/**
* Required. The name of the session this query is sent to.
* Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
* ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
* ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
* If `Environment ID` is not specified, we assume default 'draft'
* environment.
* It's up to the API caller to choose an appropriate `Session ID`. It can be
* a random number or some type of session identifiers (preferably hashed).
* The length of the `Session ID` must not exceed 36 characters.
* For more information, see the sessions
* guide (https://cloud.google.com/dialogflow/cx/docs/concept/session).
* Note: Always use agent versions for production traffic.
* See Versions and
* environments (https://cloud.google.com/dialogflow/cx/docs/concept/version).
*/
/**
* Optional. The parameters of this query.
*/
// const queryParams = {}
/**
* Required. The input specification. See https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3beta1/ConversationTurn#QueryInput for information about query inputs.
*/
// const text = 'text-of-your-query';
// Imports the Dialogflow CX library
const {SessionsClient} = require('@google-cloud/dialogflow-cx');
/**
* Example for regional endpoint:
* const location = 'us-central1'
* const client = new SessionsClient({apiEndpoint: 'us-central1-dialogflow.googleapis.com'})
*/
// Instantiates a client
const cxClient = new SessionsClient();
// Configures whether sentiment analysis should be performed. If not provided, sentiment analysis is not performed.
const analyzeQueryTextSentiment = true;
async function detectIntentWithSentimentAnalysis() {
const sessionId = Math.random().toString(36).substring(7);
const sessionPath = cxClient.projectLocationAgentSessionPath(
projectId,
location,
agentId,
sessionId
);
// Construct detect intent request
const request = {
session: sessionPath,
queryInput: {
text: {
text: query,
},
languageCode,
},
queryParams: {
analyzeQueryTextSentiment: analyzeQueryTextSentiment,
},
};
// Run request
const [response] = await cxClient.detectIntent(request);
console.log(`User Query: ${query}`);
// Shows result of sentiment analysis (sentimentAnalysisResult)
const sentimentAnalysis = response.queryResult.sentimentAnalysisResult;
// Determines sentiment score of user query
let sentiment;
if (sentimentAnalysis.score < 0) {
sentiment = 'negative';
} else if (sentimentAnalysis.score > 0) {
sentiment = 'positive';
} else {
sentiment = 'neutral';
}
console.log(
`User input sentiment has a score of ${sentimentAnalysis.score}, which indicates ${sentiment} sentiment.`
);
}
detectIntentWithSentimentAnalysis();
Python
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import uuid
from google.cloud.dialogflowcx_v3beta1.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3beta1.types import session
def run_sample():
# TODO(developer): Update these values when running the function
project_id = "YOUR-PROJECT-ID"
location = "YOUR-LOCATION-ID"
agent_id = "YOUR-AGENT-ID"
text = "Perfect!"
language_code = "en-us"
detect_intent_with_sentiment_analysis(
project_id,
location,
agent_id,
text,
language_code,
)
def detect_intent_with_sentiment_analysis(
project_id,
location,
agent_id,
text,
language_code,
):
"""Returns the result of detect intent with sentiment analysis"""
client_options = None
if location != "global":
api_endpoint = f"{location}-dialogflow.googleapis.com:443"
print(f"API Endpoint: {api_endpoint}\n")
client_options = {"api_endpoint": api_endpoint}
session_client = SessionsClient(client_options=client_options)
session_id = str(uuid.uuid4())
session_path = session_client.session_path(
project=project_id,
location=location,
agent=agent_id,
session=session_id,
)
text_input = session.TextInput(text=text)
query_input = session.QueryInput(text=text_input, language_code=language_code)
query_params = session.QueryParameters(
analyze_query_text_sentiment=True,
)
request = session.DetectIntentRequest(
session=session_path,
query_input=query_input,
query_params=query_params,
)
response = session_client.detect_intent(request=request)
score = response.query_result.sentiment_analysis_result.score
print("Sentiment Score: {score}")
return score
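The sentiment score ranges from -1.0 (negative) to 1.0 (positive). A small helper, mirroring the bucketing in the Node.js sample above, that turns a score into a label:

def sentiment_label(score):
    # Bucket a [-1.0, 1.0] sentiment score into a coarse label.
    if score < 0:
        return "negative"
    if score > 0:
        return "positive"
    return "neutral"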
Detect intent with a text-to-speech response
The following sample shows how to detect intent with a text-to-speech response.
Java
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.ApiException;
import com.google.cloud.dialogflow.cx.v3.AudioEncoding;
import com.google.cloud.dialogflow.cx.v3.AudioInput;
import com.google.cloud.dialogflow.cx.v3.DetectIntentRequest;
import com.google.cloud.dialogflow.cx.v3.DetectIntentResponse;
import com.google.cloud.dialogflow.cx.v3.InputAudioConfig;
import com.google.cloud.dialogflow.cx.v3.OutputAudioConfig;
import com.google.cloud.dialogflow.cx.v3.OutputAudioEncoding;
import com.google.cloud.dialogflow.cx.v3.QueryInput;
import com.google.cloud.dialogflow.cx.v3.SessionName;
import com.google.cloud.dialogflow.cx.v3.SessionsClient;
import com.google.cloud.dialogflow.cx.v3.SessionsSettings;
import com.google.cloud.dialogflow.cx.v3.SynthesizeSpeechConfig;
import com.google.protobuf.ByteString;
import java.io.FileInputStream;
import java.io.IOException;
public class DetectIntentSynthesizeTextToSpeechOutput {
  // Dialogflow API Detect Intent sample with synthesized text-to-speech output.
public static void main(String[] args) throws IOException, ApiException {
String projectId = "my-project-id";
String locationId = "my-location-id";
String agentId = "my-agent-id";
String audioFileName = "my-audio-file-name";
int sampleRateHertz = 16000;
String sessionId = "my-session-id";
String languageCode = "my-language-code";
detectIntent(
projectId, locationId, agentId, audioFileName, sampleRateHertz, sessionId, languageCode);
}
public static void detectIntent(
String projectId,
String locationId,
String agentId,
String audioFileName,
int sampleRateHertz,
String sessionId,
String languageCode)
throws IOException, ApiException {
SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
if (locationId.equals("global")) {
sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
} else {
sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
}
SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();
// Instantiates a client by setting the session name.
// Format:`projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
// Note: close() needs to be called on the SessionsClient object to clean up resources
// such as threads. In the example below, try-with-resources is used,
// which automatically calls close().
try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
SessionName session =
SessionName.ofProjectLocationAgentSessionName(projectId, locationId, agentId, sessionId);
// TODO : Uncomment if you want to print session path
// System.out.println("Session Path: " + session.toString());
InputAudioConfig inputAudioConfig =
InputAudioConfig.newBuilder()
.setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
.setSampleRateHertz(sampleRateHertz)
.build();
      try (FileInputStream audioStream = new FileInputStream(audioFileName)) {
        // Read the entire audio file into memory. For long audio inputs, prefer the
        // streaming detect intent API, which sends the audio in chunks.
        AudioInput audioInput =
            AudioInput.newBuilder()
                .setAudio(ByteString.readFrom(audioStream))
                .setConfig(inputAudioConfig)
                .build();
QueryInput queryInput =
QueryInput.newBuilder()
.setAudio(audioInput)
                .setLanguageCode(languageCode)
.build();
SynthesizeSpeechConfig speechConfig =
SynthesizeSpeechConfig.newBuilder().setSpeakingRate(1.25).setPitch(10.0).build();
OutputAudioConfig outputAudioConfig =
OutputAudioConfig.newBuilder()
.setAudioEncoding(OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16)
.setSynthesizeSpeechConfig(speechConfig)
.build();
DetectIntentRequest request =
DetectIntentRequest.newBuilder()
.setSession(session.toString())
.setQueryInput(queryInput)
.setOutputAudioConfig(outputAudioConfig)
.build();
// Performs the detect intent request.
DetectIntentResponse response = sessionsClient.detectIntent(request);
// Display the output audio config retrieved from the response.
OutputAudioConfig audioConfigFromResponse = response.getOutputAudioConfig();
System.out.println("====================");
System.out.format("Output Audio Config: %s \n", audioConfigFromResponse.toString());
}
}
}
}
Node.js
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
// Imports the Dialogflow CX library
const {SessionsClient} = require('@google-cloud/dialogflow-cx');
/**
* TODO(developer): Uncomment the following lines before running the sample.
*/
// const projectId = 'ID of GCP project associated with your Dialogflow agent';
// const location = 'global';
// const agentId = 'my-agent';
// const sessionId = `user specific ID of session, e.g. 12345`;
// const query = `phrase(s) to pass to detect, e.g. I'd like to reserve a room for six people`;
// const languageCode = 'BCP-47 language code, e.g. en-US';
// const outputFile = `path for audio output file, e.g. ./resources/myOutput.wav`;
// Instantiates a Sessions client
const sessionsClient = new SessionsClient();
// Define session path
const sessionPath = sessionsClient.projectLocationAgentSessionPath(
projectId,
location,
agentId,
sessionId
);
const fs = require('fs');
const util = require('util');
async function detectIntentSynthesizeTTSResponse() {
// Configuration of how speech should be synthesized. See https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/OutputAudioConfig#SynthesizeSpeechConfig
const synthesizeSpeechConfig = {
speakingRate: 1.25,
pitch: 10.0,
};
// Constructs the audio query request
const request = {
session: sessionPath,
queryInput: {
text: {
text: query,
},
languageCode: languageCode,
},
outputAudioConfig: {
audioEncoding: 'OUTPUT_AUDIO_ENCODING_LINEAR_16',
synthesizeSpeechConfig: synthesizeSpeechConfig,
},
};
// Sends the detectIntent request
const [response] = await sessionsClient.detectIntent(request);
// Output audio configurations
console.log(
`Speaking Rate: ${response.outputAudioConfig.synthesizeSpeechConfig.speakingRate}`
);
console.log(
`Pitch: ${response.outputAudioConfig.synthesizeSpeechConfig.pitch}`
);
const audioFile = response.outputAudio;
// Writes audio content to output file
  await util.promisify(fs.writeFile)(outputFile, audioFile, 'binary');
console.log(`Audio content written to file: ${outputFile}`);
}
detectIntentSynthesizeTTSResponse();
Python
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import uuid
from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import audio_config
from google.cloud.dialogflowcx_v3.types import session
def run_sample():
# TODO(developer): Update these values when running the function
project_id = "YOUR-PROJECT-ID"
location = "YOUR-LOCATION-ID"
agent_id = "YOUR-AGENT-ID"
text = "YOUR-TEXT"
audio_encoding = "YOUR-AUDIO-ENCODING"
language_code = "YOUR-LANGUAGE-CODE"
output_file = "YOUR-OUTPUT-FILE"
detect_intent_synthesize_tts_response(
project_id,
location,
agent_id,
text,
audio_encoding,
language_code,
output_file,
)
def detect_intent_synthesize_tts_response(
project_id,
location,
agent_id,
text,
audio_encoding,
language_code,
output_file,
):
"""Returns the result of detect intent with synthesized response."""
client_options = None
if location != "global":
api_endpoint = f"{location}-dialogflow.googleapis.com:443"
print(f"API Endpoint: {api_endpoint}\n")
client_options = {"api_endpoint": api_endpoint}
session_client = SessionsClient(client_options=client_options)
session_id = str(uuid.uuid4())
# Constructs the audio query request
session_path = session_client.session_path(
project=project_id,
location=location,
agent=agent_id,
session=session_id,
)
text_input = session.TextInput(text=text)
query_input = session.QueryInput(text=text_input, language_code=language_code)
synthesize_speech_config = audio_config.SynthesizeSpeechConfig(
speaking_rate=1.25,
pitch=10.0,
)
output_audio_config = audio_config.OutputAudioConfig(
synthesize_speech_config=synthesize_speech_config,
audio_encoding=audio_config.OutputAudioEncoding[audio_encoding],
)
request = session.DetectIntentRequest(
session=session_path,
query_input=query_input,
output_audio_config=output_audio_config,
)
response = session_client.detect_intent(request=request)
print(
"Speaking Rate: "
f"{response.output_audio_config.synthesize_speech_config.speaking_rate}"
)
print("Pitch: " f"{response.output_audio_config.synthesize_speech_config.pitch}")
with open(output_file, "wb") as fout:
fout.write(response.output_audio)
print(f"Audio content written to file: {output_file}")
Detect intent with the webhook disabled
The following sample shows how to detect intent with webhook calls disabled.
Java
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.ApiException;
import com.google.cloud.dialogflow.cx.v3.DetectIntentRequest;
import com.google.cloud.dialogflow.cx.v3.DetectIntentResponse;
import com.google.cloud.dialogflow.cx.v3.QueryInput;
import com.google.cloud.dialogflow.cx.v3.QueryParameters;
import com.google.cloud.dialogflow.cx.v3.QueryResult;
import com.google.cloud.dialogflow.cx.v3.SessionName;
import com.google.cloud.dialogflow.cx.v3.SessionsClient;
import com.google.cloud.dialogflow.cx.v3.SessionsSettings;
import com.google.cloud.dialogflow.cx.v3.TextInput;
import com.google.common.collect.Maps;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class DetectIntentDisableWebhook {
public static void main(String[] args) throws IOException, ApiException {
String projectId = "my-project-id";
String locationId = "global";
String agentId = "my-agent-id";
String sessionId = "my-UUID";
List<String> texts = new ArrayList<>(List.of("my-list", "of-texts"));
String languageCode = "en";
detectIntent(projectId, locationId, agentId, sessionId, texts, languageCode);
}
  // Dialogflow API Detect Intent sample with the webhook disabled.
public static Map<String, QueryResult> detectIntent(
String projectId,
String locationId,
String agentId,
String sessionId,
List<String> texts,
String languageCode)
throws IOException, ApiException {
SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
if (locationId.equals("global")) {
sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
} else {
sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
}
SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();
Map<String, QueryResult> queryResults = Maps.newHashMap();
// Instantiates a client by setting the session name.
// Format:`projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
// Note: close() needs to be called on the SessionsClient object to clean up resources
// such as threads. In the example below, try-with-resources is used,
// which automatically calls close().
try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
SessionName session =
SessionName.ofProjectLocationAgentSessionName(projectId, locationId, agentId, sessionId);
// TODO : Uncomment if you want to print session path
// System.out.println("Session Path: " + session.toString());
// Detect intents for each text input.
for (String text : texts) {
        // Set the text for the query.
        TextInput.Builder textInput = TextInput.newBuilder().setText(text);
        // Build the query with the TextInput and language code.
QueryInput queryInput =
QueryInput.newBuilder().setText(textInput).setLanguageCode(languageCode).build();
// Build the query parameters and setDisableWebhook to true.
QueryParameters queryParameters =
QueryParameters.newBuilder().setDisableWebhook(true).build();
// Build the DetectIntentRequest with the SessionName, QueryInput, and QueryParameters.
DetectIntentRequest request =
DetectIntentRequest.newBuilder()
.setSession(session.toString())
.setQueryInput(queryInput)
.setQueryParams(queryParameters)
.build();
        System.out.println(
            "Disable webhook: " + request.getQueryParams().getDisableWebhook());
// Performs the detect intent request.
DetectIntentResponse response = sessionsClient.detectIntent(request);
// Display the query result.
QueryResult queryResult = response.getQueryResult();
// TODO : Uncomment if you want to print queryResult
// System.out.println("====================");
// System.out.format("Query Text: '%s'\n", queryResult.getText());
// System.out.format(
// "Detected Intent: %s (confidence: %f)\n",
// queryResult.getIntent().getDisplayName(),
// queryResult.getIntentDetectionConfidence());
queryResults.put(text, queryResult);
}
}
return queryResults;
}
}
Node.js
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
// const projectId = 'my-project';
// const location = 'global';
// const agentId = 'my-agent';
// const query = 'Hello';
// const languageCode = 'en'
const {SessionsClient} = require('@google-cloud/dialogflow-cx');
/**
* Example for regional endpoint:
* const location = 'us-central1'
* const client = new SessionsClient({apiEndpoint: 'us-central1-dialogflow.googleapis.com'})
*/
const client = new SessionsClient();
async function detectIntentText() {
const sessionId = Math.random().toString(36).substring(7);
const sessionPath = client.projectLocationAgentSessionPath(
projectId,
location,
agentId,
sessionId
);
console.info(sessionPath);
const request = {
session: sessionPath,
queryParams: {
disableWebhook: true,
},
queryInput: {
text: {
text: query,
},
languageCode,
},
};
const [response] = await client.detectIntent(request);
console.log(`Detect Intent Request: ${request.queryParams.disableWebhook}`);
for (const message of response.queryResult.responseMessages) {
if (message.text) {
console.log(`Agent Response: ${message.text.text}`);
}
}
}
detectIntentText();
Python
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import uuid
from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import session
def run_sample():
# TODO(developer): Update these values when running the function
project_id = "YOUR-PROJECT-ID"
location = "YOUR-LOCATION-ID"
agent_id = "YOUR-AGENT-ID"
text = "Perfect!"
language_code = "en-us"
detect_intent_disabled_webhook(
project_id,
location,
agent_id,
text,
language_code,
)
def detect_intent_disabled_webhook(
project_id,
location,
agent_id,
text,
language_code,
):
"""Returns the result of detect intent with sentiment analysis"""
client_options = None
if location != "global":
api_endpoint = f"{location}-dialogflow.googleapis.com:443"
print(f"API Endpoint: {api_endpoint}\n")
client_options = {"api_endpoint": api_endpoint}
session_client = SessionsClient(client_options=client_options)
session_id = str(uuid.uuid4())
session_path = session_client.session_path(
project=project_id,
location=location,
agent=agent_id,
session=session_id,
)
# Prepare request
text_input = session.TextInput(text=text)
query_input = session.QueryInput(text=text_input, language_code=language_code)
query_params = session.QueryParameters(
disable_webhook=True,
)
request = session.DetectIntentRequest(
session=session_path,
query_input=query_input,
query_params=query_params,
)
response = session_client.detect_intent(request=request)
print(f"Detect Intent Request: {request.query_params.disable_webhook}")
response_text = []
for message in response.query_result.response_messages:
if message.text:
curr_response_text = message.text.text
print(f"Agent Response: {curr_response_text}")
response_text.append(curr_response_text)
return response_text
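Query parameters apply per request, so disabling the webhook on one turn does not affect later turns in the same session. A sketch that reuses one session across several turns with the webhook disabled on each request, building on the sample above:

from google.cloud.dialogflowcx_v3.types import session


def detect_intent_turns(session_client, session_path, texts, language_code):
    # Send several turns in the same session; disable the webhook per request.
    results = []
    for text in texts:
        request = session.DetectIntentRequest(
            session=session_path,
            query_input=session.QueryInput(
                text=session.TextInput(text=text), language_code=language_code
            ),
            query_params=session.QueryParameters(disable_webhook=True),
        )
        results.append(session_client.detect_intent(request=request))
    return results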
Streaming detect intent with audio input
The following sample shows how to stream audio input to a streaming detect intent request.
Java
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.ApiException;
import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding;
import com.google.cloud.dialogflow.cx.v3beta1.AudioInput;
import com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig;
import com.google.cloud.dialogflow.cx.v3beta1.OutputAudioConfig;
import com.google.cloud.dialogflow.cx.v3beta1.OutputAudioEncoding;
import com.google.cloud.dialogflow.cx.v3beta1.QueryInput;
import com.google.cloud.dialogflow.cx.v3beta1.QueryResult;
import com.google.cloud.dialogflow.cx.v3beta1.SessionName;
import com.google.cloud.dialogflow.cx.v3beta1.SessionsClient;
import com.google.cloud.dialogflow.cx.v3beta1.SessionsSettings;
import com.google.cloud.dialogflow.cx.v3beta1.SsmlVoiceGender;
import com.google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest;
import com.google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentResponse;
import com.google.cloud.dialogflow.cx.v3beta1.SynthesizeSpeechConfig;
import com.google.cloud.dialogflow.cx.v3beta1.VoiceSelectionParams;
import com.google.protobuf.ByteString;
import java.io.FileInputStream;
import java.io.IOException;
public abstract class DetectIntentStream {
  // Dialogflow API Detect Intent sample with audio files processed as an audio stream.
public static void detectIntentStream(
String projectId, String locationId, String agentId, String sessionId, String audioFilePath)
throws ApiException, IOException {
SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
if ("global".equals(locationId)) {
sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
} else {
sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
}
SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();
// Instantiates a client by setting the session name.
// Format: `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
// Using the same `sessionId` between requests allows continuation of the conversation.
// Note: close() needs to be called on the SessionsClient object to clean up resources
// such as threads. In the example below, try-with-resources is used,
// which automatically calls close().
try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
SessionName session = SessionName.of(projectId, locationId, agentId, sessionId);
// Instructs the speech recognizer how to process the audio content.
// Note: hard coding audioEncoding and sampleRateHertz for simplicity.
// Audio encoding of the audio content sent in the query request.
InputAudioConfig inputAudioConfig =
InputAudioConfig.newBuilder()
.setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
.setSampleRateHertz(16000) // sampleRateHertz = 16000
.build();
// Build the AudioInput with the InputAudioConfig.
AudioInput audioInput = AudioInput.newBuilder().setConfig(inputAudioConfig).build();
// Build the query with the InputAudioConfig.
QueryInput queryInput =
QueryInput.newBuilder()
.setAudio(audioInput)
.setLanguageCode("en-US") // languageCode = "en-US"
.build();
// Create the Bidirectional stream
BidiStream<StreamingDetectIntentRequest, StreamingDetectIntentResponse> bidiStream =
sessionsClient.streamingDetectIntentCallable().call();
      // Specify the SSML voice name and gender
VoiceSelectionParams voiceSelection =
// Voices that are available https://cloud.google.com/text-to-speech/docs/voices
VoiceSelectionParams.newBuilder()
.setName("en-US-Standard-F")
.setSsmlGender(SsmlVoiceGender.SSML_VOICE_GENDER_FEMALE)
.build();
SynthesizeSpeechConfig speechConfig =
SynthesizeSpeechConfig.newBuilder().setVoice(voiceSelection).build();
      // Set up the output audio config.
      OutputAudioConfig audioConfig =
          // Output encoding explanation:
          // https://cloud.google.com/dialogflow/cx/docs/reference/rpc/google.cloud.dialogflow.cx.v3#outputaudioencoding
          OutputAudioConfig.newBuilder()
              .setAudioEncoding(OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16)
              .setSynthesizeSpeechConfig(speechConfig)
              .build();
// The first request must **only** contain the audio configuration:
bidiStream.send(
StreamingDetectIntentRequest.newBuilder()
.setSession(session.toString())
.setQueryInput(queryInput)
.setOutputAudioConfig(audioConfig)
.build());
try (FileInputStream audioStream = new FileInputStream(audioFilePath)) {
// Subsequent requests must **only** contain the audio data.
// Following messages: audio chunks. We just read the file in fixed-size chunks. In reality
// you would split the user input by time.
byte[] buffer = new byte[4096];
int bytes;
while ((bytes = audioStream.read(buffer)) != -1) {
AudioInput subAudioInput =
AudioInput.newBuilder().setAudio(ByteString.copyFrom(buffer, 0, bytes)).build();
QueryInput subQueryInput =
QueryInput.newBuilder()
.setAudio(subAudioInput)
.setLanguageCode("en-US") // languageCode = "en-US"
.build();
bidiStream.send(
StreamingDetectIntentRequest.newBuilder().setQueryInput(subQueryInput).build());
}
}
// Tell the service you are done sending data.
bidiStream.closeSend();
for (StreamingDetectIntentResponse response : bidiStream) {
QueryResult queryResult = response.getDetectIntentResponse().getQueryResult();
System.out.println("====================");
System.out.format("Query Text: '%s'\n", queryResult.getTranscript());
System.out.format(
"Detected Intent: %s (confidence: %f)\n",
queryResult.getMatch().getIntent().getDisplayName(),
queryResult.getMatch().getConfidence());
}
}
}
}
Node.js
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
// const projectId = 'my-project';
// const location = 'global';
// const agentId = 'my-agent';
// const audioFileName = '/path/to/audio.raw';
// const encoding = 'AUDIO_ENCODING_LINEAR_16';
// const sampleRateHertz = 16000;
// const languageCode = 'en'
// Imports the Dialogflow CX API library
const {SessionsClient} = require('@google-cloud/dialogflow-cx');
/**
* Example for regional endpoint:
* const location = 'us-central1'
* const client = new SessionsClient({apiEndpoint: 'us-central1-dialogflow.googleapis.com'})
*/
const client = new SessionsClient();
const fs = require('fs');
const util = require('util');
const {Transform, pipeline} = require('stream');
const pump = util.promisify(pipeline);
async function detectIntentAudio() {
const sessionId = Math.random().toString(36).substring(7);
const sessionPath = client.projectLocationAgentSessionPath(
projectId,
location,
agentId,
sessionId
);
console.info(sessionPath);
// Create a stream for the streaming request.
const detectStream = client
.streamingDetectIntent()
.on('error', console.error)
.on('data', data => {
if (data.recognitionResult) {
console.log(
`Intermediate Transcript: ${data.recognitionResult.transcript}`
);
} else {
console.log('Detected Intent:');
const result = data.detectIntentResponse.queryResult;
console.log(`User Query: ${result.transcript}`);
for (const message of result.responseMessages) {
if (message.text) {
console.log(`Agent Response: ${message.text.text}`);
}
}
if (result.match.intent) {
console.log(`Matched Intent: ${result.match.intent.displayName}`);
}
console.log(`Current Page: ${result.currentPage.displayName}`);
}
});
// Write the initial stream request to config for audio input.
  const initialStreamRequest = {
    session: sessionPath,
    queryInput: {
      audio: {
        config: {
          audioEncoding: encoding,
          sampleRateHertz: sampleRateHertz,
          singleUtterance: true,
        },
      },
      languageCode: languageCode,
    },
    // Configures the synthesized voice of the response audio.
    outputAudioConfig: {
      audioEncoding: 'OUTPUT_AUDIO_ENCODING_LINEAR_16',
      synthesizeSpeechConfig: {
        voice: {
          // Sets the name and gender of the SSML voice
          name: 'en-GB-Standard-A',
          ssmlGender: 'SSML_VOICE_GENDER_FEMALE',
        },
      },
    },
  };
detectStream.write(initialStreamRequest);
// Stream the audio from audio file to Dialogflow.
await pump(
fs.createReadStream(audioFileName),
// Format the audio stream into the request format.
new Transform({
objectMode: true,
transform: (obj, _, next) => {
next(null, {queryInput: {audio: {audio: obj}}});
},
}),
detectStream
);
}
detectIntentAudio();
Python
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import uuid

from google.cloud.dialogflowcx_v3.services.agents import AgentsClient
from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import audio_config
from google.cloud.dialogflowcx_v3.types import session


def run_sample():
# TODO(developer): Replace these values when running the function
project_id = "YOUR-PROJECT-ID"
# For more information about regionalization see https://cloud.google.com/dialogflow/cx/docs/how/region
location_id = "YOUR-LOCATION-ID"
# For more info on agents see https://cloud.google.com/dialogflow/cx/docs/concept/agent
agent_id = "YOUR-AGENT-ID"
agent = f"projects/{project_id}/locations/{location_id}/agents/{agent_id}"
# For more information on sessions see https://cloud.google.com/dialogflow/cx/docs/concept/session
session_id = uuid.uuid4()
audio_file_path = "YOUR-AUDIO-FILE-PATH"
# For more supported languages see https://cloud.google.com/dialogflow/es/docs/reference/language
language_code = "en-us"
detect_intent_stream(agent, session_id, audio_file_path, language_code)
def detect_intent_stream(agent, session_id, audio_file_path, language_code):
"""Returns the result of detect intent with streaming audio as input.
Using the same `session_id` between requests allows continuation
of the conversation."""
session_path = f"{agent}/sessions/{session_id}"
print(f"Session path: {session_path}\n")
client_options = None
agent_components = AgentsClient.parse_agent_path(agent)
location_id = agent_components["location"]
if location_id != "global":
api_endpoint = f"{location_id}-dialogflow.googleapis.com:443"
print(f"API Endpoint: {api_endpoint}\n")
client_options = {"api_endpoint": api_endpoint}
session_client = SessionsClient(client_options=client_options)
    input_audio_config = audio_config.InputAudioConfig(
        audio_encoding=audio_config.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
        # Note: hardcoded for simplicity; this must match the sample rate
        # of the input audio file.
        sample_rate_hertz=24000,
    )
def request_generator():
audio_input = session.AudioInput(config=input_audio_config)
query_input = session.QueryInput(audio=audio_input, language_code=language_code)
voice_selection = audio_config.VoiceSelectionParams()
synthesize_speech_config = audio_config.SynthesizeSpeechConfig()
output_audio_config = audio_config.OutputAudioConfig()
# Sets the voice name and gender
voice_selection.name = "en-GB-Standard-A"
voice_selection.ssml_gender = (
audio_config.SsmlVoiceGender.SSML_VOICE_GENDER_FEMALE
)
synthesize_speech_config.voice = voice_selection
# Sets the audio encoding
output_audio_config.audio_encoding = (
audio_config.OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_UNSPECIFIED
)
output_audio_config.synthesize_speech_config = synthesize_speech_config
# The first request contains the configuration.
yield session.StreamingDetectIntentRequest(
session=session_path,
query_input=query_input,
output_audio_config=output_audio_config,
)
# Here we are reading small chunks of audio data from a local
# audio file. In practice these chunks should come from
# an audio input device.
with open(audio_file_path, "rb") as audio_file:
while True:
chunk = audio_file.read(4096)
if not chunk:
break
                # Later requests contain only the audio data.
audio_input = session.AudioInput(audio=chunk)
query_input = session.QueryInput(audio=audio_input)
yield session.StreamingDetectIntentRequest(query_input=query_input)
    responses = session_client.streaming_detect_intent(requests=request_generator())

    print("=" * 20)
    for response in responses:
        print(f'Intermediate transcript: "{response.recognition_result.transcript}".')

    # Note: The result from the last response is the final transcript along
    # with the detected content.
    response = response.detect_intent_response
    print(f"Query text: {response.query_result.transcript}")
    response_messages = [
        " ".join(msg.text.text) for msg in response.query_result.response_messages
    ]
    print(f"Response text: {' '.join(response_messages)}\n")
Streaming detect intent with partial responses enabled
The following examples show how to use streaming detect intent with partial responses.
Java
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.ApiException;
import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.dialogflow.cx.v3.AudioEncoding;
import com.google.cloud.dialogflow.cx.v3.AudioInput;
import com.google.cloud.dialogflow.cx.v3.InputAudioConfig;
import com.google.cloud.dialogflow.cx.v3.OutputAudioConfig;
import com.google.cloud.dialogflow.cx.v3.OutputAudioEncoding;
import com.google.cloud.dialogflow.cx.v3.QueryInput;
import com.google.cloud.dialogflow.cx.v3.SessionName;
import com.google.cloud.dialogflow.cx.v3.SessionsClient;
import com.google.cloud.dialogflow.cx.v3.SessionsSettings;
import com.google.cloud.dialogflow.cx.v3.SsmlVoiceGender;
import com.google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest;
import com.google.cloud.dialogflow.cx.v3.StreamingDetectIntentResponse;
import com.google.cloud.dialogflow.cx.v3.SynthesizeSpeechConfig;
import com.google.cloud.dialogflow.cx.v3.VoiceSelectionParams;
import com.google.protobuf.ByteString;
import java.io.FileInputStream;
import java.io.IOException;
public class DetectIntentStreamingPartialResponse {
  // Dialogflow API detect intent sample that processes an audio file
  // as an audio stream.
public static void detectIntentStreamingPartialResponse(
String projectId, String locationId, String agentId, String sessionId, String audioFilePath)
throws ApiException, IOException {
SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
if (locationId.equals("global")) {
sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
} else {
sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
}
SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();
// Instantiates a client by setting the session name.
// Format:`projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
// Using the same `sessionId` between requests allows continuation of the conversation.
// Note: close() needs to be called on the SessionsClient object to clean up resources
// such as threads. In the example below, try-with-resources is used,
// which automatically calls close().
try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
SessionName session = SessionName.of(projectId, locationId, agentId, sessionId);
// Instructs the speech recognizer how to process the audio content.
// Note: hard coding audioEncoding and sampleRateHertz for simplicity.
// Audio encoding of the audio content sent in the query request.
InputAudioConfig inputAudioConfig =
InputAudioConfig.newBuilder()
.setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
.setSampleRateHertz(16000) // sampleRateHertz = 16000
.build();
// Build the AudioInput with the InputAudioConfig.
AudioInput audioInput = AudioInput.newBuilder().setConfig(inputAudioConfig).build();
// Build the query with the InputAudioConfig.
QueryInput queryInput =
QueryInput.newBuilder()
.setAudio(audioInput)
.setLanguageCode("en-US") // languageCode = "en-US"
.build();
// Create the Bidirectional stream
BidiStream<StreamingDetectIntentRequest, StreamingDetectIntentResponse> bidiStream =
sessionsClient.streamingDetectIntentCallable().call();
      // Specify the SSML voice name and gender
VoiceSelectionParams voiceSelection =
// Voices that are available https://cloud.google.com/text-to-speech/docs/voices
VoiceSelectionParams.newBuilder()
.setName("en-GB-Standard-A")
.setSsmlGender(SsmlVoiceGender.SSML_VOICE_GENDER_FEMALE)
.build();
SynthesizeSpeechConfig speechConfig =
SynthesizeSpeechConfig.newBuilder().setVoice(voiceSelection).build();
      // Set up the output audio config.
      OutputAudioConfig audioConfig =
          // Output encoding explanation
          // https://cloud.google.com/dialogflow/cx/docs/reference/rpc/google.cloud.dialogflow.cx.v3#outputaudioencoding
          OutputAudioConfig.newBuilder()
              .setAudioEncoding(OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16)
              .setSynthesizeSpeechConfig(speechConfig)
              .build();
StreamingDetectIntentRequest streamingDetectIntentRequest =
StreamingDetectIntentRequest.newBuilder()
.setSession(session.toString())
.setQueryInput(queryInput)
.setEnablePartialResponse(true)
.setOutputAudioConfig(audioConfig)
.build();
System.out.println(streamingDetectIntentRequest.toString());
      // The first request contains only the configuration (no audio data):
bidiStream.send(streamingDetectIntentRequest);
try (FileInputStream audioStream = new FileInputStream(audioFilePath)) {
        // Subsequent requests must contain **only** the audio data.
        // Here the file is read in fixed-size chunks; in a real application
        // the audio chunks would arrive from the user over time.
byte[] buffer = new byte[4096];
int bytes;
while ((bytes = audioStream.read(buffer)) != -1) {
AudioInput subAudioInput =
AudioInput.newBuilder().setAudio(ByteString.copyFrom(buffer, 0, bytes)).build();
QueryInput subQueryInput =
QueryInput.newBuilder()
.setAudio(subAudioInput)
.setLanguageCode("en-US") // languageCode = "en-US"
.build();
bidiStream.send(
StreamingDetectIntentRequest.newBuilder().setQueryInput(subQueryInput).build());
}
}
// Tell the service you are done sending data.
bidiStream.closeSend();
// TODO: Uncomment to print detectIntentResponse.
// for (StreamingDetectIntentResponse response : bidiStream) {
// QueryResult queryResult = response.getDetectIntentResponse().getQueryResult();
// System.out.println("====================");
// System.out.format("Query Text: '%s'\n", queryResult.getTranscript());
// System.out.format(
// "Detected Intent: %s (confidence: %f)\n",
// queryResult.getIntent()
// .getDisplayName(), queryResult.getIntentDetectionConfidence());
// }
}
}
}
Node.js
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
// const projectId = 'my-project';
// const location = 'global';
// const agentId = 'my-agent';
// const audioFileName = '/path/to/audio.raw';
// const encoding = 'AUDIO_ENCODING_LINEAR_16';
// const sampleRateHertz = 16000;
// const languageCode = 'en';
const {SessionsClient} = require('@google-cloud/dialogflow-cx');
/**
* Example for regional endpoint:
* const location = 'us-central1'
* const client = new SessionsClient({apiEndpoint: 'us-central1-dialogflow.googleapis.com'})
*/
const client = new SessionsClient();
const fs = require('fs');
const util = require('util');
const {Transform, pipeline} = require('stream');
const pump = util.promisify(pipeline);
async function streamingDetectIntentPartialResponse() {
const sessionId = Math.random().toString(36).substring(7);
const sessionPath = client.projectLocationAgentSessionPath(
projectId,
location,
agentId,
sessionId
);
const request = {
session: sessionPath,
queryInput: {
audio: {
config: {
          audioEncoding: encoding,
sampleRateHertz: sampleRateHertz,
singleUtterance: true,
},
},
languageCode: languageCode,
},
enablePartialResponse: true,
};
const stream = await client.streamingDetectIntent();
stream.on('data', data => {
if (data.detectIntentResponse) {
const result = data.detectIntentResponse.queryResult;
for (const message of result.responseMessages) {
if (message.text) {
console.log(`Agent Response: ${message.text.text}`);
}
}
}
});
stream.on('error', err => {
console.log(err);
});
stream.on('end', () => {
/* API call completed */
});
stream.write(request);
  // Stream the audio from the audio file to Dialogflow.
await pump(
fs.createReadStream(audioFileName),
// Format the audio stream into the request format.
new Transform({
objectMode: true,
transform: (obj, _, next) => {
next(null, {queryInput: {audio: {audio: obj}}});
},
}),
stream
);
}
streamingDetectIntentPartialResponse();
Python
To authenticate to Dialogflow, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import uuid
from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import audio_config
from google.cloud.dialogflowcx_v3.types import InputAudioConfig
from google.cloud.dialogflowcx_v3.types import session
def run_sample():
"""
TODO(developer): Modify these variables before running the sample.
"""
project_id = "YOUR-PROJECT-ID"
location = "YOUR-LOCATION-ID"
agent_id = "YOUR-AGENT-ID"
audio_file_name = "YOUR-AUDIO-FILE-PATH"
encoding = "AUDIO_ENCODING_LINEAR_16"
sample_rate_hertz = 16000
language_code = "en"
streaming_detect_intent_partial_response(
project_id,
location,
agent_id,
audio_file_name,
encoding,
sample_rate_hertz,
language_code,
)
def streaming_detect_intent_partial_response(
project_id,
location,
agent_id,
audio_file_name,
encoding,
sample_rate_hertz,
language_code,
):
client_options = None
if location != "global":
api_endpoint = f"{location}-dialogflow.googleapis.com:443"
print(f"API Endpoint: {api_endpoint}\n")
client_options = {"api_endpoint": api_endpoint}
session_client = SessionsClient(client_options=client_options)
session_id = str(uuid.uuid4())
session_path = session_client.session_path(
project=project_id,
location=location,
agent=agent_id,
session=session_id,
)
def request_generator():
audio_encoding = audio_config.AudioEncoding[encoding]
config = InputAudioConfig(
audio_encoding=audio_encoding,
sample_rate_hertz=sample_rate_hertz,
single_utterance=True,
)
audio_input = session.AudioInput(config=config)
query_input = session.QueryInput(audio=audio_input, language_code=language_code)
yield session.StreamingDetectIntentRequest(
session=session_path,
query_input=query_input,
enable_partial_response=True,
)
# Here we are reading small chunks of audio data from a local
# audio file. In practice these chunks should come from
# an audio input device.
with open(audio_file_name, "rb") as audio_file:
while True:
chunk = audio_file.read(4096)
if not chunk:
break
                # Later requests contain only the audio data.
                audio_input = session.AudioInput(audio=chunk)
                query_input = session.QueryInput(audio=audio_input)
                yield session.StreamingDetectIntentRequest(query_input=query_input)
    responses = session_client.streaming_detect_intent(requests=request_generator())

    print("=" * 20)
    for response in responses:
        print(f'Intermediate transcript: "{response.recognition_result.transcript}".')

    # Note: The result from the last response is the final transcript along
    # with the detected content.
    response = response.detect_intent_response
    print(f"Query text: {response.query_result.transcript}")
    response_messages = [
        " ".join(msg.text.text) for msg in response.query_result.response_messages
    ]
    print(f"Response text: {' '.join(response_messages)}\n")