Running Parameterized Queries

This document describes how to run parameterized queries in BigQuery.

Running parameterized queries

BigQuery supports query parameters to help prevent SQL injection when queries are constructed using user input. This feature is only available with standard SQL syntax. Query parameters can be used as substitutes for arbitrary expressions. Parameters cannot be used as substitutes for identifiers, column names, table names, or other parts of the query.

To specify a named parameter, use the @ character followed by an identifier, such as @param_name. For example, this query finds all the words in a specific Shakespeare corpus with counts that are at least the specified value.

#standardSQL
SELECT
  word,
  word_count
FROM
  `bigquery-public-data.samples.shakespeare`
WHERE
  corpus = @corpus
  AND word_count >= @min_word_count
ORDER BY
  word_count DESC;

Alternatively, use the placeholder value ? to specify a positional parameter. Note that a query can use either positional or named parameters, but not both.
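
For illustration, the same query with positional parameters looks like the following; the values are supplied separately by the tool or client library, in the order the placeholders appear.

#standardSQL
SELECT
  word,
  word_count
FROM
  `bigquery-public-data.samples.shakespeare`
WHERE
  corpus = ?
  AND word_count >= ?
ORDER BY
  word_count DESC;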

Web UI

Parameterized queries are not supported by the BigQuery web UI.

Command-line

Use the --parameter flag to provide values for query parameters, in the form "name:type:value". An empty name produces a positional parameter. If the type is omitted, STRING is assumed.

The --parameter flag must be used together with the --use_legacy_sql=False flag to specify standard SQL syntax. Specify your location using the --location flag; the flag is optional if your data is in the US or EU multi-region location.

bq --location=US query --use_legacy_sql=False \
    --parameter=corpus::romeoandjuliet \
    --parameter=min_word_count:INT64:250 \
    'SELECT word, word_count
    FROM `bigquery-public-data.samples.shakespeare`
    WHERE corpus = @corpus
    AND word_count >= @min_word_count
    ORDER BY word_count DESC;'
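
To run the same query with positional parameters, leave the name empty in each --parameter flag, as described above. A minimal sketch:

bq --location=US query --use_legacy_sql=False \
    --parameter=:STRING:romeoandjuliet \
    --parameter=:INT64:250 \
    'SELECT word, word_count
    FROM `bigquery-public-data.samples.shakespeare`
    WHERE corpus = ?
    AND word_count >= ?
    ORDER BY word_count DESC;'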

API

To use named parameters, set the jobs#configuration.query.parameterMode to NAMED.

Populate jobs#configuration.query.queryParameters[] with the list of parameters. Set the name of each parameter to the param_name used in the query, without the @ prefix.

Enable standard SQL syntax by setting useLegacySql to false.

{
  "query": "SELECT word, word_count FROM `bigquery-public-data.samples.shakespeare` WHERE corpus = @corpus AND word_count >= @min_word_count ORDER BY word_count DESC;",
  "queryParameters": [
    {
      "parameterType": {
        "type": "STRING"
      },
      "parameterValue": {
        "value": "romeoandjuliet"
      },
      "name": "corpus"
    },
    {
      "parameterType": {
        "type": "INT64"
      },
      "parameterValue": {
        "value": "250"
      },
      "name": "min_word_count"
    }
  ],
  "useLegacySql": false,
  "parameterMode": "NAMED"
}

Try it in the Google APIs Explorer.

To use positional parameters, set the jobs#configuration.query.parameterMode to POSITIONAL.
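
In positional mode, each entry in queryParameters omits the name field, and values are matched to the ? placeholders in order. A minimal sketch of the equivalent request body:

{
  "query": "SELECT word, word_count FROM `bigquery-public-data.samples.shakespeare` WHERE corpus = ? AND word_count >= ? ORDER BY word_count DESC;",
  "queryParameters": [
    {
      "parameterType": {
        "type": "STRING"
      },
      "parameterValue": {
        "value": "romeoandjuliet"
      }
    },
    {
      "parameterType": {
        "type": "INT64"
      },
      "parameterValue": {
        "value": "250"
      }
    }
  ],
  "useLegacySql": false,
  "parameterMode": "POSITIONAL"
}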

Go

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

To use named parameters:
	q := client.Query(
		`SELECT word, word_count
        FROM ` + "`bigquery-public-data.samples.shakespeare`" + `
        WHERE corpus = @corpus
        AND word_count >= @min_word_count
        ORDER BY word_count DESC;`)
	q.Parameters = []bigquery.QueryParameter{
		{
			Name:  "corpus",
			Value: "romeoandjuliet",
		},
		{
			Name:  "min_word_count",
			Value: 250,
		},
	}
	job, err := q.Run(ctx)
	if err != nil {
		return err
	}
	status, err := job.Wait(ctx)
	if err != nil {
		return err
	}
	if err := status.Err(); err != nil {
		return err
	}
	it, err := job.Read(ctx)
	if err != nil {
		return err
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(row)
	}
To use positional parameters:
	q := client.Query(
		`SELECT word, word_count
        FROM ` + "`bigquery-public-data.samples.shakespeare`" + `
        WHERE corpus = ?
        AND word_count >= ?
        ORDER BY word_count DESC;`)
	q.Parameters = []bigquery.QueryParameter{
		{
			Value: "romeoandjuliet",
		},
		{
			Value: 250,
		},
	}
	job, err := q.Run(ctx)
	if err != nil {
		return err
	}
	status, err := job.Wait(ctx)
	if err != nil {
		return err
	}
	if err := status.Err(); err != nil {
		return err
	}
	it, err := job.Read(ctx)
	if err != nil {
		return err
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(row)
	}

Java

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

// BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
String corpus = "romeoandjuliet";
long minWordCount = 250;
String query =
    "SELECT word, word_count\n"
        + "FROM `bigquery-public-data.samples.shakespeare`\n"
        + "WHERE corpus = @corpus\n"
        + "AND word_count >= @min_word_count\n"
        + "ORDER BY word_count DESC";
// Note: Standard SQL is required to use query parameters.
QueryJobConfiguration queryConfig =
    QueryJobConfiguration.newBuilder(query)
        .addNamedParameter("corpus", QueryParameterValue.string(corpus))
        .addNamedParameter("min_word_count", QueryParameterValue.int64(minWordCount))
        .build();

// Print the results.
for (FieldValueList row : bigquery.query(queryConfig).iterateAll()) {
  for (FieldValue val : row) {
    System.out.printf("%s,", val.toString());
  }
  System.out.printf("\n");
}
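
The Java client library also accepts positional parameters. A minimal sketch, assuming the addPositionalParameter method on QueryJobConfiguration.Builder:

// Sketch only: positional parameters are bound in the order they are added.
String positionalQuery =
    "SELECT word, word_count\n"
        + "FROM `bigquery-public-data.samples.shakespeare`\n"
        + "WHERE corpus = ?\n"
        + "AND word_count >= ?\n"
        + "ORDER BY word_count DESC";
QueryJobConfiguration positionalConfig =
    QueryJobConfiguration.newBuilder(positionalQuery)
        .addPositionalParameter(QueryParameterValue.string("romeoandjuliet"))
        .addPositionalParameter(QueryParameterValue.int64(250L))
        .build();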

Python

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

To use named parameters:
# from google.cloud import bigquery
# client = bigquery.Client()

query = """
    SELECT word, word_count
    FROM `bigquery-public-data.samples.shakespeare`
    WHERE corpus = @corpus
    AND word_count >= @min_word_count
    ORDER BY word_count DESC;
"""
query_params = [
    bigquery.ScalarQueryParameter('corpus', 'STRING', 'romeoandjuliet'),
    bigquery.ScalarQueryParameter('min_word_count', 'INT64', 250)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
    query,
    # Location must match that of the dataset(s) referenced in the query.
    location='US',
    job_config=job_config)  # API request - starts the query

# Print the results
for row in query_job:
    print('{}: \t{}'.format(row.word, row.word_count))

assert query_job.state == 'DONE'
To use positional parameters:
# from google.cloud import bigquery
# client = bigquery.Client()

query = """
    SELECT word, word_count
    FROM `bigquery-public-data.samples.shakespeare`
    WHERE corpus = ?
    AND word_count >= ?
    ORDER BY word_count DESC;
"""
# Set the name to None to use positional parameters.
# Note that you cannot mix named and positional parameters.
query_params = [
    bigquery.ScalarQueryParameter(None, 'STRING', 'romeoandjuliet'),
    bigquery.ScalarQueryParameter(None, 'INT64', 250)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
    query,
    # Location must match that of the dataset(s) referenced in the query.
    location='US',
    job_config=job_config)  # API request - starts the query

# Print the results
for row in query_job:
    print('{}: \t{}'.format(row.word, row.word_count))

assert query_job.state == 'DONE'

Using arrays in parameterized queries

To use an array type in a query parameter, set the type to ARRAY<T>, where T is the type of the elements in the array. Construct the value as a comma-separated list of elements enclosed in square brackets, such as [1, 2, 3].

See the data types reference for more information about the array type.

Web UI

Parameterized queries are not supported by the BigQuery web UI.

Command-line

This query selects the most popular names for baby boys born in US states starting with the letter W. The example uses the --location=US flag because it queries a public dataset, and the BigQuery public datasets are stored in the US multi-region location. Because the public datasets are stored in the US, you cannot write public data query results to a table in another region, and you cannot join tables in public datasets with tables in another region.

bq --location=US query --use_legacy_sql=False \
    --parameter='gender::M' \
    --parameter='states:ARRAY<STRING>:["WA", "WI", "WV", "WY"]' \
    'SELECT name, sum(number) as count
    FROM `bigquery-public-data.usa_names.usa_1910_2013`
    WHERE gender = @gender
    AND state IN UNNEST(@states)
    GROUP BY name
    ORDER BY count DESC
    LIMIT 10;'

Be careful to enclose the array type declaration in single quotes so that the shell does not interpret the > character in the type declaration as output redirection to a file.

API

To use an array-valued parameter, set the jobs#configuration.query.queryParameters[].parameterType.type to ARRAY.

If the array elements are scalars, set the jobs#configuration.query.queryParameters[].parameterType.arrayType.type to the type of the elements, such as STRING. If the array elements are structs, set this to STRUCT and add the needed field definitions to structTypes.
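
As a sketch, a parameterType for an array of structs (with illustrative fields a of type INT64 and b of type STRING) nests the struct definition inside arrayType:

{
  "type": "ARRAY",
  "arrayType": {
    "type": "STRUCT",
    "structTypes": [
      {
        "name": "a",
        "type": {
          "type": "INT64"
        }
      },
      {
        "name": "b",
        "type": {
          "type": "STRING"
        }
      }
    ]
  }
}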

For example, this query selects the most popular names for baby boys born in US states starting with the letter W.

{
 "query": "SELECT name, sum(number) as count\nFROM `bigquery-public-data.usa_names.usa_1910_2013`\nWHERE gender = @gender\nAND state IN UNNEST(@states)\nGROUP BY name\nORDER BY count DESC\nLIMIT 10;",
 "queryParameters": [
  {
   "parameterType": {
    "type": "STRING"
   },
   "parameterValue": {
    "value": "M"
   },
   "name": "gender"
  },
  {
   "parameterType": {
    "type": "ARRAY",
    "arrayType": {
     "type": "STRING"
    }
   },
   "parameterValue": {
    "arrayValues": [
     {
      "value": "WA"
     },
     {
      "value": "WI"
     },
     {
      "value": "WV"
     },
     {
      "value": "WY"
     }
    ]
   },
   "name": "states"
  }
 ],
 "useLegacySql": false,
 "parameterMode": "NAMED"
}

Try it in the Google APIs Explorer.

Go

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

	q := client.Query(
		`SELECT
			name,
			sum(number) as count 
        FROM ` + "`bigquery-public-data.usa_names.usa_1910_2013`" + `
		WHERE
			gender = @gender
        	AND state IN UNNEST(@states)
		GROUP BY
			name
		ORDER BY
			count DESC
		LIMIT 10;`)
	q.Parameters = []bigquery.QueryParameter{
		{
			Name:  "gender",
			Value: "M",
		},
		{
			Name:  "states",
			Value: []string{"WA", "WI", "WV", "WY"},
		},
	}
	job, err := q.Run(ctx)
	if err != nil {
		return err
	}
	status, err := job.Wait(ctx)
	if err != nil {
		return err
	}
	if err := status.Err(); err != nil {
		return err
	}
	it, err := job.Read(ctx)
	if err != nil {
		return err
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(row)
	}

Java

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

// BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
String gender = "M";
String[] states = {"WA", "WI", "WV", "WY"};
String query =
    "SELECT name, sum(number) as count\n"
        + "FROM `bigquery-public-data.usa_names.usa_1910_2013`\n"
        + "WHERE gender = @gender\n"
        + "AND state IN UNNEST(@states)\n"
        + "GROUP BY name\n"
        + "ORDER BY count DESC\n"
        + "LIMIT 10;";
// Note: Standard SQL is required to use query parameters.
QueryJobConfiguration queryConfig =
    QueryJobConfiguration.newBuilder(query)
        .addNamedParameter("gender", QueryParameterValue.string(gender))
        .addNamedParameter("states", QueryParameterValue.array(states, String.class))
        .build();

// Print the results.
for (FieldValueList row : bigquery.query(queryConfig).iterateAll()) {
  for (FieldValue val : row) {
    System.out.printf("%s,", val.toString());
  }
  System.out.printf("\n");
}

Python

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

# from google.cloud import bigquery
# client = bigquery.Client()

query = """
    SELECT name, sum(number) as count
    FROM `bigquery-public-data.usa_names.usa_1910_2013`
    WHERE gender = @gender
    AND state IN UNNEST(@states)
    GROUP BY name
    ORDER BY count DESC
    LIMIT 10;
"""
query_params = [
    bigquery.ScalarQueryParameter('gender', 'STRING', 'M'),
    bigquery.ArrayQueryParameter(
        'states', 'STRING', ['WA', 'WI', 'WV', 'WY'])
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
    query,
    # Location must match that of the dataset(s) referenced in the query.
    location='US',
    job_config=job_config)  # API request - starts the query

# Print the results
for row in query_job:
    print('{}: \t{}'.format(row.name, row.count))

assert query_job.state == 'DONE'

Using timestamps in parameterized queries

To use a timestamp in a query parameter, set the type to TIMESTAMP. The value should be in the format YYYY-MM-DD HH:MM:SS.DDDDDD time_zone.

See the data types reference for more information about the timestamp type.

Web UI

Parameterized queries are not supported by the BigQuery web UI.

Command-line

This query adds an hour to the timestamp parameter value.

bq --location=US query --use_legacy_sql=False \
    --parameter='ts_value:TIMESTAMP:2016-12-07 08:00:00' \
    'SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);'

API

To use a timestamp parameter, set the jobs#configuration.query.queryParameters[].parameterType.type to TIMESTAMP.

This query adds an hour to the timestamp parameter value.

{
  "query": "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);",
  "queryParameters": [
    {
      "name": "ts_value",
      "parameterType": {
        "type": "TIMESTAMP"
      },
      "parameterValue": {
        "value": "2016-12-07 08:00:00"
      }
    }
  ],
  "useLegacySql": false,
  "parameterMode": "NAMED"
}

Try it in the Google APIs Explorer.

Go

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

q := client.Query(
	`SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);`)
q.Parameters = []bigquery.QueryParameter{
	{
		Name:  "ts_value",
		Value: time.Date(2016, 12, 7, 8, 0, 0, 0, time.UTC),
	},
}
job, err := q.Run(ctx)
if err != nil {
	return err
}
status, err := job.Wait(ctx)
if err != nil {
	return err
}
if err := status.Err(); err != nil {
	return err
}
it, err := job.Read(ctx)
if err != nil {
	return err
}
for {
	var row []bigquery.Value
	err := it.Next(&row)
	if err == iterator.Done {
		break
	}
	if err != nil {
		return err
	}
	fmt.Println(row)
}

Java

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

// BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
DateTime timestamp = new DateTime(2016, 12, 7, 8, 0, 0, DateTimeZone.UTC);
String query = "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);";
// Note: Standard SQL is required to use query parameters.
QueryJobConfiguration queryConfig =
    QueryJobConfiguration.newBuilder(query)
        .addNamedParameter(
            "ts_value",
            QueryParameterValue.timestamp(
                // Timestamp takes microseconds since 1970-01-01T00:00:00 UTC
                timestamp.getMillis() * 1000))
        .build();

// Print the results.
DateTimeFormatter formatter = ISODateTimeFormat.dateTimeNoMillis().withZoneUTC();
for (FieldValueList row : bigquery.query(queryConfig).iterateAll()) {
  System.out.printf(
      "%s\n",
      formatter.print(
          new DateTime(
              // Timestamp values are returned in microseconds since 1970-01-01T00:00:00 UTC,
              // but org.joda.time.DateTime constructor accepts times in milliseconds.
              row.get(0).getTimestampValue() / 1000, DateTimeZone.UTC)));
  System.out.printf("\n");
}

Python

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

# from google.cloud import bigquery
# client = bigquery.Client()

import datetime
import pytz

query = 'SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);'
query_params = [
    bigquery.ScalarQueryParameter(
        'ts_value',
        'TIMESTAMP',
        datetime.datetime(2016, 12, 7, 8, 0, tzinfo=pytz.UTC))
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
    query,
    # Location must match that of the dataset(s) referenced in the query.
    location='US',
    job_config=job_config)  # API request - starts the query

# Print the results
for row in query_job:
    print(row)

assert query_job.state == 'DONE'

Using structs in parameterized queries

To use a struct in a query parameter, set the type to STRUCT<T>, where T defines the fields and types within the struct. Field definitions are separated by commas and take the form field_name TF, where TF is the type of the field. For example, STRUCT<x INT64, y STRING> defines a struct with a field named x of type INT64 and a second field named y of type STRING.

See the data types reference for more information about the struct type.

Web UI

Parameterized queries are not supported by the BigQuery web UI.

Command-line

This trivial query demonstrates the use of structured types by returning the parameter value.

bq --location=US query --use_legacy_sql=False \
    --parameter='struct_value:STRUCT<x INT64, y STRING>:{"x": 1, "y": "foo"}' \
    'SELECT @struct_value AS s;'

API

To use a struct parameter, set the jobs#configuration.query.queryParameters.parameterType.type to STRUCT.

Add an object for each field of the struct to jobs#configuration.query.queryParameters.parameterType.structTypes. If a field is a scalar, set its type to the scalar type, such as STRING. If a field is an array, set its type to ARRAY and set the nested arrayType field to the appropriate element type. If a field is itself a struct, set its type to STRUCT and add the needed structTypes.
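
As a sketch, a structTypes entry for an illustrative field named z of type ARRAY<INT64> looks like this:

{
  "name": "z",
  "type": {
    "type": "ARRAY",
    "arrayType": {
      "type": "INT64"
    }
  }
}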

This trivial query demonstrates the use of structured types by returning the parameter value.

{
  "query": "SELECT @struct_value AS s;",
  "queryParameters": [
    {
      "name": "struct_value",
      "parameterType": {
        "type": "STRUCT",
        "structTypes": [
          {
            "name": "x",
            "type": {
              "type": "INT64"
            }
          },
          {
            "name": "y",
            "type": {
              "type": "STRING"
            }
          }
        ]
      },
      "parameterValue": {
        "structValues": {
          "x": {
            "value": "1"
          },
          "y": {
            "value": "foo"
          }
        }
      }
    }
  ],
  "useLegacySql": false,
  "parameterMode": "NAMED"
}

Try it in the Google APIs Explorer.

Go

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

type MyStruct struct {
	X int64
	Y string
}
q := client.Query(
	`SELECT @struct_value as s;`)
q.Parameters = []bigquery.QueryParameter{
	{
		Name:  "struct_value",
		Value: MyStruct{X: 1, Y: "foo"},
	},
}
job, err := q.Run(ctx)
if err != nil {
	return err
}
status, err := job.Wait(ctx)
if err != nil {
	return err
}
if err := status.Err(); err != nil {
	return err
}
it, err := job.Read(ctx)
if err != nil {
	return err
}
for {
	var row []bigquery.Value
	err := it.Next(&row)
	if err == iterator.Done {
		break
	}
	if err != nil {
		return err
	}
	fmt.Println(row)
}

Python

For more on installing and creating a BigQuery client, refer to BigQuery Client Libraries.

# from google.cloud import bigquery
# client = bigquery.Client()

query = 'SELECT @struct_value AS s;'
query_params = [
    bigquery.StructQueryParameter(
        'struct_value',
        bigquery.ScalarQueryParameter('x', 'INT64', 1),
        bigquery.ScalarQueryParameter('y', 'STRING', 'foo')
    )
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
    query,
    # Location must match that of the dataset(s) referenced in the query.
    location='US',
    job_config=job_config)  # API request - starts the query

# Print the results
for row in query_job:
    print(row.s)

assert query_job.state == 'DONE'
