Add a new column to a BigQuery table while appending rows via a query job with an explicit destination table.
Code sample
Go
Before trying this sample, follow the Go setup instructions in the BigQuery quickstart using client libraries. For more information, see the BigQuery Go API reference documentation.
import (
	"context"
	"fmt"

	"cloud.google.com/go/bigquery"
)

// createTableAndWidenQuery demonstrates how the schema of a table can be modified to add columns by appending
// query results that include the new columns.
func createTableAndWidenQuery(projectID, datasetID, tableID string) error {
	// projectID := "my-project-id"
	// datasetID := "mydataset"
	// tableID := "mytable"
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, projectID)
	if err != nil {
		return fmt.Errorf("bigquery.NewClient: %v", err)
	}
	defer client.Close()

	// First, we create a sample table.
	sampleSchema := bigquery.Schema{
		{Name: "full_name", Type: bigquery.StringFieldType, Required: true},
		{Name: "age", Type: bigquery.IntegerFieldType, Required: true},
	}
	original := &bigquery.TableMetadata{
		Schema: sampleSchema,
	}
	tableRef := client.Dataset(datasetID).Table(tableID)
	if err := tableRef.Create(ctx, original); err != nil {
		return err
	}

	// Our table has two columns. We'll introduce a new favorite_color column via
	// a subsequent query that appends to the table.
	q := client.Query("SELECT \"Timmy\" as full_name, 85 as age, \"Blue\" as favorite_color")
	q.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
	q.QueryConfig.Dst = client.Dataset(datasetID).Table(tableID)
	q.WriteDisposition = bigquery.WriteAppend
	q.Location = "US"

	job, err := q.Run(ctx)
	if err != nil {
		return err
	}
	_, err = job.Wait(ctx)
	if err != nil {
		return err
	}
	return nil
}
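
After the appending query job finishes, the table's schema should contain the new favorite_color column in addition to full_name and age. As a minimal sketch (not part of the original sample; the helper name checkWidenedSchema is an assumption), you could read the table metadata back with the same client library and print the column names to confirm the widened schema:

import (
	"context"
	"fmt"

	"cloud.google.com/go/bigquery"
)

// checkWidenedSchema is a hypothetical helper (not part of the official sample)
// that lists a table's columns so you can confirm that favorite_color was added
// by the appending query job above.
func checkWidenedSchema(ctx context.Context, client *bigquery.Client, datasetID, tableID string) error {
	meta, err := client.Dataset(datasetID).Table(tableID).Metadata(ctx)
	if err != nil {
		return fmt.Errorf("Metadata: %v", err)
	}
	for _, field := range meta.Schema {
		fmt.Printf("%s (%s)\n", field.Name, field.Type)
	}
	return nil
}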
Node.js
Before trying this sample, follow the Node.js setup instructions in the BigQuery quickstart using client libraries. For more information, see the BigQuery Node.js API reference documentation.
// Import the Google Cloud client libraries
const {BigQuery} = require('@google-cloud/bigquery');

// Instantiate client
const bigquery = new BigQuery();

async function addColumnQueryAppend() {
  // Adds a new column to a BigQuery table while appending rows via a query job.

  /**
   * TODO(developer): Uncomment the following lines before running the sample.
   */
  // const datasetId = 'my_dataset';
  // const tableId = 'my_table';

  // Retrieve destination table reference
  const [table] = await bigquery
    .dataset(datasetId)
    .table(tableId)
    .get();
  const destinationTableRef = table.metadata.tableReference;

  // In this example, the existing table contains only the 'name' column.
  // 'REQUIRED' fields cannot be added to an existing schema,
  // so the additional column must be 'NULLABLE'.
  const query = `SELECT name, year
    FROM \`bigquery-public-data.usa_names.usa_1910_2013\`
    WHERE state = 'TX'
    LIMIT 10`;

  // Set query job options
  const options = {
    query: query,
    schemaUpdateOptions: ['ALLOW_FIELD_ADDITION'],
    writeDisposition: 'WRITE_APPEND',
    destinationTable: destinationTableRef,
    // Location must match that of the dataset(s) referenced in the query.
    location: 'US',
  };

  const [job] = await bigquery.createQueryJob(options);
  console.log(`Job ${job.id} started.`);

  // Wait for the query to finish
  const [rows] = await job.getQueryResults();
  console.log(`Job ${job.id} completed.`);

  // Print the results
  console.log('Rows:');
  rows.forEach(row => console.log(row));
}
Python
Before trying this sample, follow the Python setup instructions in the BigQuery quickstart using client libraries. For more information, see the BigQuery Python API reference documentation.
from google.cloud import bigquery

# Construct a BigQuery client object.
client = bigquery.Client()

# TODO(developer): Set table_id to the ID of the destination table.
# table_id = "your-project.your_dataset.your_table_name"

# Retrieves the destination table and checks the length of the schema.
table = client.get_table(table_id)  # Make an API request.
print("Table {} contains {} columns".format(table_id, len(table.schema)))

# Configures the query to append the results to a destination table,
# allowing field addition.
job_config = bigquery.QueryJobConfig(
    destination=table_id,
    schema_update_options=[bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION],
    write_disposition=bigquery.WriteDisposition.WRITE_APPEND,
)

# Start the query, passing in the extra configuration.
query_job = client.query(
    # In this example, the existing table contains only the 'full_name' and
    # 'age' columns, while the results of this query will contain an
    # additional 'favorite_color' column.
    'SELECT "Timmy" as full_name, 85 as age, "Blue" as favorite_color;',
    job_config=job_config,
)  # Make an API request.
query_job.result()  # Wait for the job to complete.

# Checks the updated length of the schema.
table = client.get_table(table_id)  # Make an API request.
print("Table {} now contains {} columns".format(table_id, len(table.schema)))