[[["容易理解","easyToUnderstand","thumb-up"],["確實解決了我的問題","solvedMyProblem","thumb-up"],["其他","otherUp","thumb-up"]],[["難以理解","hardToUnderstand","thumb-down"],["資訊或程式碼範例有誤","incorrectInformationOrSampleCode","thumb-down"],["缺少我需要的資訊/範例","missingTheInformationSamplesINeed","thumb-down"],["翻譯問題","translationIssue","thumb-down"],["其他","otherDown","thumb-down"]],["上次更新時間:2025-09-05 (世界標準時間)。"],[],[],null,["# XML API multipart uploads\n\nThis page discusses XML API multipart uploads in Cloud Storage. This upload\nmethod uploads files in parts and then assembles them into a single object using\na final request. XML API multipart uploads are compatible with Amazon S3\nmultipart uploads.\n| **Note:** Within the [JSON API](/storage/docs/json_api), there is an unrelated [type of single-request upload](/storage/docs/uploads-downloads#uploads) also called a \"multipart upload\".\n\nOverview\n--------\n\nAn *XML API multipart upload* lets you upload data in multiple parts and\nthen assemble them into a final object. This behavior has several advantages,\nparticularly for large files:\n\n- You can upload parts simultaneously, reducing the time it takes to upload the\n data in its entirety.\n\n- If one of the upload operations fails, you only have to re-upload a portion\n of the overall object, instead of restarting from the beginning.\n\n- Since the total file size is not specified in advance, you can use XML API\n multipart uploads for [streaming uploads](/storage/docs/streaming-uploads) or for compressing data\n on-the-fly while uploading.\n\nAn XML API multipart upload has three required steps:\n\n1. [Initiate the upload](/storage/docs/xml-api/post-object-multipart) using a `POST` request, which includes specifying\n any metadata that the completed object should have. The response returns an\n [`UploadId`](/storage/docs/xml-api/reference-headers#uploadid-multipart) that you use in all subsequent requests associated with\n the upload.\n\n2. [Upload the data](/storage/docs/xml-api/put-object-multipart) using one or more `PUT` requests.\n\n3. [Complete the upload](/storage/docs/xml-api/post-object-complete) using a `POST` request. 
There is no limit to how long a multipart upload and its uploaded parts can
remain unfinished or idle in a bucket.

- Successfully uploaded parts count toward your [monthly storage usage](/storage/pricing#storage-pricing).

- You can avoid a buildup of abandoned multipart uploads by using [Object Lifecycle Management](/storage/docs/lifecycle#abort-mpu) to automatically remove multipart uploads when they reach a specified age, as sketched after this list.
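For example, the following Python sketch attaches such a lifecycle rule with the Cloud Storage Python client library. The `AbortIncompleteMultipartUpload` action type and the age condition (in days) are taken from the Object Lifecycle Management documentation linked above; the bucket name and the seven-day threshold are placeholders, so adjust them and verify the rule format against that page.

```python
# Sketch: add a lifecycle rule that removes multipart uploads that are
# still incomplete seven days after they were initiated. The rule format
# mirrors the lifecycle configuration described in the Object Lifecycle
# Management documentation; verify it there before use.
from google.cloud import storage

storage_client = storage.Client()
bucket = storage_client.get_bucket("your-unique-bucket-name")  # placeholder

rules = list(bucket.lifecycle_rules)  # keep any existing rules
rules.append(
    {
        "action": {"type": "AbortIncompleteMultipartUpload"},
        "condition": {"age": 7},  # days since the upload was initiated
    }
)
bucket.lifecycle_rules = rules
bucket.patch()
```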
Considerations
--------------

The following limitations apply to using XML API multipart uploads:

- There are [limits](/storage/quotas#requests) to the minimum size a part can be, the maximum size a part can be, and the number of parts used to assemble the completed upload.
- [Preconditions](/storage/docs/request-preconditions) are not supported in the requests.
- [MD5 hashes](/storage/docs/metadata#md5) don't exist for objects uploaded using this method.
- This upload method is not supported in the Google Cloud console or the Google Cloud CLI.

Keep in mind the following when working with XML API multipart uploads:

- XML API multipart uploads have [specific IAM permissions](/storage/docs/access-control/iam-permissions#multipart-uploads).
  If you use [custom IAM roles](/iam/docs/creating-custom-roles), you should ensure those
  roles have the permissions you need.

- While you can initiate an upload and upload parts, the request to
  complete the upload fails if it would overwrite an object that has
  a [hold](/storage/docs/object-holds) on it or an unfulfilled [retention period](/storage/docs/bucket-lock).

- You can [list ongoing uploads](/storage/docs/xml-api/get-bucket-uploads) in a bucket (see the sketch after this
  list), but only a completed upload appears in the normal list of objects
  in the bucket.

- An uploaded part can be subject to [early deletion charges](/storage/pricing#early-delete) if it is
  never used.
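As an illustration of listing ongoing uploads, the following Python sketch requests a bucket's in-progress multipart uploads through the XML API. The endpoint, the `uploads` query parameter, and the response element names follow the listing reference linked above and should be confirmed there; the bucket name is a placeholder.

```python
# Sketch: list in-progress multipart uploads for a bucket via the XML API.
# Assumes Application Default Credentials; see the "list ongoing uploads"
# reference linked above for the authoritative request and response format.
import xml.etree.ElementTree as ET

import google.auth
from google.auth.transport.requests import AuthorizedSession

bucket_name = "your-unique-bucket-name"  # placeholder

credentials, _ = google.auth.default(
    scopes=["https://www.googleapis.com/auth/devstorage.read_only"]
)
session = AuthorizedSession(credentials)

response = session.get(
    f"https://storage.googleapis.com/{bucket_name}", params={"uploads": ""}
)

# The response is an XML document listing each in-progress upload; print
# the object name and upload ID of each one.
root = ET.fromstring(response.text)
namespace = root.tag.split("}")[0] + "}" if "}" in root.tag else ""
for upload in root.iter(f"{namespace}Upload"):
    key = upload.find(f"{namespace}Key").text
    upload_id = upload.find(f"{namespace}UploadId").text
    print(f"{key}: {upload_id}")
```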
How client libraries use XML API multipart uploads
--------------------------------------------------

This section provides information about performing XML API multipart uploads
with client libraries that support it.

### Client libraries

### Java

For more information, see the
[Cloud Storage Java API reference documentation](https://cloud.google.com/java/docs/reference/google-cloud-storage/latest/overview).

To authenticate to Cloud Storage, set up Application Default Credentials.
For more information, see
[Set up authentication for client libraries](/storage/docs/authentication#client-libs).

The Java client library does not support XML API multipart uploads. Instead, use
[parallel composite uploads](/storage/docs/parallel-composite-uploads).

### Node.js

For more information, see the
[Cloud Storage Node.js API reference documentation](https://cloud.google.com/nodejs/docs/reference/storage/latest).

To authenticate to Cloud Storage, set up Application Default Credentials.
For more information, see
[Set up authentication for client libraries](/storage/docs/authentication#client-libs).

You can perform XML API multipart uploads using the
[`uploadFileInChunks`](https://googleapis.dev/nodejs/storage/latest/TransferManager.html#uploadFileInChunks)
method. For example:

```javascript
/**
 * TODO(developer): Uncomment the following lines before running the sample.
 */
// The ID of your GCS bucket
// const bucketName = 'your-unique-bucket-name';

// The path of the file to upload
// const filePath = 'path/to/your/file';

// The size of each chunk to be uploaded
// const chunkSize = 32 * 1024 * 1024;

// Imports the Google Cloud client library
const {Storage, TransferManager} = require('@google-cloud/storage');

// Creates a client
const storage = new Storage();

// Creates a transfer manager client
const transferManager = new TransferManager(storage.bucket(bucketName));

async function uploadFileInChunksWithTransferManager() {
  // Uploads the file in chunks
  await transferManager.uploadFileInChunks(filePath, {
    chunkSizeBytes: chunkSize,
  });

  console.log(`${filePath} uploaded to ${bucketName}.`);
}

uploadFileInChunksWithTransferManager().catch(console.error);
```

### Python

For more information, see the
[Cloud Storage Python API reference documentation](https://cloud.google.com/python/docs/reference/storage/latest).

To authenticate to Cloud Storage, set up Application Default Credentials.
For more information, see
[Set up authentication for client libraries](/storage/docs/authentication#client-libs).

You can perform XML API multipart uploads using the
[`upload_chunks_concurrently`](/python/docs/reference/storage/latest/google.cloud.storage.transfer_manager#google_cloud_storage_transfer_manager_upload_chunks_concurrently)
method. For example:

```python
import argparse


def upload_chunks_concurrently(
    bucket_name,
    source_filename,
    destination_blob_name,
    chunk_size=32 * 1024 * 1024,
    workers=8,
):
    """Upload a single file, in chunks, concurrently in a process pool."""
    # The ID of your GCS bucket
    # bucket_name = "your-bucket-name"

    # The path to your file to upload
    # source_filename = "local/path/to/file"

    # The ID of your GCS object
    # destination_blob_name = "storage-object-name"

    # The size of each chunk. The performance impact of this value depends on
    # the use case. The remote service has a minimum of 5 MiB and a maximum of
    # 5 GiB.
    # chunk_size = 32 * 1024 * 1024 (32 MiB)

    # The maximum number of processes to use for the operation. The performance
    # impact of this value depends on the use case. Each additional process
    # occupies some CPU and memory resources until finished. Threads can be used
    # instead of processes by passing `worker_type=transfer_manager.THREAD`.
    # workers=8

    from google.cloud.storage import Client, transfer_manager

    storage_client = Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(destination_blob_name)

    transfer_manager.upload_chunks_concurrently(
        source_filename, blob, chunk_size=chunk_size, max_workers=workers
    )

    print(f"File {source_filename} uploaded to {destination_blob_name}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Upload a file to GCS in chunks concurrently."
    )
    parser.add_argument(
        "--bucket_name", help="The name of the GCS bucket to upload to."
    )
    parser.add_argument(
        "--source_filename", help="The local path to the file to upload."
    )
    parser.add_argument(
        "--destination_blob_name", help="The name of the object in GCS."
    )
    parser.add_argument(
        "--chunk_size",
        type=int,
        default=32 * 1024 * 1024,
        help="The size of each chunk in bytes (default: 32 MiB). The remote "
        "service has a minimum of 5 MiB and a maximum of 5 GiB.",
    )
    parser.add_argument(
        "--workers",
        type=int,
        default=8,
        help="The number of worker processes to use (default: 8).",
    )
    args = parser.parse_args()

    upload_chunks_concurrently(
        args.bucket_name,
        args.source_filename,
        args.destination_blob_name,
        args.chunk_size,
        args.workers,
    )
```

What's next
-----------

- Explore additional [uploading methods](/storage/docs/uploads-downloads#uploads) for Cloud Storage.
- Learn about [truncated exponential backoff](/storage/docs/retry-strategy) and when to retry requests.