List all objects in an S3 bucket using a paginator

  • Create a paginator for the S3 list_objects_v2 operation so you can list all objects in a bucket across multiple pages.
  • paginator=s3.get_paginator("list_objects_v2")
import boto3
import json
 
def get_bucket_size(bucketname, region="us-west-1"):
    """Return the total size in bytes of all objects in an S3 bucket.

    Uses a ``list_objects_v2`` paginator so buckets with more than 1000
    objects (one API page) are fully counted.

    Args:
        bucketname: Name of the S3 bucket to measure.
        region: AWS region for the S3 client (was previously accepted
            but silently ignored; now forwarded to boto3).

    Returns:
        int: Sum of the ``Size`` of every object, 0 for an empty bucket.
    """
    # Fix: the `region` argument used to be ignored — pass it to the client.
    s3 = boto3.client("s3", region_name=region)
    # A paginator automatically fetches "next pages" when results are > 1000 objects
    paginator = s3.get_paginator("list_objects_v2")
    total_size = 0
    # Each `page` is one API response page (up to 1000 objects)
    for page in paginator.paginate(Bucket=bucketname):
        # "Contents" is absent from the response for an empty bucket/page,
        # hence the defaulted .get().
        total_size += sum(obj["Size"] for obj in page.get("Contents", []))
    return total_size
Example of one raw page returned by the paginator:
{
  "ResponseMetadata": {
    "RequestId": "JB...W",
    "HostId": "C...=",
    "HTTPStatusCode": 200,
    "HTTPHeaders": {
      "x-amz-id-2": "CLNk7...tbM0y61wVU=",
      "x-amz-request-id": "JBRS...W",
      "date": "Tue, 10 Feb 2026 07:39:12 GMT",
      "x-amz-bucket-region": "us-west-1",
      "content-type": "application/xml",
      "transfer-encoding": "chunked",
      "server": "AmazonS3"
    },
    "RetryAttempts": 0
  },
  "IsTruncated": false,
  "Contents": [
    {
      "Key": "test1.txt",
      "LastModified": "2026-02-10 07:26:07+00:00",
      "ETag": "\"c4ca4...849b\"",
      "ChecksumAlgorithm": [
        "CRC64NVME"
      ],
      "ChecksumType": "FULL_OBJECT",
      "Size": 1,
      "StorageClass": "STANDARD"
    },
    {
      "Key": "test2.txt",
      "LastModified": "2026-02-10 07:26:07+00:00",
      "ETag": "\"c4ca...5849b\"",
      "ChecksumAlgorithm": [
        "CRC64NVME"
      ],
      "ChecksumType": "FULL_OBJECT",
      "Size": 1,
      "StorageClass": "STANDARD"
    }
  ],
  "Name": "assign2-test-bucket-alan",
  "Prefix": "",
  "MaxKeys": 1000,
  "EncodingType": "url",
  "KeyCount": 2
}