tessl install tessl/golang-cloud-google-com--go--bigquery@1.72.0

Google Cloud BigQuery client library providing comprehensive Go APIs for querying, loading data, managing datasets and tables, streaming inserts, and accessing BigQuery's ecosystem of services including Storage, Analytics Hub, Data Transfer, and Migration APIs.
This document covers creating, reading, updating, and deleting BigQuery tables, along with schema management, table configuration, and advanced table features.
Tables are the primary data storage objects in BigQuery. They contain structured data organized in rows and columns with a defined schema. BigQuery supports regular tables, partitioned tables, clustered tables, views, materialized views, external tables, and snapshots.
func (d *Dataset) Table(tableID string) *Table

Get a table reference from a dataset:
dataset := client.Dataset("my_dataset")
table := dataset.Table("my_table")

type Table struct {
ProjectID string
DatasetID string
TableID string
}

The Table type represents a reference to a BigQuery table; constructing it makes no API call, so the table may or may not exist yet.
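Since constructing a Table does not verify existence, a common pattern is to fetch metadata and check for an HTTP 404. A minimal sketch (assumes the "errors", "net/http", and "google.golang.org/api/googleapi" imports):

// Probe for existence by fetching metadata
_, err := table.Metadata(ctx)
var apiErr *googleapi.Error
if errors.As(err, &apiErr) && apiErr.Code == http.StatusNotFound {
// Table does not exist yet; safe to create it
if err := table.Create(ctx, nil); err != nil {
// Handle error
}
}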
func (t *Table) Identifier(f IdentifierFormat) (string, error)

Get the table identifier in various formats:
// Standard SQL format: `project.dataset.table`
sqlID, err := table.Identifier(bigquery.StandardSQLID)
// Legacy SQL format: [project:dataset.table]
legacyID, err := table.Identifier(bigquery.LegacySQLID)
// Storage API format: projects/project/datasets/dataset/tables/table
storageID, err := table.Identifier(bigquery.StorageAPIResourceID)

func (t *Table) FullyQualifiedName() string

Get the fully qualified name in standard SQL format:
name := table.FullyQualifiedName()
// Returns: `project.dataset.table`

func (t *Table) Create(ctx context.Context, tm *TableMetadata) error

Create an empty table:
if err := table.Create(ctx, nil); err != nil {
// Handle error
}

Create a table with a schema:
schema := bigquery.Schema{
{Name: "name", Type: bigquery.StringFieldType, Required: true},
{Name: "age", Type: bigquery.IntegerFieldType},
{Name: "created_at", Type: bigquery.TimestampFieldType},
}
meta := &bigquery.TableMetadata{
Schema: schema,
}
if err := table.Create(ctx, meta); err != nil {
// Handle error
}

type TableMetadata struct {
// Settable fields
Name string
Description string
Schema Schema
ViewQuery string
MaterializedView *MaterializedViewDefinition
UseLegacySQL bool
UseStandardSQL bool
Labels map[string]string
ExpirationTime time.Time
TimePartitioning *TimePartitioning
RangePartitioning *RangePartitioning
Clustering *Clustering
RequirePartitionFilter bool
EncryptionConfig *EncryptionConfig
ExternalDataConfig *ExternalDataConfig
DefaultCollation string
MaxStaleness *IntervalValue
CloneDefinition *CloneDefinition
TableConstraints *TableConstraints
ResourceTags map[string]string
BigLakeConfiguration *BigLakeConfiguration
// Read-only fields
Location string
FullID string
Type TableType
CreationTime time.Time
LastModifiedTime time.Time
NumBytes int64
NumLongTermBytes int64
NumRows uint64
StreamingBuffer *StreamingBuffer
SnapshotDefinition *SnapshotDefinition
ETag string
}

func (t *Table) Metadata(ctx context.Context, opts ...TableMetadataOption) (*TableMetadata, error)

Fetch table metadata:

meta, err := table.Metadata(ctx)
if err != nil {
// Handle error (e.g., table doesn't exist)
}
fmt.Printf("Table: %s\n", meta.Name)
fmt.Printf("Schema: %v\n", meta.Schema)
fmt.Printf("Rows: %d\n", meta.NumRows)
fmt.Printf("Size: %d bytes\n", meta.NumBytes)
fmt.Printf("Created: %s\n", meta.CreationTime)type TableMetadataOption func(*tableGetCall)type TableMetadataView stringconst (
BasicMetadataView TableMetadataView = "BASIC"
StorageStatsMetadataView TableMetadataView = "STORAGE_STATS"
FullMetadataView TableMetadataView = "FULL"
)

func WithMetadataView(tmv TableMetadataView) TableMetadataOption

Control the level of detail returned when fetching table metadata. This is useful for improving performance when working with large tables:
// Basic view - returns schema and basic info but not storage statistics
// More efficient for large tables
meta, err := table.Metadata(ctx, bigquery.WithMetadataView(bigquery.BasicMetadataView))
if err != nil {
// Handle error
}
// Storage stats view - includes storage statistics
meta, err = table.Metadata(ctx, bigquery.WithMetadataView(bigquery.StorageStatsMetadataView))
// Full view - returns all available information (default behavior)
meta, err = table.Metadata(ctx, bigquery.WithMetadataView(bigquery.FullMetadataView))

Metadata views:
BasicMetadataView - Returns schema, partitioning, and clustering information but not storage statistics (NumRows, NumBytes). Most efficient for large tables.
StorageStatsMetadataView - Includes all basic information plus storage statistics.
FullMetadataView - Returns all table information (equivalent to no view specified).

type Schema []*FieldSchema

type FieldSchema struct {
Name string
Description string
Repeated bool
Required bool
Type FieldType
PolicyTags *PolicyTagList
Schema Schema
MaxLength int64
Precision int64
Scale int64
DefaultValueExpression string
Collation string
RangeElementType *RangeElementType
RoundingMode RoundingMode
}

type FieldType string
const (
StringFieldType FieldType = "STRING"
BytesFieldType FieldType = "BYTES"
IntegerFieldType FieldType = "INTEGER"
FloatFieldType FieldType = "FLOAT"
BooleanFieldType FieldType = "BOOLEAN"
TimestampFieldType FieldType = "TIMESTAMP"
RecordFieldType FieldType = "RECORD"
DateFieldType FieldType = "DATE"
TimeFieldType FieldType = "TIME"
DateTimeFieldType FieldType = "DATETIME"
NumericFieldType FieldType = "NUMERIC"
GeographyFieldType FieldType = "GEOGRAPHY"
BigNumericFieldType FieldType = "BIGNUMERIC"
IntervalFieldType FieldType = "INTERVAL"
JSONFieldType FieldType = "JSON"
RangeFieldType FieldType = "RANGE"
)

Define a schema:

schema := bigquery.Schema{
{Name: "id", Type: bigquery.IntegerFieldType, Required: true},
{Name: "name", Type: bigquery.StringFieldType, Required: true},
{Name: "email", Type: bigquery.StringFieldType},
{Name: "created_at", Type: bigquery.TimestampFieldType},
{Name: "metadata", Type: bigquery.JSONFieldType},
{Name: "location", Type: bigquery.GeographyFieldType},
}

Nested and repeated fields:

schema := bigquery.Schema{
{Name: "user_id", Type: bigquery.IntegerFieldType},
{
Name: "address",
Type: bigquery.RecordFieldType,
Required: false,
Schema: bigquery.Schema{
{Name: "street", Type: bigquery.StringFieldType},
{Name: "city", Type: bigquery.StringFieldType},
{Name: "zipcode", Type: bigquery.StringFieldType},
},
},
{
Name: "tags",
Type: bigquery.StringFieldType,
Repeated: true,
},
}

func InferSchema(st interface{}) (Schema, error)

Infer a schema from a Go struct:
type User struct {
ID int `bigquery:"id"`
Name string `bigquery:"name"`
Email string `bigquery:"email"`
CreatedAt time.Time `bigquery:"created_at"`
Active bool `bigquery:"active"`
}
schema, err := bigquery.InferSchema(User{})
if err != nil {
return err
}

Struct tags:
bigquery:"field_name" - Set field namebigquery:"-" - Ignore fieldbigquery:",nullable" - Mark field as nullabletype Product struct {
ID int `bigquery:"id"`
Name string `bigquery:"name"`
Price float64 `bigquery:"price"`
Discontinued bigquery.NullBool `bigquery:"discontinued"`
Internal string `bigquery:"-"` // Ignored
Description []byte `bigquery:"description,nullable"`
}

func SchemaFromJSON(schemaJSON []byte) (Schema, error)

Create a schema from a JSON representation. This is useful when working with schema definitions from external tools or the bq CLI:
// JSON schema definition (TableFieldSchema array format)
schemaJSON := []byte(`[
{
"name": "name",
"type": "STRING",
"mode": "REQUIRED"
},
{
"name": "age",
"type": "INTEGER",
"mode": "NULLABLE"
},
{
"name": "addresses",
"type": "RECORD",
"mode": "REPEATED",
"fields": [
{
"name": "street",
"type": "STRING"
},
{
"name": "city",
"type": "STRING"
}
]
}
]`)
schema, err := bigquery.SchemaFromJSON(schemaJSON)
if err != nil {
return err
}
// Use the schema to create a table
meta := &bigquery.TableMetadata{
Schema: schema,
}
err = table.Create(ctx, meta)

The JSON format follows the BigQuery REST API TableFieldSchema specification. Type aliases such as "STRUCT" for "RECORD" are automatically resolved.
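As a small illustration of the alias resolution (the "point" field and its children are hypothetical):

structJSON := []byte(`[
{
"name": "point",
"type": "STRUCT",
"fields": [
{"name": "x", "type": "FLOAT"},
{"name": "y", "type": "FLOAT"}
]
}
]`)
schema, err := bigquery.SchemaFromJSON(structJSON)
// schema[0].Type == bigquery.RecordFieldType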
schema := bigquery.Schema{
{
Name: "email",
Type: bigquery.StringFieldType,
MaxLength: 255,
},
{
Name: "price",
Type: bigquery.NumericFieldType,
Precision: 10,
Scale: 2,
},
{
Name: "score",
Type: bigquery.BigNumericFieldType,
Precision: 38,
Scale: 9,
},
}

Parameterized types (maximum length, precision, and scale):

schema := bigquery.Schema{
{
Name: "created_at",
Type: bigquery.TimestampFieldType,
DefaultValueExpression: "CURRENT_TIMESTAMP()",
},
{
Name: "status",
Type: bigquery.StringFieldType,
DefaultValueExpression: "'active'",
},
{
Name: "uuid",
Type: bigquery.StringFieldType,
DefaultValueExpression: "GENERATE_UUID()",
},
}

type PolicyTagList struct {
Names []string
}

Apply policy tags for column-level access control:

schema := bigquery.Schema{
{
Name: "ssn",
Type: bigquery.StringFieldType,
PolicyTags: &bigquery.PolicyTagList{
Names: []string{
"projects/my-project/locations/us/taxonomies/12345/policyTags/67890",
},
},
},
}

type RangeElementType struct {
Type FieldType
}

Define a RANGE field:

schema := bigquery.Schema{
{
Name: "valid_period",
Type: bigquery.RangeFieldType,
RangeElementType: &bigquery.RangeElementType{
Type: bigquery.DateFieldType,
},
},
}

type RoundingMode string
const (
RoundHalfAwayFromZero RoundingMode = "ROUND_HALF_AWAY_FROM_ZERO"
RoundHalfEven RoundingMode = "ROUND_HALF_EVEN"
)

Set a rounding mode on a numeric field:

schema := bigquery.Schema{
{
Name: "amount",
Type: bigquery.NumericFieldType,
RoundingMode: bigquery.RoundHalfEven,
},
}

type TableMetadataToUpdate struct {
Name optional.String
Description optional.String
Schema Schema
ViewQuery optional.String
MaterializedView *MaterializedViewDefinition
UseLegacySQL optional.Bool
ExpirationTime time.Time
ExternalDataConfig *ExternalDataConfig
TimePartitioning *TimePartitioning
RequirePartitionFilter optional.Bool
DefaultCollation optional.String
MaxStaleness *IntervalValue
TableConstraints *TableConstraints
ResourceTags map[string]string
}

func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string, opts ...TableUpdateOption) (*TableMetadata, error)

Update table metadata:
import "cloud.google.com/go/internal/optional"
update := bigquery.TableMetadataToUpdate{
Description: optional.ToString("Updated table description"),
ExpirationTime: optional.ToTime(time.Now().Add(30 * 24 * time.Hour)),
}
// Blind write
meta, err := table.Update(ctx, update, "")
// With optimistic concurrency control: read the current ETag first
meta, err := table.Metadata(ctx)
if err != nil {
return err
}
update := bigquery.TableMetadataToUpdate{
Description: "Updated",
}
meta, err = table.Update(ctx, update, meta.ETag)

type TableUpdateOption func(*tablePatchCall)

func WithAutoDetectSchema(b bool) TableUpdateOption

Control schema auto-detection when updating external tables:
// Update external table configuration with schema auto-detection
externalConfig := &bigquery.ExternalDataConfig{
SourceFormat: bigquery.CSV,
SourceURIs: []string{"gs://mybucket/data/*.csv"},
AutoDetect: true,
}
update := bigquery.TableMetadataToUpdate{
ExternalDataConfig: externalConfig,
}
// Enable auto-detection as part of the update operation
meta, err := table.Update(ctx, update, "",
bigquery.WithAutoDetectSchema(true))
if err != nil {
// Handle error
}
// The schema will be automatically inferred from the external data source
fmt.Printf("Detected schema: %v\n", meta.Schema)The WithAutoDetectSchema option is particularly useful when updating external tables where the data structure may have changed, allowing BigQuery to automatically update the table schema based on the current external data.
Add new fields to the schema:
meta, err := table.Metadata(ctx)
if err != nil {
return err
}
// Add new field
newSchema := append(meta.Schema, &bigquery.FieldSchema{
Name: "new_field",
Type: bigquery.StringFieldType,
})
update := bigquery.TableMetadataToUpdate{
Schema: newSchema,
}
meta, err = table.Update(ctx, update, meta.ETag)

Note: Via schema update you can add fields, relax a field's mode from REQUIRED to NULLABLE, or widen a type (for example INTEGER to NUMERIC, or NUMERIC to BIGNUMERIC). Other changes require recreating the table. A sketch of mode relaxation follows.
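For relaxing every REQUIRED field at once, Schema.Relax returns a copy of the schema with no fields marked Required; a minimal sketch:

meta, err := table.Metadata(ctx)
if err != nil {
return err
}
update := bigquery.TableMetadataToUpdate{
Schema: meta.Schema.Relax(), // same schema with Required cleared
}
if _, err := table.Update(ctx, update, meta.ETag); err != nil {
return err
}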
func (u *TableMetadataToUpdate) SetLabel(name, value string)
func (u *TableMetadataToUpdate) DeleteLabel(name string)

Manage table labels:

update := bigquery.TableMetadataToUpdate{}
update.SetLabel("environment", "staging")
update.SetLabel("version", "2.0")
update.DeleteLabel("deprecated")
meta, err := table.Update(ctx, update, "")func (t *Table) Delete(ctx context.Context) errorDelete a table:
if err := table.Delete(ctx); err != nil {
// Handle error
}

func (t *Table) Read(ctx context.Context) *RowIterator

Read all rows from a table:
it := table.Read(ctx)
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
return err
}
fmt.Println(values)
}

Read rows directly into a struct:

type User struct {
ID int
Name string
Email string
}
it := table.Read(ctx)
for {
var user User
err := it.Next(&user)
if err == iterator.Done {
break
}
if err != nil {
return err
}
fmt.Printf("User: %+v\n", user)
}

func (d *Dataset) Tables(ctx context.Context) *TableIterator

type TableIterator struct{}

func (it *TableIterator) Next() (*Table, error)
func (it *TableIterator) PageInfo() *iterator.PageInfo

List all tables in a dataset:
dataset := client.Dataset("my_dataset")
it := dataset.Tables(ctx)
for {
table, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
fmt.Printf("Table: %s.%s.%s\n",
table.ProjectID, table.DatasetID, table.TableID)
}

type TableType string
const (
RegularTable TableType = "TABLE"
ViewTable TableType = "VIEW"
ExternalTable TableType = "EXTERNAL"
MaterializedView TableType = "MATERIALIZED_VIEW"
SnapshotTable TableType = "SNAPSHOT"
)

Check the table type:
meta, err := table.Metadata(ctx)
if err != nil {
return err
}
switch meta.Type {
case bigquery.RegularTable:
fmt.Println("Regular table")
case bigquery.ViewTable:
fmt.Println("View")
case bigquery.MaterializedView:
fmt.Println("Materialized view")
case bigquery.ExternalTable:
fmt.Println("External table")
case bigquery.SnapshotTable:
fmt.Println("Snapshot")
}

Create a view:

view := dataset.Table("my_view")
meta := &bigquery.TableMetadata{
ViewQuery: `
SELECT
name,
COUNT(*) as count
FROM ` + "`project.dataset.source_table`" + `
GROUP BY name
`,
UseLegacySQL: false,
}
if err := view.Create(ctx, meta); err != nil {
return err
}

Update the view query:

update := bigquery.TableMetadataToUpdate{
ViewQuery: `
SELECT name, email
FROM ` + "`project.dataset.users`" + `
WHERE active = TRUE
`,
}
meta, err := view.Update(ctx, update, "")

type MaterializedViewDefinition struct {
Query string
EnableRefresh bool
RefreshInterval time.Duration
LastRefreshTime time.Time
AllowNonIncrementalDefinition bool
MaxStaleness *IntervalValue
}

Create a materialized view:

mv := dataset.Table("my_materialized_view")
meta := &bigquery.TableMetadata{
MaterializedView: &bigquery.MaterializedViewDefinition{
Query: `
SELECT
date,
product_id,
SUM(revenue) as total_revenue
FROM ` + "`project.dataset.sales`" + `
GROUP BY date, product_id
`,
EnableRefresh: true,
RefreshInterval: 1 * time.Hour,
},
}
if err := mv.Create(ctx, meta); err != nil {
return err
}

Set the maximum staleness for a materialized view:
maxStaleness := &bigquery.IntervalValue{Months: 0, Days: 0, Nanos: 3600000000000} // 1 hour
update := bigquery.TableMetadataToUpdate{
MaxStaleness: maxStaleness,
}
meta, err := mv.Update(ctx, update, "")

type DataFormat string

const (
CSV DataFormat = "CSV"
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
Avro DataFormat = "AVRO"
Parquet DataFormat = "PARQUET"
ORC DataFormat = "ORC"
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
GoogleSheets DataFormat = "GOOGLE_SHEETS"
Bigtable DataFormat = "BIGTABLE"
Iceberg DataFormat = "ICEBERG"
TFSavedModel DataFormat = "ML_TF_SAVED_MODEL"
XGBoostBooster DataFormat = "ML_XGBOOST_BOOSTER"
)

Data format types for external data sources and file operations:

CSV - Comma-separated values format
JSON - Newline-delimited JSON format
Avro - Apache Avro format
Parquet - Apache Parquet format
ORC - Apache ORC format
DatastoreBackup - Cloud Datastore backup format
GoogleSheets - Google Sheets format
Bigtable - Cloud Bigtable format
Iceberg - Apache Iceberg format
TFSavedModel - TensorFlow SavedModel format (for ML models)
XGBoostBooster - XGBoost Booster format (for ML models)

type Compression string

const (
None Compression = "NONE"
Gzip Compression = "GZIP"
Snappy Compression = "SNAPPY"
)

Compression types for external data sources:
externalConfig := &bigquery.ExternalDataConfig{
SourceFormat: bigquery.CSV,
SourceURIs: []string{"gs://mybucket/data/*.csv.gz"},
Compression: bigquery.Gzip,
}

type Encoding string

const (
UTF_8 Encoding = "UTF-8"
ISO_8859_1 Encoding = "ISO-8859-1"
)

Character encoding for CSV files:
csvOptions := &bigquery.CSVOptions{
Encoding: bigquery.UTF_8,
FieldDelimiter: ",",
}

type ExternalDataConfig struct {
SourceFormat DataFormat
SourceURIs []string
Schema Schema
AutoDetect bool
Compression Compression
IgnoreUnknownValues bool
MaxBadRecords int64
Options ExternalDataConfigOptions
HivePartitioningOptions *HivePartitioningOptions
DecimalTargetTypes []DecimalTargetType
ConnectionID string
ReferenceFileSchemaURI string
MetadataCacheMode MetadataCacheMode
TimeZone string
DateFormat string
DatetimeFormat string
TimeFormat string
TimestampFormat string
}

type DecimalTargetType string

const (
NumericTargetType DecimalTargetType = "NUMERIC"
BigNumericTargetType DecimalTargetType = "BIGNUMERIC"
StringTargetType DecimalTargetType = "STRING"
)

Decimal target types control how decimal values in external data are converted. The listed types are evaluated in the order NUMERIC, BIGNUMERIC, STRING, and the first type that supports the necessary precision and scale is selected:
externalConfig := &bigquery.ExternalDataConfig{
SourceFormat: bigquery.Parquet,
SourceURIs: []string{"gs://mybucket/data.parquet"},
DecimalTargetTypes: []bigquery.DecimalTargetType{
bigquery.NumericTargetType,
bigquery.BigNumericTargetType,
bigquery.StringTargetType,
},
}

type MetadataCacheMode string

const (
Automatic MetadataCacheMode = "AUTOMATIC"
Manual MetadataCacheMode = "MANUAL"
)

Metadata cache mode controls caching behavior for external tables:
externalConfig := &bigquery.ExternalDataConfig{
SourceFormat: bigquery.Parquet,
SourceURIs: []string{"gs://mybucket/data/*.parquet"},
MetadataCacheMode: bigquery.Automatic,
}

Automatic - Triggers automatic background refresh of the metadata cache from the external source
Manual - Requires manual refresh of the metadata cache

type SourceColumnMatch string

const (
SourceColumnMatchUnspecified SourceColumnMatch = "SOURCE_COLUMN_MATCH_UNSPECIFIED"
SourceColumnMatchPosition SourceColumnMatch = "POSITION"
SourceColumnMatchName SourceColumnMatch = "NAME"
)

Source column match controls how loaded columns are matched to the schema:
csvOptions := &bigquery.CSVOptions{
SkipLeadingRows: 1,
SourceColumnMatch: bigquery.SourceColumnMatchName, // Match by header names
}

SourceColumnMatchUnspecified - Uses sensible defaults (by name if autodetect is enabled, by position otherwise)
SourceColumnMatchPosition - Matches columns by position (assumes the same order as the schema)
SourceColumnMatchName - Matches columns by name from the header row

type ColumnNameCharacterMap string

const (
UnspecifiedColumnNameCharacterMap ColumnNameCharacterMap = "COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED"
StrictColumnNameCharacterMap ColumnNameCharacterMap = "STRICT"
V1ColumnNameCharacterMap ColumnNameCharacterMap = "V1"
V2ColumnNameCharacterMap ColumnNameCharacterMap = "V2"
)

Column name character map specifies column naming behavior for load jobs:

UnspecifiedColumnNameCharacterMap - Default value
StrictColumnNameCharacterMap - Flexible column names; invalid names rejected
V1ColumnNameCharacterMap - Alphanumeric plus underscore, must start with a letter or underscore; invalid names normalized
V2ColumnNameCharacterMap - Flexible column names; invalid names normalized
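A hedged sketch of applying this on a load job; it assumes your client version exposes a ColumnNameCharacterMap field on LoadConfig (the bucket path is illustrative):

gcsRef := bigquery.NewGCSReference("gs://mybucket/data/*.csv")
loader := table.LoaderFrom(gcsRef)
// Assumption: LoadConfig carries the column name character map setting
loader.ColumnNameCharacterMap = bigquery.V2ColumnNameCharacterMap
job, err := loader.Run(ctx)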
Create an external table with schema auto-detection:

externalTable := dataset.Table("external_table")
meta := &bigquery.TableMetadata{
ExternalDataConfig: &bigquery.ExternalDataConfig{
SourceFormat: bigquery.Parquet,
SourceURIs: []string{
"gs://my-bucket/data/*.parquet",
},
AutoDetect: true,
},
}
if err := externalTable.Create(ctx, meta); err != nil {
return err
}meta := &bigquery.TableMetadata{
ExternalDataConfig: &bigquery.ExternalDataConfig{
SourceFormat: bigquery.CSV,
SourceURIs: []string{
"gs://my-bucket/data/*.csv",
},
Options: &bigquery.CSVOptions{
SkipLeadingRows: 1,
FieldDelimiter: ",",
AllowQuotedNewlines: true,
},
Schema: schema,
},
}

type HivePartitioningOptions struct {
Mode HivePartitioningMode
SourceURIPrefix string
RequirePartitionFilter bool
}

type HivePartitioningMode string
const (
AutoHivePartitioningMode HivePartitioningMode = "AUTO"
StringHivePartitioningMode HivePartitioningMode = "STRINGS"
CustomHivePartitioningMode HivePartitioningMode = "CUSTOM"
)meta := &bigquery.TableMetadata{
ExternalDataConfig: &bigquery.ExternalDataConfig{
SourceFormat: bigquery.Parquet,
SourceURIs: []string{
"gs://my-bucket/data/year=*/month=*/day=*/*.parquet",
},
HivePartitioningOptions: &bigquery.HivePartitioningOptions{
Mode: bigquery.AutoHivePartitioningMode,
SourceURIPrefix: "gs://my-bucket/data/",
RequirePartitionFilter: true,
},
AutoDetect: true,
},
}

type ExternalDataConfigOptions interface {
populateExternalDataConfig(*bq.ExternalDataConfiguration)
}

The ExternalDataConfigOptions interface is implemented by the format-specific option types (CSVOptions, AvroOptions, ParquetOptions, GoogleSheetsOptions, BigtableOptions).
type AvroOptions struct {
UseAvroLogicalTypes bool
}

Options for Avro external data sources:
externalConfig := &bigquery.ExternalDataConfig{
SourceFormat: bigquery.Avro,
SourceURIs: []string{"gs://mybucket/data/*.avro"},
Options: &bigquery.AvroOptions{
UseAvroLogicalTypes: true, // Interpret logical types correctly
},
}

type ParquetOptions struct {
EnumAsString bool
EnableListInference bool
}

Options for Parquet external data sources:
externalConfig := &bigquery.ExternalDataConfig{
SourceFormat: bigquery.Parquet,
SourceURIs: []string{"gs://mybucket/data/*.parquet"},
Options: &bigquery.ParquetOptions{
EnumAsString: true, // Infer ENUM as STRING instead of BYTES
EnableListInference: true, // Use schema inference for LIST logical type
},
}

type GoogleSheetsOptions struct {
SkipLeadingRows int64
Range string
}

Options for Google Sheets external data sources:
externalConfig := &bigquery.ExternalDataConfig{
SourceFormat: bigquery.GoogleSheets,
SourceURIs: []string{"https://docs.google.com/spreadsheets/d/..."},
Options: &bigquery.GoogleSheetsOptions{
SkipLeadingRows: 1,
Range: "sheet1!A1:D100", // Optional range specification
},
}

type BigtableOptions struct {
ColumnFamilies []*BigtableColumnFamily
IgnoreUnspecifiedColumnFamilies bool
ReadRowkeyAsString bool
}

type BigtableColumnFamily struct {
FamilyID string
Columns []*BigtableColumn
Encoding string
OnlyReadLatest bool
Type string
}

type BigtableColumn struct {
Qualifier string
FieldName string
OnlyReadLatest bool
Encoding string
Type string
}

Options for Bigtable external data sources:
externalConfig := &bigquery.ExternalDataConfig{
SourceFormat: bigquery.Bigtable,
SourceURIs: []string{"https://googleapis.com/bigtable/projects/..."},
Options: &bigquery.BigtableOptions{
ReadRowkeyAsString: true,
ColumnFamilies: []*bigquery.BigtableColumnFamily{
{
FamilyID: "cf1",
Type: "STRING",
Encoding: "TEXT",
OnlyReadLatest: true,
Columns: []*bigquery.BigtableColumn{
{
Qualifier: "col1",
FieldName: "column1",
Type: "STRING",
},
},
},
},
},
}

type TableConstraints struct {
PrimaryKey *PrimaryKey
ForeignKeys []*ForeignKey
}

type PrimaryKey struct {
Columns []string
}

type ForeignKey struct {
Name string
ReferencedTable *Table
ColumnReferences []*ColumnReference
}

type ColumnReference struct {
ReferencingColumn string
ReferencedColumn string
}

Create a table with primary and foreign key constraints:

referencedTable := dataset.Table("users")
meta := &bigquery.TableMetadata{
Schema: schema,
TableConstraints: &bigquery.TableConstraints{
PrimaryKey: &bigquery.PrimaryKey{
Columns: []string{"id"},
},
ForeignKeys: []*bigquery.ForeignKey{
{
Name: "fk_user_id",
ReferencedTable: referencedTable,
ColumnReferences: []*bigquery.ColumnReference{
{
ReferencingColumn: "user_id",
ReferencedColumn: "id",
},
},
},
},
},
}

type CloneDefinition struct {
BaseTableReference *Table
CloneTime time.Time
}

Create a clone:
sourceTable := dataset.Table("source_table")
clone := dataset.Table("cloned_table")
meta := &bigquery.TableMetadata{
CloneDefinition: &bigquery.CloneDefinition{
BaseTableReference: sourceTable,
},
}
if err := clone.Create(ctx, meta); err != nil {
return err
}
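CloneTime pins a clone to a point in time. A minimal sketch, assuming the timestamp still falls within the source table's time-travel window:

meta := &bigquery.TableMetadata{
CloneDefinition: &bigquery.CloneDefinition{
BaseTableReference: sourceTable,
CloneTime: time.Now().Add(-1 * time.Hour), // clone the table as of one hour ago
},
}
if err := dataset.Table("cloned_at_time").Create(ctx, meta); err != nil {
return err
}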
type SnapshotDefinition struct {
BaseTableReference *Table
SnapshotTime time.Time
}

Snapshots are created using the SNAPSHOT operation with CopyConfig (see data-export.md).
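For reference, a minimal sketch of taking a snapshot with a copy job whose OperationType is SnapshotOperation (see data-export.md for full coverage):

source := dataset.Table("source_table")
snapshot := dataset.Table("source_table_snapshot")
copier := snapshot.CopierFrom(source)
copier.OperationType = bigquery.SnapshotOperation
job, err := copier.Run(ctx)
if err != nil {
return err
}
status, err := job.Wait(ctx)
if err != nil {
return err
}
if status.Err() != nil {
return status.Err()
}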
type StreamingBuffer struct {
EstimatedBytes uint64
EstimatedRows uint64
OldestEntryTime time.Time
}

Check streaming buffer status:
meta, err := table.Metadata(ctx)
if err != nil {
return err
}
if meta.StreamingBuffer != nil {
fmt.Printf("Streaming buffer: %d bytes, %d rows\n",
meta.StreamingBuffer.EstimatedBytes,
meta.StreamingBuffer.EstimatedRows)
}

Complete example:

package main
import (
"context"
"fmt"
"log"
"time"
"cloud.google.com/go/bigquery"
"cloud.google.com/go/internal/optional"
"google.golang.org/api/iterator"
)
type User struct {
ID int `bigquery:"id"`
Name string `bigquery:"name"`
Email string `bigquery:"email"`
CreatedAt time.Time `bigquery:"created_at"`
Active bigquery.NullBool `bigquery:"active"`
}
func main() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "my-project")
if err != nil {
log.Fatal(err)
}
defer client.Close()
dataset := client.Dataset("my_dataset")
table := dataset.Table("users")
// Infer schema from struct
schema, err := bigquery.InferSchema(User{})
if err != nil {
log.Fatal(err)
}
// Create table with partitioning and clustering
meta := &bigquery.TableMetadata{
Name: "Users Table",
Description: "User information",
Schema: schema,
TimePartitioning: &bigquery.TimePartitioning{
Type: bigquery.DayPartitioningType,
Field: "created_at",
},
Clustering: &bigquery.Clustering{
Fields: []string{"active", "email"},
},
ExpirationTime: time.Now().Add(365 * 24 * time.Hour),
Labels: map[string]string{
"env": "production",
},
}
if err := table.Create(ctx, meta); err != nil {
log.Fatal(err)
}
// Get metadata
meta, err = table.Metadata(ctx)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Created table: %s\n", meta.FullID)
fmt.Printf("Rows: %d\n", meta.NumRows)
// Update table
update := bigquery.TableMetadataToUpdate{
Description: optional.ToString("Updated user information"),
}
update.SetLabel("updated", "true")
meta, err = table.Update(ctx, update, meta.ETag)
if err != nil {
log.Fatal(err)
}
// List all tables
it := dataset.Tables(ctx)
for {
t, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
fmt.Printf("Table: %s\n", t.TableID)
}
}