Read Operations

This document covers all read operations including single reads, batch reads, queries, and read options.

Overview

Spanner supports several types of read operations:

  • Single reads: One-time read operations optimized for low latency
  • ReadOnlyTransaction: Multiple consistent reads at the same timestamp
  • Queries: SQL-based data retrieval
  • Batch reads: Partitioned reads for parallel processing
  • Index reads: Reading using secondary indexes

Single Reads

Use Single() for one-shot read operations:

func (c *Client) Single() *ReadOnlyTransaction

ReadRow - Read Single Row

func (t *ReadOnlyTransaction) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error)

Example:

row, err := client.Single().ReadRow(ctx, "Users",
    spanner.Key{"alice"}, []string{"email", "name"})
if err != nil {
    return err
}

var email, name string
if err := row.Columns(&email, &name); err != nil {
    return err
}

Read - Read Multiple Rows

func (t *ReadOnlyTransaction) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator

Example:

iter := client.Single().Read(ctx, "Users",
    spanner.AllKeys(), []string{"id", "name"})
defer iter.Stop()

for {
    row, err := iter.Next()
    if err == iterator.Done {
        break
    }
    if err != nil {
        return err
    }
    // Process row
}

ReadUsingIndex - Read with Secondary Index

func (t *ReadOnlyTransaction) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator

Example:

iter := client.Single().ReadUsingIndex(ctx, "Users", "UsersByEmail",
    spanner.Key{"alice@example.com"}, []string{"id", "name"})
defer iter.Stop()
// Iterate with iter.Next() as shown above

Read Options

ReadWithOptions

func (t *ReadOnlyTransaction) ReadWithOptions(ctx context.Context, table string, keys KeySet, columns []string, opts *ReadOptions) *RowIterator

type ReadOptions struct {
    Index                string
    Limit                int
    Priority             sppb.RequestOptions_Priority
    RequestTag           string
    DataBoostEnabled     bool
    DirectedReadOptions  *sppb.DirectedReadOptions
    OrderBy              sppb.ReadRequest_OrderBy
    LockHint             sppb.ReadRequest_LockHint
}

Example:

opts := &spanner.ReadOptions{
    Limit:      100,
    Priority:   sppb.RequestOptions_PRIORITY_HIGH,
    RequestTag: "user-query-001",
}

iter := client.Single().ReadWithOptions(ctx, "Users",
    spanner.AllKeys(), []string{"id", "name"}, opts)
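
The Index field of ReadOptions is an alternative to calling ReadUsingIndex; a minimal sketch, reusing the UsersByEmail index from the previous example:

opts := &spanner.ReadOptions{
    Index: "UsersByEmail", // read through the secondary index
    Limit: 10,
}

iter := client.Single().ReadWithOptions(ctx, "Users",
    spanner.Key{"alice@example.com"}, []string{"id", "name"}, opts)
defer iter.Stop()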

SQL Queries

Query - Execute SQL Statement

func (t *ReadOnlyTransaction) Query(ctx context.Context, statement Statement) *RowIterator

type Statement struct {
    SQL    string
    Params map[string]interface{}
}

func NewStatement(sql string) Statement

Example:

stmt := spanner.NewStatement("SELECT id, name FROM Users WHERE age > @minAge")
stmt.Params["minAge"] = 18

iter := client.Single().Query(ctx, stmt)
defer iter.Stop()

for {
    row, err := iter.Next()
    if err == iterator.Done {
        break
    }
    if err != nil {
        return err
    }
    
    var id int64
    var name string
    if err := row.Columns(&id, &name); err != nil {
        return err
    }
}

QueryWithOptions

func (t *ReadOnlyTransaction) QueryWithOptions(ctx context.Context, statement Statement, opts QueryOptions) *RowIterator

type QueryOptions struct {
    Mode                        *sppb.ExecuteSqlRequest_QueryMode
    Options                     *sppb.ExecuteSqlRequest_QueryOptions
    Priority                    sppb.RequestOptions_Priority
    RequestTag                  string
    DataBoostEnabled            bool
    DirectedReadOptions         *sppb.DirectedReadOptions
    ExcludeTxnFromChangeStreams bool
    LastStatement               bool
}

Example:

stmt := spanner.NewStatement("SELECT * FROM Users WHERE region = @region")
stmt.Params["region"] = "us-west"

opts := spanner.QueryOptions{
    Priority:   sppb.RequestOptions_PRIORITY_MEDIUM,
    RequestTag: "regional-query",
}

iter := client.Single().QueryWithOptions(ctx, stmt, opts)

QueryWithStats - Query with Execution Statistics

func (t *ReadOnlyTransaction) QueryWithStats(ctx context.Context, statement Statement) *RowIterator

Example:

stmt := spanner.NewStatement("SELECT * FROM Users WHERE active = true")
iter := client.Single().QueryWithStats(ctx, stmt)
defer iter.Stop()

// Process rows
for {
    row, err := iter.Next()
    if err == iterator.Done {
        break
    }
    if err != nil {
        return err
    }
    // Process row
}

// Access statistics after iteration completes
// (RowCount is only populated for DML statements)
fmt.Printf("Query stats: %v\n", iter.QueryStats)
fmt.Printf("Query plan: %v\n", iter.QueryPlan)

AnalyzeQuery - Get Query Plan

func (t *ReadOnlyTransaction) AnalyzeQuery(ctx context.Context, statement Statement) (*sppb.QueryPlan, error)

Example:

stmt := spanner.NewStatement("SELECT * FROM Users WHERE region = @region")
stmt.Params["region"] = "us-east"

plan, err := client.Single().AnalyzeQuery(ctx, stmt)
if err != nil {
    return err
}

// Inspect query plan
fmt.Printf("Query plan: %v\n", plan)

RowIterator

Iterator for reading multiple rows:

type RowIterator struct {
    QueryPlan  *sppb.QueryPlan               // Available after QueryWithStats completes
    QueryStats map[string]interface{}        // Available after QueryWithStats completes
    RowCount   int64                         // Rows affected (for DML)
    Metadata   *sppb.ResultSetMetadata       // Result metadata
}

func (r *RowIterator) Next() (*Row, error)
func (r *RowIterator) Do(f func(r *Row) error) error
func (r *RowIterator) Stop()

RowIterator.Next() Pattern

iter := client.Single().Read(ctx, "Users", spanner.AllKeys(), []string{"id", "name"})
defer iter.Stop()

for {
    row, err := iter.Next()
    if err == iterator.Done {
        break
    }
    if err != nil {
        return err
    }
    // Process row
}

RowIterator.Do() Pattern

iter := client.Single().Read(ctx, "Users", spanner.AllKeys(), []string{"id", "name"})

err := iter.Do(func(row *spanner.Row) error {
    var id int64
    var name string
    if err := row.Columns(&id, &name); err != nil {
        return err
    }
    fmt.Printf("%d: %s\n", id, name)
    return nil
})
if err != nil {
    return err
}

Row Type

type Row struct {
    // Has unexported fields
}

func NewRow(columnNames []string, columnValues []interface{}) (*Row, error)

Row Methods

func (r *Row) Column(i int, ptr interface{}) error
func (r *Row) ColumnByName(name string, ptr interface{}) error
func (r *Row) Columns(ptrs ...interface{}) error
func (r *Row) ToStruct(p interface{}) error
func (r *Row) ToStructLenient(p interface{}) error
func (r *Row) Size() int
func (r *Row) ColumnNames() []string
func (r *Row) ColumnName(i int) string
func (r *Row) ColumnIndex(name string) (int, error)
func (r *Row) ColumnType(i int) *sppb.Type
func (r *Row) ColumnValue(i int) *proto3.Value
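
The metadata accessors help when the result shape is not known in advance; a minimal sketch, where row is any *spanner.Row obtained as above:

// Inspect a row's shape before decoding
fmt.Printf("Row has %d columns: %v\n", row.Size(), row.ColumnNames())

idx, err := row.ColumnIndex("name")
if err != nil {
    return err
}
fmt.Printf("Column %q has index %d and type %v\n", "name", idx, row.ColumnType(idx))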

Decoding Row Data

By Position:

var id int64
var name string
if err := row.Column(0, &id); err != nil {
    return err
}
if err := row.Column(1, &name); err != nil {
    return err
}

By Name:

var email string
if err := row.ColumnByName("email", &email); err != nil {
    return err
}

All Columns:

var id int64
var name, email string
if err := row.Columns(&id, &name, &email); err != nil {
    return err
}

To Struct:

type User struct {
    ID    int64  `spanner:"id"`
    Name  string `spanner:"name"`
    Email string `spanner:"email"`
}

var user User
if err := row.ToStruct(&user); err != nil {
    return err
}

ReadOnlyTransaction

For multiple consistent reads at the same timestamp:

func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction

type ReadOnlyTransaction struct {
    // Has unexported fields
}

Read-Only Transaction Methods

All read methods from Single() plus:

func (t *ReadOnlyTransaction) Close()
func (t *ReadOnlyTransaction) Timestamp() (time.Time, error)
func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction
func (t *ReadOnlyTransaction) WithBeginTransactionOption(option BeginTransactionOption) *ReadOnlyTransaction

Example:

txn := client.ReadOnlyTransaction()
defer txn.Close()

// Multiple reads at same timestamp
row1, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"balance"})
if err != nil {
    return err
}

row2, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"123"}, []string{"total"})
if err != nil {
    return err
}

// Get read timestamp
ts, err := txn.Timestamp()
if err != nil {
    return err
}
fmt.Printf("Read at timestamp: %v\n", ts)

Timestamp Bounds

Control when data is read using timestamp bounds:

type TimestampBound struct {
    // Has unexported fields
}

func StrongRead() TimestampBound
func ReadTimestamp(t time.Time) TimestampBound
func ExactStaleness(d time.Duration) TimestampBound
func MinReadTimestamp(t time.Time) TimestampBound
func MaxStaleness(d time.Duration) TimestampBound

Strong Read (Default)

Read latest committed data:

row, err := client.Single().ReadRow(ctx, "Users", key, columns)

// Equivalent to:
row, err = client.Single().WithTimestampBound(spanner.StrongRead()).ReadRow(ctx, "Users", key, columns)

Exact Staleness

Read data at a specific staleness:

// Read data as it was 15 seconds ago
txn := client.Single().WithTimestampBound(spanner.ExactStaleness(15 * time.Second))
iter := txn.Query(ctx, stmt)

Read Timestamp

Read at specific timestamp:

ts := time.Now().Add(-1 * time.Hour)
txn := client.Single().WithTimestampBound(spanner.ReadTimestamp(ts))
row, err := txn.ReadRow(ctx, "Users", key, columns)

Max Staleness

Read data at most N seconds stale:

// Read data at most 10 seconds stale
txn := client.Single().WithTimestampBound(spanner.MaxStaleness(10 * time.Second))
iter := txn.Read(ctx, "Users", keys, columns)

Min Read Timestamp

Read at timestamp at least as recent as specified:

minTs := time.Now().Add(-5 * time.Minute)
txn := client.Single().WithTimestampBound(spanner.MinReadTimestamp(minTs))
iter := txn.Query(ctx, stmt)

Batch Read-Only Transactions

For partitioned reads across multiple machines:

func (c *Client) BatchReadOnlyTransaction(ctx context.Context, tb TimestampBound) (*BatchReadOnlyTransaction, error)
func (c *Client) BatchReadOnlyTransactionFromID(tid BatchReadOnlyTransactionID) *BatchReadOnlyTransaction

type BatchReadOnlyTransaction struct {
    ReadOnlyTransaction
    ID BatchReadOnlyTransactionID
}

Partition Reads

func (t *BatchReadOnlyTransaction) PartitionRead(ctx context.Context, table string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error)
func (t *BatchReadOnlyTransaction) PartitionReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error)
func (t *BatchReadOnlyTransaction) PartitionReadWithOptions(ctx context.Context, table string, keys KeySet, columns []string, opt PartitionOptions, readOptions ReadOptions) ([]*Partition, error)

type PartitionOptions struct {
    PartitionBytes int64  // Desired data size per partition
    MaxPartitions  int64  // Desired maximum partitions
}
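
PartitionRead follows the same create-then-execute pattern as PartitionQuery in the batch example below; a minimal sketch, assuming txn is a *BatchReadOnlyTransaction and using the Users table from earlier examples:

// Split a full-table key-range read into at most 10 partitions
partitions, err := txn.PartitionRead(ctx, "Users", spanner.AllKeys(),
    []string{"id", "name"}, spanner.PartitionOptions{MaxPartitions: 10})
if err != nil {
    return err
}
// Execute each partition with txn.Execute, typically in parallel (see the example below)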

Partition Queries

func (t *BatchReadOnlyTransaction) PartitionQuery(ctx context.Context, statement Statement, opt PartitionOptions) ([]*Partition, error)
func (t *BatchReadOnlyTransaction) PartitionQueryWithOptions(ctx context.Context, statement Statement, opt PartitionOptions, qOpts QueryOptions) ([]*Partition, error)

Execute Partition

func (t *BatchReadOnlyTransaction) Execute(ctx context.Context, p *Partition) *RowIterator

Batch Read Example

// Create batch transaction
txn, err := client.BatchReadOnlyTransaction(ctx, spanner.StrongRead())
if err != nil {
    return err
}
defer txn.Close()

// Create partitions
stmt := spanner.NewStatement("SELECT * FROM Users WHERE active = true")
partitions, err := txn.PartitionQuery(ctx, stmt, spanner.PartitionOptions{
    PartitionBytes: 100000000, // 100MB per partition
})
if err != nil {
    return err
}

// Execute partitions in parallel
var wg sync.WaitGroup
for _, partition := range partitions {
    wg.Add(1)
    go func(p *spanner.Partition) {
        defer wg.Done()
        
        iter := txn.Execute(ctx, p)
        defer iter.Stop()
        
        for {
            row, err := iter.Next()
            if err == iterator.Done {
                break
            }
            if err != nil {
                log.Printf("Error: %v", err)
                return
            }
            // Process row
        }
    }(partition)
}
wg.Wait()

Sharing Batch Transactions

Share batch transaction across processes:

// Process 1: Create and serialize
txn, err := client.BatchReadOnlyTransaction(ctx, spanner.StrongRead())
if err != nil {
    return err
}

txnID := txn.ID
data, err := txnID.MarshalBinary()
if err != nil {
    return err
}
// Share 'data' with other processes

// Process 2: Reconstruct
var txnID spanner.BatchReadOnlyTransactionID
if err := txnID.UnmarshalBinary(data); err != nil {
    return err
}

txn := client.BatchReadOnlyTransactionFromID(txnID)
// Use txn for reads

SelectAll Helper Function

Utility for iterating all rows into a slice:

func SelectAll(rows rowIterator, destination interface{}, options ...DecodeOptions) error

func WithLenient() DecodeOptions

Example:

type User struct {
    ID    int64
    Name  string
    Email string
}

stmt := spanner.NewStatement("SELECT id, name, email FROM Users")
iter := client.Single().Query(ctx, stmt)

var users []*User
if err := spanner.SelectAll(iter, &users); err != nil {
    return err
}

// Or, with a fresh iterator, use lenient mode (ignore extra columns)
iter = client.Single().Query(ctx, stmt)
if err := spanner.SelectAll(iter, &users, spanner.WithLenient()); err != nil {
    return err
}

Best Practices

  1. Use Single() for one-time reads: More efficient than creating transactions
  2. Always call Stop() on iterators: Required unless you use Do(), which stops the iterator for you
  3. Use timestamp bounds wisely: Stale reads improve performance for non-critical data (see the sketch after this list)
  4. Batch similar reads: Use ReadOnlyTransaction for multiple consistent reads
  5. Use partitioning for large datasets: Enables parallel processing
  6. Set appropriate limits: Prevent memory issues with large result sets
  7. Handle iterator.Done properly: Always check for this specific error
  8. Use ToStruct for complex data: Cleaner than manual column extraction
  9. Add request tags: For tracking and debugging queries
  10. Monitor query stats: Use QueryWithStats to optimize slow queries
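
A minimal sketch combining several of these practices (a stale, tagged query iterated with Do(), which handles Stop() and iterator.Done internally); table, column, and tag names are illustrative:

stmt := spanner.NewStatement("SELECT id, name FROM Users WHERE active = true")
opts := spanner.QueryOptions{RequestTag: "active-users-report"} // illustrative tag

// Allow results up to 10 seconds stale for this non-critical report
txn := client.Single().WithTimestampBound(spanner.MaxStaleness(10 * time.Second))
err := txn.QueryWithOptions(ctx, stmt, opts).Do(func(row *spanner.Row) error {
    var id int64
    var name string
    if err := row.Columns(&id, &name); err != nil {
        return err
    }
    // Process the row
    return nil
})
if err != nil {
    return err
}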