A Pulumi package for creating and managing Amazon Web Services (AWS) cloud resources with infrastructure-as-code.
—
Quality
Pending
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Guide to selecting and using AWS database services with Pulumi.
Structured data with ACID transactions
Flexible schemas, horizontal scaling
Microsecond latency, session stores
Purpose-built for specific workloads
Start: What's your data model?
┌─ Relational (SQL)
│ ├─ Need AWS-optimized? → Yes: Aurora | No: RDS
│ ├─ Analytics/warehousing? → Redshift
│ └─ Specific engine?
│ ├─ PostgreSQL → Aurora PostgreSQL or RDS PostgreSQL
│ ├─ MySQL → Aurora MySQL or RDS MySQL
│ ├─ Oracle → RDS Oracle
│ └─ SQL Server → RDS SQL Server
│
├─ NoSQL
│ ├─ Key-value? → DynamoDB (serverless, millisecond latency)
│ ├─ Document?
│ │ ├─ MongoDB compatible? → DocumentDB
│ │ └─ Simple key-value → DynamoDB
│ ├─ Wide-column? → Keyspaces (Cassandra)
│ └─ Graph? → Neptune
│
├─ Caching
│ ├─ DynamoDB acceleration? → DAX
│ ├─ Redis features? → ElastiCache Redis or MemoryDB
│ └─ Simple caching? → ElastiCache Memcached
│
└─ Specialized
├─ Time-series data → Timestream
├─ Search/analytics → OpenSearch
├─ Immutable ledger → QLDB
└─ Graph relationships → Neptune

Engine selection:
Instance classes:
When to use Aurora instead:
Aurora vs RDS:
Aurora Serverless v2:
Access patterns:
Capacity modes:
Best practices:
Redis vs Memcached:
Redis:
Memcached:
When to use MemoryDB instead:
Redshift vs RDS:
Deployment options:
Best practices:
DocumentDB vs DynamoDB:
Query languages:
Use cases:
RDS Multi-AZ with Read Replicas
import * as aws from "@pulumi/aws";
// Subnet group for RDS
const subnetGroup = new aws.rds.SubnetGroup("db-subnet", {
subnetIds: privateSubnetIds,
tags: { Name: "Main DB subnet group" },
});
// Security group
const dbSg = new aws.ec2.SecurityGroup("db-sg", {
vpcId: vpc.id,
ingress: [{
protocol: "tcp",
fromPort: 5432,
toPort: 5432,
securityGroups: [appSg.id],
}],
});
// Primary database instance
const db = new aws.rds.Instance("primary-db", {
engine: "postgres",
engineVersion: "15.4",
instanceClass: "db.t3.medium",
allocatedStorage: 100,
storageType: "gp3",
dbName: "myapp",
username: "admin",
password: dbPassword.result, // From secrets manager
multiAz: true, // High availability
dbSubnetGroupName: subnetGroup.name,
vpcSecurityGroupIds: [dbSg.id],
backupRetentionPeriod: 7,
backupWindow: "03:00-04:00",
maintenanceWindow: "sun:04:00-sun:05:00",
enabledCloudwatchLogsExports: ["postgresql", "upgrade"],
storageEncrypted: true,
skipFinalSnapshot: false,
finalSnapshotIdentifier: "final-snapshot",
});
// Read replica for scaling reads
const readReplica = new aws.rds.Instance("read-replica", {
replicateSourceDb: db.id,
instanceClass: "db.t3.medium",
publiclyAccessible: false,
});
export const dbEndpoint = db.endpoint;
export const replicaEndpoint = readReplica.endpoint;Use when: Traditional web applications, ACID requirements
DynamoDB + Lambda
// DynamoDB table
const table = new aws.dynamodb.Table("users", {
attributes: [
{ name: "userId", type: "S" },
{ name: "email", type: "S" },
{ name: "createdAt", type: "N" },
],
hashKey: "userId",
billingMode: "PAY_PER_REQUEST", // On-demand scaling
globalSecondaryIndexes: [{
name: "EmailIndex",
hashKey: "email",
projectionType: "ALL",
}],
streamEnabled: true,
streamViewType: "NEW_AND_OLD_IMAGES",
pointInTimeRecovery: { enabled: true },
serverSideEncryption: { enabled: true },
tags: { Name: "Users table" },
});
// Lambda function
const handler = new aws.lambda.Function("api", {
runtime: "nodejs20.x",
handler: "index.handler",
role: lambdaRole.arn,
code: new pulumi.asset.FileArchive("./app"),
environment: {
variables: {
TABLE_NAME: table.name,
},
},
});
// DynamoDB stream processor
const streamProcessor = new aws.lambda.Function("stream-processor", {
runtime: "nodejs20.x",
handler: "stream.handler",
role: streamRole.arn,
code: new pulumi.asset.FileArchive("./stream"),
});
const eventSourceMapping = new aws.lambda.EventSourceMapping("dynamodb-stream", {
eventSourceArn: table.streamArn,
functionName: streamProcessor.arn,
startingPosition: "LATEST",
});
export const tableName = table.name;Use when: Serverless applications, variable workloads, high scale
Aurora Serverless v2 + Connection Pooling
// Aurora Serverless v2 cluster
const cluster = new aws.rds.Cluster("aurora-cluster", {
engine: "aurora-postgresql",
engineMode: "provisioned",
engineVersion: "15.4",
databaseName: "myapp",
masterUsername: "admin",
masterPassword: dbPassword.result,
dbSubnetGroupName: subnetGroup.name,
vpcSecurityGroupIds: [dbSg.id],
serverlessv2ScalingConfiguration: {
minCapacity: 0.5, // 0.5 ACU minimum
maxCapacity: 16, // 16 ACU maximum
},
enabledCloudwatchLogsExports: ["postgresql"],
backupRetentionPeriod: 7,
storageEncrypted: true,
});
// Serverless v2 instances
const writer = new aws.rds.ClusterInstance("writer", {
clusterIdentifier: cluster.id,
instanceClass: "db.serverless",
engine: cluster.engine,
engineVersion: cluster.engineVersion,
});
const reader = new aws.rds.ClusterInstance("reader", {
clusterIdentifier: cluster.id,
instanceClass: "db.serverless",
engine: cluster.engine,
engineVersion: cluster.engineVersion,
});
// RDS Proxy for connection pooling
const proxy = new aws.rds.Proxy("db-proxy", {
name: "aurora-proxy",
engineFamily: "POSTGRESQL",
auths: [{
authScheme: "SECRETS",
secretArn: dbSecret.arn,
}],
roleArn: proxyRole.arn,
vpcSubnetIds: privateSubnetIds,
vpcSecurityGroupIds: [proxySg.id],
requireTls: true,
});
const proxyTarget = new aws.rds.ProxyDefaultTargetGroup("target", {
dbProxyName: proxy.name,
connectionPoolConfig: {
maxConnectionsPercent: 100,
maxIdleConnectionsPercent: 50,
connectionBorrowTimeout: 120,
},
});
const proxyTargetGroupAttachment = new aws.rds.ProxyTarget("attachment", {
dbProxyName: proxy.name,
targetGroupName: proxyTarget.name,
dbClusterIdentifier: cluster.id,
});
export const proxyEndpoint = proxy.endpoint;Use when: Variable workloads, Lambda functions, high connection counts
ElastiCache Redis + RDS
// --- ElastiCache Redis in front of RDS (cache-aside) ---
// NOTE(review): assumes `privateSubnetIds`, `cacheSg`, and `cachePassword`
// are defined elsewhere in the program — confirm against the caller.
// ElastiCache subnet group
const cacheSubnetGroup = new aws.elasticache.SubnetGroup("cache-subnet", {
subnetIds: privateSubnetIds,
});
// Redis replication group: one primary plus one replica, with automatic
// failover across AZs.
const redis = new aws.elasticache.ReplicationGroup("redis-cluster", {
replicationGroupId: "app-cache",
description: "Application cache layer",
engine: "redis",
engineVersion: "7.0",
nodeType: "cache.t3.micro",
numCacheClusters: 2, // Primary + 1 replica
automaticFailoverEnabled: true,
multiAzEnabled: true,
subnetGroupName: cacheSubnetGroup.name,
securityGroupIds: [cacheSg.id],
atRestEncryptionEnabled: true,
transitEncryptionEnabled: true, // TLS in transit; pairs with authToken below
authToken: cachePassword.result,
snapshotRetentionLimit: 5,
snapshotWindow: "03:00-05:00",
maintenanceWindow: "sun:05:00-sun:07:00",
autoMinorVersionUpgrade: true,
});
// Application uses cache-aside pattern
export const redisEndpoint = redis.primaryEndpointAddress;
export const redisPort = redis.port;
// Example application code pattern:
// 1. Check Redis for cached data
// 2. If miss, query RDS
// 3. Store result in Redis with TTL
// 4. Return data
// Use when: read-heavy workloads, reducing database load, session storage
Redshift + S3 Data Lake
// Redshift subnet group
const redshiftSubnetGroup = new aws.redshift.SubnetGroup("redshift-subnet", {
subnetIds: privateSubnetIds,
tags: { Name: "Redshift subnet group" },
});
// Redshift cluster
const dataWarehouse = new aws.redshift.Cluster("analytics", {
clusterIdentifier: "data-warehouse",
databaseName: "analytics",
masterUsername: "admin",
masterPassword: redshiftPassword.result,
nodeType: "dc2.large",
numberOfNodes: 2,
clusterSubnetGroupName: redshiftSubnetGroup.name,
vpcSecurityGroupIds: [redshiftSg.id],
encrypted: true,
enhancedVpcRouting: true,
automatedSnapshotRetentionPeriod: 7,
skipFinalSnapshot: false,
finalSnapshotIdentifier: "final-snapshot",
});
// S3 bucket for data lake
const dataLake = new aws.s3.BucketV2("data-lake", {
bucket: "analytics-data-lake",
});
// IAM role for Redshift to access S3
const redshiftRole = new aws.iam.Role("redshift-role", {
assumeRolePolicy: JSON.stringify({
Version: "2012-10-17",
Statement: [{
Effect: "Allow",
Principal: { Service: "redshift.amazonaws.com" },
Action: "sts:AssumeRole",
}],
}),
});
const s3Policy = new aws.iam.RolePolicyAttachment("s3-access", {
role: redshiftRole.name,
policyArn: "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess",
});
const clusterIamRole = new aws.redshift.ClusterIamRoles("cluster-roles", {
clusterIdentifier: dataWarehouse.id,
iamRoleArns: [redshiftRole.arn],
});
// Query S3 data with Redshift Spectrum
// CREATE EXTERNAL SCHEMA spectrum_schema
// FROM DATA CATALOG DATABASE 'spectrum_db'
// IAM_ROLE 'arn:aws:iam::account:role/role'
// CREATE EXTERNAL DATABASE IF NOT EXISTS;
export const redshiftEndpoint = dataWarehouse.endpoint;Use when: Business intelligence, analytics, data warehousing
Install with Tessl CLI
npx tessl i tessl/npm-pulumi--aws