A Pulumi package for creating and managing Amazon Web Services (AWS) cloud resources with infrastructure-as-code.
—
Quality: Pending — a best-practices review ("Does it follow best practices?") has not yet been completed.
Impact: Pending — no eval scenarios have been run.
AWS artificial intelligence and machine learning services.
Access foundation models from leading AI companies through a unified API.
import { bedrock } from "@pulumi/aws";

// NOTE(review): `bedrockRole`, `trainingBucket`, `outputBucket`, `kbRole`,
// `opensearchCollection`, and `agentRole` are assumed to be defined elsewhere
// in the full example — confirm against the complete program.

// Create a custom model with fine-tuning
const customModel = new bedrock.CustomModel("custom-llm", {
  customModelName: "my-custom-model",
  jobName: "training-job-001",
  // Foundation model being fine-tuned.
  baseModelIdentifier: "amazon.titan-text-express-v1",
  // IAM role Bedrock assumes to read/write the S3 locations below.
  roleArn: bedrockRole.arn,
  trainingDataConfig: {
    s3Uri: `s3://${trainingBucket.id}/training-data/`,
  },
  outputDataConfig: {
    s3Uri: `s3://${outputBucket.id}/output/`,
  },
  // Hyperparameter values are passed as strings.
  hyperparameters: {
    epochCount: "3",
    batchSize: "1",
  },
});

// Create a knowledge base for RAG
const knowledgeBase = new bedrock.KnowledgeBase("docs", {
  name: "documentation-kb",
  roleArn: kbRole.arn,
  knowledgeBaseConfiguration: {
    type: "VECTOR",
    vectorKnowledgeBaseConfiguration: {
      // Embedding model used to vectorize ingested documents.
      embeddingModelArn: "arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-embed-text-v1",
    },
  },
  storageConfiguration: {
    type: "OPENSEARCH_SERVERLESS",
    opensearchServerlessConfiguration: {
      collectionArn: opensearchCollection.arn,
      vectorIndexName: "bedrock-knowledge-base-index",
      // Maps the index fields Bedrock writes vectors, text, and metadata into.
      fieldMapping: {
        vectorField: "vector",
        textField: "text",
        metadataField: "metadata",
      },
    },
  },
});

// Create an agent with action groups
const agent = new bedrock.Agent("assistant", {
  agentName: "customer-service-agent",
  foundationModel: "anthropic.claude-3-sonnet-20240229-v1:0",
  // System instruction defining the agent's persona.
  instruction: "You are a helpful customer service assistant.",
  agentResourceRoleArn: agentRole.arn,
});
Key Resources: CustomModel, KnowledgeBase, Agent, AgentAlias, AgentActionGroup, GuardrailVersion
Use Cases: Text generation, chatbots, content creation, document summarization, retrieval-augmented generation (RAG)
Build, train, and deploy machine learning models at scale.
import { sagemaker } from "@pulumi/aws";

// NOTE(review): `sagemakerRole`, `vpc`, `subnetIds`, `accountId`, `region`,
// `modelBucket`, `trainingBucket`, `outputBucket`, and `featuresBucket` are
// assumed to be defined elsewhere in the full example.

// Create a SageMaker domain for Studio
const domain = new sagemaker.Domain("ml-platform", {
  domainName: "ml-development",
  authMode: "IAM",
  defaultUserSettings: {
    executionRole: sagemakerRole.arn,
  },
  vpcId: vpc.id,
  subnetIds: subnetIds,
});

// Create a user profile
const userProfile = new sagemaker.UserProfile("data-scientist", {
  domainId: domain.id,
  userProfileName: "ds-user",
  userSettings: {
    executionRole: sagemakerRole.arn,
  },
});

// Create a model
const model = new sagemaker.Model("text-classifier", {
  executionRoleArn: sagemakerRole.arn,
  primaryContainer: {
    // Inference image pulled from the account's private ECR registry.
    image: `${accountId}.dkr.ecr.${region}.amazonaws.com/my-model:latest`,
    modelDataUrl: `s3://${modelBucket.id}/model.tar.gz`,
  },
});

// Create an endpoint configuration
const endpointConfig = new sagemaker.EndpointConfiguration("production", {
  productionVariants: [{
    variantName: "variant-1",
    modelName: model.name,
    instanceType: "ml.m5.xlarge",
    initialInstanceCount: 2,
  }],
});

// Deploy the endpoint
const endpoint = new sagemaker.Endpoint("api", {
  endpointConfigName: endpointConfig.name,
});

// Create a training job
// NOTE(review): confirm a `TrainingJob` resource exists in the provider
// version in use — training jobs are not available in every provider release.
const trainingJob = new sagemaker.TrainingJob("model-training", {
  roleArn: sagemakerRole.arn,
  algorithmSpecification: {
    trainingImage: `${accountId}.dkr.ecr.${region}.amazonaws.com/training:latest`,
    trainingInputMode: "File",
  },
  inputDataConfig: [{
    channelName: "training",
    dataSource: {
      s3DataSource: {
        s3DataType: "S3Prefix",
        s3Uri: `s3://${trainingBucket.id}/data/`,
      },
    },
  }],
  outputDataConfig: {
    s3OutputPath: `s3://${outputBucket.id}/output/`,
  },
  resourceConfig: {
    // Single GPU instance with a 50 GB attached volume.
    instanceType: "ml.p3.2xlarge",
    instanceCount: 1,
    volumeSizeInGb: 50,
  },
  stoppingCondition: {
    // Hard stop after 24 hours (86400 seconds).
    maxRuntimeInSeconds: 86400,
  },
});

// Feature Store for ML features
const featureGroup = new sagemaker.FeatureGroup("customer-features", {
  featureGroupName: "customer-features",
  // Column that uniquely identifies each record.
  recordIdentifierFeatureName: "customer_id",
  eventTimeFeatureName: "event_time",
  roleArn: sagemakerRole.arn,
  featureDefinitions: [
    { featureName: "customer_id", featureType: "String" },
    { featureName: "age", featureType: "Integral" },
    { featureName: "total_purchases", featureType: "Fractional" },
    { featureName: "event_time", featureType: "String" },
  ],
  onlineStoreConfig: {
    enableOnlineStore: true,
  },
  offlineStoreConfig: {
    s3StorageConfig: {
      s3Uri: `s3://${featuresBucket.id}/`,
    },
  },
});
Key Resources: Domain, UserProfile, Model, Endpoint, EndpointConfiguration, TrainingJob, FeatureGroup, Pipeline, MonitoringSchedule
Use Cases: Model development, training, deployment, feature engineering, model monitoring, MLOps
Extract insights from text using machine learning.
import { comprehend } from "@pulumi/aws";

// NOTE(review): `comprehendRole`, `trainingBucket`, and `outputBucket` are
// assumed to be defined elsewhere in the full example.

// Create a document classifier
const classifier = new comprehend.DocumentClassifier("support-tickets", {
  name: "ticket-classifier",
  // IAM role Comprehend assumes to read training data and write results.
  dataAccessRoleArn: comprehendRole.arn,
  languageCode: "en",
  inputDataConfig: {
    s3Uri: `s3://${trainingBucket.id}/training-data/`,
  },
  outputDataConfig: {
    s3Uri: `s3://${outputBucket.id}/output/`,
  },
});

// Create an entity recognizer for custom entities
const entityRecognizer = new comprehend.EntityRecognizer("product-names", {
  name: "product-recognizer",
  dataAccessRoleArn: comprehendRole.arn,
  languageCode: "en",
  inputDataConfig: {
    // Custom entity types the recognizer is trained to detect.
    entityTypes: [
      { type: "PRODUCT" },
      { type: "FEATURE" },
    ],
    documents: {
      s3Uri: `s3://${trainingBucket.id}/documents/`,
    },
    // Entity annotations aligned with the documents above.
    annotations: {
      s3Uri: `s3://${trainingBucket.id}/annotations/`,
    },
  },
});
Key Resources: DocumentClassifier, EntityRecognizer
Use Cases: Sentiment analysis, entity extraction, language detection, topic modeling, custom classification
Analyze images and videos with deep learning.
import { rekognition } from "@pulumi/aws";

// NOTE(review): `rekognitionRole`, `kinesisStream`, and `outputStream` are
// assumed to be defined elsewhere in the full example.

// Create a collection for face search
const faceCollection = new rekognition.Collection("employees", {
  collectionId: "employee-faces",
});

// Create a custom labels project
const project = new rekognition.Project("product-detection", {
  name: "product-detector",
});

// Create a stream processor for real-time video analysis
const streamProcessor = new rekognition.StreamProcessor("security-camera", {
  name: "entrance-monitor",
  roleArn: rekognitionRole.arn,
  // NOTE(review): the provider may model these as nested `input.kinesisVideoStream`
  // and `output.kinesisDataStream` blocks — confirm the top-level shape used here.
  kinesisVideoStream: {
    arn: kinesisStream.arn,
  },
  kinesisDataStream: {
    arn: outputStream.arn,
  },
  settings: {
    faceSearch: {
      collectionId: faceCollection.collectionId,
      // Minimum confidence (percent) required to report a face match.
      faceMatchThreshold: 95,
    },
  },
});
Key Resources: Collection, Project, StreamProcessor
Use Cases: Face detection, object recognition, content moderation, custom label detection, video analysis
Convert text into lifelike speech.
import { polly } from "@pulumi/aws";

// Create a lexicon for custom pronunciation.
// Polly's PutLexicon requires the lexicon name to match [0-9A-Za-z]{1,20}
// (no hyphens), and the PLS document must declare both an `alphabet` and an
// `xml:lang` attribute on <lexicon> — documents without them are rejected.
const lexicon = new polly.Lexicon("company-terms", {
  name: "companyTerminology",
  content: `<?xml version="1.0" encoding="UTF-8"?>
<lexicon version="1.0"
    xmlns="http://www.w3.org/2005/01/pronunciation-lexicon"
    alphabet="ipa"
    xml:lang="en-US">
  <lexeme>
    <grapheme>AWS</grapheme>
    <alias>Amazon Web Services</alias>
  </lexeme>
</lexicon>`,
});

// Application code example to synthesize speech (apply the lexicon by name):
// const polly = new AWS.Polly();
// polly.synthesizeSpeech({
//   Text: "Hello from AWS Polly",
//   OutputFormat: "mp3",
//   VoiceId: "Joanna",
//   Engine: "neural",
//   LexiconNames: ["companyTerminology"],
// });
Key Resources: Lexicon
Use Cases: Accessibility, voice assistants, content narration, e-learning, IVR systems
Intelligent search service powered by machine learning.
import { kendra } from "@pulumi/aws";

// NOTE(review): `kendraRole` and `docsBucket` are assumed to be defined
// elsewhere in the full example.

// Create a Kendra index
const index = new kendra.Index("docs", {
  name: "documentation-search",
  roleArn: kendraRole.arn,
  edition: "ENTERPRISE_EDITION",
});

// Add a data source
const dataSource = new kendra.DataSource("s3-docs", {
  indexId: index.id,
  name: "s3-documentation",
  type: "S3",
  // NOTE(review): this passes `configuration` as a JSON string with raw-API
  // (PascalCase) casing, while the Experience below passes a plain camelCase
  // object — confirm which shape the provider expects; they should likely match.
  configuration: JSON.stringify({
    S3Configuration: {
      BucketName: docsBucket.id,
    },
  }),
  roleArn: kendraRole.arn,
});

// Create an experience (search application)
const experience = new kendra.Experience("portal", {
  indexId: index.id,
  name: "search-portal",
  roleArn: kendraRole.arn,
  configuration: {
    contentSourceConfiguration: {
      // Restrict the search experience to this data source.
      dataSourceIds: [dataSource.id],
    },
  },
});
Key Resources: Index, DataSource, Experience, Faq, QuerySuggestionsBlockList, Thesaurus
Use Cases: Document search, knowledge bases, FAQ systems, intranet search, customer support
Build conversational interfaces using voice and text.
import { lex } from "@pulumi/aws";

// NOTE(review): `lexRole` is assumed to be defined elsewhere in the full example.

// Create a bot for customer service
const bot = new lex.V2modelsBot("support-bot", {
  name: "customer-support",
  roleArn: lexRole.arn,
  dataPrivacy: {
    // Must be true if the bot is directed at children (COPPA).
    childDirected: false,
  },
  // Session state is discarded after 5 minutes of inactivity.
  idleSessionTtlInSeconds: 300,
});

// Create a bot locale
const locale = new lex.V2modelsBotLocale("en", {
  botId: bot.id,
  botVersion: "DRAFT",
  localeId: "en_US",
  // Minimum NLU confidence (0-1) before an intent is matched.
  nluIntentConfidenceThreshold: 0.7,
});

// Create an intent
const intent = new lex.V2modelsIntent("check-order", {
  botId: bot.id,
  botVersion: "DRAFT",
  localeId: locale.localeId,
  name: "CheckOrderStatus",
  // Example phrases users might say to trigger this intent.
  sampleUtterances: [
    { utterance: "Where is my order" },
    { utterance: "Track my order" },
    { utterance: "Order status" },
  ],
});

// Create a slot type for custom values
const slotType = new lex.V2modelsSlotType("order-id", {
  botId: bot.id,
  botVersion: "DRAFT",
  localeId: locale.localeId,
  name: "OrderId",
  valueSelectionSetting: {
    // Use the raw user-supplied value rather than resolving to a synonym.
    resolutionStrategy: "ORIGINAL_VALUE",
  },
});
Key Resources: V2modelsBot, V2modelsBotLocale, V2modelsIntent, V2modelsSlot, V2modelsSlotType, V2modelsBotVersion
Use Cases: Chatbots, voice assistants, contact center automation, self-service applications
Neural machine translation service for real-time and batch translation.
Automatic speech recognition for converting audio to text.
Extract text and data from scanned documents.
Time-series forecasting using machine learning.
For complete service list, see All Services A-Z.
Install with Tessl CLI
npx tessl i tessl/npm-pulumi--aws