or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

administration.mdassistants-threads.mdbatch-processing.mdbeta-realtime.mdchat-completions.mdconfiguration-management.mdcontainer-content.mdcore-client.mdembeddings.mdevaluation-testing.mdfeedback-collections.mdfile-management.mdfine-tuning.mdframework-integrations.mdindex.mdkey-management.mdmodels.mdmultimodal-apis.mdobservability-analytics.mdprompt-management.mdprovider-integration.mdtext-completions.mduploads.mdvector-stores.md

prompt-management.mddocs/

0

# Prompt Management

Advanced prompt management system with templating, rendering, versioning, and prompt execution capabilities.

## Capabilities

### Prompt Rendering and Execution

Advanced prompt management with templating, versioning, and execution capabilities.

```python { .api }
class Prompts:
    def render(
        self,
        *,
        prompt_id: str,
        variables: Mapping[str, Any]
    ) -> PromptRender:
        """
        Render a prompt template with variables.

        Args:
            prompt_id: Unique identifier for the prompt template
            variables: Dictionary of variables to substitute in template

        Returns:
            PromptRender: Rendered prompt content
        """

    def completions(
        self,
        *,
        prompt_id: str,
        variables: Optional[Mapping[str, Any]] = None,
        **kwargs
    ) -> Union[PromptCompletion, Stream[PromptCompletionChunk]]:
        """
        Execute a prompt template and generate completion.

        Args:
            prompt_id: Unique identifier for the prompt template
            variables: Variables for template substitution
            **kwargs: Additional completion parameters (model, temperature, etc.)

        Returns:
            PromptCompletion: Completion response or streaming chunks
        """

    completions: Completions
    versions: PromptVersions
    partials: PromptPartials

class AsyncPrompts:
    async def render(
        self,
        *,
        prompt_id: str,
        variables: Mapping[str, Any]
    ) -> PromptRender:
        """Async version of render method."""

    async def completions(
        self,
        *,
        prompt_id: str,
        variables: Optional[Mapping[str, Any]] = None,
        **kwargs
    ) -> Union[PromptCompletion, AsyncStream[PromptCompletionChunk]]:
        """Async version of completions method."""

    completions: AsyncCompletions
    versions: AsyncPromptVersions
    partials: AsyncPromptPartials
```

### Prompt Versions

Manage different versions of prompt templates for A/B testing and iterative improvement.

```python { .api }
class PromptVersions:
    def list(self, prompt_id: str, **kwargs): ...
    def retrieve(self, prompt_id: str, version_id: str, **kwargs): ...
    def create(self, prompt_id: str, **kwargs): ...
    def update(self, prompt_id: str, version_id: str, **kwargs): ...

class AsyncPromptVersions:
    async def list(self, prompt_id: str, **kwargs): ...
    async def retrieve(self, prompt_id: str, version_id: str, **kwargs): ...
    async def create(self, prompt_id: str, **kwargs): ...
    async def update(self, prompt_id: str, version_id: str, **kwargs): ...
```

### Prompt Partials

Handle partial prompt rendering and composition for complex prompt structures.

```python { .api }
class PromptPartials:
    def render(self, **kwargs): ...
    def create(self, **kwargs): ...

class AsyncPromptPartials:
    async def render(self, **kwargs): ...
    async def create(self, **kwargs): ...
```

### Generation Management (Deprecated)

Legacy prompt generation API - use Prompts API instead for new implementations.

```python { .api }
class Generations:
    def create(
        self,
        *,
        prompt_id: str,
        config: Optional[Union[Mapping, str]] = None,
        variables: Optional[Mapping[str, Any]] = None
    ) -> Union[GenericResponse, Stream[GenericResponse]]:
        """
        DEPRECATED: Create prompt generation.
        Use portkey.prompts.completions() instead.
        """

class AsyncGenerations:
    async def create(
        self,
        *,
        prompt_id: str,
        config: Optional[Union[Mapping, str]] = None,
        variables: Optional[Mapping[str, Any]] = None
    ) -> Union[GenericResponse, AsyncStream[GenericResponse]]:
        """DEPRECATED: Async version of create method."""
```

## Usage Examples

```python
from portkey_ai import Portkey

portkey = Portkey(
    api_key="PORTKEY_API_KEY",
    virtual_key="VIRTUAL_KEY"
)

# Render prompt template
rendered = portkey.prompts.render(
    prompt_id="template_123",
    variables={"name": "Alice", "topic": "AI"}
)

print(f"Rendered prompt: {rendered.prompt}")

# Execute prompt with completion
response = portkey.prompts.completions(
    prompt_id="template_123",
    variables={"name": "Alice", "topic": "AI"},
    model="gpt-4",
    temperature=0.7,
    max_tokens=150
)

print(f"Response: {response.choices[0].message.content}")

# Work with prompt versions
versions = portkey.prompts.versions.list("template_123")
print(f"Available versions: {[v.id for v in versions.data]}")

# Get specific version
version = portkey.prompts.versions.retrieve("template_123", "v2")
print(f"Version details: {version.prompt}")

# Stream completion responses
for chunk in portkey.prompts.completions(
    prompt_id="template_123",
    variables={"name": "Alice", "topic": "AI"},
    model="gpt-4",
    stream=True
):
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
```

### Async Usage

```python
import asyncio
from portkey_ai import AsyncPortkey

async def manage_prompts():
    portkey = AsyncPortkey(
        api_key="PORTKEY_API_KEY",
        virtual_key="VIRTUAL_KEY"
    )

    # Render prompt asynchronously
    rendered = await portkey.prompts.render(
        prompt_id="template_123",
        variables={"name": "Bob", "topic": "Machine Learning"}
    )

    # Execute prompt completion
    response = await portkey.prompts.completions(
        prompt_id="template_123",
        variables={"name": "Bob", "topic": "Machine Learning"},
        model="gpt-4"
    )

    return rendered, response

rendered, response = asyncio.run(manage_prompts())
```

## Types

```python { .api }
class PromptRender:
    """Rendered prompt content"""
    prompt: str  # Rendered prompt text
    variables: dict  # Variables used in rendering
    prompt_id: str  # Source prompt template ID

class PromptCompletion:
    """Prompt completion response"""
    id: str  # Completion ID
    object: str  # "prompt.completion"
    created: int  # Unix timestamp
    model: str  # Model used
    choices: List[PromptChoice]  # Response choices
    usage: dict  # Token usage information
    prompt_id: str  # Source prompt ID

class PromptCompletionChunk:
    """Streaming prompt completion chunk"""
    id: str  # Completion ID
    object: str  # "prompt.completion.chunk"
    created: int  # Unix timestamp
    model: str  # Model used
    choices: List[PromptChoiceDelta]  # Response deltas
    prompt_id: str  # Source prompt ID

class PromptChoice:
    """Prompt completion choice"""
    index: int  # Choice index
    message: dict  # Response message
    finish_reason: Optional[str]  # Completion reason

class PromptChoiceDelta:
    """Streaming choice delta"""
    index: int  # Choice index
    delta: dict  # Content delta
    finish_reason: Optional[str]  # Completion reason
```