# Language Models

Model integration layer that supports Google Gemini and other LLM providers through a unified interface, with a model registry for managing configured models.

## Capabilities

### Base LLM Classes

Core classes for LLM integration and model management.

```python { .api }
class BaseLlm:
    """Base class for LLM implementations."""

    def __init__(self, model_name: str, **kwargs):
        """
        Initialize base LLM.

        Args:
            model_name (str): Name/identifier of the model
            **kwargs: Model-specific configuration parameters
        """
        pass

    def generate(self, prompt: str, **kwargs) -> str:
        """
        Generate text response from the model.

        Args:
            prompt (str): Input prompt
            **kwargs: Generation parameters (temperature, max_tokens, etc.)

        Returns:
            str: Generated response
        """
        pass

    def generate_async(self, prompt: str, **kwargs):
        """
        Generate text response asynchronously.

        Args:
            prompt (str): Input prompt
            **kwargs: Generation parameters

        Returns:
            Coroutine: Async generated response
        """
        pass

class Gemini(BaseLlm):
    """Google Gemini LLM implementation."""

    def __init__(
        self,
        model_name: str = "gemini-2.0-flash",
        api_key: str | None = None,
        project_id: str | None = None,
        location: str = "us-central1",
        **kwargs
    ):
        """
        Initialize Gemini model.

        Args:
            model_name (str): Gemini model variant
            api_key (str, optional): Google API key
            project_id (str, optional): Google Cloud project ID
            location (str): Model location/region
            **kwargs: Additional Gemini-specific parameters
        """
        pass

    def generate(
        self,
        prompt: str,
        temperature: float = 0.7,
        max_tokens: int = 1024,
        top_p: float = 0.95,
        top_k: int = 40,
        **kwargs
    ) -> str:
        """
        Generate response using Gemini.

        Args:
            prompt (str): Input prompt
            temperature (float): Sampling temperature (0.0-1.0)
            max_tokens (int): Maximum tokens to generate
            top_p (float): Top-p sampling parameter
            top_k (int): Top-k sampling parameter
            **kwargs: Additional generation parameters

        Returns:
            str: Generated response
        """
        pass

class LLMRegistry:
    """Registry for LLM model management."""

    def __init__(self):
        """Initialize the LLM registry."""
        pass

    def register(self, name: str, llm_class: type, **default_kwargs):
        """
        Register an LLM class in the registry.

        Args:
            name (str): Registry name for the LLM
            llm_class (type): LLM class to register
            **default_kwargs: Default parameters for the LLM
        """
        pass

    def get(self, name: str, **kwargs) -> BaseLlm:
        """
        Get an LLM instance from the registry.

        Args:
            name (str): Registry name of the LLM
            **kwargs: Override parameters for the LLM

        Returns:
            BaseLlm: LLM instance
        """
        pass

    def list_models(self) -> list:
        """
        List all registered models.

        Returns:
            list: List of registered model names
        """
        pass
```

## Usage Examples

### Using Gemini Models

```python
from google.adk.models import Gemini

# Initialize Gemini with default settings
gemini = Gemini(model_name="gemini-2.0-flash")

# Generate a response
response = gemini.generate(
    prompt="Explain quantum computing in simple terms",
    temperature=0.7,
    max_tokens=500
)
print(response)

# Generate with specific parameters
creative_response = gemini.generate(
    prompt="Write a creative story about AI",
    temperature=0.9,  # More creative
    max_tokens=1000
)
```

### Custom Model Configuration

```python
from google.adk.models import Gemini

# Configure Gemini with specific settings
gemini = Gemini(
    model_name="gemini-2.0-flash",
    project_id="my-project",
    location="us-west1",
    api_key="your-api-key"
)

# Use with custom generation parameters
response = gemini.generate(
    prompt="Analyze this data...",
    temperature=0.3,  # More deterministic
    top_p=0.8,
    top_k=20
)
```

### Using with Agents

```python
from google.adk.agents import Agent
from google.adk.models import Gemini

# Create model instance
model = Gemini(
    model_name="gemini-2.0-flash",
    temperature=0.7
)

# Use model with agent
agent = Agent(
    name="analysis_agent",
    model=model,  # Pass model instance
    instruction="Analyze data and provide insights"
)

# Or use model name directly (ADK will create the instance)
agent = Agent(
    name="simple_agent",
    model="gemini-2.0-flash",  # Model string
    instruction="Help with general tasks"
)
```

### Async Model Usage

```python
import asyncio
from google.adk.models import Gemini

async def async_generation():
    gemini = Gemini()

    # Generate response asynchronously
    response = await gemini.generate_async(
        prompt="What are the benefits of async programming?",
        temperature=0.5
    )

    return response

# Run async generation
response = asyncio.run(async_generation())
print(response)
```

### Model Registry

```python
from google.adk.models import LLMRegistry, Gemini

# Create registry
registry = LLMRegistry()

# Register Gemini models with different configurations
registry.register(
    "gemini-creative",
    Gemini,
    model_name="gemini-2.0-flash",
    temperature=0.9,
    top_p=0.95
)

registry.register(
    "gemini-analytical",
    Gemini,
    model_name="gemini-2.0-flash",
    temperature=0.3,
    top_p=0.8
)

# Get models from registry
creative_model = registry.get("gemini-creative")
analytical_model = registry.get("gemini-analytical")

# Override registry defaults
custom_model = registry.get("gemini-creative", temperature=0.5)
```
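
The registry's `list_models()` method from the API above can be used to inspect what has been registered. A minimal sketch, assuming it returns the registered names as plain strings:

```python
# List registered model names (per the list_models() docstring above)
for name in registry.list_models():
    print(name)  # e.g. "gemini-creative", "gemini-analytical"
```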

### Custom LLM Implementation

```python
from google.adk.models import BaseLlm, LLMRegistry

class CustomLLM(BaseLlm):
    def __init__(self, api_endpoint: str, **kwargs):
        super().__init__(**kwargs)
        self.api_endpoint = api_endpoint

    def generate(self, prompt: str, **kwargs) -> str:
        # Custom implementation for your LLM provider
        # Make API call to your endpoint
        return "Custom LLM response"

    async def generate_async(self, prompt: str, **kwargs):
        # Async implementation
        return "Async custom LLM response"

# Use custom LLM
custom_llm = CustomLLM(
    model_name="custom-model-v1",
    api_endpoint="https://api.custom-llm.com"
)

# Register in registry
registry = LLMRegistry()
registry.register("custom", CustomLLM, api_endpoint="https://api.custom-llm.com")
```
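
Once registered, the custom model can be pulled back out and used like any built-in model. A short sketch, assuming `get()` merges the registered defaults with per-call overrides as described in the API above:

```python
# Retrieve the custom LLM; api_endpoint comes from the registered defaults
custom = registry.get("custom", model_name="custom-model-v1")
print(custom.generate("Hello from the registry"))
```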

### Model Comparison

```python
from google.adk.models import Gemini

# Create different model configurations
models = {
    "conservative": Gemini(temperature=0.1),
    "balanced": Gemini(temperature=0.5),
    "creative": Gemini(temperature=0.9)
}

prompt = "Describe the future of AI"

# Compare responses
for name, model in models.items():
    response = model.generate(prompt, max_tokens=200)
    print(f"{name.upper()} Model Response:")
    print(response)
    print("-" * 50)
```

### Batch Processing

```python
import asyncio
from google.adk.models import Gemini

gemini = Gemini()

prompts = [
    "Summarize the history of computing",
    "Explain machine learning basics",
    "Describe renewable energy types"
]

# Process multiple prompts sequentially
responses = []
for prompt in prompts:
    response = gemini.generate(prompt, max_tokens=300)
    responses.append(response)

# Or async batch processing
async def batch_generate(prompts):
    tasks = [gemini.generate_async(prompt) for prompt in prompts]
    return await asyncio.gather(*tasks)

# responses = asyncio.run(batch_generate(prompts))
```
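
With plain `asyncio.gather`, the first failed call raises and the other results are lost. A hedged variant using `return_exceptions=True` (standard asyncio behavior, not ADK-specific) keeps the successful responses:

```python
async def batch_generate_tolerant(prompts):
    tasks = [gemini.generate_async(prompt) for prompt in prompts]
    # return_exceptions=True returns exceptions as results instead of raising
    results = await asyncio.gather(*tasks, return_exceptions=True)
    # Keep only successful responses; failures could be logged instead
    return [r for r in results if not isinstance(r, Exception)]
```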