
# Execution Control and Batch Processing

Comprehensive prompt execution methods supporting both synchronous and asynchronous operation, batch processing of multiple questions, and advanced error handling capabilities.

## Capabilities

### Batch Prompt Execution

Execute multiple prompts in sequence from various input formats with consolidated result handling.

```python { .api }
def prompt(questions: Union[Dict, Iterable[Mapping]],
           answers: Optional[Mapping] = None, patch_stdout: bool = False,
           true_color: bool = False, kbi_msg: str = DEFAULT_KBI_MESSAGE,
           **kwargs) -> Dict[str, Any]:
    """
    Execute multiple prompts synchronously with error handling.

    Args:
        questions: Questions configuration as dict or iterable of mappings
        answers: Pre-filled answers to skip certain questions
        patch_stdout: Patch stdout to prevent interference
        true_color: Enable true color support
        kbi_msg: Message displayed on keyboard interrupt
        **kwargs: Additional arguments passed to individual prompts

    Returns:
        Dictionary mapping question names to user responses

    Raises:
        KeyboardInterrupt: User cancelled with appropriate message
        PromptParameterException: Missing required prompt parameters
    """

async def prompt_async(questions: Union[Dict, Iterable[Mapping]],
                       answers: Optional[Mapping] = None, patch_stdout: bool = False,
                       true_color: bool = False, kbi_msg: str = DEFAULT_KBI_MESSAGE,
                       **kwargs) -> Dict[str, Any]:
    """
    Execute multiple prompts asynchronously with error handling.

    Args:
        questions: Questions configuration as dict or iterable of mappings
        answers: Pre-filled answers to skip certain questions
        patch_stdout: Patch stdout to prevent interference
        true_color: Enable true color support
        kbi_msg: Message displayed on keyboard interrupt
        **kwargs: Additional arguments passed to individual prompts

    Returns:
        Dictionary mapping question names to user responses

    Raises:
        KeyboardInterrupt: User cancelled with appropriate message
        PromptParameterException: Missing required prompt parameters
    """

def unsafe_prompt(questions: Union[Dict, Iterable[Mapping]],
                  answers: Optional[Mapping] = None, patch_stdout: bool = False,
                  true_color: bool = False, **kwargs) -> Dict[str, Any]:
    """
    Execute multiple prompts synchronously without error handling.

    Args:
        questions: Questions configuration as dict or iterable of mappings
        answers: Pre-filled answers to skip certain questions
        patch_stdout: Patch stdout to prevent interference
        true_color: Enable true color support
        **kwargs: Additional arguments passed to individual prompts

    Returns:
        Dictionary mapping question names to user responses
    """

async def unsafe_prompt_async(questions: Union[Dict, Iterable[Mapping]],
                              answers: Optional[Mapping] = None, patch_stdout: bool = False,
                              true_color: bool = False, **kwargs) -> Dict[str, Any]:
    """
    Execute multiple prompts asynchronously without error handling.

    Args:
        questions: Questions configuration as dict or iterable of mappings
        answers: Pre-filled answers to skip certain questions
        patch_stdout: Patch stdout to prevent interference
        true_color: Enable true color support
        **kwargs: Additional arguments passed to individual prompts

    Returns:
        Dictionary mapping question names to user responses
    """
```

### Individual Question Control

Advanced control methods for individual Question instances with conditional execution and error handling.

```python { .api }
class Question:
    def ask(self, patch_stdout: bool = False,
            kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
        """
        Execute question synchronously with error handling.

        Args:
            patch_stdout: Patch stdout to prevent interference
            kbi_msg: Message displayed on keyboard interrupt

        Returns:
            User response value

        Raises:
            KeyboardInterrupt: User cancelled with appropriate message
        """

    def unsafe_ask(self, patch_stdout: bool = False) -> Any:
        """
        Execute question synchronously without error handling.

        Args:
            patch_stdout: Patch stdout to prevent interference

        Returns:
            User response value
        """

    async def ask_async(self, patch_stdout: bool = False,
                        kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
        """
        Execute question asynchronously with error handling.

        Args:
            patch_stdout: Patch stdout to prevent interference
            kbi_msg: Message displayed on keyboard interrupt

        Returns:
            User response value

        Raises:
            KeyboardInterrupt: User cancelled with appropriate message
        """

    async def unsafe_ask_async(self, patch_stdout: bool = False) -> Any:
        """
        Execute question asynchronously without error handling.

        Args:
            patch_stdout: Patch stdout to prevent interference

        Returns:
            User response value
        """

    def skip_if(self, condition: bool, default: Any = None) -> Question:
        """
        Conditionally skip question execution.

        Args:
            condition: If True, skip question and return default
            default: Value to return when question is skipped

        Returns:
            Self for method chaining
        """
```

### Exception Handling

Custom exception for prompt parameter validation errors.

```python { .api }
class PromptParameterException(ValueError):
    """
    Exception raised when prompt parameters are missing or invalid.

    Inherits from ValueError for compatibility with standard error handling.
    """
```

## Usage Examples

### Basic Batch Execution

```python
import questionary

# Questions as dictionary
questions = {
    "name": {
        "type": "text",
        "message": "What's your name?"
    },
    "age": {
        "type": "text",
        "message": "What's your age?",
        "validate": lambda x: "Must be a number" if not x.isdigit() else True
    },
    "confirmed": {
        "type": "confirm",
        "message": "Is this correct?"
    }
}

answers = questionary.prompt(questions)
print(f"Hello {answers['name']}, age {answers['age']}")
```

### Questions as List of Dictionaries

```python
import questionary

# Questions as list for ordered execution
questions = [
    {
        "type": "select",
        "name": "deployment_type",
        "message": "Deployment type:",
        "choices": ["development", "staging", "production"]
    },
    {
        "type": "text",
        "name": "app_name",
        "message": "Application name:",
        "default": "my-app"
    },
    {
        "type": "confirm",
        "name": "auto_deploy",
        "message": "Enable automatic deployment?"
    }
]

config = questionary.prompt(questions)
print(f"Deploying {config['app_name']} to {config['deployment_type']}")
```

### Pre-filled Answers

```python
import questionary

questions = {
    "database_host": {
        "type": "text",
        "message": "Database host:",
        "default": "localhost"
    },
    "database_port": {
        "type": "text",
        "message": "Database port:",
        "default": "5432"
    },
    "database_name": {
        "type": "text",
        "message": "Database name:"
    }
}

# Pre-fill some answers
existing_config = {
    "database_host": "prod-db.example.com",
    "database_port": "3306"
}

# Only database_name will be prompted
final_config = questionary.prompt(questions, answers=existing_config)
print("Database config:", final_config)
```

### Async Batch Execution

```python
import questionary
import asyncio

async def async_setup():
    questions = {
        "service_name": {
            "type": "text",
            "message": "Service name:"
        },
        "replicas": {
            "type": "select",
            "message": "Number of replicas:",
            "choices": ["1", "3", "5", "10"]
        },
        "monitoring": {
            "type": "confirm",
            "message": "Enable monitoring?"
        }
    }

    config = await questionary.prompt_async(questions)
    print(f"Configuring {config['service_name']} with {config['replicas']} replicas")
    return config

# Run async
# config = asyncio.run(async_setup())
```

### Individual Question Control

```python
import questionary

# Create question instances
name_question = questionary.text("Enter your name:")
age_question = questionary.text("Enter your age:")

# Execute with different methods
try:
    name = name_question.ask()
    age = age_question.ask()
    print(f"Hello {name}, age {age}")
except KeyboardInterrupt:
    print("Setup cancelled")

# Using unsafe methods (no error handling)
backup_name = name_question.unsafe_ask()
```

### Conditional Question Execution

```python
import questionary

# Ask initial question
user_type = questionary.select(
    "User type:",
    choices=["admin", "regular"]
).ask()

# Conditional execution based on response
admin_password = questionary.password("Admin password:").skip_if(
    condition=user_type != "admin",
    default=None
)

password = admin_password.ask()
if password:
    print("Admin access granted")
else:
    print("Regular user mode")
```

### Advanced Error Handling

```python
import questionary
from questionary import PromptParameterException

def safe_prompt_execution():
    questions = {
        "critical_setting": {
            "type": "confirm",
            "message": "Enable critical feature?",
            "default": False
        }
    }

    try:
        answers = questionary.prompt(
            questions,
            kbi_msg="Configuration cancelled - using defaults"
        )
        return answers

    except KeyboardInterrupt:
        print("User cancelled - applying safe defaults")
        return {"critical_setting": False}

    except PromptParameterException as e:
        print(f"Configuration error: {e}")
        return None

    except Exception as e:
        print(f"Unexpected error: {e}")
        return None

result = safe_prompt_execution()
```

### Complex Workflow Control

```python
import questionary

def interactive_wizard():
    """Multi-step wizard with branching logic"""

    # Step 1: Choose workflow type
    workflow = questionary.select(
        "What would you like to configure?",
        choices=["database", "web_server", "cache"]
    ).ask()

    base_questions = {
        "environment": {
            "type": "select",
            "message": "Environment:",
            "choices": ["development", "staging", "production"]
        }
    }

    # Step 2: Environment-specific questions
    if workflow == "database":
        db_questions = {
            **base_questions,
            "db_type": {
                "type": "select",
                "message": "Database type:",
                "choices": ["postgresql", "mysql", "mongodb"]
            },
            "backup_enabled": {
                "type": "confirm",
                "message": "Enable automated backups?"
            }
        }
        config = questionary.prompt(db_questions)

    elif workflow == "web_server":
        web_questions = {
            **base_questions,
            "port": {
                "type": "text",
                "message": "Port number:",
                "default": "8080"
            },
            "ssl_enabled": {
                "type": "confirm",
                "message": "Enable SSL?"
            }
        }
        config = questionary.prompt(web_questions)

    else:  # cache
        cache_questions = {
            **base_questions,
            "cache_size": {
                "type": "select",
                "message": "Cache size:",
                "choices": ["128MB", "512MB", "1GB", "4GB"]
            }
        }
        config = questionary.prompt(cache_questions)

    # Step 3: Confirmation
    confirmed = questionary.confirm(
        f"Apply {workflow} configuration?",
        default=True
    ).ask()

    if confirmed:
        print(f"Applying {workflow} configuration:", config)
        return config
    else:
        print("Configuration cancelled")
        return None

# Run the wizard
# final_config = interactive_wizard()
```

### Output Control

```python
import questionary

# Control stdout patching for integration with other tools
questions = {
    "setting": {
        "type": "text",
        "message": "Enter setting value:"
    }
}

# Patch stdout to prevent interference with other output
config = questionary.prompt(questions, patch_stdout=True)

# Enable true color support for better styling
styled_config = questionary.prompt(
    questions,
    true_color=True,
    patch_stdout=True
)
```