# Test Execution and Monitoring

Execute load tests and monitor performance through the `LoadTestRunClient`. This includes starting and stopping test runs, monitoring execution progress, collecting performance metrics, and accessing detailed results and logs from both client-side and server-side perspectives.

## Capabilities

### Test Run Management

Create, monitor, and control test run execution. A test run executes the load test configuration against the target endpoints and collects performance metrics.

```python { .api }
def begin_test_run(
    test_run_id: str,
    body: Union[JSON, IO],
    *,
    old_test_run_id: Optional[str] = None,
    **kwargs
) -> LROPoller[JSON]:
    """
    Create and start a new test run. Long running operation.

    Parameters:
    - test_run_id (str): Unique identifier for the test run
    - body (Union[JSON, IO]): Test run configuration
    - old_test_run_id (str, optional): Previous test run ID for comparison

    Returns:
        LROPoller[JSON]: Poller for test run execution status
    """

def get_test_run(test_run_id: str, **kwargs) -> JSON:
    """
    Get test run details and current status.

    Parameters:
    - test_run_id (str): Unique test run identifier

    Returns:
        JSON: Test run details including status, start/end times, and statistics
    """

def delete_test_run(test_run_id: str, **kwargs) -> None:
    """
    Delete a test run and its results.

    Parameters:
    - test_run_id (str): Unique test run identifier
    """

def stop_test_run(test_run_id: str, **kwargs) -> JSON:
    """
    Stop a running test before completion.

    Parameters:
    - test_run_id (str): Unique test run identifier

    Returns:
        JSON: Updated test run status
    """

def list_test_runs(
    *,
    orderby: Optional[str] = None,
    search: Optional[str] = None,
    test_id: Optional[str] = None,
    execution_from: Optional[datetime] = None,
    execution_to: Optional[datetime] = None,
    status: Optional[str] = None,
    **kwargs
) -> Iterable[JSON]:
    """
    List test runs with optional filtering.

    Parameters:
    - orderby (str, optional): Sort order ("lastModifiedDateTime asc/desc", "createdDateTime asc/desc")
    - search (str, optional): Search by displayName or createdBy
    - test_id (str, optional): Filter by test ID
    - execution_from (datetime, optional): Filter by execution start time
    - execution_to (datetime, optional): Filter by execution end time
    - status (str, optional): Filter by status ("ACCEPTED", "NOTSTARTED", "PROVISIONING", "PROVISIONED", "CONFIGURING", "CONFIGURED", "EXECUTING", "EXECUTED", "DEPROVISIONING", "DEPROVISIONED", "DONE", "CANCELLING", "CANCELLED", "FAILED")

    Returns:
        Iterable[JSON]: Paginated list of test runs
    """
```

#### Example: Executing a Load Test

```python
from azure.identity import DefaultAzureCredential
from azure.developer.loadtesting import LoadTestRunClient
import time

credential = DefaultAzureCredential()
client = LoadTestRunClient(
    endpoint="https://your-resource.loadtest.azure.com",
    credential=credential
)

# Define test run configuration
test_run_config = {
    "testId": "my-load-test",
    "displayName": "Production Load Test Run",
    "description": "Load test against production API",
    "loadTestConfiguration": {
        "engineInstances": 2
    },
    "environmentVariables": {
        "BASE_URL": "https://api.myapp.com",
        "USERS_PER_ENGINE": "50"
    },
    "secrets": {
        "API_KEY": {
            "value": "https://my-vault.vault.azure.net/secrets/api-key",
            "type": "AKV_SECRET_URI"
        }
    }
}

with client:
    # Start the test run
    print("Starting test run...")
    run_poller = client.begin_test_run("prod-run-001", test_run_config)

    # Monitor progress
    while not run_poller.done():
        test_run = client.get_test_run("prod-run-001")
        print(f"Status: {test_run['status']} - {test_run.get('statusMessage', '')}")
        time.sleep(30)

    # Get final results
    final_result = run_poller.result()
    print(f"Test completed with status: {final_result['status']}")
    print(f"Test result: {final_result['testResult']}")
```
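
Beyond starting a run, the same client can enumerate historical runs and stop one that is still executing. The sketch below is a minimal example built on the `list_test_runs` and `stop_test_run` calls documented above; the filter values and the assumption that returned run objects expose `testRunId`, `status`, and `displayName` fields are illustrative and should be checked against your service responses.

```python
from datetime import datetime, timedelta, timezone

with client:
    # List runs of a specific test started in the last day that are still executing
    recent_runs = client.list_test_runs(
        test_id="my-load-test",
        status="EXECUTING",
        execution_from=datetime.now(timezone.utc) - timedelta(days=1),
        orderby="createdDateTime desc"
    )

    for run in recent_runs:
        print(f"{run['testRunId']}: {run['status']}")

        # Stop runs matching an illustrative condition
        if run.get("displayName", "").startswith("Canary"):
            stopped = client.stop_test_run(run["testRunId"])
            print(f"Stopped {run['testRunId']} -> {stopped['status']}")
```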

### Test Run File Access

Access test run output files including logs, results, and artifacts generated during test execution.

```python { .api }
def get_test_run_file(test_run_id: str, file_name: str, **kwargs) -> JSON:
    """
    Get test run file details and download URL.

    Parameters:
    - test_run_id (str): Unique test run identifier
    - file_name (str): Name of the file to retrieve

    Returns:
        JSON: File metadata including download URL and expiration
    """
```

#### Example: Accessing Test Results

```python
with client:
    # Get the test run details
    test_run = client.get_test_run("prod-run-001")

    # Look up the results file and its download URL
    results_file = client.get_test_run_file("prod-run-001", "results.xml")
    download_url = results_file['url']

    # Download and save the results
    import requests
    response = requests.get(download_url)
    with open("test-results.xml", "wb") as f:
        f.write(response.content)
```
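
If you download several artifacts, it can help to wrap the URL lookup and HTTP fetch in a small helper. This is a minimal sketch using only `get_test_run_file` and `requests` as in the example above; the file names you pass in depend on what your test run actually produced.

```python
import requests

def download_test_run_file(client, test_run_id: str, file_name: str, target_path: str) -> None:
    """Fetch a test run artifact via its short-lived download URL and save it locally."""
    file_info = client.get_test_run_file(test_run_id, file_name)
    response = requests.get(file_info["url"], timeout=60)
    response.raise_for_status()  # fail loudly on expired or invalid URLs
    with open(target_path, "wb") as f:
        f.write(response.content)

with client:
    download_test_run_file(client, "prod-run-001", "results.xml", "test-results.xml")
```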

### Metrics and Performance Monitoring

Retrieve comprehensive performance metrics collected during test execution, including client-side metrics (response times, throughput, error rates) and server-side Azure resource metrics.

```python { .api }
def get_metric_namespaces(test_run_id: str, **kwargs) -> JSON:
    """
    Get available metric namespaces for a test run.

    Parameters:
    - test_run_id (str): Unique test run identifier

    Returns:
        JSON: Available metric namespaces including LoadTestRunMetrics and Azure resource namespaces
    """

def get_metric_definitions(test_run_id: str, *, metric_namespace: str, **kwargs) -> JSON:
    """
    Get metric definitions for a specific namespace.

    Parameters:
    - test_run_id (str): Unique test run identifier
    - metric_namespace (str): Metric namespace (e.g., "LoadTestRunMetrics")

    Returns:
        JSON: Available metrics and their definitions for the namespace
    """

def list_metrics(
    test_run_id: str,
    body: Optional[Union[JSON, IO]] = None,
    *,
    metric_namespace: str,
    metric_name: str,
    time_interval: str,
    interval: Optional[str] = None,
    aggregation: Optional[str] = None,
    **kwargs
) -> Iterable[JSON]:
    """
    List metric values for a test run.

    Parameters:
    - test_run_id (str): Unique test run identifier
    - body (Optional[Union[JSON, IO]]): Optional request body for metric filtering
    - metric_namespace (str): Metric namespace (e.g., "LoadTestRunMetrics")
    - metric_name (str): Specific metric name (e.g., "response_time_ms", "requests_per_sec")
    - time_interval (str): Time range in ISO 8601 format (required)
    - interval (str, optional): Aggregation interval (e.g., "PT1M" for 1 minute)
    - aggregation (str, optional): Aggregation type ("Average", "Count", "Maximum", "Minimum", "Total")

    Returns:
        Iterable[JSON]: Time series metric data points
    """

def list_metric_dimension_values(
    test_run_id: str,
    name: str,
    *,
    metric_namespace: str,
    metric_name: str,
    time_interval: str,
    interval: Optional[str] = None,
    **kwargs
) -> Iterable[str]:
    """
    List metric dimension values (e.g., different samplers or URLs).

    Parameters:
    - test_run_id (str): Unique test run identifier
    - name (str): Dimension name (e.g., "SamplerName", "RequestName")
    - metric_namespace (str): Metric namespace
    - metric_name (str): Metric name
    - time_interval (str): Time range filter (required)
    - interval (str, optional): Aggregation interval

    Returns:
        Iterable[str]: Available dimension values
    """
```

#### Example: Analyzing Performance Metrics

```python
with client:
    test_run_id = "prod-run-001"

    # Get available metric namespaces
    namespaces = client.get_metric_namespaces(test_run_id)
    print("Available namespaces:")
    for ns in namespaces['value']:
        print(f"- {ns['name']}")

    # Get client-side metric definitions
    metrics_def = client.get_metric_definitions(
        test_run_id,
        metric_namespace="LoadTestRunMetrics"
    )

    print("\nAvailable client metrics:")
    for metric in metrics_def['value']:
        print(f"- {metric['name']['value']}: {metric['displayDescription']}")

    # Get response time metrics
    response_times = list(client.list_metrics(
        test_run_id,
        metric_namespace="LoadTestRunMetrics",
        metric_name="response_time_ms",
        time_interval="PT30M",  # Last 30 minutes
        aggregation="Average",
        interval="PT1M"  # 1-minute intervals
    ))

    print(f"\nResponse time data points: {len(response_times)}")
    for point in response_times[:5]:  # Show first 5 points
        timestamp = point['timestamp']
        value = point['average']
        print(f"  {timestamp}: {value:.2f}ms")

    # Get throughput metrics
    throughput = list(client.list_metrics(
        test_run_id,
        metric_namespace="LoadTestRunMetrics",
        metric_name="requests_per_sec",
        time_interval="PT30M",
        aggregation="Average"
    ))

    if throughput:
        avg_throughput = sum(p['average'] for p in throughput) / len(throughput)
        print(f"\nAverage throughput: {avg_throughput:.2f} requests/sec")

    # Get error rate
    errors = list(client.list_metrics(
        test_run_id,
        metric_namespace="LoadTestRunMetrics",
        metric_name="error",
        time_interval="PT30M",
        aggregation="Percentage"
    ))

    if errors:
        error_rate = errors[-1]['average']  # Latest error rate
        print(f"Final error rate: {error_rate:.2f}%")
```
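
Metrics can also be broken out per dimension, for example per sampler or request, using `list_metric_dimension_values`. The sketch below assumes a JMeter-style `SamplerName` dimension (one of the dimension names mentioned in the API notes above) and reuses the namespace, metric, and time interval from the previous example.

```python
with client:
    test_run_id = "prod-run-001"

    # Discover which samplers reported response-time data during the run
    sampler_names = list(client.list_metric_dimension_values(
        test_run_id,
        "SamplerName",
        metric_namespace="LoadTestRunMetrics",
        metric_name="response_time_ms",
        time_interval="PT30M",
        interval="PT1M"
    ))

    print("Samplers observed during the run:")
    for sampler in sampler_names:
        print(f"- {sampler}")
```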

### Application Components for Test Runs

Configure and retrieve the application components monitored for a specific test run, enabling server-side metrics collection during test execution.

```python { .api }
def create_or_update_app_components(test_run_id: str, body: Union[JSON, IO], **kwargs) -> JSON:
    """
    Create or update app components configuration for a test run.

    Parameters:
    - test_run_id (str): Unique test run identifier
    - body (Union[JSON, IO]): App components configuration

    Returns:
        JSON: App components configuration for the test run
    """

def get_app_components(test_run_id: str, **kwargs) -> JSON:
    """
    Get app components configuration for a test run.

    Parameters:
    - test_run_id (str): Unique test run identifier

    Returns:
        JSON: App components configuration
    """
```
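
As a rough illustration, the snippet below associates an App Service instance with a test run and reads the configuration back. The payload shape shown here (a `components` map keyed by resource ID with `resourceId`, `resourceName`, and `resourceType` entries) is an assumption rather than a confirmed schema, so verify it against your service version; the resource IDs are placeholders.

```python
with client:
    test_run_id = "prod-run-001"

    # Hypothetical app-components payload; resource IDs are placeholders
    app_components = {
        "components": {
            "/subscriptions/.../providers/Microsoft.Web/sites/my-app": {
                "resourceId": "/subscriptions/.../providers/Microsoft.Web/sites/my-app",
                "resourceName": "my-app",
                "resourceType": "Microsoft.Web/sites"
            }
        }
    }

    client.create_or_update_app_components(test_run_id, app_components)

    # Read the configuration back to verify it was applied
    configured = client.get_app_components(test_run_id)
    print(configured)
```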

### Server Metrics for Test Runs

Configure and retrieve server-side metrics collection for specific test runs, enabling monitoring of Azure resource performance during load testing.

```python { .api }
def create_or_update_server_metrics_config(test_run_id: str, body: Union[JSON, IO], **kwargs) -> JSON:
    """
    Create or update server metrics configuration for a test run.

    Parameters:
    - test_run_id (str): Unique test run identifier
    - body (Union[JSON, IO]): Server metrics configuration

    Returns:
        JSON: Server metrics configuration for the test run
    """

def get_server_metrics_config(test_run_id: str, **kwargs) -> JSON:
    """
    Get server metrics configuration for a test run.

    Parameters:
    - test_run_id (str): Unique test run identifier

    Returns:
        JSON: Server metrics configuration
    """
```

#### Example: Monitoring Server Metrics During Test Run

```python
with client:
    test_run_id = "prod-run-001"

    # Configure server metrics for this specific test run
    server_metrics = {
        "metrics": {
            "webapp-cpu": {
                "resourceId": "/subscriptions/.../providers/Microsoft.Web/sites/my-app",
                "metricNamespace": "Microsoft.Web/sites",
                "name": "CpuPercentage",
                "aggregation": "Average"
            }
        }
    }

    # Apply configuration to the test run
    client.create_or_update_server_metrics_config(test_run_id, server_metrics)

    # After test completion, get server metrics
    server_cpu_data = list(client.list_metrics(
        test_run_id,
        metric_namespace="Microsoft.Web/sites",
        metric_name="CpuPercentage",
        time_interval="PT30M",
        aggregation="Average"
    ))

    max_cpu = max(point['average'] for point in server_cpu_data)
    print(f"Peak server CPU usage: {max_cpu:.1f}%")
```

## Async Operations

All test execution operations have async equivalents in `azure.developer.loadtesting.aio.LoadTestRunClient`:

```python
from azure.developer.loadtesting.aio import LoadTestRunClient
from azure.identity.aio import DefaultAzureCredential
import asyncio

async def run_load_test():
    credential = DefaultAzureCredential()
    client = LoadTestRunClient(
        endpoint="https://your-resource.loadtest.azure.com",
        credential=credential
    )

    async with client:
        # Start test run (test_config is a test run configuration dict,
        # shaped like test_run_config in the synchronous example above)
        run_poller = await client.begin_test_run("async-run-001", test_config)

        # Monitor progress
        while not run_poller.done():
            await asyncio.sleep(30)
            test_run = await client.get_test_run("async-run-001")
            print(f"Status: {test_run['status']}")

        # Get results
        result = await run_poller.result()

        # Collect metrics
        metrics = []
        async for metric_point in client.list_metrics(
            "async-run-001",
            metric_namespace="LoadTestRunMetrics",
            metric_name="response_time_ms",
            time_interval="PT30M"
        ):
            metrics.append(metric_point)

        return result, metrics

# Run the async function
result, metrics = asyncio.run(run_load_test())
```