or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

assertions.md configuration.md fixtures.md index.md marks.md reporting.md test-collection.md test-utilities.md testing-functions.md warnings.md

docs/marks.md

0

# Marks and Parametrization

1

2

Mark system for test categorization, parametrization, and metadata attachment enabling flexible test selection and data-driven testing. The mark system provides a powerful way to organize and control test execution.

3

4

## Capabilities

5

6

### Mark Generation

7

8

Dynamic mark creation system providing access to built-in and custom marks.

9

10

```python { .api }

11

class MarkGenerator:

12

"""Generates mark decorators dynamically."""

13

14

def __getattr__(self, name: str) -> MarkDecorator:

15

"""Create MarkDecorator for any mark name."""

16

17

# Built-in marks

18

def parametrize(

19

self,

20

argnames: str | list[str] | tuple[str, ...],

21

argvalues,

22

*,

23

indirect: bool | list[str] = False,

24

ids=None,

25

scope: str | None = None

26

) -> MarkDecorator:

27

"""Parametrize test function."""

28

29

def skip(self, reason: str = "") -> MarkDecorator:

30

"""Skip test unconditionally."""

31

32

def skipif(

33

self,

34

condition,

35

*,

36

reason: str = ""

37

) -> MarkDecorator:

38

"""Skip test conditionally."""

39

40

def xfail(

41

self,

42

condition=True,

43

*,

44

reason: str = "",

45

raises=None,

46

run: bool = True,

47

strict: bool = False

48

) -> MarkDecorator:

49

"""Mark test as expected to fail."""

50

51

def usefixtures(self, *names: str) -> MarkDecorator:

52

"""Use fixtures without declaring them as arguments."""

53

54

def filterwarnings(self, *filters: str) -> MarkDecorator:

55

"""Filter warnings for specific test."""

56

57

# Global mark instance

58

mark: MarkGenerator

59

```

60

61

**Usage Example:**

62

63

```python

64

import pytest

65

66

# Basic marks

67

@pytest.mark.slow

68

def test_slow_operation():

69

pass

70

71

@pytest.mark.skip(reason="Not implemented yet")

72

def test_future_feature():

73

pass

74

75

@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires Python 3.9+")

76

def test_new_feature():

77

pass

78

79

@pytest.mark.xfail(reason="Known bug")

80

def test_known_issue():

81

assert False

82

83

# Custom marks

84

@pytest.mark.integration

85

@pytest.mark.database

86

def test_database_integration():

87

pass

88

89

# Using fixtures without parameters

90

@pytest.mark.usefixtures("setup_test_data", "cleanup_temp_files")

91

def test_with_automatic_fixtures():

92

pass

93

94

# Filter warnings

95

@pytest.mark.filterwarnings("ignore::DeprecationWarning")

96

def test_legacy_code():

97

pass

98

```

99

100

### Mark Decorators

101

102

Decorators that apply marks to test functions and classes.

103

104

```python { .api }

105

class MarkDecorator:

106

"""Decorator that applies marks to test functions and classes."""

107

108

# Attributes

109

mark: Mark # The underlying mark

110

markname: str # Mark name

111

args: tuple # Mark arguments

112

kwargs: dict # Mark keyword arguments

113

114

def __call__(self, func_or_class):

115

"""Apply mark to function or class."""

116

117

def with_args(self, *args, **kwargs) -> MarkDecorator:

118

"""Create new decorator with additional arguments."""

119

120

def combine(self, other: MarkDecorator) -> MarkDecorator:

121

"""Combine with another mark decorator."""

122

```

123

124

### Mark Objects

125

126

Represents a mark applied to a test with its arguments and metadata.

127

128

```python { .api }

129

class Mark:

130

"""Represents a mark applied to a test."""

131

132

# Attributes

133

name: str # Mark name

134

args: tuple # Mark arguments

135

kwargs: dict # Mark keyword arguments

136

137

def combined_with(self, other: Mark) -> Mark:

138

"""Combine with another mark."""

139

140

def _for_parametrize(self) -> Mark:

141

"""Create mark suitable for parametrization."""

142

```

143

144

### Test Parametrization

145

146

Create parameter sets for data-driven testing.

147

148

```python { .api }

149

def param(*values, marks=(), id=None):

150

"""

151

Create parameter sets for pytest.mark.parametrize.

152

153

Parameters:

154

- values: Parameter values for the test

155

- marks: Marks to apply to this parameter set

156

- id: Identifier for this parameter set

157

158

Returns:

159

ParameterSet object

160

"""

161

162

class ParameterSet:

163

"""Represents a set of parameters for parametrized tests."""

164

165

# Attributes

166

values: tuple # Parameter values

167

marks: tuple[Mark, ...] # Applied marks

168

id: str | None # Parameter set identifier

169

170

@classmethod

171

def param(cls, *values, marks=(), id=None) -> ParameterSet:

172

"""Create parameter set."""

173

174

@classmethod

175

def extract_from(

176

cls,

177

parameterset,

178

legacy_force_tuple: bool = False

179

) -> ParameterSet:

180

"""Extract parameter set from various input formats."""

181

```

182

183

**Usage Example:**

184

185

```python

186

import pytest

187

188

# Basic parametrization

189

@pytest.mark.parametrize("input,expected", [

190

(2, 4),

191

(3, 9),

192

(4, 16),

193

])

194

def test_square(input, expected):

195

assert input ** 2 == expected

196

197

# Named parameters with IDs

198

@pytest.mark.parametrize(

199

"operation,a,b,expected",

200

[

201

("add", 2, 3, 5),

202

("multiply", 4, 5, 20),

203

("subtract", 10, 3, 7),

204

],

205

ids=["addition", "multiplication", "subtraction"]

206

)

207

def test_math_operations(operation, a, b, expected):

208

if operation == "add":

209

assert a + b == expected

210

elif operation == "multiply":

211

assert a * b == expected

212

elif operation == "subtract":

213

assert a - b == expected

214

215

# Using pytest.param with marks

216

@pytest.mark.parametrize("value", [

217

1,

218

2,

219

pytest.param(3, marks=pytest.mark.xfail(reason="Known issue with 3")),

220

pytest.param(4, marks=pytest.mark.skip(reason="Skip 4 for now")),

221

pytest.param(5, id="special_five"),

222

])

223

def test_values(value):

224

assert value < 10

225

226

# Multiple parametrizations

227

@pytest.mark.parametrize("browser", ["chrome", "firefox"])

228

@pytest.mark.parametrize("os", ["windows", "linux", "mac"])

229

def test_cross_platform(browser, os):

230

# Runs 6 times (2 browsers × 3 operating systems)

231

pass

232

233

# Indirect parametrization (parameters passed to fixtures)

234

@pytest.fixture

235

def database(request):

236

db_type = request.param

237

return setup_database(db_type)

238

239

@pytest.mark.parametrize("database", ["sqlite", "postgresql"], indirect=True)

240

def test_with_database(database):

241

# database fixture receives "sqlite" or "postgresql" as request.param

242

pass

243

```

244

245

### Mark Constants

246

247

Special constants for controlling mark behavior.

248

249

```python { .api }

250

HIDDEN_PARAM = object()  # Sentinel passed as a parameter set's `id` to hide that set from test IDs

251

```

252

253

**Usage Example:**

254

255

```python
@pytest.mark.parametrize("value,description", [
    pytest.param(1, "first", id="first"),
    pytest.param(2, "second", id="second"),
    # This parameter set's ID is hidden from the generated test name
    pytest.param(3, "hidden", id=pytest.HIDDEN_PARAM),
])
def test_with_hidden_param(value, description):
    pass
```

264

265

## Built-in Marks

266

267

### Skip Marks

268

269

```python

270

# Unconditional skip

271

@pytest.mark.skip(reason="Not implemented")

272

273

# Conditional skip

274

@pytest.mark.skipif(condition, reason="Explanation")

275

@pytest.mark.skipif(sys.platform == "win32", reason="Unix only")

276

@pytest.mark.skipif(not has_feature(), reason="Feature not available")

277

```

278

279

### Expected Failure Marks

280

281

```python

282

# Basic expected failure

283

@pytest.mark.xfail(reason="Known bug")

284

285

# Conditional expected failure

286

@pytest.mark.xfail(sys.version_info < (3, 9), reason="Requires Python 3.9+")

287

288

# Strict xfail (failure if test unexpectedly passes)

289

@pytest.mark.xfail(strict=True, reason="Should definitely fail")

290

291

# Expected specific exception

292

@pytest.mark.xfail(raises=ValueError, reason="Should raise ValueError")

293

294

# Don't run, just mark as xfail

295

@pytest.mark.xfail(run=False, reason="Don't run this test")

296

```

297

298

### Parametrization Marks

299

300

```python

301

# Simple parametrization

302

@pytest.mark.parametrize("value", [1, 2, 3])

303

304

# Multiple parameters

305

@pytest.mark.parametrize("a,b,expected", [(1, 2, 3), (2, 3, 5)])

306

307

# Custom IDs

308

@pytest.mark.parametrize("value", [1, 2, 3], ids=["one", "two", "three"])

309

310

# Indirect parametrization

311

@pytest.mark.parametrize("fixture_name", ["value1", "value2"], indirect=True)

312

```

313

314

### Fixture Marks

315

316

```python

317

# Use fixtures without declaring as parameters

318

@pytest.mark.usefixtures("setup_database", "cleanup_temp_files")

319

320

# Filter warnings

321

@pytest.mark.filterwarnings("ignore::DeprecationWarning")

322

@pytest.mark.filterwarnings("error::UserWarning")

323

```

324

325

## Running Tests with Marks

326

327

Command-line options for running specific marked tests:

328

329

```bash

330

# Run only tests with specific mark

331

pytest -m "slow"

332

333

# Run tests with multiple marks

334

pytest -m "slow and integration"

335

336

# Run tests without specific mark

337

pytest -m "not slow"

338

339

# Complex mark expressions

340

pytest -m "(slow or integration) and not windows"

341

```

342

343

## Custom Marks

344

345

Register custom marks in pytest configuration:

346

347

```python

348

# pytest.ini

349

[pytest]

350

markers =

351

slow: marks tests as slow (deselect with '-m "not slow"')

352

integration: marks tests as integration tests

353

unit: marks tests as unit tests

354

smoke: marks tests as smoke tests

355

regression: marks tests as regression tests

356

357

# pyproject.toml

358

[tool.pytest.ini_options]

359

markers = [

360

"slow: marks tests as slow",

361

"integration: marks tests as integration tests",

362

]

363

```