or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

api-embedding.mdbuild-system.mdcommand-line-tools.mdcore-compilation.mdfrontend-integration.mdindex.md

frontend-integration.mddocs/

# Frontend Integration

Specialized APIs for importing and compiling models from TensorFlow and TensorFlow Lite frameworks. These integration modules provide simplified interfaces for common ML framework workflows and handle framework-specific model formats and optimization patterns.

## Capabilities

### TensorFlow Integration

High-level integration for TensorFlow models including SavedModel and module compilation.

```python { .api }
def compile_saved_model(saved_model_dir: str, **kwargs):
    """
    Compiles an on-disk saved model to an IREE binary.

    Parameters:
    - saved_model_dir: Path to directory where the model was saved
    - **kwargs: Keyword args corresponding to ImportOptions or CompilerOptions

    Returns:
    bytes | None: Compiled output or None if output_file was specified
    """

def compile_module(module, saved_model_dir: Optional[str] = None, **kwargs):
    """
    Compiles a tf.Module to an IREE binary (by saving to disk).

    Parameters:
    - module: The tf.Module instance to convert to MLIR
    - saved_model_dir: Optional path to save the tf.Module to
    - **kwargs: Keyword args corresponding to ImportOptions or CompilerOptions

    Returns:
    bytes | None: Same as compile_saved_model()
    """

def is_available() -> bool:
    """
    Determine if TensorFlow and the TF frontend are available.

    Returns:
    bool: True if TensorFlow integration is available
    """
```

### TensorFlow Import Types

Enumeration of TensorFlow import types for different SavedModel formats.

```python { .api }
class ImportType(Enum):
    """
    Import type of the model.
    """
    OBJECT_GRAPH = "savedmodel_v2"
    V2 = "savedmodel_v2"
    SIGNATURE_DEF = "savedmodel_v1"
    V1 = "savedmodel_v1"

    @staticmethod
    def parse(spec: Union[str, ImportType]) -> ImportType:
        """
        Parses or returns an ImportType.

        Parameters:
        - spec: ImportType instance or case-insensitive name

        Returns:
        ImportType: Parsed import type
        """
```

### TensorFlow Import Options

Configuration options for TensorFlow model import with compilation settings.

```python { .api }
class ImportOptions(CompilerOptions):
    """
    Import options layer on top of the backend compiler options.

    Attributes:
    - exported_names: Sequence[str], exported names to keep (object graph/v2 models only)
    - import_only: bool, only import without compilation (default: False)
    - import_type: ImportType, type of import to perform (default: OBJECT_GRAPH)
    - input_type: Union[InputType, str], input type (default: STABLEHLO_XLA)
    - saved_model_tags: Set[str], tags to export (signature def/v1 models only)
    - save_temp_iree_input: Optional[str], save IR result of import
    """

    def __init__(self, **kwargs): ...
```

### TensorFlow Lite Integration

Integration for TensorFlow Lite FlatBuffer model compilation.

```python { .api }
def compile_file(fb_path: str, **kwargs):
    """
    Compiles a TFLite FlatBuffer file to an IREE binary.

    Parameters:
    - fb_path: Path to the FlatBuffer
    - **kwargs: Keyword args corresponding to ImportOptions or CompilerOptions

    Returns:
    bytes | None: Compiled output or None if output_file was specified
    """

def compile_str(fb_content: bytes, **kwargs):
    """
    Compiles TFLite FlatBuffer content to an IREE binary.

    Parameters:
    - fb_content: FlatBuffer content as bytes
    - **kwargs: Keyword args corresponding to ImportOptions or CompilerOptions

    Returns:
    bytes | None: Compiled output or None if output_file was specified
    """

def is_available() -> bool:
    """
    Determine if TensorFlow and the TFLite frontend are available.

    Returns:
    bool: True if TFLite integration is available
    """
```

### TensorFlow Lite Import Options

Configuration options for TensorFlow Lite model import and compilation.

```python { .api }
class ImportOptions(CompilerOptions):
    """
    Import options layer on top of the backend compiler options.

    Attributes:
    - input_arrays: Sequence[str], input array node names (if different from default)
    - output_arrays: Sequence[str], output array node names (if different from default)
    - import_only: bool, only import without compilation (default: False)
    - import_extra_args: Sequence[str], extra arguments to pass to import tool
    - save_temp_tfl_input: Optional[str], save IR from importing flatbuffer
    - save_temp_iree_input: Optional[str], save IR result of import
    - input_type: Optional[str], input type (default: "tosa")
    """

    def __init__(self, **kwargs): ...
```

### Default Configuration

Default backend configurations for frontend integration testing.

```python { .api }
DEFAULT_TESTING_BACKENDS: List[str] = ["llvm-cpu"]
```

## Usage Examples

### TensorFlow SavedModel Compilation

```python
import iree.compiler.tools.tf as tf_compiler

# Basic SavedModel compilation
bytecode = tf_compiler.compile_saved_model(
    "path/to/saved_model",
    target_backends=["llvm-cpu"]
)

# Advanced compilation with export selection
bytecode = tf_compiler.compile_saved_model(
    "path/to/saved_model",
    exported_names=["serving_default", "predict"],
    target_backends=["llvm-cpu", "cuda"],
    import_type=tf_compiler.ImportType.OBJECT_GRAPH,
    optimize=True
)

# Import-only workflow
mlir_content = tf_compiler.compile_saved_model(
    "path/to/saved_model",
    import_only=True,
    save_temp_iree_input="/tmp/model.mlir"
)
```

### TensorFlow Module Compilation

```python
import tensorflow as tf
import iree.compiler.tools.tf as tf_compiler

# Create a TensorFlow module
class SimpleModule(tf.Module):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(10)

    @tf.function(input_signature=[tf.TensorSpec([None, 784], tf.float32)])
    def __call__(self, x):
        return self.dense(x)

# Compile the module
module = SimpleModule()
bytecode = tf_compiler.compile_module(
    module,
    target_backends=["llvm-cpu"],
    exported_names=["__call__"]
)

# Save module to disk and compile
tf_compiler.compile_module(
    module,
    saved_model_dir="/tmp/my_model",
    target_backends=["cuda"],
    output_file="model.vmfb"
)
```

### TensorFlow Lite FlatBuffer Compilation

```python
import iree.compiler.tools.tflite as tflite_compiler

# Basic TFLite compilation
bytecode = tflite_compiler.compile_file(
    "model.tflite",
    target_backends=["llvm-cpu"]
)

# Advanced compilation with array specification
bytecode = tflite_compiler.compile_file(
    "model.tflite",
    input_arrays=["input_tensor"],
    output_arrays=["output_tensor"],
    target_backends=["vulkan-spirv"],
    optimize=True,
    save_temp_iree_input="/tmp/tflite_imported.mlir"
)

# Import-only workflow
mlir_content = tflite_compiler.compile_file(
    "model.tflite",
    import_only=True
)
```

### Availability Checking and Error Handling

```python
import iree.compiler.tools.tf as tf_compiler
import iree.compiler.tools.tflite as tflite_compiler

# Check TensorFlow availability
if tf_compiler.is_available():
    print("TensorFlow integration available")
    try:
        bytecode = tf_compiler.compile_saved_model(
            "path/to/model",
            target_backends=["llvm-cpu"]
        )
    except Exception as e:
        print(f"TensorFlow compilation failed: {e}")
else:
    print("TensorFlow integration not available")

# Check TensorFlow Lite availability
if tflite_compiler.is_available():
    print("TensorFlow Lite integration available")
    try:
        bytecode = tflite_compiler.compile_file(
            "model.tflite",
            target_backends=["llvm-cpu"]
        )
    except Exception as e:
        print(f"TFLite compilation failed: {e}")
else:
    print("TensorFlow Lite integration not available")
```

### Advanced Import Options

```python
import iree.compiler.tools.tf as tf_compiler

# Custom import options for TensorFlow
import_options = tf_compiler.ImportOptions(
    exported_names=["serving_default"],
    import_type=tf_compiler.ImportType.V2,
    target_backends=["llvm-cpu", "cuda"],
    optimize=True,
    output_mlir_debuginfo=True,
    save_temp_iree_input="/tmp/tf_imported.mlir"
)

# Compile with custom options
bytecode = tf_compiler.compile_saved_model(
    "path/to/saved_model",
    **import_options.__dict__
)
```

### Multi-Target Compilation Workflow

```python
import iree.compiler.tools.tf as tf_compiler
import os

def compile_for_multiple_targets(saved_model_path: str, targets: list):
    """Compile a SavedModel for multiple target backends."""
    results = {}

    for target in targets:
        print(f"Compiling for {target}...")

        try:
            output_file = f"model_{target}.vmfb"
            tf_compiler.compile_saved_model(
                saved_model_path,
                target_backends=[target],
                output_file=output_file,
                optimize=True
            )
            results[target] = output_file
            print(f"Successfully compiled for {target}: {output_file}")

        except Exception as e:
            print(f"Failed to compile for {target}: {e}")
            results[target] = None

    return results

# Usage
targets = ["llvm-cpu", "cuda", "vulkan-spirv", "rocm"]
results = compile_for_multiple_targets("path/to/saved_model", targets)
```