or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

cli-commands.md file-io.md index.md ipython-integration.md memory-tracking.md

docs/ipython-integration.md

0

# IPython Integration

1

2

Enhanced integration with IPython and Jupyter notebooks for interactive memory profiling workflows. Provides magic commands and seamless notebook integration for data science and development workflows.

3

4

## Capabilities

5

6

### Extension Loading

7

8

Load memray magic commands in IPython or Jupyter environments.

9

10

```python { .api }
def load_ipython_extension(ipython):
    """
    Load memray magic commands in IPython/Jupyter.

    Parameters:
    - ipython: IPython instance

    Provides:
    - %%memray_flamegraph magic command for notebook profiling
    """
```

22

23

Usage in IPython/Jupyter:

24

25

```python
# Load the extension
%load_ext memray

# Now memray magic commands are available
```

31

32

### Magic Commands

33

34

#### %%memray_flamegraph

35

36

Cell magic for profiling code cells and generating inline flame graphs.

37

38

```python
%%memray_flamegraph [options]
# Code to profile
```

42

43

Options:

44

- `--output FILE`: Output file for flame graph HTML

45

- `--native`: Enable native stack traces

46

- `--leaks`: Show only leaked allocations

47

- `--merge-threads`: Merge allocations across threads

48

49

Usage examples:

50

51

```python
# Basic cell profiling
%%memray_flamegraph
import numpy as np
data = np.random.random((1000, 1000))
result = np.sum(data ** 2)
```

58

59

```python
# Profile with native traces and custom output
%%memray_flamegraph --native --output my_analysis.html
import pandas as pd
df = pd.read_csv('large_dataset.csv')
processed = df.groupby('category').sum()
```

66

67

```python
# Focus on memory leaks
%%memray_flamegraph --leaks
def potentially_leaky_function():
    cache = {}
    for i in range(10000):
        cache[i] = [0] * 1000
    # Forgot to clear cache
    return len(cache)

result = potentially_leaky_function()
```

79

80

## Notebook Workflow Examples

81

82

### Data Science Profiling

83

84

```python
# Load extension
%load_ext memray

# Profile data loading
%%memray_flamegraph --output data_loading.html
import pandas as pd
import numpy as np

# Load large dataset
df = pd.read_csv('big_data.csv')
print(f"Loaded {len(df)} rows")
```

97

98

```python
# Profile data processing
%%memray_flamegraph --output processing.html
# Heavy computation
df['computed'] = df['value'].apply(lambda x: x ** 2 + np.sin(x))
df_grouped = df.groupby('category').agg({
    'value': ['mean', 'std', 'sum'],
    'computed': ['mean', 'max']
})
```

108

109

```python
# Profile model training
%%memray_flamegraph --native --output model_training.html
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

X = df[['feature1', 'feature2', 'feature3']]
y = df['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

model = RandomForestRegressor(n_estimators=100)
model.fit(X_train, y_train)
```

122

123

### Algorithm Development

124

125

```python
# Profile algorithm implementations
%%memray_flamegraph --output algorithm_comparison.html

def bubble_sort(arr):
    n = len(arr)
    for i in range(n):
        for j in range(0, n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr

def quick_sort(arr):
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quick_sort(left) + middle + quick_sort(right)

# Test both algorithms
test_data_1 = list(range(1000, 0, -1))  # Reverse sorted
test_data_2 = test_data_1.copy()

sorted_1 = bubble_sort(test_data_1)
sorted_2 = quick_sort(test_data_2)
```

153

154

### Memory Leak Detection

155

156

```python
# Profile for memory leaks
%%memray_flamegraph --leaks --output leak_detection.html

class PotentiallyLeakyClass:
    _instances = []  # Class variable that holds references

    def __init__(self, data):
        self.data = data
        PotentiallyLeakyClass._instances.append(self)  # Creates leak

    def process(self):
        return sum(self.data)

# Create many instances without cleanup
results = []
for i in range(100):
    instance = PotentiallyLeakyClass(list(range(1000)))
    results.append(instance.process())

print(f"Processed {len(results)} instances")
print(f"Leaked instances: {len(PotentiallyLeakyClass._instances)}")
```

179

180

### Interactive Analysis

181

182

```python
# Use programmatic API for custom analysis
import memray

# Profile a cell programmatically
with memray.Tracker("notebook_profile.bin"):
    # Expensive operation
    large_dict = {i: [j for j in range(100)] for i in range(1000)}

# Analyze results in next cell
with memray.FileReader("notebook_profile.bin") as reader:
    print(f"Peak memory: {reader.metadata.peak_memory:,} bytes")
    print(f"Total allocations: {reader.metadata.total_allocations:,}")

    # Find largest allocations
    records = list(reader.get_allocation_records())
    largest = sorted(records, key=lambda r: r.size, reverse=True)[:5]

    print("\nLargest allocations:")
    for record in largest:
        print(f"  {record.size:,} bytes in thread {record.thread_name}")
```

204

205

### Custom Reporting

206

207

```python
def analyze_memory_profile(filename):
    """Custom analysis function for notebook use."""
    with memray.FileReader(filename) as reader:
        metadata = reader.metadata

        # Collect statistics
        total_size = 0
        allocator_counts = {}

        for record in reader.get_allocation_records():
            total_size += record.size
            allocator = record.allocator.name
            allocator_counts[allocator] = allocator_counts.get(allocator, 0) + 1

        return {
            'duration': (metadata.end_time - metadata.start_time).total_seconds(),
            'peak_memory': metadata.peak_memory,
            'total_allocated': total_size,
            'allocator_breakdown': allocator_counts
        }
```

```python
# Use custom analysis
%%memray_flamegraph --output custom_analysis.html
# Code to analyze
import json
data = [json.loads('{"key": "value"}') for _ in range(10000)]
```

```python
# Analyze the results
# NOTE: FileReader reads memray's binary capture files, not the HTML
# report written by --output — point it at the .bin capture instead.
stats = analyze_memory_profile("custom_analysis.bin")
print(f"Duration: {stats['duration']:.2f}s")
print(f"Peak memory: {stats['peak_memory']:,} bytes")
print(f"Allocators used: {list(stats['allocator_breakdown'].keys())}")
```