List processing tools and functional utilities
All toolz functions available in curried form for automatic partial application. The curried namespace enables more concise functional programming style and easier function composition by allowing functions to be called with fewer arguments than required.
import toolz.curried as toolz
# All functions automatically support partial application
# Alternative imports
from toolz.curried import map, filter, groupby, compose, merge
from toolz import curried # Access as curried.map, curried.filter, etc.
# Operator functions import
from toolz.curried.operator import add, mul, eq, lt, getitem
# Or access as toolz.add, toolz.mul, etc. when using main import

Every function in the curried namespace supports partial application - calling a function with fewer arguments than required returns a new function expecting the remaining arguments.
# All main toolz functions available as curried versions:
# - All itertoolz functions (groupby, take, partition, etc.)
# - All functoolz functions (compose, pipe, memoize, etc.)
# - All dicttoolz functions (merge, assoc, get_in, etc.)
# - All recipe functions (countby, partitionby)
# - All operator module functions (add, mul, eq, etc.)

The curried namespace includes all Python operator module functions in curried form, plus additional enhanced versions.
# Curried operator functions available:
# add, sub, mul, truediv, floordiv, mod, pow, and_, or_, xor,
# lshift, rshift, inv, neg, pos, abs, eq, ne, lt, le, gt, ge,
# getitem, setitem, delitem, contains, countOf, indexOf, etc.
def merge(*dicts, **kwargs):
    """Enhanced curried merge function."""

def merge_with(func, *dicts, **kwargs):
    """Enhanced curried merge_with function."""

import toolz.curried as toolz
# Create specialized functions through partial application.
# Each curried function called with fewer arguments than it needs
# returns a new function waiting for the rest.
take_5 = toolz.take(5) # Function that takes first 5 elements
evens = toolz.filter(lambda x: x % 2 == 0) # Function that filters even numbers
double = toolz.map(lambda x: x * 2) # Function that doubles all elements
# Use in pipelines: pipe threads the data through each stage, top to bottom
data = range(20)
result = toolz.pipe(
data,
evens, # filter even numbers: 0, 2, 4, ..., 18
take_5, # take first 5: 0, 2, 4, 6, 8
double, # double each: 0, 4, 8, 12, 16
list # materialize the lazy result as a list
)
# [0, 4, 8, 12, 16]
# Create reusable grouping functions
group_by_length = toolz.groupby(len)
group_by_first_letter = toolz.groupby(lambda word: word[0])
words = ['apple', 'apricot', 'banana', 'blueberry', 'cherry']
by_length = group_by_length(words)
# {5: ['apple'], 7: ['apricot'], 6: ['banana', 'cherry'], 9: ['blueberry']}  ('cherry' has 6 letters)

import toolz.curried as toolz
# Create processing pipeline with curried functions
def process_numbers(data):
    """Process a sequence of numbers through a functional pipeline.

    Keeps positive values, squares them, groups the squares by their
    last decimal digit, and keeps only the digit groups 1, 4 and 9.
    """
    keep_positive = toolz.filter(lambda n: n > 0)
    square = toolz.map(lambda n: n * n)
    by_last_digit = toolz.groupby(lambda n: n % 10)
    wanted_digits = {1, 4, 9}
    return toolz.pipe(
        data,
        keep_positive,        # drop zero and negatives
        square,               # n -> n * n
        by_last_digit,        # {last digit: [squares]}
        toolz.valmap(list),   # make each group a concrete list
        lambda groups: toolz.keyfilter(lambda k: k in wanted_digits, groups),
    )
numbers = [-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
result = process_numbers(numbers)
# Squares of 1..10 whose last digit is 1, 4 or 9:
# {1: [1, 81], 4: [4, 64], 9: [9, 49]}
# Compose reusable transformations.
# NOTE: compose applies right to left — str.split runs first, list last.
text_processor = toolz.compose(
list, # 4. materialize as a list
toolz.map(str.upper), # 3. uppercase each word
toolz.filter(lambda w: len(w) > 3), # 2. keep words longer than 3 chars
str.split # 1. split the string into words
)
text = "the quick brown fox jumps over the lazy dog"
processed = text_processor(text)
# ['QUICK', 'BROWN', 'JUMPS', 'OVER', 'LAZY']

import toolz.curried as toolz
# Create specialized dictionary operations.
# NOTE: curry binds positional arguments in signature order. toolz.assoc is
# assoc(d, key, value), so key/value must be bound by KEYWORD to leave the
# dict slot open — binding them positionally would make the later call pass
# the dict as `value` and the string 'timestamp' as the dict.
add_timestamp = toolz.assoc(key='timestamp', value='2023-01-01')
get_user_name = toolz.get_in(['user', 'name'])  # get_in(keys, coll): keys bind first, so this is correct
double_values = toolz.valmap(lambda x: x * 2)   # valmap(func, d): func binds first
# Apply to data
user_data = {'user': {'name': 'Alice', 'id': 123}, 'score': 85}
enhanced = add_timestamp(user_data)  # assoc(user_data, key='timestamp', value='2023-01-01')
name = get_user_name(enhanced)
# 'Alice'
# Dictionary processing pipeline
inventory = {'apples': 50, 'bananas': 30, 'oranges': 40}
processed_inventory = toolz.pipe(
    inventory,
    double_values,                      # double all quantities
    toolz.keymap(str.upper),            # uppercase all keys
    toolz.valfilter(lambda x: x >= 70)  # keep items with 70+ quantity
)
# {'APPLES': 100, 'ORANGES': 80}

import toolz.curried as toolz
from operator import add, mul # stdlib operators (not referenced in this snippet; toolz.add/toolz.mul below are the curried versions)
# Create mathematical operation pipelines
numbers = [1, 2, 3, 4, 5]
# Curried operator functions
add_10 = toolz.add(10) # curried addition
multiply_3 = toolz.mul(3) # curried multiplication
transformed = toolz.pipe(
numbers,
toolz.map(multiply_3), # multiply each by 3 -> 3, 6, 9, 12, 15
toolz.map(add_10), # add 10 to each -> 13, 16, 19, 22, 25
list
)
# [13, 16, 19, 22, 25]
# Data analysis pipeline
sales_data = [
{'product': 'laptop', 'price': 1000, 'quantity': 2},
{'product': 'mouse', 'price': 25, 'quantity': 10},
{'product': 'keyboard', 'price': 75, 'quantity': 5}
]
# Create analysis functions
get_total_value = lambda item: item['price'] * item['quantity']
is_high_value = lambda total: total >= 200
analysis = toolz.pipe(
sales_data,
toolz.map(lambda item: toolz.assoc(item, 'total', get_total_value(item))), # assoc called with all 3 args: new dict with 'total' added
toolz.filter(toolz.compose(is_high_value, toolz.get('total'))), # keep records whose total >= 200
toolz.groupby(toolz.get('product')), # one group per product name
toolz.valmap(lambda items: sum(item['total'] for item in items)) # sum totals within each group
)
# {'laptop': 2000, 'mouse': 250, 'keyboard': 375}  (mouse total 250 also meets the 200 threshold)

import toolz.curried as toolz
# Build complex operations from simple curried functions
def create_data_processor(min_value, transform_fn, group_key_fn):
    """Build a data-processing function from three pluggable pieces.

    The returned callable drops items below ``min_value``, applies
    ``transform_fn`` to each survivor, groups the transformed items by
    ``group_key_fn``, and materializes every group as a list.

    Note: grouping happens AFTER transformation, so ``group_key_fn``
    sees transformed values.
    """
    keep = toolz.filter(lambda item: item >= min_value)
    transform = toolz.map(transform_fn)
    group = toolz.groupby(group_key_fn)
    materialize = lambda grouped: toolz.valmap(list, grouped)
    # compose applies right to left: keep -> transform -> group -> materialize
    return toolz.compose(materialize, group, transform, keep)
# Create specific processors
score_processor = create_data_processor(
min_value=70,
transform_fn=lambda x: x * 1.1, # 10% bonus
group_key_fn=lambda x: 'high' if x >= 90 else 'medium'
)
scores = [65, 75, 85, 95, 88, 92, 68, 78, 96]
processed = score_processor(scores)
# Grouping sees the transformed (bonus-applied) scores, so 85 -> 93.5 lands in 'high':
# {'medium': [82.5, 85.8], 'high': [93.5, 104.5, 96.8, 101.2, 105.6]}  (floats approximate)
# Combine multiple curried operations.
# compose runs the bottom entry first: split -> isalpha filter -> lower -> group -> count
text_analyzer = toolz.compose(
lambda d: toolz.valmap(len, d), # count words in each group
toolz.groupby(len), # group by word length
toolz.map(str.lower), # lowercase all words
toolz.filter(str.isalpha), # alphabetic words only ("Dog!" is dropped)
str.split # split into words
)
text = "The Quick Brown Fox Jumps Over The Lazy Dog!"
analysis = text_analyzer(text)
# {3: 3, 5: 3, 4: 2} # word counts by length: 'the' x2 + 'fox'; 'quick', 'brown', 'jumps'; 'over', 'lazy'

# Standard toolz
from toolz import filter, map, groupby
# Nested calls read inside-out; `data` is assumed to be defined elsewhere.
result = list(map(lambda x: x * 2, filter(lambda x: x % 2 == 0, data)))
# Curried toolz
import toolz.curried as toolz
# Same pipeline as a reusable composed function (applied right to left).
even_doubler = toolz.compose(list, toolz.map(lambda x: x * 2), toolz.filter(lambda x: x % 2 == 0))
result = even_doubler(data)

import toolz.curried as toolz
# Create reusable components
high_scores = toolz.filter(lambda x: x >= 90) # lazy filter: keeps scores of 90 or more
grade_counts = toolz.compose(toolz.valmap(len), toolz.groupby(lambda x: 'A' if x >= 90 else 'B')) # -> {'A': n, 'B': m}
# Apply to different datasets
student_scores = [85, 92, 78, 96, 88, 94]
test_scores = [91, 87, 93, 89, 95, 86]
high_students = list(high_scores(student_scores)) # [92, 96, 94]
high_tests = list(high_scores(test_scores)) # [91, 93, 95]
student_grades = grade_counts(student_scores) # {'B': 3, 'A': 3}
test_grades = grade_counts(test_scores) # {'A': 3, 'B': 3}

import toolz.curried as toolz
# Build complex pipelines from simple parts.
# compose order is right to left: abs -> positive filter -> unique.
data_cleaner = toolz.compose(
toolz.unique, # remove duplicates (runs last)
toolz.filter(lambda x: x > 0), # positive values only
toolz.map(abs) # absolute values (runs first)
)
aggregator = toolz.compose(
lambda d: toolz.merge(d, {'total': sum(d.values())}), # add a 'total' key summing the counts
toolz.frequencies, # count occurrences (each value appears once after unique)
data_cleaner # clean first
)
messy_data = [-2, 1, -1, 2, 1, 3, 2, -3, 1]
result = aggregator(messy_data)
# {2: 1, 1: 1, 3: 1, 'total': 3}  (unique removes duplicates before counting, so every count is 1)

Install with Tessl CLI
npx tessl i tessl/pypi-toolz