A very fast and expressive template engine for Python applications
Security framework for safely executing untrusted templates by restricting access to dangerous Python operations and attributes. Includes configurable policies and safe alternatives for common operations.
Restricted template execution environment that prevents access to unsafe Python operations while maintaining template functionality.
class SandboxedEnvironment(Environment):
    def __init__(self, **options):
        """
        Initialize a sandboxed environment with security restrictions.

        Inherits all Environment parameters and adds security policies:
        - Restricts attribute access to safe attributes only
        - Limits the range() global to MAX_RANGE items
        - Prevents access to private/internal attributes
        - Blocks dangerous method calls

        Parameters:
            **options: Same as Environment; security policies are
                customized by subclassing rather than passed as options
        """

Usage example:
from jinja2.sandbox import SandboxedEnvironment
from jinja2 import DictLoader

# Create a sandboxed environment
env = SandboxedEnvironment(
    loader=DictLoader({
        'user_template': '''
Hello {{ user.name }}!
Your items: {{ items | join(', ') }}
Range: {{ range(5) | list }}
'''
    })
)

# Render untrusted template text: the sandbox blocks dangerous Python
# access, but HTML escaping still requires autoescape=True
template = env.from_string('Hello {{ name | upper }}!')
result = template.render(name='<script>alert("xss")</script>')
# Result: Hello <SCRIPT>ALERT("XSS")</SCRIPT>!
# (the tags are uppercased, not escaped; enable autoescape for XSS safety)
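As a quick smoke test, a method decorated with jinja2.sandbox.unsafe raises SecurityError when a template tries to call it (the Payment class below is illustrative):

from jinja2.sandbox import SandboxedEnvironment, unsafe
from jinja2.exceptions import SecurityError

class Payment:
    @unsafe
    def cancel(self):
        """Marked unsafe: templates are not allowed to call this."""

env = SandboxedEnvironment()
try:
    env.from_string('{{ payment.cancel() }}').render(payment=Payment())
except SecurityError as exc:
    print(f'Blocked: {exc}')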
Utility functions for implementing and customizing sandbox security policies.

def safe_range(*args):
    """
    Safe range function with configurable limits.

    Parameters:
        *args: Same as the built-in range()

    Returns:
        range: Range object limited to MAX_RANGE items

    Raises:
        OverflowError: If the range would exceed MAX_RANGE items
    """
def is_internal_attribute(obj, attr):
    """
    Check if an attribute is considered internal/private.

    Parameters:
        obj: Object being accessed
        attr: Attribute name

    Returns:
        bool: True if the attribute should be blocked
    """
def modifies_known_mutable(obj, attr):
    """
    Check if accessing an attribute/method would modify a known mutable object.

    Parameters:
        obj: Object being accessed
        attr: Attribute/method name

    Returns:
        bool: True if the access modifies the object
    """
Configuration constants for controlling sandbox behavior and limits.

MAX_RANGE = 100000  # maximum number of items a template range() may produce
# In current Jinja2 releases the function/method sets are empty: dunder
# attributes such as __code__, __closure__, and __globals__ are already
# rejected by is_internal_attribute()
UNSAFE_FUNCTION_ATTRIBUTES = frozenset()
UNSAFE_METHOD_ATTRIBUTES = frozenset()  # function attributes are unsafe for methods too
UNSAFE_GENERATOR_ATTRIBUTES = frozenset(['gi_frame', 'gi_code'])
UNSAFE_COROUTINE_ATTRIBUTES = frozenset(['cr_frame', 'cr_code'])
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = frozenset(['ag_frame', 'ag_code'])
Customize sandbox behavior by overriding security methods:

from jinja2.sandbox import SandboxedEnvironment
class CustomSandboxedEnvironment(SandboxedEnvironment):
    def is_safe_attribute(self, obj, attr, value):
        """
        Override attribute safety checking.

        Parameters:
            obj: Object being accessed
            attr: Attribute name
            value: Attribute value

        Returns:
            bool: True if attribute access is safe
        """
        # Allow access to specific safe attributes
        if attr in ('safe_method', 'allowed_property'):
            return True
        # Block access to sensitive attributes
        if attr.startswith('_secret'):
            return False
        # Delegate to the parent implementation
        return super().is_safe_attribute(obj, attr, value)

    def is_safe_callable(self, obj):
        """
        Override callable safety checking.

        Parameters:
            obj: Callable object

        Returns:
            bool: True if the callable is safe to execute
        """
        # Allow specific safe functions
        if getattr(obj, '__name__', None) in ('safe_func', 'allowed_func'):
            return True
        # Block dangerous callables by name
        if getattr(obj, '__name__', None) in ('exec', 'eval', 'compile'):
            return False
        return super().is_safe_callable(obj)

# Usage
env = CustomSandboxedEnvironment()
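For instance, with a hypothetical Profile object, the policy above permits the allowlisted method, while the _secret attribute resolves to an undefined value and renders as an empty string:

class Profile:
    _secret_token = 'hidden'

    def safe_method(self):
        return 'ok'

print(env.from_string('{{ p.safe_method() }}').render(p=Profile()))
# -> ok
print(env.from_string('{{ p._secret_token }}').render(p=Profile()))
# -> '' (blocked attribute becomes undefined)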
Implement strict allowlist-based security policies:

class AllowlistSandboxedEnvironment(SandboxedEnvironment):
    # Allowed attributes per type
    ALLOWED_ATTRIBUTES = {
        str: {'upper', 'lower', 'strip', 'split', 'join', 'replace'},
        list: {'append', 'extend', 'count', 'index'},
        dict: {'get', 'keys', 'values', 'items'},
        # Add more types as needed
    }
    # Allowed global functions
    ALLOWED_FUNCTIONS = {
        'len', 'abs', 'min', 'max', 'sum', 'sorted', 'reversed'
    }

    def is_safe_attribute(self, obj, attr, value):
        allowed = self.ALLOWED_ATTRIBUTES.get(type(obj), set())
        return attr in allowed

    def is_safe_callable(self, obj):
        name = getattr(obj, '__name__', None)
        if name in self.ALLOWED_FUNCTIONS:
            return True
        # Also permit bound methods the attribute allowlist already exposes;
        # without this branch, calls like name.upper() would be rejected
        if hasattr(obj, '__self__'):
            allowed = self.ALLOWED_ATTRIBUTES.get(type(obj.__self__), set())
            return name in allowed
        return False
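With the bound-method branch in is_safe_callable, allowlisted string methods work, while anything outside the table raises SecurityError when called:

from jinja2.exceptions import SecurityError

env = AllowlistSandboxedEnvironment()
print(env.from_string('{{ name.upper() }}').render(name='alice'))
# -> ALICE
try:
    # 'format' is not in the str allowlist; calling the blocked
    # attribute raises SecurityError
    env.from_string('{{ name.format() }}').render(name='alice')
except SecurityError as exc:
    print(f'Blocked: {exc}')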
Implement security policies that consider template context:

class ContextAwareSandboxedEnvironment(SandboxedEnvironment):
    def __init__(self, **options):
        super().__init__(**options)
        self.security_context = {
            'trusted_users': set(),
            'admin_mode': False
        }

    def is_safe_attribute(self, obj, attr, value):
        # More permissive in admin mode
        if self.security_context.get('admin_mode'):
            return not attr.startswith('__')
        # Strict checking for untrusted users
        return super().is_safe_attribute(obj, attr, value)

    def set_security_context(self, **context):
        """Update the security context for the current request."""
        self.security_context.update(context)

# Usage
env = ContextAwareSandboxedEnvironment()

# For a trusted admin user
env.set_security_context(admin_mode=True)
template = env.from_string('{{ obj.admin_method() }}')

# For a regular user
env.set_security_context(admin_mode=False)
template = env.from_string('{{ obj.safe_method() }}')

Note that the security context lives on the shared environment, so in a multi-threaded server create one environment per request rather than mutating a global one mid-flight.
Validate and sanitize template inputs before rendering:

from jinja2.sandbox import SandboxedEnvironment
import re

def sanitize_input(value):
    """Sanitize user input for safe template rendering."""
    if isinstance(value, str):
        # Strip potentially dangerous characters (autoescape below
        # handles HTML escaping; this is defense in depth)
        value = re.sub(r'[<>"\']', '', value)
        # Limit length
        value = value[:1000]
    return value

def safe_render(template_str, **context):
    """Safely render a template with a sanitized context."""
    env = SandboxedEnvironment(autoescape=True)
    # Keep only simple types and sanitize strings; non-string values
    # pass through sanitize_input unchanged
    safe_context = {}
    for key, value in context.items():
        if isinstance(value, (str, int, float, bool, list, dict)):
            safe_context[key] = sanitize_input(value)
    template = env.from_string(template_str)
    return template.render(**safe_context)
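For example, given the sanitizer above, angle brackets are stripped before rendering:

result = safe_render(
    'Hi {{ name }}, you have {{ count }} new messages',
    name='Alice <script>',
    count=3,
)
print(result)  # -> Hi Alice script, you have 3 new messages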
Validate template sources to prevent malicious template injection:

def validate_template_source(source):
    """
    Validate template source for security issues.

    Returns:
        tuple: (is_safe, issues) where is_safe is a bool and issues
        is a list of detected problems
    """
    issues = []
    # Check for dangerous patterns
    dangerous_patterns = [
        r'\{%\s*include\s+["\'][^"\']*/\.\./',  # Path traversal
        r'\{%\s*extends\s+["\'][^"\']*/\.\./',  # Path traversal
        r'__[a-zA-Z_]+__',                      # Dunder attributes
        r'\bexec\b|\beval\b|\bcompile\b',       # Dangerous functions
    ]
    for pattern in dangerous_patterns:
        if re.search(pattern, source):
            issues.append(f'Potentially dangerous pattern: {pattern}')
    # Check template size
    if len(source) > 50000:  # 50 KB limit
        issues.append('Template source too large')
    return len(issues) == 0, issues

# Usage
source = '{{ user.name }} - {{ items | length }}'
is_safe, issues = validate_template_source(source)
if is_safe:
    env = SandboxedEnvironment()
    template = env.from_string(source)
else:
    print(f'Template validation failed: {issues}')
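The dunder pattern catches the classic attribute-chain probe:

probe = '{{ obj.__class__.__mro__ }}'
is_safe, issues = validate_template_source(probe)
print(is_safe)  # -> False
print(issues)   # -> ['Potentially dangerous pattern: __[a-zA-Z_]+__']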
Implement resource limits to prevent denial-of-service attacks:

import signal
from contextlib import contextmanager

class ResourceLimitedSandboxedEnvironment(SandboxedEnvironment):
    def __init__(self, max_execution_time=5, **options):
        super().__init__(**options)
        self.max_execution_time = max_execution_time

    @contextmanager
    def execution_timeout(self):
        """Limit execution time (SIGALRM: Unix-only, main thread only)."""
        def timeout_handler(signum, frame):
            raise TimeoutError('Template execution timeout')

        old_handler = signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(self.max_execution_time)
        try:
            yield
        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_handler)

    def render_with_limits(self, template_str, **context):
        """Render a template under the time limit."""
        template = self.from_string(template_str)
        with self.execution_timeout():
            return template.render(**context)

# Usage
env = ResourceLimitedSandboxedEnvironment(max_execution_time=3)
try:
    # A single huge range() is already stopped by safe_range/MAX_RANGE,
    # so use nested loops to demonstrate the timeout
    result = env.render_with_limits(
        '{% for i in range(100000) %}{% for j in range(100000) %}'
        '{% endfor %}{% endfor %}done'
    )
except TimeoutError:
    print('Template execution took too long')
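Because SIGALRM is unavailable on Windows and in worker threads, a portable alternative is to render in a subprocess that can be terminated. A minimal sketch (the helper names here are illustrative; error handling is abbreviated):

import multiprocessing
from jinja2.sandbox import SandboxedEnvironment

def _render_worker(template_str, context, queue):
    env = SandboxedEnvironment()
    queue.put(env.from_string(template_str).render(**context))

def render_with_hard_timeout(template_str, context, timeout=5):
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(
        target=_render_worker, args=(template_str, context, queue)
    )
    proc.start()
    proc.join(timeout)
    if proc.is_alive():
        proc.terminate()  # hard-stop the runaway render
        proc.join()
        raise TimeoutError('Template execution timeout')
    # On spawn-based platforms, call this under `if __name__ == "__main__":`
    return queue.get(timeout=1)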
"""
Security policy configuration for sandboxed environments.
Attributes:
max_range_size: Maximum allowed range size
allowed_attributes: Set of allowed attribute names
blocked_attributes: Set of blocked attribute names
allowed_functions: Set of allowed function names
blocked_functions: Set of blocked function names
enable_auto_escape: Enable automatic HTML escaping
"""
class SecurityError(TemplateRuntimeError):
    """
    Raised when a template attempts an operation the sandbox forbids.
    Defined in jinja2.exceptions and re-exported by jinja2.sandbox;
    catch it around render() calls on untrusted templates.
    """
class SandboxContext:
    """
    Security context for sandbox execution.

    Attributes:
        user_id: Current user identifier
        permissions: Set of user permissions
        trust_level: User trust level (0-100)
        source_origin: Origin of template source
    """