CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/npm-graphql-yoga

Fully-featured GraphQL Server with focus on easy setup, performance & great developer experience

Pending
Quality

Pending

Does it follow best practices?

Impact

Pending

No eval scenarios have been run

Security by Snyk

Pending

The risk profile of this skill

Overview
Eval results
Files

docs/caching-performance.md

Caching and Performance

Built-in caching utilities including LRU cache implementation and parser/validation caching for improved performance in production environments with configurable cache sizes and TTL settings.

Capabilities

LRU Cache Implementation

Comprehensive LRU (Least Recently Used) cache implementation for general-purpose caching needs.

/**
 * Create an LRU (Least Recently Used) cache instance.
 *
 * Bounded by `options.max` entries, with optional age-based expiry via
 * `options.ttl` (milliseconds).
 *
 * @param options - Cache configuration options (see LRUCacheOptions)
 * @returns LRU cache instance exposing get/set/has/delete/clear/size
 */
function createLRUCache<T extends {}>(options: LRUCacheOptions): LRUCache<T>;

/**
 * Internal LRU cache creation function (underscore prefix marks it as
 * internal API). Accepts the richer _LRUCacheOptions, which adds
 * sizeCalculation and dispose hooks on top of max/ttl.
 *
 * @param options - Internal cache configuration options
 * @returns Internal LRU cache instance (the underlying LRU type)
 */
function _createLRUCache<T extends {}>(options: _LRUCacheOptions): _LRUCache<T>;

LRU Cache Interface

Cache interface providing standard cache operations.

/**
 * LRU cache interface for type-safe cache operations.
 * `T extends {}` restricts stored values to non-nullish types, so
 * `get` can use `undefined` to signal a miss unambiguously.
 */
interface LRUCache<T extends {}> {
  /** Look up a value; returns undefined when the key is absent */
  get(key: string): T | undefined;
  /** Insert or overwrite the value stored under key */
  set(key: string, value: T): void;
  /** Check if key exists in cache (exact-key match) */
  has(key: string): boolean;
  /** Delete entry by key; boolean result */
  delete(key: string): boolean;
  /** Clear all cache entries */
  clear(): void;
  /** Current number of entries in the cache */
  size: number;
}

/**
 * Internal LRU cache type — an alias for the underlying LRU
 * implementation keyed by string.
 */
type _LRUCache<T extends {}> = LRU<string, T>;

Cache Configuration Options

Configuration interfaces for customizing cache behavior.

/**
 * Configuration options for the public createLRUCache API.
 */
interface LRUCacheOptions {
  /** Maximum number of entries held at once */
  max: number;
  /** Time to live in milliseconds; omit for no age-based expiry */
  ttl?: number;
  /** When true, get() refreshes the entry's age */
  updateAgeOnGet?: boolean;
  /** When true, has() refreshes the entry's age as well */
  updateAgeOnHas?: boolean;
}

/**
 * Internal LRU cache configuration options (used by _createLRUCache).
 * Extends the basic max/ttl knobs with sizing and disposal hooks.
 */
interface _LRUCacheOptions {
  /** Maximum number of entries */
  max: number;
  /** Time to live in milliseconds */
  ttl?: number;
  /** Computes a weight for an entry — NOTE(review): presumably counted against a size limit; confirm against the underlying lru-cache docs */
  sizeCalculation?: (value: any, key: string) => number;
  /** Callback invoked when an entry is removed from the cache */
  dispose?: (value: any, key: string) => void;
}

Parser and Validation Cache Plugin

Plugin for caching GraphQL parsing and validation results to improve performance.

/**
 * Parser and validation cache plugin for performance optimization.
 * Caches GraphQL parsing and validation results so repeated documents
 * skip those phases.
 *
 * @param options - Cache configuration options
 * @returns Plugin instance to pass in createYoga's `plugins` array
 */
function useParserAndValidationCache(options: ParserAndValidationCacheOptions): Plugin;

/**
 * Parser and validation cache configuration options.
 */
interface ParserAndValidationCacheOptions {
  /** Maximum cache entries */
  max?: number;
  /** Time to live in milliseconds */
  ttl?: number;
  /** Caller-supplied cache implementation (see LRUCache) used instead of an internally created one */
  cache?: LRUCache<any>;
  /** Enable parser caching */
  parser?: boolean;
  /** Enable validation caching */
  validation?: boolean;
}

Usage Examples:

import { 
  createYoga,
  createLRUCache,
  useParserAndValidationCache,
  _createLRUCache
} from 'graphql-yoga';

// Basic caching setup — flip on the built-in parser/validation caches
const basicCachingOptions = {
  schema: mySchema,
  parserCache: true,
  validationCache: true,
};
const yoga = createYoga(basicCachingOptions);

// Advanced caching configuration: explicit plugin with size and TTL bounds
const parserValidationPlugin = useParserAndValidationCache({
  max: 1000,
  ttl: 3600000, // 1 hour
  parser: true,
  validation: true,
});

const advancedYoga = createYoga({
  schema: mySchema,
  plugins: [parserValidationPlugin],
});

// Custom cache implementation — hand the plugin a pre-built LRU instance
const CUSTOM_CACHE_MAX = 500;
const CUSTOM_CACHE_TTL = 1800000; // 30 minutes

const customCache = createLRUCache<any>({
  max: CUSTOM_CACHE_MAX,
  ttl: CUSTOM_CACHE_TTL,
  updateAgeOnGet: true, // reading an entry refreshes its age
});

const customCacheYoga = createYoga({
  schema: mySchema,
  plugins: [
    useParserAndValidationCache({
      cache: customCache,
      max: CUSTOM_CACHE_MAX,
      ttl: CUSTOM_CACHE_TTL,
    }),
  ],
});

// Application-level LRU caches for custom use.
// resultCache: short-lived store for expensive query results (5-minute TTL).
const resultCache = createLRUCache<ExecutionResult>({ max: 100, ttl: 300000 });

// userCache: larger store for resolved user records (10-minute TTL).
const userCache = createLRUCache<User>({ max: 1000, ttl: 600000 });

// Resolver map demonstrating read-through caching in resolvers
const cachedResolvers = {
  Query: {
    user: async (_, { id }, context) => {
      const key = `user:${id}`;

      // Serve from cache when a live entry exists
      if (userCache.has(key)) {
        return userCache.get(key);
      }

      // Miss: load from the data layer and cache non-null results only
      const loaded = await context.userService.findById(id);
      if (loaded) {
        userCache.set(key, loaded);
      }
      return loaded;
    },

    expensiveQuery: async (_, args, context) => {
      // Key on the serialized arguments so distinct inputs cache separately
      const key = `expensive:${JSON.stringify(args)}`;

      if (resultCache.has(key)) {
        console.log('Cache hit for expensive query');
        return resultCache.get(key);
      }

      console.log('Cache miss, executing expensive query');
      const computed = await context.expensiveService.compute(args);

      resultCache.set(key, computed);
      return computed;
    }
  }
};

// Plugin that maintains a query-result cache and logs its size per request
function useCacheManagement(): Plugin {
  const queryCache = createLRUCache<any>({
    max: 200,
    ttl: 900000 // 15 minutes
  });

  return {
    onParams({ params }) {
      // Emit cache statistics on every incoming operation
      console.log('Query cache size:', queryCache.size);
    },

    onExecutionResult({ result, serverContext }) {
      // Only cache error-free results that actually carry data
      if (result.errors || !result.data) {
        return;
      }

      const query = serverContext?.params?.query;
      const variables = serverContext?.params?.variables;

      // Skip anything whose text mentions 'mutation'.
      // NOTE(review): substring test also skips queries containing that word — confirm intent.
      if (!query || query.includes('mutation')) {
        return;
      }

      queryCache.set(`${query}:${JSON.stringify(variables)}`, result.data);
    }
  };
}

// Plugin that records per-query execution times and warns on slow queries
function usePerformanceCache(): Plugin {
  const performanceCache = createLRUCache<{
    executionTime: number;
    timestamp: number;
  }>({
    max: 50,
    ttl: 3600000 // 1 hour
  });

  return {
    onParams({ params }) {
      // Timer starts when the request parameters arrive
      const startedAt = Date.now();

      return {
        onExecutionResult({ result }) {
          const elapsed = Date.now() - startedAt;
          const queryText = params.query || '';

          // Keyed by query text; a newer timing overwrites the older one
          performanceCache.set(queryText, {
            executionTime: elapsed,
            timestamp: Date.now()
          });

          // Surface anything slower than one second
          if (elapsed > 1000) {
            console.warn(`Slow query detected (${elapsed}ms):`, queryText);
          }
        }
      };
    }
  };
}

// Advanced cache configuration with per-entry sizing and a disposal hook
const advancedCache = _createLRUCache<CachedData>({
  max: 1000,
  ttl: 1800000,
  // Approximate an entry's weight from its serialized payload plus the key
  sizeCalculation: (entry, entryKey) =>
    JSON.stringify(entry).length + entryKey.length,
  // Invoked whenever an entry leaves the cache — release held resources
  dispose: (entry, entryKey) => {
    console.log(`Cache entry disposed: ${entryKey}`);
    if (entry.cleanup) {
      entry.cleanup();
    }
  }
});

// Two-tier caching: a small hot tier (L1) backed by a larger warm tier (L2)
class MultiLevelCache {
  private l1Cache: LRUCache<any>;
  private l2Cache: LRUCache<any>;

  constructor() {
    // Hot tier: few entries, short lifetime
    this.l1Cache = createLRUCache({ max: 50, ttl: 60000 }); // 1 minute
    // Warm tier: more entries, longer lifetime
    this.l2Cache = createLRUCache({ max: 500, ttl: 600000 }); // 10 minutes
  }

  get(key: string): any {
    // L1 hit: answer straight from the hot tier
    if (this.l1Cache.has(key)) {
      return this.l1Cache.get(key);
    }

    // Missing from both tiers
    if (!this.l2Cache.has(key)) {
      return undefined;
    }

    // L2 hit: promote the entry into L1 before returning it
    const hit = this.l2Cache.get(key);
    this.l1Cache.set(key, hit);
    return hit;
  }

  set(key: string, value: any): void {
    // Writes populate both tiers
    this.l1Cache.set(key, value);
    this.l2Cache.set(key, value);
  }

  clear(): void {
    this.l1Cache.clear();
    this.l2Cache.clear();
  }
}

// Wire the multi-level cache into a server alongside the other plugins
const multiCache = new MultiLevelCache();

// Inline lookup plugin extracted to a named constant for readability
const multiCacheLookupPlugin = {
  onParams({ params }) {
    const cacheKey = `${params.query}:${JSON.stringify(params.variables)}`;
    const cached = multiCache.get(cacheKey);

    if (cached) {
      console.log('Multi-level cache hit');
      // Return cached result
    }
  }
};

const performantYoga = createYoga({
  schema: mySchema,
  plugins: [
    multiCacheLookupPlugin,
    useParserAndValidationCache({
      max: 2000,
      ttl: 7200000 // 2 hours
    }),
    useCacheManagement(),
    usePerformanceCache()
  ]
});

// Pre-warm the parser/validation caches by replaying common read queries
async function warmCache() {
  // Representative operations to replay against the server
  const commonQueries = [
    '{ users { id name } }',
    '{ posts { id title author { name } } }',
    '{ categories { id name } }'
  ];

  // Small helper: POST one query document to the GraphQL endpoint
  const postQuery = (query: string) =>
    yoga.fetch('/graphql', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ query })
    });

  // Sequential on purpose: warm one query at a time, isolating failures
  for (const query of commonQueries) {
    try {
      await postQuery(query);
      console.log(`Warmed cache for query: ${query}`);
    } catch (error) {
      console.error(`Failed to warm cache for query: ${query}`, error);
    }
  }
}

// Cache invalidation
/**
 * Invalidate the cached record for a single user.
 *
 * Note: LRUCache.has()/delete() are exact-key operations, so wildcard
 * "patterns" such as `user:${id}:*` or `users:*` are matched literally
 * and can never hit — the previous pattern list was dead code. Only the
 * exact per-user key is invalidated here; prefix-based invalidation
 * would require key enumeration, which the LRUCache interface does not
 * expose.
 *
 * @param userId - id of the user whose cache entry should be dropped
 */
function invalidateUserCache(userId: string): void {
  const cacheKey = `user:${userId}`;
  // delete() is a no-op when the key is absent, so no has() guard needed
  userCache.delete(cacheKey);
}

docs

caching-performance.md

context-types.md

error-handling.md

index.md

logging-system.md

plugin-system.md

request-processing.md

result-processing.md

schema-management.md

server-configuration.md

subscription-system.md

tile.json