AI & Machine Learning APIs

Comprehensive AI and Machine Learning capabilities for WeChat Mini Programs, including local ONNX model inference, cloud AI services, and AI-powered features.

Capabilities

Local AI Inference

Local machine learning model inference using ONNX runtime for privacy-preserving AI applications.

/**
 * Create AI inference session for ONNX models
 * @param option - Inference session configuration
 */
function createInferenceSession(option: CreateInferenceSessionOption): InferenceSession;

/**
 * Get AI inference environment information
 * @param option - Environment info configuration
 */
function getInferenceEnvInfo(option?: GetInferenceEnvInfoOption): void;

interface CreateInferenceSessionOption {
  /** ONNX model file path (supports code package path and local file system path) */
  model: string;
  /** Whether to use NPU inference (iOS only) */
  allowNPU?: boolean;
  /** Whether to run inference with a quantized model */
  allowQuantize?: boolean;
  /** Computation precision level (lower is faster, higher is more precise) */
  precisionLevel?: 1 | 2 | 3 | 4;
  /** Typical input shapes for dynamic axis models */
  typicalShape?: Record<string, number[]>;
}

interface GetInferenceEnvInfoOption {
  /** Success callback */
  success?(res: GetInferenceEnvInfoSuccessCallbackResult): void;
  /** Failure callback */
  fail?(res: any): void;
  /** Completion callback */
  complete?(res: any): void;
}

interface GetInferenceEnvInfoSuccessCallbackResult {
  /** AI inference engine version */
  ver: string;
  /** Success message */
  errMsg: string;
}

Usage Examples:

// Check AI inference environment
wx.getInferenceEnvInfo({
  success(res) {
    console.log('AI Engine Version:', res.ver);
  },
  fail(err) {
    console.error('AI inference not supported:', err);
  }
});

// Create inference session
const session = wx.createInferenceSession({
  model: `${wx.env.USER_DATA_PATH}/image_classifier.onnx`,
  precisionLevel: 4,
  allowNPU: true,
  allowQuantize: false,
  typicalShape: {
    input: [1, 3, 224, 224], // Batch, Channels, Height, Width
    mask: [1, 1, 224, 224]   // Optional mask input
  }
});

// Handle session load events
session.onLoad(() => {
  console.log('Model loaded successfully');
  
  // Model is ready for inference
  runInference();
});

session.onError((err) => {
  console.error('Model loading failed:', err);
});

async function runInference() {
  try {
    // Prepare input tensors
    const inputTensor = new Float32Array(1 * 3 * 224 * 224);
    // ... fill tensor with image data
    
    const inputTensors = {
      input: {
        data: inputTensor,
        shape: [1, 3, 224, 224],
        type: 'float32'
      }
    };
    
    // Run inference
    const results = await session.run(inputTensors);
    
    console.log('Inference results:', results);
    
    // Process results
    const predictions = results.output.data;
    const topClass = Array.from(predictions).indexOf(Math.max(...predictions));
    console.log('Predicted class:', topClass);
    
  } catch (error) {
    console.error('Inference failed:', error);
  }
}

// Clean up when done
function cleanup() {
  session.destroy();
  console.log('Inference session destroyed');
}
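
The "fill tensor with image data" step above depends on the input source. One hedged sketch, assuming RGBA input (e.g. from CameraContext.onCameraFrame) and simple 0-1 scaling; real models usually require their own resizing and normalization:

// Hypothetical helper: pack an RGBA pixel buffer into a [1, 3, H, W]
// float32 tensor. The 0-1 scaling and RGB channel order are assumptions;
// substitute your model's own preprocessing (mean/std, BGR, resize, ...).
function rgbaToCHWTensor(rgba: Uint8Array, width: number, height: number): Float32Array {
  const plane = width * height;
  const tensor = new Float32Array(3 * plane);
  for (let i = 0; i < plane; i++) {
    tensor[i] = rgba[i * 4] / 255;                 // R plane
    tensor[plane + i] = rgba[i * 4 + 1] / 255;     // G plane
    tensor[2 * plane + i] = rgba[i * 4 + 2] / 255; // B plane
  }
  return tensor;
}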

InferenceSession Interface

Complete interface for managing AI inference sessions.

interface InferenceSession {
  /**
   * Run inference on input tensors
   * @param tensors - Input tensor data
   * @returns Promise resolving to output tensors
   */
  run(tensors: Tensors): Promise<Tensors>;
  
  /**
   * Destroy inference session and free resources
   */
  destroy(): void;
  
  /**
   * Listen for session load completion
   * @param callback - Load completion callback
   */
  onLoad(callback: () => void): void;
  
  /**
   * Remove load event listener
   * @param callback - Callback to remove
   */
  offLoad(callback: () => void): void;
  
  /**
   * Listen for session errors
   * @param callback - Error callback
   */
  onError(callback: (error: any) => void): void;
  
  /**
   * Remove error event listener
   * @param callback - Callback to remove
   */
  offError(callback: (error: any) => void): void;
}

interface Tensors {
  [key: string]: Tensor;
}

interface Tensor {
  /** Tensor data as a typed array matching `type` */
  data: Float32Array | Int32Array | Uint8Array | BigInt64Array | Float64Array;
  /** Tensor shape dimensions */
  shape: number[];
  /** Data type */
  type: 'float32' | 'int32' | 'uint8' | 'int64' | 'float64';
}

Advanced Usage Examples:

// Multi-input model inference
const multiInputSession = wx.createInferenceSession({
  model: `${wx.env.USER_DATA_PATH}/multi_input_model.onnx`,
  precisionLevel: 3,
  typicalShape: {
    image: [1, 3, 224, 224],
    features: [1, 100],
    metadata: [1, 10]
  }
});

multiInputSession.onLoad(async () => {
  // Prepare multiple inputs
  const imageData = new Float32Array(1 * 3 * 224 * 224);
  const featureData = new Float32Array(1 * 100);
  const metadataData = new Float32Array(1 * 10);
  
  // Fill with actual data...
  
  const inputs = {
    image: {
      data: imageData,
      shape: [1, 3, 224, 224],
      type: 'float32'
    },
    features: {
      data: featureData,
      shape: [1, 100],
      type: 'float32'
    },
    metadata: {
      data: metadataData,
      shape: [1, 10],
      type: 'float32'
    }
  };
  
  try {
    const results = await multiInputSession.run(inputs);
    
    // Process multiple outputs
    console.log('Classification:', results.classification.data);
    console.log('Confidence:', results.confidence.data);
    console.log('Features:', results.extracted_features.data);
    
  } catch (error) {
    console.error('Multi-input inference failed:', error);
  }
});

// Batch inference for multiple samples
async function batchInference(samples: Float32Array[]) {
  const batchSize = samples.length;
  const batchData = new Float32Array(batchSize * 3 * 224 * 224);
  
  // Concatenate samples into batch
  samples.forEach((sample, index) => {
    const offset = index * 3 * 224 * 224;
    batchData.set(sample, offset);
  });
  
  const batchInputs = {
    input: {
      data: batchData,
      shape: [batchSize, 3, 224, 224],
      type: 'float32'
    }
  };
  
  const results = await session.run(batchInputs);
  
  // Split batch results
  const predictions: number[][] = [];
  const outputSize = results.output.data.length / batchSize;
  
  for (let i = 0; i < batchSize; i++) {
    const start = i * outputSize;
    const end = start + outputSize;
    predictions.push(Array.from(results.output.data.slice(start, end)));
  }
  
  return predictions;
}
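
A short sketch of listener cleanup with the offLoad/offError methods from the InferenceSession interface above; removal is by callback reference, so the callbacks are named rather than inline:

// Register named callbacks so they can be detached later
const handleLoad = () => console.log('Model loaded');
const handleError = (err: any) => console.error('Session error:', err);

session.onLoad(handleLoad);
session.onError(handleError);

// e.g. in the page's onUnload: detach listeners, then free the session
session.offLoad(handleLoad);
session.offError(handleError);
session.destroy();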

Face Detection APIs

Built-in face detection and analysis capabilities.

/**
 * Initialize face detection
 * @param option - Face detection initialization
 */
function initFaceDetect(option: InitFaceDetectOption): void;

/**
 * Detect faces in image
 * @param option - Face detection configuration
 */
function faceDetect(option: FaceDetectOption): void;

/**
 * Stop face detection
 * @param option - Stop face detection configuration
 */
function stopFaceDetect(option?: StopFaceDetectOption): void;

interface InitFaceDetectOption {
  /** Success callback */
  success?(res: any): void;
  /** Failure callback */
  fail?(res: any): void;
  /** Completion callback */
  complete?(res: any): void;
}
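
/** Options for stopFaceDetect (assumed to mirror the InitFaceDetectOption callback shape) */
interface StopFaceDetectOption {
  /** Success callback */
  success?(res: any): void;
  /** Failure callback */
  fail?(res: any): void;
  /** Completion callback */
  complete?(res: any): void;
}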

interface FaceDetectOption {
  /** Image pixel data (RGBA ArrayBuffer, e.g. a camera frame) */
  frameBuffer: ArrayBuffer;
  /** Image width */
  width: number;
  /** Image height */
  height: number;
  /** Enable angle detection */
  enableAngle?: boolean;
  /** Enable point detection */
  enablePoint?: boolean;
  /** Enable confidence score */
  enableConf?: boolean;
  /** Success callback */
  success?(res: FaceDetectSuccessCallbackResult): void;
  /** Failure callback */
  fail?(res: any): void;
  /** Completion callback */
  complete?(res: any): void;
}

interface FaceDetectSuccessCallbackResult {
  /** Detected faces */
  faceInfo: FaceInfo[];
  /** Error message */
  errMsg: string;
}

interface FaceInfo {
  /** Face bounding box */
  x: number;
  y: number;
  width: number;
  height: number;
  /** Face angle (if enabled) */
  angleArray?: {
    pitch: number;
    roll: number;
    yaw: number;
  };
  /** Facial landmarks (if enabled) */
  pointArray?: FacePoint[];
  /** Confidence score (if enabled) */
  confArray?: number[];
}

interface FacePoint {
  /** Point x coordinate */
  x: number;
  /** Point y coordinate */
  y: number;
}

Usage Examples:

// Initialize face detection
wx.initFaceDetect({
  success() {
    console.log('Face detection initialized');
  },
  fail(err) {
    console.error('Face detection init failed:', err);
  }
});

// Detect faces on live camera frames
const cameraContext = wx.createCameraContext();

const listener = cameraContext.onCameraFrame((frame) => {
  // frame.data is the raw RGBA pixel buffer for this frame
  wx.faceDetect({
    frameBuffer: frame.data,
    width: frame.width,
    height: frame.height,
    enableAngle: true,
    enablePoint: true,
    enableConf: true,
    success(detectRes) {
      console.log(`Detected ${detectRes.faceInfo.length} faces`);

      detectRes.faceInfo.forEach((face, index) => {
        console.log(`Face ${index + 1}:`);
        console.log(`  Position: (${face.x}, ${face.y})`);
        console.log(`  Size: ${face.width}x${face.height}`);

        if (face.angleArray) {
          console.log(`  Angles: pitch=${face.angleArray.pitch}, roll=${face.angleArray.roll}, yaw=${face.angleArray.yaw}`);
        }

        if (face.pointArray) {
          console.log(`  Landmarks: ${face.pointArray.length} points`);
        }

        if (face.confArray) {
          console.log(`  Confidence: ${face.confArray.join(', ')}`);
        }
      });
    },
    fail(err) {
      console.error('Face detection failed:', err);
    }
  });
});

listener.start();

// Stop the frame listener and face detection when done
listener.stop();

wx.stopFaceDetect({
  success() {
    console.log('Face detection stopped');
  }
});

Vision Kit (VK) Session

Advanced computer vision capabilities for AR and object recognition.

/**
 * Create Vision Kit session for AR and computer vision
 * @param option - VK session configuration
 */
function createVKSession(option: CreateVKSessionOption): VKSession;

interface CreateVKSessionOption {
  /** VK session track */
  track: VKTrack;
  /** VK session version */
  version?: 'v1' | 'v2';
  /** Success callback */
  success?(res: any): void;
  /** Failure callback */
  fail?(res: any): void;
  /** Completion callback */
  complete?(res: any): void;
}

interface VKSession {
  /**
   * Start VK session
   * @param callback - Start callback
   */
  start(callback?: (res: any) => void): void;
  
  /**
   * Stop VK session
   * @param callback - Stop callback
   */
  stop(callback?: (res: any) => void): void;
  
  /**
   * Destroy VK session
   */
  destroy(): void;
  
  /**
   * Detect anchor points
   * @param option - Detection configuration
   */
  detectAnchors(option: VKDetectAnchorsOption): void;
  
  /**
   * Get camera texture
   */
  getCameraTexture(): any;
}

interface VKTrack {
  /** Plane tracking */
  plane?: {
    mode?: 1 | 3; // 1 for horizontal, 3 for both
  };
  /** Face tracking */
  face?: {
    mode?: 1 | 2; // 1 for single face, 2 for multiple faces
  };
  /** Object tracking */
  osd?: {
    mode?: 1;
  };
  /** Hand tracking */
  hand?: {
    mode?: 1;
  };
}
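
Usage Example:

A minimal sketch based only on the interfaces above: start a plane-tracking session, query anchors once, and release the session. Real applications typically run detection continuously against camera frames; this skeleton just exercises the documented surface.

// Start a plane-tracking VK session (mode values per VKTrack above)
const vkSession = wx.createVKSession({
  version: 'v1',
  track: {
    plane: { mode: 3 } // track horizontal and vertical planes
  }
});

vkSession.start((res) => {
  console.log('VK session started:', res);

  // Query the anchors detected so far
  vkSession.detectAnchors({
    success(anchorRes) {
      anchorRes.anchors.forEach((anchor) => {
        console.log(`Anchor ${anchor.id} (${anchor.type}):`, anchor.transform);
        if (anchor.size) {
          console.log(`  Plane size: ${anchor.size.width} x ${anchor.size.height}`);
        }
      });
    },
    fail(err) {
      console.error('Anchor detection failed:', err);
    }
  });
});

// Release native resources when the page unloads
function releaseVKSession() {
  vkSession.stop();
  vkSession.destroy();
}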

Cloud AI Services

Server-side AI capabilities through WeChat Cloud functions.

interface WxCloud {
  /**
   * Generate text using cloud AI models
   * @param options - Text generation configuration
   */
  generateText(options: ICloud.GenerateTextOptions): Promise<ICloud.GenerateTextResult>;
  
  /**
   * Analyze image content
   * @param options - Image analysis configuration
   */
  analyzeImage(options: ICloud.AnalyzeImageOptions): Promise<ICloud.AnalyzeImageResult>;
  
  /**
   * Convert speech to text
   * @param options - Speech recognition configuration
   */
  speechToText(options: ICloud.SpeechToTextOptions): Promise<ICloud.SpeechToTextResult>;
  
  /**
   * Convert text to speech
   * @param options - Text-to-speech configuration
   */
  textToSpeech(options: ICloud.TextToSpeechOptions): Promise<ICloud.TextToSpeechResult>;
}

declare namespace ICloud {
  interface GenerateTextOptions {
    /** AI model identifier */
    modelId: string;
    /** Input prompt */
    prompt: string;
    /** Generation parameters */
    parameters?: {
      /** Maximum tokens to generate */
      maxTokens?: number;
      /** Sampling temperature */
      temperature?: number;
      /** Top-p sampling */
      topP?: number;
      /** Frequency penalty */
      frequencyPenalty?: number;
      /** Presence penalty */
      presencePenalty?: number;
    };
  }

  interface GenerateTextResult {
    /** Generated text */
    text: string;
    /** Token usage statistics */
    usage: {
      promptTokens: number;
      completionTokens: number;
      totalTokens: number;
    };
  }

  interface AnalyzeImageOptions {
    /** Image file ID or URL */
    imageUrl: string;
    /** Analysis features to enable */
    features: ('labels' | 'faces' | 'text' | 'objects')[];
    /** Maximum results per feature */
    maxResults?: number;
  }

  interface AnalyzeImageResult {
    /** Detected labels */
    labels?: ImageLabel[];
    /** Detected faces */
    faces?: ImageFace[];
    /** Detected text */
    text?: ImageText;
    /** Detected objects */
    objects?: ImageObject[];
  }
}

Cloud AI Usage Examples:

// Text generation
wx.cloud.generateText({
  modelId: 'gpt-3.5-turbo',
  prompt: 'Write a product description for a smartphone',
  parameters: {
    maxTokens: 150,
    temperature: 0.7
  }
}).then(result => {
  console.log('Generated text:', result.text);
  console.log('Tokens used:', result.usage.totalTokens);
});

// Image analysis
wx.cloud.analyzeImage({
  imageUrl: 'cloud://my-env.my-bucket/image.jpg',
  features: ['labels', 'faces', 'text'],
  maxResults: 10
}).then(result => {
  if (result.labels) {
    console.log('Image labels:', result.labels);
  }
  if (result.faces) {
    console.log('Detected faces:', result.faces.length);
  }
  if (result.text) {
    console.log('Detected text:', result.text.content);
  }
});
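
speechToText and textToSpeech follow the same promise-based pattern. A hedged sketch; the option field names below are illustrative assumptions, since ICloud.SpeechToTextOptions and ICloud.TextToSpeechOptions are not reproduced in this document:

// Speech recognition (field names are assumptions)
wx.cloud.speechToText({
  fileId: 'cloud://my-env.my-bucket/recording.mp3'
}).then(result => {
  console.log('Speech-to-text result:', result);
});

// Speech synthesis (field names are assumptions)
wx.cloud.textToSpeech({
  text: 'Welcome to our mini program'
}).then(result => {
  console.log('Text-to-speech result:', result);
});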

Types

// AI inference types
interface ImageLabel {
  /** Label name */
  name: string;
  /** Confidence score */
  confidence: number;
  /** Label category */
  category?: string;
}

interface ImageFace {
  /** Face bounding box */
  boundingBox: {
    x: number;
    y: number;
    width: number;
    height: number;
  };
  /** Face attributes */
  attributes?: {
    age?: number;
    gender?: 'male' | 'female';
    emotion?: string;
  };
  /** Facial landmarks */
  landmarks?: FacePoint[];
}

interface ImageText {
  /** Detected text content */
  content: string;
  /** Text regions */
  regions: TextRegion[];
}

interface TextRegion {
  /** Text content */
  text: string;
  /** Bounding box */
  boundingBox: {
    x: number;
    y: number;
    width: number;
    height: number;
  };
  /** Confidence score */
  confidence: number;
}

interface ImageObject {
  /** Object name */
  name: string;
  /** Confidence score */
  confidence: number;
  /** Bounding box */
  boundingBox: {
    x: number;
    y: number;
    width: number;
    height: number;
  };
}

// VK session types
interface VKDetectAnchorsOption {
  /** Success callback */
  success?(res: VKDetectAnchorsResult): void;
  /** Failure callback */
  fail?(res: any): void;
}

interface VKDetectAnchorsResult {
  /** Detected anchors */
  anchors: VKAnchor[];
}

interface VKAnchor {
  /** Anchor ID */
  id: string;
  /** Anchor type */
  type: 'plane' | 'face' | 'hand';
  /** Transform matrix */
  transform: number[];
  /** Size (for plane anchors) */
  size?: { width: number; height: number };
}
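
The transform field is typically a flattened 4x4 pose matrix. A hedged sketch for reading an anchor's translation, assuming a column-major 16-element layout (an assumption to verify against the runtime):

// Hypothetical helper: extract the translation component from a
// flattened 4x4 transform, assuming column-major layout.
function anchorPosition(transform: number[]): { x: number; y: number; z: number } {
  return { x: transform[12], y: transform[13], z: transform[14] };
}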
