micromark utility to resolve subtokens
```sh
npx @tessl/cli install tessl/npm-micromark-util-resolve-all@2.0.0
```

micromark-util-resolve-all is a utility for the micromark parser ecosystem that resolves subtokens through event manipulation. It is essential for handling complex markdown structures, such as media (links, images) and attention (bold, italic), that are not parsed left to right and instead require post-processing to match openings with closings and to convert leftovers to plain text.
```sh
npm install micromark-util-resolve-all
```

```js
import { resolveAll } from "micromark-util-resolve-all";
```

For CommonJS:

```js
const { resolveAll } = require("micromark-util-resolve-all");
```

For Deno:

```js
import { resolveAll } from "https://esm.sh/micromark-util-resolve-all@2";
```

For browsers:

```html
<script type="module">
  import { resolveAll } from "https://esm.sh/micromark-util-resolve-all@2?bundle";
</script>
```

```js
import { push } from "micromark-util-chunked";
import { resolveAll } from "micromark-util-resolve-all";
/**
 * Example resolver that processes attention sequences (emphasis/strong)
 * @type {import('micromark-util-types').Resolver}
 */
function resolveAllAttention(events, context) {
  let index = -1;
  let open;
  const nextEvents = [];

  // Walk through all events looking for closable attention sequences
  while (++index < events.length) {
    if (
      events[index][0] === 'enter' &&
      events[index][1].type === 'attentionSequence' &&
      events[index][1]._close
    ) {
      open = index;

      // Find the matching opener by walking backward
      while (open--) {
        if (
          events[open][0] === 'enter' &&
          events[open][1].type === 'attentionSequence' &&
          events[open][1]._open
          // Additional matching logic would go here
        ) {
          // Process events between opener and closer using resolveAll
          const betweenEvents = resolveAll(
            context.parser.constructs.insideSpan.null,
            events.slice(open + 1, index),
            context
          );

          // Add the resolved events to the result
          push(nextEvents, betweenEvents);
          break;
        }
      }
    }
  }

  return nextEvents;
}
```

`resolveAll(constructs, events, context)` calls all `resolveAll` resolvers from the given constructs to manipulate parsing events. It is used internally by micromark to handle complex parsing scenarios where the initial tokenization needs post-processing.
```js
/**
 * Call all resolveAll functions from constructs to manipulate events
 * @param {Array<{resolveAll?: Resolver | undefined}>} constructs - List of constructs, optionally with resolveAll resolvers
 * @param {Array<Event>} events - List of events to be processed
 * @param {TokenizeContext} context - Context used by tokenize function
 * @returns {Array<Event>} Modified/processed events
 */
function resolveAll(constructs, events, context);
```

Parameters:

* `constructs` (`Array<{resolveAll?: Resolver | undefined}>`) - list of constructs, where each construct may optionally contain a `resolveAll` resolver function
* `events` (`Array<Event>`) - list of parsing events to be processed
* `context` (`TokenizeContext`) - context used by the tokenize function

Returns:

* `Array<Event>` - the processed events after all resolvers have been applied

Behavior: iterates over the given constructs and, for each construct that has a `resolveAll` function, calls it with the current events and the context. Each distinct resolver is called at most once, even if it appears on several constructs, and the events returned by one resolver are passed on to the next.
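A minimal sketch of that behavior, simplified for illustration rather than a copy of the module's source:

```js
/**
 * Sketch only: call each unique `resolveAll` resolver once, threading the
 * events through. The real module behaves along these lines.
 */
function resolveAllSketch(constructs, events, context) {
  const called = [];
  let index = -1;

  while (++index < constructs.length) {
    const resolve = constructs[index].resolveAll;

    if (resolve && !called.includes(resolve)) {
      events = resolve(events, context);
      called.push(resolve);
    }
  }

  return events;
}
```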
Represents a parsing event in micromark's tokenization process.

```ts
/**
 * A parsing event tuple containing entry/exit type, token, and context
 */
type Event = ['enter' | 'exit', Token, TokenizeContext];
```
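For intuition, the text `*hi*` yields, before attention is resolved, roughly the following enter/exit ordering. The objects below are simplified stand-ins rather than real micromark tokens and contexts:

```js
// Illustrative stand-ins only: real events carry full Token and
// TokenizeContext objects produced by the tokenizer.
const sketchedEvents = [
  ['enter', { type: 'attentionSequence' }, {}],
  ['exit', { type: 'attentionSequence' }, {}],
  ['enter', { type: 'data' }, {}],
  ['exit', { type: 'data' }, {}],
  ['enter', { type: 'attentionSequence' }, {}],
  ['exit', { type: 'attentionSequence' }, {}]
];
```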
Function type for event manipulation functions.

```ts
/**
 * Function that takes events and manipulates them
 * @param events - List of events
 * @param context - Tokenize context
 * @returns The given, modified, events
 */
type Resolver = (
  events: Array<Event>,
  context: TokenizeContext
) => Array<Event>;
```
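As an illustration, a small resolver could relabel any leftover marker sequences as plain data so they render as literal characters; the token type name here is just an example:

```js
/**
 * Illustrative resolver: relabel unmatched `attentionSequence` tokens as
 * plain `data` after other resolvers have run.
 * @type {import('micromark-util-types').Resolver}
 */
function resolveAllLeftoverSequences(events, context) {
  let index = -1;

  while (++index < events.length) {
    const token = events[index][1];

    if (token.type === 'attentionSequence') {
      // The `enter` and `exit` events share the same token object, so
      // changing the type once covers both.
      token.type = 'data';
    }
  }

  return events;
}
```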
Interface representing the tokenization context used during parsing.

```ts
/**
 * A context object that helps with tokenizing markdown constructs
 */
interface TokenizeContext {
  /** The previous character code */
  previous: Code;
  /** Current character code */
  code: Code;
  /** Whether we're currently interrupting */
  interrupt?: boolean | undefined;
  /** The current construct (constructs that are not partial are set here) */
  currentConstruct?: Construct | undefined;
  /** State shared between container parsing phases */
  containerState?: ContainerState | undefined;
  /** Current list of events */
  events: Array<Event>;
  /** The relevant parsing context */
  parser: ParseContext;
  /** Get the chunks that span a token */
  sliceStream: (token: Pick<Token, 'end' | 'start'>) => Array<Chunk>;
  /** Get the source text that spans a token */
  sliceSerialize: (
    token: Pick<Token, 'end' | 'start'>,
    expandTabs?: boolean | undefined
  ) => string;
}
```
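For example, inside a resolver the context's `sliceSerialize` can recover the source text a token spans. The logging resolver below is purely illustrative:

```js
/**
 * Illustrative resolver: log the source text of every entered token via
 * the context's `sliceSerialize`, then return the events unchanged.
 * @type {import('micromark-util-types').Resolver}
 */
function logTokenText(events, context) {
  let index = -1;

  while (++index < events.length) {
    if (events[index][0] === 'enter') {
      console.log(
        events[index][1].type,
        context.sliceSerialize(events[index][1])
      );
    }
  }

  return events;
}
```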
Represents a character code in micromark's processing.

```ts
/**
 * A character code.
 *
 * This is often the same as what `String#charCodeAt()` yields but micromark
 * adds meaning to certain other values.
 *
 * `null` represents the end of the input stream (called eof).
 * Negative integers are used instead of certain sequences of characters
 * (such as line endings and tabs).
 */
type Code = number | null;
```
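A tiny helper, written only to illustrate those conventions (the function and its return strings are not part of any micromark API):

```js
// Illustration of the `Code` conventions described above.
function describeCode(code) {
  if (code === null) return 'eof (end of the input stream)';
  if (code < 0) return 'special code (line ending, tab expansion, etc.)';
  return 'character: ' + String.fromCharCode(code);
}
```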
Represents a token in the micromark parsing process.

```ts
/**
 * A token: a span of chunks.
 * Tokens are passed in events to the compiler.
 * The chunks they span are then passed through the flow tokenizer.
 */
interface Token {
  /** Token type */
  type: TokenType;
  /** Point where the token starts */
  start: Point;
  /** Point where the token ends */
  end: Point;
  /** The previous token in a list of linked tokens */
  previous?: Token | undefined;
  /** The next token in a list of linked tokens */
  next?: Token | undefined;
}
```

This utility is primarily used when creating custom micromark extensions that need to post-process events after the initial tokenization: matching openings with closings and converting unmatched leftovers to plain text.
Common use cases include:

* `*` and `_` tokens for emphasis and strong emphasis
* `[` and `]` tokens for links and images
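To show where a resolver plugs in, here is a rough sketch of a syntax extension whose construct registers a `resolveAll` resolver. The construct name, token type, and tokenizer are hypothetical and far simpler than a real construct would be:

```js
/** @type {import('micromark-util-types').Resolver} */
function resolveAllTilde(events, context) {
  // Post-processing step: relabel unmatched tilde sequences as plain data.
  let index = -1;
  while (++index < events.length) {
    if (events[index][1].type === 'tildeSequence') {
      events[index][1].type = 'data';
    }
  }
  return events;
}

/** @type {import('micromark-util-types').Tokenizer} */
function tokenizeTilde(effects, ok) {
  return start;

  /** @type {import('micromark-util-types').State} */
  function start(code) {
    // Hypothetical, oversimplified tokenizer: consume a single `~`.
    effects.enter('tildeSequence');
    effects.consume(code);
    effects.exit('tildeSequence');
    return ok;
  }
}

/** @type {import('micromark-util-types').Construct} */
const tilde = { name: 'tilde', tokenize: tokenizeTilde, resolveAll: resolveAllTilde };

/** @type {import('micromark-util-types').Extension} */
export const tildeSyntax = {
  text: { 126: tilde } // U+007E TILDE (`~`)
};
```

Roughly speaking, when micromark finishes tokenizing such a span it runs `resolveAll` from this package over the constructs that took part, which is what ultimately invokes `resolveAllTilde` here and cleans up the leftovers.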