LLVM 22.x tile for building compilers, language runtimes, and out-of-tree tooling
88
83%
Does it follow best practices?
Impact
96%
1.23xAverage score across 5 eval scenarios
Passed
No known issues
Use this skill when the user wants to JIT-compile and execute LLVM IR at runtime — for a REPL, scripting engine, or language runtime.
# CMake configuration: locate LLVM 22 and map the component names to
# library targets needed for an ORC-based JIT (IR core, support utils,
# OrcJIT/ExecutionEngine, plus the native code generator).
find_package(LLVM 22 REQUIRED CONFIG)
llvm_map_components_to_libnames(LLVM_LIBS
  Core Support OrcJIT ExecutionEngine
  X86CodeGen X86AsmParser X86Desc X86Info # replace with your native target
)
add_executable(my_jit src/main.cpp)
target_include_directories(my_jit PRIVATE ${LLVM_INCLUDE_DIRS})
target_compile_definitions(my_jit PRIVATE ${LLVM_DEFINITIONS})
target_link_libraries(my_jit PRIVATE ${LLVM_LIBS})
#include "llvm/Support/InitLLVM.h"
#include "llvm/Support/TargetSelect.h"

// Entry point: set up LLVM process-level state and register the host
// target BEFORE any JIT is constructed. InitLLVM installs signal
// handlers and pretty stack traces; the Initialize* calls make the
// native target, its assembly printer, and its assembly parser
// available to the JIT.
int main(int argc, char **argv) {
  llvm::InitLLVM X(argc, argv); // signal handlers, stack traces

  // Must be called before any JIT is constructed.
  llvm::InitializeNativeTarget();
  llvm::InitializeNativeTargetAsmPrinter();
  llvm::InitializeNativeTargetAsmParser();

  // ... create JIT, run code ...
}
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
// Simplest construction: an LLJIT with all defaults. ExitOnError
// unwraps llvm::Expected values, aborting with a message on failure.
llvm::ExitOnError ExitOnErr;
auto JIT = ExitOnErr(llvm::orc::LLJITBuilder().create());

// For more control:
auto JIT = ExitOnErr(
    llvm::orc::LLJITBuilder()
        .setNumCompileThreads(4) // parallel compilation
        .create()
);

// JIT'd code cannot see printf, malloc, etc. by default:
#include "llvm/ExecutionEngine/Orc/DynamicLibrarySearchGenerator.h"

// Make symbols already present in this process (libc, the host binary)
// resolvable from JIT'd code by attaching a search generator to the
// main JITDylib. getGlobalPrefix() supplies the platform symbol-name
// prefix (e.g. '_' on Mach-O).
auto &MainJD = JIT->getMainJITDylib();
MainJD.addGenerator(
    ExitOnErr(llvm::orc::DynamicLibrarySearchGenerator::GetForCurrentProcess(
        JIT->getDataLayout().getGlobalPrefix()))
);

// To expose a specific host function by name:
#include "llvm/ExecutionEngine/Orc/AbsoluteSymbols.h"

// Host function we want JIT'd code to be able to call as "my_sqrt".
double myRuntimeSqrt(double x) { return std::sqrt(x); }

// Define the symbol in the main dylib as an absolute address pointing
// at the host function, flagged as an exported, callable symbol.
auto &ES = JIT->getExecutionSession();
ExitOnErr(MainJD.define(
    llvm::orc::absoluteSymbols({
        {ES.intern("my_sqrt"),
         {llvm::orc::ExecutorAddr::fromPtr(&myRuntimeSqrt),
          llvm::JITSymbolFlags::Exported | llvm::JITSymbolFlags::Callable}}
    })
));

// Wrap your module in ThreadSafeModule and add it to the JIT:
#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"

// Build or receive the module.
auto M = buildMyModule(); // std::unique_ptr<llvm::Module>

// Each module needs its own context for thread-safety.
auto Ctx = std::make_unique<llvm::LLVMContext>();

// Note: M must have been built with *Ctx, OR re-parse into Ctx.
llvm::orc::ThreadSafeModule TSM(std::move(M), std::move(Ctx));
ExitOnErr(JIT->addIRModule(std::move(TSM)));
#include "llvm/Passes/PassBuilder.h"
// Install an IR transform on the JIT so every module is optimized with
// the standard O2 pipeline before it is compiled to machine code.
JIT->getIRTransformLayer().setTransform(
    [](llvm::orc::ThreadSafeModule TSM,
       const llvm::orc::MaterializationResponsibility &)
        -> llvm::Expected<llvm::orc::ThreadSafeModule> {
      TSM.withModuleDo([](llvm::Module &M) {
        // Build the new-pass-manager analysis stack from scratch and
        // cross-register the proxies so analyses can see each other.
        llvm::PassBuilder PB;
        llvm::LoopAnalysisManager LAM;
        llvm::FunctionAnalysisManager FAM;
        llvm::CGSCCAnalysisManager CGAM;
        llvm::ModuleAnalysisManager MAM;
        PB.registerModuleAnalyses(MAM);
        PB.registerCGSCCAnalyses(CGAM);
        PB.registerFunctionAnalyses(FAM);
        PB.registerLoopAnalyses(LAM);
        PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
        PB.buildPerModuleDefaultPipeline(llvm::OptimizationLevel::O2)
            .run(M, MAM);
      });
      return std::move(TSM);
    });

// Lookup triggers compilation (for LLJIT, always eager)
auto Sym = ExitOnErr(JIT->lookup("my_function"));
// Cast to function pointer
auto *Fn = Sym.toPtr<int(int, int)>();
int Result = Fn(3, 4); // call the JIT'd function#include "llvm/ExecutionEngine/Orc/LLLazyJIT.h"
// Lazy variant: functions are compiled on first invocation rather than
// when the module is added.
auto LazyJIT = ExitOnErr(llvm::orc::LLLazyJITBuilder().create());

// Add module — functions compiled only on first invocation.
ExitOnErr(LazyJIT->addLazyIRModule(std::move(TSM)));

// Lookup installs a trampoline stub; actual compilation deferred.
auto Sym = ExitOnErr(LazyJIT->lookup("my_function"));
auto *Fn = Sym.toPtr<int(int)>();
Fn(1); // ← compilation happens here, on first call

// For a REPL, add a new module per expression/statement:
// Each REPL iteration: lower the input to IR in a fresh module+context,
// hand it to the JIT, then look up and run the anonymous expression.
while (true) {
  std::string Input = readLine();

  // Parse input into an AST, lower to IR in a fresh module+context.
  auto Ctx = std::make_unique<llvm::LLVMContext>();
  auto M = lowerToIR(*Ctx, Input); // your frontend
  llvm::orc::ThreadSafeModule TSM(std::move(M), std::move(Ctx));
  ExitOnErr(JIT->addIRModule(std::move(TSM)));

  // Each anonymous expression is lowered to "__anon_expr".
  auto Sym = ExitOnErr(JIT->lookup("__anon_expr"));
  auto *Fn = Sym.toPtr<double()>();
  llvm::outs() << "= " << Fn() << "\n";
}

// For a REPL, function definitions accumulate across modules naturally
// because the JITDylib persists across iterations.
auto &ES = JIT->getExecutionSession();
auto &RuntimeJD = ExitOnErr(ES.createJITDylib("runtime"));
// Load runtime module into RuntimeJD
ExitOnErr(JIT->addIRModule(RuntimeJD, std::move(RuntimeTSM)));
// User code in MainJD can see RuntimeJD symbols
auto &MainJD = JIT->getMainJITDylib();
MainJD.addToLinkOrder(RuntimeJD);
// Load user module into MainJD
ExitOnErr(JIT->addIRModule(MainJD, std::move(UserTSM)));LLJIT/LLLazyJIT) is the correct API in LLVM 22.InitializeNativeTarget() after creating a JIT instance — it must come first.InitializeNativeTargetAsmPrinter() — the JIT cannot emit code without it.LLVMContext between threads without ThreadSafeContext — use one context per ThreadSafeModule.JIT object — they point into JIT'd memory that is freed on destruction.DynamicLibrarySearchGenerator if JIT'd code calls any libc/runtime functions.ExitOnError or handle llvm::Expected<T> / llvm::Error — ORC never silently discards errors.docs
evals
scenario-1
scenario-2
scenario-3
scenario-4
scenario-5
skills
add-alias-analysis
add-attributes-metadata
add-calling-convention
add-debug-info
add-exception-handling
add-gc-statepoints
add-intrinsic
add-lto
add-sanitizer
add-vectorization-hint
frontend-to-ir
jit-setup
lit-filecheck
lower-struct-types
new-target
out-of-tree-setup
tessl-llvm
version-sync