{
  "context": "Evaluates how the solution uses @pulumi/pulumi runtime testing hooks to execute mocked programs, control preview/dry-run behavior, and return mocked results without leaking state. Focus is entirely on the correct use of runtime.setMocks, preview detection, mock handlers, and cleanup across runs.",
  "type": "weighted_checklist",
  "checklist": [
    {
      "name": "Mock setup",
      "description": "Installs mocks via @pulumi/pulumi runtime.setMocks with both newResource and call handlers wired from the provided options, so resource registrations and provider invokes are intercepted before the program runs.",
      "max_score": 30
    },
    {
      "name": "Preview control",
      "description": "Passes the preview flag into runtime.setMocks (or otherwise configures the runtime's dry-run state) so runtime.isDryRun() reports dry-run mode when preview is requested and update mode otherwise.",
      "max_score": 25
    },
    {
      "name": "Resource results",
      "description": "Uses the MockResourceArgs and MockResourceResult shapes in the newResource handler to return stable ids and state derived from args.inputs, preserving dependency/Output semantics rather than hand-copying objects.",
      "max_score": 20
    },
    {
      "name": "Data call mocks",
      "description": "Implements the call handler using MockCallArgs to return data-source results (sync or async) and ensures those values flow through Pulumi Outputs as call results.",
      "max_score": 15
    },
    {
      "name": "Runtime cleanup",
      "description": "Resets Pulumi runtime state after each run (e.g., via runtime.disconnect() or by re-installing fresh handlers with setMocks) so subsequent executions start without leftover mocks or config.",
      "max_score": 10
    }
  ]
}
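
For reference, a minimal sketch of the pattern this checklist describes, assuming a hypothetical runWithMocks helper and a MockRunOptions shape with newResource, call, and preview fields (these names are illustrative, not part of the scenario): install the handlers with pulumi.runtime.setMocks, pass the preview flag so runtime.isDryRun() reflects dry-run mode, and disconnect the runtime afterwards so later runs start clean.

```typescript
import * as pulumi from "@pulumi/pulumi";

// Illustrative options shape; the scenario under evaluation may name these differently.
interface MockRunOptions {
    newResource: (args: pulumi.runtime.MockResourceArgs) => { id: string; state: any };
    call: (args: pulumi.runtime.MockCallArgs) => any;
    preview?: boolean;
}

// Install mocks, run the program, then reset runtime state so the next run starts clean.
async function runWithMocks(program: () => Promise<void>, options: MockRunOptions): Promise<void> {
    await pulumi.runtime.setMocks(
        {
            // Resource registrations are intercepted here; the handler should return a stable id
            // and state derived from args.inputs so dependency/Output semantics are preserved.
            newResource: options.newResource,
            // Provider invokes (data-source calls) are intercepted here.
            call: options.call,
        },
        "project",                // project name reported by the mocked engine
        "stack",                  // stack name reported by the mocked engine
        options.preview ?? false, // dry-run flag: true makes runtime.isDryRun() report preview mode
    );
    try {
        await program();
    } finally {
        // Drop the engine connection so leftover mocks/config don't leak into subsequent runs.
        await pulumi.runtime.disconnect();
    }
}
```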