OpenAI Codex CLI is a lightweight coding agent that runs locally, providing multimodal inputs, a rich approvals workflow, and terminal-based AI-assisted development.
Protocol mode allows Codex CLI to operate as a programmatic interface, communicating via JSON-line protocol over stdin/stdout for integration with other tools and systems.
codex proto [OPTIONS]
codex p [OPTIONS]

SessionConfigured event

Send JSON objects representing user submissions:
{
"id": "unique-submission-id",
"type": "text",
"content": "Your prompt or request here"
}

Receive JSON objects representing system events:
{
"id": "unique-event-id",
"msg": {
"type": "SessionConfigured",
"data": {
// Session configuration details
}
}
}

# Start protocol mode (stdin must be piped)
echo '{"id":"1","type":"text","content":"Hello"}' | codex proto

#!/bin/bash
# example-integration.sh
# Pipe two JSON-line submissions into `codex proto` and stream back each
# event line it emits. The { ... } group feeds both prompts to a single
# codex session over one stdin stream.
# Start codex in protocol mode
{
# Send initial prompt
echo '{"id":"req-1","type":"text","content":"List all files in current directory"}'
# Send follow-up prompt
echo '{"id":"req-2","type":"text","content":"Show me the main.py file"}'
# IFS= with read -r preserves leading whitespace and backslashes in each
# raw JSON event line.
} | codex proto | while IFS= read -r line; do
# Process each event
echo "Event: $line"
done

import subprocess
import json
import sys
def codex_protocol_session():
    """Run a single exchange with `codex proto` over JSON-line pipes.

    Spawns codex in protocol mode, writes one submission object to its
    stdin, then streams the JSON-line events from its stdout until the
    process closes the stream. Prints each event type as it arrives.
    """
    # Start codex in protocol mode; text=True gives str (not bytes) pipes.
    process = subprocess.Popen(
        ['codex', 'proto'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True
    )
    try:
        # Send submission
        submission = {
            "id": "python-req-1",
            "type": "text",
            "content": "Help me write a function to sort a list"
        }
        process.stdin.write(json.dumps(submission) + '\n')
        process.stdin.flush()
        # Close stdin *before* reading: it signals EOF so codex knows no
        # more submissions are coming. Closing only after the read loop
        # (as a naive version would) can block forever, since codex may
        # keep its stdout open while waiting for further input.
        process.stdin.close()
        # Read events (one JSON object per line)
        for line in process.stdout:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines between events
            event = json.loads(line)
            print(f"Received event: {event['msg']['type']}")
            # Handle different event types
            if event['msg']['type'] == 'SessionConfigured':
                print("Session is ready")
            elif event['msg']['type'] == 'Response':
                print(f"AI Response: {event['msg']['content']}")
    finally:
        # Always reap the child so no zombie process is left behind.
        process.wait()
if __name__ == "__main__":
    codex_protocol_session()

const { spawn } = require('child_process');
const readline = require('readline');
/**
 * Start a `codex proto` session and wire up its JSON-line event stream.
 *
 * Listens for events on stdout; when the SessionConfigured event arrives,
 * sends one text submission. Stderr is logged as errors.
 *
 * @returns {import('child_process').ChildProcess} the running codex process
 */
function startCodexProtocol() {
  const codex = spawn('codex', ['proto']);

  // Handle stdout (events) — one JSON object per line.
  const rl = readline.createInterface({
    input: codex.stdout,
    crlfDelay: Infinity
  });

  rl.on('line', (line) => {
    const event = JSON.parse(line);
    console.log('Event:', event.msg.type);

    if (event.msg.type === 'SessionConfigured') {
      // Send a submission when session is ready
      const submission = {
        id: 'js-req-1',
        type: 'text',
        content: 'Create a simple Express.js server'
      };
      // Fixed: the original called JSON.dumps (a Python API) which throws
      // TypeError in JavaScript; serialization is done with JSON.stringify.
      codex.stdin.write(JSON.stringify(submission) + '\n');
    }
  });

  // Handle stderr
  codex.stderr.on('data', (data) => {
    console.error('Error:', data.toString());
  });

  return codex;
}
const session = startCodexProtocol();

Emitted when the protocol session is ready to accept submissions.
{
"id": "",
"msg": {
"type": "SessionConfigured",
"data": {
"session_id": "uuid-here",
"model": "gpt-4",
"capabilities": ["text", "images"],
"sandbox_mode": "workspace-read"
}
}
}

Various response events are emitted during conversation processing:
Protocol mode inherits configuration from:
# Use configuration overrides
codex proto -c model=gpt-4 -c sandbox_mode=workspace-write
# Use configuration profile
codex --config-profile integration proto

If malformed JSON is sent, an error event is emitted:
{
"id": "error-1",
"msg": {
"type": "Error",
"error": "invalid submission: expected JSON object"
}
}

Protocol mode responds to signals:
Install with Tessl CLI
npx tessl i tessl/npm-openai--codex