Agent skill for agent - invoke with $agent-agent
42
Quality
13%
Does it follow best practices?
Impact
93%
4.65x — average score across 3 eval scenarios
Passed
No known issues
Optimize this skill with Tessl
npx tessl skill review --optimize ./.agents/skills/agent-agent/SKILL.md

A sophisticated Goal-Oriented Action Planning (GOAP) specialist that dynamically creates intelligent plans to achieve complex objectives using advanced graph analysis and sublinear optimization techniques. This agent transforms high-level goals into executable action sequences through mathematical optimization, temporal advantage prediction, and multi-agent coordination.
mcp__sublinear-time-solver__solve - Optimize action sequences and resource allocation
mcp__sublinear-time-solver__pageRank - Prioritize goals and actions based on importance
mcp__sublinear-time-solver__analyzeMatrix - Analyze goal dependencies and system properties
mcp__sublinear-time-solver__predictWithTemporalAdvantage - Predict future states before data arrives
mcp__sublinear-time-solver__estimateEntry - Evaluate partial state information efficiently
mcp__sublinear-time-solver__calculateLightTravel - Compute temporal advantages for time-critical planning
mcp__sublinear-time-solver__demonstrateTemporalLead - Validate predictive planning scenarios
mcp__flow-nexus__swarm_init - Initialize multi-agent execution systems
mcp__flow-nexus__task_orchestrate - Execute planned action sequences
mcp__flow-nexus__agent_spawn - Create specialized agents for specific goals
mcp__flow-nexus__workflow_create - Define repeatable goal achievement patterns
mcp__flow-nexus__sandbox_create - Isolated environments for goal testing

// World state representation
// Planner world model: boolean flags describing the pipeline, with the
// starting snapshot (everything unsatisfied) and the target snapshot
// (everything satisfied). Keys are shared between both maps.
const WorldState = {
  current_state: new Map(
    Object.entries({
      code_written: false,
      tests_passing: false,
      documentation_complete: false,
      deployment_ready: false,
    })
  ),
  goal_state: new Map(
    Object.entries({
      code_written: true,
      tests_passing: true,
      documentation_complete: true,
      deployment_ready: true,
    })
  ),
};
// Action catalog for the GOAP planner. Each entry carries a numeric cost plus
// precondition/effect flag maps keyed by the WorldState flag names:
// an action is applicable when every precondition flag matches the current
// state, and applying it writes its effect flags into the state.
const Actions = [
  {
    name: 'write_code',
    cost: 5,
    // No prerequisites — coding can always start.
    preconditions: new Map(),
    effects: new Map(Object.entries({ code_written: true })),
  },
  {
    name: 'write_tests',
    cost: 3,
    preconditions: new Map(Object.entries({ code_written: true })),
    effects: new Map(Object.entries({ tests_passing: true })),
  },
  {
    name: 'write_documentation',
    cost: 2,
    preconditions: new Map(Object.entries({ code_written: true })),
    effects: new Map(Object.entries({ documentation_complete: true })),
  },
  {
    name: 'deploy_application',
    cost: 4,
    // Deployment requires the entire pipeline to be green.
    preconditions: new Map(
      Object.entries({
        code_written: true,
        tests_passing: true,
        documentation_complete: true,
      })
    ),
    effects: new Map(Object.entries({ deployment_ready: true })),
  },
];
// Build adjacency matrix for sublinear optimization
// Build a dense action-transition graph: edge i->j exists when action j can
// follow action i (per canTransition, defined elsewhere), weighted by
// 1/cost so cheaper follow-up actions get stronger edges. Returns the raw
// matrix plus the solver's structural analysis of it.
async function buildActionGraph(actions, worldState) {
const n = actions.length;
// n x n zero matrix; Array(n).fill() gives undefined slots so map() visits all.
const adjacencyMatrix = Array(n).fill().map(() => Array(n).fill(0));
// Calculate action dependencies and transitions
for (let i = 0; i < n; i++) {
for (let j = 0; j < n; j++) {
if (canTransition(actions[i], actions[j], worldState)) {
adjacencyMatrix[i][j] = 1 / actions[j].cost; // Weight by inverse cost
}
}
}
// Analyze matrix properties for optimization
// NOTE(review): binding name uses underscores while the tool list above
// spells the server with dashes (mcp__sublinear-time-solver__...) — confirm
// the runtime binding name.
const analysis = await mcp__sublinear_time_solver__analyzeMatrix({
matrix: {
rows: n,
cols: n,
format: "dense",
data: adjacencyMatrix
},
checkDominance: true,
checkSymmetry: false,
estimateCondition: true
});
return { adjacencyMatrix, analysis };
}async function prioritizeGoals(actionGraph, goals) {
// Rank goals by PageRank centrality over the action graph, highest first.
// NOTE(review): `actionGraph` is used both for `.length` and as the matrix
// data, so callers must pass the raw adjacency array — not the
// { adjacencyMatrix, analysis } object buildActionGraph returns. Verify.
// Use PageRank to identify critical actions and goals
const pageRank = await mcp__sublinear_time_solver__pageRank({
adjacency: {
rows: actionGraph.length,
cols: actionGraph.length,
format: "dense",
data: actionGraph
},
damping: 0.85,
epsilon: 1e-6
});
// Sort goals by importance scores
// NOTE(review): assumes goal i corresponds to matrix row i — confirm.
const prioritizedGoals = goals.map((goal, index) => ({
goal,
priority: pageRank.ranks[index],
index
})).sort((a, b) => b.priority - a.priority);
return prioritizedGoals;
}async function planWithTemporalAdvantage(planningMatrix, constraints) {
// Ask the solver to predict a plan ahead of data arrival, then check that
// the claimed temporal advantage is feasible for the problem size/distance.
// Returns { solution, temporalAdvantage, confidence } or null if infeasible.
// Predict optimal solutions before full problem manifestation
const prediction = await mcp__sublinear_time_solver__predictWithTemporalAdvantage({
matrix: planningMatrix,
vector: constraints,
distanceKm: 12000 // Global coordination distance
});
// Validate temporal feasibility
// NOTE(review): validateTemporalAdvantage is not in the tool list above
// (which lists demonstrateTemporalLead) — confirm the tool name.
const validation = await mcp__sublinear_time_solver__validateTemporalAdvantage({
size: planningMatrix.rows,
distanceKm: 12000
});
if (validation.feasible) {
return {
solution: prediction.solution,
temporalAdvantage: prediction.temporalAdvantage,
confidence: prediction.confidence
};
}
return null;
}async function findOptimalPath(startState, goalState, actions) {
// A* search over world states: actions are edges (cost = action.cost) and
// the heuristic estimates remaining distance to goalState. Returns the path
// via reconstructPath, or null when the goal is unreachable.
// PriorityQueue, stateKey, heuristic, statesEqual, reconstructPath,
// getApplicableActions, applyAction and optimizedHeuristic are defined
// elsewhere — not visible in this file.
const openSet = new PriorityQueue();
const closedSet = new Set();
const gScore = new Map();
const fScore = new Map();
const cameFrom = new Map();
openSet.enqueue(startState, 0);
gScore.set(stateKey(startState), 0);
fScore.set(stateKey(startState), heuristic(startState, goalState));
while (!openSet.isEmpty()) {
const current = openSet.dequeue();
const currentKey = stateKey(current);
if (statesEqual(current, goalState)) {
return reconstructPath(cameFrom, current);
}
closedSet.add(currentKey);
// Generate successor states using available actions
for (const action of getApplicableActions(current, actions)) {
const neighbor = applyAction(current, action);
const neighborKey = stateKey(neighbor);
if (closedSet.has(neighborKey)) continue;
const tentativeGScore = gScore.get(currentKey) + action.cost;
if (!gScore.has(neighborKey) || tentativeGScore < gScore.get(neighborKey)) {
cameFrom.set(neighborKey, { state: current, action });
gScore.set(neighborKey, tentativeGScore);
// Use sublinear solver for heuristic optimization
const heuristicValue = await optimizedHeuristic(neighbor, goalState);
fScore.set(neighborKey, tentativeGScore + heuristicValue);
// NOTE(review): when the neighbor is already queued, its queued priority
// is not updated (no decrease-key) — a stale, higher priority may remain.
if (!openSet.contains(neighbor)) {
openSet.enqueue(neighbor, fScore.get(neighborKey));
}
}
}
}
return null; // No path found
}async function coordinateWithSwarm(complexGoal) {
// Stand up a hierarchical planning swarm and fan the goal out to three
// specialist agents; returns handles to the swarm and the orchestrated task.
// NOTE(review): these bindings use the mcp__claude_flow__ prefix while the
// tool list above names flow-nexus equivalents — confirm which server is meant.
// Initialize planning swarm
const swarm = await mcp__claude_flow__swarm_init({
topology: "hierarchical",
maxAgents: 8,
strategy: "adaptive"
});
// Spawn specialized planning agents
const coordinator = await mcp__claude_flow__agent_spawn({
type: "coordinator",
capabilities: ["goal_decomposition", "plan_synthesis"]
});
const analyst = await mcp__claude_flow__agent_spawn({
type: "analyst",
capabilities: ["constraint_analysis", "feasibility_assessment"]
});
const optimizer = await mcp__claude_flow__agent_spawn({
type: "optimizer",
capabilities: ["path_optimization", "resource_allocation"]
});
// Orchestrate distributed planning
// NOTE(review): the coordinator/analyst/optimizer handles are created but
// neither passed to the orchestration call nor returned — verify intended.
const planningTask = await mcp__claude_flow__task_orchestrate({
task: `Plan execution for: ${complexGoal}`,
strategy: "parallel",
priority: "high"
});
return { swarm, planningTask };
}async function achieveConsensus(agents, proposals) {
// Pick the proposal the agent group most agrees on: solve the consensus
// linear system, then select the proposal with the highest solution score.
// buildConsensusMatrix / generatePreferenceVector are defined elsewhere.
// Build consensus matrix
const consensusMatrix = buildConsensusMatrix(agents, proposals);
// Solve for optimal consensus
const consensus = await mcp__sublinear_time_solver__solve({
matrix: consensusMatrix,
vector: generatePreferenceVector(agents),
method: "neumann",
epsilon: 1e-6
});
// Select proposal with highest consensus score
// NOTE(review): assumes consensus.solution is a plain array aligned 1:1
// with `proposals` — confirm the solver's result shape.
const optimalProposal = proposals[consensus.solution.indexOf(Math.max(...consensus.solution))];
return {
selectedProposal: optimalProposal,
consensusScore: Math.max(...consensus.solution),
convergenceTime: consensus.convergenceTime
};
}async function decomposeGoal(complexGoal) {
// Break a complex goal into ordered sub-goals: create a sandbox, recursively
// decompose (max depth 3), then order the sub-goals by PageRank over their
// dependency graph. Returns sorted subgoals, the dependency matrix, and an
// estimated completion time.
// Create sandbox for goal simulation
// NOTE(review): `sandbox` is created but never referenced below —
// presumably recursiveDecompose runs inside it implicitly; verify.
const sandbox = await mcp__flow_nexus__sandbox_create({
template: "node",
name: "goal-decomposition",
env_vars: {
GOAL_CONTEXT: complexGoal.context,
CONSTRAINTS: JSON.stringify(complexGoal.constraints)
}
});
// Recursive goal breakdown
const subgoals = await recursiveDecompose(complexGoal, 0, 3); // Max depth 3
// Build dependency graph
const dependencyMatrix = buildDependencyMatrix(subgoals);
// Optimize execution order
const executionOrder = await mcp__sublinear_time_solver__pageRank({
adjacency: dependencyMatrix,
damping: 0.9
});
// NOTE(review): indexing ranks by subgoal.id assumes ids are 0..n-1 matrix
// indices — confirm recursiveDecompose assigns ids that way.
return {
subgoals: subgoals.sort((a, b) =>
executionOrder.ranks[b.id] - executionOrder.ranks[a.id]
),
dependencies: dependencyMatrix,
estimatedCompletion: calculateCompletionTime(subgoals, executionOrder)
};
}class DynamicPlanner {
// Continuously re-plans via an OODA (Observe-Orient-Decide-Act) loop on a
// 1-second cycle until monitoringActive is cleared. Helper methods
// (detectStateChanges, updateWorldState, analyzeDeviations, triggerReplanning,
// needsReplanning, executeAction, buildCurrentMatrix, getCurrentConstraints)
// are defined elsewhere — not visible in this file.
constructor() {
this.currentPlan = null;
this.worldState = new Map();
this.monitoringActive = false;
}
// Run the OODA loop; resolves only after monitoringActive is set false
// externally (this class defines no stop method).
async startMonitoring() {
this.monitoringActive = true;
while (this.monitoringActive) {
// OODA Loop Implementation
await this.observe();
await this.orient();
await this.decide();
await this.act();
await new Promise(resolve => setTimeout(resolve, 1000)); // 1s cycle
}
}
// Observe: pull detected world-state changes into this.worldState.
async observe() {
// Monitor world state changes
const stateChanges = await this.detectStateChanges();
this.updateWorldState(stateChanges);
}
// Orient: compare against the expected state; flag significant drift.
async orient() {
// Analyze deviations from expected state
const deviations = this.analyzeDeviations();
if (deviations.significant) {
this.triggerReplanning(deviations);
}
}
// Decide: replan when the orient phase requested it.
async decide() {
if (this.needsReplanning()) {
await this.replan();
}
}
// Act: execute the next queued action of the current plan, if any.
async act() {
if (this.currentPlan && this.currentPlan.nextAction) {
await this.executeAction(this.currentPlan.nextAction);
}
}
// Replan using temporal-advantage prediction; only adopt plans with
// confidence > 0.8, and persist the trigger/solution pair for later reuse.
async replan() {
// Use temporal advantage for predictive replanning
const newPlan = await planWithTemporalAdvantage(
this.buildCurrentMatrix(),
this.getCurrentConstraints()
);
if (newPlan && newPlan.confidence > 0.8) {
this.currentPlan = newPlan;
// Store successful pattern
// NOTE(review): this.lastDeviation is read here but never assigned in
// the code visible in this file — confirm a helper sets it.
await mcp__claude_flow__memory_usage({
action: "store",
namespace: "goap-patterns",
key: `replan_${Date.now()}`,
value: JSON.stringify({
trigger: this.lastDeviation,
solution: newPlan,
worldState: Array.from(this.worldState.entries())
})
});
}
}
}class PlanningLearner {
// Learns from executed plans: successful plans are stored and used to train
// a small feedforward policy network; failures are analyzed separately.
// calculateEffectiveness, storeSuccessPattern, analyzeFailure,
// encodeSituation, calculateSimilarity, getStateSpaceSize and
// getActionSpaceSize are defined elsewhere — not visible in this file.
async learnFromExecution(executedPlan, outcome) {
// Analyze plan effectiveness
const effectiveness = this.calculateEffectiveness(executedPlan, outcome);
if (effectiveness.success) {
// Store successful pattern
await this.storeSuccessPattern(executedPlan, effectiveness);
// Train neural network on successful patterns
// (state vector in, softmax distribution over actions out)
await mcp__flow_nexus__neural_train({
config: {
architecture: {
type: "feedforward",
layers: [
{ type: "input", size: this.getStateSpaceSize() },
{ type: "hidden", size: 128, activation: "relu" },
{ type: "hidden", size: 64, activation: "relu" },
{ type: "output", size: this.getActionSpaceSize(), activation: "softmax" }
]
},
training: {
epochs: 50,
learning_rate: 0.001,
batch_size: 32
}
},
tier: "small"
});
} else {
// Analyze failure patterns
await this.analyzeFailure(executedPlan, outcome);
}
}
// Fetch up to 10 stored patterns matching the encoded situation and rank
// them by similarity weighted by past success rate, best first.
async retrieveSimilarPatterns(currentSituation) {
// Search for similar successful patterns
const patterns = await mcp__claude_flow__memory_search({
pattern: `situation:${this.encodeSituation(currentSituation)}`,
namespace: "goap-patterns",
limit: 10
});
// Rank by similarity and success rate
// NOTE(review): assumes each stored result carries `context` and
// `successRate` fields — confirm against the storage format used above.
return patterns.results
.map(p => ({ ...p, similarity: this.calculateSimilarity(currentSituation, p.context) }))
.sort((a, b) => b.similarity * b.successRate - a.similarity * a.successRate);
}
}class GOAPBehaviorTree {
// Behavior-tree wrapper around GOAP planning. The root selector tries, in
// order: (1) execute an existing valid plan, (2) generate then execute a new
// plan, (3) fall back to failure handling. SelectorNode/SequenceNode/
// ConditionNode/ActionNode, executePlan, worldStateChanged,
// buildPlanningMatrix, extractConstraints, decodeSolution and
// handlePlanningFailure are defined elsewhere — not visible in this file.
constructor() {
this.root = new SelectorNode([
new SequenceNode([
new ConditionNode(() => this.hasValidPlan()),
new ActionNode(() => this.executePlan())
]),
new SequenceNode([
new ActionNode(() => this.generatePlan()),
new ActionNode(() => this.executePlan())
]),
new ActionNode(() => this.handlePlanningFailure())
]);
}
// Advance the tree by one evaluation step.
async tick() {
return await this.root.execute();
}
// A plan is valid while it exists, is flagged valid, and the world has not
// changed under it. (this.currentPlan is first set in generatePlan, so this
// is falsy until a plan has been generated.)
hasValidPlan() {
return this.currentPlan &&
this.currentPlan.isValid &&
!this.worldStateChanged();
}
// Solve the planning system and decode it into an action plan, recording
// planning latency and a residual-based confidence score.
async generatePlan() {
const startTime = performance.now();
// Use sublinear solver for rapid planning
const planMatrix = this.buildPlanningMatrix();
const constraints = this.extractConstraints();
const solution = await mcp__sublinear_time_solver__solve({
matrix: planMatrix,
vector: constraints,
method: "random-walk",
maxIterations: 1000
});
const endTime = performance.now();
this.currentPlan = {
actions: this.decodeSolution(solution.solution),
confidence: solution.residual < 1e-6 ? 0.95 : 0.7,
planningTime: endTime - startTime,
isValid: true
};
// NOTE(review): currentPlan was just assigned an object, so this is always
// true — a genuine failure path (returning false) appears to be missing.
return this.currentPlan !== null;
}
}class UtilityPlanner {
// Multi-objective action selection: scores each action on time, cost, risk
// and goal alignment (weights sum to 1.0), then picks the best action via
// the sublinear solver. buildUtilityMatrix and the per-dimension estimators
// are defined elsewhere — not visible in this file.
constructor() {
this.utilityWeights = {
timeEfficiency: 0.3,
resourceCost: 0.25,
riskLevel: 0.2,
goalAlignment: 0.25
};
}
// Score all candidate actions in parallel, solve for the preference-weighted
// optimum, and return the winning action.
async selectOptimalAction(availableActions, currentState, goalState) {
const utilities = await Promise.all(
availableActions.map(action => this.calculateUtility(action, currentState, goalState))
);
// Use sublinear optimization for multi-objective selection
const utilityMatrix = this.buildUtilityMatrix(utilities);
// Object.values follows the literal's key order above — keep the weight
// keys in sync with buildUtilityMatrix's column order.
const preferenceVector = Object.values(this.utilityWeights);
const optimal = await mcp__sublinear_time_solver__solve({
matrix: utilityMatrix,
vector: preferenceVector,
method: "neumann"
});
const bestActionIndex = optimal.solution.indexOf(Math.max(...optimal.solution));
return availableActions[bestActionIndex];
}
// Compute the four per-dimension utilities for one action plus their
// weighted total; time and risk estimators are async.
async calculateUtility(action, currentState, goalState) {
const timeUtility = await this.estimateTimeUtility(action);
const costUtility = this.calculateCostUtility(action);
const riskUtility = await this.assessRiskUtility(action, currentState);
const goalUtility = this.calculateGoalAlignment(action, currentState, goalState);
return {
action,
timeUtility,
costUtility,
riskUtility,
goalUtility,
totalUtility: (
timeUtility * this.utilityWeights.timeEfficiency +
costUtility * this.utilityWeights.resourceCost +
riskUtility * this.utilityWeights.riskLevel +
goalUtility * this.utilityWeights.goalAlignment
)
};
}
}// Goal: Launch a new product feature
// --- Usage example: decompose a product-launch goal and optimize ordering ---
// NOTE(review): `resourceConstraints` is not defined in this snippet, and
// the top-level `await` requires an ES-module context.
const productLaunchGoal = {
objective: "Launch authentication system",
constraints: ["2 week deadline", "high security", "user-friendly"],
resources: ["3 developers", "1 designer", "$10k budget"]
};
// Decompose into actionable sub-goals
const subGoals = [
"Design user interface",
"Implement backend authentication",
"Create security tests",
"Deploy to production",
"Monitor system performance"
];
// Build dependency matrix
const dependencyMatrix = buildDependencyMatrix(subGoals);
// Optimize execution order
const optimizedPlan = await mcp__sublinear_time_solver__solve({
matrix: dependencyMatrix,
vector: resourceConstraints,
method: "neumann"
});// Multiple competing objectives
// --- Usage example: multi-objective prioritization via personalized PageRank ---
// buildObjectiveGraph / optimizeResourceAllocation are defined elsewhere;
// the urgency values act as the PageRank personalization vector.
const objectives = [
{ name: "reduce_costs", weight: 0.3, urgency: 0.7 },
{ name: "improve_quality", weight: 0.4, urgency: 0.8 },
{ name: "increase_speed", weight: 0.3, urgency: 0.9 }
];
// Use PageRank for multi-objective prioritization
const objectivePriorities = await mcp__sublinear_time_solver__pageRank({
adjacency: buildObjectiveGraph(objectives),
personalized: objectives.map(o => o.urgency)
});
// Allocate resources based on priorities
const resourceAllocation = optimizeResourceAllocation(objectivePriorities);// Predict market conditions before they change
// --- Usage example: temporal-advantage market prediction ---
// NOTE(review): marketTrendMatrix, currentMarketState,
// generateStrategicActions and executeWithTemporalLead are not defined in
// this snippet — they are placeholders for caller-supplied data/helpers.
const marketPrediction = await mcp__sublinear_time_solver__predictWithTemporalAdvantage({
matrix: marketTrendMatrix,
vector: currentMarketState,
distanceKm: 20000 // Global market data propagation
});
// Plan actions based on predictions
const strategicActions = generateStrategicActions(marketPrediction);
// Execute with temporal advantage
const results = await executeWithTemporalLead(strategicActions);// Initialize coordinated swarm
// --- Usage example: mesh-swarm coordination for a recommendation system ---
const coordinatedSwarm = await mcp__flow_nexus__swarm_init({
topology: "mesh",
maxAgents: 12,
strategy: "specialized"
});
// Spawn specialized agents for different goal aspects
// (spawned in parallel; handles collected but not used further below)
const agents = await Promise.all([
mcp__flow_nexus__agent_spawn({ type: "researcher", capabilities: ["data_analysis"] }),
mcp__flow_nexus__agent_spawn({ type: "coder", capabilities: ["implementation"] }),
mcp__flow_nexus__agent_spawn({ type: "optimizer", capabilities: ["performance"] })
]);
// Coordinate goal achievement
const coordinatedExecution = await mcp__flow_nexus__task_orchestrate({
task: "Build and optimize recommendation system",
strategy: "adaptive",
maxAgents: 3
});// Monitor execution progress
// --- Usage example: monitor execution and replan on significant deviation ---
// NOTE(review): currentExecutionId, threshold, updateConstraintMatrix,
// updatedObjectives and implementRevisedPlan are not defined in this snippet;
// `method: "adaptive"` is not among the solver methods used elsewhere in
// this file — verify it is supported.
const executionStatus = await mcp__flow_nexus__task_status({
taskId: currentExecutionId,
detailed: true
});
// Detect deviations from plan
if (executionStatus.deviation > threshold) {
// Analyze new constraints
const updatedMatrix = updateConstraintMatrix(executionStatus.changes);
// Generate new optimal plan
const revisedPlan = await mcp__sublinear_time_solver__solve({
matrix: updatedMatrix,
vector: updatedObjectives,
method: "adaptive"
});
// Implement revised plan
await implementRevisedPlan(revisedPlan);
}// Well-structured goal definition
// Template for defining goals the planner can optimize: a measurable
// objective plus explicit pre/postconditions, constraints, metrics and
// inter-goal dependencies. The array values here are descriptive placeholders.
const optimizedGoal = {
objective: "Clear and measurable outcome",
preconditions: ["List of required starting states"],
postconditions: ["List of desired end states"],
constraints: ["Time, resource, and quality constraints"],
metrics: ["Quantifiable success measures"],
dependencies: ["Relationships with other goals"]
};// Robust plan execution with fallbacks
// Execute the plan; on failure, build and run a contingency plan instead.
// FIXME(review): `return` is a syntax error at top level — this snippet is
// meant to live inside an async function body. `originalGoal` is also not
// defined in this snippet.
try {
const result = await executePlan(optimizedPlan);
return result;
} catch (error) {
// Generate contingency plan
const contingencyPlan = await generateContingencyPlan(error, originalGoal);
return await executePlan(contingencyPlan);
}const plannerConfig = {
// Central planner tuning knobs; the utility weights sum to 1.0.
searchAlgorithm: "a_star", // a_star, dijkstra, greedy
heuristicFunction: "manhattan", // manhattan, euclidean, custom
maxSearchDepth: 20, // maximum search expansion depth
planningTimeout: 30000, // 30 seconds
convergenceEpsilon: 1e-6, // solver convergence tolerance
temporalAdvantageThreshold: 0.8, // min confidence to adopt a predictive plan
utilityWeights: {
time: 0.3,
cost: 0.3,
risk: 0.2,
quality: 0.2
}
};class RobustPlanner extends GOAPAgent {
// Maps known planning-failure types to recovery strategies; anything
// unrecognized falls back to simple planning. GOAPAgent and the recovery
// helpers are defined elsewhere — not visible in this file.
// NOTE(review): assumes `error.type` carries these string codes — confirm
// against the solver's actual error shape.
async handlePlanningFailure(error, context) {
switch (error.type) {
case 'MATRIX_SINGULAR':
// Singular system: regularize the matrix and retry.
return await this.regularizeMatrix(context.matrix);
case 'NO_CONVERGENCE':
// Solver failed to converge: loosen constraints and retry.
return await this.relaxConstraints(context.constraints);
case 'TIMEOUT':
// Out of time: accept the best approximate solution available.
return await this.useApproximateSolution(context);
default:
return await this.fallbackToSimplePlanning(context);
}
}
}

Leverage light-speed delays for predictive planning:
This goal-planner agent represents the cutting edge of AI-driven objective achievement, combining mathematical rigor with practical execution capabilities through the powerful sublinear-time-solver toolkit and Claude Flow ecosystem.
b2618f9
If you maintain this skill, you can claim it as your own. Once claimed, you can manage eval scenarios, bundle related skills, attach documentation or rules, and ensure cross-agent compatibility.