Skip to content

Implementing CORE Profile

This guide explains how to implement the ARAL-CORE conformance profile, covering layers L1-L5.


The CORE profile is the foundational conformance level for ARAL agents. It covers:

  • ✅ Layer 1: Runtime (L1)
  • ✅ Layer 2: Memory (L2)
  • ✅ Layer 3: Capabilities (L3)
  • ✅ Layer 4: Reasoning (L4)
  • ✅ Layer 5: Persona (L5)

  1. Runtime Layer

    • Unique agent instance ID
    • Graceful shutdown with timeout
    • Health check endpoint
    • Metrics endpoint (Prometheus format)
    • Structured logging
  2. Memory Layer

    • Working memory implementation
    • TTL-based expiration
    • Size limits with eviction policy
    • Concurrent access safety
    • No plaintext secrets
  3. Capabilities Layer

    • Input/output schema validation
    • Permission declarations
    • Timeout handling
    • Error handling
    • Invocation logging
  4. Reasoning Layer

    • Structured decision output
    • Confidence scores
    • Respect persona constraints
    • Chain-of-thought logging
    • Request timeout
  5. Persona Layer

    • Unique identifier
    • Capability constraints (allow/deny)
    • Runtime immutability
    • Startup validation
    • Version tracking

{
"agent_id": "core-compliant-agent",
"version": "1.0.0",
"conformance_profile": "ARAL-CORE",
"runtime": {
"max_memory_mb": 512,
"max_cpu_percent": 80,
"shutdown_timeout_ms": 30000,
"request_timeout_ms": 60000
},
"health_check": {
"enabled": true,
"port": 8080,
"path": "/health"
},
"metrics": {
"enabled": true,
"port": 9090,
"format": "prometheus"
}
}
from flask import Flask, Response, jsonify
import time

app = Flask(__name__)
start_time = time.time()  # process start, used to report uptime


@app.route('/health')
def health():
    """L1 health check: return a JSON status document on /health."""
    return jsonify({
        "status": "healthy",
        "timestamp": time.time(),
        "agent_id": "core-compliant-agent",
        "uptime_seconds": int(time.time() - start_time),
        "version": "1.0.0"
    })


@app.route('/metrics')
def metrics():
    """L1 metrics endpoint in Prometheus text exposition format."""
    body = f"""\
# HELP agent_uptime_seconds Agent uptime
# TYPE agent_uptime_seconds counter
agent_uptime_seconds {int(time.time() - start_time)}
# HELP agent_requests_total Total requests
# TYPE agent_requests_total counter
agent_requests_total 42
"""
    # `42` is a placeholder for the example — wire it to a real request counter.
    # Returning a bare str would be served as text/html; Prometheus scrapers
    # expect the text exposition content type.
    return Response(body, mimetype="text/plain; version=0.0.4")

from datetime import datetime, timedelta
from threading import Lock
import json
class ARALMemory:
    """L2 working memory: TTL-expiring key/value store with size-based eviction.

    All store access is serialized through a single Lock, so the instance is
    safe for concurrent use from multiple threads.
    """

    def __init__(self, max_size_mb=100, default_ttl_seconds=3600):
        self.max_size_mb = max_size_mb          # soft cap on serialized store size
        self.default_ttl = default_ttl_seconds  # TTL used when set() gets none
        self.store = {}
        self.lock = Lock()

    def set(self, key, value, ttl_seconds=None):
        """Store `value` under `key`; it expires `ttl_seconds` from now.

        Falls back to the default TTL only when ttl_seconds is None.
        """
        with self.lock:
            # `is None` rather than `or`: a caller-supplied ttl_seconds of 0
            # must mean "expire immediately", not "use the default".
            ttl = self.default_ttl if ttl_seconds is None else ttl_seconds
            now = datetime.now()
            self.store[key] = {
                "value": value,
                "expires_at": now + timedelta(seconds=ttl),
                "created_at": now,
            }
            self._evict_if_needed()

    def get(self, key):
        """Return the stored value, or None if the key is absent or expired."""
        with self.lock:
            entry = self.store.get(key)
            if entry is None:
                return None
            if datetime.now() > entry["expires_at"]:
                # Lazy expiry: drop the entry on first read past its deadline.
                del self.store[key]
                return None
            return entry["value"]

    def _evict_if_needed(self):
        """Evict the oldest quarter of entries when the size cap is exceeded.

        Eviction is oldest-created-first (FIFO), not true LRU: reads do not
        refresh an entry's position. Caller must hold self.lock.
        """
        # default=str keeps the datetime fields serializable; the original
        # json.dumps raised TypeError on them, so eviction always crashed.
        current_size = len(json.dumps(self.store, default=str)) / (1024 * 1024)
        if current_size > self.max_size_mb:
            oldest_first = sorted(self.store, key=lambda k: self.store[k]["created_at"])
            for key in oldest_first[:len(oldest_first) // 4]:
                del self.store[key]

from jsonschema import validate, ValidationError
class Capability:
    """L3 capability: a named, schema-validated, permissioned operation."""

    def __init__(self, id, name, input_schema, output_schema, permissions, handler):
        self.id = id
        self.name = name
        self.input_schema = input_schema    # JSON Schema applied to params
        self.output_schema = output_schema  # JSON Schema applied to the result
        self.permissions = permissions
        self.handler = handler              # callable(params) -> result

    def invoke(self, params, trace_id):
        """Invoke the capability with input/output validation and logging.

        Returns {"success": True, "result": ...} or
        {"success": False, "error": {"code": ..., "message": ...}}.
        The execution-error message is deliberately generic so internal
        details are not leaked to the caller.
        """
        # Validate input before touching the handler.
        try:
            validate(instance=params, schema=self.input_schema)
        except ValidationError as e:
            return {
                "success": False,
                "error": {
                    "code": "invalid_input",
                    "message": str(e)
                }
            }
        try:
            result = self.handler(params)
            # Output-schema violations fall through to the generic handler below.
            validate(instance=result, schema=self.output_schema)
            self._log_invocation(params, result, trace_id)
            return {
                "success": True,
                "result": result
            }
        except Exception:
            # CORE requires invocation logging: record failures too, not just
            # successes (the original only logged the success path).
            self._log_invocation(params, None, trace_id, success=False)
            return {
                "success": False,
                "error": {
                    "code": "execution_error",
                    "message": "Internal error occurred"
                }
            }

    def _log_invocation(self, params, result, trace_id, success=True):
        """Emit one structured JSON log line for this invocation."""
        log_entry = {
            "timestamp": datetime.now().isoformat(),
            "trace_id": trace_id,
            "capability_id": self.id,
            "params": params,
            "success": success
        }
        print(json.dumps(log_entry))

from anthropic import Anthropic
class ReasoningLayer:
    """L4 reasoning: asks an LLM to pick a capability, returns a structured decision."""

    def __init__(self, llm_client, max_depth=5):
        self.llm = llm_client       # Anthropic-style client exposing messages.create
        self.max_depth = max_depth  # reserved bound for multi-step reasoning depth

    def decide(self, input_text, context, persona, trace_id):
        """Make a decision with a confidence score and trace metadata.

        Returns the parsed decision dict augmented with decision_id,
        timestamp and trace_id; the decision is logged before returning.
        """
        import uuid  # fix: `uuid` was used below but never imported anywhere

        prompt = self._build_prompt(input_text, context, persona)
        response = self.llm.messages.create(
            model="claude-3-sonnet-20240229",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=1000
        )
        # NOTE(review): _parse_decision and _log_decision are not defined in
        # this snippet — they must be supplied elsewhere for decide() to run.
        decision = self._parse_decision(response.content[0].text)
        decision.update({
            "decision_id": str(uuid.uuid4()),
            "timestamp": datetime.now().isoformat(),
            "trace_id": trace_id
        })
        self._log_decision(decision)
        return decision

    def _build_prompt(self, input_text, context, persona):
        """Build the LLM prompt, embedding the persona's capability constraints."""
        return f"""
You are {persona.name}. Your role is {persona.role}.
Allowed capabilities: {persona.constraints.allowed_capabilities}
Denied capabilities: {persona.constraints.denied_capabilities}
Context: {context}
User input: {input_text}
Decide which capability to invoke. Respond with JSON:
{{
"action": "capability_id",
"parameters": {{}},
"confidence": 0.95,
"rationale": "explanation"
}}
"""

import json
from jsonschema import validate
# JSON Schema for an L5 persona document; enforced at construction time by
# Persona.__init__ via jsonschema.validate.
PERSONA_SCHEMA = {
"type": "object",
# All five top-level fields are mandatory.
"required": ["id", "name", "role", "version", "constraints"],
"properties": {
"id": {"type": "string"},
"name": {"type": "string"},
"role": {"type": "string"},
"version": {"type": "string"},
"constraints": {
"type": "object",
# Both capability lists must be present (they may be empty arrays).
"required": ["allowed_capabilities", "denied_capabilities"],
"properties": {
# NOTE(review): array item types are not constrained here — presumably
# capability-id strings (with "*" as wildcard); confirm against the spec.
"allowed_capabilities": {"type": "array"},
"denied_capabilities": {"type": "array"}
}
}
}
}
class Persona:
    """L5 persona: schema-validated, immutable identity plus capability constraints."""

    def __init__(self, persona_dict):
        """Validate `persona_dict` against PERSONA_SCHEMA and freeze the instance.

        Raises jsonschema.ValidationError on an invalid document.
        """
        validate(instance=persona_dict, schema=PERSONA_SCHEMA)
        self.id = persona_dict["id"]
        self.name = persona_dict["name"]
        self.role = persona_dict["role"]
        self.version = persona_dict["version"]
        self.constraints = persona_dict["constraints"]
        # Freeze last: once set, __setattr__ rejects every further write.
        self._frozen = True

    def __setattr__(self, name, value):
        # Enforce the CORE "runtime immutability" requirement. The original
        # only set a _frozen flag without actually blocking mutation.
        if getattr(self, "_frozen", False):
            raise AttributeError(f"Persona is immutable; cannot set {name!r}")
        super().__setattr__(name, value)

    @classmethod
    def from_file(cls, filepath):
        """Load and validate a persona from a JSON file."""
        with open(filepath, 'r') as f:
            return cls(json.load(f))

    def allows_capability(self, capability_id):
        """Return True if the persona permits this capability.

        The deny list wins over the allow list; "*" is a wildcard in either.
        """
        allowed = self.constraints["allowed_capabilities"]
        denied = self.constraints["denied_capabilities"]
        if capability_id in denied or "*" in denied:
            return False
        return "*" in allowed or capability_id in allowed

Terminal window
# Install test harness
pip install aral-test-harness
# Run CORE profile tests
aral-test --agent-url http://localhost:8080 --profile CORE --output report.json
# View results
cat report.json | jq '.results[] | select(.passed == false)'
  • Health endpoint returns 200
  • Metrics endpoint returns Prometheus format
  • Memory respects TTL
  • Capabilities validate inputs
  • Decisions include confidence scores
  • Persona constraints are enforced
  • All layers log structured JSON
  • Agent shuts down gracefully

Solution: Ensure health endpoint is running and accessible:

Terminal window
curl -v http://localhost:8080/health

Solution: Verify the memory layer's eviction policy — confirm the size limit and default TTL are configured:

memory = ARALMemory(max_size_mb=100, default_ttl_seconds=3600)

Solution: Validate persona at startup:

persona = Persona.from_file("persona.json")
agent.set_persona(persona) # This must validate


© 2026 IbIFACE — CC BY 4.0