Skip to content

Runtime Examples

Practical examples for using StrataRouter Runtime in production. The snippets below use `await`, so they must run inside an async function (for example, driven by `asyncio.run`).

Basic Execution

from stratarouter import Router
from stratarouter_runtime import CoreRuntimeBridge

# Setup
router = Router()
router.add_routes([...])
router.build_index(embeddings)

bridge = CoreRuntimeBridge(core_router=router)

# Execute
result = await bridge.execute("Where's my invoice?")
print(f"Response: {result.response}")
print(f"Cost: ${result.cost_usd:.4f}")

With Caching

from stratarouter_runtime import RuntimeConfig

config = RuntimeConfig(
    cache_enabled=True,
    cache_backend="redis"
)

bridge = CoreRuntimeBridge(core_router=router, config=config)

# First call - cache miss
result1 = await bridge.execute(query)
print(f"Latency: {result1.latency_ms}ms")  # ~50ms

# Second call - cache hit!
result2 = await bridge.execute(query)
print(f"Latency: {result2.latency_ms}ms")  # ~5ms

Production Setup

config = RuntimeConfig(
    # Execution
    execution_timeout=60,
    max_retries=3,

    # Cache
    cache_enabled=True,
    cache_backend="redis",
    cache_ttl=3600,

    # Batch
    batch_enabled=True,
    batch_window_ms=50,

    # State
    state_backend="postgresql",

    # Observability
    metrics_enabled=True,
    tracing_enabled=True
)

bridge = CoreRuntimeBridge(core_router=router, config=config)

Multi-Provider with Fallback

from stratarouter_runtime import LLMClientRegistry

registry = LLMClientRegistry()
registry.register("openai", OpenAIClient(api_key="..."))
registry.register("anthropic", AnthropicClient(api_key="..."))

result = await registry.complete(
    primary="openai",
    fallback=["anthropic"],
    messages=[...]
)

Cost Tracking

# Track costs automatically
total_cost = 0

for query in queries:
    result = await bridge.execute(query)
    total_cost += result.cost_usd
    print(f"Query cost: ${result.cost_usd:.4f}")

print(f"Total cost: ${total_cost:.2f}")

Error Handling

from stratarouter_runtime import (
    ExecutionTimeout,
    ProviderError,
    CacheError
)

try:
    result = await bridge.execute(query)
except ExecutionTimeout:
    logger.error("Request timed out")
except ProviderError as e:
    logger.error(f"Provider failed: {e}")
except CacheError:
    logger.warning("Cache unavailable, proceeding without cache")

Runtime Index | Python API