Valmi Value — Framework Integration Guide

LangGraph Integration

Integrate Valmi Value with LangGraph agents:
from valmi_value import ValueClient
from langgraph.graph import StateGraph

value = ValueClient(api_key="sk_live_abc123xyz")

def agent_node(state):
    """Run the LLM on the conversation state and meter the call with Valmi.

    Sends one ``llm_call`` action per invocation, carrying token usage and
    the estimated cost taken from the LLM result.
    """
    # Invoke the model on the accumulated messages.
    result = llm.invoke(state["messages"])

    # Report usage to Valmi Value before handing control back to the graph.
    usage = result.usage
    value.send_action(
        agent_key="my-langgraph-agent",
        action_type="llm_call",
        metadata={
            "model": "gpt-4",
            "input_tokens": usage.prompt_tokens,
            "output_tokens": usage.completion_tokens,
            "cost_usd": calculate_cost(usage),
        },
    )

    return {"messages": result}

LangGraph Middleware

Create middleware for automatic metering:
from valmi_value import ValueClient

class ValmiMiddleware:
    """Middleware that automatically meters LLM calls made by a LangGraph agent.

    NOTE(review): the API key is hardcoded in this example — in a real
    deployment it should presumably come from configuration; confirm.
    """

    def __init__(self, agent_key):
        # One client per middleware instance; every action shares the agent key.
        self.value = ValueClient(api_key="sk_live_abc123xyz")
        self.agent_key = agent_key

    def on_llm_call(self, state, result):
        """Record a single LLM invocation (model + token usage) as an action."""
        usage = result.usage
        payload = {
            "model": result.model,
            "input_tokens": usage.prompt_tokens,
            "output_tokens": usage.completion_tokens,
        }
        self.value.send_action(
            agent_key=self.agent_key,
            action_type="llm_call",
            metadata=payload,
        )

CrewAI Integration

Integrate with CrewAI agents:
from valmi_value import ValueClient
from crewai import Agent, Task, Crew

value = ValueClient(api_key="sk_live_abc123xyz")

class MeteredAgent(Agent):
    """CrewAI agent whose every task execution is reported to Valmi Value."""

    def execute(self, task):
        """Delegate to ``Agent.execute`` and then emit one metering action.

        The action is tagged with the task description, the number of tools
        available to this agent, and the reported execution time.
        """
        result = super().execute(task)

        value.send_action(
            agent_key="crew-research-agent",
            action_type="agent_execution",
            metadata={
                "task": task.description,
                "tools_used": len(self.tools),
                "execution_time_ms": result.execution_time,
            },
        )

        return result

CrewAI Task Metering

Meter individual tasks:
def execute_task_with_metering(task, agent):
    """Execute a CrewAI task and meter it as a ``task_execution`` action.

    Wall-clock duration is measured around ``agent.execute`` and reported in
    milliseconds alongside the task id and the result's success flag.
    """
    started = time.time()
    result = agent.execute(task)
    elapsed_ms = (time.time() - started) * 1000

    value.send_action(
        agent_key="crew-agent",
        action_type="task_execution",
        metadata={
            "task_id": task.id,
            "execution_time_ms": elapsed_ms,
            "success": result.success,
        },
    )

    return result

n8n Integration

Integrate with n8n workflows:
from valmi_value import ValueClient

value = ValueClient(api_key="sk_live_abc123xyz")

# In your n8n workflow node
async def execute(self):
    """n8n workflow node entry point: process the input data and meter the run.

    NOTE(review): assumes ``self`` exposes ``inputData``, ``workflow``,
    ``node`` and ``executedNodes`` — confirm against the n8n node API.
    """
    started = time.time()

    # Run the node's actual work.
    result = await process_data(self.inputData)

    elapsed_ms = (time.time() - started) * 1000

    # One "workflow_execution" action per run, tagged with workflow context.
    value.send_action(
        agent_key="n8n-workflow",
        action_type="workflow_execution",
        metadata={
            "workflow_id": self.workflow.id,
            "node_id": self.node.id,
            "execution_time_ms": elapsed_ms,
            "nodes_executed": len(self.executedNodes),
        },
    )

    return result

n8n Custom Node

Create a custom n8n node for metering:
class ValmiMeterNode:
    """Custom n8n node that forwards its configured parameters as a Valmi action."""

    def execute(self, data):
        """Send one action described entirely by the node's parameters.

        The ``metadata`` parameter is expected to be a JSON string and is
        parsed before sending. The input ``data`` passes through unchanged.
        """
        client = ValueClient(api_key=self.credentials.api_key)
        client.send_action(
            agent_key=self.parameters.agent_key,
            action_type=self.parameters.action_type,
            metadata=json.loads(self.parameters.metadata),
        )
        return data

Custom Integration

Integrate with any Python application:
from valmi_value import ValueClient

value = ValueClient(api_key="sk_live_abc123xyz")

def my_ai_function(input_data):
    """Process ``input_data`` with the AI pipeline and meter the operation.

    Reports input/output sizes and processing time as an ``ai_processing``
    action, then returns the result unchanged.
    """
    result = process_with_ai(input_data)

    stats = {
        "input_size": len(input_data),
        "output_size": len(result),
        "processing_time_ms": calculate_time(),
    }
    value.send_action(
        agent_key="my-custom-agent",
        action_type="ai_processing",
        metadata=stats,
    )

    return result

Decorator Pattern

Use decorators for automatic metering:
from valmi_value import ValueClient
from functools import wraps

value = ValueClient(api_key="sk_live_abc123xyz")

def meter_action(agent_key, action_type):
    """Decorator factory: meter every call of the wrapped function.

    Each invocation sends one action carrying the function name and the
    wall-clock duration in milliseconds; the wrapped function's return
    value is passed through untouched.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            started = time.time()
            outcome = func(*args, **kwargs)
            elapsed_ms = (time.time() - started) * 1000

            value.send_action(
                agent_key=agent_key,
                action_type=action_type,
                metadata={
                    "function": func.__name__,
                    "execution_time_ms": elapsed_ms,
                },
            )

            return outcome
        return wrapper
    return decorator

# Use the decorator
@meter_action("my-agent", "custom_function")
def my_function():
    """Example function metered via ``meter_action``; body is a placeholder."""
    # Your code
    pass

Context Manager Pattern

Use context managers for scoped metering:
from valmi_value import ValueClient

value = ValueClient(api_key="sk_live_abc123xyz")

class MeteredContext:
    """Context manager that meters the wall-clock duration of its ``with`` body.

    On exit it sends one action whose metadata holds the elapsed time in
    milliseconds. Exceptions raised inside the block are not suppressed
    (``__exit__`` returns None).
    """

    def __init__(self, agent_key, action_type):
        self.agent_key = agent_key
        self.action_type = action_type
        # Set when the block is entered; None until then.
        self.start_time = None

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, *args):
        elapsed_ms = (time.time() - self.start_time) * 1000
        value.send_action(
            agent_key=self.agent_key,
            action_type=self.action_type,
            metadata={"execution_time_ms": elapsed_ms},
        )

# Use the context manager: the wall-clock time of the with-body is metered on exit
with MeteredContext("my-agent", "processing"):
    # Your code
    process_data()