Testing MCP Adapters

Comprehensive guide to testing MCP adapters with unit tests, integration tests, and Claude Desktop validation.

Testing Strategy

Unit Tests → Integration Tests → MCP Protocol Tests → E2E with Agent
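
Each stage can map to its own test command. Below is a minimal Vitest configuration sketch that keeps the fast unit suite separate from the slower integration suite; the tests/integration path, timeout, and script split are assumptions about the project layout rather than confirmed settings.

// vitest.config.ts — sketch only; adjust globs to the project's actual layout
import { configDefaults, defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    include: ['tests/**/*.test.ts'],
    // Keep the slower integration suite out of `npm test`; run it via `npm run test:integration`
    exclude: [...configDefaults.exclude, 'tests/integration/**'],
    testTimeout: 10_000, // MCP round-trips can exceed the 5s default
  },
});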

Unit Testing

Test Tool Implementations

File: tests/tools.test.ts

import { describe, it, expect, vi, beforeEach } from 'vitest';
import { AvigilonClient } from '../src/vendor-client.js';
import { SafetyChecker } from '../src/safety.js';

describe('Avigilon Adapter Tools', () => {
  let client: AvigilonClient;

  beforeEach(() => {
    // Use mock mode
    process.env.ENABLE_MOCK_MODE = 'true';
    client = new AvigilonClient();
  });

  it('should detect persons in zone', async () => {
    const result = await client.detectPersons('entrance', 0.8);

    expect(result).toHaveLength(1);
    expect(result[0].zone).toBe('entrance');
    expect(result[0].confidence).toBeGreaterThanOrEqual(0.8);
  });

  it('should track person across cameras', async () => {
    const tracks = await client.trackPerson('person-123', 300);

    expect(tracks).toHaveLength(1);
    expect(tracks[0].person_id).toBe('person-123');
  });
});

Test Schema Validation

import { describe, it, expect } from 'vitest';
import { DetectPersonsSchema } from '../src/types.js';

describe('Schema Validation', () => {
  it('should validate correct params', () => {
    const params = {
      zone: 'entrance',
      confidence_threshold: 0.8
    };

    const result = DetectPersonsSchema.parse(params);

    expect(result.zone).toBe('entrance');
    expect(result.confidence_threshold).toBe(0.8);
  });

  it('should reject invalid confidence', () => {
    const params = {
      zone: 'entrance',
      confidence_threshold: 1.5 // Invalid: > 1.0
    };

    expect(() => DetectPersonsSchema.parse(params)).toThrow();
  });

  it('should use default values', () => {
    const params = { zone: 'entrance' };

    const result = DetectPersonsSchema.parse(params);

    expect(result.confidence_threshold).toBe(0.8); // Default
  });
});

Mock Vendor API

import axios from 'axios';
import MockAdapter from 'axios-mock-adapter';
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { AvigilonClient } from '../src/vendor-client.js';

describe('Vendor API Integration', () => {
  let mockAxios: MockAdapter;
  let client: AvigilonClient;

  beforeEach(() => {
    process.env.ENABLE_MOCK_MODE = 'false';
    mockAxios = new MockAdapter(axios);
    client = new AvigilonClient('http://test-api', 'test-key');
  });

  afterEach(() => {
    mockAxios.restore();
  });

  it('should call vendor API correctly', async () => {
    mockAxios.onPost('/analytics/detect').reply(200, {
      detections: [{
        detection_id: 'det-1',
        person_id: 'person-1',
        zone: 'entrance',
        confidence: 0.95
      }]
    });

    const result = await client.detectPersons('entrance', 0.8);

    expect(result).toHaveLength(1);
    expect(result[0].detection_id).toBe('det-1');
  });

  it('should handle API errors', async () => {
    mockAxios.onPost('/analytics/detect').networkError();

    await expect(
      client.detectPersons('entrance', 0.8)
    ).rejects.toThrow();
  });
});

Integration Testing

Test MCP Protocol

import { spawn, ChildProcess } from 'child_process';
import { describe, it, expect, beforeAll, afterAll } from 'vitest';

// Send one JSON-RPC request over stdio and resolve with the next response line
function sendRequest(proc: ChildProcess, request: object): Promise<any> {
  return new Promise((resolve) => {
    proc.stdout!.once('data', (data) => resolve(JSON.parse(data.toString())));
    proc.stdin!.write(JSON.stringify(request) + '\n');
  });
}

describe('MCP Server Integration', () => {
  let serverProcess: ChildProcess;

  beforeAll(async () => {
    // Start the MCP server as a child process speaking JSON-RPC over stdio
    serverProcess = spawn('node', ['dist/index.js'], {
      stdio: ['pipe', 'pipe', 'pipe'],
      env: { ...process.env, ENABLE_MOCK_MODE: 'true' }
    });

    // Give the server a moment to start
    await new Promise(resolve => setTimeout(resolve, 1000));
  });

  afterAll(() => {
    serverProcess.kill();
  });

  it('should list tools', async () => {
    const response = await sendRequest(serverProcess, {
      jsonrpc: '2.0',
      id: 1,
      method: 'tools/list'
    });

    expect(response.result.tools).toBeDefined();
    expect(response.result.tools.length).toBeGreaterThan(0);
    expect(response.result.tools[0]).toHaveProperty('name');
    expect(response.result.tools[0]).toHaveProperty('description');
    expect(response.result.tools[0]).toHaveProperty('inputSchema');
  });

  it('should call detect_persons tool', async () => {
    const response = await sendRequest(serverProcess, {
      jsonrpc: '2.0',
      id: 2,
      method: 'tools/call',
      params: {
        name: 'detect_persons',
        arguments: {
          zone: 'entrance',
          confidence_threshold: 0.8
        }
      }
    });

    const result = JSON.parse(response.result.content[0].text);

    expect(result.status).toBe('SUCCESS');
    expect(result.detections).toBeDefined();
  });
});

Test OPA Integration

import { describe, it, expect } from 'vitest';
import { SafetyChecker } from '../src/safety.js';

describe('OPA Safety Integration', () => {
  it('should allow action when policy permits', async () => {
    const safety = new SafetyChecker('http://localhost:8181');

    const result = await safety.checkPolicy('camera_detect', {
      zone: 'entrance',
      agent_id: 'test-adapter'
    });

    expect(result.allowed).toBe(true);
  });

  it('should deny action when policy restricts', async () => {
    const safety = new SafetyChecker('http://localhost:8181');

    const result = await safety.checkPolicy('camera_detect', {
      zone: 'restricted_area',
      agent_id: 'test-adapter'
    });

    // Assuming the loaded policy denies restricted areas
    expect(result.allowed).toBe(false);
    expect(result.reason).toBeDefined();
  });

  it('should fail safe when OPA is unreachable', async () => {
    const safety = new SafetyChecker('http://nonexistent:8181');

    const result = await safety.checkPolicy('camera_detect', {
      zone: 'entrance'
    });

    // Should deny when OPA is unreachable (fail closed)
    expect(result.allowed).toBe(false);
    expect(result.reason).toContain('unavailable');
  });
});
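
The last test assumes the SafetyChecker fails closed. For reference, here is a minimal sketch of that behavior against OPA's data API; the citadelmesh/authz package path, response shape, timeout, and reason strings are assumptions, not the adapter's confirmed implementation.

import axios from 'axios';

export interface PolicyDecision {
  allowed: boolean;
  reason?: string;
}

export class SafetyChecker {
  constructor(private readonly opaUrl: string) {}

  async checkPolicy(action: string, input: Record<string, unknown>): Promise<PolicyDecision> {
    try {
      // OPA data API: POST /v1/data/<package path> with an { input } document
      const res = await axios.post(
        `${this.opaUrl}/v1/data/citadelmesh/authz`,
        { input: { action, ...input } },
        { timeout: 2000 }
      );
      const allowed = res.data?.result?.allow === true;
      return {
        allowed,
        reason: allowed ? undefined : (res.data?.result?.reason ?? 'denied by policy')
      };
    } catch {
      // Fail closed: deny when the policy engine cannot be reached
      return { allowed: false, reason: 'OPA unavailable' };
    }
  }
}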

Claude Desktop Testing

Test your adapter with Claude Desktop to validate real-world usage.

1. Configure Claude Desktop

File: ~/Library/Application Support/Claude/claude_desktop_config.json (macOS)

{
  "mcpServers": {
    "avigilon": {
      "command": "node",
      "args": [
        "/path/to/CitadelMesh/mcp-servers/avigilon-adapter/dist/index.js"
      ],
      "env": {
        "AVIGILON_API_URL": "https://avigilon.example.com/api/v1",
        "AVIGILON_API_KEY": "your_key",
        "OPA_URL": "http://localhost:8181",
        "ENABLE_MOCK_MODE": "true"
      }
    }
  }
}

2. Test with Claude

  1. Restart Claude Desktop

  2. Open a new conversation

  3. Check MCP tools are loaded:

    You: Can you list the available MCP tools?

    Claude: I have access to the following tools from avigilon:
    - detect_persons: Detect persons in a specific zone
    - track_person: Track a detected person across cameras
    - get_incidents: Get active security incidents
    - list_cameras: List all available cameras
  4. Test tool execution:

    You: Detect persons in the entrance zone

    Claude: [Uses detect_persons tool]
    I detected 1 person in the entrance zone with 95% confidence.
    Detection ID: det-123
    Bounding box: [100, 200, 50, 150]

3. Validate Error Handling

You: Set temperature to 90 degrees

Claude: [Adapter returns error]
I cannot set the temperature to 90°F because it's outside the safe
range of 65-78°F enforced by the safety policy.
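
A denial like this reads best when the adapter returns a structured result instead of throwing. One possible pattern is sketched below; the hvac_set_temperature action, BLOCKED status, and field names are illustrative and not the adapter's confirmed contract.

import { SafetyChecker } from '../src/safety.js';

// Illustrative only: surface a policy denial to the model as a structured tool result
export async function guardedSetTemperature(safety: SafetyChecker, setpointF: number) {
  const decision = await safety.checkPolicy('hvac_set_temperature', { setpoint_f: setpointF });

  if (!decision.allowed) {
    return {
      isError: true,
      content: [{
        type: 'text',
        text: JSON.stringify({ status: 'BLOCKED', reason: decision.reason })
      }]
    };
  }

  // ...call the vendor API and return a SUCCESS payload here
  return { content: [{ type: 'text', text: JSON.stringify({ status: 'SUCCESS' }) }] };
}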

Performance Testing

Measure Tool Execution Time

import { performance } from 'perf_hooks';
import { describe, it, expect } from 'vitest';
import { AvigilonClient } from '../src/vendor-client.js';

describe('Performance', () => {
  it('should complete detection within 500ms', async () => {
    const client = new AvigilonClient();

    const start = performance.now();
    await client.detectPersons('entrance', 0.8);
    const duration = performance.now() - start;

    expect(duration).toBeLessThan(500);
  });

  it('should handle 100 concurrent requests', async () => {
    const client = new AvigilonClient();

    const requests = Array(100).fill(null).map(() =>
      client.detectPersons('entrance', 0.8)
    );

    const start = performance.now();
    await Promise.all(requests);
    const duration = performance.now() - start;

    const throughput = 100 / (duration / 1000);
    console.log(`Throughput: ${throughput.toFixed(2)} req/s`);

    expect(throughput).toBeGreaterThan(50); // >50 req/s
  });
});

End-to-End Testing

Test the adapter with a real agent:

# tests/e2e/test_adapter_integration.py
import pytest


@pytest.mark.e2e
@pytest.mark.asyncio
async def test_agent_uses_adapter():
    """Test an agent calling the MCP adapter."""

    from agents.security.security_agent import SecurityAgent, AgentConfig

    config = AgentConfig(
        agent_id="test-agent",
        agent_type="security",
        spiffe_id="spiffe://test",
        nats_url="nats://localhost:4222",
    )

    agent = SecurityAgent(config)
    await agent.start()

    try:
        # The agent should use the Avigilon adapter
        result = await agent.process_security_scenario("entrance_monitoring")

        assert result["success"] is True
        assert "detections" in result

    finally:
        await agent.stop()

CI/CD Integration

File: .github/workflows/adapter-tests.yml

name: MCP Adapter Tests

on:
  push:
    paths:
      - 'mcp-servers/avigilon-adapter/**'
  pull_request:
    paths:
      - 'mcp-servers/avigilon-adapter/**'

jobs:
  test:
    runs-on: ubuntu-latest

    services:
      opa:
        image: openpolicyagent/opa:latest
        ports:
          - 8181:8181

    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Install dependencies
        run: |
          cd mcp-servers/avigilon-adapter
          npm ci

      - name: Build
        run: |
          cd mcp-servers/avigilon-adapter
          npm run build

      - name: Run unit tests
        run: |
          cd mcp-servers/avigilon-adapter
          npm test

      - name: Run integration tests
        run: |
          cd mcp-servers/avigilon-adapter
          npm run test:integration

      - name: Check types
        run: |
          cd mcp-servers/avigilon-adapter
          npx tsc --noEmit

Troubleshooting

Issue: MCP server not responding

// Add debug logging
server.onerror = (error) => {
  console.error('[MCP Error]', error);
};

// Log all requests
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  console.error('[Tool Call]', request.params.name, request.params.arguments);
  // ... handle the request
});

Issue: Claude Desktop can't find tools

  1. Check server starts successfully:

    node dist/index.js
    # Should not error
  2. Verify config path:

    # macOS
    cat ~/Library/Application\ Support/Claude/claude_desktop_config.json

    # Windows
    type %APPDATA%\Claude\claude_desktop_config.json
  3. Check Claude Desktop logs:

    # macOS
    tail -f ~/Library/Logs/Claude/mcp*.log

Issue: Tools fail with schema errors

// Add detailed error logging
try {
  const params = DetectPersonsSchema.parse(args);
} catch (error) {
  if (error instanceof z.ZodError) {
    console.error('Schema validation failed:', error.errors);
    throw new Error(`Invalid arguments: ${JSON.stringify(error.errors)}`);
  }
  throw error;
}

Best Practices

  1. Test in mock mode first (see the mock-mode sketch after this list)
  2. Integration test with OPA running
  3. Validate in Claude Desktop before deployment
  4. Performance test for production loads
  5. E2E test with agents for full validation
  6. Automate in CI/CD for every change
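
On point 1, the tests in this guide toggle mock behavior through ENABLE_MOCK_MODE. A minimal sketch of how the vendor client might branch on that flag is shown below; the fixture shape mirrors the mock-mode unit tests above, and the real AvigilonClient may differ.

// Sketch only: mock-mode branching inside the vendor client
export class AvigilonClient {
  private readonly mockMode = process.env.ENABLE_MOCK_MODE === 'true';

  async detectPersons(zone: string, confidenceThreshold: number) {
    if (this.mockMode) {
      // Deterministic fixture so unit tests run without vendor credentials
      return [{
        detection_id: 'det-mock-1',
        person_id: 'person-mock-1',
        zone,
        confidence: Math.max(confidenceThreshold, 0.95)
      }];
    }

    // Real mode: call the vendor API (omitted in this sketch)
    throw new Error('Real vendor calls are outside the scope of this sketch');
  }
}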

Next Steps


Well-tested adapters are reliable adapters! Continue to Contributing Guide.