🚀 AI Latency Test Results

🏆 Fastest: OpenAI (241ms - Good) | 📊 Range: 241-2012ms | 📈 Average: 747ms | ✅ 8/8 providers responding
Full JSON Response (for developers & agents)
{
  "service": "InferenceLatency.com",
  "providers": [
    {
      "name": "OpenAI",
      "model": "GPT-4o",
      "latency_ms": 241
    },
    {
      "name": "Groq",
      "model": "llama-3.1-8b-instant",
      "latency_ms": 2012
    },
    {
      "name": "Claude",
      "model": "Claude Sonnet 4",
      "latency_ms": 1199
    },
    {
      "name": "OpenRouter",
      "model": "Mistral",
      "latency_ms": 1222
    },
    {
      "name": "Google Gemini",
      "model": "Gemini-2.0-Flash",
      "latency_ms": 442
    },
    {
      "name": "Together AI",
      "model": "Llama3.1-8B-Turbo",
      "latency_ms": 258
    },
    {
      "name": "Fireworks AI",
      "model": "Llama3.1-8B",
      "latency_ms": 275
    },
    {
      "name": "HF GPT OSS 120B (Cerebras)",
      "model": "GPT OSS 120B",
      "latency_ms": 324,
      "status": "success"
    }
  ],
  "ranked": [
    {
      "name": "OpenAI",
      "model": "GPT-4o",
      "latency_ms": 241
    },
    {
      "name": "Together AI",
      "model": "Llama3.1-8B-Turbo",
      "latency_ms": 258
    },
    {
      "name": "Fireworks AI",
      "model": "Llama3.1-8B",
      "latency_ms": 275
    },
    {
      "name": "HF GPT OSS 120B (Cerebras)",
      "model": "GPT OSS 120B",
      "latency_ms": 324,
      "status": "success"
    },
    {
      "name": "Google Gemini",
      "model": "Gemini-2.0-Flash",
      "latency_ms": 442
    },
    {
      "name": "Claude",
      "model": "Claude Sonnet 4",
      "latency_ms": 1199
    },
    {
      "name": "OpenRouter",
      "model": "Mistral",
      "latency_ms": 1222
    },
    {
      "name": "Groq",
      "model": "llama-3.1-8b-instant",
      "latency_ms": 2012
    }
  ],
  "fastest": "OpenAI",
  "total_tested": 8,
  "successful_tests": 8,
  "failed_tests": 0,
  "performance_summary": {
    "fastest_ms": 241,
    "slowest_ms": 2012,
    "average_ms": 747
  },
  "ai_agent_guidance": {
    "recommended_provider": "OpenAI",
    "use_case": "Choose 'fastest' provider for lowest latency API calls",
    "fallback_order": [
      "OpenAI",
      "Together AI",
      "Fireworks AI",
      "HF GPT OSS 120B (Cerebras)",
      "Google Gemini",
      "Claude",
      "OpenRouter",
      "Groq"
    ],
    "reliability_score": "8/8 providers responding"
  },
  "human_readable_summary": "\ud83c\udfc6 Fastest: OpenAI (241ms - Good) | \ud83d\udcca Range: 241-2012ms | \ud83d\udcc8 Average: 747ms | \u2705 8/8 providers responding",
  "timestamp": "2025-09-07T23:22:01.621228Z"
}
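
Example: selecting a provider from this response (Python)

A minimal sketch of how an agent might consume the JSON above: fetch it, prefer the service's recommended provider, and walk the suggested fallback order. The endpoint URL is an assumption for illustration; this page does not document the exact API path.

# Hedged sketch: the endpoint path below is hypothetical, not documented on this page.
import json
from urllib.request import urlopen

ENDPOINT = "https://inferencelatency.com/api/latency"  # assumed path

with urlopen(ENDPOINT, timeout=10) as resp:
    data = json.load(resp)

# Prefer the provider the service itself recommends, then fall back
# through the suggested order if that provider is unavailable.
guidance = data["ai_agent_guidance"]
preferred = guidance["recommended_provider"]
fallback = guidance["fallback_order"]

# Latency lookup by provider name, built from the ranked list.
latency = {p["name"]: p["latency_ms"] for p in data["ranked"]}

for name in [preferred] + [n for n in fallback if n != preferred]:
    print(f"{name}: {latency.get(name, 'n/a')} ms")

Before trusting the ranking, an agent can also check that successful_tests equals total_tested and that the timestamp is recent enough for its use case.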