CVE-2025-62426 (GCVE-0-2025-62426)
Vulnerability from cvelistv5
Published: 2025-11-21 01:21
Modified: 2025-11-24 18:12
CWE
  • CWE-770 - Allocation of Resources Without Limits or Throttling
Summary
vLLM is an inference and serving engine for large language models (LLMs). From version 0.5.5 to before 0.11.1, the /v1/chat/completions and /tokenize endpoints allow a chat_template_kwargs request parameter that is used in the code before it is properly validated against the chat template. With the right chat_template_kwargs parameters, it is possible to block processing of the API server for long periods of time, delaying all other requests. This issue has been patched in version 0.11.1.
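The core issue is that user-supplied chat_template_kwargs reach template rendering before being checked against the chat template (CWE-770: no limit or filter on what the request can feed into that step). Below is a minimal sketch, assuming a Jinja-style chat template, of what such a check could look like; it is illustrative only and not the actual fix shipped in 0.11.1 (see the referenced pull request and commit). The names template_variables, validate_chat_template_kwargs, and RESERVED_KEYS are hypothetical.

# Minimal sketch (assumption: not vLLM's actual patch) of validating
# chat_template_kwargs against the chat template before rendering.
# Only keys the template actually references are accepted; everything
# else is rejected up front instead of being forwarded into rendering.
from jinja2 import Environment, meta


def template_variables(chat_template: str) -> set[str]:
    """Names the chat template references (e.g. messages, add_generation_prompt)."""
    env = Environment()
    return meta.find_undeclared_variables(env.parse(chat_template))


# Keys the server fills in itself; user-supplied kwargs must not shadow them.
RESERVED_KEYS = {"messages", "add_generation_prompt", "tools"}


def validate_chat_template_kwargs(chat_template: str, kwargs: dict) -> dict:
    """Reject any user-supplied key the template does not reference."""
    allowed = template_variables(chat_template) - RESERVED_KEYS
    unexpected = set(kwargs) - allowed
    if unexpected:
        raise ValueError(f"unexpected chat_template_kwargs: {sorted(unexpected)}")
    return kwargs

In this pattern the rejection happens before any rendering work is scheduled, so a crafted request cannot tie up the API server's request processing with template evaluation.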
Impacted products
Vendor: vllm-project
Product: vllm
Versions: >= 0.5.5, < 0.11.1
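To check whether a local installation falls in the affected range, a quick sketch (assumption: vllm is installed in the current environment and the packaging library is available):

# Quick local check against the affected range from this record:
# >= 0.5.5, < 0.11.1 (fixed in 0.11.1).
from importlib.metadata import version
from packaging.version import Version

installed = Version(version("vllm"))
affected = Version("0.5.5") <= installed < Version("0.11.1")
print(f"vllm {installed}: {'affected' if affected else 'not affected'} by CVE-2025-62426")

Pre-release builds such as 0.11.1rc0 and 0.11.1rc1 compare as less than 0.11.1, which matches the NVD configuration data listing those release candidates as vulnerable.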


{
  "containers": {
    "adp": [
      {
        "metrics": [
          {
            "other": {
              "content": {
                "id": "CVE-2025-62426",
                "options": [
                  {
                    "Exploitation": "none"
                  },
                  {
                    "Automatable": "no"
                  },
                  {
                    "Technical Impact": "partial"
                  }
                ],
                "role": "CISA Coordinator",
                "timestamp": "2025-11-24T17:12:00.809982Z",
                "version": "2.0.3"
              },
              "type": "ssvc"
            }
          }
        ],
        "providerMetadata": {
          "dateUpdated": "2025-11-24T18:12:23.183Z",
          "orgId": "134c704f-9b21-4f2e-91b3-4a467353bcc0",
          "shortName": "CISA-ADP"
        },
        "title": "CISA ADP Vulnrichment"
      }
    ],
    "cna": {
      "affected": [
        {
          "product": "vllm",
          "vendor": "vllm-project",
          "versions": [
            {
              "status": "affected",
              "version": "\u003e= 0.5.5, \u003c 0.11.1"
            }
          ]
        }
      ],
      "descriptions": [
        {
          "lang": "en",
          "value": "vLLM is an inference and serving engine for large language models (LLMs). From version 0.5.5 to before 0.11.1, the /v1/chat/completions and /tokenize endpoints allow a chat_template_kwargs request parameter that is used in the code before it is properly validated against the chat template. With the right chat_template_kwargs parameters, it is possible to block processing of the API server for long periods of time, delaying all other requests. This issue has been patched in version 0.11.1."
        }
      ],
      "metrics": [
        {
          "cvssV3_1": {
            "attackComplexity": "LOW",
            "attackVector": "NETWORK",
            "availabilityImpact": "HIGH",
            "baseScore": 6.5,
            "baseSeverity": "MEDIUM",
            "confidentialityImpact": "NONE",
            "integrityImpact": "NONE",
            "privilegesRequired": "LOW",
            "scope": "UNCHANGED",
            "userInteraction": "NONE",
            "vectorString": "CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H",
            "version": "3.1"
          }
        }
      ],
      "problemTypes": [
        {
          "descriptions": [
            {
              "cweId": "CWE-770",
              "description": "CWE-770: Allocation of Resources Without Limits or Throttling",
              "lang": "en",
              "type": "CWE"
            }
          ]
        }
      ],
      "providerMetadata": {
        "dateUpdated": "2025-11-21T01:21:29.546Z",
        "orgId": "a0819718-46f1-4df5-94e2-005712e83aaa",
        "shortName": "GitHub_M"
      },
      "references": [
        {
          "name": "https://github.com/vllm-project/vllm/security/advisories/GHSA-69j4-grxj-j64p",
          "tags": [
            "x_refsource_CONFIRM"
          ],
          "url": "https://github.com/vllm-project/vllm/security/advisories/GHSA-69j4-grxj-j64p"
        },
        {
          "name": "https://github.com/vllm-project/vllm/pull/27205",
          "tags": [
            "x_refsource_MISC"
          ],
          "url": "https://github.com/vllm-project/vllm/pull/27205"
        },
        {
          "name": "https://github.com/vllm-project/vllm/commit/3ada34f9cb4d1af763fdfa3b481862a93eb6bd2b",
          "tags": [
            "x_refsource_MISC"
          ],
          "url": "https://github.com/vllm-project/vllm/commit/3ada34f9cb4d1af763fdfa3b481862a93eb6bd2b"
        },
        {
          "name": "https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/chat_utils.py#L1602-L1610",
          "tags": [
            "x_refsource_MISC"
          ],
          "url": "https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/chat_utils.py#L1602-L1610"
        },
        {
          "name": "https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/openai/serving_engine.py#L809-L814",
          "tags": [
            "x_refsource_MISC"
          ],
          "url": "https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/openai/serving_engine.py#L809-L814"
        }
      ],
      "source": {
        "advisory": "GHSA-69j4-grxj-j64p",
        "discovery": "UNKNOWN"
      },
      "title": "vLLM vulnerable to DoS via large Chat Completion or Tokenization requests with specially crafted `chat_template_kwargs`"
    }
  },
  "cveMetadata": {
    "assignerOrgId": "a0819718-46f1-4df5-94e2-005712e83aaa",
    "assignerShortName": "GitHub_M",
    "cveId": "CVE-2025-62426",
    "datePublished": "2025-11-21T01:21:29.546Z",
    "dateReserved": "2025-10-13T16:26:12.180Z",
    "dateUpdated": "2025-11-24T18:12:23.183Z",
    "state": "PUBLISHED"
  },
  "dataType": "CVE_RECORD",
  "dataVersion": "5.2",
  "vulnerability-lookup:meta": {
    "nvd": "{\"cve\":{\"id\":\"CVE-2025-62426\",\"sourceIdentifier\":\"security-advisories@github.com\",\"published\":\"2025-11-21T02:15:43.570\",\"lastModified\":\"2025-12-04T17:42:10.913\",\"vulnStatus\":\"Analyzed\",\"cveTags\":[],\"descriptions\":[{\"lang\":\"en\",\"value\":\"vLLM is an inference and serving engine for large language models (LLMs). From version 0.5.5 to before 0.11.1, the /v1/chat/completions and /tokenize endpoints allow a chat_template_kwargs request parameter that is used in the code before it is properly validated against the chat template. With the right chat_template_kwargs parameters, it is possible to block processing of the API server for long periods of time, delaying all other requests. This issue has been patched in version 0.11.1.\"}],\"metrics\":{\"cvssMetricV31\":[{\"source\":\"security-advisories@github.com\",\"type\":\"Secondary\",\"cvssData\":{\"version\":\"3.1\",\"vectorString\":\"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H\",\"baseScore\":6.5,\"baseSeverity\":\"MEDIUM\",\"attackVector\":\"NETWORK\",\"attackComplexity\":\"LOW\",\"privilegesRequired\":\"LOW\",\"userInteraction\":\"NONE\",\"scope\":\"UNCHANGED\",\"confidentialityImpact\":\"NONE\",\"integrityImpact\":\"NONE\",\"availabilityImpact\":\"HIGH\"},\"exploitabilityScore\":2.8,\"impactScore\":3.6}]},\"weaknesses\":[{\"source\":\"security-advisories@github.com\",\"type\":\"Primary\",\"description\":[{\"lang\":\"en\",\"value\":\"CWE-770\"}]}],\"configurations\":[{\"nodes\":[{\"operator\":\"OR\",\"negate\":false,\"cpeMatch\":[{\"vulnerable\":true,\"criteria\":\"cpe:2.3:a:vllm:vllm:*:*:*:*:*:*:*:*\",\"versionStartIncluding\":\"0.5.5\",\"versionEndExcluding\":\"0.11.1\",\"matchCriteriaId\":\"BA1047E1-ED1E-4685-B699-8CE5B2058D87\"},{\"vulnerable\":true,\"criteria\":\"cpe:2.3:a:vllm:vllm:0.11.1:rc0:*:*:*:*:*:*\",\"matchCriteriaId\":\"FEE054E1-1F84-4ACC-894C-D7E3652EF1B1\"},{\"vulnerable\":true,\"criteria\":\"cpe:2.3:a:vllm:vllm:0.11.1:rc1:*:*:*:*:*:*\",\"matchCriteriaId\":\"B05850DF-38FE-439F-9F7A-AA96DA9038CC\"}]}]}],\"references\":[{\"url\":\"https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/chat_utils.py#L1602-L1610\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Product\"]},{\"url\":\"https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/openai/serving_engine.py#L809-L814\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Product\"]},{\"url\":\"https://github.com/vllm-project/vllm/commit/3ada34f9cb4d1af763fdfa3b481862a93eb6bd2b\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Patch\"]},{\"url\":\"https://github.com/vllm-project/vllm/pull/27205\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Issue Tracking\"]},{\"url\":\"https://github.com/vllm-project/vllm/security/advisories/GHSA-69j4-grxj-j64p\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Vendor Advisory\"]}]}}",
    "vulnrichment": {
      "containers": "{\"adp\": [{\"title\": \"CISA ADP Vulnrichment\", \"metrics\": [{\"other\": {\"type\": \"ssvc\", \"content\": {\"id\": \"CVE-2025-62426\", \"role\": \"CISA Coordinator\", \"options\": [{\"Exploitation\": \"none\"}, {\"Automatable\": \"no\"}, {\"Technical Impact\": \"partial\"}], \"version\": \"2.0.3\", \"timestamp\": \"2025-11-24T17:12:00.809982Z\"}}}], \"providerMetadata\": {\"orgId\": \"134c704f-9b21-4f2e-91b3-4a467353bcc0\", \"shortName\": \"CISA-ADP\", \"dateUpdated\": \"2025-11-24T17:13:52.209Z\"}}], \"cna\": {\"title\": \"vLLM vulnerable to DoS via large Chat Completion or Tokenization requests with specially crafted `chat_template_kwargs`\", \"source\": {\"advisory\": \"GHSA-69j4-grxj-j64p\", \"discovery\": \"UNKNOWN\"}, \"metrics\": [{\"cvssV3_1\": {\"scope\": \"UNCHANGED\", \"version\": \"3.1\", \"baseScore\": 6.5, \"attackVector\": \"NETWORK\", \"baseSeverity\": \"MEDIUM\", \"vectorString\": \"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H\", \"integrityImpact\": \"NONE\", \"userInteraction\": \"NONE\", \"attackComplexity\": \"LOW\", \"availabilityImpact\": \"HIGH\", \"privilegesRequired\": \"LOW\", \"confidentialityImpact\": \"NONE\"}}], \"affected\": [{\"vendor\": \"vllm-project\", \"product\": \"vllm\", \"versions\": [{\"status\": \"affected\", \"version\": \"\u003e= 0.5.5, \u003c 0.11.1\"}]}], \"references\": [{\"url\": \"https://github.com/vllm-project/vllm/security/advisories/GHSA-69j4-grxj-j64p\", \"name\": \"https://github.com/vllm-project/vllm/security/advisories/GHSA-69j4-grxj-j64p\", \"tags\": [\"x_refsource_CONFIRM\"]}, {\"url\": \"https://github.com/vllm-project/vllm/pull/27205\", \"name\": \"https://github.com/vllm-project/vllm/pull/27205\", \"tags\": [\"x_refsource_MISC\"]}, {\"url\": \"https://github.com/vllm-project/vllm/commit/3ada34f9cb4d1af763fdfa3b481862a93eb6bd2b\", \"name\": \"https://github.com/vllm-project/vllm/commit/3ada34f9cb4d1af763fdfa3b481862a93eb6bd2b\", \"tags\": [\"x_refsource_MISC\"]}, {\"url\": \"https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/chat_utils.py#L1602-L1610\", \"name\": \"https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/chat_utils.py#L1602-L1610\", \"tags\": [\"x_refsource_MISC\"]}, {\"url\": \"https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/openai/serving_engine.py#L809-L814\", \"name\": \"https://github.com/vllm-project/vllm/blob/2a6dc67eb520ddb9c4138d8b35ed6fe6226997fb/vllm/entrypoints/openai/serving_engine.py#L809-L814\", \"tags\": [\"x_refsource_MISC\"]}], \"descriptions\": [{\"lang\": \"en\", \"value\": \"vLLM is an inference and serving engine for large language models (LLMs). From version 0.5.5 to before 0.11.1, the /v1/chat/completions and /tokenize endpoints allow a chat_template_kwargs request parameter that is used in the code before it is properly validated against the chat template. With the right chat_template_kwargs parameters, it is possible to block processing of the API server for long periods of time, delaying all other requests. This issue has been patched in version 0.11.1.\"}], \"problemTypes\": [{\"descriptions\": [{\"lang\": \"en\", \"type\": \"CWE\", \"cweId\": \"CWE-770\", \"description\": \"CWE-770: Allocation of Resources Without Limits or Throttling\"}]}], \"providerMetadata\": {\"orgId\": \"a0819718-46f1-4df5-94e2-005712e83aaa\", \"shortName\": \"GitHub_M\", \"dateUpdated\": \"2025-11-21T01:21:29.546Z\"}}}",
      "cveMetadata": "{\"cveId\": \"CVE-2025-62426\", \"state\": \"PUBLISHED\", \"dateUpdated\": \"2025-11-24T18:12:23.183Z\", \"dateReserved\": \"2025-10-13T16:26:12.180Z\", \"assignerOrgId\": \"a0819718-46f1-4df5-94e2-005712e83aaa\", \"datePublished\": \"2025-11-21T01:21:29.546Z\", \"assignerShortName\": \"GitHub_M\"}",
      "dataType": "CVE_RECORD",
      "dataVersion": "5.2"
    }
  }
}

