CVE-2025-25183
Vulnerability from cvelistv5
Published: 2025-02-07 19:59
Modified: 2025-02-12 20:51
Summary: vLLM using built-in hash() from Python 3.12 leads to predictable hash collisions in vLLM prefix cache
Impacted products: vllm-project/vllm


{
  "containers": {
    "adp": [
      {
        "metrics": [
          {
            "other": {
              "content": {
                "id": "CVE-2025-25183",
                "options": [
                  {
                    "Exploitation": "none"
                  },
                  {
                    "Automatable": "no"
                  },
                  {
                    "Technical Impact": "partial"
                  }
                ],
                "role": "CISA Coordinator",
                "timestamp": "2025-02-07T20:33:57.205558Z",
                "version": "2.0.3"
              },
              "type": "ssvc"
            }
          }
        ],
        "providerMetadata": {
          "dateUpdated": "2025-02-12T20:51:46.402Z",
          "orgId": "134c704f-9b21-4f2e-91b3-4a467353bcc0",
          "shortName": "CISA-ADP"
        },
        "title": "CISA ADP Vulnrichment"
      }
    ],
    "cna": {
      "affected": [
        {
          "product": "vllm",
          "vendor": "vllm-project",
          "versions": [
            {
              "status": "affected",
              "version": "\u003c 0.7.2"
            }
          ]
        }
      ],
      "descriptions": [
        {
          "lang": "en",
          "value": "vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs. Maliciously constructed statements can lead to hash collisions, resulting in cache reuse, which can interfere with subsequent responses and cause unintended behavior. Prefix caching makes use of Python\u0027s built-in hash() function. As of Python 3.12, the behavior of hash(None) has changed to be a predictable constant value. This makes it more feasible that someone could try exploit hash collisions. The impact of a collision would be using cache that was generated using different content. Given knowledge of prompts in use and predictable hashing behavior, someone could intentionally populate the cache using a prompt known to collide with another prompt in use. This issue has been addressed in version 0.7.2 and all users are advised to upgrade. There are no known workarounds for this vulnerability."
        }
      ],
      "metrics": [
        {
          "cvssV3_1": {
            "attackComplexity": "HIGH",
            "attackVector": "NETWORK",
            "availabilityImpact": "NONE",
            "baseScore": 2.6,
            "baseSeverity": "LOW",
            "confidentialityImpact": "NONE",
            "integrityImpact": "LOW",
            "privilegesRequired": "LOW",
            "scope": "UNCHANGED",
            "userInteraction": "REQUIRED",
            "vectorString": "CVSS:3.1/AV:N/AC:H/PR:L/UI:R/S:U/C:N/I:L/A:N",
            "version": "3.1"
          }
        }
      ],
      "problemTypes": [
        {
          "descriptions": [
            {
              "cweId": "CWE-354",
              "description": "CWE-354: Improper Validation of Integrity Check Value",
              "lang": "en",
              "type": "CWE"
            }
          ]
        }
      ],
      "providerMetadata": {
        "dateUpdated": "2025-02-07T19:59:01.370Z",
        "orgId": "a0819718-46f1-4df5-94e2-005712e83aaa",
        "shortName": "GitHub_M"
      },
      "references": [
        {
          "name": "https://github.com/vllm-project/vllm/security/advisories/GHSA-rm76-4mrf-v9r8",
          "tags": [
            "x_refsource_CONFIRM"
          ],
          "url": "https://github.com/vllm-project/vllm/security/advisories/GHSA-rm76-4mrf-v9r8"
        },
        {
          "name": "https://github.com/vllm-project/vllm/pull/12621",
          "tags": [
            "x_refsource_MISC"
          ],
          "url": "https://github.com/vllm-project/vllm/pull/12621"
        },
        {
          "name": "https://github.com/python/cpython/commit/432117cd1f59c76d97da2eaff55a7d758301dbc7",
          "tags": [
            "x_refsource_MISC"
          ],
          "url": "https://github.com/python/cpython/commit/432117cd1f59c76d97da2eaff55a7d758301dbc7"
        }
      ],
      "source": {
        "advisory": "GHSA-rm76-4mrf-v9r8",
        "discovery": "UNKNOWN"
      },
      "title": "vLLM using built-in hash() from Python 3.12 leads to predictable hash collisions in vLLM prefix cache"
    }
  },
  "cveMetadata": {
    "assignerOrgId": "a0819718-46f1-4df5-94e2-005712e83aaa",
    "assignerShortName": "GitHub_M",
    "cveId": "CVE-2025-25183",
    "datePublished": "2025-02-07T19:59:01.370Z",
    "dateReserved": "2025-02-03T19:30:53.399Z",
    "dateUpdated": "2025-02-12T20:51:46.402Z",
    "state": "PUBLISHED"
  },
  "dataType": "CVE_RECORD",
  "dataVersion": "5.1",
  "meta": {
    "nvd": "{\"cve\":{\"id\":\"CVE-2025-25183\",\"sourceIdentifier\":\"security-advisories@github.com\",\"published\":\"2025-02-07T20:15:34.083\",\"lastModified\":\"2025-07-01T20:58:00.170\",\"vulnStatus\":\"Analyzed\",\"cveTags\":[],\"descriptions\":[{\"lang\":\"en\",\"value\":\"vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs. Maliciously constructed statements can lead to hash collisions, resulting in cache reuse, which can interfere with subsequent responses and cause unintended behavior. Prefix caching makes use of Python\u0027s built-in hash() function. As of Python 3.12, the behavior of hash(None) has changed to be a predictable constant value. This makes it more feasible that someone could try exploit hash collisions. The impact of a collision would be using cache that was generated using different content. Given knowledge of prompts in use and predictable hashing behavior, someone could intentionally populate the cache using a prompt known to collide with another prompt in use. This issue has been addressed in version 0.7.2 and all users are advised to upgrade. There are no known workarounds for this vulnerability.\"},{\"lang\":\"es\",\"value\":\"vLLM es un motor de inferencia y servicio de alto rendimiento y uso eficiente de la memoria para LLM. Las declaraciones construidas de forma malintencionada pueden provocar colisiones de hash, lo que da como resultado la reutilizaci\u00f3n de la memoria cach\u00e9, lo que puede interferir con las respuestas posteriores y provocar un comportamiento no deseado. El almacenamiento en cach\u00e9 de prefijos utiliza la funci\u00f3n hash() incorporada de Python. A partir de Python 3.12, el comportamiento de hash(None) ha cambiado para ser un valor constante predecible. Esto hace que sea m\u00e1s factible que alguien pueda intentar explotar las colisiones de hash. El impacto de una colisi\u00f3n ser\u00eda el uso de la memoria cach\u00e9 generada con un contenido diferente. Dado el conocimiento de los mensajes en uso y el comportamiento predecible del hash, alguien podr\u00eda rellenar intencionalmente la memoria cach\u00e9 utilizando un mensaje que se sabe que colisiona con otro mensaje en uso. Este problema se ha solucionado en la versi\u00f3n 0.7.2 y se recomienda a todos los usuarios que actualicen. 
No existen workarounds para esta vulnerabilidad.\"}],\"metrics\":{\"cvssMetricV31\":[{\"source\":\"security-advisories@github.com\",\"type\":\"Secondary\",\"cvssData\":{\"version\":\"3.1\",\"vectorString\":\"CVSS:3.1/AV:N/AC:H/PR:L/UI:R/S:U/C:N/I:L/A:N\",\"baseScore\":2.6,\"baseSeverity\":\"LOW\",\"attackVector\":\"NETWORK\",\"attackComplexity\":\"HIGH\",\"privilegesRequired\":\"LOW\",\"userInteraction\":\"REQUIRED\",\"scope\":\"UNCHANGED\",\"confidentialityImpact\":\"NONE\",\"integrityImpact\":\"LOW\",\"availabilityImpact\":\"NONE\"},\"exploitabilityScore\":1.2,\"impactScore\":1.4}]},\"weaknesses\":[{\"source\":\"security-advisories@github.com\",\"type\":\"Primary\",\"description\":[{\"lang\":\"en\",\"value\":\"CWE-354\"}]}],\"configurations\":[{\"nodes\":[{\"operator\":\"OR\",\"negate\":false,\"cpeMatch\":[{\"vulnerable\":true,\"criteria\":\"cpe:2.3:a:vllm:vllm:*:*:*:*:*:*:*:*\",\"versionEndExcluding\":\"0.7.2\",\"matchCriteriaId\":\"A5911C1A-F107-4B9B-BAE9-36A2B5181321\"}]}]}],\"references\":[{\"url\":\"https://github.com/python/cpython/commit/432117cd1f59c76d97da2eaff55a7d758301dbc7\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Not Applicable\"]},{\"url\":\"https://github.com/vllm-project/vllm/pull/12621\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Issue Tracking\"]},{\"url\":\"https://github.com/vllm-project/vllm/security/advisories/GHSA-rm76-4mrf-v9r8\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Vendor Advisory\"]}]}}"
  }
}
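
To make the reported mechanism concrete, here is a minimal Python sketch of the hashing pattern the advisory describes. It is not vLLM's actual implementation: the block size, the tuple layout, and the token ids are illustrative assumptions. What it demonstrates is that a key chain seeded by hash(None) became fully predictable once CPython 3.12 (see the referenced CPython commit) made hash(None) a fixed constant, since integer hashing was never randomized by PYTHONHASHSEED in the first place.

# Simplified model of the prefix-cache keying described in the advisory.
# This is NOT vLLM's actual code; the block size, tuple layout, and token
# ids below are illustrative assumptions.
from typing import Optional, Tuple

BLOCK_SIZE = 4  # illustrative only; vLLM uses its configured KV-cache block size

def block_key(parent: Optional[int], tokens: Tuple[int, ...]) -> int:
    # The first block of a sequence has no parent, so the chain is seeded by
    # hash(None). Since Python 3.12, hash(None) is a fixed constant, and int
    # hashing has never been randomized, so every key below is predictable
    # across processes.
    return hash((parent, *tokens))

def prompt_keys(token_ids):
    keys, parent = [], None
    full = len(token_ids) - len(token_ids) % BLOCK_SIZE  # only full blocks are cached
    for i in range(0, full, BLOCK_SIZE):
        parent = block_key(parent, tuple(token_ids[i:i + BLOCK_SIZE]))
        keys.append(parent)
    return keys

if __name__ == "__main__":
    victim_prompt = [101, 7592, 1010, 2088, 999, 102, 2054, 2003]  # hypothetical token ids
    # An attacker who knows or guesses the victim's tokenized prompt can
    # compute these keys offline, search for a different token sequence whose
    # keys collide, and submit it first so the victim's request reuses cache
    # built from the attacker's content.
    print(prompt_keys(victim_prompt))

The fix shipped in vLLM 0.7.2 (see PR #12621) moves prefix-cache keys off this predictable chain. The second sketch shows the general hardening direction, a per-process random root plus a collision-resistant digest; it is an assumption-level illustration, and the exact implementation in the release may differ.

# Hardened variant (a sketch of the mitigation direction, not vLLM's code):
# seed the chain with per-process random bytes instead of hash(None), and use
# a cryptographic digest so collisions cannot be engineered offline.
import hashlib
import os
import pickle
from typing import Optional, Tuple

NONE_SEED = int.from_bytes(os.urandom(32), "big")  # unknown outside this process

def block_key(parent: Optional[int], tokens: Tuple[int, ...]) -> int:
    # Drop-in replacement for block_key in the sketch above.
    root = NONE_SEED if parent is None else parent
    payload = pickle.dumps((root, tokens))
    return int.from_bytes(hashlib.sha256(payload).digest(), "big")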

