
vllm.utils.serial_utils

EMBED_DTYPE_TO_NUMPY_DTYPE_VIEW module-attribute

EMBED_DTYPE_TO_NUMPY_DTYPE_VIEW = {
    "float32": np.float32,
    "float16": np.float16,
    "bfloat16": np.float16,
    "fp8_e4m3": np.uint8,
    "fp8_e5m2": np.uint8,
}

EMBED_DTYPE_TO_TORCH_DTYPE module-attribute

EMBED_DTYPE_TO_TORCH_DTYPE = {
    "float32": torch.float32,
    "float16": torch.float16,
    "bfloat16": torch.bfloat16,
    "fp8_e4m3": torch.float8_e4m3fn,
    "fp8_e5m2": torch.float8_e5m2,
}

EMBED_DTYPE_TO_TORCH_DTYPE_VIEW module-attribute

EMBED_DTYPE_TO_TORCH_DTYPE_VIEW = {
    "float32": torch.float32,
    "float16": torch.float16,
    "bfloat16": torch.float16,
    "fp8_e4m3": torch.uint8,
    "fp8_e5m2": torch.uint8,
}
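
The *_VIEW maps exist because NumPy has no bfloat16 or float8 dtypes: before a tensor is turned into raw bytes it is reinterpreted as a same-width dtype (float16 for bfloat16, uint8 for the fp8 formats), which leaves the bit pattern untouched. A minimal sketch of that reinterpretation, assuming only that torch is installed:

import torch

# bfloat16 has no NumPy equivalent, so its 2-byte elements are viewed as
# float16 purely for byte-level handling; the underlying bits are unchanged.
t = torch.tensor([1.5, -2.0], dtype=torch.bfloat16)
viewed = t.view(torch.float16)      # same bytes, different label
raw = viewed.numpy().tobytes()      # 2 bytes per element
assert len(raw) == t.numel() * t.element_size()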

ENDIANNESS module-attribute

ENDIANNESS = ['native', 'big', 'little']

EmbedDType module-attribute

EmbedDType = Literal[
    "float32", "float16", "bfloat16", "fp8_e4m3", "fp8_e5m2"
]

EncodingFormat module-attribute

EncodingFormat = Literal['float', 'base64', 'bytes']

Endianness module-attribute

Endianness = Literal['native', 'big', 'little']

sys_byteorder module-attribute

sys_byteorder = sys.byteorder
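
Both binary2tensor and tensor2binary share the same swap rule: bytes are swapped only when an explicit endianness ("big" or "little") differs from the host order in sys_byteorder; "native" never swaps. A small illustration of that condition:

import sys

def needs_swap(endianness: str) -> bool:
    # Mirrors the check used in binary2tensor and tensor2binary.
    return endianness != "native" and endianness != sys.byteorder

assert needs_swap("native") is False
assert needs_swap(sys.byteorder) is False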

MetadataItem dataclass

Source code in vllm/utils/serial_utils.py
@dataclass
class MetadataItem:
    index: int
    embed_dtype: EmbedDType
    endianness: Endianness
    start: int
    end: int
    shape: tuple[int, ...]

embed_dtype instance-attribute

embed_dtype: EmbedDType

end instance-attribute

end: int

endianness instance-attribute

endianness: Endianness

index instance-attribute

index: int

shape instance-attribute

shape: tuple[int, ...]

start instance-attribute

start: int

__init__

__init__(
    index: int,
    embed_dtype: EmbedDType,
    endianness: Endianness,
    start: int,
    end: int,
    shape: tuple[int, ...],
) -> None
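
For illustration, a MetadataItem describing a 2x4 float32 tensor that occupies the first 32 bytes of a payload could look like this (the values are hypothetical):

item = MetadataItem(
    index=0,
    embed_dtype="float32",
    endianness="native",
    start=0,
    end=32,              # 2 * 4 elements * 4 bytes each
    shape=(2, 4),
)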

binary2tensor

binary2tensor(
    binary: bytes,
    shape: tuple[int, ...],
    embed_dtype: EmbedDType,
    endianness: Endianness,
) -> Tensor
Source code in vllm/utils/serial_utils.py
def binary2tensor(
    binary: bytes,
    shape: tuple[int, ...],
    embed_dtype: EmbedDType,
    endianness: Endianness,
) -> torch.Tensor:
    assert embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE
    assert embed_dtype in EMBED_DTYPE_TO_NUMPY_DTYPE_VIEW
    assert endianness in ENDIANNESS

    torch_dtype = EMBED_DTYPE_TO_TORCH_DTYPE[embed_dtype]
    np_dtype = EMBED_DTYPE_TO_NUMPY_DTYPE_VIEW[embed_dtype]

    np_array = np.frombuffer(binary, dtype=np_dtype).reshape(shape)

    if endianness != "native" and endianness != sys_byteorder:
        np_array = np_array.byteswap()

    return torch.from_numpy(np_array).view(torch_dtype)
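
A round-trip sketch pairing binary2tensor with tensor2binary (documented further down); the tensor here is made up:

import torch
from vllm.utils.serial_utils import binary2tensor, tensor2binary

original = torch.randn(3, 8, dtype=torch.float32)
payload = tensor2binary(original, embed_dtype="float32", endianness="little")
restored = binary2tensor(
    payload, shape=(3, 8), embed_dtype="float32", endianness="little"
)
assert torch.equal(original, restored)  # lossless when dtypes match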

decode_pooling_output

decode_pooling_output(
    items: list[MetadataItem], body: bytes
) -> list[Tensor]
Source code in vllm/utils/serial_utils.py
def decode_pooling_output(items: list[MetadataItem], body: bytes) -> list[torch.Tensor]:
    items.sort(key=lambda x: x.index)

    tensor_list: list[torch.Tensor] = []
    for item in items:
        binary = body[item.start : item.end]
        tensor = binary2tensor(binary, item.shape, item.embed_dtype, item.endianness)
        tensor_list.append(tensor)
    return tensor_list
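
A sketch of decoding a hand-built payload; the metadata below is assembled manually with tensor2binary rather than taken from encode_pooling_bytes:

import torch
from vllm.utils.serial_utils import (
    MetadataItem,
    decode_pooling_output,
    tensor2binary,
)

tensors = [torch.randn(4), torch.randn(2, 3)]
chunks: list[bytes] = []
items: list[MetadataItem] = []
offset = 0
for i, t in enumerate(tensors):
    chunk = tensor2binary(t, embed_dtype="float32", endianness="native")
    items.append(
        MetadataItem(
            index=i,
            embed_dtype="float32",
            endianness="native",
            start=offset,
            end=offset + len(chunk),
            shape=tuple(t.shape),
        )
    )
    chunks.append(chunk)
    offset += len(chunk)

decoded = decode_pooling_output(items, b"".join(chunks))
assert all(torch.equal(a, b) for a, b in zip(tensors, decoded))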

encode_pooling_bytes

encode_pooling_bytes(
    pooling_outputs: list[PoolingRequestOutput],
    embed_dtype: EmbedDType,
    endianness: Endianness,
)
Source code in vllm/utils/serial_utils.py
def encode_pooling_bytes(
    pooling_outputs: list[PoolingRequestOutput],
    embed_dtype: EmbedDType,
    endianness: Endianness,
):
    num_prompt_tokens = 0
    items: list[dict[str, MetadataItem]] = []
    body = []
    offset = 0
    for idx, output in enumerate(pooling_outputs):
        binary = tensor2binary(
            tensor=output.outputs.data,
            embed_dtype=embed_dtype,
            endianness=endianness,
        )
        size = len(binary)

        item = {
            "index": idx,
            "embed_dtype": embed_dtype,
            "endianness": endianness,
            "start": offset,
            "end": offset + size,
            "shape": output.outputs.data.shape,
        }

        body.append(binary)
        items.append(item)
        prompt_token_ids = output.prompt_token_ids
        num_prompt_tokens += len(prompt_token_ids)
        offset += size

    usage = {
        "prompt_tokens": num_prompt_tokens,
        "total_tokens": num_prompt_tokens,
    }
    return body, items, usage
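
For illustration only, the sketch below drives encode_pooling_bytes with SimpleNamespace stand-ins that mimic the two attributes the function reads from each PoolingRequestOutput (.outputs.data and .prompt_token_ids); real callers pass actual PoolingRequestOutput objects:

from types import SimpleNamespace

import torch
from vllm.utils.serial_utils import encode_pooling_bytes

# Hypothetical stand-ins; token ids and tensors are made up.
fake_outputs = [
    SimpleNamespace(
        outputs=SimpleNamespace(data=torch.randn(8)),
        prompt_token_ids=[101, 7592, 102],
    ),
    SimpleNamespace(
        outputs=SimpleNamespace(data=torch.randn(8)),
        prompt_token_ids=[101, 2088, 102],
    ),
]

body, items, usage = encode_pooling_bytes(
    fake_outputs, embed_dtype="float16", endianness="little"
)
assert usage == {"prompt_tokens": 6, "total_tokens": 6}
assert items[1]["start"] == items[0]["end"]  # chunks laid out back to back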

encode_pooling_output

encode_pooling_output(
    output: PoolingRequestOutput,
    encoding_format: EncodingFormat,
    embed_dtype: EmbedDType,
    endianness: Endianness,
) -> list[float] | str | bytes
Source code in vllm/utils/serial_utils.py
def encode_pooling_output(
    output: PoolingRequestOutput,
    encoding_format: EncodingFormat,
    embed_dtype: EmbedDType,
    endianness: Endianness,
) -> list[float] | str | bytes:
    if encoding_format == "float":
        return output.outputs.data.tolist()
    elif encoding_format == "base64":
        embedding_bytes = tensor2binary(output.outputs.data, embed_dtype, endianness)
        return base64.b64encode(embedding_bytes).decode("utf-8")
    elif encoding_format == "bytes":
        return tensor2binary(output.outputs.data, embed_dtype, endianness)
    assert_never(encoding_format)
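
A client that receives the "base64" form can recover the tensor with base64.b64decode plus binary2tensor, provided the shape, dtype, and endianness are known out of band. A hedged sketch that simulates the base64 branch for a single made-up vector:

import base64

import torch
from vllm.utils.serial_utils import binary2tensor, tensor2binary

embedding = torch.randn(16)
encoded = base64.b64encode(
    tensor2binary(embedding, embed_dtype="float32", endianness="little")
).decode("utf-8")

# Client side: shape and dtype must be communicated separately.
decoded = binary2tensor(
    base64.b64decode(encoded),
    shape=(16,),
    embed_dtype="float32",
    endianness="little",
)
assert torch.equal(embedding, decoded)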

tensor2binary

tensor2binary(
    tensor: Tensor,
    embed_dtype: EmbedDType,
    endianness: Endianness,
) -> bytes
Source code in vllm/utils/serial_utils.py
def tensor2binary(
    tensor: torch.Tensor, embed_dtype: EmbedDType, endianness: Endianness
) -> bytes:
    assert isinstance(tensor, torch.Tensor)
    assert embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE
    assert endianness in ENDIANNESS

    torch_dtype = EMBED_DTYPE_TO_TORCH_DTYPE[embed_dtype]
    torch_view_dtype = EMBED_DTYPE_TO_TORCH_DTYPE_VIEW[embed_dtype]

    np_array = (
        tensor.to(torch_dtype).flatten().contiguous().view(torch_view_dtype).numpy()
    )

    if endianness != "native" and endianness != sys_byteorder:
        np_array = np_array.byteswap()

    return np_array.tobytes()
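
Payload size follows directly from embed_dtype: 4 bytes per element for float32, 2 for float16 and bfloat16, and 1 for the fp8 formats. A quick check over the always-available dtypes:

import torch
from vllm.utils.serial_utils import tensor2binary

t = torch.randn(1, 128, dtype=torch.float32)
for name, bytes_per_elem in [("float32", 4), ("float16", 2), ("bfloat16", 2)]:
    blob = tensor2binary(t, embed_dtype=name, endianness="native")
    assert len(blob) == t.numel() * bytes_per_elem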