vllm.benchmarks.lib.utils

convert_to_pytorch_benchmark_format

convert_to_pytorch_benchmark_format(
    args: Namespace,
    metrics: dict[str, list],
    extra_info: dict[str, Any],
) -> list

Save the benchmark results in the format used by the PyTorch OSS benchmark, with one metric per record: https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database

Source code in vllm/benchmarks/lib/utils.py
def convert_to_pytorch_benchmark_format(
    args: argparse.Namespace, metrics: dict[str, list], extra_info: dict[str, Any]
) -> list:
    """
    Save the benchmark results in the format used by PyTorch OSS benchmark with
    one metric per record
    https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
    """
    records = []
    if not os.environ.get("SAVE_TO_PYTORCH_BENCHMARK_FORMAT", False):
        return records

    for name, benchmark_values in metrics.items():
        if not isinstance(benchmark_values, list):
            raise TypeError(
                f"benchmark_values for metric '{name}' must be a list, "
                f"but got {type(benchmark_values).__name__}"
            )

        record = {
            "benchmark": {
                "name": "vLLM benchmark",
                "extra_info": {
                    "args": vars(args),
                    "compilation_config.mode": extract_field(
                        args, extra_info, "compilation_config.mode"
                    ),
                    "optimization_level": extract_field(
                        args, extra_info, "optimization_level"
                    ),
                    # A boolean field used by vLLM benchmark HUD dashboard
                    "use_compile": use_compile(args, extra_info),
                },
            },
            "model": {
                "name": args.model,
            },
            "metric": {
                "name": name,
                "benchmark_values": benchmark_values,
                "extra_info": extra_info,
            },
        }

        tp = record["benchmark"]["extra_info"]["args"].get("tensor_parallel_size")
        # Save tensor_parallel_size parameter if it's part of the metadata
        if not tp and "tensor_parallel_size" in extra_info:
            record["benchmark"]["extra_info"]["args"]["tensor_parallel_size"] = (
                extra_info["tensor_parallel_size"]
            )

        records.append(record)

    return records
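
A minimal usage sketch (illustrative, not taken from the source): the model name and metric values are hypothetical, and it assumes that extract_field, defined elsewhere in this module, tolerates fields missing from both args and extra_info.

import argparse
import json
import os

# Opt in to emitting PyTorch OSS benchmark records.
os.environ["SAVE_TO_PYTORCH_BENCHMARK_FORMAT"] = "1"

# Hypothetical arguments; the real benchmark scripts pass their full argparse
# namespace, which must at least provide the model name.
args = argparse.Namespace(
    model="meta-llama/Llama-3.1-8B-Instruct",
    tensor_parallel_size=1,
)
metrics = {
    "request_throughput": [12.3],  # one list of benchmark values per metric
    "mean_ttft_ms": [45.6],
}
extra_info = {"backend": "vllm", "num_prompts": 100}

records = convert_to_pytorch_benchmark_format(args, metrics, extra_info)
print(json.dumps(records, indent=2))  # one record per metric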

use_compile

use_compile(
    args: Namespace, extra_info: dict[str, Any]
) -> bool

Check if the benchmark is run with torch.compile

Source code in vllm/benchmarks/lib/utils.py
def use_compile(args: argparse.Namespace, extra_info: dict[str, Any]) -> bool:
    """
    Check if the benchmark is run with torch.compile
    """
    return not (
        extract_field(args, extra_info, "compilation_config.mode") == "0"
        or "eager" in getattr(args, "output_json", "")
        or "eager" in getattr(args, "result_filename", "")
    )
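
A short illustration of the heuristic (hypothetical file names; it assumes that extract_field, defined elsewhere in this module, returns a falsy value when compilation_config.mode is absent from both args and extra_info):

import argparse

# A result path containing "eager" marks the run as an eager-mode run.
eager_args = argparse.Namespace(output_json="results/llama_eager.json")
compiled_args = argparse.Namespace(output_json="results/llama.json")

print(use_compile(eager_args, extra_info={}))     # False
print(use_compile(compiled_args, extra_info={}))  # True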