|
| 1 | +# Copyright (c) Meta Platforms, Inc. and affiliates. |
| 2 | +# All rights reserved. |
| 3 | +# |
| 4 | +# This source code is licensed under the BSD-style license found in the |
| 5 | +# LICENSE file in the root directory of this source tree. |
| 6 | + |
| 7 | +"""Metrics collection and reporting for generator throughput benchmarks. |
| 8 | +
|
| 9 | +Based on vLLM's throughput benchmark metrics patterns. |
| 10 | +Reference: vllm/benchmarks/throughput.py (lines 762-809) |
| 11 | +""" |
| 12 | + |
| 13 | +import json |
| 14 | +from dataclasses import asdict, dataclass |
| 15 | + |
| 16 | +from forge.data_models.completion import Completion |
| 17 | + |
| 18 | + |
@dataclass
class ThroughputMetrics:
    """Result record for an offline-inference throughput benchmark run.

    Mirrors the metrics reported by vLLM's throughput benchmark:
    https://github.com/vllm-project/vllm/blob/main/vllm/benchmarks/throughput.py
    """

    # Total wall-clock time of the benchmark, in seconds.
    elapsed_time: float
    # Number of Generator.generate() calls (requests) processed.
    num_requests: int
    # Number of completions produced (requests * samples per request).
    num_completions: int
    # Sum of prompt tokens across all completions.
    total_prompt_tokens: int
    # Sum of generated output tokens across all completions.
    total_output_tokens: int
    # total_prompt_tokens + total_output_tokens.
    total_tokens: int
    # Requests per second.
    requests_per_second: float
    # Completions per second.
    completions_per_second: float
    # All tokens (prompt + output) per second.
    tokens_per_second: float
    # Generated output tokens per second.
    output_tokens_per_second: float
    # Optional model name for reporting.
    model: str | None = None
    # Optional benchmark configuration dict for reporting.
    config: dict | None = None
| 51 | + |
| 52 | + |
def extract_token_counts(completions: list[list[Completion]]) -> tuple[int, int]:
    """Sum prompt and output token counts over generator completions.

    Args:
        completions: One inner list of Completion objects per
            Generator.generate() call.

    Returns:
        Tuple of (total_prompt_tokens, total_output_tokens).
    """
    # prompt_ids / token_ids are 1-D tensors of shape (seq_len,), so the
    # leading dimension is the token count for that completion.
    prompt_total = sum(
        c.prompt_ids.shape[0] for batch in completions for c in batch
    )
    output_total = sum(
        c.token_ids.shape[0] for batch in completions for c in batch
    )
    return prompt_total, output_total
| 74 | + |
| 75 | + |
def calculate_metrics(
    completions: list[list[Completion]],
    elapsed_time: float,
    model: str | None = None,
    config: dict | None = None,
) -> ThroughputMetrics:
    """Build a ThroughputMetrics record from completions and timing.

    Args:
        completions: One inner list of Completion objects per
            Generator.generate() call.
        elapsed_time: Total wall-clock time in seconds.
        model: Optional model name.
        config: Optional benchmark configuration.

    Returns:
        ThroughputMetrics with counts and per-second rates filled in.
    """

    def _rate(count: int) -> float:
        # Guard against a zero (or negative) elapsed time to avoid
        # ZeroDivisionError on degenerate runs.
        return count / elapsed_time if elapsed_time > 0 else 0.0

    request_count = len(completions)
    completion_count = sum(map(len, completions))
    prompt_tokens, output_tokens = extract_token_counts(completions)
    token_total = prompt_tokens + output_tokens

    return ThroughputMetrics(
        elapsed_time=elapsed_time,
        num_requests=request_count,
        num_completions=completion_count,
        total_prompt_tokens=prompt_tokens,
        total_output_tokens=output_tokens,
        total_tokens=token_total,
        requests_per_second=_rate(request_count),
        completions_per_second=_rate(completion_count),
        tokens_per_second=_rate(token_total),
        output_tokens_per_second=_rate(output_tokens),
        model=model,
        config=config,
    )
| 116 | + |
| 117 | + |
def print_metrics(metrics: ThroughputMetrics) -> None:
    """Render the benchmark results as a fixed-width console table.

    Args:
        metrics: ThroughputMetrics to print.
    """
    bar = "=" * 55
    rule = "-" * 55

    lines = [bar, "Throughput Benchmark Results".center(55), bar]

    if metrics.model:
        lines.append(f"Model: {metrics.model}")

    # Average samples per request; zero requests yields 0 rather than dividing.
    per_request = (
        metrics.num_completions / metrics.num_requests
        if metrics.num_requests > 0
        else 0
    )

    lines += [
        f"Requests: {metrics.num_requests}",
        f"Completions: {metrics.num_completions} ({per_request:.1f} per request)",
        f"Elapsed Time: {metrics.elapsed_time:.2f} seconds",
        rule,
        f"Total Prompt Tokens: {metrics.total_prompt_tokens}",
        f"Total Output Tokens: {metrics.total_output_tokens}",
        f"Total Tokens: {metrics.total_tokens}",
        rule,
        "Throughput:",
        f"  Requests/sec: {metrics.requests_per_second:.2f}",
        f"  Completions/sec: {metrics.completions_per_second:.2f}",
        f"  Total Tokens/sec: {metrics.tokens_per_second:.2f}",
        f"  Output Tokens/sec: {metrics.output_tokens_per_second:.2f}",
        bar,
    ]

    # One print of the joined lines emits exactly the same stdout as
    # printing each line individually.
    print("\n".join(lines))
| 154 | + |
| 155 | + |
def save_metrics_json(metrics: ThroughputMetrics, output_path: str) -> None:
    """Serialize the metrics dataclass to a JSON file.

    Args:
        metrics: ThroughputMetrics to save.
        output_path: Path to the output JSON file.
    """
    # asdict() recursively converts the dataclass (including the optional
    # config dict) into plain JSON-serializable structures.
    serialized = json.dumps(asdict(metrics), indent=2)

    with open(output_path, "w") as f:
        f.write(serialized)

    print(f"\nMetrics saved to: {output_path}")
0 commit comments