use block_size as max_seq_len during profile_run
tae-su-kim committed Jul 19, 2024
1 parent 089eace · commit 7407c12
Showing 1 changed file with 4 additions and 4 deletions.
vllm/worker/habana_model_runner.py (8 changes: 4 additions & 4 deletions)
@@ -1034,7 +1034,7 @@ def profile_run(self) -> None:
         num_layers = self.model_config.get_num_layers(self.parallel_config)
         kv_caches = [None] * num_layers
         max_batch_size = self.prompt_bs_bucket_cfg[-1]
-        max_seq_len = self.prompt_seq_bucket_cfg[-1]
+        max_seq_len = self.block_size

         self.warmup_scenario(max_batch_size, max_seq_len, True, kv_caches)

@@ -1123,8 +1123,8 @@ def warmup_model(self, kv_caches: List[torch.Tensor]) -> None:
         self.profiler.start('internal', 'warmup')
         start_mem = HabanaMemoryProfiler.current_device_memory_usage()
         start_time = time.perf_counter()
-        self.warmup_all_buckets(self.prompt_buckets, True, kv_caches)
-        self.warmup_all_buckets(self.decode_buckets, False, kv_caches)
+        # self.warmup_all_buckets(self.prompt_buckets, True, kv_caches)
+        # self.warmup_all_buckets(self.decode_buckets, False, kv_caches)

         if not self.enforce_eager:
             mem_margin = 1.0 - float(
@@ -1160,7 +1160,7 @@ def vocab_size(self) -> int:

 def _maybe_wrap_in_hpu_graph(model):
     return htorch.hpu.wrap_in_hpu_graph(HpuModelAdapter(
-        model)) if htorch.utils.internal.is_lazy() else HpuModelAdapter(model)
+        model), disable_tensor_cache=True) if htorch.utils.internal.is_lazy() else HpuModelAdapter(model)


 class HabanaProfilerCounterHelper():
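
Taken together, the commit makes profile_run warm up with at most one KV-cache block worth of tokens per sequence (max_seq_len = self.block_size instead of the largest prompt bucket), comments out the per-bucket warmup loops, and passes disable_tensor_cache=True when wrapping the model in an HPU graph. The standalone sketch below only illustrates how much the number of tokens processed during the profiling pass shrinks under the first change; the numeric values are hypothetical placeholders, not values taken from the repository.

# Hypothetical numbers, purely to illustrate the profile_run change in this commit.
block_size = 128                # stands in for self.block_size
largest_prompt_bucket = 2048    # stands in for self.prompt_seq_bucket_cfg[-1] (old behavior)
max_batch_size = 256            # stands in for self.prompt_bs_bucket_cfg[-1]

tokens_before = max_batch_size * largest_prompt_bucket  # 524288 tokens profiled previously
tokens_after = max_batch_size * block_size               # 32768 tokens profiled now
print(f"profile_run token budget: {tokens_before} -> {tokens_after}")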
