From 7407c12e9171d924a0bd5aa59ecc1a00c301521d Mon Sep 17 00:00:00 2001
From: Taesu Kim
Date: Fri, 19 Jul 2024 01:35:29 +0000
Subject: [PATCH] use block_size as max_seq_len during profile_run

---
 vllm/worker/habana_model_runner.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/vllm/worker/habana_model_runner.py b/vllm/worker/habana_model_runner.py
index 69f57e75b175c..6e6aa018018ff 100644
--- a/vllm/worker/habana_model_runner.py
+++ b/vllm/worker/habana_model_runner.py
@@ -1034,7 +1034,7 @@ def profile_run(self) -> None:
         num_layers = self.model_config.get_num_layers(self.parallel_config)
         kv_caches = [None] * num_layers
         max_batch_size = self.prompt_bs_bucket_cfg[-1]
-        max_seq_len = self.prompt_seq_bucket_cfg[-1]
+        max_seq_len = self.block_size
 
         self.warmup_scenario(max_batch_size, max_seq_len, True, kv_caches)
 
@@ -1123,8 +1123,8 @@ def warmup_model(self, kv_caches: List[torch.Tensor]) -> None:
         self.profiler.start('internal', 'warmup')
         start_mem = HabanaMemoryProfiler.current_device_memory_usage()
         start_time = time.perf_counter()
-        self.warmup_all_buckets(self.prompt_buckets, True, kv_caches)
-        self.warmup_all_buckets(self.decode_buckets, False, kv_caches)
+        # self.warmup_all_buckets(self.prompt_buckets, True, kv_caches)
+        # self.warmup_all_buckets(self.decode_buckets, False, kv_caches)
 
         if not self.enforce_eager:
             mem_margin = 1.0 - float(
@@ -1160,7 +1160,7 @@ def vocab_size(self) -> int:
 
 def _maybe_wrap_in_hpu_graph(model):
     return htorch.hpu.wrap_in_hpu_graph(HpuModelAdapter(
-        model)) if htorch.utils.internal.is_lazy() else HpuModelAdapter(model)
+        model), disable_tensor_cache=True) if htorch.utils.internal.is_lazy() else HpuModelAdapter(model)
 
 
 class HabanaProfilerCounterHelper():