From 97003fe7c6fee8cc7eff53752ff485401ec21f83 Mon Sep 17 00:00:00 2001
From: Russell Bryant
Date: Thu, 23 Jan 2025 21:17:30 -0500
Subject: [PATCH] Set weights_only=True when using torch.load() (#12366)

Signed-off-by: Russell Bryant
---
 vllm/assets/image.py                             | 2 +-
 vllm/lora/models.py                              | 3 ++-
 vllm/model_executor/model_loader/weight_utils.py | 8 +++++---
 vllm/prompt_adapter/utils.py                     | 3 ++-
 4 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/vllm/assets/image.py b/vllm/assets/image.py
index cb831cb0b5bb4..0a55506f88255 100644
--- a/vllm/assets/image.py
+++ b/vllm/assets/image.py
@@ -26,4 +26,4 @@ def image_embeds(self) -> torch.Tensor:
         """
         image_path = get_vllm_public_assets(filename=f"{self.name}.pt",
                                             s3_prefix=VLM_IMAGES_DIR)
-        return torch.load(image_path, map_location="cpu")
+        return torch.load(image_path, map_location="cpu", weights_only=True)
diff --git a/vllm/lora/models.py b/vllm/lora/models.py
index 9809405ca9a61..b77b6b3d72ff4 100644
--- a/vllm/lora/models.py
+++ b/vllm/lora/models.py
@@ -273,7 +273,8 @@ def from_local_checkpoint(
                 new_embeddings_tensor_path)
         elif os.path.isfile(new_embeddings_bin_file_path):
             embeddings = torch.load(new_embeddings_bin_file_path,
-                                    map_location=device)
+                                    map_location=device,
+                                    weights_only=True)
 
         return cls.from_lora_tensors(
             lora_model_id=get_lora_id()
diff --git a/vllm/model_executor/model_loader/weight_utils.py b/vllm/model_executor/model_loader/weight_utils.py
index b70407221312a..b764a940b1742 100644
--- a/vllm/model_executor/model_loader/weight_utils.py
+++ b/vllm/model_executor/model_loader/weight_utils.py
@@ -93,7 +93,7 @@ def convert_bin_to_safetensor_file(
     pt_filename: str,
     sf_filename: str,
 ) -> None:
-    loaded = torch.load(pt_filename, map_location="cpu")
+    loaded = torch.load(pt_filename, map_location="cpu", weights_only=True)
     if "state_dict" in loaded:
         loaded = loaded["state_dict"]
     shared = _shared_pointers(loaded)
@@ -381,7 +381,9 @@ def np_cache_weights_iterator(
             disable=not enable_tqdm,
             bar_format=_BAR_FORMAT,
         ):
-            state = torch.load(bin_file, map_location="cpu")
+            state = torch.load(bin_file,
+                               map_location="cpu",
+                               weights_only=True)
             for name, param in state.items():
                 param_path = os.path.join(np_folder, name)
                 with open(param_path, "wb") as f:
@@ -447,7 +449,7 @@ def pt_weights_iterator(
         disable=not enable_tqdm,
         bar_format=_BAR_FORMAT,
     ):
-        state = torch.load(bin_file, map_location="cpu")
+        state = torch.load(bin_file, map_location="cpu", weights_only=True)
         yield from state.items()
         del state
         torch.cuda.empty_cache()
diff --git a/vllm/prompt_adapter/utils.py b/vllm/prompt_adapter/utils.py
index 473b87c89c21d..8b2732923c4e7 100644
--- a/vllm/prompt_adapter/utils.py
+++ b/vllm/prompt_adapter/utils.py
@@ -89,6 +89,7 @@ def load_peft_weights(model_id: str,
         adapters_weights = safe_load_file(filename, device=device)
     else:
         adapters_weights = torch.load(filename,
-                                      map_location=torch.device(device))
+                                      map_location=torch.device(device),
+                                      weights_only=True)
 
     return adapters_weights
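
Note (not part of the patch itself): a minimal sketch of what passing
weights_only=True to torch.load() changes for the call sites above. The file
name "example_state.pt" is hypothetical and used only for illustration.

    # example_weights_only.py -- illustrative only, not vLLM code
    import torch

    # Save a plain state dict; this benign case loads the same way in both modes.
    state_dict = {"weight": torch.randn(4, 4)}
    torch.save(state_dict, "example_state.pt")

    # Default pickle-based loading can execute arbitrary code embedded in a
    # maliciously crafted checkpoint file.
    unrestricted = torch.load("example_state.pt", map_location="cpu")

    # weights_only=True uses a restricted unpickler that only reconstructs
    # tensors and other allow-listed types; a checkpoint containing arbitrary
    # objects raises pickle.UnpicklingError instead of being executed.
    restricted = torch.load("example_state.pt",
                            map_location="cpu",
                            weights_only=True)

    assert torch.equal(unrestricted["weight"], restricted["weight"])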