From b82fc936b719d286d921381f93199f7bf986f596 Mon Sep 17 00:00:00 2001
From: tomguluson92 <314913739@qq.com>
Date: Thu, 28 Nov 2024 10:43:59 +0800
Subject: [PATCH] fix: bug with assign=True

When I load a Flux-trained LoRA with:

```
from diffusers import AutoPipelineForText2Image, FluxPipeline
from safetensors.torch import load_file

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to('cuda')
pipe.load_lora_weights("model_qk_text.safetensors")
```

it raises the following error:

```
pipe.load_lora_weights("model_qk_text.safetensors")
  File "/output/diffusers/src/diffusers/loaders/lora_pipeline.py", line 1848, in load_lora_weights
    self.load_lora_into_transformer(
  File "/output/diffusers/src/diffusers/loaders/lora_pipeline.py", line 1951, in load_lora_into_transformer
    incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name, **peft_kwargs)
  File "/usr/local/lib/python3.8/site-packages/peft/utils/save_and_load.py", line 458, in set_peft_model_state_dict
    load_result = model.load_state_dict(peft_model_state_dict, strict=False, assign=True)
```

After removing `assign=True`, everything works.

---
 src/peft/utils/save_and_load.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/peft/utils/save_and_load.py b/src/peft/utils/save_and_load.py
index 44a1cad5ff..12aad7f989 100644
--- a/src/peft/utils/save_and_load.py
+++ b/src/peft/utils/save_and_load.py
@@ -442,7 +442,7 @@ def renamed_dora_weights(k):
             model, peft_model_state_dict, ignore_mismatched_sizes=ignore_mismatched_sizes
         )
         if low_cpu_mem_usage:
-            load_result = model.load_state_dict(peft_model_state_dict, strict=False, assign=True)
+            load_result = model.load_state_dict(peft_model_state_dict, strict=False)
             # ensure that the correct device is set
             for module in model.modules():
                 if hasattr(module, "_move_adapter_to_device_of_base_layer"):
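
For context, and not part of the patch: a minimal sketch of what `assign=True` changes in `torch.nn.Module.load_state_dict` (available in PyTorch >= 2.1). With the default `assign=False`, tensor values are copied in place into the module's existing parameters; with `assign=True`, the tensors from the state dict replace the parameter objects themselves, so anything that still holds a reference to the old parameters can end up pointing at stale objects. The toy `nn.Linear` below is purely illustrative and stands in for the PEFT/diffusers model.

```
import torch
import torch.nn as nn

module = nn.Linear(4, 4)
old_weight = module.weight  # keep a reference to the original Parameter

state = {"weight": torch.zeros(4, 4), "bias": torch.zeros(4)}

# Default (assign=False): values are copied in place, the existing Parameter
# object is kept, and external references to it stay valid.
module.load_state_dict(state, strict=False)
print(module.weight is old_weight)  # True

# assign=True: the tensors from the state dict replace the parameters, so
# code still holding the old Parameter (optimizers, hooks, wrapper layers)
# now refers to a stale object.
module.load_state_dict(state, strict=False, assign=True)
print(module.weight is old_weight)  # False
```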