python3 -m multi_loras \
extract_lora \
--base_model_name_or_path "mistralai/Mistral-7B-v0.1" \
--tuned_model_name_or_path "HuggingFaceH4/zephyr-7b-beta" \
--save_path "./mistral-zephyr-lora" \
--fp16 \
--bits 16 \
--lora_r 128
Run SVD: 0%| | 1/673 [00:03<43:49, 3.91s/it, layer=model.layers.0.self_attn.q_proj.lora_A.default, shape=torch.Size([
Traceback (most recent call last):
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.8/dist-packages/multi_loras/__main__.py", line 48, in <module>
main()
File "/usr/local/lib/python3.8/dist-packages/multi_loras/__main__.py", line 43, in main
cmd_func(args)
File "/usr/local/lib/python3.8/dist-packages/multi_loras/extract_lora.py", line 137, in do_extract_lora
assert lora_base.lora_A.default.weight.shape == Vh.shape
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1695, in __getattr__
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
AttributeError: 'Linear' object has no attribute 'lora_A'
The error occurs in every configuration tested:
it does not depend on the quantization or precision flags (same failure with or without --fp16 / --bits).