mirror of https://github.com/leafspark/AutoGGUF
refactor: allow specifying output path in lora conversion subprocess
- remove the shutil.move operation
- allow specifying the output path in the arguments of the convert_lora_to_ggml.py utility
- bump the max number of LoRA layers
parent 5f354e692a
commit db1733b4ed
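In effect, the converter's command line gains a required output-path argument. A before/after sketch of the invocation (paths are illustrative, not from the diff):

    # before: output hardcoded to <lora_dir>/ggml-adapter-model.bin
    python src/convert_lora_to_ggml.py ./my-lora llama
    # after: destination passed explicitly as the second argument
    python src/convert_lora_to_ggml.py ./my-lora ./out/ggml-adapter-model.bin llama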
@@ -1099,21 +1099,10 @@ def restart_task(self, task_item) -> None:
                 task_item.update_status(IN_PROGRESS)
                 break
 
-    def lora_conversion_finished(self, thread, input_path, output_path) -> None:
+    def lora_conversion_finished(self, thread) -> None:
         self.logger.info(LORA_CONVERSION_FINISHED)
         if thread in self.quant_threads:
             self.quant_threads.remove(thread)
-        try:
-            # Only move the file if the output type is GGML
-            if self.lora_output_type_combo.currentText() == "GGML":
-                source_file = os.path.join(input_path, "ggml-adapter-model.bin")
-                if os.path.exists(source_file):
-                    shutil.move(source_file, output_path)
-                    self.logger.info(LORA_FILE_MOVED.format(source_file, output_path))
-                else:
-                    self.logger.warning(LORA_FILE_NOT_FOUND.format(source_file))
-        except Exception as e:
-            self.logger.error(ERROR_MOVING_LORA_FILE.format(str(e)))
 
     def download_finished(self, extract_dir) -> None:
         self.logger.info(DOWNLOAD_FINISHED_EXTRACTED_TO.format(extract_dir))
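Why the move disappears: the converter used to write its output to a fixed location next to the inputs, so the GUI had to relocate the file afterwards. A minimal sketch of the old behavior (names taken from the removed code):

    # old converter behavior: destination derived from the input directory
    output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
    # ...which forced the GUI callback to shutil.move() the result into place.
    # With the destination passed through to the script, the callback is
    # reduced to thread bookkeeping.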
@@ -58,7 +58,7 @@ def pyinstaller_include():
 
 if __name__ == "__main__":
     if len(sys.argv) < 2:
-        logger.info(f"Usage: python {sys.argv[0]} <path> [arch]")
+        logger.info(f"Usage: python {sys.argv[0]} <path> <output_path> [arch]")
         logger.info(
             "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
         )
@@ -69,7 +69,7 @@ def pyinstaller_include():
 
     input_json = os.path.join(sys.argv[1], "adapter_config.json")
     input_model = os.path.join(sys.argv[1], "adapter_model.bin")
-    output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
+    output_path = sys.argv[2]
 
     if os.path.exists(input_model):
         model = torch.load(input_model, map_location="cpu")
@@ -80,7 +80,7 @@ def pyinstaller_include():
         model = load_file(input_model, device="cpu")
 
-    arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
+    arch_name = sys.argv[3] if len(sys.argv) == 4 else "llama"
 
     if arch_name not in gguf.MODEL_ARCH_NAMES.values():
         logger.error(f"Error: unsupported architecture {arch_name}")
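With the new positional argument in the middle, the optional architecture shifts from argv[2] to argv[3]. A sketch of the resulting layout:

    # python convert_lora_to_ggml.py <path> <output_path> [arch]
    # sys.argv[1] -> directory containing the PEFT LoRA files
    # sys.argv[2] -> destination for the converted GGML adapter (new, required)
    # sys.argv[3] -> optional architecture name, defaulting to "llama"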
@@ -89,7 +89,7 @@ def pyinstaller_include():
     arch = list(gguf.MODEL_ARCH_NAMES.keys())[
         list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)
     ]
-    name_map = gguf.TensorNameMap(arch, 200)  # 200 layers ought to be enough for anyone
+    name_map = gguf.TensorNameMap(arch, 500)
 
     with open(input_json, "r") as f:
         params = json.load(f)
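The second argument to gguf.TensorNameMap is the number of transformer blocks to generate tensor-name mappings for; LoRA tensors on block indices beyond it would fail to map. A minimal sketch of the call (mirroring the script, with "llama" as an illustrative arch):

    import gguf

    arch = list(gguf.MODEL_ARCH_NAMES.keys())[
        list(gguf.MODEL_ARCH_NAMES.values()).index("llama")
    ]
    name_map = gguf.TensorNameMap(arch, 500)  # raised from 200 to cover deeper models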
@@ -179,7 +179,12 @@ def convert_lora(self) -> None:
                 raise ValueError(BASE_MODEL_PATH_REQUIRED)
             command.extend(["--base", base_model_path])
         else:  # Use old GGML parameters for GGML
-            command = ["python", "src/convert_lora_to_ggml.py", lora_input_path]
+            command = [
+                "python",
+                "src/convert_lora_to_ggml.py",
+                lora_input_path,
+                lora_output_path,
+            ]
 
         logs_path = self.logs_input.text()
         ensure_directory(logs_path)
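A rough sketch of what executing that command list amounts to (the app runs it in a worker thread; subprocess.run here is only for illustration, and the paths are placeholders):

    import subprocess

    command = [
        "python",
        "src/convert_lora_to_ggml.py",
        "/path/to/peft-lora",              # lora_input_path
        "/path/to/out/adapter-model.bin",  # lora_output_path, now passed through
    ]
    subprocess.run(command, check=True)  # raises CalledProcessError on failure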
@@ -203,11 +208,7 @@ def convert_lora(self) -> None:
         self.task_list.setItemWidget(list_item, task_item)
 
         thread.status_signal.connect(task_item.update_status)
-        thread.finished_signal.connect(
-            lambda: self.lora_conversion_finished(
-                thread, lora_input_path, lora_output_path
-            )
-        )
+        thread.finished_signal.connect(lambda: self.lora_conversion_finished(thread))
         thread.error_signal.connect(
             lambda err: handle_error(self.logger, err, task_item)
         )
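One general caveat with connections like this (a note, not part of the change): a lambda closes over variables late, so if such a connection were ever made inside a loop, binding via a default argument keeps each connection pointed at its own thread:

    # hypothetical defensive variant; the diff's direct closure is fine for a
    # single connection per convert_lora() call
    thread.finished_signal.connect(lambda t=thread: self.lora_conversion_finished(t))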