mirror of https://github.com/leafspark/AutoGGUF
refactor: move some strings to localizations
commit eca2ecc785 (parent ab7ffb0ad3)

src/AutoGGUF.py: 110 changed lines
--- a/src/AutoGGUF.py
+++ b/src/AutoGGUF.py
@@ -20,6 +20,19 @@
 from imports_and_globals import ensure_directory, open_file_safe, resource_path
 from localizations import *
+from functools import wraps
+
+
+def handle_load_preset_error(func):
+    @wraps(func)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return func(self, *args, **kwargs)
+        except Exception as e:
+            QMessageBox.critical(self, ERROR, FAILED_TO_LOAD_PRESET.format(str(e)))
+
+    return wrapper
+
 
 
 class AutoGGUF(QMainWindow):
     def __init__(self):
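The hunk above adds an error-handling decorator built on functools.wraps: it forwards the call to the wrapped method and, if anything raises, reports the failure through a Qt message box instead of letting the exception escape into the event loop. A minimal, dependency-free sketch of the same pattern (the report_errors name, Widget class, and print-based reporting below are illustrative, not AutoGGUF code):

    from functools import wraps


    def report_errors(func):
        # Preserve the wrapped function's name and docstring for logging/debugging.
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception as e:
                # AutoGGUF shows a QMessageBox here; printing keeps this sketch self-contained.
                print(f"Error in {func.__name__}: {e}")

        return wrapper


    class Widget:
        @report_errors
        def load_preset(self):
            raise ValueError("bad preset")


    Widget().load_preset()  # prints "Error in load_preset: bad preset" instead of raising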
@@ -679,53 +692,49 @@ def save_preset(self):
         )
         self.logger.info(PRESET_SAVED_TO.format(file_name))
 
+    @handle_load_preset_error
     def load_preset(self):
         self.logger.info(LOADING_PRESET)
         file_name, _ = QFileDialog.getOpenFileName(self, LOAD_PRESET, "", JSON_FILES)
         if file_name:
-            try:
             with open(file_name, "r") as f:
                 preset = json.load(f)
 
             self.quant_type.clearSelection()
             for quant_type in preset.get("quant_types", []):
                 items = self.quant_type.findItems(quant_type, Qt.MatchExactly)
                 if items:
                     items[0].setSelected(True)
             self.allow_requantize.setChecked(preset.get("allow_requantize", False))
             self.leave_output_tensor.setChecked(
                 preset.get("leave_output_tensor", False)
             )
             self.pure.setChecked(preset.get("pure", False))
             self.imatrix.setText(preset.get("imatrix", ""))
             self.include_weights.setText(preset.get("include_weights", ""))
             self.exclude_weights.setText(preset.get("exclude_weights", ""))
             self.use_output_tensor_type.setChecked(
                 preset.get("use_output_tensor_type", False)
             )
-            self.output_tensor_type.setCurrentText(
-                preset.get("output_tensor_type", "")
-            )
+            self.output_tensor_type.setCurrentText(preset.get("output_tensor_type", ""))
             self.use_token_embedding_type.setChecked(
                 preset.get("use_token_embedding_type", False)
             )
             self.token_embedding_type.setCurrentText(
                 preset.get("token_embedding_type", "")
             )
             self.keep_split.setChecked(preset.get("keep_split", False))
             self.extra_arguments.setText(preset.get("extra_arguments", ""))
 
             # Clear existing KV overrides and add new ones
             for entry in self.kv_override_entries:
                 self.remove_kv_override(entry)
             for override in preset.get("kv_overrides", []):
                 self.add_kv_override(override)
 
             QMessageBox.information(
                 self, PRESET_LOADED, PRESET_LOADED_FROM.format(file_name)
             )
-            except Exception as e:
-                QMessageBox.critical(self, ERROR, FAILED_TO_LOAD_PRESET.format(str(e)))
         self.logger.info(PRESET_LOADED_FROM.format(file_name))
 
     def save_task_preset(self, task_item):
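For reference, the keys read by load_preset above map one-to-one onto a flat JSON preset file. A hypothetical preset written with the standard json module (the file name and all values are illustrative; only the key names come from the diff):

    import json

    # Hypothetical preset matching the keys load_preset reads; values are examples only.
    preset = {
        "quant_types": ["Q4_K_M", "Q5_K_M"],
        "allow_requantize": False,
        "leave_output_tensor": False,
        "pure": False,
        "imatrix": "",
        "include_weights": "",
        "exclude_weights": "",
        "use_output_tensor_type": False,
        "output_tensor_type": "",
        "use_token_embedding_type": False,
        "token_embedding_type": "",
        "keep_split": False,
        "extra_arguments": "",
        "kv_overrides": [],
    }

    with open("example_preset.json", "w") as f:
        json.dump(preset, f, indent=2)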
@@ -812,9 +821,7 @@ def delete_lora_adapter_item(self, adapter_widget):
 
     def browse_hf_model_input(self):
         self.logger.info("Browsing for HuggingFace model directory")
-        model_dir = QFileDialog.getExistingDirectory(
-            self, "Select HuggingFace Model Directory"
-        )
+        model_dir = QFileDialog.getExistingDirectory(self, SELECT_HF_MODEL_DIRECTORY)
         if model_dir:
             self.hf_model_input.setText(os.path.abspath(model_dir))
 
@@ -1340,11 +1347,6 @@ def cancel_task(self, item):
                 task_item.update_status(CANCELED)
                 break
 
-    def retry_task(self, item):
-        task_item = self.task_list.itemWidget(item)
-        # TODO: Implement the logic to restart the task
-        pass
-
     def delete_task(self, item):
         self.logger.info(DELETING_TASK.format(item.text()))
         reply = QMessageBox.question(
@@ -1666,7 +1668,6 @@ def quantize_model(self):
 
     def update_model_info(self, model_info):
         self.logger.debug(UPDATING_MODEL_INFO.format(model_info))
-        # TODO: Do something with this
         pass
 
     def parse_progress(self, line, task_item):
@@ -1734,17 +1735,6 @@ def browse_imatrix_output(self):
         if output_file:
             self.imatrix_output.setText(os.path.abspath(output_file))
 
-    def update_gpu_offload_spinbox(self, value):
-        self.gpu_offload_spinbox.setValue(value)
-
-    def update_gpu_offload_slider(self, value):
-        self.gpu_offload_slider.setValue(value)
-
-    def toggle_gpu_offload_auto(self, state):
-        is_auto = state == Qt.CheckState.Checked
-        self.gpu_offload_slider.setEnabled(not is_auto)
-        self.gpu_offload_spinbox.setEnabled(not is_auto)
-
     def generate_imatrix(self):
         self.logger.info(STARTING_IMATRIX_GENERATION)
         try:
@@ -1,6 +1,7 @@
 from PyQt6.QtWidgets import QMessageBox
 from localizations import *
 
 
 def show_error(logger, message):
     logger.error(ERROR_MESSAGE.format(message))
     QMessageBox.critical(None, ERROR, message)
@@ -100,6 +100,7 @@ def __init__(self):
         self.SPLIT_MAX_SIZE = ""
         self.DRY_RUN = ""
         self.CONVERT_HF_TO_GGUF = ""
+        self.SELECT_HF_MODEL_DIRECTORY = ""
 
         # General Messages
         self.ERROR = ""
@@ -372,6 +373,7 @@ def __init__(self):
         self.SPLIT_MAX_SIZE = "Split Max Size:"
         self.DRY_RUN = "Dry Run"
         self.CONVERT_HF_TO_GGUF = "Convert HF to GGUF"
+        self.SELECT_HF_MODEL_DIRECTORY = "Select HuggingFace Model Directory"
 
         # General Messages
         self.ERROR = "Error"
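The last two hunks add SELECT_HF_MODEL_DIRECTORY to what appears to be the localization class (blank in the template, filled in for English), and the show_error hunk consumes such constants via `from localizations import *`. A rough sketch of that pattern, with invented class names since the real module layout is not shown in this diff, illustrating how SELECT_HF_MODEL_DIRECTORY ends up importable as a bare constant:

    class _Localization:
        def __init__(self):
            # Template: every UI string exists as an attribute, empty until a language fills it in.
            self.SELECT_HF_MODEL_DIRECTORY = ""
            self.ERROR = ""


    class _English(_Localization):
        def __init__(self):
            super().__init__()
            self.SELECT_HF_MODEL_DIRECTORY = "Select HuggingFace Model Directory"
            self.ERROR = "Error"


    # Expose the active language's strings as module-level constants so that
    # `from localizations import *` gives callers bare names like ERROR.
    _active = _English()
    globals().update({k: v for k, v in vars(_active).items() if k.isupper()})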