From 8aedb5fc3fdddd6d7f6cd34d45dcf2e7f3ddc96b Mon Sep 17 00:00:00 2001
From: BuildTools
Date: Sun, 4 Aug 2024 14:24:29 -0700
Subject: [PATCH] add localization support for LoRA and model features

---
 src/localizations.py | 249 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 247 insertions(+), 2 deletions(-)

diff --git a/src/localizations.py b/src/localizations.py
index dde4180..016ae98 100644
--- a/src/localizations.py
+++ b/src/localizations.py
@@ -2,6 +2,7 @@ class _Localization:
     def __init__(self):
+        super().__init__()
         self.WINDOW_TITLE = ""
         self.RAM_USAGE = ""
         self.CPU_USAGE = ""
@@ -169,7 +170,60 @@ def __init__(self):
         self.CONTEXT_SIZE = ""
         self.CONTEXT_SIZE_FOR_IMATRIX = ""
         self.THREADS = ""
-        self.NUMBER_OF_THREADS_FOR_IMATRIX = ""
+        self.NUMBER_OF_THREADS_FOR_IMATRIX = ""
+        self.LORA_CONVERSION = ""
+        self.LORA_INPUT_PATH = ""
+        self.LORA_OUTPUT_PATH = ""
+        self.SELECT_LORA_INPUT_DIRECTORY = ""
+        self.SELECT_LORA_OUTPUT_FILE = ""
+        self.CONVERT_LORA = ""
+        self.STARTING_LORA_CONVERSION = ""
+        self.LORA_INPUT_PATH_REQUIRED = ""
+        self.LORA_OUTPUT_PATH_REQUIRED = ""
+        self.ERROR_STARTING_LORA_CONVERSION = ""
+        self.LORA_CONVERSION_TASK_STARTED = ""
+        self.BIN_FILES = ""
+        self.BROWSING_FOR_LORA_INPUT_DIRECTORY = ""
+        self.BROWSING_FOR_LORA_OUTPUT_FILE = ""
+        self.CONVERTING_LORA = ""
+        self.LORA_CONVERSION_FINISHED = ""
+        self.LORA_FILE_MOVED = ""
+        self.LORA_FILE_NOT_FOUND = ""
+        self.ERROR_MOVING_LORA_FILE = ""
+        self.EXPORT_LORA = ""
+        self.MODEL_PATH_REQUIRED = ""
+        self.OUTPUT_PATH_REQUIRED = ""
+        self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = ""
+        self.INVALID_LORA_SCALE_VALUE = ""
+        self.ERROR_STARTING_LORA_EXPORT = ""
+        self.LORA_EXPORT_TASK_STARTED = ""
+        self.GGML_LORA_ADAPTERS = ""
+        self.SELECT_LORA_ADAPTER_FILES = ""
+        self.ADD_ADAPTER = ""
+        self.DELETE_ADAPTER = ""
+        self.LORA_SCALE = ""
+        self.ENTER_LORA_SCALE_VALUE = ""
+        self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = ""
+        self.EXPORTING_LORA = ""
+        self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = ""
+        self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = ""
+        self.ADDING_LORA_ADAPTER = ""
+        self.DELETING_LORA_ADAPTER = ""
+        self.LORA_FILES = ""
+        self.SELECT_LORA_ADAPTER_FILE = ""
+        self.STARTING_LORA_EXPORT = ""
+        self.OUTPUT_TYPE = ""
+        self.SELECT_OUTPUT_TYPE = ""
+        self.GGUF_AND_BIN_FILES = ""
+        self.BASE_MODEL = ""
+        self.SELECT_BASE_MODEL_FILE = ""
+        self.BASE_MODEL_PATH_REQUIRED = ""
+        self.BROWSING_FOR_BASE_MODEL_FILE = ""
+        self.SELECT_BASE_MODEL_FOLDER = ""
+        self.BROWSING_FOR_BASE_MODEL_FOLDER = ""
+        self.LORA_CONVERSION_FROM_TO = ""
+        self.GENERATING_IMATRIX_FOR = ""
+        self.MODEL_PATH_REQUIRED_FOR_IMATRIX = ""

 class _English(_Localization):
     def __init__(self):
@@ -563,7 +617,67 @@ def __init__(self):
         self.SET_GPU_OFFLOAD_VALUE = "Définir la valeur de déchargement GPU (-ngl)"
         self.COMPLETED = "Terminé"
         self.REFRESH_MODELS = "Actualiser les modèles"
-
+        self.REFRESH_MODELS = "Actualiser les modèles"
+        self.EXTRA_ARGUMENTS = "Arguments supplémentaires :"
+        self.EXTRA_ARGUMENTS_LABEL = "Arguments supplémentaires en ligne de commande"
+        self.CONTEXT_SIZE = "Taille du contexte :"
+        self.CONTEXT_SIZE_FOR_IMATRIX = "Taille du contexte pour la génération d'IMatrix"
+        self.THREADS = "Threads :"
+        self.NUMBER_OF_THREADS_FOR_IMATRIX = "Nombre de threads pour la génération d'IMatrix"
+        self.LORA_CONVERSION = "Conversion LoRA"
+        self.LORA_INPUT_PATH = "Chemin d'entrée LoRA"
+        self.LORA_OUTPUT_PATH = "Chemin de sortie LoRA"
+        self.SELECT_LORA_INPUT_DIRECTORY = "Sélectionner le répertoire d'entrée LoRA"
+        self.SELECT_LORA_OUTPUT_FILE = "Sélectionner le fichier de sortie LoRA"
+        self.CONVERT_LORA = "Convertir LoRA"
+        self.STARTING_LORA_CONVERSION = "Démarrage de la conversion LoRA"
+        self.LORA_INPUT_PATH_REQUIRED = "Le chemin d'entrée LoRA est requis."
+        self.LORA_OUTPUT_PATH_REQUIRED = "Le chemin de sortie LoRA est requis."
+        self.ERROR_STARTING_LORA_CONVERSION = "Erreur lors du démarrage de la conversion LoRA : {}"
+        self.LORA_CONVERSION_TASK_STARTED = "Tâche de conversion LoRA démarrée."
+        self.BIN_FILES = "Fichiers binaires (*.bin)"
+        self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "Recherche du répertoire d'entrée LoRA..."
+        self.BROWSING_FOR_LORA_OUTPUT_FILE = "Recherche du fichier de sortie LoRA..."
+        self.CONVERTING_LORA = "Conversion LoRA"
+        self.LORA_CONVERSION_FINISHED = "Conversion LoRA terminée."
+        self.LORA_FILE_MOVED = "Fichier LoRA déplacé de {} à {}."
+        self.LORA_FILE_NOT_FOUND = "Fichier LoRA non trouvé : {}."
+        self.ERROR_MOVING_LORA_FILE = "Erreur lors du déplacement du fichier LoRA : {}"
+        self.EXPORT_LORA = "Exporter LoRA"
+        self.MODEL_PATH_REQUIRED = "Le chemin du modèle est requis."
+        self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis."
+        self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = "Au moins un adaptateur LoRA est requis."
+        self.INVALID_LORA_SCALE_VALUE = "Valeur d'échelle LoRA invalide."
+        self.ERROR_STARTING_LORA_EXPORT = "Erreur lors du démarrage de l'exportation LoRA : {}"
+        self.LORA_EXPORT_TASK_STARTED = "Tâche d'exportation LoRA démarrée."
+        self.GGML_LORA_ADAPTERS = "Adaptateurs LoRA GGML"
+        self.SELECT_LORA_ADAPTER_FILES = "Sélectionner les fichiers d'adaptateur LoRA"
+        self.ADD_ADAPTER = "Ajouter un adaptateur"
+        self.DELETE_ADAPTER = "Supprimer"
+        self.LORA_SCALE = "Échelle LoRA"
+        self.ENTER_LORA_SCALE_VALUE = "Entrez la valeur d'échelle LoRA (Optionnel)"
+        self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "Nombre de threads pour l'exportation LoRA"
+        self.EXPORTING_LORA = "Exportation de LoRA..."
+        self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = "Recherche du fichier de modèle LoRA à exporter..."
+        self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = "Recherche du fichier de sortie LoRA à exporter..."
+        self.ADDING_LORA_ADAPTER = "Ajout d'un adaptateur LoRA..."
+        self.DELETING_LORA_ADAPTER = "Suppression de l'adaptateur LoRA..."
+        self.LORA_FILES = "Fichiers LoRA (*.bin)"
+        self.SELECT_LORA_ADAPTER_FILE = "Sélectionner le fichier d'adaptateur LoRA"
+        self.STARTING_LORA_EXPORT = "Démarrage de l'exportation LoRA..."
+        self.OUTPUT_TYPE = "Type de sortie"
+        self.SELECT_OUTPUT_TYPE = "Sélectionner le type de sortie (GGUF ou GGML)"
+        self.GGUF_AND_BIN_FILES = "Fichiers GGUF et binaires (*.gguf *.bin)"
+        self.BASE_MODEL = "Modèle de base"
+        self.SELECT_BASE_MODEL_FILE = "Sélectionner le fichier du modèle de base (GGUF)"
+        self.BASE_MODEL_PATH_REQUIRED = "Le chemin du modèle de base est requis pour la sortie GGUF."
+        self.BROWSING_FOR_BASE_MODEL_FILE = "Recherche du fichier du modèle de base..."
+        self.SELECT_BASE_MODEL_FOLDER = "Sélectionner le dossier du modèle de base (contenant safetensors)"
+        self.BROWSING_FOR_BASE_MODEL_FOLDER = "Recherche du dossier du modèle de base..."
+        self.LORA_CONVERSION_FROM_TO = "Conversion LoRA de {} à {}"
+        self.GENERATING_IMATRIX_FOR = "Génération d'IMatrix pour {}"
+        self.MODEL_PATH_REQUIRED_FOR_IMATRIX = "Le chemin du modèle est requis pour la génération d'IMatrix."
+
 class _SimplifiedChinese(_Localization):
     def __init__(self):
         super().__init__()
@@ -729,6 +843,66 @@ def __init__(self):
         self.SET_GPU_OFFLOAD_VALUE = "设置GPU卸载值(-ngl)"
         self.COMPLETED = "已完成"
         self.REFRESH_MODELS = "刷新模型"
+        self.REFRESH_MODELS = "刷新模型"
+        self.EXTRA_ARGUMENTS = "额外参数:"
+        self.EXTRA_ARGUMENTS_LABEL = "附加命令行参数"
+        self.CONTEXT_SIZE = "上下文大小:"
+        self.CONTEXT_SIZE_FOR_IMATRIX = "IMatrix生成的上下文大小"
+        self.THREADS = "线程数:"
+        self.NUMBER_OF_THREADS_FOR_IMATRIX = "IMatrix生成的线程数"
+        self.LORA_CONVERSION = "LoRA转换"
+        self.LORA_INPUT_PATH = "LoRA输入路径"
+        self.LORA_OUTPUT_PATH = "LoRA输出路径"
+        self.SELECT_LORA_INPUT_DIRECTORY = "选择LoRA输入目录"
+        self.SELECT_LORA_OUTPUT_FILE = "选择LoRA输出文件"
+        self.CONVERT_LORA = "转换LoRA"
+        self.STARTING_LORA_CONVERSION = "开始LoRA转换"
+        self.LORA_INPUT_PATH_REQUIRED = "需要LoRA输入路径。"
+        self.LORA_OUTPUT_PATH_REQUIRED = "需要LoRA输出路径。"
+        self.ERROR_STARTING_LORA_CONVERSION = "启动LoRA转换时出错:{}"
+        self.LORA_CONVERSION_TASK_STARTED = "LoRA转换任务已启动。"
+        self.BIN_FILES = "二进制文件 (*.bin)"
+        self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "正在浏览LoRA输入目录..."
+        self.BROWSING_FOR_LORA_OUTPUT_FILE = "正在浏览LoRA输出文件..."
+        self.CONVERTING_LORA = "LoRA转换"
+        self.LORA_CONVERSION_FINISHED = "LoRA转换完成。"
+        self.LORA_FILE_MOVED = "LoRA文件已从{}移动到{}。"
+        self.LORA_FILE_NOT_FOUND = "未找到LoRA文件:{}。"
+        self.ERROR_MOVING_LORA_FILE = "移动LoRA文件时出错:{}"
+        self.EXPORT_LORA = "导出LoRA"
+        self.MODEL_PATH_REQUIRED = "需要模型路径。"
+        self.OUTPUT_PATH_REQUIRED = "需要输出路径。"
+        self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = "至少需要一个LoRA适配器。"
+        self.INVALID_LORA_SCALE_VALUE = "无效的LoRA比例值。"
+        self.ERROR_STARTING_LORA_EXPORT = "启动LoRA导出时出错:{}"
+        self.LORA_EXPORT_TASK_STARTED = "LoRA导出任务已启动。"
+        self.GGML_LORA_ADAPTERS = "GGML LoRA适配器"
+        self.SELECT_LORA_ADAPTER_FILES = "选择LoRA适配器文件"
+        self.ADD_ADAPTER = "添加适配器"
+        self.DELETE_ADAPTER = "删除"
+        self.LORA_SCALE = "LoRA比例"
+        self.ENTER_LORA_SCALE_VALUE = "输入LoRA比例值(可选)"
+        self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "LoRA导出的线程数"
+        self.EXPORTING_LORA = "正在导出LoRA..."
+        self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = "正在浏览导出LoRA模型文件..."
+        self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = "正在浏览导出LoRA输出文件..."
+        self.ADDING_LORA_ADAPTER = "正在添加LoRA适配器..."
+        self.DELETING_LORA_ADAPTER = "正在删除LoRA适配器..."
+        self.LORA_FILES = "LoRA文件 (*.bin)"
+        self.SELECT_LORA_ADAPTER_FILE = "选择LoRA适配器文件"
+        self.STARTING_LORA_EXPORT = "开始LoRA导出..."
+        self.OUTPUT_TYPE = "输出类型"
+        self.SELECT_OUTPUT_TYPE = "选择输出类型(GGUF或GGML)"
+        self.GGUF_AND_BIN_FILES = "GGUF和二进制文件 (*.gguf *.bin)"
+        self.BASE_MODEL = "基础模型"
+        self.SELECT_BASE_MODEL_FILE = "选择基础模型文件(GGUF)"
+        self.BASE_MODEL_PATH_REQUIRED = "GGUF输出需要基础模型路径。"
+        self.BROWSING_FOR_BASE_MODEL_FILE = "正在浏览基础模型文件..."
+        self.SELECT_BASE_MODEL_FOLDER = "选择基础模型文件夹(包含safetensors)"
+        self.BROWSING_FOR_BASE_MODEL_FOLDER = "正在浏览基础模型文件夹..."
+        self.LORA_CONVERSION_FROM_TO = "LoRA从{}转换到{}"
+        self.GENERATING_IMATRIX_FOR = "正在为{}生成IMatrix"
+        self.MODEL_PATH_REQUIRED_FOR_IMATRIX = "IMatrix生成需要模型路径。"

 class _Spanish(_Localization):
     def __init__(self):
@@ -895,6 +1069,66 @@ def __init__(self):
         self.SET_GPU_OFFLOAD_VALUE = "Establecer valor de descarga GPU (-ngl)"
         self.COMPLETED = "Completado"
         self.REFRESH_MODELS = "Actualizar modelos"
+        self.REFRESH_MODELS = "Actualizar modelos"
+        self.EXTRA_ARGUMENTS = "Argumentos adicionales:"
+        self.EXTRA_ARGUMENTS_LABEL = "Argumentos adicionales de línea de comandos"
+        self.CONTEXT_SIZE = "Tamaño del contexto:"
+        self.CONTEXT_SIZE_FOR_IMATRIX = "Tamaño del contexto para generación de IMatrix"
+        self.THREADS = "Hilos:"
+        self.NUMBER_OF_THREADS_FOR_IMATRIX = "Número de hilos para generación de IMatrix"
+        self.LORA_CONVERSION = "Conversión LoRA"
+        self.LORA_INPUT_PATH = "Ruta de entrada LoRA"
+        self.LORA_OUTPUT_PATH = "Ruta de salida LoRA"
+        self.SELECT_LORA_INPUT_DIRECTORY = "Seleccionar directorio de entrada LoRA"
+        self.SELECT_LORA_OUTPUT_FILE = "Seleccionar archivo de salida LoRA"
+        self.CONVERT_LORA = "Convertir LoRA"
+        self.STARTING_LORA_CONVERSION = "Iniciando conversión LoRA"
+        self.LORA_INPUT_PATH_REQUIRED = "Se requiere la ruta de entrada LoRA."
+        self.LORA_OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida LoRA."
+        self.ERROR_STARTING_LORA_CONVERSION = "Error al iniciar la conversión LoRA: {}"
+        self.LORA_CONVERSION_TASK_STARTED = "Tarea de conversión LoRA iniciada."
+        self.BIN_FILES = "Archivos binarios (*.bin)"
+        self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "Buscando directorio de entrada LoRA..."
+        self.BROWSING_FOR_LORA_OUTPUT_FILE = "Buscando archivo de salida LoRA..."
+        self.CONVERTING_LORA = "Convirtiendo LoRA"
+        self.LORA_CONVERSION_FINISHED = "Conversión LoRA finalizada."
+        self.LORA_FILE_MOVED = "Archivo LoRA movido de {} a {}."
+        self.LORA_FILE_NOT_FOUND = "Archivo LoRA no encontrado: {}."
+        self.ERROR_MOVING_LORA_FILE = "Error al mover el archivo LoRA: {}"
+        self.EXPORT_LORA = "Exportar LoRA"
+        self.MODEL_PATH_REQUIRED = "Se requiere la ruta del modelo."
+        self.OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida."
+        self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = "Se requiere al menos un adaptador LoRA."
+        self.INVALID_LORA_SCALE_VALUE = "Valor de escala LoRA inválido."
+        self.ERROR_STARTING_LORA_EXPORT = "Error al iniciar la exportación LoRA: {}"
+        self.LORA_EXPORT_TASK_STARTED = "Tarea de exportación LoRA iniciada."
+        self.GGML_LORA_ADAPTERS = "Adaptadores LoRA GGML"
+        self.SELECT_LORA_ADAPTER_FILES = "Seleccionar archivos de adaptador LoRA"
+        self.ADD_ADAPTER = "Añadir adaptador"
+        self.DELETE_ADAPTER = "Eliminar"
+        self.LORA_SCALE = "Escala LoRA"
+        self.ENTER_LORA_SCALE_VALUE = "Ingresar valor de escala LoRA (Opcional)"
+        self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "Número de hilos para exportación LoRA"
+        self.EXPORTING_LORA = "Exportando LoRA..."
+        self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = "Buscando archivo de modelo LoRA para exportar..."
+        self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = "Buscando archivo de salida LoRA para exportar..."
+        self.ADDING_LORA_ADAPTER = "Añadiendo adaptador LoRA..."
+        self.DELETING_LORA_ADAPTER = "Eliminando adaptador LoRA..."
+        self.LORA_FILES = "Archivos LoRA (*.bin)"
+        self.SELECT_LORA_ADAPTER_FILE = "Seleccionar archivo de adaptador LoRA"
+        self.STARTING_LORA_EXPORT = "Iniciando exportación LoRA..."
+ self.OUTPUT_TYPE = "Tipo de salida" + self.SELECT_OUTPUT_TYPE = "Seleccionar tipo de salida (GGUF o GGML)" + self.GGUF_AND_BIN_FILES = "Archivos GGUF y binarios (*.gguf *.bin)" + self.BASE_MODEL = "Modelo base" + self.SELECT_BASE_MODEL_FILE = "Seleccionar archivo de modelo base (GGUF)" + self.BASE_MODEL_PATH_REQUIRED = "Se requiere la ruta del modelo base para la salida GGUF." + self.BROWSING_FOR_BASE_MODEL_FILE = "Buscando archivo de modelo base..." + self.SELECT_BASE_MODEL_FOLDER = "Seleccionar carpeta de modelo base (que contiene safetensors)" + self.BROWSING_FOR_BASE_MODEL_FOLDER = "Buscando carpeta de modelo base..." + self.LORA_CONVERSION_FROM_TO = "Conversión LoRA de {} a {}" + self.GENERATING_IMATRIX_FOR = "Generando IMatrix para {}" + self.MODEL_PATH_REQUIRED_FOR_IMATRIX = "Se requiere la ruta del modelo para la generación de IMatrix." class _Hindi(_Localization): def __init__(self): @@ -4987,6 +5221,17 @@ def set_language(lang_code): global WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS, OVERRIDE_MODEL_METADATA, INPUT_DATA_FILE_FOR_IMATRIX, MODEL_TO_BE_QUANTIZED global OUTPUT_PATH_FOR_GENERATED_IMATRIX, HOW_OFTEN_TO_SAVE_IMATRIX, SET_GPU_OFFLOAD_VALUE, COMPLETED, REFRESH_MODELS global CONTEXT_SIZE, CONTEXT_SIZE_FOR_IMATRIX, THREADS, NUMBER_OF_THREADS_FOR_IMATRIX, EXTRA_ARGUMENTS, EXTRA_ARGUMENTS_LABEL + global LORA_CONVERSION, LORA_INPUT_PATH, LORA_OUTPUT_PATH, SELECT_LORA_INPUT_DIRECTORY, SELECT_LORA_OUTPUT_FILE + global CONVERT_LORA, STARTING_LORA_CONVERSION, LORA_INPUT_PATH_REQUIRED, LORA_OUTPUT_PATH_REQUIRED, ERROR_STARTING_LORA_CONVERSION + global LORA_CONVERSION_TASK_STARTED, BIN_FILES, BROWSING_FOR_LORA_INPUT_DIRECTORY, BROWSING_FOR_LORA_OUTPUT_FILE, CONVERTING_LORA + global LORA_CONVERSION_FINISHED, LORA_FILE_MOVED, LORA_FILE_NOT_FOUND, ERROR_MOVING_LORA_FILE, EXPORT_LORA + global MODEL_PATH_REQUIRED, AT_LEAST_ONE_LORA_ADAPTER_REQUIRED, INVALID_LORA_SCALE_VALUE, ERROR_STARTING_LORA_EXPORT, LORA_EXPORT_TASK_STARTED + global GGML_LORA_ADAPTERS, SELECT_LORA_ADAPTER_FILES, ADD_ADAPTER, DELETE_ADAPTER, LORA_SCALE + global ENTER_LORA_SCALE_VALUE, NUMBER_OF_THREADS_FOR_LORA_EXPORT, EXPORTING_LORA, BROWSING_FOR_EXPORT_LORA_MODEL_FILE, BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE + global ADDING_LORA_ADAPTER, DELETING_LORA_ADAPTER, LORA_FILES, SELECT_LORA_ADAPTER_FILE, STARTING_LORA_EXPORT + global OUTPUT_TYPE, SELECT_OUTPUT_TYPE, GGUF_AND_BIN_FILES, BASE_MODEL, SELECT_BASE_MODEL_FILE + global BASE_MODEL_PATH_REQUIRED, BROWSING_FOR_BASE_MODEL_FILE, SELECT_BASE_MODEL_FOLDER, BROWSING_FOR_BASE_MODEL_FOLDER + global LORA_CONVERSION_FROM_TO, GENERATING_IMATRIX_FOR, MODEL_PATH_REQUIRED_FOR_IMATRIX loc = _languages.get(lang_code, _English)() english_loc = _English() # Create an instance of English localization for fallback