diff --git a/AutoGGUF.py b/AutoGGUF.py new file mode 100644 index 0000000..e69e315 --- /dev/null +++ b/AutoGGUF.py @@ -0,0 +1,930 @@ +from PyQt6.QtWidgets import * +from PyQt6.QtCore import * +from PyQt6.QtGui import * +import os +import sys +import psutil +import shutil +import subprocess +import time +import signal +import json +import platform +import requests +import zipfile +from datetime import datetime +from imports_and_globals import ensure_directory, open_file_safe +from DownloadThread import DownloadThread +from ModelInfoDialog import ModelInfoDialog +from TaskListItem import TaskListItem +from QuantizationThread import QuantizationThread +from KVOverrideEntry import KVOverrideEntry +from Logger import Logger +from localizations import * + +class AutoGGUF(QMainWindow): + def __init__(self): + super().__init__() + self.logger = Logger("AutoGGUF", "logs") + + self.logger.info(INITIALIZING_AUTOGGUF) + self.setWindowTitle(WINDOW_TITLE) + self.setGeometry(100, 100, 1300, 1100) + + main_layout = QHBoxLayout() + left_layout = QVBoxLayout() + right_layout = QVBoxLayout() + + # System info + self.ram_bar = QProgressBar() + self.cpu_label = QLabel(CPU_USAGE) + left_layout.addWidget(QLabel(RAM_USAGE)) + left_layout.addWidget(self.ram_bar) + left_layout.addWidget(self.cpu_label) + + # Modify the backend selection + backend_layout = QHBoxLayout() + self.backend_combo = QComboBox() + self.refresh_backends_button = QPushButton(REFRESH_BACKENDS) + self.refresh_backends_button.clicked.connect(self.refresh_backends) + backend_layout.addWidget(QLabel(BACKEND)) + backend_layout.addWidget(self.backend_combo) + backend_layout.addWidget(self.refresh_backends_button) + left_layout.addLayout(backend_layout) + + # Modify the Download llama.cpp section + download_group = QGroupBox(DOWNLOAD_LLAMACPP) + download_layout = QFormLayout() + + self.release_combo = QComboBox() + self.refresh_releases_button = QPushButton(REFRESH_RELEASES) + self.refresh_releases_button.clicked.connect(self.refresh_releases) + release_layout = QHBoxLayout() + release_layout.addWidget(self.release_combo) + release_layout.addWidget(self.refresh_releases_button) + download_layout.addRow(SELECT_RELEASE, release_layout) + + self.asset_combo = QComboBox() + self.asset_combo.currentIndexChanged.connect(self.update_cuda_option) + download_layout.addRow(SELECT_ASSET, self.asset_combo) + + self.cuda_extract_checkbox = QCheckBox(EXTRACT_CUDA_FILES) + self.cuda_extract_checkbox.setVisible(False) + download_layout.addRow(self.cuda_extract_checkbox) + + self.cuda_backend_label = QLabel(SELECT_CUDA_BACKEND) + self.cuda_backend_label.setVisible(False) + self.backend_combo_cuda = QComboBox() + self.backend_combo_cuda.setVisible(False) + download_layout.addRow(self.cuda_backend_label, self.backend_combo_cuda) + + self.download_progress = QProgressBar() + self.download_button = QPushButton(DOWNLOAD) + self.download_button.clicked.connect(self.download_llama_cpp) + download_layout.addRow(self.download_progress) + download_layout.addRow(self.download_button) + + download_group.setLayout(download_layout) + right_layout.addWidget(download_group) + + # Initialize releases and backends + self.refresh_releases() + self.refresh_backends() + + # Models path + models_layout = QHBoxLayout() + self.models_input = QLineEdit(os.path.abspath("models")) + models_button = QPushButton(BROWSE) + models_button.clicked.connect(self.browse_models) + models_layout.addWidget(QLabel(MODELS_PATH)) + models_layout.addWidget(self.models_input) + 
models_layout.addWidget(models_button) + left_layout.addLayout(models_layout) + + # Output path + output_layout = QHBoxLayout() + self.output_input = QLineEdit(os.path.abspath("quantized_models")) + output_button = QPushButton(BROWSE) + output_button.clicked.connect(self.browse_output) + output_layout.addWidget(QLabel(OUTPUT_PATH)) + output_layout.addWidget(self.output_input) + output_layout.addWidget(output_button) + left_layout.addLayout(output_layout) + + # Logs path + logs_layout = QHBoxLayout() + self.logs_input = QLineEdit(os.path.abspath("logs")) + logs_button = QPushButton(BROWSE) + logs_button.clicked.connect(self.browse_logs) + logs_layout.addWidget(QLabel(LOGS_PATH)) + logs_layout.addWidget(self.logs_input) + logs_layout.addWidget(logs_button) + left_layout.addLayout(logs_layout) + + # Model list + self.model_list = QListWidget() + self.load_models() + left_layout.addWidget(QLabel(AVAILABLE_MODELS)) + left_layout.addWidget(self.model_list) + + # Quantization options + quant_options_scroll = QScrollArea() + quant_options_widget = QWidget() + quant_options_layout = QFormLayout() + + self.quant_type = QComboBox() + self.quant_type.addItems([ + "Q4_0", "Q4_1", "Q5_0", "Q5_1", "IQ2_XXS", "IQ2_XS", "IQ2_S", "IQ2_M", "IQ1_S", "IQ1_M", + "Q2_K", "Q2_K_S", "IQ3_XXS", "IQ3_S", "IQ3_M", "Q3_K", "IQ3_XS", "Q3_K_S", "Q3_K_M", "Q3_K_L", + "IQ4_NL", "IQ4_XS", "Q4_K", "Q4_K_S", "Q4_K_M", "Q5_K", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0", + "Q4_0_4_4", "Q4_0_4_8", "Q4_0_8_8", "F16", "BF16", "F32", "COPY" + ]) + quant_options_layout.addRow(self.create_label(QUANTIZATION_TYPE, SELECT_QUANTIZATION_TYPE), self.quant_type) + + self.allow_requantize = QCheckBox(ALLOW_REQUANTIZE) + self.leave_output_tensor = QCheckBox(LEAVE_OUTPUT_TENSOR) + self.pure = QCheckBox(PURE) + quant_options_layout.addRow(self.create_label("", ALLOWS_REQUANTIZING), self.allow_requantize) + quant_options_layout.addRow(self.create_label("", LEAVE_OUTPUT_WEIGHT), self.leave_output_tensor) + quant_options_layout.addRow(self.create_label("", DISABLE_K_QUANT_MIXTURES), self.pure) + + self.imatrix = QLineEdit() + self.imatrix_button = QPushButton(BROWSE) + self.imatrix_button.clicked.connect(self.browse_imatrix) + imatrix_layout = QHBoxLayout() + imatrix_layout.addWidget(self.imatrix) + imatrix_layout.addWidget(self.imatrix_button) + quant_options_layout.addRow(self.create_label(IMATRIX, USE_DATA_AS_IMPORTANCE_MATRIX), imatrix_layout) + + self.include_weights = QLineEdit() + self.exclude_weights = QLineEdit() + quant_options_layout.addRow(self.create_label(INCLUDE_WEIGHTS, USE_IMPORTANCE_MATRIX_FOR_TENSORS), self.include_weights) + quant_options_layout.addRow(self.create_label(EXCLUDE_WEIGHTS, DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS), self.exclude_weights) + + self.use_output_tensor_type = QCheckBox(USE_OUTPUT_TENSOR_TYPE) + self.output_tensor_type = QComboBox() + self.output_tensor_type.addItems(["F32", "F16", "Q4_0", "Q4_1", "Q5_0", "Q5_1", "Q8_0"]) + self.output_tensor_type.setEnabled(False) + self.use_output_tensor_type.toggled.connect(lambda checked: self.output_tensor_type.setEnabled(checked)) + output_tensor_layout = QHBoxLayout() + output_tensor_layout.addWidget(self.use_output_tensor_type) + output_tensor_layout.addWidget(self.output_tensor_type) + quant_options_layout.addRow(self.create_label(OUTPUT_TENSOR_TYPE, USE_THIS_TYPE_FOR_OUTPUT_WEIGHT), output_tensor_layout) + + self.use_token_embedding_type = QCheckBox(USE_TOKEN_EMBEDDING_TYPE) + self.token_embedding_type = QComboBox() + self.token_embedding_type.addItems(["F32", "F16", 
"Q4_0", "Q4_1", "Q5_0", "Q5_1", "Q8_0"]) + self.token_embedding_type.setEnabled(False) + self.use_token_embedding_type.toggled.connect(lambda checked: self.token_embedding_type.setEnabled(checked)) + token_embedding_layout = QHBoxLayout() + token_embedding_layout.addWidget(self.use_token_embedding_type) + token_embedding_layout.addWidget(self.token_embedding_type) + quant_options_layout.addRow(self.create_label(TOKEN_EMBEDDING_TYPE, USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS), token_embedding_layout) + + self.keep_split = QCheckBox(KEEP_SPLIT) + self.override_kv = QLineEdit() + quant_options_layout.addRow(self.create_label("", WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS), self.keep_split) + # KV Override section + self.kv_override_widget = QWidget() + self.kv_override_layout = QVBoxLayout(self.kv_override_widget) + self.kv_override_entries = [] + + add_override_button = QPushButton(ADD_NEW_OVERRIDE) + add_override_button.clicked.connect(self.add_kv_override) + + kv_override_scroll = QScrollArea() + kv_override_scroll.setWidgetResizable(True) + kv_override_scroll.setWidget(self.kv_override_widget) + kv_override_scroll.setMinimumHeight(200) + + kv_override_main_layout = QVBoxLayout() + kv_override_main_layout.addWidget(kv_override_scroll) + kv_override_main_layout.addWidget(add_override_button) + + quant_options_layout.addRow(self.create_label(KV_OVERRIDES, OVERRIDE_MODEL_METADATA), kv_override_main_layout) + + quant_options_widget.setLayout(quant_options_layout) + quant_options_scroll.setWidget(quant_options_widget) + quant_options_scroll.setWidgetResizable(True) + left_layout.addWidget(quant_options_scroll) + + # Quantize button layout + quantize_layout = QHBoxLayout() + quantize_button = QPushButton(QUANTIZE_MODEL) + quantize_button.clicked.connect(self.quantize_model) + save_preset_button = QPushButton(SAVE_PRESET) + save_preset_button.clicked.connect(self.save_preset) + load_preset_button = QPushButton(LOAD_PRESET) + load_preset_button.clicked.connect(self.load_preset) + quantize_layout.addWidget(quantize_button) + quantize_layout.addWidget(save_preset_button) + quantize_layout.addWidget(load_preset_button) + left_layout.addLayout(quantize_layout) + + # Task list + self.task_list = QListWidget() + self.task_list.setSelectionMode(QListWidget.SelectionMode.NoSelection) + self.task_list.itemDoubleClicked.connect(self.show_task_details) + left_layout.addWidget(QLabel(TASKS)) + left_layout.addWidget(self.task_list) + + # IMatrix section + imatrix_group = QGroupBox(IMATRIX_GENERATION) + imatrix_layout = QFormLayout() + + self.imatrix_datafile = QLineEdit() + self.imatrix_datafile_button = QPushButton(BROWSE) + self.imatrix_datafile_button.clicked.connect(self.browse_imatrix_datafile) + imatrix_datafile_layout = QHBoxLayout() + imatrix_datafile_layout.addWidget(self.imatrix_datafile) + imatrix_datafile_layout.addWidget(self.imatrix_datafile_button) + imatrix_layout.addRow(self.create_label(DATA_FILE, INPUT_DATA_FILE_FOR_IMATRIX), imatrix_datafile_layout) + + self.imatrix_model = QLineEdit() + self.imatrix_model_button = QPushButton(BROWSE) + self.imatrix_model_button.clicked.connect(self.browse_imatrix_model) + imatrix_model_layout = QHBoxLayout() + imatrix_model_layout.addWidget(self.imatrix_model) + imatrix_model_layout.addWidget(self.imatrix_model_button) + imatrix_layout.addRow(self.create_label(MODEL, MODEL_TO_BE_QUANTIZED), imatrix_model_layout) + + self.imatrix_output = QLineEdit() + self.imatrix_output_button = QPushButton(BROWSE) + 
self.imatrix_output_button.clicked.connect(self.browse_imatrix_output) + imatrix_output_layout = QHBoxLayout() + imatrix_output_layout.addWidget(self.imatrix_output) + imatrix_output_layout.addWidget(self.imatrix_output_button) + imatrix_layout.addRow(self.create_label(OUTPUT, OUTPUT_PATH_FOR_GENERATED_IMATRIX), imatrix_output_layout) + + self.imatrix_frequency = QLineEdit() + imatrix_layout.addRow(self.create_label(OUTPUT_FREQUENCY, HOW_OFTEN_TO_SAVE_IMATRIX), self.imatrix_frequency) + + # GPU Offload for IMatrix + gpu_offload_layout = QHBoxLayout() + self.gpu_offload_slider = QSlider(Qt.Orientation.Horizontal) + self.gpu_offload_slider.setRange(0, 200) + self.gpu_offload_slider.valueChanged.connect(self.update_gpu_offload_spinbox) + + self.gpu_offload_spinbox = QSpinBox() + self.gpu_offload_spinbox.setRange(0, 1000) + self.gpu_offload_spinbox.valueChanged.connect(self.update_gpu_offload_slider) + self.gpu_offload_spinbox.setMinimumWidth(75) # Set the minimum width to 75 pixels + + self.gpu_offload_auto = QCheckBox(AUTO) + self.gpu_offload_auto.stateChanged.connect(self.toggle_gpu_offload_auto) + + gpu_offload_layout.addWidget(self.gpu_offload_slider) + gpu_offload_layout.addWidget(self.gpu_offload_spinbox) + gpu_offload_layout.addWidget(self.gpu_offload_auto) + imatrix_layout.addRow(self.create_label(GPU_OFFLOAD, SET_GPU_OFFLOAD_VALUE), gpu_offload_layout) + + imatrix_generate_button = QPushButton(GENERATE_IMATRIX) + imatrix_generate_button.clicked.connect(self.generate_imatrix) + imatrix_layout.addRow(imatrix_generate_button) + + imatrix_group.setLayout(imatrix_layout) + right_layout.addWidget(imatrix_group) + + main_widget = QWidget() + main_layout.addLayout(left_layout, 2) + main_layout.addLayout(right_layout, 1) + main_widget.setLayout(main_layout) + self.setCentralWidget(main_widget) + + # Modify the task list to support right-click menu + self.task_list.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu) + self.task_list.customContextMenuRequested.connect(self.show_task_context_menu) + + # Timer for updating system info + self.timer = QTimer() + self.timer.timeout.connect(self.update_system_info) + self.timer.start(200) + + # Initialize threads + self.quant_threads = [] + + self.logger.info(AUTOGGUF_INITIALIZATION_COMPLETE) + + def refresh_backends(self): + self.logger.info(REFRESHING_BACKENDS) + llama_bin = os.path.abspath("llama_bin") + if not os.path.exists(llama_bin): + os.makedirs(llama_bin) + + self.backend_combo.clear() + valid_backends = [] + for item in os.listdir(llama_bin): + item_path = os.path.join(llama_bin, item) + if os.path.isdir(item_path) and "cudart-llama" not in item.lower(): + valid_backends.append((item, item_path)) + + if valid_backends: + for name, path in valid_backends: + self.backend_combo.addItem(name, userData=path) + self.backend_combo.setEnabled(True) # Enable the combo box if there are valid backends + else: + self.backend_combo.addItem(NO_BACKENDS_AVAILABLE) + self.backend_combo.setEnabled(False) + self.logger.info(FOUND_VALID_BACKENDS.format(self.backend_combo.count())) + + def save_preset(self): + self.logger.info(SAVING_PRESET) + preset = { + "quant_type": self.quant_type.currentText(), + "allow_requantize": self.allow_requantize.isChecked(), + "leave_output_tensor": self.leave_output_tensor.isChecked(), + "pure": self.pure.isChecked(), + "imatrix": self.imatrix.text(), + "include_weights": self.include_weights.text(), + "exclude_weights": self.exclude_weights.text(), + "use_output_tensor_type": self.use_output_tensor_type.isChecked(), 
+ "output_tensor_type": self.output_tensor_type.currentText(), + "use_token_embedding_type": self.use_token_embedding_type.isChecked(), + "token_embedding_type": self.token_embedding_type.currentText(), + "keep_split": self.keep_split.isChecked(), + "kv_overrides": [entry.get_override_string() for entry in self.kv_override_entries] + } + + file_name, _ = QFileDialog.getSaveFileName(self, SAVE_PRESET, "", JSON_FILES) + if file_name: + with open(file_name, 'w') as f: + json.dump(preset, f, indent=4) + QMessageBox.information(self, PRESET_SAVED, PRESET_SAVED_TO.format(file_name)) + self.logger.info(PRESET_SAVED_TO.format(file_name)) + + def load_preset(self): + self.logger.info(LOADING_PRESET) + file_name, _ = QFileDialog.getOpenFileName(self, LOAD_PRESET, "", JSON_FILES) + if file_name: + try: + with open(file_name, 'r') as f: + preset = json.load(f) + + self.quant_type.setCurrentText(preset.get("quant_type", "")) + self.allow_requantize.setChecked(preset.get("allow_requantize", False)) + self.leave_output_tensor.setChecked(preset.get("leave_output_tensor", False)) + self.pure.setChecked(preset.get("pure", False)) + self.imatrix.setText(preset.get("imatrix", "")) + self.include_weights.setText(preset.get("include_weights", "")) + self.exclude_weights.setText(preset.get("exclude_weights", "")) + self.use_output_tensor_type.setChecked(preset.get("use_output_tensor_type", False)) + self.output_tensor_type.setCurrentText(preset.get("output_tensor_type", "")) + self.use_token_embedding_type.setChecked(preset.get("use_token_embedding_type", False)) + self.token_embedding_type.setCurrentText(preset.get("token_embedding_type", "")) + self.keep_split.setChecked(preset.get("keep_split", False)) + + # Clear existing KV overrides and add new ones + for entry in self.kv_override_entries: + self.remove_kv_override(entry) + for override in preset.get("kv_overrides", []): + self.add_kv_override(override) + + QMessageBox.information(self, PRESET_LOADED, PRESET_LOADED_FROM.format(file_name)) + except Exception as e: + QMessageBox.critical(self, ERROR, FAILED_TO_LOAD_PRESET.format(str(e))) + self.logger.info(PRESET_LOADED_FROM.format(file_name)) + + def add_kv_override(self, override_string=None): + self.logger.debug(ADDING_KV_OVERRIDE.format(override_string)) + entry = KVOverrideEntry() + entry.deleted.connect(self.remove_kv_override) + if override_string: + key, value = override_string.split('=') + type_, val = value.split(':') + entry.key_input.setText(key) + entry.type_combo.setCurrentText(type_) + entry.value_input.setText(val) + self.kv_override_layout.addWidget(entry) + self.kv_override_entries.append(entry) + + def save_task_preset(self, task_item): + self.logger.info(SAVING_TASK_PRESET.format(task_item.task_name)) + for thread in self.quant_threads: + if thread.log_file == task_item.log_file: + preset = { + "command": thread.command, + "backend_path": thread.cwd, + "log_file": thread.log_file + } + file_name, _ = QFileDialog.getSaveFileName(self, SAVE_TASK_PRESET, "", JSON_FILES) + if file_name: + with open(file_name, 'w') as f: + json.dump(preset, f, indent=4) + QMessageBox.information(self, TASK_PRESET_SAVED, TASK_PRESET_SAVED_TO.format(file_name)) + break + + def restart_task(self, task_item): + self.logger.info(RESTARTING_TASK.format(task_item.task_name)) + for thread in self.quant_threads: + if thread.log_file == task_item.log_file: + new_thread = QuantizationThread(thread.command, thread.cwd, thread.log_file) + self.quant_threads.append(new_thread) + 
new_thread.status_signal.connect(task_item.update_status) + new_thread.finished_signal.connect(lambda: self.task_finished(new_thread)) + new_thread.error_signal.connect(lambda err: self.handle_error(err, task_item)) + new_thread.model_info_signal.connect(self.update_model_info) + new_thread.start() + task_item.update_status(IN_PROGRESS) + break + + def download_finished(self, extract_dir): + self.logger.info(DOWNLOAD_FINISHED_EXTRACTED_TO.format(extract_dir)) + self.download_button.setEnabled(True) + self.download_progress.setValue(100) + + if self.cuda_extract_checkbox.isChecked() and self.cuda_extract_checkbox.isVisible(): + cuda_backend = self.backend_combo_cuda.currentData() + if cuda_backend and cuda_backend != NO_SUITABLE_CUDA_BACKENDS: + self.extract_cuda_files(extract_dir, cuda_backend) + QMessageBox.information(self, DOWNLOAD_COMPLETE, LLAMACPP_DOWNLOADED_AND_EXTRACTED.format(extract_dir, cuda_backend)) + else: + QMessageBox.warning(self, CUDA_EXTRACTION_FAILED, NO_SUITABLE_CUDA_BACKEND_FOUND) + else: + QMessageBox.information(self, DOWNLOAD_COMPLETE, LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED.format(extract_dir)) + + self.refresh_backends() # Refresh the backends after successful download + self.update_cuda_option() # Update CUDA options in case a CUDA-capable backend was downloaded + + # Select the newly downloaded backend + new_backend_name = os.path.basename(extract_dir) + index = self.backend_combo.findText(new_backend_name) + if index >= 0: + self.backend_combo.setCurrentIndex(index) + + def refresh_releases(self): + self.logger.info(REFRESHING_LLAMACPP_RELEASES) + try: + response = requests.get("https://api.github.com/repos/ggerganov/llama.cpp/releases") + releases = response.json() + self.release_combo.clear() + for release in releases: + self.release_combo.addItem(release['tag_name'], userData=release) + self.release_combo.currentIndexChanged.connect(self.update_assets) + self.update_assets() + except Exception as e: + self.show_error(ERROR_FETCHING_RELEASES.format(str(e))) + + def update_assets(self): + self.logger.debug(UPDATING_ASSET_LIST) + self.asset_combo.clear() + release = self.release_combo.currentData() + if release: + for asset in release['assets']: + self.asset_combo.addItem(asset['name'], userData=asset) + self.update_cuda_option() + + def update_cuda_option(self): + self.logger.debug(UPDATING_CUDA_OPTIONS) + asset = self.asset_combo.currentData() + is_cuda = asset and "cudart" in asset['name'].lower() + self.cuda_extract_checkbox.setVisible(is_cuda) + self.cuda_backend_label.setVisible(is_cuda) + self.backend_combo_cuda.setVisible(is_cuda) + if is_cuda: + self.update_cuda_backends() + + def download_llama_cpp(self): + self.logger.info(STARTING_LLAMACPP_DOWNLOAD) + asset = self.asset_combo.currentData() + if not asset: + self.show_error(NO_ASSET_SELECTED) + return + + llama_bin = os.path.abspath("llama_bin") + if not os.path.exists(llama_bin): + os.makedirs(llama_bin) + + save_path = os.path.join(llama_bin, asset['name']) + + self.download_thread = DownloadThread(asset['browser_download_url'], save_path) + self.download_thread.progress_signal.connect(self.update_download_progress) + self.download_thread.finished_signal.connect(self.download_finished) + self.download_thread.error_signal.connect(self.download_error) + self.download_thread.start() + + self.download_button.setEnabled(False) + self.download_progress.setValue(0) + + def update_cuda_backends(self): + self.logger.debug(UPDATING_CUDA_BACKENDS) + self.backend_combo_cuda.clear() + llama_bin = 
os.path.abspath("llama_bin") + if os.path.exists(llama_bin): + for item in os.listdir(llama_bin): + item_path = os.path.join(llama_bin, item) + if os.path.isdir(item_path) and "cudart-llama" not in item.lower(): + if "cu1" in item.lower(): # Only include CUDA-capable backends + self.backend_combo_cuda.addItem(item, userData=item_path) + + if self.backend_combo_cuda.count() == 0: + self.backend_combo_cuda.addItem(NO_SUITABLE_CUDA_BACKENDS) + self.backend_combo_cuda.setEnabled(False) + else: + self.backend_combo_cuda.setEnabled(True) + + def update_download_progress(self, progress): + self.download_progress.setValue(progress) + + def download_finished(self, extract_dir): + self.download_button.setEnabled(True) + self.download_progress.setValue(100) + + if self.cuda_extract_checkbox.isChecked() and self.cuda_extract_checkbox.isVisible(): + cuda_backend = self.backend_combo_cuda.currentData() + if cuda_backend: + self.extract_cuda_files(extract_dir, cuda_backend) + QMessageBox.information(self, DOWNLOAD_COMPLETE, LLAMACPP_DOWNLOADED_AND_EXTRACTED.format(extract_dir, cuda_backend)) + else: + QMessageBox.warning(self, CUDA_EXTRACTION_FAILED, NO_CUDA_BACKEND_SELECTED) + else: + QMessageBox.information(self, DOWNLOAD_COMPLETE, LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED.format(extract_dir)) + + self.refresh_backends() + + def extract_cuda_files(self, extract_dir, destination): + self.logger.info(EXTRACTING_CUDA_FILES.format(extract_dir, destination)) + for root, dirs, files in os.walk(extract_dir): + for file in files: + if file.lower().endswith('.dll'): + source_path = os.path.join(root, file) + dest_path = os.path.join(destination, file) + shutil.copy2(source_path, dest_path) + + + def download_error(self, error_message): + self.logger.error(DOWNLOAD_ERROR.format(error_message)) + self.download_button.setEnabled(True) + self.download_progress.setValue(0) + self.show_error(DOWNLOAD_FAILED.format(error_message)) + + # Clean up any partially downloaded files + asset = self.asset_combo.currentData() + if asset: + partial_file = os.path.join(os.path.abspath("llama_bin"), asset['name']) + if os.path.exists(partial_file): + os.remove(partial_file) + + def show_task_context_menu(self, position): + self.logger.debug(SHOWING_TASK_CONTEXT_MENU) + item = self.task_list.itemAt(position) + if item is not None: + context_menu = QMenu(self) + + properties_action = QAction(PROPERTIES, self) + properties_action.triggered.connect(lambda: self.show_task_properties(item)) + context_menu.addAction(properties_action) + + task_item = self.task_list.itemWidget(item) + if task_item.status != COMPLETED: + cancel_action = QAction(CANCEL, self) + cancel_action.triggered.connect(lambda: self.cancel_task(item)) + context_menu.addAction(cancel_action) + + if task_item.status == CANCELED: + restart_action = QAction(RESTART, self) + restart_action.triggered.connect(lambda: self.restart_task(task_item)) + context_menu.addAction(restart_action) + + save_preset_action = QAction(SAVE_PRESET, self) + save_preset_action.triggered.connect(lambda: self.save_task_preset(task_item)) + context_menu.addAction(save_preset_action) + + delete_action = QAction(DELETE, self) + delete_action.triggered.connect(lambda: self.delete_task(item)) + context_menu.addAction(delete_action) + + context_menu.exec(self.task_list.viewport().mapToGlobal(position)) + + def show_task_properties(self, item): + self.logger.debug(SHOWING_PROPERTIES_FOR_TASK.format(item.text())) + task_item = self.task_list.itemWidget(item) + for thread in self.quant_threads: + if 
thread.log_file == task_item.log_file: + model_info_dialog = ModelInfoDialog(thread.model_info, self) + model_info_dialog.exec() + break + + def cancel_task(self, item): + self.logger.info(CANCELLING_TASK.format(item.text())) + task_item = self.task_list.itemWidget(item) + for thread in self.quant_threads: + if thread.log_file == task_item.log_file: + thread.terminate() + task_item.update_status(CANCELED) + break + + def retry_task(self, item): + task_item = self.task_list.itemWidget(item) + # TODO: Implement the logic to restart the task + pass + + def delete_task(self, item): + self.logger.info(DELETING_TASK.format(item.text())) + reply = QMessageBox.question(self, CONFIRM_DELETION_TITLE, + CONFIRM_DELETION, + QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, + QMessageBox.StandardButton.No) + if reply == QMessageBox.StandardButton.Yes: + row = self.task_list.row(item) + self.task_list.takeItem(row) + # If the task is still running, terminate it + task_item = self.task_list.itemWidget(item) + for thread in self.quant_threads: + if thread.log_file == task_item.log_file: + thread.terminate() + self.quant_threads.remove(thread) + break + + def create_label(self, text, tooltip): + label = QLabel(text) + label.setToolTip(tooltip) + return label + + def load_models(self): + self.logger.info(LOADING_MODELS) + models_dir = self.models_input.text() + ensure_directory(models_dir) + self.model_list.clear() + for file in os.listdir(models_dir): + if file.endswith(".gguf"): + self.model_list.addItem(file) + self.logger.info(LOADED_MODELS.format(self.model_list.count())) + + + def browse_models(self): + self.logger.info(BROWSING_FOR_MODELS_DIRECTORY) + models_path = QFileDialog.getExistingDirectory(self, SELECT_MODELS_DIRECTORY) + if models_path: + self.models_input.setText(os.path.abspath(models_path)) + ensure_directory(models_path) + self.load_models() + + def browse_output(self): + self.logger.info(BROWSING_FOR_OUTPUT_DIRECTORY) + output_path = QFileDialog.getExistingDirectory(self, SELECT_OUTPUT_DIRECTORY) + if output_path: + self.output_input.setText(os.path.abspath(output_path)) + ensure_directory(output_path) + + def browse_logs(self): + self.logger.info(BROWSING_FOR_LOGS_DIRECTORY) + logs_path = QFileDialog.getExistingDirectory(self, SELECT_LOGS_DIRECTORY) + if logs_path: + self.logs_input.setText(os.path.abspath(logs_path)) + ensure_directory(logs_path) + + def browse_imatrix(self): + self.logger.info(BROWSING_FOR_IMATRIX_FILE) + imatrix_file, _ = QFileDialog.getOpenFileName(self, SELECT_IMATRIX_FILE, "", DAT_FILES) + if imatrix_file: + self.imatrix.setText(os.path.abspath(imatrix_file)) + + def update_system_info(self): + ram = psutil.virtual_memory() + cpu = psutil.cpu_percent() + self.ram_bar.setValue(int(ram.percent)) + self.ram_bar.setFormat(RAM_USAGE_FORMAT.format(ram.percent, ram.used // 1024 // 1024, ram.total // 1024 // 1024)) + self.cpu_label.setText(CPU_USAGE_FORMAT.format(cpu)) + + def validate_quantization_inputs(self): + self.logger.debug(VALIDATING_QUANTIZATION_INPUTS) + errors = [] + if not self.backend_combo.currentData(): + errors.append(NO_BACKEND_SELECTED) + if not self.models_input.text(): + errors.append(MODELS_PATH_REQUIRED) + if not self.output_input.text(): + errors.append(OUTPUT_PATH_REQUIRED) + if not self.logs_input.text(): + errors.append(LOGS_PATH_REQUIRED) + if not self.model_list.currentItem(): + errors.append(NO_MODEL_SELECTED) + + if errors: + raise ValueError("\n".join(errors)) + + def add_kv_override(self): + entry = KVOverrideEntry() + 
entry.deleted.connect(self.remove_kv_override) + self.kv_override_layout.addWidget(entry) + self.kv_override_entries.append(entry) + + def remove_kv_override(self, entry): + self.kv_override_layout.removeWidget(entry) + self.kv_override_entries.remove(entry) + entry.deleteLater() + + def quantize_model(self): + self.logger.info(STARTING_MODEL_QUANTIZATION) + try: + self.validate_quantization_inputs() + selected_model = self.model_list.currentItem() + if not selected_model: + raise ValueError(NO_MODEL_SELECTED) + + model_name = selected_model.text() + backend_path = self.backend_combo.currentData() + if not backend_path: + raise ValueError(NO_BACKEND_SELECTED) + quant_type = self.quant_type.currentText() + + input_path = os.path.join(self.models_input.text(), model_name) + output_name = f"{os.path.splitext(model_name)[0]}_{quant_type}.gguf" + output_path = os.path.join(self.output_input.text(), output_name) + + if not os.path.exists(input_path): + raise FileNotFoundError(INPUT_FILE_NOT_EXIST.format(input_path)) + + command = [os.path.join(backend_path, "llama-quantize")] + + if self.allow_requantize.isChecked(): + command.append("--allow-requantize") + if self.leave_output_tensor.isChecked(): + command.append("--leave-output-tensor") + if self.pure.isChecked(): + command.append("--pure") + if self.imatrix.text(): + command.extend(["--imatrix", self.imatrix.text()]) + if self.include_weights.text(): + command.extend(["--include-weights", self.include_weights.text()]) + if self.exclude_weights.text(): + command.extend(["--exclude-weights", self.exclude_weights.text()]) + if self.use_output_tensor_type.isChecked(): + command.extend(["--output-tensor-type", self.output_tensor_type.currentText()]) + if self.use_token_embedding_type.isChecked(): + command.extend(["--token-embedding-type", self.token_embedding_type.currentText()]) + if self.keep_split.isChecked(): + command.append("--keep-split") + if self.override_kv.text(): + for entry in self.kv_override_entries: + override_string = entry.get_override_string() + if override_string: + command.extend(["--override-kv", override_string]) + + command.extend([input_path, output_path, quant_type]) + + logs_path = self.logs_input.text() + ensure_directory(logs_path) + + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + log_file = os.path.join(logs_path, f"{model_name}_{timestamp}_{quant_type}.log") + + thread = QuantizationThread(command, backend_path, log_file) + self.quant_threads.append(thread) + + task_item = TaskListItem(QUANTIZING_MODEL_TO.format(model_name, quant_type), log_file) + list_item = QListWidgetItem(self.task_list) + list_item.setSizeHint(task_item.sizeHint()) + self.task_list.addItem(list_item) + self.task_list.setItemWidget(list_item, task_item) + + thread.status_signal.connect(task_item.update_status) + thread.finished_signal.connect(lambda: self.task_finished(thread)) + thread.error_signal.connect(lambda err: self.handle_error(err, task_item)) + thread.model_info_signal.connect(self.update_model_info) + thread.start() + self.logger.info(QUANTIZATION_TASK_STARTED.format(model_name)) + except ValueError as e: + self.show_error(str(e)) + except Exception as e: + self.show_error(ERROR_STARTING_QUANTIZATION.format(str(e))) + + def update_model_info(self, model_info): + self.logger.debug(UPDATING_MODEL_INFO.format(model_info)) + # TODO: Do something with this + pass + + def task_finished(self, thread): + self.logger.info(TASK_FINISHED.format(thread.log_file)) + if thread in self.quant_threads: + self.quant_threads.remove(thread) + + 
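+ # Show a read-only log viewer for the selected task: load the existing log file, then stream live output from the matching thread if it is still running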
def show_task_details(self, item): + self.logger.debug(SHOWING_TASK_DETAILS_FOR.format(item.text())) + task_item = self.task_list.itemWidget(item) + if task_item: + log_dialog = QDialog(self) + log_dialog.setWindowTitle(LOG_FOR.format(task_item.task_name)) + log_dialog.setGeometry(200, 200, 800, 600) + + log_text = QPlainTextEdit() + log_text.setReadOnly(True) + + layout = QVBoxLayout() + layout.addWidget(log_text) + log_dialog.setLayout(layout) + + # Load existing content + if os.path.exists(task_item.log_file): + with open_file_safe(task_item.log_file, 'r') as f: + log_text.setPlainText(f.read()) + + # Connect to the thread if it's still running + for thread in self.quant_threads: + if thread.log_file == task_item.log_file: + thread.output_signal.connect(log_text.appendPlainText) + break + + log_dialog.exec() + + def browse_imatrix_datafile(self): + self.logger.info(BROWSING_FOR_IMATRIX_DATA_FILE) + datafile, _ = QFileDialog.getOpenFileName(self, SELECT_DATA_FILE, "", ALL_FILES) + if datafile: + self.imatrix_datafile.setText(os.path.abspath(datafile)) + + def browse_imatrix_model(self): + self.logger.info(BROWSING_FOR_IMATRIX_MODEL_FILE) + model_file, _ = QFileDialog.getOpenFileName(self, SELECT_MODEL_FILE, "", GGUF_FILES) + if model_file: + self.imatrix_model.setText(os.path.abspath(model_file)) + + def browse_imatrix_output(self): + self.logger.info(BROWSING_FOR_IMATRIX_OUTPUT_FILE) + output_file, _ = QFileDialog.getSaveFileName(self, SELECT_OUTPUT_FILE, "", DAT_FILES) + if output_file: + self.imatrix_output.setText(os.path.abspath(output_file)) + + def update_gpu_offload_spinbox(self, value): + self.gpu_offload_spinbox.setValue(value) + + def update_gpu_offload_slider(self, value): + self.gpu_offload_slider.setValue(value) + + def toggle_gpu_offload_auto(self, state): + is_auto = state == Qt.CheckState.Checked + self.gpu_offload_slider.setEnabled(not is_auto) + self.gpu_offload_spinbox.setEnabled(not is_auto) + + def generate_imatrix(self): + self.logger.info(STARTING_IMATRIX_GENERATION) + try: + backend_path = self.backend_combo.currentData() + if not os.path.exists(backend_path): + raise FileNotFoundError(BACKEND_PATH_NOT_EXIST.format(backend_path)) + + command = [ + os.path.join(backend_path, "llama-imatrix"), + "-f", self.imatrix_datafile.text(), + "-m", self.imatrix_model.text(), + "-o", self.imatrix_output.text(), + "--output-frequency", self.imatrix_frequency.text() + ] + + if self.gpu_offload_auto.isChecked(): + command.extend(["-ngl", "99"]) + elif self.gpu_offload_spinbox.value() > 0: + command.extend(["-ngl", str(self.gpu_offload_spinbox.value())]) + + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + log_file = os.path.join(self.logs_input.text(), f"imatrix_{timestamp}.log") + + thread = QuantizationThread(command, backend_path, log_file) + self.quant_threads.append(thread) + + task_item = TaskListItem(GENERATING_IMATRIX, log_file) + list_item = QListWidgetItem(self.task_list) + list_item.setSizeHint(task_item.sizeHint()) + self.task_list.addItem(list_item) + self.task_list.setItemWidget(list_item, task_item) + + thread.status_signal.connect(task_item.update_status) + thread.finished_signal.connect(lambda: self.task_finished(thread)) + thread.error_signal.connect(lambda err: self.handle_error(err, task_item)) + thread.start() + except Exception as e: + self.show_error(ERROR_STARTING_IMATRIX_GENERATION.format(str(e))) + self.logger.info(IMATRIX_GENERATION_TASK_STARTED) + + def show_error(self, message): + self.logger.error(ERROR_MESSAGE.format(message)) + 
QMessageBox.critical(self, ERROR, message) + + def handle_error(self, error_message, task_item): + self.logger.error(TASK_ERROR.format(error_message)) + self.show_error(error_message) + task_item.set_error() + + def closeEvent(self, event: QCloseEvent): + self.logger.info(APPLICATION_CLOSING) + if self.quant_threads: + reply = QMessageBox.question(self, WARNING, + TASK_RUNNING_WARNING, + QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, + QMessageBox.StandardButton.No) + + if reply == QMessageBox.StandardButton.Yes: + for thread in self.quant_threads: + thread.terminate() + event.accept() + else: + event.ignore() + else: + event.accept() + self.logger.info(APPLICATION_CLOSED) + +if __name__ == "__main__": + app = QApplication(sys.argv) + window = AutoGGUF() + window.show() + sys.exit(app.exec()) diff --git a/DownloadThread.py b/DownloadThread.py new file mode 100644 index 0000000..fa9e8ed --- /dev/null +++ b/DownloadThread.py @@ -0,0 +1,54 @@ +from PyQt6.QtWidgets import * +from PyQt6.QtCore import * +from PyQt6.QtGui import * +import os +import sys +import psutil +import subprocess +import time +import signal +import json +import platform +import requests +import zipfile +from datetime import datetime + +class DownloadThread(QThread): + progress_signal = pyqtSignal(int) + finished_signal = pyqtSignal(str) + error_signal = pyqtSignal(str) + + def __init__(self, url, save_path): + super().__init__() + self.url = url + self.save_path = save_path + + def run(self): + try: + response = requests.get(self.url, stream=True) + response.raise_for_status() + total_size = int(response.headers.get('content-length', 0)) + block_size = 8192 + downloaded = 0 + + with open(self.save_path, 'wb') as file: + for data in response.iter_content(block_size): + size = file.write(data) + downloaded += size + if total_size: + progress = int((downloaded / total_size) * 100) + self.progress_signal.emit(progress) + + # Extract the downloaded zip file + extract_dir = os.path.splitext(self.save_path)[0] + with zipfile.ZipFile(self.save_path, 'r') as zip_ref: + zip_ref.extractall(extract_dir) + + # Remove the zip file after extraction + os.remove(self.save_path) + + self.finished_signal.emit(extract_dir) + except Exception as e: + self.error_signal.emit(str(e)) + if os.path.exists(self.save_path): + os.remove(self.save_path) diff --git a/KVOverrideEntry.py b/KVOverrideEntry.py new file mode 100644 index 0000000..36a1dd7 --- /dev/null +++ b/KVOverrideEntry.py @@ -0,0 +1,33 @@ +from PyQt6.QtWidgets import QWidget, QHBoxLayout, QLineEdit, QComboBox, QPushButton +from PyQt6.QtCore import pyqtSignal + +class KVOverrideEntry(QWidget): + deleted = pyqtSignal(QWidget) + + def __init__(self, parent=None): + super().__init__(parent) + layout = QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + + self.key_input = QLineEdit() + self.key_input.setPlaceholderText("Key") + layout.addWidget(self.key_input) + + self.type_combo = QComboBox() + self.type_combo.addItems(["int", "str", "float"]) + layout.addWidget(self.type_combo) + + self.value_input = QLineEdit() + self.value_input.setPlaceholderText("Value") + layout.addWidget(self.value_input) + + delete_button = QPushButton("X") + delete_button.setFixedSize(30, 30) + delete_button.clicked.connect(self.delete_clicked) + layout.addWidget(delete_button) + + def delete_clicked(self): + self.deleted.emit(self) + + def get_override_string(self): + return f"{self.key_input.text()}={self.type_combo.currentText()}:{self.value_input.text()}" diff --git a/Logger.py 
b/Logger.py new file mode 100644 index 0000000..aaa5bb9 --- /dev/null +++ b/Logger.py @@ -0,0 +1,46 @@ +import logging +from logging.handlers import RotatingFileHandler +import os +import sys +from datetime import datetime + +class Logger: + def __init__(self, name, log_dir): + self.logger = logging.getLogger(name) + self.logger.setLevel(logging.DEBUG) + + # Create logs directory if it doesn't exist + os.makedirs(log_dir, exist_ok=True) + + # Console handler + console_handler = logging.StreamHandler() + console_handler.setLevel(logging.INFO) + console_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') + console_handler.setFormatter(console_format) + + # File handler + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + log_file = os.path.join(log_dir, f"latest_{timestamp}.log") + file_handler = RotatingFileHandler(log_file, maxBytes=10*1024*1024, backupCount=5, encoding='utf-8') + file_handler.setLevel(logging.DEBUG) + file_format = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s') + file_handler.setFormatter(file_format) + + # Add handlers to logger + self.logger.addHandler(console_handler) + self.logger.addHandler(file_handler) + + def debug(self, message): + self.logger.debug(message) + + def info(self, message): + self.logger.info(message) + + def warning(self, message): + self.logger.warning(message) + + def error(self, message): + self.logger.error(message) + + def critical(self, message): + self.logger.critical(message) \ No newline at end of file diff --git a/ModelInfoDialog.py b/ModelInfoDialog.py new file mode 100644 index 0000000..1f46897 --- /dev/null +++ b/ModelInfoDialog.py @@ -0,0 +1,48 @@ +from PyQt6.QtWidgets import * +from PyQt6.QtCore import * +from PyQt6.QtGui import * +import os +import sys +import psutil +import subprocess +import time +import signal +import json +import platform +import requests +import zipfile +from datetime import datetime + +class ModelInfoDialog(QDialog): + def __init__(self, model_info, parent=None): + super().__init__(parent) + self.setWindowTitle("Model Information") + self.setGeometry(200, 200, 600, 400) + + layout = QVBoxLayout() + + info_text = QTextEdit() + info_text.setReadOnly(True) + info_text.setHtml(self.format_model_info(model_info)) + + layout.addWidget(info_text) + + close_button = QPushButton("Close") + close_button.clicked.connect(self.accept) + layout.addWidget(close_button) + + self.setLayout(layout) + + def format_model_info(self, model_info): + html = "

<h2>Model Information</h2>" + html += f"<p><b>Architecture:</b> {model_info.get('architecture', 'N/A')}</p>" + html += f"<p><b>Quantization Type:</b> {model_info.get('quantization_type', 'N/A')}</p>" + html += f"<p><b>KV Pairs:</b> {model_info.get('kv_pairs', 'N/A')}</p>" + html += f"<p><b>Tensors:</b> {model_info.get('tensors', 'N/A')}</p>" + + html += "<h3>Key-Value Pairs:</h3>" + for key, value in model_info.get('kv_data', {}).items(): + html += f"<p><b>{key}:</b> {value}</p>

" + + return html + diff --git a/QuantizationThread.py b/QuantizationThread.py new file mode 100644 index 0000000..5c2d5df --- /dev/null +++ b/QuantizationThread.py @@ -0,0 +1,79 @@ +from PyQt6.QtWidgets import * +from PyQt6.QtCore import * +from PyQt6.QtGui import * +import os +import sys +import psutil +import subprocess +import time +import signal +import json +import platform +import requests +import zipfile +import traceback +from datetime import datetime +from imports_and_globals import open_file_safe + +class QuantizationThread(QThread): + output_signal = pyqtSignal(str) + status_signal = pyqtSignal(str) + finished_signal = pyqtSignal() + error_signal = pyqtSignal(str) + model_info_signal = pyqtSignal(dict) + + def __init__(self, command, cwd, log_file): + super().__init__() + self.command = command + self.cwd = cwd + self.log_file = log_file + self.process = None + self.model_info = {} + + def run(self): + try: + self.process = subprocess.Popen(self.command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + text=True, cwd=self.cwd) + with open_file_safe(self.log_file, 'w') as log: + for line in self.process.stdout: + line = line.strip() + self.output_signal.emit(line) + log.write(line + '\n') + log.flush() + self.status_signal.emit("In Progress") + self.parse_model_info(line) + self.process.wait() + if self.process.returncode == 0: + self.status_signal.emit("Completed") + self.model_info_signal.emit(self.model_info) + else: + self.error_signal.emit(f"Process exited with code {self.process.returncode}") + self.finished_signal.emit() + except Exception as e: + self.error_signal.emit(str(e)) + + def parse_model_info(self, line): + if "llama_model_loader: loaded meta data with" in line: + parts = line.split() + self.model_info['kv_pairs'] = parts[6] + self.model_info['tensors'] = parts[9] + elif "general.architecture" in line: + self.model_info['architecture'] = line.split('=')[-1].strip() + elif line.startswith("llama_model_loader: - kv"): + key = line.split(':')[2].strip() + value = line.split('=')[-1].strip() + self.model_info.setdefault('kv_data', {})[key] = value + elif line.startswith("llama_model_loader: - type"): + parts = line.split(':') + if len(parts) > 1: + quant_type = parts[1].strip() + tensors = parts[2].strip().split()[0] + self.model_info.setdefault('quantization_type', []).append(f"{quant_type}: {tensors} tensors") + + def terminate(self): + if self.process: + os.kill(self.process.pid, signal.SIGTERM) + self.process.wait(timeout=5) + if self.process.poll() is None: + os.kill(self.process.pid, signal.SIGKILL) + diff --git a/TaskListItem.py b/TaskListItem.py new file mode 100644 index 0000000..b521716 --- /dev/null +++ b/TaskListItem.py @@ -0,0 +1,57 @@ +from PyQt6.QtWidgets import * +from PyQt6.QtCore import * +from PyQt6.QtGui import * +import os +import sys +import psutil +import subprocess +import time +import signal +import json +import platform +import requests +import zipfile +from datetime import datetime + +class TaskListItem(QWidget): + def __init__(self, task_name, log_file, parent=None): + super().__init__(parent) + self.task_name = task_name + self.log_file = log_file + self.status = "Pending" + layout = QHBoxLayout(self) + self.task_label = QLabel(task_name) + self.progress_bar = QProgressBar() + self.progress_bar.setRange(0, 100) + self.status_label = QLabel(self.status) + layout.addWidget(self.task_label) + layout.addWidget(self.progress_bar) + layout.addWidget(self.status_label) + self.progress_timer = QTimer(self) + 
self.progress_timer.timeout.connect(self.update_progress) + self.progress_value = 0 + + def update_status(self, status): + self.status = status + self.status_label.setText(status) + if status == "In Progress": + self.progress_bar.setRange(0, 100) + self.progress_timer.start(100) + elif status == "Completed": + self.progress_timer.stop() + self.progress_bar.setValue(100) + elif status == "Canceled": + self.progress_timer.stop() + self.progress_bar.setValue(0) + + def set_error(self): + self.status = "Error" + self.status_label.setText("Error") + self.status_label.setStyleSheet("color: red;") + self.progress_bar.setRange(0, 100) + self.progress_timer.stop() + + def update_progress(self): + self.progress_value = (self.progress_value + 1) % 101 + self.progress_bar.setValue(self.progress_value) + diff --git a/imports_and_globals.py b/imports_and_globals.py new file mode 100644 index 0000000..d54328e --- /dev/null +++ b/imports_and_globals.py @@ -0,0 +1,30 @@ +import os +import sys +import psutil +import subprocess +import time +import signal +import json +import platform +import requests +import zipfile +from datetime import datetime +from PyQt6.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QWidget, QPushButton, + QListWidget, QLineEdit, QLabel, QFileDialog, QProgressBar, QComboBox, QTextEdit, + QCheckBox, QGroupBox, QFormLayout, QScrollArea, QSlider, QSpinBox, QListWidgetItem, + QMessageBox, QDialog, QPlainTextEdit, QMenu) +from PyQt6.QtCore import QTimer, QThread, pyqtSignal, Qt, QSize +from PyQt6.QtGui import QCloseEvent, QAction + +def ensure_directory(path): + if not os.path.exists(path): + os.makedirs(path) + +def open_file_safe(file_path, mode='r'): + encodings = ['utf-8', 'latin-1', 'ascii', 'utf-16'] + for encoding in encodings: + try: + return open(file_path, mode, encoding=encoding) + except UnicodeDecodeError: + continue + raise ValueError(f"Unable to open file {file_path} with any of the encodings: {encodings}") \ No newline at end of file diff --git a/localizations.py b/localizations.py new file mode 100644 index 0000000..98a4180 --- /dev/null +++ b/localizations.py @@ -0,0 +1,4876 @@ +import os + +class _Localization: + def __init__(self): + self.WINDOW_TITLE = "" + self.RAM_USAGE = "" + self.CPU_USAGE = "" + self.BACKEND = "" + self.REFRESH_BACKENDS = "" + self.MODELS_PATH = "" + self.OUTPUT_PATH = "" + self.LOGS_PATH = "" + self.BROWSE = "" + self.AVAILABLE_MODELS = "" + self.QUANTIZATION_TYPE = "" + self.ALLOW_REQUANTIZE = "" + self.LEAVE_OUTPUT_TENSOR = "" + self.PURE = "" + self.IMATRIX = "" + self.INCLUDE_WEIGHTS = "" + self.EXCLUDE_WEIGHTS = "" + self.USE_OUTPUT_TENSOR_TYPE = "" + self.USE_TOKEN_EMBEDDING_TYPE = "" + self.KEEP_SPLIT = "" + self.KV_OVERRIDES = "" + self.ADD_NEW_OVERRIDE = "" + self.QUANTIZE_MODEL = "" + self.SAVE_PRESET = "" + self.LOAD_PRESET = "" + self.TASKS = "" + self.DOWNLOAD_LLAMACPP = "" + self.SELECT_RELEASE = "" + self.SELECT_ASSET = "" + self.EXTRACT_CUDA_FILES = "" + self.SELECT_CUDA_BACKEND = "" + self.DOWNLOAD = "" + self.IMATRIX_GENERATION = "" + self.DATA_FILE = "" + self.MODEL = "" + self.OUTPUT = "" + self.OUTPUT_FREQUENCY = "" + self.GPU_OFFLOAD = "" + self.AUTO = "" + self.GENERATE_IMATRIX = "" + self.ERROR = "" + self.WARNING = "" + self.PROPERTIES = "" + self.CANCEL = "" + self.RESTART = "" + self.DELETE = "" + self.CONFIRM_DELETION = "" + self.TASK_RUNNING_WARNING = "" + self.YES = "" + self.NO = "" + self.DOWNLOAD_COMPLETE = "" + self.CUDA_EXTRACTION_FAILED = "" + self.PRESET_SAVED = "" + self.PRESET_LOADED = 
"" + self.NO_ASSET_SELECTED = "" + self.DOWNLOAD_FAILED = "" + self.NO_BACKEND_SELECTED = "" + self.NO_MODEL_SELECTED = "" + self.REFRESH_RELEASES = "" + self.NO_SUITABLE_CUDA_BACKENDS = "" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "" + self.CUDA_FILES_EXTRACTED = "" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "" + self.ERROR_FETCHING_RELEASES = "" + self.CONFIRM_DELETION_TITLE = "" + self.LOG_FOR = "" + self.ALL_FILES = "" + self.GGUF_FILES = "" + self.DAT_FILES = "" + self.JSON_FILES = "" + self.FAILED_LOAD_PRESET = "" + self.INITIALIZING_AUTOGGUF = "" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "" + self.REFRESHING_BACKENDS = "" + self.NO_BACKENDS_AVAILABLE = "" + self.FOUND_VALID_BACKENDS = "" + self.SAVING_PRESET = "" + self.PRESET_SAVED_TO = "" + self.LOADING_PRESET = "" + self.PRESET_LOADED_FROM = "" + self.ADDING_KV_OVERRIDE = "" + self.SAVING_TASK_PRESET = "" + self.TASK_PRESET_SAVED = "" + self.TASK_PRESET_SAVED_TO = "" + self.RESTARTING_TASK = "" + self.IN_PROGRESS = "" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "" + self.REFRESHING_LLAMACPP_RELEASES = "" + self.UPDATING_ASSET_LIST = "" + self.UPDATING_CUDA_OPTIONS = "" + self.STARTING_LLAMACPP_DOWNLOAD = "" + self.UPDATING_CUDA_BACKENDS = "" + self.NO_CUDA_BACKEND_SELECTED = "" + self.EXTRACTING_CUDA_FILES = "" + self.DOWNLOAD_ERROR = "" + self.SHOWING_TASK_CONTEXT_MENU = "" + self.SHOWING_PROPERTIES_FOR_TASK = "" + self.CANCELLING_TASK = "" + self.CANCELED = "" + self.DELETING_TASK = "" + self.LOADING_MODELS = "" + self.LOADED_MODELS = "" + self.BROWSING_FOR_MODELS_DIRECTORY = "" + self.SELECT_MODELS_DIRECTORY = "" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "" + self.SELECT_OUTPUT_DIRECTORY = "" + self.BROWSING_FOR_LOGS_DIRECTORY = "" + self.SELECT_LOGS_DIRECTORY = "" + self.BROWSING_FOR_IMATRIX_FILE = "" + self.SELECT_IMATRIX_FILE = "" + self.RAM_USAGE_FORMAT = "" + self.CPU_USAGE_FORMAT = "" + self.VALIDATING_QUANTIZATION_INPUTS = "" + self.MODELS_PATH_REQUIRED = "" + self.OUTPUT_PATH_REQUIRED = "" + self.LOGS_PATH_REQUIRED = "" + self.STARTING_MODEL_QUANTIZATION = "" + self.INPUT_FILE_NOT_EXIST = "" + self.QUANTIZING_MODEL_TO = "" + self.QUANTIZATION_TASK_STARTED = "" + self.ERROR_STARTING_QUANTIZATION = "" + self.UPDATING_MODEL_INFO = "" + self.TASK_FINISHED = "" + self.SHOWING_TASK_DETAILS_FOR = "" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "" + self.SELECT_DATA_FILE = "" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "" + self.SELECT_MODEL_FILE = "" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "" + self.SELECT_OUTPUT_FILE = "" + self.STARTING_IMATRIX_GENERATION = "" + self.BACKEND_PATH_NOT_EXIST = "" + self.GENERATING_IMATRIX = "" + self.ERROR_STARTING_IMATRIX_GENERATION = "" + self.IMATRIX_GENERATION_TASK_STARTED = "" + self.ERROR_MESSAGE = "" + self.TASK_ERROR = "" + self.APPLICATION_CLOSING = "" + self.APPLICATION_CLOSED = "" + self.SELECT_QUANTIZATION_TYPE = "" + self.ALLOWS_REQUANTIZING = "" + self.LEAVE_OUTPUT_WEIGHT = "" + self.DISABLE_K_QUANT_MIXTURES = "" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "" + self.OUTPUT_TENSOR_TYPE = "" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "" + self.TOKEN_EMBEDDING_TYPE = "" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "" + self.OVERRIDE_MODEL_METADATA = "" + self.INPUT_DATA_FILE_FOR_IMATRIX = "" + 
self.MODEL_TO_BE_QUANTIZED = "" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "" + self.SET_GPU_OFFLOAD_VALUE = "" + self.COMPLETED = "" + +class _English(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)" + self.RAM_USAGE = "RAM Usage:" + self.CPU_USAGE = "CPU Usage:" + self.BACKEND = "Llama.cpp Backend:" + self.REFRESH_BACKENDS = "Refresh Backends" + self.MODELS_PATH = "Models Path:" + self.OUTPUT_PATH = "Output Path:" + self.LOGS_PATH = "Logs Path:" + self.BROWSE = "Browse" + self.AVAILABLE_MODELS = "Available Models:" + self.QUANTIZATION_TYPE = "Quantization Type:" + self.ALLOW_REQUANTIZE = "Allow Requantize" + self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor" + self.PURE = "Pure" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Include Weights:" + self.EXCLUDE_WEIGHTS = "Exclude Weights:" + self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type" + self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type" + self.KEEP_SPLIT = "Keep Split" + self.KV_OVERRIDES = "KV Overrides:" + self.ADD_NEW_OVERRIDE = "Add new override" + self.QUANTIZE_MODEL = "Quantize Model" + self.SAVE_PRESET = "Save Preset" + self.LOAD_PRESET = "Load Preset" + self.TASKS = "Tasks:" + self.DOWNLOAD_LLAMACPP = "Download llama.cpp" + self.SELECT_RELEASE = "Select Release:" + self.SELECT_ASSET = "Select Asset:" + self.EXTRACT_CUDA_FILES = "Extract CUDA files" + self.SELECT_CUDA_BACKEND = "Select CUDA Backend:" + self.DOWNLOAD = "Download" + self.IMATRIX_GENERATION = "IMatrix Generation" + self.DATA_FILE = "Data File:" + self.MODEL = "Model:" + self.OUTPUT = "Output:" + self.OUTPUT_FREQUENCY = "Output Frequency:" + self.GPU_OFFLOAD = "GPU Offload:" + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "Generate IMatrix" + self.ERROR = "Error" + self.WARNING = "Warning" + self.PROPERTIES = "Properties" + self.CANCEL = "Cancel" + self.RESTART = "Restart" + self.DELETE = "Delete" + self.CONFIRM_DELETION = "Are you sure you want to delete this task?" + self.TASK_RUNNING_WARNING = "Some tasks are still running. Are you sure you want to quit?" 
+ self.YES = "Yes" + self.NO = "No" + self.DOWNLOAD_COMPLETE = "Download Complete" + self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed" + self.PRESET_SAVED = "Preset Saved" + self.PRESET_LOADED = "Preset Loaded" + self.NO_ASSET_SELECTED = "No asset selected" + self.DOWNLOAD_FAILED = "Download failed" + self.NO_BACKEND_SELECTED = "No backend selected" + self.NO_MODEL_SELECTED = "No model selected" + self.REFRESH_RELEASES = "Refresh Releases" + self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}" + self.CUDA_FILES_EXTRACTED = "CUDA files extracted to" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No suitable CUDA backend found for extraction" + self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}" + self.CONFIRM_DELETION_TITLE = "Confirm Deletion" + self.LOG_FOR = "Log for {0}" + self.ALL_FILES = "All Files (*)" + self.GGUF_FILES = "GGUF Files (*.gguf)" + self.DAT_FILES = "DAT Files (*.dat)" + self.JSON_FILES = "JSON Files (*.json)" + self.FAILED_LOAD_PRESET = "Failed to load preset: {0}" + self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete" + self.REFRESHING_BACKENDS = "Refreshing backends" + self.NO_BACKENDS_AVAILABLE = "No backends available" + self.FOUND_VALID_BACKENDS = "Found {0} valid backends" + self.SAVING_PRESET = "Saving preset" + self.PRESET_SAVED_TO = "Preset saved to {0}" + self.LOADING_PRESET = "Loading preset" + self.PRESET_LOADED_FROM = "Preset loaded from {0}" + self.ADDING_KV_OVERRIDE = "Adding KV override: {0}" + self.SAVING_TASK_PRESET = "Saving task preset for {0}" + self.TASK_PRESET_SAVED = "Task Preset Saved" + self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}" + self.RESTARTING_TASK = "Restarting task: {0}" + self.IN_PROGRESS = "In Progress" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. 
Extracted to: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No suitable CUDA backend found for extraction" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases" + self.UPDATING_ASSET_LIST = "Updating asset list" + self.UPDATING_CUDA_OPTIONS = "Updating CUDA options" + self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download" + self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends" + self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction" + self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}" + self.DOWNLOAD_ERROR = "Download error: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu" + self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}" + self.CANCELLING_TASK = "Cancelling task: {0}" + self.CANCELED = "Canceled" + self.DELETING_TASK = "Deleting task: {0}" + self.LOADING_MODELS = "Loading models" + self.LOADED_MODELS = "Loaded {0} models" + self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory" + self.SELECT_MODELS_DIRECTORY = "Select Models Directory" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory" + self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory" + self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory" + self.SELECT_LOGS_DIRECTORY = "Select Logs Directory" + self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file" + self.SELECT_IMATRIX_FILE = "Select IMatrix File" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs" + self.MODELS_PATH_REQUIRED = "Models path is required" + self.OUTPUT_PATH_REQUIRED = "Output path is required" + self.LOGS_PATH_REQUIRED = "Logs path is required" + self.STARTING_MODEL_QUANTIZATION = "Starting model quantization" + self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist." 
+ self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}" + self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}" + self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}" + self.UPDATING_MODEL_INFO = "Updating model info: {0}" + self.TASK_FINISHED = "Task finished: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file" + self.SELECT_DATA_FILE = "Select Data File" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file" + self.SELECT_MODEL_FILE = "Select Model File" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file" + self.SELECT_OUTPUT_FILE = "Select Output File" + self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation" + self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}" + self.GENERATING_IMATRIX = "Generating IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Error starting IMatrix generation: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started" + self.ERROR_MESSAGE = "Error: {0}" + self.TASK_ERROR = "Task error: {0}" + self.APPLICATION_CLOSING = "Application closing" + self.APPLICATION_CLOSED = "Application closed" + self.SELECT_QUANTIZATION_TYPE = "Select the quantization type" + self.ALLOWS_REQUANTIZING = "Allows requantizing tensors that have already been quantized" + self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized" + self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantize all tensors to the same type" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimizations" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Use importance matrix for these tensors" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Don't use importance matrix for these tensors" + self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Use this type for the output.weight tensor" + self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Use this type for the token embeddings tensor" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantized model in the same shards as input" + self.OVERRIDE_MODEL_METADATA = "Override model metadata" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation" + self.MODEL_TO_BE_QUANTIZED = "Model to be quantized" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)" + self.COMPLETED = "Completed" + +class _French(_Localization): + # French localization + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (quantificateur automatisé de modèles GGUF)" + self.RAM_USAGE = "Utilisation RAM :" + self.CPU_USAGE = "Utilisation CPU :" + self.BACKEND = "Backend Llama.cpp :" + self.REFRESH_BACKENDS = "Actualiser les Backends" + self.MODELS_PATH = "Chemin des Modèles :" + self.OUTPUT_PATH = "Chemin de Sortie :" + self.LOGS_PATH = "Chemin des Logs :" + self.BROWSE = "Parcourir" + self.AVAILABLE_MODELS = "Modèles Disponibles :" + self.QUANTIZATION_TYPE = "Type de Quantification :" + self.ALLOW_REQUANTIZE = "Autoriser la Requantification" + self.LEAVE_OUTPUT_TENSOR = "Laisser le Tenseur de Sortie" + self.PURE = "Pur" + self.IMATRIX = "IMatrix :" + self.INCLUDE_WEIGHTS = "Inclure les Poids :" + self.EXCLUDE_WEIGHTS = "Exclure les Poids :" +
self.USE_OUTPUT_TENSOR_TYPE = "Utiliser le Type de Tenseur de Sortie" + self.USE_TOKEN_EMBEDDING_TYPE = "Utiliser le Type d'Embedding de Token" + self.KEEP_SPLIT = "Garder la Division" + self.KV_OVERRIDES = "Remplacements KV :" + self.ADD_NEW_OVERRIDE = "Ajouter un nouveau remplacement" + self.QUANTIZE_MODEL = "Quantifier le Modèle" + self.SAVE_PRESET = "Sauvegarder le Préréglage" + self.LOAD_PRESET = "Charger le Préréglage" + self.TASKS = "Tâches :" + self.DOWNLOAD_LLAMACPP = "Télécharger llama.cpp" + self.SELECT_RELEASE = "Sélectionner la Version :" + self.SELECT_ASSET = "Sélectionner l'Asset :" + self.EXTRACT_CUDA_FILES = "Extraire les fichiers CUDA" + self.SELECT_CUDA_BACKEND = "Sélectionner le Backend CUDA :" + self.DOWNLOAD = "Télécharger" + self.IMATRIX_GENERATION = "Génération IMatrix" + self.DATA_FILE = "Fichier de Données :" + self.MODEL = "Modèle :" + self.OUTPUT = "Sortie :" + self.OUTPUT_FREQUENCY = "Fréquence de Sortie :" + self.GPU_OFFLOAD = "Déchargement GPU :" + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "Générer IMatrix" + self.ERROR = "Erreur" + self.WARNING = "Avertissement" + self.PROPERTIES = "Propriétés" + self.CANCEL = "Annuler" + self.RESTART = "Redémarrer" + self.DELETE = "Supprimer" + self.CONFIRM_DELETION = "Êtes-vous sûr de vouloir supprimer cette tâche ?" + self.TASK_RUNNING_WARNING = "Certaines tâches sont encore en cours. Êtes-vous sûr de vouloir quitter ?" + self.YES = "Oui" + self.NO = "Non" + self.DOWNLOAD_COMPLETE = "Téléchargement Terminé" + self.CUDA_EXTRACTION_FAILED = "Échec de l'Extraction CUDA" + self.PRESET_SAVED = "Préréglage Sauvegardé" + self.PRESET_LOADED = "Préréglage Chargé" + self.NO_ASSET_SELECTED = "Aucun asset sélectionné" + self.DOWNLOAD_FAILED = "Échec du téléchargement" + self.NO_BACKEND_SELECTED = "Aucun backend sélectionné" + self.NO_MODEL_SELECTED = "Aucun modèle sélectionné" + self.REFRESH_RELEASES = "Actualiser les Versions" + self.NO_SUITABLE_CUDA_BACKENDS = "Aucun backend CUDA approprié trouvé" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binaire llama.cpp téléchargé et extrait vers {0}\nFichiers CUDA extraits vers {1}" + self.CUDA_FILES_EXTRACTED = "Fichiers CUDA extraits vers" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Aucun backend CUDA approprié trouvé pour l'extraction" + self.ERROR_FETCHING_RELEASES = "Erreur lors de la récupération des versions : {0}" + self.CONFIRM_DELETION_TITLE = "Confirmer la Suppression" + self.LOG_FOR = "Log pour {0}" + self.ALL_FILES = "Tous les Fichiers (*)" + self.GGUF_FILES = "Fichiers GGUF (*.gguf)" + self.DAT_FILES = "Fichiers DAT (*.dat)" + self.JSON_FILES = "Fichiers JSON (*.json)" + self.FAILED_LOAD_PRESET = "Échec du chargement du préréglage : {0}" + self.INITIALIZING_AUTOGGUF = "Initialisation de l'application AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Initialisation d'AutoGGUF terminée" + self.REFRESHING_BACKENDS = "Actualisation des backends" + self.NO_BACKENDS_AVAILABLE = "Aucun backend disponible" + self.FOUND_VALID_BACKENDS = "{0} backends valides trouvés" + self.SAVING_PRESET = "Sauvegarde du préréglage" + self.PRESET_SAVED_TO = "Préréglage sauvegardé dans {0}" + self.LOADING_PRESET = "Chargement du préréglage" + self.PRESET_LOADED_FROM = "Préréglage chargé depuis {0}" + self.ADDING_KV_OVERRIDE = "Ajout du remplacement KV : {0}" + self.SAVING_TASK_PRESET = "Sauvegarde du préréglage de tâche pour {0}" + self.TASK_PRESET_SAVED = "Préréglage de Tâche Sauvegardé" + self.TASK_PRESET_SAVED_TO = "Préréglage de tâche sauvegardé dans {0}" + self.RESTARTING_TASK = "Redémarrage de 
la tâche : {0}" + self.IN_PROGRESS = "En Cours" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Téléchargement terminé. Extrait vers : {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binaire llama.cpp téléchargé et extrait vers {0}\nFichiers CUDA extraits vers {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Aucun backend CUDA approprié trouvé pour l'extraction" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binaire llama.cpp téléchargé et extrait vers {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Actualisation des versions de llama.cpp" + self.UPDATING_ASSET_LIST = "Mise à jour de la liste des assets" + self.UPDATING_CUDA_OPTIONS = "Mise à jour des options CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Démarrage du téléchargement de llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Mise à jour des backends CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Aucun backend CUDA sélectionné pour l'extraction" + self.EXTRACTING_CUDA_FILES = "Extraction des fichiers CUDA de {0} vers {1}" + self.DOWNLOAD_ERROR = "Erreur de téléchargement : {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Affichage du menu contextuel de tâche" + self.SHOWING_PROPERTIES_FOR_TASK = "Affichage des propriétés pour la tâche : {0}" + self.CANCELLING_TASK = "Annulation de la tâche : {0}" + self.CANCELED = "Annulé" + self.DELETING_TASK = "Suppression de la tâche : {0}" + self.LOADING_MODELS = "Chargement des modèles" + self.LOADED_MODELS = "{0} modèles chargés" + self.BROWSING_FOR_MODELS_DIRECTORY = "Recherche du répertoire des modèles" + self.SELECT_MODELS_DIRECTORY = "Sélectionner le Répertoire des Modèles" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Recherche du répertoire de sortie" + self.SELECT_OUTPUT_DIRECTORY = "Sélectionner le Répertoire de Sortie" + self.BROWSING_FOR_LOGS_DIRECTORY = "Recherche du répertoire des logs" + self.SELECT_LOGS_DIRECTORY = "Sélectionner le Répertoire des Logs" + self.BROWSING_FOR_IMATRIX_FILE = "Recherche du fichier IMatrix" + self.SELECT_IMATRIX_FILE = "Sélectionner le Fichier IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mo / {2} Mo)" + self.CPU_USAGE_FORMAT = "Utilisation CPU : {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Validation des entrées de quantification" + self.MODELS_PATH_REQUIRED = "Le chemin des modèles est requis" + self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis" + self.LOGS_PATH_REQUIRED = "Le chemin des logs est requis" + self.STARTING_MODEL_QUANTIZATION = "Démarrage de la quantification du modèle" + self.INPUT_FILE_NOT_EXIST = "Le fichier d'entrée '{0}' n'existe pas." 
+ self.QUANTIZING_MODEL_TO = "Quantification de {0} vers {1}" + self.QUANTIZATION_TASK_STARTED = "Tâche de quantification démarrée pour {0}" + self.ERROR_STARTING_QUANTIZATION = "Erreur au démarrage de la quantification : {0}" + self.UPDATING_MODEL_INFO = "Mise à jour des infos du modèle : {0}" + self.TASK_FINISHED = "Tâche terminée : {0}" + self.SHOWING_TASK_DETAILS_FOR = "Affichage des détails de la tâche pour : {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Recherche du fichier de données IMatrix" + self.SELECT_DATA_FILE = "Sélectionner le Fichier de Données" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Recherche du fichier modèle IMatrix" + self.SELECT_MODEL_FILE = "Sélectionner le Fichier Modèle" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Recherche du fichier de sortie IMatrix" + self.SELECT_OUTPUT_FILE = "Sélectionner le Fichier de Sortie" + self.STARTING_IMATRIX_GENERATION = "Démarrage de la génération IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Le chemin du backend n'existe pas : {0}" + self.GENERATING_IMATRIX = "Génération de l'IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Erreur au démarrage de la génération IMatrix : {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Tâche de génération IMatrix démarrée" + self.ERROR_MESSAGE = "Erreur : {0}" + self.TASK_ERROR = "Erreur de tâche : {0}" + self.APPLICATION_CLOSING = "Fermeture de l'application" + self.APPLICATION_CLOSED = "Application fermée" + self.SELECT_QUANTIZATION_TYPE = "Sélectionnez le type de quantification" + self.ALLOWS_REQUANTIZING = "Permet de requantifier les tenseurs déjà quantifiés" + self.LEAVE_OUTPUT_WEIGHT = "Laissera output.weight non (re)quantifié" + self.DISABLE_K_QUANT_MIXTURES = "Désactive les mélanges k-quant et quantifie tous les tenseurs au même type" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilise les données du fichier comme matrice d'importance pour les optimisations de quant" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Utiliser la matrice d'importance pour ces tenseurs" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Ne pas utiliser la matrice d'importance pour ces tenseurs" + self.OUTPUT_TENSOR_TYPE = "Type de Tenseur de Sortie :" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Utiliser ce type pour le tenseur output.weight" + self.TOKEN_EMBEDDING_TYPE = "Type d'Embedding de Token :" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Utiliser ce type pour le tenseur des embeddings de token" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Générera le modèle quantifié dans les mêmes shards que l'entrée" + self.OVERRIDE_MODEL_METADATA = "Remplacer les métadonnées du modèle" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Fichier de données d'entrée pour la génération IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Modèle à quantifier" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Chemin de sortie pour l'IMatrix généré" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Fréquence de sauvegarde de l'IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Définir la valeur de déchargement GPU (-ngl)" + self.COMPLETED = "Terminé" + +class _SimplifiedChinese(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF(自动GGUF模型量化器)" + self.RAM_USAGE = "内存使用率:" + self.CPU_USAGE = "CPU使用率:" + self.BACKEND = "Llama.cpp后端:" + self.REFRESH_BACKENDS = "刷新后端" + self.MODELS_PATH = "模型路径:" + self.OUTPUT_PATH = "输出路径:" + self.LOGS_PATH = "日志路径:" + self.BROWSE = "浏览" + self.AVAILABLE_MODELS = "可用模型:" + self.QUANTIZATION_TYPE = "量化类型:" + self.ALLOW_REQUANTIZE = "允许重新量化" + self.LEAVE_OUTPUT_TENSOR = "保留输出张量" + self.PURE = "纯净" + self.IMATRIX = 
"IMatrix:" + self.INCLUDE_WEIGHTS = "包含权重:" + self.EXCLUDE_WEIGHTS = "排除权重:" + self.USE_OUTPUT_TENSOR_TYPE = "使用输出张量类型" + self.USE_TOKEN_EMBEDDING_TYPE = "使用令牌嵌入类型" + self.KEEP_SPLIT = "保持分割" + self.KV_OVERRIDES = "KV覆盖:" + self.ADD_NEW_OVERRIDE = "添加新覆盖" + self.QUANTIZE_MODEL = "量化模型" + self.SAVE_PRESET = "保存预设" + self.LOAD_PRESET = "加载预设" + self.TASKS = "任务:" + self.DOWNLOAD_LLAMACPP = "下载llama.cpp" + self.SELECT_RELEASE = "选择发布版本:" + self.SELECT_ASSET = "选择资源:" + self.EXTRACT_CUDA_FILES = "提取CUDA文件" + self.SELECT_CUDA_BACKEND = "选择CUDA后端:" + self.DOWNLOAD = "下载" + self.IMATRIX_GENERATION = "IMatrix生成" + self.DATA_FILE = "数据文件:" + self.MODEL = "模型:" + self.OUTPUT = "输出:" + self.OUTPUT_FREQUENCY = "输出频率:" + self.GPU_OFFLOAD = "GPU卸载:" + self.AUTO = "自动" + self.GENERATE_IMATRIX = "生成IMatrix" + self.ERROR = "错误" + self.WARNING = "警告" + self.PROPERTIES = "属性" + self.CANCEL = "取消" + self.RESTART = "重启" + self.DELETE = "删除" + self.CONFIRM_DELETION = "您确定要删除此任务吗?" + self.TASK_RUNNING_WARNING = "某些任务仍在运行。您确定要退出吗?" + self.YES = "是" + self.NO = "否" + self.DOWNLOAD_COMPLETE = "下载完成" + self.CUDA_EXTRACTION_FAILED = "CUDA提取失败" + self.PRESET_SAVED = "预设已保存" + self.PRESET_LOADED = "预设已加载" + self.NO_ASSET_SELECTED = "未选择资源" + self.DOWNLOAD_FAILED = "下载失败" + self.NO_BACKEND_SELECTED = "未选择后端" + self.NO_MODEL_SELECTED = "未选择模型" + self.REFRESH_RELEASES = "刷新发布版本" + self.NO_SUITABLE_CUDA_BACKENDS = "未找到合适的CUDA后端" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp二进制文件已下载并提取到{0}\nCUDA文件已提取到{1}" + self.CUDA_FILES_EXTRACTED = "CUDA文件已提取到" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "未找到适合提取的CUDA后端" + self.ERROR_FETCHING_RELEASES = "获取发布版本时出错:{0}" + self.CONFIRM_DELETION_TITLE = "确认删除" + self.LOG_FOR = "{0}的日志" + self.ALL_FILES = "所有文件 (*)" + self.GGUF_FILES = "GGUF文件 (*.gguf)" + self.DAT_FILES = "DAT文件 (*.dat)" + self.JSON_FILES = "JSON文件 (*.json)" + self.FAILED_LOAD_PRESET = "加载预设失败:{0}" + self.INITIALIZING_AUTOGGUF = "初始化AutoGGUF应用程序" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF初始化完成" + self.REFRESHING_BACKENDS = "刷新后端" + self.NO_BACKENDS_AVAILABLE = "没有可用的后端" + self.FOUND_VALID_BACKENDS = "找到{0}个有效后端" + self.SAVING_PRESET = "保存预设" + self.PRESET_SAVED_TO = "预设已保存到{0}" + self.LOADING_PRESET = "加载预设" + self.PRESET_LOADED_FROM = "从{0}加载了预设" + self.ADDING_KV_OVERRIDE = "添加KV覆盖:{0}" + self.SAVING_TASK_PRESET = "保存{0}的任务预设" + self.TASK_PRESET_SAVED = "任务预设已保存" + self.TASK_PRESET_SAVED_TO = "任务预设已保存到{0}" + self.RESTARTING_TASK = "重启任务:{0}" + self.IN_PROGRESS = "进行中" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "下载完成。已提取到:{0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp二进制文件已下载并提取到{0}\nCUDA文件已提取到{1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "未找到适合提取的CUDA后端" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp二进制文件已下载并提取到{0}" + self.REFRESHING_LLAMACPP_RELEASES = "刷新llama.cpp发布版本" + self.UPDATING_ASSET_LIST = "更新资源列表" + self.UPDATING_CUDA_OPTIONS = "更新CUDA选项" + self.STARTING_LLAMACPP_DOWNLOAD = "开始下载llama.cpp" + self.UPDATING_CUDA_BACKENDS = "更新CUDA后端" + self.NO_CUDA_BACKEND_SELECTED = "未选择要提取的CUDA后端" + self.EXTRACTING_CUDA_FILES = "从{0}提取CUDA文件到{1}" + self.DOWNLOAD_ERROR = "下载错误:{0}" + self.SHOWING_TASK_CONTEXT_MENU = "显示任务上下文菜单" + self.SHOWING_PROPERTIES_FOR_TASK = "显示任务属性:{0}" + self.CANCELLING_TASK = "取消任务:{0}" + self.CANCELED = "已取消" + self.DELETING_TASK = "删除任务:{0}" + self.LOADING_MODELS = "加载模型" + self.LOADED_MODELS = "已加载{0}个模型" + self.BROWSING_FOR_MODELS_DIRECTORY = "浏览模型目录" + self.SELECT_MODELS_DIRECTORY = "选择模型目录" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "浏览输出目录" + 
self.SELECT_OUTPUT_DIRECTORY = "选择输出目录" + self.BROWSING_FOR_LOGS_DIRECTORY = "浏览日志目录" + self.SELECT_LOGS_DIRECTORY = "选择日志目录" + self.BROWSING_FOR_IMATRIX_FILE = "浏览IMatrix文件" + self.SELECT_IMATRIX_FILE = "选择IMatrix文件" + self.RAM_USAGE_FORMAT = "{0:.1f}%({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU使用率:{0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "验证量化输入" + self.MODELS_PATH_REQUIRED = "需要模型路径" + self.OUTPUT_PATH_REQUIRED = "需要输出路径" + self.LOGS_PATH_REQUIRED = "需要日志路径" + self.STARTING_MODEL_QUANTIZATION = "开始模型量化" + self.INPUT_FILE_NOT_EXIST = "输入文件'{0}'不存在。" + self.QUANTIZING_MODEL_TO = "将{0}量化为{1}" + self.QUANTIZATION_TASK_STARTED = "已启动{0}的量化任务" + self.ERROR_STARTING_QUANTIZATION = "启动量化时出错:{0}" + self.UPDATING_MODEL_INFO = "更新模型信息:{0}" + self.TASK_FINISHED = "任务完成:{0}" + self.SHOWING_TASK_DETAILS_FOR = "显示任务详情:{0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "浏览IMatrix数据文件" + self.SELECT_DATA_FILE = "选择数据文件" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "浏览IMatrix模型文件" + self.SELECT_MODEL_FILE = "选择模型文件" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "浏览IMatrix输出文件" + self.SELECT_OUTPUT_FILE = "选择输出文件" + self.STARTING_IMATRIX_GENERATION = "开始IMatrix生成" + self.BACKEND_PATH_NOT_EXIST = "后端路径不存在:{0}" + self.GENERATING_IMATRIX = "生成IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "启动IMatrix生成时出错:{0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix生成任务已启动" + self.ERROR_MESSAGE = "错误:{0}" + self.TASK_ERROR = "任务错误:{0}" + self.APPLICATION_CLOSING = "应用程序正在关闭" + self.APPLICATION_CLOSED = "应用程序已关闭" + self.SELECT_QUANTIZATION_TYPE = "选择量化类型" + self.ALLOWS_REQUANTIZING = "允许重新量化已经量化的张量" + self.LEAVE_OUTPUT_WEIGHT = "将保留output.weight不被(重新)量化" + self.DISABLE_K_QUANT_MIXTURES = "禁用k-quant混合并将所有张量量化为相同类型" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "使用文件中的数据作为量化优化的重要性矩阵" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "对这些张量使用重要性矩阵" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "不对这些张量使用重要性矩阵" + self.OUTPUT_TENSOR_TYPE = "输出张量类型:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "对output.weight张量使用此类型" + self.TOKEN_EMBEDDING_TYPE = "令牌嵌入类型:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "对令牌嵌入张量使用此类型" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "将在与输入相同的分片中生成量化模型" + self.OVERRIDE_MODEL_METADATA = "覆盖模型元数据" + self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix生成的输入数据文件" + self.MODEL_TO_BE_QUANTIZED = "要量化的模型" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "生成的IMatrix的输出路径" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "保存IMatrix的频率" + self.SET_GPU_OFFLOAD_VALUE = "设置GPU卸载值(-ngl)" + self.COMPLETED = "已完成" + +class _Spanish(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (cuantizador automático de modelos GGUF)" + self.RAM_USAGE = "Uso de RAM:" + self.CPU_USAGE = "Uso de CPU:" + self.BACKEND = "Backend de Llama.cpp:" + self.REFRESH_BACKENDS = "Actualizar Backends" + self.MODELS_PATH = "Ruta de Modelos:" + self.OUTPUT_PATH = "Ruta de Salida:" + self.LOGS_PATH = "Ruta de Registros:" + self.BROWSE = "Explorar" + self.AVAILABLE_MODELS = "Modelos Disponibles:" + self.QUANTIZATION_TYPE = "Tipo de Cuantización:" + self.ALLOW_REQUANTIZE = "Permitir Recuantización" + self.LEAVE_OUTPUT_TENSOR = "Dejar Tensor de Salida" + self.PURE = "Puro" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Incluir Pesos:" + self.EXCLUDE_WEIGHTS = "Excluir Pesos:" + self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Salida" + self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incrustación de Token" + self.KEEP_SPLIT = "Mantener División" + self.KV_OVERRIDES = "Anulaciones KV:" + self.ADD_NEW_OVERRIDE = "Agregar nueva 
anulación" + self.QUANTIZE_MODEL = "Cuantizar Modelo" + self.SAVE_PRESET = "Guardar Preajuste" + self.LOAD_PRESET = "Cargar Preajuste" + self.TASKS = "Tareas:" + self.DOWNLOAD_LLAMACPP = "Descargar llama.cpp" + self.SELECT_RELEASE = "Seleccionar Versión:" + self.SELECT_ASSET = "Seleccionar Activo:" + self.EXTRACT_CUDA_FILES = "Extraer archivos CUDA" + self.SELECT_CUDA_BACKEND = "Seleccionar Backend CUDA:" + self.DOWNLOAD = "Descargar" + self.IMATRIX_GENERATION = "Generación de IMatrix" + self.DATA_FILE = "Archivo de Datos:" + self.MODEL = "Modelo:" + self.OUTPUT = "Salida:" + self.OUTPUT_FREQUENCY = "Frecuencia de Salida:" + self.GPU_OFFLOAD = "Descarga GPU:" + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "Generar IMatrix" + self.ERROR = "Error" + self.WARNING = "Advertencia" + self.PROPERTIES = "Propiedades" + self.CANCEL = "Cancelar" + self.RESTART = "Reiniciar" + self.DELETE = "Eliminar" + self.CONFIRM_DELETION = "¿Estás seguro de que quieres eliminar esta tarea?" + self.TASK_RUNNING_WARNING = "Algunas tareas aún se están ejecutando. ¿Estás seguro de que quieres salir?" + self.YES = "Sí" + self.NO = "No" + self.DOWNLOAD_COMPLETE = "Descarga Completa" + self.CUDA_EXTRACTION_FAILED = "Extracción de CUDA Fallida" + self.PRESET_SAVED = "Preajuste Guardado" + self.PRESET_LOADED = "Preajuste Cargado" + self.NO_ASSET_SELECTED = "Ningún activo seleccionado" + self.DOWNLOAD_FAILED = "Descarga fallida" + self.NO_BACKEND_SELECTED = "Ningún backend seleccionado" + self.NO_MODEL_SELECTED = "Ningún modelo seleccionado" + self.REFRESH_RELEASES = "Actualizar Versiones" + self.NO_SUITABLE_CUDA_BACKENDS = "No se encontraron backends CUDA adecuados" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binario de llama.cpp descargado y extraído en {0}\nArchivos CUDA extraídos en {1}" + self.CUDA_FILES_EXTRACTED = "Archivos CUDA extraídos en" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No se encontró un backend CUDA adecuado para la extracción" + self.ERROR_FETCHING_RELEASES = "Error al obtener versiones: {0}" + self.CONFIRM_DELETION_TITLE = "Confirmar Eliminación" + self.LOG_FOR = "Registro para {0}" + self.ALL_FILES = "Todos los Archivos (*)" + self.GGUF_FILES = "Archivos GGUF (*.gguf)" + self.DAT_FILES = "Archivos DAT (*.dat)" + self.JSON_FILES = "Archivos JSON (*.json)" + self.FAILED_LOAD_PRESET = "Error al cargar el preajuste: {0}" + self.INITIALIZING_AUTOGGUF = "Inicializando aplicación AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialización de AutoGGUF completa" + self.REFRESHING_BACKENDS = "Actualizando backends" + self.NO_BACKENDS_AVAILABLE = "No hay backends disponibles" + self.FOUND_VALID_BACKENDS = "Se encontraron {0} backends válidos" + self.SAVING_PRESET = "Guardando preajuste" + self.PRESET_SAVED_TO = "Preajuste guardado en {0}" + self.LOADING_PRESET = "Cargando preajuste" + self.PRESET_LOADED_FROM = "Preajuste cargado desde {0}" + self.ADDING_KV_OVERRIDE = "Agregando anulación KV: {0}" + self.SAVING_TASK_PRESET = "Guardando preajuste de tarea para {0}" + self.TASK_PRESET_SAVED = "Preajuste de Tarea Guardado" + self.TASK_PRESET_SAVED_TO = "Preajuste de tarea guardado en {0}" + self.RESTARTING_TASK = "Reiniciando tarea: {0}" + self.IN_PROGRESS = "En Progreso" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Descarga finalizada. 
Extraído en: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binario de llama.cpp descargado y extraído en {0}\nArchivos CUDA extraídos en {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No se encontró un backend CUDA adecuado para la extracción" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binario de llama.cpp descargado y extraído en {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Actualizando versiones de llama.cpp" + self.UPDATING_ASSET_LIST = "Actualizando lista de activos" + self.UPDATING_CUDA_OPTIONS = "Actualizando opciones de CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Iniciando descarga de llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Actualizando backends CUDA" + self.NO_CUDA_BACKEND_SELECTED = "No se seleccionó backend CUDA para extracción" + self.EXTRACTING_CUDA_FILES = "Extrayendo archivos CUDA de {0} a {1}" + self.DOWNLOAD_ERROR = "Error de descarga: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Mostrando menú contextual de tarea" + self.SHOWING_PROPERTIES_FOR_TASK = "Mostrando propiedades para la tarea: {0}" + self.CANCELLING_TASK = "Cancelando tarea: {0}" + self.CANCELED = "Cancelado" + self.DELETING_TASK = "Eliminando tarea: {0}" + self.LOADING_MODELS = "Cargando modelos" + self.LOADED_MODELS = "Cargados {0} modelos" + self.BROWSING_FOR_MODELS_DIRECTORY = "Explorando directorio de modelos" + self.SELECT_MODELS_DIRECTORY = "Seleccionar Directorio de Modelos" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Explorando directorio de salida" + self.SELECT_OUTPUT_DIRECTORY = "Seleccionar Directorio de Salida" + self.BROWSING_FOR_LOGS_DIRECTORY = "Explorando directorio de registros" + self.SELECT_LOGS_DIRECTORY = "Seleccionar Directorio de Registros" + self.BROWSING_FOR_IMATRIX_FILE = "Explorando archivo IMatrix" + self.SELECT_IMATRIX_FILE = "Seleccionar Archivo IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "Uso de CPU: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Validando entradas de cuantización" + self.MODELS_PATH_REQUIRED = "Se requiere la ruta de modelos" + self.OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida" + self.LOGS_PATH_REQUIRED = "Se requiere la ruta de registros" + self.STARTING_MODEL_QUANTIZATION = "Iniciando cuantización de modelo" + self.INPUT_FILE_NOT_EXIST = "El archivo de entrada '{0}' no existe." 
+ self.QUANTIZING_MODEL_TO = "Cuantizando {0} a {1}" + self.QUANTIZATION_TASK_STARTED = "Tarea de cuantización iniciada para {0}" + self.ERROR_STARTING_QUANTIZATION = "Error al iniciar la cuantización: {0}" + self.UPDATING_MODEL_INFO = "Actualizando información del modelo: {0}" + self.TASK_FINISHED = "Tarea finalizada: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Mostrando detalles de la tarea para: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Explorando archivo de datos IMatrix" + self.SELECT_DATA_FILE = "Seleccionar Archivo de Datos" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Explorando archivo de modelo IMatrix" + self.SELECT_MODEL_FILE = "Seleccionar Archivo de Modelo" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Explorando archivo de salida IMatrix" + self.SELECT_OUTPUT_FILE = "Seleccionar Archivo de Salida" + self.STARTING_IMATRIX_GENERATION = "Iniciando generación de IMatrix" + self.BACKEND_PATH_NOT_EXIST = "La ruta del backend no existe: {0}" + self.GENERATING_IMATRIX = "Generando IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Error al iniciar la generación de IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Tarea de generación de IMatrix iniciada" + self.ERROR_MESSAGE = "Error: {0}" + self.TASK_ERROR = "Error de tarea: {0}" + self.APPLICATION_CLOSING = "Cerrando aplicación" + self.APPLICATION_CLOSED = "Aplicación cerrada" + self.SELECT_QUANTIZATION_TYPE = "Seleccione el tipo de cuantización" + self.ALLOWS_REQUANTIZING = "Permite recuantizar tensores que ya han sido cuantizados" + self.LEAVE_OUTPUT_WEIGHT = "Dejará output.weight sin (re)cuantizar" + self.DISABLE_K_QUANT_MIXTURES = "Desactiva las mezclas k-quant y cuantiza todos los tensores al mismo tipo" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usa los datos en el archivo como matriz de importancia para optimizaciones de cuantización" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Usar matriz de importancia para estos tensores" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "No usar matriz de importancia para estos tensores" + self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Salida:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Usar este tipo para el tensor output.weight" + self.TOKEN_EMBEDDING_TYPE = "Tipo de Incrustación de Token:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Usar este tipo para el tensor de incrustaciones de token" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Generará el modelo cuantizado en los mismos fragmentos que la entrada" + self.OVERRIDE_MODEL_METADATA = "Anular metadatos del modelo" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Archivo de datos de entrada para generación de IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Modelo a cuantizar" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Ruta de salida para el IMatrix generado" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Con qué frecuencia guardar el IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Establecer valor de descarga GPU (-ngl)" + self.COMPLETED = "Completado" + +class _Hindi(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (स्वचालित GGUF मॉडल क्वांटाइज़र)" + self.RAM_USAGE = "RAM उपयोग:" + self.CPU_USAGE = "CPU उपयोग:" + self.BACKEND = "Llama.cpp बैकएंड:" + self.REFRESH_BACKENDS = "बैकएंड रीफ्रेश करें" + self.MODELS_PATH = "मॉडल पथ:" + self.OUTPUT_PATH = "आउटपुट पथ:" + self.LOGS_PATH = "लॉग पथ:" + self.BROWSE = "ब्राउज़ करें" + self.AVAILABLE_MODELS = "उपलब्ध मॉडल:" + self.QUANTIZATION_TYPE = "क्वांटाइजेशन प्रकार:" + self.ALLOW_REQUANTIZE = "पुनः क्वांटाइज़ करने की अनुमति दें" + self.LEAVE_OUTPUT_TENSOR = "आउटपुट टेंसर छोड़ें" + 
self.PURE = "शुद्ध" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "वेट शामिल करें:" + self.EXCLUDE_WEIGHTS = "वेट बाहर रखें:" + self.USE_OUTPUT_TENSOR_TYPE = "आउटपुट टेंसर प्रकार का उपयोग करें" + self.USE_TOKEN_EMBEDDING_TYPE = "टोकन एम्बेडिंग प्रकार का उपयोग करें" + self.KEEP_SPLIT = "विभाजन रखें" + self.KV_OVERRIDES = "KV ओवरराइड:" + self.ADD_NEW_OVERRIDE = "नया ओवरराइड जोड़ें" + self.QUANTIZE_MODEL = "मॉडल क्वांटाइज़ करें" + self.SAVE_PRESET = "प्रीसेट सहेजें" + self.LOAD_PRESET = "प्रीसेट लोड करें" + self.TASKS = "कार्य:" + self.DOWNLOAD_LLAMACPP = "llama.cpp डाउनलोड करें" + self.SELECT_RELEASE = "रिलीज़ चुनें:" + self.SELECT_ASSET = "एसेट चुनें:" + self.EXTRACT_CUDA_FILES = "CUDA फ़ाइलें निकालें" + self.SELECT_CUDA_BACKEND = "CUDA बैकएंड चुनें:" + self.DOWNLOAD = "डाउनलोड करें" + self.IMATRIX_GENERATION = "IMatrix उत्पादन" + self.DATA_FILE = "डेटा फ़ाइल:" + self.MODEL = "मॉडल:" + self.OUTPUT = "आउटपुट:" + self.OUTPUT_FREQUENCY = "आउटपुट आवृत्ति:" + self.GPU_OFFLOAD = "GPU ऑफलोड:" + self.AUTO = "स्वचालित" + self.GENERATE_IMATRIX = "IMatrix उत्पन्न करें" + self.ERROR = "त्रुटि" + self.WARNING = "चेतावनी" + self.PROPERTIES = "गुण" + self.CANCEL = "रद्द करें" + self.RESTART = "पुनः आरंभ करें" + self.DELETE = "हटाएं" + self.CONFIRM_DELETION = "क्या आप वाकई इस कार्य को हटाना चाहते हैं?" + self.TASK_RUNNING_WARNING = "कुछ कार्य अभी भी चल रहे हैं। क्या आप वाकई बाहर निकलना चाहते हैं?"
+ self.YES = "हां" + self.NO = "नहीं" + self.DOWNLOAD_COMPLETE = "डाउनलोड पूरा हुआ" + self.CUDA_EXTRACTION_FAILED = "CUDA निष्कर्षण विफल" + self.PRESET_SAVED = "प्रीसेट सहेजा गया" + self.PRESET_LOADED = "प्रीसेट लोड किया गया" + self.NO_ASSET_SELECTED = "कोई एसेट चयनित नहीं" + self.DOWNLOAD_FAILED = "डाउनलोड विफल" + self.NO_BACKEND_SELECTED = "कोई बैकएंड चयनित नहीं" + self.NO_MODEL_SELECTED = "कोई मॉडल चयनित नहीं" + self.REFRESH_RELEASES = "रिलीज़ रीफ्रेश करें" + self.NO_SUITABLE_CUDA_BACKENDS = "कोई उपयुक्त CUDA बैकएंड नहीं मिला" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई\nCUDA फ़ाइलें {1} में निकाली गईं" + self.CUDA_FILES_EXTRACTED = "CUDA फ़ाइलें निकाली गईं" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "निष्कर्षण के लिए कोई उपयुक्त CUDA बैकएंड नहीं मिला" + self.ERROR_FETCHING_RELEASES = "रिलीज़ प्राप्त करने में त्रुटि: {0}" + self.CONFIRM_DELETION_TITLE = "हटाने की पुष्टि करें" + self.LOG_FOR = "{0} के लिए लॉग" + self.ALL_FILES = "सभी फ़ाइलें (*)" + self.GGUF_FILES = "GGUF फ़ाइलें (*.gguf)" + self.DAT_FILES = "DAT फ़ाइलें (*.dat)" + self.JSON_FILES = "JSON फ़ाइलें (*.json)" + self.FAILED_LOAD_PRESET = "प्रीसेट लोड करने में विफल: {0}" + self.INITIALIZING_AUTOGGUF = "AutoGGUF एप्लिकेशन प्रारंभ हो रहा है" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF प्रारंभीकरण पूरा हुआ" + self.REFRESHING_BACKENDS = "बैकएंड रीफ्रेश हो रहे हैं" + self.NO_BACKENDS_AVAILABLE = "कोई बैकएंड उपलब्ध नहीं" + self.FOUND_VALID_BACKENDS = "{0} मान्य बैकएंड मिले" + self.SAVING_PRESET = "प्रीसेट सहेजा जा रहा है" + self.PRESET_SAVED_TO = "प्रीसेट {0} में सहेजा गया" + self.LOADING_PRESET = "प्रीसेट लोड हो रहा है" + self.PRESET_LOADED_FROM = "{0} से प्रीसेट लोड किया गया" + self.ADDING_KV_OVERRIDE = "KV ओवरराइड जोड़ा जा रहा है: {0}" + self.SAVING_TASK_PRESET = "{0} के लिए कार्य प्रीसेट सहेजा जा रहा है" + self.TASK_PRESET_SAVED = "कार्य प्रीसेट सहेजा गया" + self.TASK_PRESET_SAVED_TO = "कार्य प्रीसेट {0} में सहेजा गया" + self.RESTARTING_TASK = "कार्य पुनः आरंभ हो रहा है: {0}" + self.IN_PROGRESS = "प्रगति में" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "डाउनलोड समाप्त। निकाला गया: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई\nCUDA फ़ाइलें {1} में निकाली गईं" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "निष्कर्षण के लिए कोई उपयुक्त CUDA बैकएंड नहीं मिला" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई" + self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp रिलीज़ रीफ्रेश हो रही हैं" + self.UPDATING_ASSET_LIST = "एसेट सूची अपडेट हो रही है" + self.UPDATING_CUDA_OPTIONS = "CUDA विकल्प अपडेट हो रहे हैं" + self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp डाउनलोड शुरू हो रहा है" + self.UPDATING_CUDA_BACKENDS = "CUDA बैकएंड अपडेट हो रहे हैं" + self.NO_CUDA_BACKEND_SELECTED = "निष्कर्षण के लिए कोई CUDA बैकएंड चयनित नहीं" + self.EXTRACTING_CUDA_FILES = "{0} से {1} में CUDA फ़ाइलें निकाली जा रही हैं" + self.DOWNLOAD_ERROR = "डाउनलोड त्रुटि: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "कार्य संदर्भ मेनू दिखाया जा रहा है" + self.SHOWING_PROPERTIES_FOR_TASK = "कार्य के लिए गुण दिखाए जा रहे हैं: {0}" + self.CANCELLING_TASK = "कार्य रद्द किया जा रहा है: {0}" + self.CANCELED = "रद्द किया गया" + self.DELETING_TASK = "कार्य हटाया जा रहा है: {0}" + self.LOADING_MODELS = "मॉडल लोड हो रहे हैं" + self.LOADED_MODELS = "{0} मॉडल लोड किए गए" + self.BROWSING_FOR_MODELS_DIRECTORY = "मॉडल निर्देशिका के लिए ब्राउज़ किया जा रहा है" + self.SELECT_MODELS_DIRECTORY = "मॉडल निर्देशिका चुनें" + self.BROWSING_FOR_OUTPUT_DIRECTORY = 
"आउटपुट निर्देशिका के लिए ब्राउज़ किया जा रहा है" + self.SELECT_OUTPUT_DIRECTORY = "आउटपुट निर्देशिका चुनें" + self.BROWSING_FOR_LOGS_DIRECTORY = "लॉग निर्देशिका के लिए ब्राउज़ किया जा रहा है" + self.SELECT_LOGS_DIRECTORY = "लॉग निर्देशिका चुनें" + self.BROWSING_FOR_IMATRIX_FILE = "IMatrix फ़ाइल के लिए ब्राउज़ किया जा रहा है" + self.SELECT_IMATRIX_FILE = "IMatrix फ़ाइल चुनें" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU उपयोग: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "क्वांटाइजेशन इनपुट सत्यापित किए जा रहे हैं" + self.MODELS_PATH_REQUIRED = "मॉडल पथ आवश्यक है" + self.OUTPUT_PATH_REQUIRED = "आउटपुट पथ आवश्यक है" + self.LOGS_PATH_REQUIRED = "लॉग पथ आवश्यक है" + self.STARTING_MODEL_QUANTIZATION = "मॉडल क्वांटाइजेशन शुरू हो रहा है" + self.INPUT_FILE_NOT_EXIST = "इनपुट फ़ाइल '{0}' मौजूद नहीं है।" + self.QUANTIZING_MODEL_TO = "{0} को {1} में क्वांटाइज़ किया जा रहा है" + self.QUANTIZATION_TASK_STARTED = "{0} के लिए क्वांटाइजेशन कार्य शुरू हुआ" + self.ERROR_STARTING_QUANTIZATION = "क्वांटाइजेशन शुरू करने में त्रुटि: {0}" + self.UPDATING_MODEL_INFO = "मॉडल जानकारी अपडेट हो रही है: {0}" + self.TASK_FINISHED = "कार्य समाप्त: {0}" + self.SHOWING_TASK_DETAILS_FOR = "कार्य विवरण दिखाए जा रहे हैं: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix डेटा फ़ाइल के लिए ब्राउज़ किया जा रहा है" + self.SELECT_DATA_FILE = "डेटा फ़ाइल चुनें" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix मॉडल फ़ाइल के लिए ब्राउज़ किया जा रहा है" + self.SELECT_MODEL_FILE = "मॉडल फ़ाइल चुनें" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix आउटपुट फ़ाइल के लिए ब्राउज़ किया जा रहा है" + self.SELECT_OUTPUT_FILE = "आउटपुट फ़ाइल चुनें" + self.STARTING_IMATRIX_GENERATION = "IMatrix उत्पादन शुरू हो रहा है" + self.BACKEND_PATH_NOT_EXIST = "बैकएंड पथ मौजूद नहीं है: {0}" + self.GENERATING_IMATRIX = "IMatrix उत्पन्न किया जा रहा है" + self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix उत्पादन शुरू करने में त्रुटि: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix उत्पादन कार्य शुरू हुआ" + self.ERROR_MESSAGE = "त्रुटि: {0}" + self.TASK_ERROR = "कार्य त्रुटि: {0}" + self.APPLICATION_CLOSING = "एप्लिकेशन बंद हो रहा है" + self.APPLICATION_CLOSED = "एप्लिकेशन बंद हो गया" + self.SELECT_QUANTIZATION_TYPE = "क्वांटाइजेशन प्रकार चुनें" + self.ALLOWS_REQUANTIZING = "पहले से क्वांटाइज़ किए गए टेंसर को पुनः क्वांटाइज़ करने की अनुमति देता है" + self.LEAVE_OUTPUT_WEIGHT = "output.weight को अक्वांटाइज़ (या पुनः क्वांटाइज़) छोड़ देगा" + self.DISABLE_K_QUANT_MIXTURES = "k-quant मिश्रण को अक्षम करें और सभी टेंसर को एक ही प्रकार में क्वांटाइज़ करें" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "क्वांट अनुकूलन के लिए फ़ाइल में डेटा को महत्व मैट्रिक्स के रूप में उपयोग करें" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "इन टेंसर के लिए महत्व मैट्रिक्स का उपयोग करें" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "इन टेंसर के लिए महत्व मैट्रिक्स का उपयोग न करें" + self.OUTPUT_TENSOR_TYPE = "आउटपुट टेंसर प्रकार:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weight टेंसर के लिए इस प्रकार का उपयोग करें" + self.TOKEN_EMBEDDING_TYPE = "टोकन एम्बेडिंग प्रकार:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "टोकन एम्बेडिंग टेंसर के लिए इस प्रकार का उपयोग करें" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "इनपुट के समान शार्ड्स में क्वांटाइज़ किए गए मॉडल को उत्पन्न करेगा" + self.OVERRIDE_MODEL_METADATA = "मॉडल मेटाडेटा को ओवरराइड करें" + self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix उत्पादन के लिए इनपुट डेटा फ़ाइल" + self.MODEL_TO_BE_QUANTIZED = "क्वांटाइज़ किए जाने वाला मॉडल" + 
self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "उत्पन्न IMatrix के लिए आउटपुट पथ" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix को कितनी बार सहेजना है" + self.SET_GPU_OFFLOAD_VALUE = "GPU ऑफलोड मान सेट करें (-ngl)" + self.COMPLETED = "पूरा हुआ" + + +class _Russian(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (автоматический квантователь моделей GGUF)" + self.RAM_USAGE = "Использование ОЗУ:" + self.CPU_USAGE = "Использование ЦП:" + self.BACKEND = "Бэкенд Llama.cpp:" + self.REFRESH_BACKENDS = "Обновить бэкенды" + self.MODELS_PATH = "Путь к моделям:" + self.OUTPUT_PATH = "Путь вывода:" + self.LOGS_PATH = "Путь к логам:" + self.BROWSE = "Обзор" + self.AVAILABLE_MODELS = "Доступные модели:" + self.QUANTIZATION_TYPE = "Тип квантования:" + self.ALLOW_REQUANTIZE = "Разрешить переквантование" + self.LEAVE_OUTPUT_TENSOR = "Оставить выходной тензор" + self.PURE = "Чистый" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Включить веса:" + self.EXCLUDE_WEIGHTS = "Исключить веса:" + self.USE_OUTPUT_TENSOR_TYPE = "Использовать тип выходного тензора" + self.USE_TOKEN_EMBEDDING_TYPE = "Использовать тип встраивания токенов" + self.KEEP_SPLIT = "Сохранить разделение" + self.KV_OVERRIDES = "KV переопределения:" + self.ADD_NEW_OVERRIDE = "Добавить новое переопределение" + self.QUANTIZE_MODEL = "Квантовать модель" + self.SAVE_PRESET = "Сохранить пресет" + self.LOAD_PRESET = "Загрузить пресет" + self.TASKS = "Задачи:" + self.DOWNLOAD_LLAMACPP = "Скачать llama.cpp" + self.SELECT_RELEASE = "Выбрать релиз:" + self.SELECT_ASSET = "Выбрать актив:" + self.EXTRACT_CUDA_FILES = "Извлечь файлы CUDA" + self.SELECT_CUDA_BACKEND = "Выбрать бэкенд CUDA:" + self.DOWNLOAD = "Скачать" + self.IMATRIX_GENERATION = "Генерация IMatrix" + self.DATA_FILE = "Файл данных:" + self.MODEL = "Модель:" + self.OUTPUT = "Вывод:" + self.OUTPUT_FREQUENCY = "Частота вывода:" + self.GPU_OFFLOAD = "Разгрузка GPU:" + self.AUTO = "Авто" + self.GENERATE_IMATRIX = "Сгенерировать IMatrix" + self.ERROR = "Ошибка" + self.WARNING = "Предупреждение" + self.PROPERTIES = "Свойства" + self.CANCEL = "Отмена" + self.RESTART = "Перезапуск" + self.DELETE = "Удалить" + self.CONFIRM_DELETION = "Вы уверены, что хотите удалить эту задачу?" + self.TASK_RUNNING_WARNING = "Некоторые задачи все еще выполняются. Вы уверены, что хотите выйти?" 
+ self.YES = "Да" + self.NO = "Нет" + self.DOWNLOAD_COMPLETE = "Загрузка завершена" + self.CUDA_EXTRACTION_FAILED = "Извлечение CUDA не удалось" + self.PRESET_SAVED = "Пресет сохранен" + self.PRESET_LOADED = "Пресет загружен" + self.NO_ASSET_SELECTED = "Актив не выбран" + self.DOWNLOAD_FAILED = "Загрузка не удалась" + self.NO_BACKEND_SELECTED = "Бэкенд не выбран" + self.NO_MODEL_SELECTED = "Модель не выбрана" + self.REFRESH_RELEASES = "Обновить релизы" + self.NO_SUITABLE_CUDA_BACKENDS = "Подходящие бэкенды CUDA не найдены" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Бинарный файл llama.cpp загружен и извлечен в {0}\nФайлы CUDA извлечены в {1}" + self.CUDA_FILES_EXTRACTED = "Файлы CUDA извлечены в" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Подходящий бэкенд CUDA для извлечения не найден" + self.ERROR_FETCHING_RELEASES = "Ошибка получения релизов: {0}" + self.CONFIRM_DELETION_TITLE = "Подтвердить удаление" + self.LOG_FOR = "Лог для {0}" + self.ALL_FILES = "Все файлы (*)" + self.GGUF_FILES = "Файлы GGUF (*.gguf)" + self.DAT_FILES = "Файлы DAT (*.dat)" + self.JSON_FILES = "Файлы JSON (*.json)" + self.FAILED_LOAD_PRESET = "Не удалось загрузить пресет: {0}" + self.INITIALIZING_AUTOGGUF = "Инициализация приложения AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Инициализация AutoGGUF завершена" + self.REFRESHING_BACKENDS = "Обновление бэкендов" + self.NO_BACKENDS_AVAILABLE = "Бэкенды недоступны" + self.FOUND_VALID_BACKENDS = "Найдено {0} действительных бэкендов" + self.SAVING_PRESET = "Сохранение пресета" + self.PRESET_SAVED_TO = "Пресет сохранен в {0}" + self.LOADING_PRESET = "Загрузка пресета" + self.PRESET_LOADED_FROM = "Пресет загружен из {0}" + self.ADDING_KV_OVERRIDE = "Добавление KV переопределения: {0}" + self.SAVING_TASK_PRESET = "Сохранение пресета задачи для {0}" + self.TASK_PRESET_SAVED = "Пресет задачи сохранен" + self.TASK_PRESET_SAVED_TO = "Пресет задачи сохранен в {0}" + self.RESTARTING_TASK = "Перезапуск задачи: {0}" + self.IN_PROGRESS = "В процессе" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Загрузка завершена. 
Извлечено в: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Бинарный файл llama.cpp загружен и извлечен в {0}\nФайлы CUDA извлечены в {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Подходящий бэкенд CUDA для извлечения не найден" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Бинарный файл llama.cpp загружен и извлечен в {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Обновление релизов llama.cpp" + self.UPDATING_ASSET_LIST = "Обновление списка активов" + self.UPDATING_CUDA_OPTIONS = "Обновление параметров CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Начало загрузки llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Обновление бэкендов CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Бэкенд CUDA для извлечения не выбран" + self.EXTRACTING_CUDA_FILES = "Извлечение файлов CUDA из {0} в {1}" + self.DOWNLOAD_ERROR = "Ошибка загрузки: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Отображение контекстного меню задачи" + self.SHOWING_PROPERTIES_FOR_TASK = "Отображение свойств задачи: {0}" + self.CANCELLING_TASK = "Отмена задачи: {0}" + self.CANCELED = "Отменено" + self.DELETING_TASK = "Удаление задачи: {0}" + self.LOADING_MODELS = "Загрузка моделей" + self.LOADED_MODELS = "Загружено {0} моделей" + self.BROWSING_FOR_MODELS_DIRECTORY = "Поиск каталога моделей" + self.SELECT_MODELS_DIRECTORY = "Выберите каталог моделей" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Поиск выходного каталога" + self.SELECT_OUTPUT_DIRECTORY = "Выберите выходной каталог" + self.BROWSING_FOR_LOGS_DIRECTORY = "Поиск каталога логов" + self.SELECT_LOGS_DIRECTORY = "Выберите каталог логов" + self.BROWSING_FOR_IMATRIX_FILE = "Поиск файла IMatrix" + self.SELECT_IMATRIX_FILE = "Выберите файл IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} МБ / {2} МБ)" + self.CPU_USAGE_FORMAT = "Использование ЦП: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Проверка входных данных квантования" + self.MODELS_PATH_REQUIRED = "Требуется путь к моделям" + self.OUTPUT_PATH_REQUIRED = "Требуется путь вывода" + self.LOGS_PATH_REQUIRED = "Требуется путь к логам" + self.STARTING_MODEL_QUANTIZATION = "Начало квантования модели" + self.INPUT_FILE_NOT_EXIST = "Входной файл '{0}' не существует." 
+ self.QUANTIZING_MODEL_TO = "Квантование {0} в {1}" + self.QUANTIZATION_TASK_STARTED = "Задача квантования запущена для {0}" + self.ERROR_STARTING_QUANTIZATION = "Ошибка запуска квантования: {0}" + self.UPDATING_MODEL_INFO = "Обновление информации о модели: {0}" + self.TASK_FINISHED = "Задача завершена: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Отображение сведений о задаче для: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Поиск файла данных IMatrix" + self.SELECT_DATA_FILE = "Выберите файл данных" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Поиск файла модели IMatrix" + self.SELECT_MODEL_FILE = "Выберите файл модели" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Поиск выходного файла IMatrix" + self.SELECT_OUTPUT_FILE = "Выберите выходной файл" + self.STARTING_IMATRIX_GENERATION = "Начало генерации IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Путь бэкенда не существует: {0}" + self.GENERATING_IMATRIX = "Генерация IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Ошибка запуска генерации IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Задача генерации IMatrix запущена" + self.ERROR_MESSAGE = "Ошибка: {0}" + self.TASK_ERROR = "Ошибка задачи: {0}" + self.APPLICATION_CLOSING = "Закрытие приложения" + self.APPLICATION_CLOSED = "Приложение закрыто" + self.SELECT_QUANTIZATION_TYPE = "Выберите тип квантования" + self.ALLOWS_REQUANTIZING = "Позволяет переквантовать тензоры, которые уже были квантованы" + self.LEAVE_OUTPUT_WEIGHT = "Оставит output.weight не (пере)квантованным" + self.DISABLE_K_QUANT_MIXTURES = "Отключить k-квантовые смеси и квантовать все тензоры к одному типу" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Использовать данные в файле как матрицу важности для оптимизации квантования" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Использовать матрицу важности для этих тензоров" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Не использовать матрицу важности для этих тензоров" + self.OUTPUT_TENSOR_TYPE = "Тип выходного тензора:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Использовать этот тип для тензора output.weight" + self.TOKEN_EMBEDDING_TYPE = "Тип встраивания токенов:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Использовать этот тип для тензора встраивания токенов" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Будет генерировать квантованную модель в тех же шардах, что и входные данные" + self.OVERRIDE_MODEL_METADATA = "Переопределить метаданные модели" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Входной файл данных для генерации IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Модель для квантования" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Выходной путь для сгенерированного IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Как часто сохранять IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Установить значение разгрузки GPU (-ngl)" + self.COMPLETED = "Завершено" + +class _Ukrainian(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (автоматичний квантувальник моделей GGUF)" + self.RAM_USAGE = "Використання ОЗУ:" + self.CPU_USAGE = "Використання ЦП:" + self.BACKEND = "Бекенд Llama.cpp:" + self.REFRESH_BACKENDS = "Оновити бекенди" + self.MODELS_PATH = "Шлях до моделей:" + self.OUTPUT_PATH = "Шлях виводу:" + self.LOGS_PATH = "Шлях до логів:" + self.BROWSE = "Огляд" + self.AVAILABLE_MODELS = "Доступні моделі:" + self.QUANTIZATION_TYPE = "Тип квантування:" + self.ALLOW_REQUANTIZE = "Дозволити переквантування" + self.LEAVE_OUTPUT_TENSOR = "Залишити вихідний тензор" + self.PURE = "Чистий" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Включити ваги:" + 
self.EXCLUDE_WEIGHTS = "Виключити ваги:" + self.USE_OUTPUT_TENSOR_TYPE = "Використовувати тип вихідного тензора" + self.USE_TOKEN_EMBEDDING_TYPE = "Використовувати тип вбудовування токенів" + self.KEEP_SPLIT = "Зберегти розділення" + self.KV_OVERRIDES = "KV перевизначення:" + self.ADD_NEW_OVERRIDE = "Додати нове перевизначення" + self.QUANTIZE_MODEL = "Квантувати модель" + self.SAVE_PRESET = "Зберегти пресет" + self.LOAD_PRESET = "Завантажити пресет" + self.TASKS = "Завдання:" + self.DOWNLOAD_LLAMACPP = "Завантажити llama.cpp" + self.SELECT_RELEASE = "Вибрати реліз:" + self.SELECT_ASSET = "Вибрати актив:" + self.EXTRACT_CUDA_FILES = "Витягнути файли CUDA" + self.SELECT_CUDA_BACKEND = "Вибрати бекенд CUDA:" + self.DOWNLOAD = "Завантажити" + self.IMATRIX_GENERATION = "Генерація IMatrix" + self.DATA_FILE = "Файл даних:" + self.MODEL = "Модель:" + self.OUTPUT = "Вивід:" + self.OUTPUT_FREQUENCY = "Частота виводу:" + self.GPU_OFFLOAD = "Розвантаження GPU:" + self.AUTO = "Авто" + self.GENERATE_IMATRIX = "Згенерувати IMatrix" + self.ERROR = "Помилка" + self.WARNING = "Попередження" + self.PROPERTIES = "Властивості" + self.CANCEL = "Скасувати" + self.RESTART = "Перезапустити" + self.DELETE = "Видалити" + self.CONFIRM_DELETION = "Ви впевнені, що хочете видалити це завдання?" + self.TASK_RUNNING_WARNING = "Деякі завдання все ще виконуються. Ви впевнені, що хочете вийти?" + self.YES = "Так" + self.NO = "Ні" + self.DOWNLOAD_COMPLETE = "Завантаження завершено" + self.CUDA_EXTRACTION_FAILED = "Витягнення CUDA не вдалося" + self.PRESET_SAVED = "Пресет збережено" + self.PRESET_LOADED = "Пресет завантажено" + self.NO_ASSET_SELECTED = "Актив не вибрано" + self.DOWNLOAD_FAILED = "Завантаження не вдалося" + self.NO_BACKEND_SELECTED = "Бекенд не вибрано" + self.NO_MODEL_SELECTED = "Модель не вибрано" + self.REFRESH_RELEASES = "Оновити релізи" + self.NO_SUITABLE_CUDA_BACKENDS = "Підходящі бекенди CUDA не знайдено" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Бінарний файл llama.cpp завантажено та витягнуто в {0}\nФайли CUDA витягнуто в {1}" + self.CUDA_FILES_EXTRACTED = "Файли CUDA витягнуто в" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Підходящий бекенд CUDA для витягнення не знайдено" + self.ERROR_FETCHING_RELEASES = "Помилка отримання релізів: {0}" + self.CONFIRM_DELETION_TITLE = "Підтвердити видалення" + self.LOG_FOR = "Лог для {0}" + self.ALL_FILES = "Всі файли (*)" + self.GGUF_FILES = "Файли GGUF (*.gguf)" + self.DAT_FILES = "Файли DAT (*.dat)" + self.JSON_FILES = "Файли JSON (*.json)" + self.FAILED_LOAD_PRESET = "Не вдалося завантажити пресет: {0}" + self.INITIALIZING_AUTOGGUF = "Ініціалізація програми AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Ініціалізація AutoGGUF завершена" + self.REFRESHING_BACKENDS = "Оновлення бекендів" + self.NO_BACKENDS_AVAILABLE = "Бекенди недоступні" + self.FOUND_VALID_BACKENDS = "Знайдено {0} дійсних бекендів" + self.SAVING_PRESET = "Збереження пресета" + self.PRESET_SAVED_TO = "Пресет збережено в {0}" + self.LOADING_PRESET = "Завантаження пресета" + self.PRESET_LOADED_FROM = "Пресет завантажено з {0}" + self.ADDING_KV_OVERRIDE = "Додавання KV перевизначення: {0}" + self.SAVING_TASK_PRESET = "Збереження пресета завдання для {0}" + self.TASK_PRESET_SAVED = "Пресет завдання збережено" + self.TASK_PRESET_SAVED_TO = "Пресет завдання збережено в {0}" + self.RESTARTING_TASK = "Перезапуск завдання: {0}" + self.IN_PROGRESS = "В процесі" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Завантаження завершено. 
Витягнуто в: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Бінарний файл llama.cpp завантажено та витягнуто в {0}\nФайли CUDA витягнуто в {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Підходящий бекенд CUDA для витягнення не знайдено" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Бінарний файл llama.cpp завантажено та витягнуто в {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Оновлення релізів llama.cpp" + self.UPDATING_ASSET_LIST = "Оновлення списку активів" + self.UPDATING_CUDA_OPTIONS = "Оновлення параметрів CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Початок завантаження llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Оновлення бекендів CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Бекенд CUDA для витягнення не вибрано" + self.EXTRACTING_CUDA_FILES = "Витягнення файлів CUDA з {0} в {1}" + self.DOWNLOAD_ERROR = "Помилка завантаження: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Відображення контекстного меню завдання" + self.SHOWING_PROPERTIES_FOR_TASK = "Відображення властивостей завдання: {0}" + self.CANCELLING_TASK = "Скасування завдання: {0}" + self.CANCELED = "Скасовано" + self.DELETING_TASK = "Видалення завдання: {0}" + self.LOADING_MODELS = "Завантаження моделей" + self.LOADED_MODELS = "Завантажено {0} моделей" + self.BROWSING_FOR_MODELS_DIRECTORY = "Пошук каталогу моделей" + self.SELECT_MODELS_DIRECTORY = "Виберіть каталог моделей" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Пошук вихідного каталогу" + self.SELECT_OUTPUT_DIRECTORY = "Виберіть вихідний каталог" + self.BROWSING_FOR_LOGS_DIRECTORY = "Пошук каталогу логів" + self.SELECT_LOGS_DIRECTORY = "Виберіть каталог логів" + self.BROWSING_FOR_IMATRIX_FILE = "Пошук файлу IMatrix" + self.SELECT_IMATRIX_FILE = "Виберіть файл IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} МБ / {2} МБ)" + self.CPU_USAGE_FORMAT = "Використання ЦП: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Перевірка вхідних даних квантування" + self.MODELS_PATH_REQUIRED = "Потрібен шлях до моделей" + self.OUTPUT_PATH_REQUIRED = "Потрібен шлях виводу" + self.LOGS_PATH_REQUIRED = "Потрібен шлях до логів" + self.STARTING_MODEL_QUANTIZATION = "Початок квантування моделі" + self.INPUT_FILE_NOT_EXIST = "Вхідний файл '{0}' не існує." 
+ self.QUANTIZING_MODEL_TO = "Квантування {0} в {1}" + self.QUANTIZATION_TASK_STARTED = "Завдання квантування запущено для {0}" + self.ERROR_STARTING_QUANTIZATION = "Помилка запуску квантування: {0}" + self.UPDATING_MODEL_INFO = "Оновлення інформації про модель: {0}" + self.TASK_FINISHED = "Завдання завершено: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Відображення відомостей про завдання для: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Пошук файлу даних IMatrix" + self.SELECT_DATA_FILE = "Виберіть файл даних" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Пошук файлу моделі IMatrix" + self.SELECT_MODEL_FILE = "Виберіть файл моделі" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Пошук вихідного файлу IMatrix" + self.SELECT_OUTPUT_FILE = "Виберіть вихідний файл" + self.STARTING_IMATRIX_GENERATION = "Початок генерації IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Шлях бекенда не існує: {0}" + self.GENERATING_IMATRIX = "Генерація IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Помилка запуску генерації IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Завдання генерації IMatrix запущено" + self.ERROR_MESSAGE = "Помилка: {0}" + self.TASK_ERROR = "Помилка завдання: {0}" + self.APPLICATION_CLOSING = "Закриття програми" + self.APPLICATION_CLOSED = "Програма закрита" + self.SELECT_QUANTIZATION_TYPE = "Виберіть тип квантування" + self.ALLOWS_REQUANTIZING = "Дозволяє переквантувати тензори, які вже були квантовані" + self.LEAVE_OUTPUT_WEIGHT = "Залишить output.weight не (пере)квантованим" + self.DISABLE_K_QUANT_MIXTURES = "Вимкнути k-квантові суміші та квантувати всі тензори до одного типу" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Використовувати дані у файлі як матрицю важливості для оптимізації квантування" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Використовувати матрицю важливості для цих тензорів" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Не використовувати матрицю важливості для цих тензорів" + self.OUTPUT_TENSOR_TYPE = "Тип вихідного тензора:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Використовувати цей тип для тензора output.weight" + self.TOKEN_EMBEDDING_TYPE = "Тип вбудовування токенів:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Використовувати цей тип для тензора вбудовування токенів" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Генеруватиме квантовану модель у тих самих шардах, що й вхідні дані" + self.OVERRIDE_MODEL_METADATA = "Перевизначити метадані моделі" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Вхідний файл даних для генерації IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Модель для квантування" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Вихідний шлях для згенерованого IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Як часто зберігати IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Встановити значення розвантаження GPU (-ngl)" + self.COMPLETED = "Завершено" + +class _Japanese(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (自動GGUFモデル量子化器)" + self.RAM_USAGE = "RAM使用量:" + self.CPU_USAGE = "CPU使用率:" + self.BACKEND = "Llama.cppバックエンド:" + self.REFRESH_BACKENDS = "バックエンドを更新" + self.MODELS_PATH = "モデルパス:" + self.OUTPUT_PATH = "出力パス:" + self.LOGS_PATH = "ログパス:" + self.BROWSE = "参照" + self.AVAILABLE_MODELS = "利用可能なモデル:" + self.QUANTIZATION_TYPE = "量子化タイプ:" + self.ALLOW_REQUANTIZE = "再量子化を許可" + self.LEAVE_OUTPUT_TENSOR = "出力テンソルを残す" + self.PURE = "純粋" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "重みを含める:" + self.EXCLUDE_WEIGHTS = "重みを含めない:" + self.USE_OUTPUT_TENSOR_TYPE = "出力テンソルタイプを使用" + self.USE_TOKEN_EMBEDDING_TYPE = "トークン埋め込みタイプを使用" + 
self.KEEP_SPLIT = "分割を維持" + self.KV_OVERRIDES = "KVオーバーライド:" + self.ADD_NEW_OVERRIDE = "新しいオーバーライドを追加" + self.QUANTIZE_MODEL = "モデルを量子化" + self.SAVE_PRESET = "プリセットを保存" + self.LOAD_PRESET = "プリセットを読み込む" + self.TASKS = "タスク:" + self.DOWNLOAD_LLAMACPP = "llama.cppをダウンロード" + self.SELECT_RELEASE = "リリースを選択:" + self.SELECT_ASSET = "アセットを選択:" + self.EXTRACT_CUDA_FILES = "CUDAファイルを抽出" + self.SELECT_CUDA_BACKEND = "CUDAバックエンドを選択:" + self.DOWNLOAD = "ダウンロード" + self.IMATRIX_GENERATION = "IMatrix生成" + self.DATA_FILE = "データファイル:" + self.MODEL = "モデル:" + self.OUTPUT = "出力:" + self.OUTPUT_FREQUENCY = "出力頻度:" + self.GPU_OFFLOAD = "GPUオフロード:" + self.AUTO = "自動" + self.GENERATE_IMATRIX = "IMatrixを生成" + self.ERROR = "エラー" + self.WARNING = "警告" + self.PROPERTIES = "プロパティ" + self.CANCEL = "キャンセル" + self.RESTART = "再起動" + self.DELETE = "削除" + self.CONFIRM_DELETION = "このタスクを削除してもよろしいですか?" + self.TASK_RUNNING_WARNING = "一部のタスクはまだ実行中です。終了してもよろしいですか?" + self.YES = "はい" + self.NO = "いいえ" + self.DOWNLOAD_COMPLETE = "ダウンロード完了" + self.CUDA_EXTRACTION_FAILED = "CUDA抽出に失敗しました" + self.PRESET_SAVED = "プリセットが保存されました" + self.PRESET_LOADED = "プリセットが読み込まれました" + self.NO_ASSET_SELECTED = "アセットが選択されていません" + self.DOWNLOAD_FAILED = "ダウンロードに失敗しました" + self.NO_BACKEND_SELECTED = "バックエンドが選択されていません" + self.NO_MODEL_SELECTED = "モデルが選択されていません" + self.REFRESH_RELEASES = "リリースを更新" + self.NO_SUITABLE_CUDA_BACKENDS = "適切なCUDAバックエンドが見つかりませんでした" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cppバイナリがダウンロードされ、{0}に抽出されました\nCUDAファイルは{1}に抽出されました" + self.CUDA_FILES_EXTRACTED = "CUDAファイルはに抽出されました" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "抽出に適したCUDAバックエンドが見つかりませんでした" + self.ERROR_FETCHING_RELEASES = "リリースの取得中にエラーが発生しました: {0}" + self.CONFIRM_DELETION_TITLE = "削除の確認" + self.LOG_FOR = "{0}のログ" + self.ALL_FILES = "すべてのファイル (*)" + self.GGUF_FILES = "GGUFファイル (*.gguf)" + self.DAT_FILES = "DATファイル (*.dat)" + self.JSON_FILES = "JSONファイル (*.json)" + self.FAILED_LOAD_PRESET = "プリセットの読み込みに失敗しました: {0}" + self.INITIALIZING_AUTOGGUF = "AutoGGUFアプリケーションを初期化しています" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUFの初期化が完了しました" + self.REFRESHING_BACKENDS = "バックエンドを更新しています" + self.NO_BACKENDS_AVAILABLE = "利用可能なバックエンドがありません" + self.FOUND_VALID_BACKENDS = "{0}個の有効なバックエンドが見つかりました" + self.SAVING_PRESET = "プリセットを保存しています" + self.PRESET_SAVED_TO = "プリセットは{0}に保存されました" + self.LOADING_PRESET = "プリセットを読み込んでいます" + self.PRESET_LOADED_FROM = "{0}からプリセットが読み込まれました" + self.ADDING_KV_OVERRIDE = "KVオーバーライドを追加しています: {0}" + self.SAVING_TASK_PRESET = "{0}のタスクプリセットを保存しています" + self.TASK_PRESET_SAVED = "タスクプリセットが保存されました" + self.TASK_PRESET_SAVED_TO = "タスクプリセットは{0}に保存されました" + self.RESTARTING_TASK = "タスクを再起動しています: {0}" + self.IN_PROGRESS = "処理中" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "ダウンロードが完了しました。抽出先: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cppバイナリがダウンロードされ、{0}に抽出されました\nCUDAファイルは{1}に抽出されました" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "抽出に適したCUDAバックエンドが見つかりませんでした" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cppバイナリがダウンロードされ、{0}に抽出されました" + self.REFRESHING_LLAMACPP_RELEASES = "llama.cppリリースを更新しています" + self.UPDATING_ASSET_LIST = "アセットリストを更新しています" + self.UPDATING_CUDA_OPTIONS = "CUDAオプションを更新しています" + self.STARTING_LLAMACPP_DOWNLOAD = "llama.cppのダウンロードを開始しています" + self.UPDATING_CUDA_BACKENDS = "CUDAバックエンドを更新しています" + self.NO_CUDA_BACKEND_SELECTED = "抽出にCUDAバックエンドが選択されていません" + self.EXTRACTING_CUDA_FILES = "{0}から{1}にCUDAファイルを抽出しています" + self.DOWNLOAD_ERROR = "ダウンロードエラー: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "タスクコンテキストメニューを表示しています" + self.SHOWING_PROPERTIES_FOR_TASK = 
"タスクのプロパティを表示しています: {0}" + self.CANCELLING_TASK = "タスクをキャンセルしています: {0}" + self.CANCELED = "キャンセル済み" + self.DELETING_TASK = "タスクを削除しています: {0}" + self.LOADING_MODELS = "モデルを読み込んでいます" + self.LOADED_MODELS = "{0}個のモデルが読み込まれました" + self.BROWSING_FOR_MODELS_DIRECTORY = "モデルディレクトリを参照しています" + self.SELECT_MODELS_DIRECTORY = "モデルディレクトリを選択" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "出力ディレクトリを参照しています" + self.SELECT_OUTPUT_DIRECTORY = "出力ディレクトリを選択" + self.BROWSING_FOR_LOGS_DIRECTORY = "ログディレクトリを参照しています" + self.SELECT_LOGS_DIRECTORY = "ログディレクトリを選択" + self.BROWSING_FOR_IMATRIX_FILE = "IMatrixファイルを参照しています" + self.SELECT_IMATRIX_FILE = "IMatrixファイルを選択" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU使用率: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "量子化入力を検証しています" + self.MODELS_PATH_REQUIRED = "モデルパスが必要です" + self.OUTPUT_PATH_REQUIRED = "出力パスが必要です" + self.LOGS_PATH_REQUIRED = "ログパスが必要です" + self.STARTING_MODEL_QUANTIZATION = "モデルの量子化を開始しています" + self.INPUT_FILE_NOT_EXIST = "入力ファイル '{0}' は存在しません。" + self.QUANTIZING_MODEL_TO = "{0} を {1} に量子化しています" + self.QUANTIZATION_TASK_STARTED = "{0} の量子化タスクが開始されました" + self.ERROR_STARTING_QUANTIZATION = "量子化の開始中にエラーが発生しました: {0}" + self.UPDATING_MODEL_INFO = "モデル情報を更新しています: {0}" + self.TASK_FINISHED = "タスクが完了しました: {0}" + self.SHOWING_TASK_DETAILS_FOR = "タスクの詳細を表示しています: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrixデータファイルを参照しています" + self.SELECT_DATA_FILE = "データファイルを選択" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrixモデルファイルを参照しています" + self.SELECT_MODEL_FILE = "モデルファイルを選択" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix出力ファイルを参照しています" + self.SELECT_OUTPUT_FILE = "出力ファイルを選択" + self.STARTING_IMATRIX_GENERATION = "IMatrixの生成を開始しています" + self.BACKEND_PATH_NOT_EXIST = "バックエンドパスが存在しません: {0}" + self.GENERATING_IMATRIX = "IMatrixを生成しています" + self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrixの生成を開始中にエラーが発生しました: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix生成タスクが開始されました" + self.ERROR_MESSAGE = "エラー: {0}" + self.TASK_ERROR = "タスクエラー: {0}" + self.APPLICATION_CLOSING = "アプリケーションを終了しています" + self.APPLICATION_CLOSED = "アプリケーションが終了しました" + self.SELECT_QUANTIZATION_TYPE = "量子化タイプを選択してください" + self.ALLOWS_REQUANTIZING = "すでに量子化されているテンソルの再量子化を許可します" + self.LEAVE_OUTPUT_WEIGHT = "output.weightは(再)量子化されません" + self.DISABLE_K_QUANT_MIXTURES = "k-quant混合を無効にし、すべてのテンソルを同じタイプに量子化します" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "量子化最適化の重要度マトリックスとしてファイル内のデータを使用します" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "これらのテンソルに重要度マトリックスを使用します" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "これらのテンソルに重要度マトリックスを使用しません" + self.OUTPUT_TENSOR_TYPE = "出力テンソルタイプ:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weightテンソルにこのタイプを使用します" + self.TOKEN_EMBEDDING_TYPE = "トークン埋め込みタイプ:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "トークン埋め込みテンソルにこのタイプを使用します" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "入力と同じシャードで量子化されたモデルを生成します" + self.OVERRIDE_MODEL_METADATA = "モデルメタデータを上書きする" + self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix生成用の入力データファイル" + self.MODEL_TO_BE_QUANTIZED = "量子化されるモデル" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "生成されたIMatrixの出力パス" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrixを保存する頻度" + self.SET_GPU_OFFLOAD_VALUE = "GPUオフロード値を設定 (-ngl)" + self.COMPLETED = "完了しました" + +class _German(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (automatisierter GGUF-Modellquantisierer)" + self.RAM_USAGE = "RAM-Nutzung:" + self.CPU_USAGE = "CPU-Auslastung:" + self.BACKEND = "Llama.cpp-Backend:" + self.REFRESH_BACKENDS = "Backends 
aktualisieren" + self.MODELS_PATH = "Modelle Pfad:" + self.OUTPUT_PATH = "Ausgabepfad:" + self.LOGS_PATH = "Log-Pfad:" + self.BROWSE = "Durchsuchen" + self.AVAILABLE_MODELS = "Verfügbare Modelle:" + self.QUANTIZATION_TYPE = "Quantisierungstyp:" + self.ALLOW_REQUANTIZE = "Requantisierung zulassen" + self.LEAVE_OUTPUT_TENSOR = "Ausgabetensor belassen" + self.PURE = "Rein" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Gewichte einschließen:" + self.EXCLUDE_WEIGHTS = "Gewichte ausschließen:" + self.USE_OUTPUT_TENSOR_TYPE = "Ausgabetensortyp verwenden" + self.USE_TOKEN_EMBEDDING_TYPE = "Token-Einbettungstyp verwenden" + self.KEEP_SPLIT = "Aufteilung beibehalten" + self.KV_OVERRIDES = "KV-Überschreibungen:" + self.ADD_NEW_OVERRIDE = "Neue Überschreibung hinzufügen" + self.QUANTIZE_MODEL = "Modell quantisieren" + self.SAVE_PRESET = "Preset speichern" + self.LOAD_PRESET = "Preset laden" + self.TASKS = "Aufgaben:" + self.DOWNLOAD_LLAMACPP = "llama.cpp herunterladen" + self.SELECT_RELEASE = "Release auswählen:" + self.SELECT_ASSET = "Asset auswählen:" + self.EXTRACT_CUDA_FILES = "CUDA-Dateien extrahieren" + self.SELECT_CUDA_BACKEND = "CUDA-Backend auswählen:" + self.DOWNLOAD = "Herunterladen" + self.IMATRIX_GENERATION = "IMatrix-Generierung" + self.DATA_FILE = "Datendatei:" + self.MODEL = "Modell:" + self.OUTPUT = "Ausgabe:" + self.OUTPUT_FREQUENCY = "Ausgabefrequenz:" + self.GPU_OFFLOAD = "GPU-Offload:" + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "IMatrix generieren" + self.ERROR = "Fehler" + self.WARNING = "Warnung" + self.PROPERTIES = "Eigenschaften" + self.CANCEL = "Abbrechen" + self.RESTART = "Neustart" + self.DELETE = "Löschen" + self.CONFIRM_DELETION = "Sind Sie sicher, dass Sie diese Aufgabe löschen möchten?" + self.TASK_RUNNING_WARNING = "Einige Aufgaben laufen noch. Möchten Sie wirklich beenden?" 
+ self.YES = "Ja" + self.NO = "Nein" + self.DOWNLOAD_COMPLETE = "Download abgeschlossen" + self.CUDA_EXTRACTION_FAILED = "CUDA-Extraktion fehlgeschlagen" + self.PRESET_SAVED = "Preset gespeichert" + self.PRESET_LOADED = "Preset geladen" + self.NO_ASSET_SELECTED = "Kein Asset ausgewählt" + self.DOWNLOAD_FAILED = "Download fehlgeschlagen" + self.NO_BACKEND_SELECTED = "Kein Backend ausgewählt" + self.NO_MODEL_SELECTED = "Kein Modell ausgewählt" + self.REFRESH_RELEASES = "Releases aktualisieren" + self.NO_SUITABLE_CUDA_BACKENDS = "Keine geeigneten CUDA-Backends gefunden" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}\nCUDA-Dateien extrahiert nach {1}" + self.CUDA_FILES_EXTRACTED = "CUDA-Dateien extrahiert nach" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Kein geeignetes CUDA-Backend für die Extraktion gefunden" + self.ERROR_FETCHING_RELEASES = "Fehler beim Abrufen der Releases: {0}" + self.CONFIRM_DELETION_TITLE = "Löschen bestätigen" + self.LOG_FOR = "Log für {0}" + self.ALL_FILES = "Alle Dateien (*)" + self.GGUF_FILES = "GGUF-Dateien (*.gguf)" + self.DAT_FILES = "DAT-Dateien (*.dat)" + self.JSON_FILES = "JSON-Dateien (*.json)" + self.FAILED_LOAD_PRESET = "Preset konnte nicht geladen werden: {0}" + self.INITIALIZING_AUTOGGUF = "AutoGGUF-Anwendung wird initialisiert" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-Initialisierung abgeschlossen" + self.REFRESHING_BACKENDS = "Backends werden aktualisiert" + self.NO_BACKENDS_AVAILABLE = "Keine Backends verfügbar" + self.FOUND_VALID_BACKENDS = "{0} gültige Backends gefunden" + self.SAVING_PRESET = "Preset wird gespeichert" + self.PRESET_SAVED_TO = "Preset gespeichert unter {0}" + self.LOADING_PRESET = "Preset wird geladen" + self.PRESET_LOADED_FROM = "Preset von {0} geladen" + self.ADDING_KV_OVERRIDE = "KV-Überschreibung wird hinzugefügt: {0}" + self.SAVING_TASK_PRESET = "Task-Preset für {0} wird gespeichert" + self.TASK_PRESET_SAVED = "Task-Preset gespeichert" + self.TASK_PRESET_SAVED_TO = "Task-Preset gespeichert unter {0}" + self.RESTARTING_TASK = "Aufgabe wird neu gestartet: {0}" + self.IN_PROGRESS = "In Bearbeitung" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download abgeschlossen. 
Extrahiert nach: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}\nCUDA-Dateien extrahiert nach {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Kein geeignetes CUDA-Backend für die Extraktion gefunden" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}" + self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp-Releases werden aktualisiert" + self.UPDATING_ASSET_LIST = "Asset-Liste wird aktualisiert" + self.UPDATING_CUDA_OPTIONS = "CUDA-Optionen werden aktualisiert" + self.STARTING_LLAMACPP_DOWNLOAD = "Download von llama.cpp wird gestartet" + self.UPDATING_CUDA_BACKENDS = "CUDA-Backends werden aktualisiert" + self.NO_CUDA_BACKEND_SELECTED = "Kein CUDA-Backend für die Extraktion ausgewählt" + self.EXTRACTING_CUDA_FILES = "CUDA-Dateien werden von {0} nach {1} extrahiert" + self.DOWNLOAD_ERROR = "Download-Fehler: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Kontextmenü der Aufgabe wird angezeigt" + self.SHOWING_PROPERTIES_FOR_TASK = "Eigenschaften für Aufgabe werden angezeigt: {0}" + self.CANCELLING_TASK = "Aufgabe wird abgebrochen: {0}" + self.CANCELED = "Abgebrochen" + self.DELETING_TASK = "Aufgabe wird gelöscht: {0}" + self.LOADING_MODELS = "Modelle werden geladen" + self.LOADED_MODELS = "{0} Modelle geladen" + self.BROWSING_FOR_MODELS_DIRECTORY = "Modelle-Verzeichnis wird durchsucht" + self.SELECT_MODELS_DIRECTORY = "Modelle-Verzeichnis auswählen" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Ausgabeverzeichnis wird durchsucht" + self.SELECT_OUTPUT_DIRECTORY = "Ausgabeverzeichnis auswählen" + self.BROWSING_FOR_LOGS_DIRECTORY = "Log-Verzeichnis wird durchsucht" + self.SELECT_LOGS_DIRECTORY = "Log-Verzeichnis auswählen" + self.BROWSING_FOR_IMATRIX_FILE = "IMatrix-Datei wird durchsucht" + self.SELECT_IMATRIX_FILE = "IMatrix-Datei auswählen" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU-Auslastung: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Quantisierungseingaben werden validiert" + self.MODELS_PATH_REQUIRED = "Modelle-Pfad ist erforderlich" + self.OUTPUT_PATH_REQUIRED = "Ausgabepfad ist erforderlich" + self.LOGS_PATH_REQUIRED = "Log-Pfad ist erforderlich" + self.STARTING_MODEL_QUANTIZATION = "Modellquantisierung wird gestartet" + self.INPUT_FILE_NOT_EXIST = "Die Eingabedatei '{0}' existiert nicht." 
+ self.QUANTIZING_MODEL_TO = "Quantisierung von {0} zu {1}" + self.QUANTIZATION_TASK_STARTED = "Quantisierungsaufgabe für {0} gestartet" + self.ERROR_STARTING_QUANTIZATION = "Fehler beim Starten der Quantisierung: {0}" + self.UPDATING_MODEL_INFO = "Modellinformationen werden aktualisiert: {0}" + self.TASK_FINISHED = "Aufgabe abgeschlossen: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Aufgabendetails werden angezeigt für: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix-Datendatei wird durchsucht" + self.SELECT_DATA_FILE = "Datendatei auswählen" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix-Modelldatei wird durchsucht" + self.SELECT_MODEL_FILE = "Modelldatei auswählen" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix-Ausgabedatei wird durchsucht" + self.SELECT_OUTPUT_FILE = "Ausgabedatei auswählen" + self.STARTING_IMATRIX_GENERATION = "IMatrix-Generierung wird gestartet" + self.BACKEND_PATH_NOT_EXIST = "Backend-Pfad existiert nicht: {0}" + self.GENERATING_IMATRIX = "IMatrix wird generiert" + self.ERROR_STARTING_IMATRIX_GENERATION = "Fehler beim Starten der IMatrix-Generierung: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-Generierungsaufgabe gestartet" + self.ERROR_MESSAGE = "Fehler: {0}" + self.TASK_ERROR = "Aufgabenfehler: {0}" + self.APPLICATION_CLOSING = "Anwendung wird geschlossen" + self.APPLICATION_CLOSED = "Anwendung geschlossen" + self.SELECT_QUANTIZATION_TYPE = "Wählen Sie den Quantisierungstyp aus" + self.ALLOWS_REQUANTIZING = "Ermöglicht die Requantisierung von Tensoren, die bereits quantisiert wurden" + self.LEAVE_OUTPUT_WEIGHT = "Lässt output.weight nicht (re)quantisiert" + self.DISABLE_K_QUANT_MIXTURES = "Deaktivieren Sie k-Quant-Mischungen und quantisieren Sie alle Tensoren auf denselben Typ" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Verwenden Sie Daten in der Datei als Wichtigkeitsmatrix für Quant-Optimierungen" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Verwenden Sie die Wichtigkeitsmatrix für diese Tensoren" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Verwenden Sie die Wichtigkeitsmatrix nicht für diese Tensoren" + self.OUTPUT_TENSOR_TYPE = "Ausgabetensortyp:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Verwenden Sie diesen Typ für den output.weight-Tensor" + self.TOKEN_EMBEDDING_TYPE = "Token-Einbettungstyp:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Verwenden Sie diesen Typ für den Token-Einbettungstensor" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Generiert ein quantisiertes Modell in denselben Shards wie die Eingabe" + self.OVERRIDE_MODEL_METADATA = "Modellmetadaten überschreiben" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Eingabedatendatei für die IMatrix-Generierung" + self.MODEL_TO_BE_QUANTIZED = "Zu quantisierendes Modell" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Ausgabepfad für die generierte IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Wie oft die IMatrix gespeichert werden soll" + self.SET_GPU_OFFLOAD_VALUE = "GPU-Offload-Wert festlegen (-ngl)" + self.COMPLETED = "Abgeschlossen" + +class _Portuguese(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Quantizador Automático de Modelos GGUF)" + self.RAM_USAGE = "Uso de RAM:" + self.CPU_USAGE = "Uso da CPU:" + self.BACKEND = "Backend do Llama.cpp:" + self.REFRESH_BACKENDS = "Atualizar Backends" + self.MODELS_PATH = "Caminho dos Modelos:" + self.OUTPUT_PATH = "Caminho de Saída:" + self.LOGS_PATH = "Caminho dos Logs:" + self.BROWSE = "Navegar" + self.AVAILABLE_MODELS = "Modelos Disponíveis:" + self.QUANTIZATION_TYPE = "Tipo de Quantização:" + 
self.ALLOW_REQUANTIZE = "Permitir Requantização" + self.LEAVE_OUTPUT_TENSOR = "Manter Tensor de Saída" + self.PURE = "Puro" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Incluir Pesos:" + self.EXCLUDE_WEIGHTS = "Excluir Pesos:" + self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Saída" + self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incorporação de Token" + self.KEEP_SPLIT = "Manter Divisão" + self.KV_OVERRIDES = "Substituições KV:" + self.ADD_NEW_OVERRIDE = "Adicionar Nova Substituição" + self.QUANTIZE_MODEL = "Quantizar Modelo" + self.SAVE_PRESET = "Salvar Predefinição" + self.LOAD_PRESET = "Carregar Predefinição" + self.TASKS = "Tarefas:" + self.DOWNLOAD_LLAMACPP = "Baixar llama.cpp" + self.SELECT_RELEASE = "Selecionar Versão:" + self.SELECT_ASSET = "Selecionar Ativo:" + self.EXTRACT_CUDA_FILES = "Extrair Arquivos CUDA" + self.SELECT_CUDA_BACKEND = "Selecionar Backend CUDA:" + self.DOWNLOAD = "Baixar" + self.IMATRIX_GENERATION = "Geração de IMatrix" + self.DATA_FILE = "Arquivo de Dados:" + self.MODEL = "Modelo:" + self.OUTPUT = "Saída:" + self.OUTPUT_FREQUENCY = "Frequência de Saída:" + self.GPU_OFFLOAD = "Offload da GPU:" + self.AUTO = "Automático" + self.GENERATE_IMATRIX = "Gerar IMatrix" + self.ERROR = "Erro" + self.WARNING = "Aviso" + self.PROPERTIES = "Propriedades" + self.CANCEL = "Cancelar" + self.RESTART = "Reiniciar" + self.DELETE = "Excluir" + self.CONFIRM_DELETION = "Tem certeza de que deseja excluir esta tarefa?" + self.TASK_RUNNING_WARNING = "Algumas tarefas ainda estão em execução. Tem certeza de que deseja sair?" + self.YES = "Sim" + self.NO = "Não" + self.DOWNLOAD_COMPLETE = "Download Concluído" + self.CUDA_EXTRACTION_FAILED = "Falha na Extração do CUDA" + self.PRESET_SAVED = "Predefinição Salva" + self.PRESET_LOADED = "Predefinição Carregada" + self.NO_ASSET_SELECTED = "Nenhum ativo selecionado" + self.DOWNLOAD_FAILED = "Falha no download" + self.NO_BACKEND_SELECTED = "Nenhum backend selecionado" + self.NO_MODEL_SELECTED = "Nenhum modelo selecionado" + self.REFRESH_RELEASES = "Atualizar Versões" + self.NO_SUITABLE_CUDA_BACKENDS = "Nenhum backend CUDA adequado encontrado" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binário llama.cpp baixado e extraído para {0}\nArquivos CUDA extraídos para {1}" + self.CUDA_FILES_EXTRACTED = "Arquivos CUDA extraídos para" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nenhum backend CUDA adequado encontrado para extração" + self.ERROR_FETCHING_RELEASES = "Erro ao buscar versões: {0}" + self.CONFIRM_DELETION_TITLE = "Confirmar Exclusão" + self.LOG_FOR = "Log para {0}" + self.ALL_FILES = "Todos os Arquivos (*)" + self.GGUF_FILES = "Arquivos GGUF (*.gguf)" + self.DAT_FILES = "Arquivos DAT (*.dat)" + self.JSON_FILES = "Arquivos JSON (*.json)" + self.FAILED_LOAD_PRESET = "Falha ao carregar a predefinição: {0}" + self.INITIALIZING_AUTOGGUF = "Inicializando o aplicativo AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialização do AutoGGUF concluída" + self.REFRESHING_BACKENDS = "Atualizando backends" + self.NO_BACKENDS_AVAILABLE = "Nenhum backend disponível" + self.FOUND_VALID_BACKENDS = "{0} backends válidos encontrados" + self.SAVING_PRESET = "Salvando predefinição" + self.PRESET_SAVED_TO = "Predefinição salva em {0}" + self.LOADING_PRESET = "Carregando predefinição" + self.PRESET_LOADED_FROM = "Predefinição carregada de {0}" + self.ADDING_KV_OVERRIDE = "Adicionando substituição KV: {0}" + self.SAVING_TASK_PRESET = "Salvando predefinição de tarefa para {0}" + self.TASK_PRESET_SAVED = "Predefinição de Tarefa Salva" + 
self.TASK_PRESET_SAVED_TO = "Predefinição de tarefa salva em {0}" + self.RESTARTING_TASK = "Reiniciando tarefa: {0}" + self.IN_PROGRESS = "Em Andamento" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download concluído. Extraído para: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp baixado e extraído para {0}\nArquivos CUDA extraídos para {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nenhum backend CUDA adequado encontrado para extração" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp baixado e extraído para {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Atualizando versões do llama.cpp" + self.UPDATING_ASSET_LIST = "Atualizando lista de ativos" + self.UPDATING_CUDA_OPTIONS = "Atualizando opções CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Iniciando download do llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Atualizando backends CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Nenhum backend CUDA selecionado para extração" + self.EXTRACTING_CUDA_FILES = "Extraindo arquivos CUDA de {0} para {1}" + self.DOWNLOAD_ERROR = "Erro de download: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Exibindo menu de contexto da tarefa" + self.SHOWING_PROPERTIES_FOR_TASK = "Exibindo propriedades para a tarefa: {0}" + self.CANCELLING_TASK = "Cancelando tarefa: {0}" + self.CANCELED = "Cancelado" + self.DELETING_TASK = "Excluindo tarefa: {0}" + self.LOADING_MODELS = "Carregando modelos" + self.LOADED_MODELS = "{0} modelos carregados" + self.BROWSING_FOR_MODELS_DIRECTORY = "Navegando pelo diretório de modelos" + self.SELECT_MODELS_DIRECTORY = "Selecionar Diretório de Modelos" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Navegando pelo diretório de saída" + self.SELECT_OUTPUT_DIRECTORY = "Selecionar Diretório de Saída" + self.BROWSING_FOR_LOGS_DIRECTORY = "Navegando pelo diretório de logs" + self.SELECT_LOGS_DIRECTORY = "Selecionar Diretório de Logs" + self.BROWSING_FOR_IMATRIX_FILE = "Navegando pelo arquivo IMatrix" + self.SELECT_IMATRIX_FILE = "Selecionar Arquivo IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "Uso da CPU: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Validando entradas de quantização" + self.MODELS_PATH_REQUIRED = "O caminho dos modelos é obrigatório" + self.OUTPUT_PATH_REQUIRED = "O caminho de saída é obrigatório" + self.LOGS_PATH_REQUIRED = "O caminho dos logs é obrigatório" + self.STARTING_MODEL_QUANTIZATION = "Iniciando a quantização do modelo" + self.INPUT_FILE_NOT_EXIST = "O arquivo de entrada '{0}' não existe." 
+ self.QUANTIZING_MODEL_TO = "Quantizando {0} para {1}" + self.QUANTIZATION_TASK_STARTED = "Tarefa de quantização iniciada para {0}" + self.ERROR_STARTING_QUANTIZATION = "Erro ao iniciar a quantização: {0}" + self.UPDATING_MODEL_INFO = "Atualizando informações do modelo: {0}" + self.TASK_FINISHED = "Tarefa concluída: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Mostrando detalhes da tarefa para: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Navegando pelo arquivo de dados IMatrix" + self.SELECT_DATA_FILE = "Selecionar Arquivo de Dados" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Navegando pelo arquivo de modelo IMatrix" + self.SELECT_MODEL_FILE = "Selecionar Arquivo de Modelo" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Navegando pelo arquivo de saída IMatrix" + self.SELECT_OUTPUT_FILE = "Selecionar Arquivo de Saída" + self.STARTING_IMATRIX_GENERATION = "Iniciando a geração de IMatrix" + self.BACKEND_PATH_NOT_EXIST = "O caminho do backend não existe: {0}" + self.GENERATING_IMATRIX = "Gerando IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Erro ao iniciar a geração de IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Tarefa de geração de IMatrix iniciada" + self.ERROR_MESSAGE = "Erro: {0}" + self.TASK_ERROR = "Erro de tarefa: {0}" + self.APPLICATION_CLOSING = "Fechando o aplicativo" + self.APPLICATION_CLOSED = "Aplicativo fechado" + self.SELECT_QUANTIZATION_TYPE = "Selecione o tipo de quantização" + self.ALLOWS_REQUANTIZING = "Permite requantizar tensores que já foram quantizados" + self.LEAVE_OUTPUT_WEIGHT = "Deixará output.weight não (re)quantizado" + self.DISABLE_K_QUANT_MIXTURES = "Desabilitar misturas k-quant e quantizar todos os tensores para o mesmo tipo" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usar os dados no arquivo como matriz de importância para otimizações de quantização" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Usar matriz de importância para estes tensores" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Não usar matriz de importância para estes tensores" + self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Saída:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Usar este tipo para o tensor output.weight" + self.TOKEN_EMBEDDING_TYPE = "Tipo de Incorporação de Token:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Usar este tipo para o tensor de incorporações de token" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Irá gerar o modelo quantizado nos mesmos shards da entrada" + self.OVERRIDE_MODEL_METADATA = "Substituir metadados do modelo" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Arquivo de dados de entrada para geração de IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Modelo a ser quantizado" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Caminho de saída para o IMatrix gerado" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Com que frequência salvar o IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Definir valor de offload da GPU (-ngl)" + self.COMPLETED = "Concluído" + +class _Arabic(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (مُكَمِّم نماذج GGUF التلقائي)" + self.RAM_USAGE = "استخدام ذاكرة الوصول العشوائي:" + self.CPU_USAGE = "استخدام وحدة المعالجة المركزية:" + self.BACKEND = "الخلفية Llama.cpp:" + self.REFRESH_BACKENDS = "تحديث الخلفيات" + self.MODELS_PATH = "مسار النماذج:" + self.OUTPUT_PATH = "مسار الإخراج:" + self.LOGS_PATH = "مسار السجلات:" + self.BROWSE = "استعراض" + self.AVAILABLE_MODELS = "النماذج المتاحة:" + self.QUANTIZATION_TYPE = "نوع التكميم:" + self.ALLOW_REQUANTIZE = "السماح بإعادة التكميم" + self.LEAVE_OUTPUT_TENSOR = "ترك موتر الإخراج" + 
self.PURE = "نقي" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "تضمين الأوزان:" + self.EXCLUDE_WEIGHTS = "استبعاد الأوزان:" + self.USE_OUTPUT_TENSOR_TYPE = "استخدام نوع موتر الإخراج" + self.USE_TOKEN_EMBEDDING_TYPE = "استخدام نوع تضمين الرمز المميز" + self.KEEP_SPLIT = "الحفاظ على التقسيم" + self.KV_OVERRIDES = "تجاوزات KV:" + self.ADD_NEW_OVERRIDE = "إضافة تجاوز جديد" + self.QUANTIZE_MODEL = "تكميم النموذج" + self.SAVE_PRESET = "حفظ الإعداد المسبق" + self.LOAD_PRESET = "تحميل الإعداد المسبق" + self.TASKS = "المهام:" + self.DOWNLOAD_LLAMACPP = "تنزيل llama.cpp" + self.SELECT_RELEASE = "تحديد الإصدار:" + self.SELECT_ASSET = "تحديد الأصل:" + self.EXTRACT_CUDA_FILES = "استخراج ملفات CUDA" + self.SELECT_CUDA_BACKEND = "تحديد خلفية CUDA:" + self.DOWNLOAD = "تنزيل" + self.IMATRIX_GENERATION = "توليد IMatrix" + self.DATA_FILE = "ملف البيانات:" + self.MODEL = "النموذج:" + self.OUTPUT = "الإخراج:" + self.OUTPUT_FREQUENCY = "تردد الإخراج:" + self.GPU_OFFLOAD = "تفريغ GPU:" + self.AUTO = "تلقائي" + self.GENERATE_IMATRIX = "توليد IMatrix" + self.ERROR = "خطأ" + self.WARNING = "تحذير" + self.PROPERTIES = "الخصائص" + self.CANCEL = "إلغاء" + self.RESTART = "إعادة تشغيل" + self.DELETE = "حذف" + self.CONFIRM_DELETION = "هل أنت متأكد أنك تريد حذف هذه المهمة؟" + self.TASK_RUNNING_WARNING = "لا تزال بعض المهام قيد التشغيل. هل أنت متأكد أنك تريد الإنهاء؟" + self.YES = "نعم" + self.NO = "لا" + self.DOWNLOAD_COMPLETE = "اكتمل التنزيل" + self.CUDA_EXTRACTION_FAILED = "فشل استخراج CUDA" + self.PRESET_SAVED = "تم حفظ الإعداد المسبق" + self.PRESET_LOADED = "تم تحميل الإعداد المسبق" + self.NO_ASSET_SELECTED = "لم يتم تحديد أصل" + self.DOWNLOAD_FAILED = "فشل التنزيل" + self.NO_BACKEND_SELECTED = "لم يتم تحديد خلفية" + self.NO_MODEL_SELECTED = "لم يتم تحديد نموذج" + self.REFRESH_RELEASES = "تحديث الإصدارات" + self.NO_SUITABLE_CUDA_BACKENDS = "لم يتم العثور على خلفيات CUDA مناسبة" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}\nتم استخراج ملفات CUDA إلى {1}" + self.CUDA_FILES_EXTRACTED = "تم استخراج ملفات CUDA إلى" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "لم يتم العثور على خلفية CUDA مناسبة للاستخراج" + self.ERROR_FETCHING_RELEASES = "خطأ في جلب الإصدارات: {0}" + self.CONFIRM_DELETION_TITLE = "تأكيد الحذف" + self.LOG_FOR = "سجل لـ {0}" + self.ALL_FILES = "جميع الملفات (*)" + self.GGUF_FILES = "ملفات GGUF (*.gguf)" + self.DAT_FILES = "ملفات DAT (*.dat)" + self.JSON_FILES = "ملفات JSON (*.json)" + self.FAILED_LOAD_PRESET = "فشل تحميل الإعداد المسبق: {0}" + self.INITIALIZING_AUTOGGUF = "تهيئة تطبيق AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "اكتملت تهيئة AutoGGUF" + self.REFRESHING_BACKENDS = "تحديث الخلفيات" + self.NO_BACKENDS_AVAILABLE = "لا توجد خلفيات متاحة" + self.FOUND_VALID_BACKENDS = "تم العثور على {0} خلفيات صالحة" + self.SAVING_PRESET = "حفظ الإعداد المسبق" + self.PRESET_SAVED_TO = "تم حفظ الإعداد المسبق إلى {0}" + self.LOADING_PRESET = "تحميل الإعداد المسبق" + self.PRESET_LOADED_FROM = "تم تحميل الإعداد المسبق من {0}" + self.ADDING_KV_OVERRIDE = "إضافة تجاوز KV: {0}" + self.SAVING_TASK_PRESET = "حفظ الإعداد المسبق للمهمة لـ {0}" + self.TASK_PRESET_SAVED = "تم حفظ الإعداد المسبق للمهمة" + self.TASK_PRESET_SAVED_TO = "تم حفظ الإعداد المسبق للمهمة إلى {0}" + self.RESTARTING_TASK = "إعادة تشغيل المهمة: {0}" + self.IN_PROGRESS = "قيد التقدم" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "اكتمل التنزيل. 
تم الاستخراج إلى: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}\nتم استخراج ملفات CUDA إلى {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "لم يتم العثور على خلفية CUDA مناسبة للاستخراج" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}" + self.REFRESHING_LLAMACPP_RELEASES = "تحديث إصدارات llama.cpp" + self.UPDATING_ASSET_LIST = "تحديث قائمة الأصول" + self.UPDATING_CUDA_OPTIONS = "تحديث خيارات CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "بدء تنزيل llama.cpp" + self.UPDATING_CUDA_BACKENDS = "تحديث خلفيات CUDA" + self.NO_CUDA_BACKEND_SELECTED = "لم يتم تحديد خلفية CUDA للاستخراج" + self.EXTRACTING_CUDA_FILES = "استخراج ملفات CUDA من {0} إلى {1}" + self.DOWNLOAD_ERROR = "خطأ في التنزيل: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "إظهار قائمة سياق المهمة" + self.SHOWING_PROPERTIES_FOR_TASK = "إظهار خصائص المهمة: {0}" + self.CANCELLING_TASK = "إلغاء المهمة: {0}" + self.CANCELED = "تم الإلغاء" + self.DELETING_TASK = "حذف المهمة: {0}" + self.LOADING_MODELS = "تحميل النماذج" + self.LOADED_MODELS = "تم تحميل {0} نماذج" + self.BROWSING_FOR_MODELS_DIRECTORY = "استعراض دليل النماذج" + self.SELECT_MODELS_DIRECTORY = "حدد دليل النماذج" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "استعراض دليل الإخراج" + self.SELECT_OUTPUT_DIRECTORY = "حدد دليل الإخراج" + self.BROWSING_FOR_LOGS_DIRECTORY = "استعراض دليل السجلات" + self.SELECT_LOGS_DIRECTORY = "حدد دليل السجلات" + self.BROWSING_FOR_IMATRIX_FILE = "استعراض ملف IMatrix" + self.SELECT_IMATRIX_FILE = "حدد ملف IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} ميغابايت / {2} ميغابايت)" + self.CPU_USAGE_FORMAT = "استخدام وحدة المعالجة المركزية: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "التحقق من صحة مدخلات التكميم" + self.MODELS_PATH_REQUIRED = "مسار النماذج مطلوب" + self.OUTPUT_PATH_REQUIRED = "مسار الإخراج مطلوب" + self.LOGS_PATH_REQUIRED = "مسار السجلات مطلوب" + self.STARTING_MODEL_QUANTIZATION = "بدء تكميم النموذج" + self.INPUT_FILE_NOT_EXIST = "ملف الإدخال '{0}' غير موجود." 
+ self.QUANTIZING_MODEL_TO = "تكميم {0} إلى {1}" + self.QUANTIZATION_TASK_STARTED = "بدأت مهمة التكميم لـ {0}" + self.ERROR_STARTING_QUANTIZATION = "خطأ في بدء التكميم: {0}" + self.UPDATING_MODEL_INFO = "تحديث معلومات النموذج: {0}" + self.TASK_FINISHED = "انتهت المهمة: {0}" + self.SHOWING_TASK_DETAILS_FOR = "إظهار تفاصيل المهمة لـ: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "استعراض ملف بيانات IMatrix" + self.SELECT_DATA_FILE = "حدد ملف البيانات" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "استعراض ملف نموذج IMatrix" + self.SELECT_MODEL_FILE = "حدد ملف النموذج" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "استعراض ملف إخراج IMatrix" + self.SELECT_OUTPUT_FILE = "حدد ملف الإخراج" + self.STARTING_IMATRIX_GENERATION = "بدء توليد IMatrix" + self.BACKEND_PATH_NOT_EXIST = "مسار الخلفية غير موجود: {0}" + self.GENERATING_IMATRIX = "توليد IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "خطأ في بدء توليد IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "بدأت مهمة توليد IMatrix" + self.ERROR_MESSAGE = "خطأ: {0}" + self.TASK_ERROR = "خطأ في المهمة: {0}" + self.APPLICATION_CLOSING = "إغلاق التطبيق" + self.APPLICATION_CLOSED = "تم إغلاق التطبيق" + self.SELECT_QUANTIZATION_TYPE = "حدد نوع التكميم" + self.ALLOWS_REQUANTIZING = "يسمح بإعادة تكميم الموترات التي تم تكميمها بالفعل" + self.LEAVE_OUTPUT_WEIGHT = "سيترك output.weight غير مُكَمَّم (أو مُعاد تكميمه)" + self.DISABLE_K_QUANT_MIXTURES = "تعطيل خلطات k-quant وتكميم جميع الموترات إلى نفس النوع" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "استخدام البيانات في الملف كمصفوفة أهمية لتحسينات التكميم" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "استخدام مصفوفة الأهمية لهذه الموترات" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "عدم استخدام مصفوفة الأهمية لهذه الموترات" + self.OUTPUT_TENSOR_TYPE = "نوع موتر الإخراج:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "استخدم هذا النوع لموتر output.weight" + self.TOKEN_EMBEDDING_TYPE = "نوع تضمين الرمز المميز:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "استخدم هذا النوع لموتر تضمينات الرمز المميز" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "سيولد نموذجًا مُكَمَّمًا في نفس شظايا الإدخال" + self.OVERRIDE_MODEL_METADATA = "تجاوز بيانات تعريف النموذج" + self.INPUT_DATA_FILE_FOR_IMATRIX = "ملف بيانات الإدخال لتوليد IMatrix" + self.MODEL_TO_BE_QUANTIZED = "النموذج المراد تكميمه" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "مسار الإخراج لـ IMatrix الذي تم إنشاؤه" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "عدد مرات حفظ IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "تعيين قيمة تفريغ GPU (-ngl)" + self.COMPLETED = "مكتمل" + +class _Korean(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (자동 GGUF 모델 양자화기)" + self.RAM_USAGE = "RAM 사용량:" + self.CPU_USAGE = "CPU 사용량:" + self.BACKEND = "Llama.cpp 백엔드:" + self.REFRESH_BACKENDS = "백엔드 새로 고침" + self.MODELS_PATH = "모델 경로:" + self.OUTPUT_PATH = "출력 경로:" + self.LOGS_PATH = "로그 경로:" + self.BROWSE = "찾아보기" + self.AVAILABLE_MODELS = "사용 가능한 모델:" + self.QUANTIZATION_TYPE = "양자화 유형:" + self.ALLOW_REQUANTIZE = "재양자화 허용" + self.LEAVE_OUTPUT_TENSOR = "출력 텐서 유지" + self.PURE = "순수" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "가중치 포함:" + self.EXCLUDE_WEIGHTS = "가중치 제외:" + self.USE_OUTPUT_TENSOR_TYPE = "출력 텐서 유형 사용" + self.USE_TOKEN_EMBEDDING_TYPE = "토큰 임베딩 유형 사용" + self.KEEP_SPLIT = "분할 유지" + self.KV_OVERRIDES = "KV 재정의:" + self.ADD_NEW_OVERRIDE = "새 재정의 추가" + self.QUANTIZE_MODEL = "모델 양자화" + self.SAVE_PRESET = "프리셋 저장" + self.LOAD_PRESET = "프리셋 로드" + self.TASKS = "작업:" + self.DOWNLOAD_LLAMACPP = "llama.cpp 다운로드" + self.SELECT_RELEASE = "릴리스 선택:" + 
self.SELECT_ASSET = "자산 선택:" + self.EXTRACT_CUDA_FILES = "CUDA 파일 추출" + self.SELECT_CUDA_BACKEND = "CUDA 백엔드 선택:" + self.DOWNLOAD = "다운로드" + self.IMATRIX_GENERATION = "IMatrix 생성" + self.DATA_FILE = "데이터 파일:" + self.MODEL = "모델:" + self.OUTPUT = "출력:" + self.OUTPUT_FREQUENCY = "출력 빈도:" + self.GPU_OFFLOAD = "GPU 오프로드:" + self.AUTO = "자동" + self.GENERATE_IMATRIX = "IMatrix 생성" + self.ERROR = "오류" + self.WARNING = "경고" + self.PROPERTIES = "속성" + self.CANCEL = "취소" + self.RESTART = "다시 시작" + self.DELETE = "삭제" + self.CONFIRM_DELETION = "이 작업을 삭제하시겠습니까?" + self.TASK_RUNNING_WARNING = "일부 작업이 아직 실행 중입니다. 종료하시겠습니까?" + self.YES = "예" + self.NO = "아니요" + self.DOWNLOAD_COMPLETE = "다운로드 완료" + self.CUDA_EXTRACTION_FAILED = "CUDA 추출 실패" + self.PRESET_SAVED = "프리셋 저장됨" + self.PRESET_LOADED = "프리셋 로드됨" + self.NO_ASSET_SELECTED = "자산이 선택되지 않았습니다" + self.DOWNLOAD_FAILED = "다운로드 실패" + self.NO_BACKEND_SELECTED = "백엔드가 선택되지 않았습니다" + self.NO_MODEL_SELECTED = "모델이 선택되지 않았습니다" + self.REFRESH_RELEASES = "릴리스 새로 고침" + self.NO_SUITABLE_CUDA_BACKENDS = "적합한 CUDA 백엔드를 찾을 수 없습니다" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다.\nCUDA 파일이 {1}에 추출되었습니다." + self.CUDA_FILES_EXTRACTED = "CUDA 파일이 에 추출되었습니다." + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "추출에 적합한 CUDA 백엔드를 찾을 수 없습니다." + self.ERROR_FETCHING_RELEASES = "릴리스를 가져오는 중 오류가 발생했습니다: {0}" + self.CONFIRM_DELETION_TITLE = "삭제 확인" + self.LOG_FOR = "{0}에 대한 로그" + self.ALL_FILES = "모든 파일 (*)" + self.GGUF_FILES = "GGUF 파일 (*.gguf)" + self.DAT_FILES = "DAT 파일 (*.dat)" + self.JSON_FILES = "JSON 파일 (*.json)" + self.FAILED_LOAD_PRESET = "프리셋을 로드하지 못했습니다: {0}" + self.INITIALIZING_AUTOGGUF = "AutoGGUF 애플리케이션을 초기화하는 중입니다." + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF 초기화가 완료되었습니다." + self.REFRESHING_BACKENDS = "백엔드를 새로 고치는 중입니다." + self.NO_BACKENDS_AVAILABLE = "사용 가능한 백엔드가 없습니다." + self.FOUND_VALID_BACKENDS = "{0}개의 유효한 백엔드를 찾았습니다." + self.SAVING_PRESET = "프리셋을 저장하는 중입니다." + self.PRESET_SAVED_TO = "프리셋이 {0}에 저장되었습니다." + self.LOADING_PRESET = "프리셋을 로드하는 중입니다." + self.PRESET_LOADED_FROM = "{0}에서 프리셋을 로드했습니다." + self.ADDING_KV_OVERRIDE = "KV 재정의를 추가하는 중입니다: {0}" + self.SAVING_TASK_PRESET = "{0}에 대한 작업 프리셋을 저장하는 중입니다." + self.TASK_PRESET_SAVED = "작업 프리셋이 저장되었습니다." + self.TASK_PRESET_SAVED_TO = "작업 프리셋이 {0}에 저장되었습니다." + self.RESTARTING_TASK = "작업을 다시 시작하는 중입니다: {0}" + self.IN_PROGRESS = "진행 중" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "다운로드가 완료되었습니다. 추출 위치: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다.\nCUDA 파일이 {1}에 추출되었습니다." + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "추출에 적합한 CUDA 백엔드를 찾을 수 없습니다." + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다." + self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp 릴리스를 새로 고치는 중입니다." + self.UPDATING_ASSET_LIST = "자산 목록을 업데이트하는 중입니다." + self.UPDATING_CUDA_OPTIONS = "CUDA 옵션을 업데이트하는 중입니다." + self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp 다운로드를 시작하는 중입니다." + self.UPDATING_CUDA_BACKENDS = "CUDA 백엔드를 업데이트하는 중입니다." + self.NO_CUDA_BACKEND_SELECTED = "추출에 CUDA 백엔드가 선택되지 않았습니다." + self.EXTRACTING_CUDA_FILES = "{0}에서 {1}로 CUDA 파일을 추출하는 중입니다." + self.DOWNLOAD_ERROR = "다운로드 오류: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "작업 컨텍스트 메뉴를 표시하는 중입니다." + self.SHOWING_PROPERTIES_FOR_TASK = "작업에 대한 속성을 표시하는 중입니다: {0}" + self.CANCELLING_TASK = "작업을 취소하는 중입니다: {0}" + self.CANCELED = "취소됨" + self.DELETING_TASK = "작업을 삭제하는 중입니다: {0}" + self.LOADING_MODELS = "모델을 로드하는 중입니다." + self.LOADED_MODELS = "{0}개의 모델이 로드되었습니다." + self.BROWSING_FOR_MODELS_DIRECTORY = "모델 디렉토리를 찾아보는 중입니다." 
+ self.SELECT_MODELS_DIRECTORY = "모델 디렉토리 선택" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "출력 디렉토리를 찾아보는 중입니다." + self.SELECT_OUTPUT_DIRECTORY = "출력 디렉토리 선택" + self.BROWSING_FOR_LOGS_DIRECTORY = "로그 디렉토리를 찾아보는 중입니다." + self.SELECT_LOGS_DIRECTORY = "로그 디렉토리 선택" + self.BROWSING_FOR_IMATRIX_FILE = "IMatrix 파일을 찾아보는 중입니다." + self.SELECT_IMATRIX_FILE = "IMatrix 파일 선택" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU 사용량: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "양자화 입력을 검증하는 중입니다." + self.MODELS_PATH_REQUIRED = "모델 경로가 필요합니다." + self.OUTPUT_PATH_REQUIRED = "출력 경로가 필요합니다." + self.LOGS_PATH_REQUIRED = "로그 경로가 필요합니다." + self.STARTING_MODEL_QUANTIZATION = "모델 양자화를 시작하는 중입니다." + self.INPUT_FILE_NOT_EXIST = "입력 파일 '{0}'이 존재하지 않습니다." + self.QUANTIZING_MODEL_TO = "{0}을 {1}(으)로 양자화하는 중입니다." + self.QUANTIZATION_TASK_STARTED = "{0}에 대한 양자화 작업이 시작되었습니다." + self.ERROR_STARTING_QUANTIZATION = "양자화를 시작하는 중 오류가 발생했습니다: {0}" + self.UPDATING_MODEL_INFO = "모델 정보를 업데이트하는 중입니다: {0}" + self.TASK_FINISHED = "작업이 완료되었습니다: {0}" + self.SHOWING_TASK_DETAILS_FOR = "다음에 대한 작업 세부 정보를 표시하는 중입니다: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix 데이터 파일을 찾아보는 중입니다." + self.SELECT_DATA_FILE = "데이터 파일 선택" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix 모델 파일을 찾아보는 중입니다." + self.SELECT_MODEL_FILE = "모델 파일 선택" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix 출력 파일을 찾아보는 중입니다." + self.SELECT_OUTPUT_FILE = "출력 파일 선택" + self.STARTING_IMATRIX_GENERATION = "IMatrix 생성을 시작하는 중입니다." + self.BACKEND_PATH_NOT_EXIST = "백엔드 경로가 존재하지 않습니다: {0}" + self.GENERATING_IMATRIX = "IMatrix를 생성하는 중입니다." + self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix 생성을 시작하는 중 오류가 발생했습니다: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix 생성 작업이 시작되었습니다." + self.ERROR_MESSAGE = "오류: {0}" + self.TASK_ERROR = "작업 오류: {0}" + self.APPLICATION_CLOSING = "애플리케이션을 닫는 중입니다." + self.APPLICATION_CLOSED = "애플리케이션이 닫혔습니다." + self.SELECT_QUANTIZATION_TYPE = "양자화 유형을 선택하세요." + self.ALLOWS_REQUANTIZING = "이미 양자화된 텐서의 재양자화를 허용합니다." + self.LEAVE_OUTPUT_WEIGHT = "output.weight를 (재)양자화하지 않은 상태로 둡니다." + self.DISABLE_K_QUANT_MIXTURES = "k-양자 혼합을 비활성화하고 모든 텐서를 동일한 유형으로 양자화합니다." + self.USE_DATA_AS_IMPORTANCE_MATRIX = "양자 최적화를 위한 중요도 행렬로 파일의 데이터를 사용합니다." + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "이러한 텐서에 중요도 행렬을 사용합니다." + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "이러한 텐서에 중요도 행렬을 사용하지 않습니다." + self.OUTPUT_TENSOR_TYPE = "출력 텐서 유형:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weight 텐서에 이 유형을 사용합니다." + self.TOKEN_EMBEDDING_TYPE = "토큰 임베딩 유형:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "토큰 임베딩 텐서에 이 유형을 사용합니다." + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "입력과 동일한 샤드에 양자화된 모델을 생성합니다." + self.OVERRIDE_MODEL_METADATA = "모델 메타데이터를 재정의합니다." 
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix 생성을 위한 입력 데이터 파일" + self.MODEL_TO_BE_QUANTIZED = "양자화될 모델" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "생성된 IMatrix의 출력 경로" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix를 저장할 빈도" + self.SET_GPU_OFFLOAD_VALUE = "GPU 오프로드 값 설정 (-ngl)" + self.COMPLETED = "완료됨" + +class _Italian(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Quantizzatore Automatico di Modelli GGUF)" + self.RAM_USAGE = "Utilizzo RAM:" + self.CPU_USAGE = "Utilizzo CPU:" + self.BACKEND = "Backend Llama.cpp:" + self.REFRESH_BACKENDS = "Aggiorna Backend" + self.MODELS_PATH = "Percorso Modelli:" + self.OUTPUT_PATH = "Percorso Output:" + self.LOGS_PATH = "Percorso Log:" + self.BROWSE = "Sfoglia" + self.AVAILABLE_MODELS = "Modelli Disponibili:" + self.QUANTIZATION_TYPE = "Tipo di Quantizzazione:" + self.ALLOW_REQUANTIZE = "Consenti Riquantizzazione" + self.LEAVE_OUTPUT_TENSOR = "Lascia Tensore di Output" + self.PURE = "Puro" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Includi Pesi:" + self.EXCLUDE_WEIGHTS = "Escludi Pesi:" + self.USE_OUTPUT_TENSOR_TYPE = "Usa Tipo di Tensore di Output" + self.USE_TOKEN_EMBEDDING_TYPE = "Usa Tipo di Incorporamento Token" + self.KEEP_SPLIT = "Mantieni Divisione" + self.KV_OVERRIDES = "Override KV:" + self.ADD_NEW_OVERRIDE = "Aggiungi Nuovo Override" + self.QUANTIZE_MODEL = "Quantizza Modello" + self.SAVE_PRESET = "Salva Preimpostazione" + self.LOAD_PRESET = "Carica Preimpostazione" + self.TASKS = "Attività:" + self.DOWNLOAD_LLAMACPP = "Scarica llama.cpp" + self.SELECT_RELEASE = "Seleziona Versione:" + self.SELECT_ASSET = "Seleziona Asset:" + self.EXTRACT_CUDA_FILES = "Estrai File CUDA" + self.SELECT_CUDA_BACKEND = "Seleziona Backend CUDA:" + self.DOWNLOAD = "Scarica" + self.IMATRIX_GENERATION = "Generazione IMatrix" + self.DATA_FILE = "File Dati:" + self.MODEL = "Modello:" + self.OUTPUT = "Output:" + self.OUTPUT_FREQUENCY = "Frequenza di Output:" + self.GPU_OFFLOAD = "Offload GPU:" + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "Genera IMatrix" + self.ERROR = "Errore" + self.WARNING = "Avviso" + self.PROPERTIES = "Proprietà" + self.CANCEL = "Annulla" + self.RESTART = "Riavvia" + self.DELETE = "Elimina" + self.CONFIRM_DELETION = "Sei sicuro di voler eliminare questa attività?" + self.TASK_RUNNING_WARNING = "Alcune attività sono ancora in esecuzione. Sei sicuro di voler uscire?" 
+ self.YES = "Sì" + self.NO = "No" + self.DOWNLOAD_COMPLETE = "Download Completato" + self.CUDA_EXTRACTION_FAILED = "Estrazione CUDA Fallita" + self.PRESET_SAVED = "Preimpostazione Salvata" + self.PRESET_LOADED = "Preimpostazione Caricata" + self.NO_ASSET_SELECTED = "Nessun asset selezionato" + self.DOWNLOAD_FAILED = "Download fallito" + self.NO_BACKEND_SELECTED = "Nessun backend selezionato" + self.NO_MODEL_SELECTED = "Nessun modello selezionato" + self.REFRESH_RELEASES = "Aggiorna Versioni" + self.NO_SUITABLE_CUDA_BACKENDS = "Nessun backend CUDA adatto trovato" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binario llama.cpp scaricato ed estratto in {0}\nFile CUDA estratti in {1}" + self.CUDA_FILES_EXTRACTED = "File CUDA estratti in" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nessun backend CUDA adatto trovato per l'estrazione" + self.ERROR_FETCHING_RELEASES = "Errore durante il recupero delle versioni: {0}" + self.CONFIRM_DELETION_TITLE = "Conferma Eliminazione" + self.LOG_FOR = "Log per {0}" + self.ALL_FILES = "Tutti i File (*)" + self.GGUF_FILES = "File GGUF (*.gguf)" + self.DAT_FILES = "File DAT (*.dat)" + self.JSON_FILES = "File JSON (*.json)" + self.FAILED_LOAD_PRESET = "Impossibile caricare la preimpostazione: {0}" + self.INITIALIZING_AUTOGGUF = "Inizializzazione dell'applicazione AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inizializzazione di AutoGGUF completata" + self.REFRESHING_BACKENDS = "Aggiornamento backend" + self.NO_BACKENDS_AVAILABLE = "Nessun backend disponibile" + self.FOUND_VALID_BACKENDS = "Trovati {0} backend validi" + self.SAVING_PRESET = "Salvataggio preimpostazione" + self.PRESET_SAVED_TO = "Preimpostazione salvata in {0}" + self.LOADING_PRESET = "Caricamento preimpostazione" + self.PRESET_LOADED_FROM = "Preimpostazione caricata da {0}" + self.ADDING_KV_OVERRIDE = "Aggiunta override KV: {0}" + self.SAVING_TASK_PRESET = "Salvataggio preimpostazione attività per {0}" + self.TASK_PRESET_SAVED = "Preimpostazione Attività Salvata" + self.TASK_PRESET_SAVED_TO = "Preimpostazione attività salvata in {0}" + self.RESTARTING_TASK = "Riavvio attività: {0}" + self.IN_PROGRESS = "In Corso" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download completato. 
Estratto in: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binario llama.cpp scaricato ed estratto in {0}\nFile CUDA estratti in {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nessun backend CUDA adatto trovato per l'estrazione" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binario llama.cpp scaricato ed estratto in {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Aggiornamento versioni di llama.cpp" + self.UPDATING_ASSET_LIST = "Aggiornamento elenco asset" + self.UPDATING_CUDA_OPTIONS = "Aggiornamento opzioni CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Avvio download di llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Aggiornamento backend CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Nessun backend CUDA selezionato per l'estrazione" + self.EXTRACTING_CUDA_FILES = "Estrazione file CUDA da {0} a {1}" + self.DOWNLOAD_ERROR = "Errore di download: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Visualizzazione menu contestuale attività" + self.SHOWING_PROPERTIES_FOR_TASK = "Visualizzazione proprietà per l'attività: {0}" + self.CANCELLING_TASK = "Annullamento attività: {0}" + self.CANCELED = "Annullato" + self.DELETING_TASK = "Eliminazione attività: {0}" + self.LOADING_MODELS = "Caricamento modelli" + self.LOADED_MODELS = "{0} modelli caricati" + self.BROWSING_FOR_MODELS_DIRECTORY = "Esplorazione directory modelli" + self.SELECT_MODELS_DIRECTORY = "Seleziona Directory Modelli" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Esplorazione directory output" + self.SELECT_OUTPUT_DIRECTORY = "Seleziona Directory Output" + self.BROWSING_FOR_LOGS_DIRECTORY = "Esplorazione directory log" + self.SELECT_LOGS_DIRECTORY = "Seleziona Directory Log" + self.BROWSING_FOR_IMATRIX_FILE = "Esplorazione file IMatrix" + self.SELECT_IMATRIX_FILE = "Seleziona File IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "Utilizzo CPU: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Convalida input di quantizzazione" + self.MODELS_PATH_REQUIRED = "Il percorso dei modelli è obbligatorio" + self.OUTPUT_PATH_REQUIRED = "Il percorso di output è obbligatorio" + self.LOGS_PATH_REQUIRED = "Il percorso dei log è obbligatorio" + self.STARTING_MODEL_QUANTIZATION = "Avvio quantizzazione del modello" + self.INPUT_FILE_NOT_EXIST = "Il file di input '{0}' non esiste." 
+ self.QUANTIZING_MODEL_TO = "Quantizzazione di {0} a {1}" + self.QUANTIZATION_TASK_STARTED = "Attività di quantizzazione avviata per {0}" + self.ERROR_STARTING_QUANTIZATION = "Errore durante l'avvio della quantizzazione: {0}" + self.UPDATING_MODEL_INFO = "Aggiornamento informazioni sul modello: {0}" + self.TASK_FINISHED = "Attività completata: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Visualizzazione dettagli attività per: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Esplorazione file dati IMatrix" + self.SELECT_DATA_FILE = "Seleziona File Dati" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Esplorazione file modello IMatrix" + self.SELECT_MODEL_FILE = "Seleziona File Modello" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Esplorazione file output IMatrix" + self.SELECT_OUTPUT_FILE = "Seleziona File Output" + self.STARTING_IMATRIX_GENERATION = "Avvio generazione IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Il percorso del backend non esiste: {0}" + self.GENERATING_IMATRIX = "Generazione IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Errore durante l'avvio della generazione di IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Attività di generazione IMatrix avviata" + self.ERROR_MESSAGE = "Errore: {0}" + self.TASK_ERROR = "Errore attività: {0}" + self.APPLICATION_CLOSING = "Chiusura applicazione" + self.APPLICATION_CLOSED = "Applicazione chiusa" + self.SELECT_QUANTIZATION_TYPE = "Seleziona il tipo di quantizzazione" + self.ALLOWS_REQUANTIZING = "Consente di riquantizzare tensori che sono già stati quantizzati" + self.LEAVE_OUTPUT_WEIGHT = "Lascerà output.weight non (ri)quantizzato" + self.DISABLE_K_QUANT_MIXTURES = "Disabilita le miscele k-quant e quantizza tutti i tensori allo stesso tipo" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilizza i dati nel file come matrice di importanza per le ottimizzazioni di quantizzazione" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Usa la matrice di importanza per questi tensori" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Non usare la matrice di importanza per questi tensori" + self.OUTPUT_TENSOR_TYPE = "Tipo di Tensore di Output:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Usa questo tipo per il tensore output.weight" + self.TOKEN_EMBEDDING_TYPE = "Tipo di Incorporamento Token:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Usa questo tipo per il tensore di incorporamenti token" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Genererà il modello quantizzato negli stessi frammenti dell'input" + self.OVERRIDE_MODEL_METADATA = "Sovrascrivi i metadati del modello" + self.INPUT_DATA_FILE_FOR_IMATRIX = "File di dati di input per la generazione di IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Modello da quantizzare" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Percorso di output per l'IMatrix generato" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Con quale frequenza salvare l'IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Imposta il valore di offload GPU (-ngl)" + self.COMPLETED = "Completato" + +class _Turkish(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Otomatik GGUF Modeli Niceleyici)" + self.RAM_USAGE = "RAM Kullanımı:" + self.CPU_USAGE = "CPU Kullanımı:" + self.BACKEND = "Llama.cpp Arka Uç:" + self.REFRESH_BACKENDS = "Arka Uçları Yenile" + self.MODELS_PATH = "Modeller Yolu:" + self.OUTPUT_PATH = "Çıkış Yolu:" + self.LOGS_PATH = "Günlükler Yolu:" + self.BROWSE = "Gözat" + self.AVAILABLE_MODELS = "Kullanılabilir Modeller:" + self.QUANTIZATION_TYPE = "Niceleme Türü:" + self.ALLOW_REQUANTIZE = "Yeniden Nicelemeye İzin Ver" + 
self.LEAVE_OUTPUT_TENSOR = "Çıkış Tensörünü Bırak" + self.PURE = "Saf" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Ağırlıkları Dahil Et:" + self.EXCLUDE_WEIGHTS = "Ağırlıkları Hariç Tut:" + self.USE_OUTPUT_TENSOR_TYPE = "Çıkış Tensör Türünü Kullan" + self.USE_TOKEN_EMBEDDING_TYPE = "Token Gömme Türünü Kullan" + self.KEEP_SPLIT = "Bölmeyi Koru" + self.KV_OVERRIDES = "KV Geçersiz Kılmaları:" + self.ADD_NEW_OVERRIDE = "Yeni Geçersiz Kılma Ekle" + self.QUANTIZE_MODEL = "Modeli Nicele" + self.SAVE_PRESET = "Ön Ayarı Kaydet" + self.LOAD_PRESET = "Ön Ayarı Yükle" + self.TASKS = "Görevler:" + self.DOWNLOAD_LLAMACPP = "llama.cpp'yi İndir" + self.SELECT_RELEASE = "Sürümü Seç:" + self.SELECT_ASSET = "Varlığı Seç:" + self.EXTRACT_CUDA_FILES = "CUDA Dosyalarını Çıkar" + self.SELECT_CUDA_BACKEND = "CUDA Arka Ucunu Seç:" + self.DOWNLOAD = "İndir" + self.IMATRIX_GENERATION = "IMatrix Üretimi" + self.DATA_FILE = "Veri Dosyası:" + self.MODEL = "Model:" + self.OUTPUT = "Çıkış:" + self.OUTPUT_FREQUENCY = "Çıkış Sıklığı:" + self.GPU_OFFLOAD = "GPU Yük Boşaltma:" + self.AUTO = "Otomatik" + self.GENERATE_IMATRIX = "IMatrix Oluştur" + self.ERROR = "Hata" + self.WARNING = "Uyarı" + self.PROPERTIES = "Özellikler" + self.CANCEL = "İptal" + self.RESTART = "Yeniden Başlat" + self.DELETE = "Sil" + self.CONFIRM_DELETION = "Bu görevi silmek istediğinizden emin misiniz?" + self.TASK_RUNNING_WARNING = "Bazı görevler hala çalışıyor. Çıkmak istediğinizden emin misiniz?" + self.YES = "Evet" + self.NO = "Hayır" + self.DOWNLOAD_COMPLETE = "İndirme Tamamlandı" + self.CUDA_EXTRACTION_FAILED = "CUDA Çıkarma Başarısız" + self.PRESET_SAVED = "Ön Ayar Kaydedildi" + self.PRESET_LOADED = "Ön Ayar Yüklendi" + self.NO_ASSET_SELECTED = "Varlık seçilmedi" + self.DOWNLOAD_FAILED = "İndirme başarısız" + self.NO_BACKEND_SELECTED = "Arka uç seçilmedi" + self.NO_MODEL_SELECTED = "Model seçilmedi" + self.REFRESH_RELEASES = "Sürümleri Yenile" + self.NO_SUITABLE_CUDA_BACKENDS = "Uygun CUDA arka uçları bulunamadı" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı\nCUDA dosyaları {1} konumuna çıkarıldı" + self.CUDA_FILES_EXTRACTED = "CUDA dosyaları konumuna çıkarıldı" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Çıkarma için uygun bir CUDA arka ucu bulunamadı" + self.ERROR_FETCHING_RELEASES = "Sürümleri getirirken hata oluştu: {0}" + self.CONFIRM_DELETION_TITLE = "Silmeyi Onayla" + self.LOG_FOR = "{0} için Günlük" + self.ALL_FILES = "Tüm Dosyalar (*)" + self.GGUF_FILES = "GGUF Dosyaları (*.gguf)" + self.DAT_FILES = "DAT Dosyaları (*.dat)" + self.JSON_FILES = "JSON Dosyaları (*.json)" + self.FAILED_LOAD_PRESET = "Ön ayarı yükleme başarısız: {0}" + self.INITIALIZING_AUTOGGUF = "AutoGGUF uygulaması başlatılıyor" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF başlatması tamamlandı" + self.REFRESHING_BACKENDS = "Arka uçlar yenileniyor" + self.NO_BACKENDS_AVAILABLE = "Kullanılabilir arka uç yok" + self.FOUND_VALID_BACKENDS = "{0} geçerli arka uç bulundu" + self.SAVING_PRESET = "Ön ayar kaydediliyor" + self.PRESET_SAVED_TO = "Ön ayar {0} konumuna kaydedildi" + self.LOADING_PRESET = "Ön ayar yükleniyor" + self.PRESET_LOADED_FROM = "Ön ayar {0} konumundan yüklendi" + self.ADDING_KV_OVERRIDE = "KV geçersiz kılma ekleniyor: {0}" + self.SAVING_TASK_PRESET = "{0} için görev ön ayarı kaydediliyor" + self.TASK_PRESET_SAVED = "Görev Ön Ayarı Kaydedildi" + self.TASK_PRESET_SAVED_TO = "Görev ön ayarı {0} konumuna kaydedildi" + self.RESTARTING_TASK = "Görev yeniden başlatılıyor: {0}" + self.IN_PROGRESS 
= "Devam Ediyor" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "İndirme tamamlandı. Şuraya çıkarıldı: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı\nCUDA dosyaları {1} konumuna çıkarıldı" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Çıkarma için uygun bir CUDA arka ucu bulunamadı" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı" + self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp sürümleri yenileniyor" + self.UPDATING_ASSET_LIST = "Varlık listesi güncelleniyor" + self.UPDATING_CUDA_OPTIONS = "CUDA seçenekleri güncelleniyor" + self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp indirme başlatılıyor" + self.UPDATING_CUDA_BACKENDS = "CUDA arka uçları güncelleniyor" + self.NO_CUDA_BACKEND_SELECTED = "Çıkarma için CUDA arka ucu seçilmedi" + self.EXTRACTING_CUDA_FILES = "CUDA dosyaları {0} konumundan {1} konumuna çıkarılıyor" + self.DOWNLOAD_ERROR = "İndirme hatası: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Görev bağlam menüsü gösteriliyor" + self.SHOWING_PROPERTIES_FOR_TASK = "Görev için özellikler gösteriliyor: {0}" + self.CANCELLING_TASK = "Görev iptal ediliyor: {0}" + self.CANCELED = "İptal Edildi" + self.DELETING_TASK = "Görev siliniyor: {0}" + self.LOADING_MODELS = "Modeller yükleniyor" + self.LOADED_MODELS = "{0} model yüklendi" + self.BROWSING_FOR_MODELS_DIRECTORY = "Modeller dizinine göz atılıyor" + self.SELECT_MODELS_DIRECTORY = "Modeller Dizini Seç" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Çıkış dizinine göz atılıyor" + self.SELECT_OUTPUT_DIRECTORY = "Çıkış Dizini Seç" + self.BROWSING_FOR_LOGS_DIRECTORY = "Günlükler dizinine göz atılıyor" + self.SELECT_LOGS_DIRECTORY = "Günlükler Dizini Seç" + self.BROWSING_FOR_IMATRIX_FILE = "IMatrix dosyasına göz atılıyor" + self.SELECT_IMATRIX_FILE = "IMatrix Dosyası Seç" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU Kullanımı: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Niceleme girişleri doğrulanıyor" + self.MODELS_PATH_REQUIRED = "Modeller yolu gerekli" + self.OUTPUT_PATH_REQUIRED = "Çıkış yolu gerekli" + self.LOGS_PATH_REQUIRED = "Günlükler yolu gerekli" + self.STARTING_MODEL_QUANTIZATION = "Model niceleme başlatılıyor" + self.INPUT_FILE_NOT_EXIST = "Giriş dosyası '{0}' mevcut değil." 
+ self.QUANTIZING_MODEL_TO = "{0} öğesini {1} öğesine niceleme" + self.QUANTIZATION_TASK_STARTED = "{0} için niceleme görevi başlatıldı" + self.ERROR_STARTING_QUANTIZATION = "Niceleme başlatılırken hata oluştu: {0}" + self.UPDATING_MODEL_INFO = "Model bilgileri güncelleniyor: {0}" + self.TASK_FINISHED = "Görev tamamlandı: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Şunun için görev ayrıntıları gösteriliyor: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix veri dosyasına göz atılıyor" + self.SELECT_DATA_FILE = "Veri Dosyası Seç" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix model dosyasına göz atılıyor" + self.SELECT_MODEL_FILE = "Model Dosyası Seç" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix çıkış dosyasına göz atılıyor" + self.SELECT_OUTPUT_FILE = "Çıkış Dosyası Seç" + self.STARTING_IMATRIX_GENERATION = "IMatrix üretimi başlatılıyor" + self.BACKEND_PATH_NOT_EXIST = "Arka uç yolu mevcut değil: {0}" + self.GENERATING_IMATRIX = "IMatrix oluşturuluyor" + self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix üretimi başlatılırken hata oluştu: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix oluşturma görevi başlatıldı" + self.ERROR_MESSAGE = "Hata: {0}" + self.TASK_ERROR = "Görev hatası: {0}" + self.APPLICATION_CLOSING = "Uygulama kapatılıyor" + self.APPLICATION_CLOSED = "Uygulama kapatıldı" + self.SELECT_QUANTIZATION_TYPE = "Niceleme türünü seçin" + self.ALLOWS_REQUANTIZING = "Zaten niceleme yapılmış tensörlerin yeniden nicelemesine izin verir" + self.LEAVE_OUTPUT_WEIGHT = "output.weight öğesini (yeniden) nicelememiş halde bırakır" + self.DISABLE_K_QUANT_MIXTURES = "k-Quant karışımlarını devre dışı bırakın ve tüm tensörleri aynı türe niceleyin" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Quant optimizasyonları için dosyadaki verileri önem matrisi olarak kullanın" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Bu tensörler için önem matrisini kullanın" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Bu tensörler için önem matrisini kullanmayın" + self.OUTPUT_TENSOR_TYPE = "Çıkış Tensör Türü:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weight tensörü için bu türü kullanın" + self.TOKEN_EMBEDDING_TYPE = "Token Gömme Türü:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Token gömme tensörü için bu türü kullanın" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Nicelemeli modeli girişle aynı parçalarda oluşturacaktır" + self.OVERRIDE_MODEL_METADATA = "Model meta verilerini geçersiz kıl" + self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix oluşturma için giriş veri dosyası" + self.MODEL_TO_BE_QUANTIZED = "Nicelemeli model" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Oluşturulan IMatrix için çıkış yolu" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix'in ne sıklıkta kaydedileceği" + self.SET_GPU_OFFLOAD_VALUE = "GPU yük boşaltma değerini ayarla (-ngl)" + self.COMPLETED = "Tamamlandı" + +class _Dutch(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (automatische GGUF-modelkwantisering)" + self.RAM_USAGE = "RAM-gebruik:" + self.CPU_USAGE = "CPU-gebruik:" + self.BACKEND = "Llama.cpp Backend:" + self.REFRESH_BACKENDS = "Backends vernieuwen" + self.MODELS_PATH = "Modelpad:" + self.OUTPUT_PATH = "Uitvoerpad:" + self.LOGS_PATH = "Logboekpad:" + self.BROWSE = "Bladeren" + self.AVAILABLE_MODELS = "Beschikbare modellen:" + self.QUANTIZATION_TYPE = "Kwantiseringstype:" + self.ALLOW_REQUANTIZE = "Herkwantisering toestaan" + self.LEAVE_OUTPUT_TENSOR = "Uitvoertensor behouden" + self.PURE = "Zuiver" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Gewichten 
opnemen:" + self.EXCLUDE_WEIGHTS = "Gewichten uitsluiten:" + self.USE_OUTPUT_TENSOR_TYPE = "Uitvoertensortype gebruiken" + self.USE_TOKEN_EMBEDDING_TYPE = "Tokeninbeddingstype gebruiken" + self.KEEP_SPLIT = "Splitsing behouden" + self.KV_OVERRIDES = "KV-overschrijvingen:" + self.ADD_NEW_OVERRIDE = "Nieuwe overschrijving toevoegen" + self.QUANTIZE_MODEL = "Model kwantiseren" + self.SAVE_PRESET = "Voorinstelling opslaan" + self.LOAD_PRESET = "Voorinstelling laden" + self.TASKS = "Taken:" + self.DOWNLOAD_LLAMACPP = "Download llama.cpp" + self.SELECT_RELEASE = "Selecteer release:" + self.SELECT_ASSET = "Selecteer item:" + self.EXTRACT_CUDA_FILES = "CUDA-bestanden uitpakken" + self.SELECT_CUDA_BACKEND = "Selecteer CUDA-backend:" + self.DOWNLOAD = "Downloaden" + self.IMATRIX_GENERATION = "IMatrix-generatie" + self.DATA_FILE = "Gegevensbestand:" + self.MODEL = "Model:" + self.OUTPUT = "Uitvoer:" + self.OUTPUT_FREQUENCY = "Uitvoerfrequentie:" + self.GPU_OFFLOAD = "GPU-offload:" + self.AUTO = "Automatisch" + self.GENERATE_IMATRIX = "IMatrix genereren" + self.ERROR = "Fout" + self.WARNING = "Waarschuwing" + self.PROPERTIES = "Eigenschappen" + self.CANCEL = "Annuleren" + self.RESTART = "Opnieuw starten" + self.DELETE = "Verwijderen" + self.CONFIRM_DELETION = "Weet u zeker dat u deze taak wilt verwijderen?" + self.TASK_RUNNING_WARNING = "Sommige taken worden nog uitgevoerd. Weet u zeker dat u wilt afsluiten?" + self.YES = "Ja" + self.NO = "Nee" + self.DOWNLOAD_COMPLETE = "Download voltooid" + self.CUDA_EXTRACTION_FAILED = "CUDA-extractie mislukt" + self.PRESET_SAVED = "Voorinstelling opgeslagen" + self.PRESET_LOADED = "Voorinstelling geladen" + self.NO_ASSET_SELECTED = "Geen item geselecteerd" + self.DOWNLOAD_FAILED = "Download mislukt" + self.NO_BACKEND_SELECTED = "Geen backend geselecteerd" + self.NO_MODEL_SELECTED = "Geen model geselecteerd" + self.REFRESH_RELEASES = "Releases vernieuwen" + self.NO_SUITABLE_CUDA_BACKENDS = "Geen geschikte CUDA-backends gevonden" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp-binairbestand gedownload en uitgepakt naar {0}\nCUDA-bestanden uitgepakt naar {1}" + self.CUDA_FILES_EXTRACTED = "CUDA-bestanden uitgepakt naar" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Geen geschikte CUDA-backend gevonden voor extractie" + self.ERROR_FETCHING_RELEASES = "Fout bij het ophalen van releases: {0}" + self.CONFIRM_DELETION_TITLE = "Verwijdering bevestigen" + self.LOG_FOR = "Logboek voor {0}" + self.ALL_FILES = "Alle bestanden (*)" + self.GGUF_FILES = "GGUF-bestanden (*.gguf)" + self.DAT_FILES = "DAT-bestanden (*.dat)" + self.JSON_FILES = "JSON-bestanden (*.json)" + self.FAILED_LOAD_PRESET = "Voorinstelling laden mislukt: {0}" + self.INITIALIZING_AUTOGGUF = "AutoGGUF-applicatie wordt geïnitialiseerd" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-initialisatie voltooid" + self.REFRESHING_BACKENDS = "Backends worden vernieuwd" + self.NO_BACKENDS_AVAILABLE = "Geen backends beschikbaar" + self.FOUND_VALID_BACKENDS = "{0} geldige backends gevonden" + self.SAVING_PRESET = "Voorinstelling wordt opgeslagen" + self.PRESET_SAVED_TO = "Voorinstelling opgeslagen in {0}" + self.LOADING_PRESET = "Voorinstelling wordt geladen" + self.PRESET_LOADED_FROM = "Voorinstelling geladen van {0}" + self.ADDING_KV_OVERRIDE = "KV-overschrijving toevoegen: {0}" + self.SAVING_TASK_PRESET = "Taakvoorinstelling opslaan voor {0}" + self.TASK_PRESET_SAVED = "Taakvoorinstelling opgeslagen" + self.TASK_PRESET_SAVED_TO = "Taakvoorinstelling opgeslagen in {0}" + self.RESTARTING_TASK = "Taak opnieuw 
starten: {0}" + self.IN_PROGRESS = "Bezig" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download voltooid. Uitgepakt naar: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binairbestand gedownload en uitgepakt naar {0}\nCUDA-bestanden uitgepakt naar {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Geen geschikte CUDA-backend gevonden voor extractie" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binairbestand gedownload en uitgepakt naar {0}" + self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp-releases worden vernieuwd" + self.UPDATING_ASSET_LIST = "Itemlijst wordt bijgewerkt" + self.UPDATING_CUDA_OPTIONS = "CUDA-opties worden bijgewerkt" + self.STARTING_LLAMACPP_DOWNLOAD = "Downloaden van llama.cpp wordt gestart" + self.UPDATING_CUDA_BACKENDS = "CUDA-backends worden bijgewerkt" + self.NO_CUDA_BACKEND_SELECTED = "Geen CUDA-backend geselecteerd voor extractie" + self.EXTRACTING_CUDA_FILES = "CUDA-bestanden uitpakken van {0} naar {1}" + self.DOWNLOAD_ERROR = "Downloadfout: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Taakcontextmenu weergeven" + self.SHOWING_PROPERTIES_FOR_TASK = "Eigenschappen voor taak weergeven: {0}" + self.CANCELLING_TASK = "Taak annuleren: {0}" + self.CANCELED = "Geannuleerd" + self.DELETING_TASK = "Taak verwijderen: {0}" + self.LOADING_MODELS = "Modellen laden" + self.LOADED_MODELS = "{0} modellen geladen" + self.BROWSING_FOR_MODELS_DIRECTORY = "Bladeren naar modelmap" + self.SELECT_MODELS_DIRECTORY = "Selecteer modelmap" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Bladeren naar uitvoermap" + self.SELECT_OUTPUT_DIRECTORY = "Selecteer uitvoermap" + self.BROWSING_FOR_LOGS_DIRECTORY = "Bladeren naar logboekmap" + self.SELECT_LOGS_DIRECTORY = "Selecteer logboekmap" + self.BROWSING_FOR_IMATRIX_FILE = "Bladeren naar IMatrix-bestand" + self.SELECT_IMATRIX_FILE = "Selecteer IMatrix-bestand" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU-gebruik: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Kwantiseringsinvoer valideren" + self.MODELS_PATH_REQUIRED = "Modelpad is vereist" + self.OUTPUT_PATH_REQUIRED = "Uitvoerpad is vereist" + self.LOGS_PATH_REQUIRED = "Logboekpad is vereist" + self.STARTING_MODEL_QUANTIZATION = "Modelkwantisering starten" + self.INPUT_FILE_NOT_EXIST = "Invoerbestand '{0}' bestaat niet." 
+ self.QUANTIZING_MODEL_TO = "Kwantiseren van {0} naar {1}" + self.QUANTIZATION_TASK_STARTED = "Kwantiseringstaak gestart voor {0}" + self.ERROR_STARTING_QUANTIZATION = "Fout bij het starten van kwantisering: {0}" + self.UPDATING_MODEL_INFO = "Modelinformatie bijwerken: {0}" + self.TASK_FINISHED = "Taak voltooid: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Taakdetails weergeven voor: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Bladeren naar IMatrix-gegevensbestand" + self.SELECT_DATA_FILE = "Selecteer gegevensbestand" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Bladeren naar IMatrix-modelbestand" + self.SELECT_MODEL_FILE = "Selecteer modelbestand" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Bladeren naar IMatrix-uitvoerbestand" + self.SELECT_OUTPUT_FILE = "Selecteer uitvoerbestand" + self.STARTING_IMATRIX_GENERATION = "IMatrix-generatie starten" + self.BACKEND_PATH_NOT_EXIST = "Backendpad bestaat niet: {0}" + self.GENERATING_IMATRIX = "IMatrix genereren" + self.ERROR_STARTING_IMATRIX_GENERATION = "Fout bij het starten van IMatrix-generatie: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-generatietaak gestart" + self.ERROR_MESSAGE = "Fout: {0}" + self.TASK_ERROR = "Taakfout: {0}" + self.APPLICATION_CLOSING = "Applicatie wordt afgesloten" + self.APPLICATION_CLOSED = "Applicatie afgesloten" + self.SELECT_QUANTIZATION_TYPE = "Selecteer het kwantiseringstype" + self.ALLOWS_REQUANTIZING = "Staat herkwantisering toe van tensoren die al gekwantiseerd zijn" + self.LEAVE_OUTPUT_WEIGHT = "Laat output.weight niet (opnieuw) gekwantiseerd" + self.DISABLE_K_QUANT_MIXTURES = "Schakel k-kwant-mengsels uit en kwantiseer alle tensoren naar hetzelfde type" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Gebruik gegevens in bestand als belangrijkheidsmatrix voor kwant-optimalisaties" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Gebruik belangrijkheidsmatrix voor deze tensoren" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Gebruik geen belangrijkheidsmatrix voor deze tensoren" + self.OUTPUT_TENSOR_TYPE = "Uitvoertensortype:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Gebruik dit type voor de output.weight-tensor" + self.TOKEN_EMBEDDING_TYPE = "Tokeninbeddingstype:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Gebruik dit type voor de tokeninbeddingstensor" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Genereert een gekwantiseerd model in dezelfde shards als de invoer" + self.OVERRIDE_MODEL_METADATA = "Modelmetadata overschrijven" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Invoergegevensbestand voor IMatrix-generatie" + self.MODEL_TO_BE_QUANTIZED = "Te kwantiseren model" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Uitvoerpad voor de gegenereerde IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Hoe vaak de IMatrix moet worden opgeslagen" + self.SET_GPU_OFFLOAD_VALUE = "Stel de GPU-offloadwaarde in (-ngl)" + self.COMPLETED = "Voltooid" + +class _Finnish(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (automaattinen GGUF-mallien kvantisoija)" + self.RAM_USAGE = "RAM-muistin käyttö:" + self.CPU_USAGE = "CPU:n käyttö:" + self.BACKEND = "Llama.cpp-taustaosa:" + self.REFRESH_BACKENDS = "Päivitä taustaosat" + self.MODELS_PATH = "Mallien polku:" + self.OUTPUT_PATH = "Tulostepolku:" + self.LOGS_PATH = "Lokien polku:" + self.BROWSE = "Selaa" + self.AVAILABLE_MODELS = "Käytettävissä olevat mallit:" + self.QUANTIZATION_TYPE = "Kvantisointityyppi:" + self.ALLOW_REQUANTIZE = "Salli uudelleenkvantisointi" + self.LEAVE_OUTPUT_TENSOR = "Jätä tulostensori" + self.PURE = "Puhdas" + 
self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Sisällytä painot:" + self.EXCLUDE_WEIGHTS = "Sulje pois painot:" + self.USE_OUTPUT_TENSOR_TYPE = "Käytä tulostensorin tyyppiä" + self.USE_TOKEN_EMBEDDING_TYPE = "Käytä token-upotustyyppiä" + self.KEEP_SPLIT = "Säilytä jako" + self.KV_OVERRIDES = "KV-ohitukset:" + self.ADD_NEW_OVERRIDE = "Lisää uusi ohitus" + self.QUANTIZE_MODEL = "Kvantisoi malli" + self.SAVE_PRESET = "Tallenna esiasetus" + self.LOAD_PRESET = "Lataa esiasetus" + self.TASKS = "Tehtävät:" + self.DOWNLOAD_LLAMACPP = "Lataa llama.cpp" + self.SELECT_RELEASE = "Valitse julkaisu:" + self.SELECT_ASSET = "Valitse resurssi:" + self.EXTRACT_CUDA_FILES = "Pura CUDA-tiedostot" + self.SELECT_CUDA_BACKEND = "Valitse CUDA-taustaosa:" + self.DOWNLOAD = "Lataa" + self.IMATRIX_GENERATION = "IMatrix-generointi" + self.DATA_FILE = "Datatiedosto:" + self.MODEL = "Malli:" + self.OUTPUT = "Tuloste:" + self.OUTPUT_FREQUENCY = "Tulostetaajuus:" + self.GPU_OFFLOAD = "GPU-kuormansiirto:" + self.AUTO = "Automaattinen" + self.GENERATE_IMATRIX = "Generoi IMatrix" + self.ERROR = "Virhe" + self.WARNING = "Varoitus" + self.PROPERTIES = "Ominaisuudet" + self.CANCEL = "Peruuta" + self.RESTART = "Käynnistä uudelleen" + self.DELETE = "Poista" + self.CONFIRM_DELETION = "Haluatko varmasti poistaa tämän tehtävän?" + self.TASK_RUNNING_WARNING = "Jotkin tehtävät ovat vielä käynnissä. Haluatko varmasti lopettaa?" + self.YES = "Kyllä" + self.NO = "Ei" + self.DOWNLOAD_COMPLETE = "Lataus valmis" + self.CUDA_EXTRACTION_FAILED = "CUDA-purku epäonnistui" + self.PRESET_SAVED = "Esiasetus tallennettu" + self.PRESET_LOADED = "Esiasetus ladattu" + self.NO_ASSET_SELECTED = "Ei resurssia valittuna" + self.DOWNLOAD_FAILED = "Lataus epäonnistui" + self.NO_BACKEND_SELECTED = "Ei taustaosaa valittuna" + self.NO_MODEL_SELECTED = "Ei mallia valittuna" + self.REFRESH_RELEASES = "Päivitä julkaisut" + self.NO_SUITABLE_CUDA_BACKENDS = "Sopivia CUDA-taustaosoja ei löytynyt" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}\nCUDA-tiedostot purettu kansioon {1}" + self.CUDA_FILES_EXTRACTED = "CUDA-tiedostot purettu kansioon" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Sopivaa CUDA-taustaosaa purkua varten ei löytynyt" + self.ERROR_FETCHING_RELEASES = "Virhe haettaessa julkaisuja: {0}" + self.CONFIRM_DELETION_TITLE = "Vahvista poisto" + self.LOG_FOR = "Loki kohteelle {0}" + self.ALL_FILES = "Kaikki tiedostot (*)" + self.GGUF_FILES = "GGUF-tiedostot (*.gguf)" + self.DAT_FILES = "DAT-tiedostot (*.dat)" + self.JSON_FILES = "JSON-tiedostot (*.json)" + self.FAILED_LOAD_PRESET = "Esiasetuksen lataus epäonnistui: {0}" + self.INITIALIZING_AUTOGGUF = "Alustetaan AutoGGUF-sovellusta" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-alustus valmis" + self.REFRESHING_BACKENDS = "Päivitetään taustaosoja" + self.NO_BACKENDS_AVAILABLE = "Ei käytettävissä olevia taustaosoja" + self.FOUND_VALID_BACKENDS = "Löydettiin {0} kelvollista taustaosaa" + self.SAVING_PRESET = "Tallennetaan esiasetusta" + self.PRESET_SAVED_TO = "Esiasetus tallennettu kansioon {0}" + self.LOADING_PRESET = "Ladataan esiasetusta" + self.PRESET_LOADED_FROM = "Esiasetus ladattu kansiosta {0}" + self.ADDING_KV_OVERRIDE = "Lisätään KV-ohitus: {0}" + self.SAVING_TASK_PRESET = "Tallennetaan tehtäväesiasetusta kohteelle {0}" + self.TASK_PRESET_SAVED = "Tehtäväesiasetus tallennettu" + self.TASK_PRESET_SAVED_TO = "Tehtäväesiasetus tallennettu kansioon {0}" + self.RESTARTING_TASK = "Käynnistetään tehtävä uudelleen: {0}" + self.IN_PROGRESS = 
"Käynnissä" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Lataus valmis. Purettu kansioon: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}\nCUDA-tiedostot purettu kansioon {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Sopivaa CUDA-taustaosaa purkua varten ei löytynyt" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Päivitetään llama.cpp-julkaisuja" + self.UPDATING_ASSET_LIST = "Päivitetään resurssilistaa" + self.UPDATING_CUDA_OPTIONS = "Päivitetään CUDA-asetuksia" + self.STARTING_LLAMACPP_DOWNLOAD = "Aloitetaan llama.cpp:n lataus" + self.UPDATING_CUDA_BACKENDS = "Päivitetään CUDA-taustaosoja" + self.NO_CUDA_BACKEND_SELECTED = "Ei CUDA-taustaosaa valittuna purkua varten" + self.EXTRACTING_CUDA_FILES = "Puretaan CUDA-tiedostoja kansiosta {0} kansioon {1}" + self.DOWNLOAD_ERROR = "Latausvirhe: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Näytetään tehtäväkontekstivalikko" + self.SHOWING_PROPERTIES_FOR_TASK = "Näytetään tehtävän ominaisuudet: {0}" + self.CANCELLING_TASK = "Peruutetaan tehtävää: {0}" + self.CANCELED = "Peruutettu" + self.DELETING_TASK = "Poistetaan tehtävää: {0}" + self.LOADING_MODELS = "Ladataan malleja" + self.LOADED_MODELS = "{0} mallia ladattu" + self.BROWSING_FOR_MODELS_DIRECTORY = "Selaillaan mallikansiota" + self.SELECT_MODELS_DIRECTORY = "Valitse mallikansio" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Selaillaan tulostekansiota" + self.SELECT_OUTPUT_DIRECTORY = "Valitse tulostekansio" + self.BROWSING_FOR_LOGS_DIRECTORY = "Selaillaan lokikansiota" + self.SELECT_LOGS_DIRECTORY = "Valitse lokikansio" + self.BROWSING_FOR_IMATRIX_FILE = "Selaillaan IMatrix-tiedostoa" + self.SELECT_IMATRIX_FILE = "Valitse IMatrix-tiedosto" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mt / {2} Mt)" + self.CPU_USAGE_FORMAT = "CPU:n käyttö: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Vahvistetaan kvantisointisyötteet" + self.MODELS_PATH_REQUIRED = "Mallien polku on pakollinen" + self.OUTPUT_PATH_REQUIRED = "Tulostepolku on pakollinen" + self.LOGS_PATH_REQUIRED = "Lokien polku on pakollinen" + self.STARTING_MODEL_QUANTIZATION = "Aloitetaan mallin kvantisointi" + self.INPUT_FILE_NOT_EXIST = "Syötetiedostoa '{0}' ei ole." 
+ self.QUANTIZING_MODEL_TO = "Kvantisoidaan mallia {0} muotoon {1}" + self.QUANTIZATION_TASK_STARTED = "Kvantisointitehtävä käynnistetty kohteelle {0}" + self.ERROR_STARTING_QUANTIZATION = "Virhe kvantisoinnin käynnistyksessä: {0}" + self.UPDATING_MODEL_INFO = "Päivitetään mallitietoja: {0}" + self.TASK_FINISHED = "Tehtävä valmis: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Näytetään tehtävän tiedot kohteelle: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Selaillaan IMatrix-datatiedostoa" + self.SELECT_DATA_FILE = "Valitse datatiedosto" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Selaillaan IMatrix-mallitiedostoa" + self.SELECT_MODEL_FILE = "Valitse mallitiedosto" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Selaillaan IMatrix-tulostetiedostoa" + self.SELECT_OUTPUT_FILE = "Valitse tulostetiedosto" + self.STARTING_IMATRIX_GENERATION = "Aloitetaan IMatrix-generointi" + self.BACKEND_PATH_NOT_EXIST = "Taustaosan polkua ei ole: {0}" + self.GENERATING_IMATRIX = "Generoidaan IMatrixia" + self.ERROR_STARTING_IMATRIX_GENERATION = "Virhe IMatrix-generoinnin käynnistyksessä: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-generointi käynnistetty" + self.ERROR_MESSAGE = "Virhe: {0}" + self.TASK_ERROR = "Tehtävävirhe: {0}" + self.APPLICATION_CLOSING = "Sovellus suljetaan" + self.APPLICATION_CLOSED = "Sovellus suljettu" + self.SELECT_QUANTIZATION_TYPE = "Valitse kvantisointityyppi" + self.ALLOWS_REQUANTIZING = "Sallii jo kvantisoitujen tensoreiden uudelleenkvantisoinnin" + self.LEAVE_OUTPUT_WEIGHT = "Jättää output.weight-tensorin (uudelleen)kvantisoimatta" + self.DISABLE_K_QUANT_MIXTURES = "Poista käytöstä k-kvanttisekoitukset ja kvantisoi kaikki tensorit samaan tyyppiin" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Käytä tiedoston tietoja kvantisoinnin optimoinnin tärkeysmatriisina" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Käytä tärkeysmatriisia näille tensoreille" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Älä käytä tärkeysmatriisia näille tensoreille" + self.OUTPUT_TENSOR_TYPE = "Tulostensorin tyyppi:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Käytä tätä tyyppiä output.weight-tensorille" + self.TOKEN_EMBEDDING_TYPE = "Token-upotustyyppi:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Käytä tätä tyyppiä token-upotustensorille" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Generoi kvantisoidun mallin samoihin osiin kuin syöte" + self.OVERRIDE_MODEL_METADATA = "Ohita mallitiedot" + self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix-generoinnin syötedatatiedosto" + self.MODEL_TO_BE_QUANTIZED = "Kvantisoitava malli" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Generoidun IMatrixin tulostepolku" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Kuinka usein IMatrix tallennetaan" + self.SET_GPU_OFFLOAD_VALUE = "Aseta GPU-kuormansiirron arvo (-ngl)" + self.COMPLETED = "Valmis" + +class _Bengali(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (স্বয়ংক্রিয় GGUF মডেল কোয়ান্টাইজার)" + self.RAM_USAGE = "RAM ব্যবহার:" + self.CPU_USAGE = "CPU ব্যবহার:" + self.BACKEND = "Llama.cpp ব্যাকএন্ড:" + self.REFRESH_BACKENDS = "ব্যাকএন্ড রিফ্রেশ করুন" + self.MODELS_PATH = "মডেল পাথ:" + self.OUTPUT_PATH = "আউটপুট পাথ:" + self.LOGS_PATH = "লগ পাথ:" + self.BROWSE = "ব্রাউজ করুন" + self.AVAILABLE_MODELS = "উপলব্ধ মডেল:" + self.QUANTIZATION_TYPE = "কোয়ান্টাইজেশন ধরণ:" + self.ALLOW_REQUANTIZE = "পুনরায় কোয়ান্টাইজ করার অনুমতি দিন" + self.LEAVE_OUTPUT_TENSOR = "আউটপুট টেন্সর রেখে দিন" + self.PURE = "বিশুদ্ধ" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "ওজন অন্তর্ভুক্ত করুন:" + 
self.EXCLUDE_WEIGHTS = "ওজন বাদ দিন:" + self.USE_OUTPUT_TENSOR_TYPE = "আউটপুট টেন্সর ধরণ ব্যবহার করুন" + self.USE_TOKEN_EMBEDDING_TYPE = "টোকেন এম্বেডিং ধরণ ব্যবহার করুন" + self.KEEP_SPLIT = "বিভাজন রাখুন" + self.KV_OVERRIDES = "KV ওভাররাইড:" + self.ADD_NEW_OVERRIDE = "নতুন ওভাররাইড যুক্ত করুন" + self.QUANTIZE_MODEL = "মডেল কোয়ান্টাইজ করুন" + self.SAVE_PRESET = "প্রিসেট সংরক্ষণ করুন" + self.LOAD_PRESET = "প্রিসেট লোড করুন" + self.TASKS = "কার্য:" + self.DOWNLOAD_LLAMACPP = "llama.cpp ডাউনলোড করুন" + self.SELECT_RELEASE = "রিলিজ নির্বাচন করুন:" + self.SELECT_ASSET = "অ্যাসেট নির্বাচন করুন:" + self.EXTRACT_CUDA_FILES = "CUDA ফাইলগুলি বের করুন" + self.SELECT_CUDA_BACKEND = "CUDA ব্যাকএন্ড নির্বাচন করুন:" + self.DOWNLOAD = "ডাউনলোড করুন" + self.IMATRIX_GENERATION = "IMatrix জেনারেশন" + self.DATA_FILE = "ডেটা ফাইল:" + self.MODEL = "মডেল:" + self.OUTPUT = "আউটপুট:" + self.OUTPUT_FREQUENCY = "আউটপুট ফ্রিকোয়েন্সি:" + self.GPU_OFFLOAD = "GPU অফলোড:" + self.AUTO = "স্বয়ংক্রিয়" + self.GENERATE_IMATRIX = "IMatrix তৈরি করুন" + self.ERROR = "ত্রুটি" + self.WARNING = "সতর্কীকরণ" + self.PROPERTIES = "বৈশিষ্ট্য" + self.CANCEL = "বাতিল করুন" + self.RESTART = "পুনরায় আরম্ভ করুন" + self.DELETE = "মুছে ফেলুন" + self.CONFIRM_DELETION = "আপনি কি নিশ্চিত যে আপনি এই কাজটি মুছে ফেলতে চান?" + self.TASK_RUNNING_WARNING = "কিছু কাজ এখনও চলছে। আপনি কি নিশ্চিত যে আপনি প্রস্থান করতে চান?" + self.YES = "হ্যাঁ" + self.NO = "না" + self.DOWNLOAD_COMPLETE = "ডাউনলোড সম্পন্ন" + self.CUDA_EXTRACTION_FAILED = "CUDA এক্সট্র্যাকশন ব্যর্থ" + self.PRESET_SAVED = "প্রিসেট সংরক্ষিত" + self.PRESET_LOADED = "প্রিসেট লোড করা হয়েছে" + self.NO_ASSET_SELECTED = "কোন অ্যাসেট নির্বাচন করা হয়নি" + self.DOWNLOAD_FAILED = "ডাউনলোড ব্যর্থ" + self.NO_BACKEND_SELECTED = "কোন ব্যাকএন্ড নির্বাচন করা হয়নি" + self.NO_MODEL_SELECTED = "কোন মডেল নির্বাচন করা হয়নি" + self.REFRESH_RELEASES = "রিলিজগুলি রিফ্রেশ করুন" + self.NO_SUITABLE_CUDA_BACKENDS = "কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে\nCUDA ফাইলগুলি {1} এ বের করা হয়েছে" + self.CUDA_FILES_EXTRACTED = "CUDA ফাইলগুলি তে বের করা হয়েছে" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "এক্সট্র্যাকশনের জন্য কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি" + self.ERROR_FETCHING_RELEASES = "রিলিজগুলি আনতে ত্রুটি: {0}" + self.CONFIRM_DELETION_TITLE = "মুছে ফেলা নিশ্চিত করুন" + self.LOG_FOR = "{0} এর জন্য লগ" + self.ALL_FILES = "সমস্ত ফাইল (*)" + self.GGUF_FILES = "GGUF ফাইল (*.gguf)" + self.DAT_FILES = "DAT ফাইল (*.dat)" + self.JSON_FILES = "JSON ফাইল (*.json)" + self.FAILED_LOAD_PRESET = "প্রিসেট লোড করতে ব্যর্থ: {0}" + self.INITIALIZING_AUTOGGUF = "AutoGGUF অ্যাপ্লিকেশন শুরু হচ্ছে" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF ইনিশিয়ালাইজেশন সম্পন্ন" + self.REFRESHING_BACKENDS = "ব্যাকএন্ডগুলি রিফ্রেশ করা হচ্ছে" + self.NO_BACKENDS_AVAILABLE = "কোন ব্যাকএন্ড উপলব্ধ নেই" + self.FOUND_VALID_BACKENDS = "{0} টি বৈধ ব্যাকএন্ড পাওয়া গেছে" + self.SAVING_PRESET = "প্রিসেট সংরক্ষণ করা হচ্ছে" + self.PRESET_SAVED_TO = "{0} এ প্রিসেট সংরক্ষিত" + self.LOADING_PRESET = "প্রিসেট লোড করা হচ্ছে" + self.PRESET_LOADED_FROM = "{0} থেকে প্রিসেট লোড করা হয়েছে" + self.ADDING_KV_OVERRIDE = "KV ওভাররাইড যুক্ত করা হচ্ছে: {0}" + self.SAVING_TASK_PRESET = "{0} এর জন্য টাস্ক প্রিসেট সংরক্ষণ করা হচ্ছে" + self.TASK_PRESET_SAVED = "টাস্ক প্রিসেট সংরক্ষিত" + self.TASK_PRESET_SAVED_TO = "{0} এ টাস্ক প্রিসেট সংরক্ষিত" + self.RESTARTING_TASK = "টাস্ক পুনরায় শুরু করা হচ্ছে: {0}" + self.IN_PROGRESS = "চলছে" + 
self.DOWNLOAD_FINISHED_EXTRACTED_TO = "ডাউনলোড সম্পন্ন। বের করা হয়েছে: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে\nCUDA ফাইলগুলি {1} এ বের করা হয়েছে" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "এক্সট্র্যাকশনের জন্য কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে" + self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp রিলিজগুলি রিফ্রেশ করা হচ্ছে" + self.UPDATING_ASSET_LIST = "অ্যাসেট তালিকা আপডেট করা হচ্ছে" + self.UPDATING_CUDA_OPTIONS = "CUDA অপশনগুলি আপডেট করা হচ্ছে" + self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp ডাউনলোড শুরু করা হচ্ছে" + self.UPDATING_CUDA_BACKENDS = "CUDA ব্যাকএন্ডগুলি আপডেট করা হচ্ছে" + self.NO_CUDA_BACKEND_SELECTED = "এক্সট্র্যাকশনের জন্য কোন CUDA ব্যাকএন্ড নির্বাচন করা হয়নি" + self.EXTRACTING_CUDA_FILES = "{0} থেকে {1} এ CUDA ফাইলগুলি বের করা হচ্ছে" + self.DOWNLOAD_ERROR = "ডাউনলোড ত্রুটি: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "টাস্ক কনটেক্সট মেনু দেখানো হচ্ছে" + self.SHOWING_PROPERTIES_FOR_TASK = "টাস্কের জন্য বৈশিষ্ট্য দেখানো হচ্ছে: {0}" + self.CANCELLING_TASK = "টাস্ক বাতিল করা হচ্ছে: {0}" + self.CANCELED = "বাতিল করা হয়েছে" + self.DELETING_TASK = "টাস্ক মুছে ফেলা হচ্ছে: {0}" + self.LOADING_MODELS = "মডেলগুলি লোড করা হচ্ছে" + self.LOADED_MODELS = "{0} টি মডেল লোড করা হয়েছে" + self.BROWSING_FOR_MODELS_DIRECTORY = "মডেল ডিরেক্টরি ব্রাউজ করা হচ্ছে" + self.SELECT_MODELS_DIRECTORY = "মডেল ডিরেক্টরি নির্বাচন করুন" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "আউটপুট ডিরেক্টরি ব্রাউজ করা হচ্ছে" + self.SELECT_OUTPUT_DIRECTORY = "আউটপুট ডিরেক্টরি নির্বাচন করুন" + self.BROWSING_FOR_LOGS_DIRECTORY = "লগ ডিরেক্টরি ব্রাউজ করা হচ্ছে" + self.SELECT_LOGS_DIRECTORY = "লগ ডিরেক্টরি নির্বাচন করুন" + self.BROWSING_FOR_IMATRIX_FILE = "IMatrix ফাইল ব্রাউজ করা হচ্ছে" + self.SELECT_IMATRIX_FILE = "IMatrix ফাইল নির্বাচন করুন" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU ব্যবহার: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "কোয়ান্টাইজেশন ইনপুট যাচাই করা হচ্ছে" + self.MODELS_PATH_REQUIRED = "মডেল পাথ প্রয়োজন" + self.OUTPUT_PATH_REQUIRED = "আউটপুট পাথ প্রয়োজন" + self.LOGS_PATH_REQUIRED = "লগ পাথ প্রয়োজন" + self.STARTING_MODEL_QUANTIZATION = "মডেল কোয়ান্টাইজেশন শুরু হচ্ছে" + self.INPUT_FILE_NOT_EXIST = "ইনপুট ফাইল '{0}' বিদ্যমান নেই।" + self.QUANTIZING_MODEL_TO = "{0} কে {1} এ কোয়ান্টাইজ করা হচ্ছে" + self.QUANTIZATION_TASK_STARTED = "{0} এর জন্য কোয়ান্টাইজেশন টাস্ক শুরু হয়েছে" + self.ERROR_STARTING_QUANTIZATION = "কোয়ান্টাইজেশন শুরু করতে ত্রুটি: {0}" + self.UPDATING_MODEL_INFO = "মডেল তথ্য আপডেট করা হচ্ছে: {0}" + self.TASK_FINISHED = "টাস্ক সম্পন্ন: {0}" + self.SHOWING_TASK_DETAILS_FOR = "এর জন্য টাস্কের বিবরণ দেখানো হচ্ছে: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix ডেটা ফাইল ব্রাউজ করা হচ্ছে" + self.SELECT_DATA_FILE = "ডেটা ফাইল নির্বাচন করুন" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix মডেল ফাইল ব্রাউজ করা হচ্ছে" + self.SELECT_MODEL_FILE = "মডেল ফাইল নির্বাচন করুন" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix আউটপুট ফাইল ব্রাউজ করা হচ্ছে" + self.SELECT_OUTPUT_FILE = "আউটপুট ফাইল নির্বাচন করুন" + self.STARTING_IMATRIX_GENERATION = "IMatrix জেনারেশন শুরু হচ্ছে" + self.BACKEND_PATH_NOT_EXIST = "ব্যাকএন্ড পাথ বিদ্যমান নেই: {0}" + self.GENERATING_IMATRIX = "IMatrix তৈরি করা হচ্ছে" + self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix জেনারেশন শুরু করতে ত্রুটি: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix জেনারেশন টাস্ক শুরু হয়েছে" + self.ERROR_MESSAGE = 
"ত্রুটি: {0}" + self.TASK_ERROR = "টাস্ক ত্রুটি: {0}" + self.APPLICATION_CLOSING = "অ্যাপ্লিকেশন বন্ধ করা হচ্ছে" + self.APPLICATION_CLOSED = "অ্যাপ্লিকেশন বন্ধ" + self.SELECT_QUANTIZATION_TYPE = "কোয়ান্টাইজেশন ধরণ নির্বাচন করুন" + self.ALLOWS_REQUANTIZING = "যে টেন্সরগুলি ইতিমধ্যে কোয়ান্টাইজ করা হয়েছে তাদের পুনরায় কোয়ান্টাইজ করার অনুমতি দেয়" + self.LEAVE_OUTPUT_WEIGHT = "output.weight কে (পুনরায়) কোয়ান্টাইজ না করে রেখে দেবে" + self.DISABLE_K_QUANT_MIXTURES = "k-কোয়ান্ট মিশ্রণগুলি অক্ষম করুন এবং সমস্ত টেন্সরকে একই ধরণের কোয়ান্টাইজ করুন" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "কোয়ান্ট অপ্টিমাইজেশনের জন্য ফাইলের ডেটা গুরুত্বপূর্ণ ম্যাট্রিক্স হিসাবে ব্যবহার করুন" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "এই টেন্সরগুলির জন্য গুরুত্বপূর্ণ ম্যাট্রিক্স ব্যবহার করুন" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "এই টেন্সরগুলির জন্য গুরুত্বপূর্ণ ম্যাট্রিক্স ব্যবহার করবেন না" + self.OUTPUT_TENSOR_TYPE = "আউটপুট টেন্সর ধরণ:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weight টেন্সরের জন্য এই ধরণটি ব্যবহার করুন" + self.TOKEN_EMBEDDING_TYPE = "টোকেন এম্বেডিং ধরণ:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "টোকেন এম্বেডিং টেন্সরের জন্য এই ধরণটি ব্যবহার করুন" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "ইনপুটের মতো একই শার্ডে কোয়ান্টাইজ করা মডেল তৈরি করবে" + self.OVERRIDE_MODEL_METADATA = "মডেল মেটাডেটা ওভাররাইড করুন" + self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix জেনারেশনের জন্য ইনপুট ডেটা ফাইল" + self.MODEL_TO_BE_QUANTIZED = "কোয়ান্টাইজ করার জন্য মডেল" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "তৈরি করা IMatrix এর জন্য আউটপুট পাথ" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix কতবার সংরক্ষণ করবেন" + self.SET_GPU_OFFLOAD_VALUE = "GPU অফলোড মান সেট করুন (-ngl)" + self.COMPLETED = "সম্পন্ন" + +class _Polish(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Automatyczny kwantyzator modeli GGUF)" + self.RAM_USAGE = "Użycie pamięci RAM:" + self.CPU_USAGE = "Użycie procesora:" + self.BACKEND = "Backend Llama.cpp:" + self.REFRESH_BACKENDS = "Odśwież backendy" + self.MODELS_PATH = "Ścieżka modeli:" + self.OUTPUT_PATH = "Ścieżka wyjściowa:" + self.LOGS_PATH = "Ścieżka logów:" + self.BROWSE = "Przeglądaj" + self.AVAILABLE_MODELS = "Dostępne modele:" + self.QUANTIZATION_TYPE = "Typ kwantyzacji:" + self.ALLOW_REQUANTIZE = "Zezwól na ponowną kwantyzację" + self.LEAVE_OUTPUT_TENSOR = "Pozostaw tensor wyjściowy" + self.PURE = "Czysty" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Uwzględnij wagi:" + self.EXCLUDE_WEIGHTS = "Wyklucz wagi:" + self.USE_OUTPUT_TENSOR_TYPE = "Użyj typu tensora wyjściowego" + self.USE_TOKEN_EMBEDDING_TYPE = "Użyj typu osadzania tokenów" + self.KEEP_SPLIT = "Zachowaj podział" + self.KV_OVERRIDES = "Nadpisania KV:" + self.ADD_NEW_OVERRIDE = "Dodaj nowe nadpisanie" + self.QUANTIZE_MODEL = "Kwantyzuj model" + self.SAVE_PRESET = "Zapisz ustawienia predefiniowane" + self.LOAD_PRESET = "Wczytaj ustawienia predefiniowane" + self.TASKS = "Zadania:" + self.DOWNLOAD_LLAMACPP = "Pobierz llama.cpp" + self.SELECT_RELEASE = "Wybierz wersję:" + self.SELECT_ASSET = "Wybierz zasób:" + self.EXTRACT_CUDA_FILES = "Wyodrębnij pliki CUDA" + self.SELECT_CUDA_BACKEND = "Wybierz backend CUDA:" + self.DOWNLOAD = "Pobierz" + self.IMATRIX_GENERATION = "Generowanie IMatrix" + self.DATA_FILE = "Plik danych:" + self.MODEL = "Model:" + self.OUTPUT = "Wyjście:" + self.OUTPUT_FREQUENCY = "Częstotliwość wyjścia:" + self.GPU_OFFLOAD = "Odciążenie GPU:" + self.AUTO = "Automatyczny" + self.GENERATE_IMATRIX = "Generuj IMatrix" + self.ERROR = "Błąd" + 
self.WARNING = "Ostrzeżenie" + self.PROPERTIES = "Właściwości" + self.CANCEL = "Anuluj" + self.RESTART = "Uruchom ponownie" + self.DELETE = "Usuń" + self.CONFIRM_DELETION = "Czy na pewno chcesz usunąć to zadanie?" + self.TASK_RUNNING_WARNING = "Niektóre zadania są nadal uruchomione. Czy na pewno chcesz wyjść?" + self.YES = "Tak" + self.NO = "Nie" + self.DOWNLOAD_COMPLETE = "Pobieranie zakończone" + self.CUDA_EXTRACTION_FAILED = "Wyodrębnianie CUDA nie powiodło się" + self.PRESET_SAVED = "Ustawienia predefiniowane zapisane" + self.PRESET_LOADED = "Ustawienia predefiniowane wczytane" + self.NO_ASSET_SELECTED = "Nie wybrano zasobu" + self.DOWNLOAD_FAILED = "Pobieranie nie powiodło się" + self.NO_BACKEND_SELECTED = "Nie wybrano backendu" + self.NO_MODEL_SELECTED = "Nie wybrano modelu" + self.REFRESH_RELEASES = "Odśwież wersje" + self.NO_SUITABLE_CUDA_BACKENDS = "Nie znaleziono odpowiednich backendów CUDA" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}\nPliki CUDA wyodrębnione do {1}" + self.CUDA_FILES_EXTRACTED = "Pliki CUDA wyodrębnione do" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nie znaleziono odpowiedniego backendu CUDA do wyodrębnienia" + self.ERROR_FETCHING_RELEASES = "Błąd podczas pobierania wersji: {0}" + self.CONFIRM_DELETION_TITLE = "Potwierdź usunięcie" + self.LOG_FOR = "Dziennik dla {0}" + self.ALL_FILES = "Wszystkie pliki (*)" + self.GGUF_FILES = "Pliki GGUF (*.gguf)" + self.DAT_FILES = "Pliki DAT (*.dat)" + self.JSON_FILES = "Pliki JSON (*.json)" + self.FAILED_LOAD_PRESET = "Nie udało się wczytać ustawień predefiniowanych: {0}" + self.INITIALIZING_AUTOGGUF = "Inicjalizacja aplikacji AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicjalizacja AutoGGUF zakończona" + self.REFRESHING_BACKENDS = "Odświeżanie backendów" + self.NO_BACKENDS_AVAILABLE = "Brak dostępnych backendów" + self.FOUND_VALID_BACKENDS = "Znaleziono {0} prawidłowych backendów" + self.SAVING_PRESET = "Zapisywanie ustawień predefiniowanych" + self.PRESET_SAVED_TO = "Ustawienia predefiniowane zapisane do {0}" + self.LOADING_PRESET = "Wczytywanie ustawień predefiniowanych" + self.PRESET_LOADED_FROM = "Ustawienia predefiniowane wczytane z {0}" + self.ADDING_KV_OVERRIDE = "Dodawanie nadpisania KV: {0}" + self.SAVING_TASK_PRESET = "Zapisywanie ustawień predefiniowanych zadania dla {0}" + self.TASK_PRESET_SAVED = "Ustawienia predefiniowane zadania zapisane" + self.TASK_PRESET_SAVED_TO = "Ustawienia predefiniowane zadania zapisane do {0}" + self.RESTARTING_TASK = "Ponowne uruchamianie zadania: {0}" + self.IN_PROGRESS = "W trakcie" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Pobieranie zakończone. 
Wyodrębniono do: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}\nPliki CUDA wyodrębnione do {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nie znaleziono odpowiedniego backendu CUDA do wyodrębnienia" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Odświeżanie wersji llama.cpp" + self.UPDATING_ASSET_LIST = "Aktualizacja listy zasobów" + self.UPDATING_CUDA_OPTIONS = "Aktualizacja opcji CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Rozpoczynanie pobierania llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Aktualizacja backendów CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Nie wybrano backendu CUDA do wyodrębnienia" + self.EXTRACTING_CUDA_FILES = "Wyodrębnianie plików CUDA z {0} do {1}" + self.DOWNLOAD_ERROR = "Błąd pobierania: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Wyświetlanie menu kontekstowego zadania" + self.SHOWING_PROPERTIES_FOR_TASK = "Wyświetlanie właściwości zadania: {0}" + self.CANCELLING_TASK = "Anulowanie zadania: {0}" + self.CANCELED = "Anulowano" + self.DELETING_TASK = "Usuwanie zadania: {0}" + self.LOADING_MODELS = "Ładowanie modeli" + self.LOADED_MODELS = "Załadowano {0} modeli" + self.BROWSING_FOR_MODELS_DIRECTORY = "Przeglądanie katalogu modeli" + self.SELECT_MODELS_DIRECTORY = "Wybierz katalog modeli" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Przeglądanie katalogu wyjściowego" + self.SELECT_OUTPUT_DIRECTORY = "Wybierz katalog wyjściowy" + self.BROWSING_FOR_LOGS_DIRECTORY = "Przeglądanie katalogu logów" + self.SELECT_LOGS_DIRECTORY = "Wybierz katalog logów" + self.BROWSING_FOR_IMATRIX_FILE = "Przeglądanie pliku IMatrix" + self.SELECT_IMATRIX_FILE = "Wybierz plik IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "Użycie procesora: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Walidacja danych wejściowych kwantyzacji" + self.MODELS_PATH_REQUIRED = "Ścieżka modeli jest wymagana" + self.OUTPUT_PATH_REQUIRED = "Ścieżka wyjściowa jest wymagana" + self.LOGS_PATH_REQUIRED = "Ścieżka logów jest wymagana" + self.STARTING_MODEL_QUANTIZATION = "Rozpoczynanie kwantyzacji modelu" + self.INPUT_FILE_NOT_EXIST = "Plik wejściowy '{0}' nie istnieje." 
+ self.QUANTIZING_MODEL_TO = "Kwantyzacja {0} do {1}" + self.QUANTIZATION_TASK_STARTED = "Zadanie kwantyzacji uruchomione dla {0}" + self.ERROR_STARTING_QUANTIZATION = "Błąd podczas uruchamiania kwantyzacji: {0}" + self.UPDATING_MODEL_INFO = "Aktualizacja informacji o modelu: {0}" + self.TASK_FINISHED = "Zadanie zakończone: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Wyświetlanie szczegółów zadania dla: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Przeglądanie pliku danych IMatrix" + self.SELECT_DATA_FILE = "Wybierz plik danych" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Przeglądanie pliku modelu IMatrix" + self.SELECT_MODEL_FILE = "Wybierz plik modelu" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Przeglądanie pliku wyjściowego IMatrix" + self.SELECT_OUTPUT_FILE = "Wybierz plik wyjściowy" + self.STARTING_IMATRIX_GENERATION = "Rozpoczynanie generowania IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Ścieżka backendu nie istnieje: {0}" + self.GENERATING_IMATRIX = "Generowanie IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Błąd podczas uruchamiania generowania IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Zadanie generowania IMatrix uruchomione" + self.ERROR_MESSAGE = "Błąd: {0}" + self.TASK_ERROR = "Błąd zadania: {0}" + self.APPLICATION_CLOSING = "Zamykanie aplikacji" + self.APPLICATION_CLOSED = "Aplikacja zamknięta" + self.SELECT_QUANTIZATION_TYPE = "Wybierz typ kwantyzacji" + self.ALLOWS_REQUANTIZING = "Pozwala na ponowną kwantyzację tensorów, które zostały już skwantyzowane" + self.LEAVE_OUTPUT_WEIGHT = "Pozostawi output.weight nieskwantyzowany (lub nieskwantyzowany ponownie)" + self.DISABLE_K_QUANT_MIXTURES = "Wyłącz mieszanki k-kwant i kwantyzuj wszystkie tensory do tego samego typu" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Użyj danych w pliku jako macierzy ważności dla optymalizacji kwantyzacji" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Użyj macierzy ważności dla tych tensorów" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Nie używaj macierzy ważności dla tych tensorów" + self.OUTPUT_TENSOR_TYPE = "Typ tensora wyjściowego:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Użyj tego typu dla tensora output.weight" + self.TOKEN_EMBEDDING_TYPE = "Typ osadzania tokenów:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Użyj tego typu dla tensora osadzania tokenów" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Wygeneruje skwantyzowany model w tych samych fragmentach co dane wejściowe" + self.OVERRIDE_MODEL_METADATA = "Zastąp metadane modelu" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Plik danych wejściowych do generowania IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Model do kwantyzacji" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Ścieżka wyjściowa dla wygenerowanego IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Jak często zapisywać IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Ustaw wartość odciążenia GPU (-ngl)" + self.COMPLETED = "Ukończono" + +class _Romanian(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Cuantizator automat de modele GGUF)" + self.RAM_USAGE = "Utilizare RAM:" + self.CPU_USAGE = "Utilizare CPU:" + self.BACKEND = "Backend Llama.cpp:" + self.REFRESH_BACKENDS = "Reîmprospătați backends" + self.MODELS_PATH = "Cale modele:" + self.OUTPUT_PATH = "Cale ieșire:" + self.LOGS_PATH = "Cale jurnale:" + self.BROWSE = "Răsfoiți" + self.AVAILABLE_MODELS = "Modele disponibile:" + self.QUANTIZATION_TYPE = "Tipul de cuantizare:" + self.ALLOW_REQUANTIZE = "Permiteți recuantizarea" + self.LEAVE_OUTPUT_TENSOR = "Lăsați tensorul de ieșire" + self.PURE = 
"Pur" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Includeți ponderile:" + self.EXCLUDE_WEIGHTS = "Excludeți ponderile:" + self.USE_OUTPUT_TENSOR_TYPE = "Utilizați tipul tensorului de ieșire" + self.USE_TOKEN_EMBEDDING_TYPE = "Utilizați tipul de încorporare a tokenului" + self.KEEP_SPLIT = "Păstrați divizarea" + self.KV_OVERRIDES = "Suprascrieri KV:" + self.ADD_NEW_OVERRIDE = "Adăugați o nouă suprascriere" + self.QUANTIZE_MODEL = "Cuantizați modelul" + self.SAVE_PRESET = "Salvați presetarea" + self.LOAD_PRESET = "Încărcați presetarea" + self.TASKS = "Sarcini:" + self.DOWNLOAD_LLAMACPP = "Descărcați llama.cpp" + self.SELECT_RELEASE = "Selectați versiunea:" + self.SELECT_ASSET = "Selectați activul:" + self.EXTRACT_CUDA_FILES = "Extrageți fișierele CUDA" + self.SELECT_CUDA_BACKEND = "Selectați backend CUDA:" + self.DOWNLOAD = "Descărcați" + self.IMATRIX_GENERATION = "Generare IMatrix" + self.DATA_FILE = "Fișier de date:" + self.MODEL = "Model:" + self.OUTPUT = "Ieșire:" + self.OUTPUT_FREQUENCY = "Frecvența ieșirii:" + self.GPU_OFFLOAD = "Descărcare GPU:" + self.AUTO = "Automat" + self.GENERATE_IMATRIX = "Generați IMatrix" + self.ERROR = "Eroare" + self.WARNING = "Avertisment" + self.PROPERTIES = "Proprietăți" + self.CANCEL = "Anulați" + self.RESTART = "Reporniți" + self.DELETE = "Ștergeți" + self.CONFIRM_DELETION = "Sunteți sigur că doriți să ștergeți această sarcină?" + self.TASK_RUNNING_WARNING = "Unele sarcini sunt încă în curs de execuție. Sunteți sigur că doriți să ieșiți?" + self.YES = "Da" + self.NO = "Nu" + self.DOWNLOAD_COMPLETE = "Descărcare finalizată" + self.CUDA_EXTRACTION_FAILED = "Extragerea CUDA a eșuat" + self.PRESET_SAVED = "Presetare salvată" + self.PRESET_LOADED = "Presetare încărcată" + self.NO_ASSET_SELECTED = "Niciun activ selectat" + self.DOWNLOAD_FAILED = "Descărcarea a eșuat" + self.NO_BACKEND_SELECTED = "Niciun backend selectat" + self.NO_MODEL_SELECTED = "Niciun model selectat" + self.REFRESH_RELEASES = "Reîmprospătați versiunile" + self.NO_SUITABLE_CUDA_BACKENDS = "Nu s-au găsit backends CUDA potrivite" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Fișierul binar llama.cpp a fost descărcat și extras în {0}\nFișierele CUDA au fost extrase în {1}" + self.CUDA_FILES_EXTRACTED = "Fișierele CUDA au fost extrase în" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nu s-a găsit un backend CUDA potrivit pentru extragere" + self.ERROR_FETCHING_RELEASES = "Eroare la preluarea versiunilor: {0}" + self.CONFIRM_DELETION_TITLE = "Confirmați ștergerea" + self.LOG_FOR = "Jurnal pentru {0}" + self.ALL_FILES = "Toate fișierele (*)" + self.GGUF_FILES = "Fișiere GGUF (*.gguf)" + self.DAT_FILES = "Fișiere DAT (*.dat)" + self.JSON_FILES = "Fișiere JSON (*.json)" + self.FAILED_LOAD_PRESET = "Nu s-a putut încărca presetarea: {0}" + self.INITIALIZING_AUTOGGUF = "Inițializarea aplicației AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inițializarea AutoGGUF finalizată" + self.REFRESHING_BACKENDS = "Reîmprospătarea backends" + self.NO_BACKENDS_AVAILABLE = "Nu există backends disponibile" + self.FOUND_VALID_BACKENDS = "S-au găsit {0} backends valide" + self.SAVING_PRESET = "Salvarea presetării" + self.PRESET_SAVED_TO = "Presetare salvată în {0}" + self.LOADING_PRESET = "Încărcarea presetării" + self.PRESET_LOADED_FROM = "Presetare încărcată din {0}" + self.ADDING_KV_OVERRIDE = "Adăugarea suprascrierii KV: {0}" + self.SAVING_TASK_PRESET = "Salvarea presetării sarcinii pentru {0}" + self.TASK_PRESET_SAVED = "Presetare sarcină salvată" + self.TASK_PRESET_SAVED_TO = "Presetare sarcină 
salvată în {0}" + self.RESTARTING_TASK = "Repornirea sarcinii: {0}" + self.IN_PROGRESS = "În curs" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Descărcare finalizată. Extras în: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Fișierul binar llama.cpp a fost descărcat și extras în {0}\nFișierele CUDA au fost extrase în {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nu s-a găsit un backend CUDA potrivit pentru extragere" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Fișierul binar llama.cpp a fost descărcat și extras în {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Reîmprospătarea versiunilor llama.cpp" + self.UPDATING_ASSET_LIST = "Actualizarea listei de active" + self.UPDATING_CUDA_OPTIONS = "Actualizarea opțiunilor CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Începerea descărcării llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Actualizarea backends CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Niciun backend CUDA selectat pentru extragere" + self.EXTRACTING_CUDA_FILES = "Extragerea fișierelor CUDA din {0} în {1}" + self.DOWNLOAD_ERROR = "Eroare de descărcare: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Afișarea meniului contextual al sarcinii" + self.SHOWING_PROPERTIES_FOR_TASK = "Afișarea proprietăților pentru sarcina: {0}" + self.CANCELLING_TASK = "Anularea sarcinii: {0}" + self.CANCELED = "Anulat" + self.DELETING_TASK = "Ștergerea sarcinii: {0}" + self.LOADING_MODELS = "Încărcarea modelelor" + self.LOADED_MODELS = "{0} modele încărcate" + self.BROWSING_FOR_MODELS_DIRECTORY = "Răsfoirea directorului de modele" + self.SELECT_MODELS_DIRECTORY = "Selectați directorul de modele" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Răsfoirea directorului de ieșire" + self.SELECT_OUTPUT_DIRECTORY = "Selectați directorul de ieșire" + self.BROWSING_FOR_LOGS_DIRECTORY = "Răsfoirea directorului de jurnale" + self.SELECT_LOGS_DIRECTORY = "Selectați directorul de jurnale" + self.BROWSING_FOR_IMATRIX_FILE = "Răsfoirea fișierului IMatrix" + self.SELECT_IMATRIX_FILE = "Selectați fișierul IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "Utilizare CPU: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Validarea intrărilor de cuantizare" + self.MODELS_PATH_REQUIRED = "Calea modelelor este obligatorie" + self.OUTPUT_PATH_REQUIRED = "Calea ieșirii este obligatorie" + self.LOGS_PATH_REQUIRED = "Calea jurnalelor este obligatorie" + self.STARTING_MODEL_QUANTIZATION = "Pornirea cuantizării modelului" + self.INPUT_FILE_NOT_EXIST = "Fișierul de intrare '{0}' nu există." 
+ self.QUANTIZING_MODEL_TO = "Cuantizarea {0} la {1}" + self.QUANTIZATION_TASK_STARTED = "Sarcina de cuantizare a fost pornită pentru {0}" + self.ERROR_STARTING_QUANTIZATION = "Eroare la pornirea cuantizării: {0}" + self.UPDATING_MODEL_INFO = "Actualizarea informațiilor despre model: {0}" + self.TASK_FINISHED = "Sarcină finalizată: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Afișarea detaliilor sarcinii pentru: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Răsfoirea fișierului de date IMatrix" + self.SELECT_DATA_FILE = "Selectați fișierul de date" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Răsfoirea fișierului de model IMatrix" + self.SELECT_MODEL_FILE = "Selectați fișierul model" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Răsfoirea fișierului de ieșire IMatrix" + self.SELECT_OUTPUT_FILE = "Selectați fișierul de ieșire" + self.STARTING_IMATRIX_GENERATION = "Pornirea generării IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Calea backendului nu există: {0}" + self.GENERATING_IMATRIX = "Generarea IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Eroare la pornirea generării IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Sarcina de generare IMatrix a fost pornită" + self.ERROR_MESSAGE = "Eroare: {0}" + self.TASK_ERROR = "Eroare de sarcină: {0}" + self.APPLICATION_CLOSING = "Închiderea aplicației" + self.APPLICATION_CLOSED = "Aplicație închisă" + self.SELECT_QUANTIZATION_TYPE = "Selectați tipul de cuantizare" + self.ALLOWS_REQUANTIZING = "Permite recuantizarea tensorilor care au fost deja cuantizați" + self.LEAVE_OUTPUT_WEIGHT = "Va lăsa output.weight necuantizat (sau recuantizat)" + self.DISABLE_K_QUANT_MIXTURES = "Dezactivați mixurile k-quant și cuantizați toți tensorii la același tip" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilizați datele din fișier ca matrice de importanță pentru optimizările de cuantizare" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Utilizați matricea de importanță pentru acești tensori" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Nu utilizați matricea de importanță pentru acești tensori" + self.OUTPUT_TENSOR_TYPE = "Tipul tensorului de ieșire:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Utilizați acest tip pentru tensorul output.weight" + self.TOKEN_EMBEDDING_TYPE = "Tipul de încorporare a tokenului:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Utilizați acest tip pentru tensorul de încorporări ale tokenului" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Va genera modelul cuantizat în aceleași fragmente ca și intrarea" + self.OVERRIDE_MODEL_METADATA = "Suprascrieți metadatele modelului" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Fișier de date de intrare pentru generarea IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Modelul de cuantizat" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Calea de ieșire pentru IMatrix generat" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Cât de des să salvați IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Setați valoarea de descărcare GPU (-ngl)" + self.COMPLETED = "Finalizat" + +class _Czech(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Automatický kvantizátor modelů GGUF)" + self.RAM_USAGE = "Využití RAM:" + self.CPU_USAGE = "Využití CPU:" + self.BACKEND = "Backend Llama.cpp:" + self.REFRESH_BACKENDS = "Obnovit backendy" + self.MODELS_PATH = "Cesta k modelům:" + self.OUTPUT_PATH = "Výstupní cesta:" + self.LOGS_PATH = "Cesta k logům:" + self.BROWSE = "Procházet" + self.AVAILABLE_MODELS = "Dostupné modely:" + self.QUANTIZATION_TYPE = "Typ kvantizace:" + self.ALLOW_REQUANTIZE = "Povolit rekvantizaci" + 
self.LEAVE_OUTPUT_TENSOR = "Ponechat výstupní tenzor" + self.PURE = "Čistý" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Zahrnout váhy:" + self.EXCLUDE_WEIGHTS = "Vyloučit váhy:" + self.USE_OUTPUT_TENSOR_TYPE = "Použít typ výstupního tenzoru" + self.USE_TOKEN_EMBEDDING_TYPE = "Použít typ vkládání tokenů" + self.KEEP_SPLIT = "Zachovat rozdělení" + self.KV_OVERRIDES = "Přepsání KV:" + self.ADD_NEW_OVERRIDE = "Přidat nové přepsání" + self.QUANTIZE_MODEL = "Kvantizovat model" + self.SAVE_PRESET = "Uložit předvolbu" + self.LOAD_PRESET = "Načíst předvolbu" + self.TASKS = "Úkoly:" + self.DOWNLOAD_LLAMACPP = "Stáhnout llama.cpp" + self.SELECT_RELEASE = "Vybrat verzi:" + self.SELECT_ASSET = "Vybrat aktivum:" + self.EXTRACT_CUDA_FILES = "Extrahovat soubory CUDA" + self.SELECT_CUDA_BACKEND = "Vybrat backend CUDA:" + self.DOWNLOAD = "Stáhnout" + self.IMATRIX_GENERATION = "Generování IMatrix" + self.DATA_FILE = "Datový soubor:" + self.MODEL = "Model:" + self.OUTPUT = "Výstup:" + self.OUTPUT_FREQUENCY = "Frekvence výstupu:" + self.GPU_OFFLOAD = "Odlehčení GPU:" + self.AUTO = "Automaticky" + self.GENERATE_IMATRIX = "Generovat IMatrix" + self.ERROR = "Chyba" + self.WARNING = "Varování" + self.PROPERTIES = "Vlastnosti" + self.CANCEL = "Zrušit" + self.RESTART = "Restartovat" + self.DELETE = "Smazat" + self.CONFIRM_DELETION = "Jste si jisti, že chcete smazat tento úkol?" + self.TASK_RUNNING_WARNING = "Některé úkoly stále běží. Jste si jisti, že chcete ukončit?" + self.YES = "Ano" + self.NO = "Ne" + self.DOWNLOAD_COMPLETE = "Stahování dokončeno" + self.CUDA_EXTRACTION_FAILED = "Extrahování CUDA se nezdařilo" + self.PRESET_SAVED = "Předvolba uložena" + self.PRESET_LOADED = "Předvolba načtena" + self.NO_ASSET_SELECTED = "Nebylo vybráno žádné aktivum" + self.DOWNLOAD_FAILED = "Stahování se nezdařilo" + self.NO_BACKEND_SELECTED = "Nebyl vybrán žádný backend" + self.NO_MODEL_SELECTED = "Nebyl vybrán žádný model" + self.REFRESH_RELEASES = "Obnovit verze" + self.NO_SUITABLE_CUDA_BACKENDS = "Nebyly nalezeny žádné vhodné backendy CUDA" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binární soubor llama.cpp byl stažen a extrahován do {0}\nSoubory CUDA extrahovány do {1}" + self.CUDA_FILES_EXTRACTED = "Soubory CUDA extrahovány do" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nebyl nalezen žádný vhodný backend CUDA pro extrakci" + self.ERROR_FETCHING_RELEASES = "Chyba při načítání verzí: {0}" + self.CONFIRM_DELETION_TITLE = "Potvrdit smazání" + self.LOG_FOR = "Log pro {0}" + self.ALL_FILES = "Všechny soubory (*)" + self.GGUF_FILES = "Soubory GGUF (*.gguf)" + self.DAT_FILES = "Soubory DAT (*.dat)" + self.JSON_FILES = "Soubory JSON (*.json)" + self.FAILED_LOAD_PRESET = "Nepodařilo se načíst předvolbu: {0}" + self.INITIALIZING_AUTOGGUF = "Inicializace aplikace AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicializace AutoGGUF dokončena" + self.REFRESHING_BACKENDS = "Obnovování backendů" + self.NO_BACKENDS_AVAILABLE = "Žádné dostupné backendy" + self.FOUND_VALID_BACKENDS = "Nalezeno {0} platných backendů" + self.SAVING_PRESET = "Ukládání předvolby" + self.PRESET_SAVED_TO = "Předvolba uložena do {0}" + self.LOADING_PRESET = "Načítání předvolby" + self.PRESET_LOADED_FROM = "Předvolba načtena z {0}" + self.ADDING_KV_OVERRIDE = "Přidávání přepsání KV: {0}" + self.SAVING_TASK_PRESET = "Ukládání předvolby úkolu pro {0}" + self.TASK_PRESET_SAVED = "Předvolba úkolu uložena" + self.TASK_PRESET_SAVED_TO = "Předvolba úkolu uložena do {0}" + self.RESTARTING_TASK = "Restartování úkolu: {0}" + self.IN_PROGRESS = "Probíhá" + 
self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Stahování dokončeno. Extrahováno do: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binární soubor llama.cpp byl stažen a extrahován do {0}\nSoubory CUDA extrahovány do {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nebyl nalezen žádný vhodný backend CUDA pro extrakci" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binární soubor llama.cpp byl stažen a extrahován do {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Obnovování verzí llama.cpp" + self.UPDATING_ASSET_LIST = "Aktualizace seznamu aktiv" + self.UPDATING_CUDA_OPTIONS = "Aktualizace možností CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Zahájení stahování llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Aktualizace backendů CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Nebyl vybrán žádný backend CUDA pro extrakci" + self.EXTRACTING_CUDA_FILES = "Extrahování souborů CUDA z {0} do {1}" + self.DOWNLOAD_ERROR = "Chyba stahování: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Zobrazení kontextové nabídky úkolu" + self.SHOWING_PROPERTIES_FOR_TASK = "Zobrazení vlastností úkolu: {0}" + self.CANCELLING_TASK = "Zrušení úkolu: {0}" + self.CANCELED = "Zrušeno" + self.DELETING_TASK = "Mazání úkolu: {0}" + self.LOADING_MODELS = "Načítání modelů" + self.LOADED_MODELS = "Načteno {0} modelů" + self.BROWSING_FOR_MODELS_DIRECTORY = "Procházení adresáře modelů" + self.SELECT_MODELS_DIRECTORY = "Vyberte adresář modelů" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Procházení výstupního adresáře" + self.SELECT_OUTPUT_DIRECTORY = "Vyberte výstupní adresář" + self.BROWSING_FOR_LOGS_DIRECTORY = "Procházení adresáře logů" + self.SELECT_LOGS_DIRECTORY = "Vyberte adresář logů" + self.BROWSING_FOR_IMATRIX_FILE = "Procházení souboru IMatrix" + self.SELECT_IMATRIX_FILE = "Vyberte soubor IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "Využití CPU: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Ověřování vstupů kvantizace" + self.MODELS_PATH_REQUIRED = "Cesta k modelům je vyžadována" + self.OUTPUT_PATH_REQUIRED = "Výstupní cesta je vyžadována" + self.LOGS_PATH_REQUIRED = "Cesta k logům je vyžadována" + self.STARTING_MODEL_QUANTIZATION = "Spuštění kvantizace modelu" + self.INPUT_FILE_NOT_EXIST = "Vstupní soubor '{0}' neexistuje." 
+ self.QUANTIZING_MODEL_TO = "Kvantizace {0} na {1}" + self.QUANTIZATION_TASK_STARTED = "Úkol kvantizace spuštěn pro {0}" + self.ERROR_STARTING_QUANTIZATION = "Chyba při spuštění kvantizace: {0}" + self.UPDATING_MODEL_INFO = "Aktualizace informací o modelu: {0}" + self.TASK_FINISHED = "Úkol dokončen: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Zobrazení detailů úkolu pro: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Procházení datového souboru IMatrix" + self.SELECT_DATA_FILE = "Vyberte datový soubor" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Procházení souboru modelu IMatrix" + self.SELECT_MODEL_FILE = "Vyberte soubor modelu" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Procházení výstupního souboru IMatrix" + self.SELECT_OUTPUT_FILE = "Vyberte výstupní soubor" + self.STARTING_IMATRIX_GENERATION = "Spuštění generování IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Cesta backendu neexistuje: {0}" + self.GENERATING_IMATRIX = "Generování IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Chyba při spuštění generování IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Úkol generování IMatrix spuštěn" + self.ERROR_MESSAGE = "Chyba: {0}" + self.TASK_ERROR = "Chyba úkolu: {0}" + self.APPLICATION_CLOSING = "Zavírání aplikace" + self.APPLICATION_CLOSED = "Aplikace zavřena" + self.SELECT_QUANTIZATION_TYPE = "Vyberte typ kvantizace" + self.ALLOWS_REQUANTIZING = "Umožňuje rekvantizovat tenzory, které již byly kvantizovány" + self.LEAVE_OUTPUT_WEIGHT = "Ponechá output.weight nekvantizovaný (nebo rekvantizovaný)" + self.DISABLE_K_QUANT_MIXTURES = "Zakázat k-kvantové směsi a kvantizovat všechny tenzory na stejný typ" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Použít data v souboru jako matici důležitosti pro optimalizace kvantizace" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Použít matici důležitosti pro tyto tenzory" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Nepoužívat matici důležitosti pro tyto tenzory" + self.OUTPUT_TENSOR_TYPE = "Typ výstupního tenzoru:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Použít tento typ pro tenzor output.weight" + self.TOKEN_EMBEDDING_TYPE = "Typ vkládání tokenů:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Použít tento typ pro tenzor vkládání tokenů" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Vygeneruje kvantizovaný model ve stejných fragmentech jako vstup" + self.OVERRIDE_MODEL_METADATA = "Přepsat metadata modelu" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Vstupní datový soubor pro generování IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Model, který má být kvantizován" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Výstupní cesta pro generovaný IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Jak často ukládat IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Nastavit hodnotu odlehčení GPU (-ngl)" + self.COMPLETED = "Dokončeno" + +class _CanadianFrench(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Quantificateur automatique de modèles GGUF)" + self.RAM_USAGE = "Utilisation de la RAM :" # Spacing + self.CPU_USAGE = "Utilisation du CPU :" # Spacing + self.BACKEND = "Moteur d'arrière-plan Llama.cpp :" # Spacing and terminology + self.REFRESH_BACKENDS = "Actualiser les moteurs d'arrière-plan" + self.MODELS_PATH = "Chemin des modèles :" # Spacing + self.OUTPUT_PATH = "Chemin de sortie :" # Spacing + self.LOGS_PATH = "Chemin des journaux :" # Spacing + self.BROWSE = "Parcourir" + self.AVAILABLE_MODELS = "Modèles disponibles :" # Spacing + self.QUANTIZATION_TYPE = "Type de quantification :" # Spacing + self.ALLOW_REQUANTIZE = "Autoriser la 
requantification" + self.LEAVE_OUTPUT_TENSOR = "Laisser le tenseur de sortie" + self.PURE = "Pur" + self.IMATRIX = "IMatrix :" # Spacing + self.INCLUDE_WEIGHTS = "Inclure les poids :" # Spacing + self.EXCLUDE_WEIGHTS = "Exclure les poids :" # Spacing + self.USE_OUTPUT_TENSOR_TYPE = "Utiliser le type de tenseur de sortie" + self.USE_TOKEN_EMBEDDING_TYPE = "Utiliser le type d'intégration de jeton" + self.KEEP_SPLIT = "Conserver la division" + self.KV_OVERRIDES = "Remplacements KV :" # Spacing + self.ADD_NEW_OVERRIDE = "Ajouter un nouveau remplacement" + self.QUANTIZE_MODEL = "Quantifier le modèle" + self.SAVE_PRESET = "Enregistrer le préréglage" + self.LOAD_PRESET = "Charger le préréglage" + self.TASKS = "Tâches :" # Spacing + self.DOWNLOAD_LLAMACPP = "Télécharger llama.cpp" + self.SELECT_RELEASE = "Sélectionner la version :" # Spacing + self.SELECT_ASSET = "Sélectionner l'actif :" # Spacing + self.EXTRACT_CUDA_FILES = "Extraire les fichiers CUDA" + self.SELECT_CUDA_BACKEND = "Sélectionner le backend CUDA :" # Spacing + self.DOWNLOAD = "Télécharger" + self.IMATRIX_GENERATION = "Génération d'IMatrix" + self.DATA_FILE = "Fichier de données :" # Spacing + self.MODEL = "Modèle :" # Spacing + self.OUTPUT = "Sortie :" # Spacing + self.OUTPUT_FREQUENCY = "Fréquence de sortie :" # Spacing + self.GPU_OFFLOAD = "Déchargement GPU :" # Spacing + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "Générer IMatrix" + self.ERROR = "Erreur" + self.WARNING = "Avertissement" + self.PROPERTIES = "Propriétés" + self.CANCEL = "Annuler" + self.RESTART = "Redémarrer" + self.DELETE = "Supprimer" + self.CONFIRM_DELETION = "Êtes-vous sûr de vouloir supprimer cette tâche ?" # Spacing + self.TASK_RUNNING_WARNING = "Certaines tâches sont encore en cours d'exécution. Êtes-vous sûr de vouloir quitter ?" 
# Spacing + self.YES = "Oui" + self.NO = "Non" + self.DOWNLOAD_COMPLETE = "Téléchargement terminé" + self.CUDA_EXTRACTION_FAILED = "Échec de l'extraction CUDA" + self.PRESET_SAVED = "Préréglage enregistré" + self.PRESET_LOADED = "Préréglage chargé" + self.NO_ASSET_SELECTED = "Aucun actif sélectionné" + self.DOWNLOAD_FAILED = "Échec du téléchargement" + self.NO_BACKEND_SELECTED = "Aucun backend sélectionné" + self.NO_MODEL_SELECTED = "Aucun modèle sélectionné" + self.REFRESH_RELEASES = "Actualiser les versions" + self.NO_SUITABLE_CUDA_BACKENDS = "Aucun backend CUDA approprié trouvé" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}\nLes fichiers CUDA ont été extraits dans {1}" + self.CUDA_FILES_EXTRACTED = "Les fichiers CUDA ont été extraits dans" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Aucun backend CUDA approprié trouvé pour l'extraction" + self.ERROR_FETCHING_RELEASES = "Erreur lors de la récupération des versions : {0}" # Spacing + self.CONFIRM_DELETION_TITLE = "Confirmer la suppression" + self.LOG_FOR = "Journal pour {0}" + self.ALL_FILES = "Tous les fichiers (*)" + self.GGUF_FILES = "Fichiers GGUF (*.gguf)" + self.DAT_FILES = "Fichiers DAT (*.dat)" + self.JSON_FILES = "Fichiers JSON (*.json)" + self.FAILED_LOAD_PRESET = "Échec du chargement du préréglage : {0}" # Spacing + self.INITIALIZING_AUTOGGUF = "Initialisation de l'application AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Initialisation d'AutoGGUF terminée" + self.REFRESHING_BACKENDS = "Actualisation des moteurs d'arrière-plan" + self.NO_BACKENDS_AVAILABLE = "Aucun moteur d'arrière-plan disponible" + self.FOUND_VALID_BACKENDS = "{0} moteurs d'arrière-plan valides trouvés" + self.SAVING_PRESET = "Enregistrement du préréglage" + self.PRESET_SAVED_TO = "Préréglage enregistré dans {0}" + self.LOADING_PRESET = "Chargement du préréglage" + self.PRESET_LOADED_FROM = "Préréglage chargé depuis {0}" + self.ADDING_KV_OVERRIDE = "Ajout de remplacement KV : {0}" # Spacing + self.SAVING_TASK_PRESET = "Enregistrement du préréglage de tâche pour {0}" + self.TASK_PRESET_SAVED = "Préréglage de tâche enregistré" + self.TASK_PRESET_SAVED_TO = "Préréglage de tâche enregistré dans {0}" + self.RESTARTING_TASK = "Redémarrage de la tâche : {0}" # Spacing + self.IN_PROGRESS = "En cours" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Téléchargement terminé. 
Extrait dans : {0}" # Spacing + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}\nLes fichiers CUDA ont été extraits dans {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Aucun backend CUDA approprié trouvé pour l'extraction" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Actualisation des versions de llama.cpp" + self.UPDATING_ASSET_LIST = "Mise à jour de la liste des actifs" + self.UPDATING_CUDA_OPTIONS = "Mise à jour des options CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Démarrage du téléchargement de llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Mise à jour des backends CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Aucun backend CUDA sélectionné pour l'extraction" + self.EXTRACTING_CUDA_FILES = "Extraction des fichiers CUDA de {0} à {1}" + self.DOWNLOAD_ERROR = "Erreur de téléchargement : {0}" # Spacing + self.SHOWING_TASK_CONTEXT_MENU = "Affichage du menu contextuel de la tâche" + self.SHOWING_PROPERTIES_FOR_TASK = "Affichage des propriétés de la tâche : {0}" # Spacing + self.CANCELLING_TASK = "Annulation de la tâche : {0}" # Spacing + self.CANCELED = "Annulée" + self.DELETING_TASK = "Suppression de la tâche : {0}" # Spacing + self.LOADING_MODELS = "Chargement des modèles" + self.LOADED_MODELS = "{0} modèles chargés" + self.BROWSING_FOR_MODELS_DIRECTORY = "Navigation dans le répertoire des modèles" + self.SELECT_MODELS_DIRECTORY = "Sélectionner le répertoire des modèles" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Navigation dans le répertoire de sortie" + self.SELECT_OUTPUT_DIRECTORY = "Sélectionner le répertoire de sortie" + self.BROWSING_FOR_LOGS_DIRECTORY = "Navigation dans le répertoire des journaux" + self.SELECT_LOGS_DIRECTORY = "Sélectionner le répertoire des journaux" + self.BROWSING_FOR_IMATRIX_FILE = "Navigation dans le fichier IMatrix" + self.SELECT_IMATRIX_FILE = "Sélectionner le fichier IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mo / {2} Mo)" + self.CPU_USAGE_FORMAT = "Utilisation du CPU : {0:.1f}%" # Spacing + self.VALIDATING_QUANTIZATION_INPUTS = "Validation des entrées de quantification" + self.MODELS_PATH_REQUIRED = "Le chemin des modèles est requis" + self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis" + self.LOGS_PATH_REQUIRED = "Le chemin des journaux est requis" + self.STARTING_MODEL_QUANTIZATION = "Démarrage de la quantification du modèle" + self.INPUT_FILE_NOT_EXIST = "Le fichier d'entrée '{0}' n'existe pas." 
+ self.QUANTIZING_MODEL_TO = "Quantification de {0} en {1}" + self.QUANTIZATION_TASK_STARTED = "Tâche de quantification démarrée pour {0}" + self.ERROR_STARTING_QUANTIZATION = "Erreur lors du démarrage de la quantification : {0}" # Spacing + self.UPDATING_MODEL_INFO = "Mise à jour des informations sur le modèle : {0}" # Spacing + self.TASK_FINISHED = "Tâche terminée : {0}" # Spacing + self.SHOWING_TASK_DETAILS_FOR = "Affichage des détails de la tâche pour : {0}" # Spacing + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Navigation dans le fichier de données IMatrix" + self.SELECT_DATA_FILE = "Sélectionner le fichier de données" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Navigation dans le fichier de modèle IMatrix" + self.SELECT_MODEL_FILE = "Sélectionner le fichier de modèle" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Navigation dans le fichier de sortie IMatrix" + self.SELECT_OUTPUT_FILE = "Sélectionner le fichier de sortie" + self.STARTING_IMATRIX_GENERATION = "Démarrage de la génération d'IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Le chemin du backend n'existe pas : {0}" # Spacing + self.GENERATING_IMATRIX = "Génération d'IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Erreur lors du démarrage de la génération d'IMatrix : {0}" # Spacing + self.IMATRIX_GENERATION_TASK_STARTED = "Tâche de génération d'IMatrix démarrée" + self.ERROR_MESSAGE = "Erreur : {0}" # Spacing + self.TASK_ERROR = "Erreur de tâche : {0}" # Spacing + self.APPLICATION_CLOSING = "Fermeture de l'application" + self.APPLICATION_CLOSED = "Application fermée" + self.SELECT_QUANTIZATION_TYPE = "Sélectionnez le type de quantification" + self.ALLOWS_REQUANTIZING = "Permet de requantifier les tenseurs qui ont déjà été quantifiés" + self.LEAVE_OUTPUT_WEIGHT = "Laissera output.weight non (re)quantifié" + self.DISABLE_K_QUANT_MIXTURES = "Désactiver les mélanges k-quant et quantifier tous les tenseurs du même type" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utiliser les données du fichier comme matrice d'importance pour les optimisations de quant" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Utiliser la matrice d'importance pour ces tenseurs" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Ne pas utiliser la matrice d'importance pour ces tenseurs" + self.OUTPUT_TENSOR_TYPE = "Type de tenseur de sortie :" # Spacing + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Utiliser ce type pour le tenseur output.weight" + self.TOKEN_EMBEDDING_TYPE = "Type d'intégration de jeton :" # Spacing + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Utiliser ce type pour le tenseur d'intégration de jetons" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Générera le modèle quantifié dans les mêmes fragments que l'entrée" + self.OVERRIDE_MODEL_METADATA = "Remplacer les métadonnées du modèle" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Fichier de données d'entrée pour la génération d'IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Modèle à quantifier" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Chemin de sortie pour l'IMatrix généré" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Fréquence d'enregistrement de l'IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Définir la valeur de déchargement GPU (-ngl)" + self.COMPLETED = "Terminé" + +class _Portuguese_PT(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Quantificador Automático de Modelos GGUF)" + self.RAM_USAGE = "Utilização de RAM:" + self.CPU_USAGE = "Utilização da CPU:" + self.BACKEND = "Backend Llama.cpp:" + self.REFRESH_BACKENDS = "Atualizar Backends" + self.MODELS_PATH = "Caminho dos Modelos:" + 
self.OUTPUT_PATH = "Caminho de Saída:" + self.LOGS_PATH = "Caminho dos Logs:" + self.BROWSE = "Navegar" + self.AVAILABLE_MODELS = "Modelos Disponíveis:" + self.QUANTIZATION_TYPE = "Tipo de Quantização:" + self.ALLOW_REQUANTIZE = "Permitir Requantização" + self.LEAVE_OUTPUT_TENSOR = "Manter Tensor de Saída" + self.PURE = "Puro" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Incluir Pesos:" + self.EXCLUDE_WEIGHTS = "Excluir Pesos:" + self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Saída" + self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incorporação de Token" + self.KEEP_SPLIT = "Manter Divisão" + self.KV_OVERRIDES = "Substituições KV:" + self.ADD_NEW_OVERRIDE = "Adicionar Nova Substituição" + self.QUANTIZE_MODEL = "Quantizar Modelo" + self.SAVE_PRESET = "Guardar Predefinição" + self.LOAD_PRESET = "Carregar Predefinição" + self.TASKS = "Tarefas:" + self.DOWNLOAD_LLAMACPP = "Descarregar llama.cpp" + self.SELECT_RELEASE = "Selecionar Versão:" + self.SELECT_ASSET = "Selecionar Ativo:" + self.EXTRACT_CUDA_FILES = "Extrair Ficheiros CUDA" + self.SELECT_CUDA_BACKEND = "Selecionar Backend CUDA:" + self.DOWNLOAD = "Descarregar" + self.IMATRIX_GENERATION = "Geração de IMatrix" + self.DATA_FILE = "Ficheiro de Dados:" + self.MODEL = "Modelo:" + self.OUTPUT = "Saída:" + self.OUTPUT_FREQUENCY = "Frequência de Saída:" + self.GPU_OFFLOAD = "Offload da GPU:" + self.AUTO = "Automático" + self.GENERATE_IMATRIX = "Gerar IMatrix" + self.ERROR = "Erro" + self.WARNING = "Aviso" + self.PROPERTIES = "Propriedades" + self.CANCEL = "Cancelar" + self.RESTART = "Reiniciar" + self.DELETE = "Eliminar" + self.CONFIRM_DELETION = "Tem a certeza de que pretende eliminar esta tarefa?" + self.TASK_RUNNING_WARNING = "Algumas tarefas ainda estão em execução. Tem a certeza de que pretende sair?" 
+ self.YES = "Sim" + self.NO = "Não" + self.DOWNLOAD_COMPLETE = "Transferência Concluída" + self.CUDA_EXTRACTION_FAILED = "Falha na Extração do CUDA" + self.PRESET_SAVED = "Predefinição Guardada" + self.PRESET_LOADED = "Predefinição Carregada" + self.NO_ASSET_SELECTED = "Nenhum ativo selecionado" + self.DOWNLOAD_FAILED = "Falha na transferência" + self.NO_BACKEND_SELECTED = "Nenhum backend selecionado" + self.NO_MODEL_SELECTED = "Nenhum modelo selecionado" + self.REFRESH_RELEASES = "Atualizar Versões" + self.NO_SUITABLE_CUDA_BACKENDS = "Nenhum backend CUDA adequado encontrado" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binário llama.cpp transferido e extraído para {0}\nFicheiros CUDA extraídos para {1}" + self.CUDA_FILES_EXTRACTED = "Ficheiros CUDA extraídos para" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nenhum backend CUDA adequado encontrado para extração" + self.ERROR_FETCHING_RELEASES = "Erro ao obter versões: {0}" + self.CONFIRM_DELETION_TITLE = "Confirmar Eliminação" + self.LOG_FOR = "Log para {0}" + self.ALL_FILES = "Todos os Ficheiros (*)" + self.GGUF_FILES = "Ficheiros GGUF (*.gguf)" + self.DAT_FILES = "Ficheiros DAT (*.dat)" + self.JSON_FILES = "Ficheiros JSON (*.json)" + self.FAILED_LOAD_PRESET = "Falha ao carregar a predefinição: {0}" + self.INITIALIZING_AUTOGGUF = "A inicializar a aplicação AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialização do AutoGGUF concluída" + self.REFRESHING_BACKENDS = "A atualizar backends" + self.NO_BACKENDS_AVAILABLE = "Nenhum backend disponível" + self.FOUND_VALID_BACKENDS = "{0} backends válidos encontrados" + self.SAVING_PRESET = "A guardar predefinição" + self.PRESET_SAVED_TO = "Predefinição guardada em {0}" + self.LOADING_PRESET = "A carregar predefinição" + self.PRESET_LOADED_FROM = "Predefinição carregada de {0}" + self.ADDING_KV_OVERRIDE = "A adicionar substituição KV: {0}" + self.SAVING_TASK_PRESET = "A guardar predefinição de tarefa para {0}" + self.TASK_PRESET_SAVED = "Predefinição de Tarefa Guardada" + self.TASK_PRESET_SAVED_TO = "Predefinição de tarefa guardada em {0}" + self.RESTARTING_TASK = "A reiniciar tarefa: {0}" + self.IN_PROGRESS = "Em Andamento" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Transferência concluída. 
Extraído para: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp transferido e extraído para {0}\nFicheiros CUDA extraídos para {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nenhum backend CUDA adequado encontrado para extração" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp transferido e extraído para {0}" + self.REFRESHING_LLAMACPP_RELEASES = "A atualizar versões do llama.cpp" + self.UPDATING_ASSET_LIST = "A atualizar lista de ativos" + self.UPDATING_CUDA_OPTIONS = "A atualizar opções CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "A iniciar transferência do llama.cpp" + self.UPDATING_CUDA_BACKENDS = "A atualizar backends CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Nenhum backend CUDA selecionado para extração" + self.EXTRACTING_CUDA_FILES = "A extrair ficheiros CUDA de {0} para {1}" + self.DOWNLOAD_ERROR = "Erro de transferência: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "A exibir menu de contexto da tarefa" + self.SHOWING_PROPERTIES_FOR_TASK = "A exibir propriedades para a tarefa: {0}" + self.CANCELLING_TASK = "A cancelar tarefa: {0}" + self.CANCELED = "Cancelado" + self.DELETING_TASK = "A eliminar tarefa: {0}" + self.LOADING_MODELS = "A carregar modelos" + self.LOADED_MODELS = "{0} modelos carregados" + self.BROWSING_FOR_MODELS_DIRECTORY = "A navegar pelo diretório de modelos" + self.SELECT_MODELS_DIRECTORY = "Selecionar Diretório de Modelos" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "A navegar pelo diretório de saída" + self.SELECT_OUTPUT_DIRECTORY = "Selecionar Diretório de Saída" + self.BROWSING_FOR_LOGS_DIRECTORY = "A navegar pelo diretório de logs" + self.SELECT_LOGS_DIRECTORY = "Selecionar Diretório de Logs" + self.BROWSING_FOR_IMATRIX_FILE = "A navegar pelo ficheiro IMatrix" + self.SELECT_IMATRIX_FILE = "Selecionar Ficheiro IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "Utilização da CPU: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "A validar entradas de quantização" + self.MODELS_PATH_REQUIRED = "O caminho dos modelos é obrigatório" + self.OUTPUT_PATH_REQUIRED = "O caminho de saída é obrigatório" + self.LOGS_PATH_REQUIRED = "O caminho dos logs é obrigatório" + self.STARTING_MODEL_QUANTIZATION = "A iniciar a quantização do modelo" + self.INPUT_FILE_NOT_EXIST = "O ficheiro de entrada '{0}' não existe." 
+ self.QUANTIZING_MODEL_TO = "A quantizar {0} para {1}" + self.QUANTIZATION_TASK_STARTED = "Tarefa de quantização iniciada para {0}" + self.ERROR_STARTING_QUANTIZATION = "Erro ao iniciar a quantização: {0}" + self.UPDATING_MODEL_INFO = "A atualizar informações do modelo: {0}" + self.TASK_FINISHED = "Tarefa concluída: {0}" + self.SHOWING_TASK_DETAILS_FOR = "A mostrar detalhes da tarefa para: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "A navegar pelo ficheiro de dados IMatrix" + self.SELECT_DATA_FILE = "Selecionar Ficheiro de Dados" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "A navegar pelo ficheiro de modelo IMatrix" + self.SELECT_MODEL_FILE = "Selecionar Ficheiro de Modelo" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "A navegar pelo ficheiro de saída IMatrix" + self.SELECT_OUTPUT_FILE = "Selecionar Ficheiro de Saída" + self.STARTING_IMATRIX_GENERATION = "A iniciar a geração de IMatrix" + self.BACKEND_PATH_NOT_EXIST = "O caminho do backend não existe: {0}" + self.GENERATING_IMATRIX = "A gerar IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Erro ao iniciar a geração de IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Tarefa de geração de IMatrix iniciada" + self.ERROR_MESSAGE = "Erro: {0}" + self.TASK_ERROR = "Erro de tarefa: {0}" + self.APPLICATION_CLOSING = "A fechar a aplicação" + self.APPLICATION_CLOSED = "Aplicação fechada" + self.SELECT_QUANTIZATION_TYPE = "Selecione o tipo de quantização" + self.ALLOWS_REQUANTIZING = "Permite requantizar tensores que já foram quantizados" + self.LEAVE_OUTPUT_WEIGHT = "Deixará output.weight não (re)quantizado" + self.DISABLE_K_QUANT_MIXTURES = "Desativar misturas k-quant e quantizar todos os tensores para o mesmo tipo" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usar os dados no ficheiro como matriz de importância para otimizações de quantização" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Usar matriz de importância para estes tensores" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Não usar matriz de importância para estes tensores" + self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Saída:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Usar este tipo para o tensor output.weight" + self.TOKEN_EMBEDDING_TYPE = "Tipo de Incorporação de Token:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Usar este tipo para o tensor de incorporações de token" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Irá gerar o modelo quantizado nos mesmos shards da entrada" + self.OVERRIDE_MODEL_METADATA = "Substituir metadados do modelo" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Ficheiro de dados de entrada para geração de IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Modelo a ser quantizado" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Caminho de saída para o IMatrix gerado" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Com que frequência guardar o IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Definir valor de offload da GPU (-ngl)" + self.COMPLETED = "Concluído" + +class _Greek(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Αυτόματος Κβαντιστής Μοντέλων GGUF)" + self.RAM_USAGE = "Χρήση RAM:" + self.CPU_USAGE = "Χρήση CPU:" + self.BACKEND = "Backend Llama.cpp:" + self.REFRESH_BACKENDS = "Ανανέωση Backends" + self.MODELS_PATH = "Διαδρομή Μοντέλων:" + self.OUTPUT_PATH = "Διαδρομή Εξόδου:" + self.LOGS_PATH = "Διαδρομή Αρχείων Καταγραφής:" + self.BROWSE = "Περιήγηση" + self.AVAILABLE_MODELS = "Διαθέσιμα Μοντέλα:" + self.QUANTIZATION_TYPE = "Τύπος Κβαντισμού:" + self.ALLOW_REQUANTIZE = "Να Επιτρέπεται η Επανακβάντιση" + self.LEAVE_OUTPUT_TENSOR = 
"Διατήρηση Tensor Εξόδου" + self.PURE = "Καθαρό" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Συμπερίληψη Βαρών:" + self.EXCLUDE_WEIGHTS = "Εξαίρεση Βαρών:" + self.USE_OUTPUT_TENSOR_TYPE = "Χρήση Τύπου Tensor Εξόδου" + self.USE_TOKEN_EMBEDDING_TYPE = "Χρήση Τύπου Ενσωμάτωσης Token" + self.KEEP_SPLIT = "Διατήρηση Διαίρεσης" + self.KV_OVERRIDES = "Υπερβάσεις KV:" + self.ADD_NEW_OVERRIDE = "Προσθήκη Νέας Υπέρβασης" + self.QUANTIZE_MODEL = "Κβάντιση Μοντέλου" + self.SAVE_PRESET = "Αποθήκευση Προεπιλογής" + self.LOAD_PRESET = "Φόρτωση Προεπιλογής" + self.TASKS = "Εργασίες:" + self.DOWNLOAD_LLAMACPP = "Λήψη llama.cpp" + self.SELECT_RELEASE = "Επιλογή Έκδοσης:" + self.SELECT_ASSET = "Επιλογή Στοιχείου:" + self.EXTRACT_CUDA_FILES = "Εξαγωγή Αρχείων CUDA" + self.SELECT_CUDA_BACKEND = "Επιλογή Backend CUDA:" + self.DOWNLOAD = "Λήψη" + self.IMATRIX_GENERATION = "Δημιουργία IMatrix" + self.DATA_FILE = "Αρχείο Δεδομένων:" + self.MODEL = "Μοντέλο:" + self.OUTPUT = "Έξοδος:" + self.OUTPUT_FREQUENCY = "Συχνότητα Εξόδου:" + self.GPU_OFFLOAD = "Εκφόρτωση GPU:" + self.AUTO = "Αυτόματο" + self.GENERATE_IMATRIX = "Δημιουργία IMatrix" + self.ERROR = "Σφάλμα" + self.WARNING = "Προειδοποίηση" + self.PROPERTIES = "Ιδιότητες" + self.CANCEL = "Ακύρωση" + self.RESTART = "Επανεκκίνηση" + self.DELETE = "Διαγραφή" + self.CONFIRM_DELETION = "Είστε βέβαιοι ότι θέλετε να διαγράψετε αυτήν την εργασία;" + self.TASK_RUNNING_WARNING = "Ορισμένες εργασίες εκτελούνται ακόμη. Είστε βέβαιοι ότι θέλετε να τερματίσετε;" + self.YES = "Ναι" + self.NO = "Όχι" + self.DOWNLOAD_COMPLETE = "Η Λήψη Ολοκληρώθηκε" + self.CUDA_EXTRACTION_FAILED = "Αποτυχία Εξαγωγής CUDA" + self.PRESET_SAVED = "Η Προεπιλογή Αποθηκεύτηκε" + self.PRESET_LOADED = "Η Προεπιλογή Φορτώθηκε" + self.NO_ASSET_SELECTED = "Δεν Έχει Επιλεγεί Στοιχείο" + self.DOWNLOAD_FAILED = "Αποτυχία Λήψης" + self.NO_BACKEND_SELECTED = "Δεν Έχει Επιλεγεί Backend" + self.NO_MODEL_SELECTED = "Δεν Έχει Επιλεγεί Μοντέλο" + self.REFRESH_RELEASES = "Ανανέωση Εκδόσεων" + self.NO_SUITABLE_CUDA_BACKENDS = "Δεν Βρέθηκαν Κατάλληλα Backends CUDA" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}\nΤα Αρχεία CUDA Εξήχθησαν στο {1}" + self.CUDA_FILES_EXTRACTED = "Τα Αρχεία CUDA Εξήχθησαν στο" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Δεν Βρέθηκε Κατάλληλο Backend CUDA για Εξαγωγή" + self.ERROR_FETCHING_RELEASES = "Σφάλμα κατά την Ανάκτηση Εκδόσεων: {0}" + self.CONFIRM_DELETION_TITLE = "Επιβεβαίωση Διαγραφής" + self.LOG_FOR = "Αρχείο Καταγραφής για {0}" + self.ALL_FILES = "Όλα τα Αρχεία (*)" + self.GGUF_FILES = "Αρχεία GGUF (*.gguf)" + self.DAT_FILES = "Αρχεία DAT (*.dat)" + self.JSON_FILES = "Αρχεία JSON (*.json)" + self.FAILED_LOAD_PRESET = "Αποτυχία Φόρτωσης Προεπιλογής: {0}" + self.INITIALIZING_AUTOGGUF = "Εκκίνηση Εφαρμογής AutoGGUF" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Η Εκκίνηση του AutoGGUF Ολοκληρώθηκε" + self.REFRESHING_BACKENDS = "Ανανέωση Backends" + self.NO_BACKENDS_AVAILABLE = "Δεν Υπάρχουν Διαθέσιμα Backends" + self.FOUND_VALID_BACKENDS = "Βρέθηκαν {0} Έγκυρα Backends" + self.SAVING_PRESET = "Αποθήκευση Προεπιλογής" + self.PRESET_SAVED_TO = "Η Προεπιλογή Αποθηκεύτηκε στο {0}" + self.LOADING_PRESET = "Φόρτωση Προεπιλογής" + self.PRESET_LOADED_FROM = "Η Προεπιλογή Φορτώθηκε από το {0}" + self.ADDING_KV_OVERRIDE = "Προσθήκη Υπέρβασης KV: {0}" + self.SAVING_TASK_PRESET = "Αποθήκευση Προεπιλογής Εργασίας για {0}" + self.TASK_PRESET_SAVED = "Η Προεπιλογή Εργασίας Αποθηκεύτηκε" + self.TASK_PRESET_SAVED_TO = "Η Προεπιλογή Εργασίας 
Αποθηκεύτηκε στο {0}" + self.RESTARTING_TASK = "Επανεκκίνηση Εργασίας: {0}" + self.IN_PROGRESS = "Σε Εξέλιξη" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Η Λήψη Ολοκληρώθηκε. Εξήχθη στο: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}\nΤα Αρχεία CUDA Εξήχθησαν στο {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Δεν Βρέθηκε Κατάλληλο Backend CUDA για Εξαγωγή" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Ανανέωση Εκδόσεων llama.cpp" + self.UPDATING_ASSET_LIST = "Ενημέρωση Λίστας Στοιχείων" + self.UPDATING_CUDA_OPTIONS = "Ενημέρωση Επιλογών CUDA" + self.STARTING_LLAMACPP_DOWNLOAD = "Έναρξη Λήψης llama.cpp" + self.UPDATING_CUDA_BACKENDS = "Ενημέρωση Backends CUDA" + self.NO_CUDA_BACKEND_SELECTED = "Δεν Έχει Επιλεγεί Backend CUDA για Εξαγωγή" + self.EXTRACTING_CUDA_FILES = "Εξαγωγή Αρχείων CUDA από {0} στο {1}" + self.DOWNLOAD_ERROR = "Σφάλμα Λήψης: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Εμφάνιση Μενού Περιεχομένου Εργασίας" + self.SHOWING_PROPERTIES_FOR_TASK = "Εμφάνιση Ιδιοτήτων για την Εργασία: {0}" + self.CANCELLING_TASK = "Ακύρωση Εργασίας: {0}" + self.CANCELED = "Ακυρώθηκε" + self.DELETING_TASK = "Διαγραφή Εργασίας: {0}" + self.LOADING_MODELS = "Φόρτωση Μοντέλων" + self.LOADED_MODELS = "{0} Μοντέλα Φορτώθηκαν" + self.BROWSING_FOR_MODELS_DIRECTORY = "Περιήγηση σε Φάκελο Μοντέλων" + self.SELECT_MODELS_DIRECTORY = "Επιλέξτε Φάκελο Μοντέλων" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Περιήγηση σε Φάκελο Εξόδου" + self.SELECT_OUTPUT_DIRECTORY = "Επιλέξτε Φάκελο Εξόδου" + self.BROWSING_FOR_LOGS_DIRECTORY = "Περιήγηση σε Φάκελο Αρχείων Καταγραφής" + self.SELECT_LOGS_DIRECTORY = "Επιλέξτε Φάκελο Αρχείων Καταγραφής" + self.BROWSING_FOR_IMATRIX_FILE = "Περιήγηση σε Αρχείο IMatrix" + self.SELECT_IMATRIX_FILE = "Επιλέξτε Αρχείο IMatrix" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "Χρήση CPU: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Επικύρωση Εισόδων Κβαντισμού" + self.MODELS_PATH_REQUIRED = "Απαιτείται η Διαδρομή Μοντέλων" + self.OUTPUT_PATH_REQUIRED = "Απαιτείται η Διαδρομή Εξόδου" + self.LOGS_PATH_REQUIRED = "Απαιτείται η Διαδρομή Αρχείων Καταγραφής" + self.STARTING_MODEL_QUANTIZATION = "Έναρξη Κβαντισμού Μοντέλου" + self.INPUT_FILE_NOT_EXIST = "Το Αρχείο Εισόδου '{0}' Δεν Υπάρχει." 
+ self.QUANTIZING_MODEL_TO = "Κβάντιση του {0} σε {1}" + self.QUANTIZATION_TASK_STARTED = "Η Εργασία Κβαντισμού Ξεκίνησε για {0}" + self.ERROR_STARTING_QUANTIZATION = "Σφάλμα κατά την Έναρξη Κβαντισμού: {0}" + self.UPDATING_MODEL_INFO = "Ενημέρωση Πληροφοριών Μοντέλου: {0}" + self.TASK_FINISHED = "Η Εργασία Ολοκληρώθηκε: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Εμφάνιση Λεπτομερειών Εργασίας για: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Περιήγηση σε Αρχείο Δεδομένων IMatrix" + self.SELECT_DATA_FILE = "Επιλέξτε Αρχείο Δεδομένων" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Περιήγηση σε Αρχείο Μοντέλου IMatrix" + self.SELECT_MODEL_FILE = "Επιλέξτε Αρχείο Μοντέλου" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Περιήγηση σε Αρχείο Εξόδου IMatrix" + self.SELECT_OUTPUT_FILE = "Επιλέξτε Αρχείο Εξόδου" + self.STARTING_IMATRIX_GENERATION = "Έναρξη Δημιουργίας IMatrix" + self.BACKEND_PATH_NOT_EXIST = "Η Διαδρομή Backend Δεν Υπάρχει: {0}" + self.GENERATING_IMATRIX = "Δημιουργία IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Σφάλμα κατά την Έναρξη Δημιουργίας IMatrix: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "Η Εργασία Δημιουργίας IMatrix Ξεκίνησε" + self.ERROR_MESSAGE = "Σφάλμα: {0}" + self.TASK_ERROR = "Σφάλμα Εργασίας: {0}" + self.APPLICATION_CLOSING = "Κλείσιμο Εφαρμογής" + self.APPLICATION_CLOSED = "Η Εφαρμογή Έκλεισε" + self.SELECT_QUANTIZATION_TYPE = "Επιλέξτε τον τύπο κβαντισμού" + self.ALLOWS_REQUANTIZING = "Επιτρέπει την επανακβάντιση τανυστών που έχουν ήδη κβαντιστεί" + self.LEAVE_OUTPUT_WEIGHT = "Θα αφήσει το output.weight χωρίς (επανα)κβάντιση" + self.DISABLE_K_QUANT_MIXTURES = "Απενεργοποιήστε τα μείγματα k-quant και κβαντίστε όλους τους τανυστές στον ίδιο τύπο" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Χρησιμοποιήστε τα δεδομένα στο αρχείο ως πίνακα σημασίας για βελτιστοποιήσεις κβαντισμού" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Χρησιμοποιήστε τον πίνακα σημασίας για αυτούς τους τανυστές" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Μην χρησιμοποιείτε τον πίνακα σημασίας για αυτούς τους τανυστές" + self.OUTPUT_TENSOR_TYPE = "Τύπος Tensor Εξόδου:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Χρησιμοποιήστε αυτόν τον τύπο για τον τανυστή output.weight" + self.TOKEN_EMBEDDING_TYPE = "Τύπος Ενσωμάτωσης Token:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Χρησιμοποιήστε αυτόν τον τύπο για τον τανυστή ενσωματώσεων token" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Θα δημιουργήσει το κβαντισμένο μοντέλο στα ίδια θραύσματα με την είσοδο" + self.OVERRIDE_MODEL_METADATA = "Αντικατάσταση μεταδεδομένων μοντέλου" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Αρχείο δεδομένων εισόδου για τη δημιουργία IMatrix" + self.MODEL_TO_BE_QUANTIZED = "Μοντέλο προς κβάντιση" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Διαδρομή εξόδου για το δημιουργημένο IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Πόσο συχνά να αποθηκεύεται το IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Ορίστε την τιμή εκφόρτωσης GPU (-ngl)" + self.COMPLETED = "Ολοκληρώθηκε" + +class _Hungarian(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (Automatizált GGUF modell kvantáló)" + self.RAM_USAGE = "RAM használat:" + self.CPU_USAGE = "CPU használat:" + self.BACKEND = "Llama.cpp háttérrendszer:" + self.REFRESH_BACKENDS = "Háttérrendszerek frissítése" + self.MODELS_PATH = "Modellek elérési útja:" + self.OUTPUT_PATH = "Kimeneti útvonal:" + self.LOGS_PATH = "Naplók elérési útja:" + self.BROWSE = "Tallózás" + self.AVAILABLE_MODELS = "Elérhető modellek:" + self.QUANTIZATION_TYPE = "Kvantálási 
típus:" + self.ALLOW_REQUANTIZE = "Újrakvantálás engedélyezése" + self.LEAVE_OUTPUT_TENSOR = "Kimeneti tenzor meghagyása" + self.PURE = "Tiszta" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Súlyok belefoglalása:" + self.EXCLUDE_WEIGHTS = "Súlyok kizárása:" + self.USE_OUTPUT_TENSOR_TYPE = "Kimeneti tenzor típusának használata" + self.USE_TOKEN_EMBEDDING_TYPE = "Token beágyazási típusának használata" + self.KEEP_SPLIT = "Felosztás megtartása" + self.KV_OVERRIDES = "KV felülbírálások:" + self.ADD_NEW_OVERRIDE = "Új felülbírálás hozzáadása" + self.QUANTIZE_MODEL = "Modell kvantálása" + self.SAVE_PRESET = "Esetbeállítás mentése" + self.LOAD_PRESET = "Esetbeállítás betöltése" + self.TASKS = "Feladatok:" + self.DOWNLOAD_LLAMACPP = "llama.cpp letöltése" + self.SELECT_RELEASE = "Kiadás kiválasztása:" + self.SELECT_ASSET = "Eszköz kiválasztása:" + self.EXTRACT_CUDA_FILES = "CUDA fájlok kibontása" + self.SELECT_CUDA_BACKEND = "CUDA háttérrendszer kiválasztása:" + self.DOWNLOAD = "Letöltés" + self.IMATRIX_GENERATION = "IMatrix generálás" + self.DATA_FILE = "Adatfájl:" + self.MODEL = "Modell:" + self.OUTPUT = "Kimenet:" + self.OUTPUT_FREQUENCY = "Kimeneti frekvencia:" + self.GPU_OFFLOAD = "GPU tehermentesítés:" + self.AUTO = "Automatikus" + self.GENERATE_IMATRIX = "IMatrix generálása" + self.ERROR = "Hiba" + self.WARNING = "Figyelmeztetés" + self.PROPERTIES = "Tulajdonságok" + self.CANCEL = "Mégse" + self.RESTART = "Újraindítás" + self.DELETE = "Törlés" + self.CONFIRM_DELETION = "Biztosan törölni szeretné ezt a feladatot?" + self.TASK_RUNNING_WARNING = "Néhány feladat még fut. Biztosan kilép?" + self.YES = "Igen" + self.NO = "Nem" + self.DOWNLOAD_COMPLETE = "Letöltés befejeződött" + self.CUDA_EXTRACTION_FAILED = "CUDA kibontás sikertelen" + self.PRESET_SAVED = "Esetbeállítás mentve" + self.PRESET_LOADED = "Esetbeállítás betöltve" + self.NO_ASSET_SELECTED = "Nincs kiválasztott eszköz" + self.DOWNLOAD_FAILED = "Letöltés sikertelen" + self.NO_BACKEND_SELECTED = "Nincs kiválasztott háttérrendszer" + self.NO_MODEL_SELECTED = "Nincs kiválasztott modell" + self.REFRESH_RELEASES = "Kiadások frissítése" + self.NO_SUITABLE_CUDA_BACKENDS = "Nem található megfelelő CUDA háttérrendszer" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "A llama.cpp bináris fájl letöltve és kibontva ide: {0}\nA CUDA fájlok kibontva ide: {1}" + self.CUDA_FILES_EXTRACTED = "A CUDA fájlok kibontva ide:" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nem található megfelelő CUDA háttérrendszer a kibontáshoz" + self.ERROR_FETCHING_RELEASES = "Hiba a kiadások lekérdezésekor: {0}" + self.CONFIRM_DELETION_TITLE = "Törlés megerősítése" + self.LOG_FOR = "Napló a következőhöz: {0}" + self.ALL_FILES = "Minden fájl (*)" + self.GGUF_FILES = "GGUF fájlok (*.gguf)" + self.DAT_FILES = "DAT fájlok (*.dat)" + self.JSON_FILES = "JSON fájlok (*.json)" + self.FAILED_LOAD_PRESET = "Az esetbeállítás betöltése sikertelen: {0}" + self.INITIALIZING_AUTOGGUF = "Az AutoGGUF alkalmazás inicializálása" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "Az AutoGGUF inicializálása befejeződött" + self.REFRESHING_BACKENDS = "Háttérrendszerek frissítése" + self.NO_BACKENDS_AVAILABLE = "Nincsenek elérhető háttérrendszerek" + self.FOUND_VALID_BACKENDS = "{0} érvényes háttérrendszer található" + self.SAVING_PRESET = "Esetbeállítás mentése" + self.PRESET_SAVED_TO = "Esetbeállítás mentve ide: {0}" + self.LOADING_PRESET = "Esetbeállítás betöltése" + self.PRESET_LOADED_FROM = "Esetbeállítás betöltve innen: {0}" + self.ADDING_KV_OVERRIDE = "KV felülbírálás hozzáadása: {0}" + 
self.SAVING_TASK_PRESET = "Feladat esetbeállítás mentése ehhez: {0}" + self.TASK_PRESET_SAVED = "Feladat esetbeállítás mentve" + self.TASK_PRESET_SAVED_TO = "Feladat esetbeállítás mentve ide: {0}" + self.RESTARTING_TASK = "Feladat újraindítása: {0}" + self.IN_PROGRESS = "Folyamatban" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Letöltés befejeződött. Kibontva ide: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "A llama.cpp bináris fájl letöltve és kibontva ide: {0}\nA CUDA fájlok kibontva ide: {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nem található megfelelő CUDA háttérrendszer a kibontáshoz" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "A llama.cpp bináris fájl letöltve és kibontva ide: {0}" + self.REFRESHING_LLAMACPP_RELEASES = "A llama.cpp kiadások frissítése" + self.UPDATING_ASSET_LIST = "Eszközlista frissítése" + self.UPDATING_CUDA_OPTIONS = "CUDA beállítások frissítése" + self.STARTING_LLAMACPP_DOWNLOAD = "A llama.cpp letöltésének megkezdése" + self.UPDATING_CUDA_BACKENDS = "CUDA háttérrendszerek frissítése" + self.NO_CUDA_BACKEND_SELECTED = "Nincs kiválasztott CUDA háttérrendszer a kibontáshoz" + self.EXTRACTING_CUDA_FILES = "CUDA fájlok kibontása innen: {0} ide: {1}" + self.DOWNLOAD_ERROR = "Letöltési hiba: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Feladat helyi menüjének megjelenítése" + self.SHOWING_PROPERTIES_FOR_TASK = "Feladat tulajdonságainak megjelenítése: {0}" + self.CANCELLING_TASK = "Feladat megszakítása: {0}" + self.CANCELED = "Megszakítva" + self.DELETING_TASK = "Feladat törlése: {0}" + self.LOADING_MODELS = "Modellek betöltése" + self.LOADED_MODELS = "{0} modell betöltve" + self.BROWSING_FOR_MODELS_DIRECTORY = "Modellek könyvtárának tallózása" + self.SELECT_MODELS_DIRECTORY = "Modellek könyvtárának kiválasztása" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Kimeneti könyvtár tallózása" + self.SELECT_OUTPUT_DIRECTORY = "Kimeneti könyvtár kiválasztása" + self.BROWSING_FOR_LOGS_DIRECTORY = "Naplók könyvtárának tallózása" + self.SELECT_LOGS_DIRECTORY = "Naplók könyvtárának kiválasztása" + self.BROWSING_FOR_IMATRIX_FILE = "IMatrix fájl tallózása" + self.SELECT_IMATRIX_FILE = "IMatrix fájl kiválasztása" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU használat: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Kvantálási bemenetek ellenőrzése" + self.MODELS_PATH_REQUIRED = "A modellek elérési útja kötelező" + self.OUTPUT_PATH_REQUIRED = "A kimeneti útvonal kötelező" + self.LOGS_PATH_REQUIRED = "A naplók elérési útja kötelező" + self.STARTING_MODEL_QUANTIZATION = "Modell kvantálásának indítása" + self.INPUT_FILE_NOT_EXIST = "A bemeneti fájl '{0}' nem létezik." 
+ self.QUANTIZING_MODEL_TO = "{0} kvantálása erre: {1}" + self.QUANTIZATION_TASK_STARTED = "Kvantálási feladat elindítva ehhez: {0}" + self.ERROR_STARTING_QUANTIZATION = "Hiba a kvantálás indításakor: {0}" + self.UPDATING_MODEL_INFO = "Modellinformációk frissítése: {0}" + self.TASK_FINISHED = "Feladat befejezve: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Feladat részleteinek megjelenítése ehhez: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix adatfájl tallózása" + self.SELECT_DATA_FILE = "Adatfájl kiválasztása" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix modellfájl tallózása" + self.SELECT_MODEL_FILE = "Modellfájl kiválasztása" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix kimeneti fájl tallózása" + self.SELECT_OUTPUT_FILE = "Kimeneti fájl kiválasztása" + self.STARTING_IMATRIX_GENERATION = "IMatrix generálásának indítása" + self.BACKEND_PATH_NOT_EXIST = "A háttérrendszer elérési útja nem létezik: {0}" + self.GENERATING_IMATRIX = "IMatrix generálása" + self.ERROR_STARTING_IMATRIX_GENERATION = "Hiba az IMatrix generálásának indításakor: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generálási feladat elindítva" + self.ERROR_MESSAGE = "Hiba: {0}" + self.TASK_ERROR = "Feladat hiba: {0}" + self.APPLICATION_CLOSING = "Alkalmazás bezárása" + self.APPLICATION_CLOSED = "Alkalmazás bezárva" + self.SELECT_QUANTIZATION_TYPE = "Válassza ki a kvantálási típust" + self.ALLOWS_REQUANTIZING = "Lehetővé teszi a már kvantált tenzorok újrakvantálását" + self.LEAVE_OUTPUT_WEIGHT = "Az output.weight-et (újra)kvantálatlanul hagyja" + self.DISABLE_K_QUANT_MIXTURES = "Tiltsa le a k-kvant keverékeket, és kvantálja az összes tenzort ugyanarra a típusra" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Használja a fájlban lévő adatokat fontossági mátrixként a kvantálási optimalizálásokhoz" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Használja a fontossági mátrixot ezekre a tenzorokra" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Ne használja a fontossági mátrixot ezekre a tenzorokra" + self.OUTPUT_TENSOR_TYPE = "Kimeneti tenzor típusa:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Használja ezt a típust az output.weight tenzorhoz" + self.TOKEN_EMBEDDING_TYPE = "Token beágyazási típusa:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Használja ezt a típust a token beágyazási tenzorhoz" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "A kvantált modellt ugyanazokban a szegmensekben fogja generálni, mint a bemenet" + self.OVERRIDE_MODEL_METADATA = "Modell metaadatok felülbírálása" + self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix generáláshoz bemeneti adatfájl" + self.MODEL_TO_BE_QUANTIZED = "Kvantálandó modell" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "A generált IMatrix kimeneti útvonala" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "Milyen gyakran mentse az IMatrixot" + self.SET_GPU_OFFLOAD_VALUE = "GPU tehermentesítési érték beállítása (-ngl)" + self.COMPLETED = "Befejezve" + +class _BritishEnglish(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantiser)" + self.RAM_USAGE = "RAM Usage:" + self.CPU_USAGE = "CPU Usage:" + self.BACKEND = "Llama.cpp Backend:" + self.REFRESH_BACKENDS = "Refresh Backends" + self.MODELS_PATH = "Models Path:" + self.OUTPUT_PATH = "Output Path:" + self.LOGS_PATH = "Logs Path:" + self.BROWSE = "Browse" + self.AVAILABLE_MODELS = "Available Models:" + self.QUANTIZATION_TYPE = "Quantisation Type:" # Note the British spelling + self.ALLOW_REQUANTIZE = "Allow Requantise" + self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor" + 
self.PURE = "Pure" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Include Weights:" + self.EXCLUDE_WEIGHTS = "Exclude Weights:" + self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type" + self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type" + self.KEEP_SPLIT = "Keep Split" + self.KV_OVERRIDES = "KV Overrides:" + self.ADD_NEW_OVERRIDE = "Add new override" + self.QUANTIZE_MODEL = "Quantise Model" # Note the British spelling + self.SAVE_PRESET = "Save Preset" + self.LOAD_PRESET = "Load Preset" + self.TASKS = "Tasks:" + self.DOWNLOAD_LLAMACPP = "Download llama.cpp" + self.SELECT_RELEASE = "Select Release:" + self.SELECT_ASSET = "Select Asset:" + self.EXTRACT_CUDA_FILES = "Extract CUDA files" + self.SELECT_CUDA_BACKEND = "Select CUDA Backend:" + self.DOWNLOAD = "Download" + self.IMATRIX_GENERATION = "IMatrix Generation" + self.DATA_FILE = "Data File:" + self.MODEL = "Model:" + self.OUTPUT = "Output:" + self.OUTPUT_FREQUENCY = "Output Frequency:" + self.GPU_OFFLOAD = "GPU Offload:" + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "Generate IMatrix" + self.ERROR = "Error" + self.WARNING = "Warning" + self.PROPERTIES = "Properties" + self.CANCEL = "Cancel" + self.RESTART = "Restart" + self.DELETE = "Delete" + self.CONFIRM_DELETION = "Are you sure you want to delete this task?" + self.TASK_RUNNING_WARNING = "Some tasks are still running. Are you sure you want to quit?" + self.YES = "Yes" + self.NO = "No" + self.DOWNLOAD_COMPLETE = "Download Complete" + self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed" + self.PRESET_SAVED = "Preset Saved" + self.PRESET_LOADED = "Preset Loaded" + self.NO_ASSET_SELECTED = "No asset selected" + self.DOWNLOAD_FAILED = "Download failed" + self.NO_BACKEND_SELECTED = "No backend selected" + self.NO_MODEL_SELECTED = "No model selected" + self.REFRESH_RELEASES = "Refresh Releases" + self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}" + self.CUDA_FILES_EXTRACTED = "CUDA files extracted to" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No suitable CUDA backend found for extraction" + self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}" + self.CONFIRM_DELETION_TITLE = "Confirm Deletion" + self.LOG_FOR = "Log for {0}" + self.ALL_FILES = "All Files (*)" + self.GGUF_FILES = "GGUF Files (*.gguf)" + self.DAT_FILES = "DAT Files (*.dat)" + self.JSON_FILES = "JSON Files (*.json)" + self.FAILED_LOAD_PRESET = "Failed to load preset: {0}" + self.INITIALIZING_AUTOGGUF = "Initialising AutoGGUF application" # Note the British spelling + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialisation complete" # Note the British spelling + self.REFRESHING_BACKENDS = "Refreshing backends" + self.NO_BACKENDS_AVAILABLE = "No backends available" + self.FOUND_VALID_BACKENDS = "Found {0} valid backends" + self.SAVING_PRESET = "Saving preset" + self.PRESET_SAVED_TO = "Preset saved to {0}" + self.LOADING_PRESET = "Loading preset" + self.PRESET_LOADED_FROM = "Preset loaded from {0}" + self.ADDING_KV_OVERRIDE = "Adding KV override: {0}" + self.SAVING_TASK_PRESET = "Saving task preset for {0}" + self.TASK_PRESET_SAVED = "Task Preset Saved" + self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}" + self.RESTARTING_TASK = "Restarting task: {0}" + self.IN_PROGRESS = "In Progress" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. 
Extracted to: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No suitable CUDA backend found for extraction" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases" + self.UPDATING_ASSET_LIST = "Updating asset list" + self.UPDATING_CUDA_OPTIONS = "Updating CUDA options" + self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download" + self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends" + self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction" + self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}" + self.DOWNLOAD_ERROR = "Download error: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu" + self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}" + self.CANCELLING_TASK = "Cancelling task: {0}" + self.CANCELED = "Cancelled" + self.DELETING_TASK = "Deleting task: {0}" + self.LOADING_MODELS = "Loading models" + self.LOADED_MODELS = "Loaded {0} models" + self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory" + self.SELECT_MODELS_DIRECTORY = "Select Models Directory" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory" + self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory" + self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory" + self.SELECT_LOGS_DIRECTORY = "Select Logs Directory" + self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file" + self.SELECT_IMATRIX_FILE = "Select IMatrix File" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantisation inputs" # Note the British spelling + self.MODELS_PATH_REQUIRED = "Models path is required" + self.OUTPUT_PATH_REQUIRED = "Output path is required" + self.LOGS_PATH_REQUIRED = "Logs path is required" + self.STARTING_MODEL_QUANTIZATION = "Starting model quantisation" # Note the British spelling + self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist." 
+ self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}" + self.QUANTIZATION_TASK_STARTED = "Quantisation task started for {0}" # Note the British spelling + self.ERROR_STARTING_QUANTIZATION = "Error starting quantisation: {0}" # Note the British spelling + self.UPDATING_MODEL_INFO = "Updating model info: {0}" + self.TASK_FINISHED = "Task finished: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file" + self.SELECT_DATA_FILE = "Select Data File" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file" + self.SELECT_MODEL_FILE = "Select Model File" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file" + self.SELECT_OUTPUT_FILE = "Select Output File" + self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation" + self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}" + self.GENERATING_IMATRIX = "Generating IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Error starting IMatrix generation: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started" + self.ERROR_MESSAGE = "Error: {0}" + self.TASK_ERROR = "Task error: {0}" + self.APPLICATION_CLOSING = "Application closing" + self.APPLICATION_CLOSED = "Application closed" + self.SELECT_QUANTIZATION_TYPE = "Select the quantisation type" # Note the British spelling + self.ALLOWS_REQUANTIZING = "Allows requantising tensors that have already been quantised" # Note the British spelling + self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantised" + self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantise all tensors to the same type" # Note the British spelling + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimisations" # Note the British spelling + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Use importance matrix for these tensors" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Don't use importance matrix for these tensors" + self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Use this type for the output.weight tensor" + self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Use this type for the token embeddings tensor" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantised model in the same shards as input" # Note the British spelling + self.OVERRIDE_MODEL_METADATA = "Override model metadata" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation" + self.MODEL_TO_BE_QUANTIZED = "Model to be quantised" # Note the British spelling + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)" + self.COMPLETED = "Completed" + +class _IndianEnglish(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)" + self.RAM_USAGE = "RAM Usage:" + self.CPU_USAGE = "CPU Usage:" + self.BACKEND = "Llama.cpp Backend:" + self.REFRESH_BACKENDS = "Refresh Backends" + self.MODELS_PATH = "Models Path:" + self.OUTPUT_PATH = "Output Path:" + self.LOGS_PATH = "Logs Path:" + self.BROWSE = "Browse" + self.AVAILABLE_MODELS = "Available Models:" + self.QUANTIZATION_TYPE = "Quantization Type:" + self.ALLOW_REQUANTIZE = "Allow Requantize" + self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor" + self.PURE = "Pure" + 
self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Include Weights:" + self.EXCLUDE_WEIGHTS = "Exclude Weights:" + self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type" + self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type" + self.KEEP_SPLIT = "Keep Split" + self.KV_OVERRIDES = "KV Overrides:" + self.ADD_NEW_OVERRIDE = "Add new override" + self.QUANTIZE_MODEL = "Quantize Model" + self.SAVE_PRESET = "Save Preset" + self.LOAD_PRESET = "Load Preset" + self.TASKS = "Tasks:" + self.DOWNLOAD_LLAMACPP = "Download llama.cpp" + self.SELECT_RELEASE = "Select Release:" + self.SELECT_ASSET = "Select Asset:" + self.EXTRACT_CUDA_FILES = "Extract CUDA files" + self.SELECT_CUDA_BACKEND = "Select CUDA Backend:" + self.DOWNLOAD = "Download" + self.IMATRIX_GENERATION = "IMatrix Generation" + self.DATA_FILE = "Data File:" + self.MODEL = "Model:" + self.OUTPUT = "Output:" + self.OUTPUT_FREQUENCY = "Output Frequency:" + self.GPU_OFFLOAD = "GPU Offload:" + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "Generate IMatrix" + self.ERROR = "Error" + self.WARNING = "Warning" + self.PROPERTIES = "Properties" + self.CANCEL = "Cancel" + self.RESTART = "Restart" + self.DELETE = "Delete" + self.CONFIRM_DELETION = "Are you sure you want to delete this task?" + self.TASK_RUNNING_WARNING = "Some tasks are still running. Are you sure you want to quit?" + self.YES = "Yes" + self.NO = "No" + self.DOWNLOAD_COMPLETE = "Download Complete" + self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed" + self.PRESET_SAVED = "Preset Saved" + self.PRESET_LOADED = "Preset Loaded" + self.NO_ASSET_SELECTED = "No asset selected" + self.DOWNLOAD_FAILED = "Download failed" + self.NO_BACKEND_SELECTED = "No backend selected" + self.NO_MODEL_SELECTED = "No model selected" + self.REFRESH_RELEASES = "Refresh Releases" + self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}" + self.CUDA_FILES_EXTRACTED = "CUDA files extracted to" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No suitable CUDA backend found for extraction" + self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}" + self.CONFIRM_DELETION_TITLE = "Confirm Deletion" + self.LOG_FOR = "Log for {0}" + self.ALL_FILES = "All Files (*)" + self.GGUF_FILES = "GGUF Files (*.gguf)" + self.DAT_FILES = "DAT Files (*.dat)" + self.JSON_FILES = "JSON Files (*.json)" + self.FAILED_LOAD_PRESET = "Failed to load preset: {0}" + self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete" + self.REFRESHING_BACKENDS = "Refreshing backends" + self.NO_BACKENDS_AVAILABLE = "No backends available" + self.FOUND_VALID_BACKENDS = "Found {0} valid backends" + self.SAVING_PRESET = "Saving preset" + self.PRESET_SAVED_TO = "Preset saved to {0}" + self.LOADING_PRESET = "Loading preset" + self.PRESET_LOADED_FROM = "Preset loaded from {0}" + self.ADDING_KV_OVERRIDE = "Adding KV override: {0}" + self.SAVING_TASK_PRESET = "Saving task preset for {0}" + self.TASK_PRESET_SAVED = "Task Preset Saved" + self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}" + self.RESTARTING_TASK = "Restarting task: {0}" + self.IN_PROGRESS = "In Progress" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. 
Extracted to: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No suitable CUDA backend found for extraction" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases" + self.UPDATING_ASSET_LIST = "Updating asset list" + self.UPDATING_CUDA_OPTIONS = "Updating CUDA options" + self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download" + self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends" + self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction" + self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}" + self.DOWNLOAD_ERROR = "Download error: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu" + self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}" + self.CANCELLING_TASK = "Cancelling task: {0}" + self.CANCELED = "Cancelled" + self.DELETING_TASK = "Deleting task: {0}" + self.LOADING_MODELS = "Loading models" + self.LOADED_MODELS = "Loaded {0} models" + self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory" + self.SELECT_MODELS_DIRECTORY = "Select Models Directory" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory" + self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory" + self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory" + self.SELECT_LOGS_DIRECTORY = "Select Logs Directory" + self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file" + self.SELECT_IMATRIX_FILE = "Select IMatrix File" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs" + self.MODELS_PATH_REQUIRED = "Models path is required" + self.OUTPUT_PATH_REQUIRED = "Output path is required" + self.LOGS_PATH_REQUIRED = "Logs path is required" + self.STARTING_MODEL_QUANTIZATION = "Starting model quantization" + self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist." 
+ self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}" + self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}" + self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}" + self.UPDATING_MODEL_INFO = "Updating model info: {0}" + self.TASK_FINISHED = "Task finished: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file" + self.SELECT_DATA_FILE = "Select Data File" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file" + self.SELECT_MODEL_FILE = "Select Model File" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file" + self.SELECT_OUTPUT_FILE = "Select Output File" + self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation" + self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}" + self.GENERATING_IMATRIX = "Generating IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Error starting IMatrix generation: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started" + self.ERROR_MESSAGE = "Error: {0}" + self.TASK_ERROR = "Task error: {0}" + self.APPLICATION_CLOSING = "Application closing" + self.APPLICATION_CLOSED = "Application closed" + self.SELECT_QUANTIZATION_TYPE = "Select the quantization type" + self.ALLOWS_REQUANTIZING = "Allows requantizing tensors that have already been quantized" + self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized" + self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantize all tensors to the same type" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimisations" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Use importance matrix for these tensors" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Don't use importance matrix for these tensors" + self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Use this type for the output.weight tensor" + self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Use this type for the token embeddings tensor" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantized model in the same shards as input" + self.OVERRIDE_MODEL_METADATA = "Override model metadata" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation" + self.MODEL_TO_BE_QUANTIZED = "Model to be quantized" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)" + self.COMPLETED = "Completed" + +class _CanadianEnglish(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)" + self.RAM_USAGE = "RAM Usage:" + self.CPU_USAGE = "CPU Usage:" + self.BACKEND = "Llama.cpp Backend:" + self.REFRESH_BACKENDS = "Refresh Backends" + self.MODELS_PATH = "Models Path:" + self.OUTPUT_PATH = "Output Path:" + self.LOGS_PATH = "Logs Path:" + self.BROWSE = "Browse" + self.AVAILABLE_MODELS = "Available Models:" + self.QUANTIZATION_TYPE = "Quantization Type:" + self.ALLOW_REQUANTIZE = "Allow Requantize" + self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor" + self.PURE = "Pure" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "Include Weights:" + self.EXCLUDE_WEIGHTS = "Exclude Weights:" + self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type" + self.USE_TOKEN_EMBEDDING_TYPE = "Use Token 
Embedding Type" + self.KEEP_SPLIT = "Keep Split" + self.KV_OVERRIDES = "KV Overrides:" + self.ADD_NEW_OVERRIDE = "Add new override" + self.QUANTIZE_MODEL = "Quantize Model" + self.SAVE_PRESET = "Save Preset" + self.LOAD_PRESET = "Load Preset" + self.TASKS = "Tasks:" + self.DOWNLOAD_LLAMACPP = "Download llama.cpp" + self.SELECT_RELEASE = "Select Release:" + self.SELECT_ASSET = "Select Asset:" + self.EXTRACT_CUDA_FILES = "Extract CUDA files" + self.SELECT_CUDA_BACKEND = "Select CUDA Backend:" + self.DOWNLOAD = "Download" + self.IMATRIX_GENERATION = "IMatrix Generation" + self.DATA_FILE = "Data File:" + self.MODEL = "Model:" + self.OUTPUT = "Output:" + self.OUTPUT_FREQUENCY = "Output Frequency:" + self.GPU_OFFLOAD = "GPU Offload:" + self.AUTO = "Auto" + self.GENERATE_IMATRIX = "Generate IMatrix" + self.ERROR = "Error" + self.WARNING = "Warning" + self.PROPERTIES = "Properties" + self.CANCEL = "Cancel" + self.RESTART = "Restart" + self.DELETE = "Delete" + self.CONFIRM_DELETION = "Are you sure you want to delete this task?" + self.TASK_RUNNING_WARNING = "Some tasks are still running. Are you sure you want to quit?" + self.YES = "Yes" + self.NO = "No" + self.DOWNLOAD_COMPLETE = "Download Complete" + self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed" + self.PRESET_SAVED = "Preset Saved" + self.PRESET_LOADED = "Preset Loaded" + self.NO_ASSET_SELECTED = "No asset selected" + self.DOWNLOAD_FAILED = "Download failed" + self.NO_BACKEND_SELECTED = "No backend selected" + self.NO_MODEL_SELECTED = "No model selected" + self.REFRESH_RELEASES = "Refresh Releases" + self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}" + self.CUDA_FILES_EXTRACTED = "CUDA files extracted to" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No suitable CUDA backend found for extraction" + self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}" + self.CONFIRM_DELETION_TITLE = "Confirm Deletion" + self.LOG_FOR = "Log for {0}" + self.ALL_FILES = "All Files (*)" + self.GGUF_FILES = "GGUF Files (*.gguf)" + self.DAT_FILES = "DAT Files (*.dat)" + self.JSON_FILES = "JSON Files (*.json)" + self.FAILED_LOAD_PRESET = "Failed to load preset: {0}" + self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete" + self.REFRESHING_BACKENDS = "Refreshing backends" + self.NO_BACKENDS_AVAILABLE = "No backends available" + self.FOUND_VALID_BACKENDS = "Found {0} valid backends" + self.SAVING_PRESET = "Saving preset" + self.PRESET_SAVED_TO = "Preset saved to {0}" + self.LOADING_PRESET = "Loading preset" + self.PRESET_LOADED_FROM = "Preset loaded from {0}" + self.ADDING_KV_OVERRIDE = "Adding KV override: {0}" + self.SAVING_TASK_PRESET = "Saving task preset for {0}" + self.TASK_PRESET_SAVED = "Task Preset Saved" + self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}" + self.RESTARTING_TASK = "Restarting task: {0}" + self.IN_PROGRESS = "In Progress" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. 
Extracted to: {0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No suitable CUDA backend found for extraction" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}" + self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases" + self.UPDATING_ASSET_LIST = "Updating asset list" + self.UPDATING_CUDA_OPTIONS = "Updating CUDA options" + self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download" + self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends" + self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction" + self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}" + self.DOWNLOAD_ERROR = "Download error: {0}" + self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu" + self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}" + self.CANCELLING_TASK = "Cancelling task: {0}" + self.CANCELED = "Cancelled" + self.DELETING_TASK = "Deleting task: {0}" + self.LOADING_MODELS = "Loading models" + self.LOADED_MODELS = "Loaded {0} models" + self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory" + self.SELECT_MODELS_DIRECTORY = "Select Models Directory" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory" + self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory" + self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory" + self.SELECT_LOGS_DIRECTORY = "Select Logs Directory" + self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file" + self.SELECT_IMATRIX_FILE = "Select IMatrix File" + self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs" + self.MODELS_PATH_REQUIRED = "Models path is required" + self.OUTPUT_PATH_REQUIRED = "Output path is required" + self.LOGS_PATH_REQUIRED = "Logs path is required" + self.STARTING_MODEL_QUANTIZATION = "Starting model quantization" + self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist." 
+ self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}" + self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}" + self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}" + self.UPDATING_MODEL_INFO = "Updating model info: {0}" + self.TASK_FINISHED = "Task finished: {0}" + self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file" + self.SELECT_DATA_FILE = "Select Data File" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file" + self.SELECT_MODEL_FILE = "Select Model File" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file" + self.SELECT_OUTPUT_FILE = "Select Output File" + self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation" + self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}" + self.GENERATING_IMATRIX = "Generating IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "Error starting IMatrix generation: {0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started" + self.ERROR_MESSAGE = "Error: {0}" + self.TASK_ERROR = "Task error: {0}" + self.APPLICATION_CLOSING = "Application closing" + self.APPLICATION_CLOSED = "Application closed" + self.SELECT_QUANTIZATION_TYPE = "Select the quantization type" + self.ALLOWS_REQUANTIZING = "Allows requantizing tensors that have already been quantized" + self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized" + self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantize all tensors to the same type" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimisations" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Use importance matrix for these tensors" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Don't use importance matrix for these tensors" + self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Use this type for the output.weight tensor" + self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Use this type for the token embeddings tensor" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantized model in the same shards as input" + self.OVERRIDE_MODEL_METADATA = "Override model metadata" + self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation" + self.MODEL_TO_BE_QUANTIZED = "Model to be quantized" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix" + self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)" + self.COMPLETED = "Completed" + +class _TraditionalChinese(_Localization): + def __init__(self): + super().__init__() + self.WINDOW_TITLE = "AutoGGUF(自動 GGUF 模型量化器)" + self.RAM_USAGE = "RAM 使用量:" + self.CPU_USAGE = "CPU 使用率:" + self.BACKEND = "Llama.cpp 後端:" + self.REFRESH_BACKENDS = "重新整理後端" + self.MODELS_PATH = "模型路徑:" + self.OUTPUT_PATH = "輸出路徑:" + self.LOGS_PATH = "日誌路徑:" + self.BROWSE = "瀏覽" + self.AVAILABLE_MODELS = "可用模型:" + self.QUANTIZATION_TYPE = "量化類型:" + self.ALLOW_REQUANTIZE = "允許重新量化" + self.LEAVE_OUTPUT_TENSOR = "保留輸出張量" + self.PURE = "純粹" + self.IMATRIX = "IMatrix:" + self.INCLUDE_WEIGHTS = "包含權重:" + self.EXCLUDE_WEIGHTS = "排除權重:" + self.USE_OUTPUT_TENSOR_TYPE = "使用輸出張量類型" + self.USE_TOKEN_EMBEDDING_TYPE = "使用權杖嵌入類型" + self.KEEP_SPLIT = "保持分割" + self.KV_OVERRIDES = "KV 覆蓋:" + self.ADD_NEW_OVERRIDE = "新增覆蓋" + self.QUANTIZE_MODEL = "量化模型" + self.SAVE_PRESET = "儲存預設" 
+ self.LOAD_PRESET = "載入預設" + self.TASKS = "任務:" + self.DOWNLOAD_LLAMACPP = "下載 llama.cpp" + self.SELECT_RELEASE = "選擇版本:" + self.SELECT_ASSET = "選擇資源:" + self.EXTRACT_CUDA_FILES = "解壓縮 CUDA 檔案" + self.SELECT_CUDA_BACKEND = "選擇 CUDA 後端:" + self.DOWNLOAD = "下載" + self.IMATRIX_GENERATION = "IMatrix 產生" + self.DATA_FILE = "資料檔案:" + self.MODEL = "模型:" + self.OUTPUT = "輸出:" + self.OUTPUT_FREQUENCY = "輸出頻率:" + self.GPU_OFFLOAD = "GPU 卸載:" + self.AUTO = "自動" + self.GENERATE_IMATRIX = "產生 IMatrix" + self.ERROR = "錯誤" + self.WARNING = "警告" + self.PROPERTIES = "屬性" + self.CANCEL = "取消" + self.RESTART = "重新啟動" + self.DELETE = "刪除" + self.CONFIRM_DELETION = "您確定要刪除此任務嗎?" + self.TASK_RUNNING_WARNING = "某些任務仍在執行中。您確定要結束嗎?" + self.YES = "是" + self.NO = "否" + self.DOWNLOAD_COMPLETE = "下載完成" + self.CUDA_EXTRACTION_FAILED = "CUDA 解壓縮失敗" + self.PRESET_SAVED = "預設已儲存" + self.PRESET_LOADED = "預設已載入" + self.NO_ASSET_SELECTED = "未選擇資源" + self.DOWNLOAD_FAILED = "下載失敗" + self.NO_BACKEND_SELECTED = "未選擇後端" + self.NO_MODEL_SELECTED = "未選擇模型" + self.REFRESH_RELEASES = "重新整理版本" + self.NO_SUITABLE_CUDA_BACKENDS = "找不到合適的 CUDA 後端" + self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp 二進位檔案已下載並解壓縮至 {0}\nCUDA 檔案已解壓縮至 {1}" + self.CUDA_FILES_EXTRACTED = "CUDA 檔案已解壓縮至" + self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "找不到合適的 CUDA 後端進行解壓縮" + self.ERROR_FETCHING_RELEASES = "擷取版本時發生錯誤:{0}" + self.CONFIRM_DELETION_TITLE = "確認刪除" + self.LOG_FOR = "{0} 的日誌" + self.ALL_FILES = "所有檔案 (*)" + self.GGUF_FILES = "GGUF 檔案 (*.gguf)" + self.DAT_FILES = "DAT 檔案 (*.dat)" + self.JSON_FILES = "JSON 檔案 (*.json)" + self.FAILED_LOAD_PRESET = "載入預設失敗:{0}" + self.INITIALIZING_AUTOGGUF = "正在初始化 AutoGGUF 應用程式" + self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF 初始化完成" + self.REFRESHING_BACKENDS = "正在重新整理後端" + self.NO_BACKENDS_AVAILABLE = "沒有可用的後端" + self.FOUND_VALID_BACKENDS = "找到 {0} 個有效的後端" + self.SAVING_PRESET = "正在儲存預設" + self.PRESET_SAVED_TO = "預設已儲存至 {0}" + self.LOADING_PRESET = "正在載入預設" + self.PRESET_LOADED_FROM = "從 {0} 載入了預設" + self.ADDING_KV_OVERRIDE = "正在新增 KV 覆蓋:{0}" + self.SAVING_TASK_PRESET = "正在儲存 {0} 的任務預設" + self.TASK_PRESET_SAVED = "任務預設已儲存" + self.TASK_PRESET_SAVED_TO = "任務預設已儲存至 {0}" + self.RESTARTING_TASK = "正在重新啟動任務:{0}" + self.IN_PROGRESS = "處理中" + self.DOWNLOAD_FINISHED_EXTRACTED_TO = "下載完成。已解壓縮至:{0}" + self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp 二進位檔案已下載並解壓縮至 {0}\nCUDA 檔案已解壓縮至 {1}" + self.NO_SUITABLE_CUDA_BACKEND_FOUND = "找不到合適的 CUDA 後端進行解壓縮" + self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp 二進位檔案已下載並解壓縮至 {0}" + self.REFRESHING_LLAMACPP_RELEASES = "正在重新整理 llama.cpp 版本" + self.UPDATING_ASSET_LIST = "正在更新資源清單" + self.UPDATING_CUDA_OPTIONS = "正在更新 CUDA 選項" + self.STARTING_LLAMACPP_DOWNLOAD = "正在開始下載 llama.cpp" + self.UPDATING_CUDA_BACKENDS = "正在更新 CUDA 後端" + self.NO_CUDA_BACKEND_SELECTED = "未選擇要解壓縮的 CUDA 後端" + self.EXTRACTING_CUDA_FILES = "正在從 {0} 解壓縮 CUDA 檔案至 {1}" + self.DOWNLOAD_ERROR = "下載錯誤:{0}" + self.SHOWING_TASK_CONTEXT_MENU = "正在顯示任務操作選單" + self.SHOWING_PROPERTIES_FOR_TASK = "正在顯示任務的屬性:{0}" + self.CANCELLING_TASK = "正在取消任務:{0}" + self.CANCELED = "已取消" + self.DELETING_TASK = "正在刪除任務:{0}" + self.LOADING_MODELS = "正在載入模型" + self.LOADED_MODELS = "已載入 {0} 個模型" + self.BROWSING_FOR_MODELS_DIRECTORY = "正在瀏覽模型目錄" + self.SELECT_MODELS_DIRECTORY = "選擇模型目錄" + self.BROWSING_FOR_OUTPUT_DIRECTORY = "正在瀏覽輸出目錄" + self.SELECT_OUTPUT_DIRECTORY = "選擇輸出目錄" + self.BROWSING_FOR_LOGS_DIRECTORY = "正在瀏覽日誌目錄" + self.SELECT_LOGS_DIRECTORY = "選擇日誌目錄" + self.BROWSING_FOR_IMATRIX_FILE = "正在瀏覽 IMatrix 檔案" + self.SELECT_IMATRIX_FILE = "選擇 
IMatrix 檔案" + self.RAM_USAGE_FORMAT = "{0:.1f}%({1} MB / {2} MB)" + self.CPU_USAGE_FORMAT = "CPU 使用率:{0:.1f}%" + self.VALIDATING_QUANTIZATION_INPUTS = "正在驗證量化輸入" + self.MODELS_PATH_REQUIRED = "需要模型路徑" + self.OUTPUT_PATH_REQUIRED = "需要輸出路徑" + self.LOGS_PATH_REQUIRED = "需要日誌路徑" + self.STARTING_MODEL_QUANTIZATION = "正在開始模型量化" + self.INPUT_FILE_NOT_EXIST = "輸入檔案 '{0}' 不存在。" + self.QUANTIZING_MODEL_TO = "正在將 {0} 量化為 {1}" + self.QUANTIZATION_TASK_STARTED = "已啟動 {0} 的量化任務" + self.ERROR_STARTING_QUANTIZATION = "啟動量化時發生錯誤:{0}" + self.UPDATING_MODEL_INFO = "正在更新模型資訊:{0}" + self.TASK_FINISHED = "任務完成:{0}" + self.SHOWING_TASK_DETAILS_FOR = "正在顯示任務詳細資訊:{0}" + self.BROWSING_FOR_IMATRIX_DATA_FILE = "正在瀏覽 IMatrix 資料檔案" + self.SELECT_DATA_FILE = "選擇資料檔案" + self.BROWSING_FOR_IMATRIX_MODEL_FILE = "正在瀏覽 IMatrix 模型檔案" + self.SELECT_MODEL_FILE = "選擇模型檔案" + self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "正在瀏覽 IMatrix 輸出檔案" + self.SELECT_OUTPUT_FILE = "選擇輸出檔案" + self.STARTING_IMATRIX_GENERATION = "正在開始 IMatrix 產生" + self.BACKEND_PATH_NOT_EXIST = "後端路徑不存在:{0}" + self.GENERATING_IMATRIX = "正在產生 IMatrix" + self.ERROR_STARTING_IMATRIX_GENERATION = "啟動 IMatrix 產生時發生錯誤:{0}" + self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix 產生任務已啟動" + self.ERROR_MESSAGE = "錯誤:{0}" + self.TASK_ERROR = "任務錯誤:{0}" + self.APPLICATION_CLOSING = "應用程式正在關閉" + self.APPLICATION_CLOSED = "應用程式已關閉" + self.SELECT_QUANTIZATION_TYPE = "請選擇量化類型" + self.ALLOWS_REQUANTIZING = "允許重新量化已量化的張量" + self.LEAVE_OUTPUT_WEIGHT = "將保留 output.weight 不被(重新)量化" + self.DISABLE_K_QUANT_MIXTURES = "停用 k-quant 混合並將所有張量量化為相同類型" + self.USE_DATA_AS_IMPORTANCE_MATRIX = "使用檔案中的資料作為量化最佳化的重要性矩陣" + self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "對這些張量使用重要性矩陣" + self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "不要對這些張量使用重要性矩陣" + self.OUTPUT_TENSOR_TYPE = "輸出張量類型:" + self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "對 output.weight 張量使用此類型" + self.TOKEN_EMBEDDING_TYPE = "權杖嵌入類型:" + self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "對權杖嵌入張量使用此類型" + self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "將在與輸入相同的分片中產生量化模型" + self.OVERRIDE_MODEL_METADATA = "覆蓋模型中繼資料" + self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix 產生的輸入資料檔案" + self.MODEL_TO_BE_QUANTIZED = "要量化的模型" + self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "產生的 IMatrix 的輸出路徑" + self.HOW_OFTEN_TO_SAVE_IMATRIX = "儲存 IMatrix 的頻率" + self.SET_GPU_OFFLOAD_VALUE = "設定 GPU 卸載值(-ngl)" + self.COMPLETED = "已完成" + +# Dictionary to map language codes to classes +_languages = { + 'en-US': _English, # American English + 'fr-FR': _French, # Metropolitan French + 'zh-CN': _SimplifiedChinese, # Simplified Chinese + 'es-ES': _Spanish, # Spanish (Spain) + 'hi-IN': _Hindi, # Hindi (India) + 'ru-RU': _Russian, # Russian (Russia) + 'uk-UA': _Ukrainian, # Ukrainian (Ukraine) + 'ja-JP': _Japanese, # Japanese (Japan) + 'de-DE': _German, # German (Germany) + 'pt-BR': _Portuguese, # Portuguese (Brazil) + 'ar-SA': _Arabic, # Arabic (Saudi Arabia) + 'ko-KR': _Korean, # Korean (Korea) + 'it-IT': _Italian, # Italian (Italy) + 'tr-TR': _Turkish, # Turkish (Turkey) + 'nl-NL': _Dutch, # Dutch (Netherlands) + 'fi-FI': _Finnish, # Finnish (Finland) + 'bn-BD': _Bengali, # Bengali (Bangladesh) + 'cs-CZ': _Czech, # Czech (Czech Republic) + 'pl-PL': _Polish, # Polish (Poland) + 'ro-RO': _Romanian, # Romanian (Romania) + 'el-GR': _Greek, # Greek (Greece) + 'pt-PT': _Portuguese_PT, # Portuguese (Portugal) + 'hu-HU': _Hungarian, # Hungarian (Hungary) + 'en-GB': _BritishEnglish, # British English + 'fr-CA': _CanadianFrench, # Canadian French + 'en-IN': _IndianEnglish, # Indian English + 'en-CA': _CanadianEnglish, # 
Canadian English + 'zh-TW': _TraditionalChinese, # Traditional Chinese (Taiwan) +} + +def set_language(lang_code): + # Look up the locale class for lang_code (falling back to American English) and copy + # its public attributes into this module's globals; assigning through globals() rebinds + # the module-level constants directly, so a long per-name `global` declaration is unnecessary. + loc = _languages.get(lang_code, _English)() + for key, value in loc.__dict__.items(): + if not key.startswith('_'): + globals()[key] = value +
+# Get the language from the AUTOGGUF_LANGUAGE environment variable, defaulting to 'en-US' +language_code = os.getenv('AUTOGGUF_LANGUAGE', 'en-US') + +# Set default language +set_language(language_code) \ No newline at end of file diff --git a/main.py b/main.py new file mode 100644 index 0000000..6e24354 --- /dev/null +++ b/main.py @@ -0,0 +1,9 @@ +import sys +from PyQt6.QtWidgets import QApplication +from AutoGGUF import AutoGGUF + +if __name__ == "__main__": + app = QApplication(sys.argv) + window = AutoGGUF() + window.show() + sys.exit(app.exec()) \ No newline at end of file
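The closing lines of the localization module above are the whole selection mechanism: pick a locale class from _languages, instantiate it, and copy its public attributes into module globals, with AUTOGGUF_LANGUAGE choosing the locale at import time. A minimal sketch of exercising that mechanism, assuming the module above is saved as localizations.py (the file name is an assumption for illustration, not stated in this diff):

import os

# AUTOGGUF_LANGUAGE must be set before the module is imported, because
# set_language() runs once at the bottom of the module during import.
os.environ["AUTOGGUF_LANGUAGE"] = "zh-TW"

import localizations

print(localizations.DOWNLOAD)          # 下載
print(localizations.GENERATE_IMATRIX)  # 產生 IMatrix

# Switching afterwards rebinds the module-level constants in place, so
# attribute access picks up the new locale immediately.
localizations.set_language("en-CA")
print(localizations.DOWNLOAD)          # Download

One caveat worth keeping in mind: any caller that star-imports these constants takes a snapshot at import time and will not observe a later set_language() call; reading them as localizations.<NAME>, as above, does.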
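Adding another locale follows the same pattern as the classes above: define a localization class, override the strings that differ, and register it under its language tag in _languages. A hypothetical sketch, as it might be appended to this module; _AustralianEnglish, 'en-AU', and the overridden title are illustrative only and not part of this diff:

class _AustralianEnglish(_CanadianEnglish):
    def __init__(self):
        super().__init__()
        # Reuse every Canadian English string and override only what differs.
        self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantiser)"

# Register the new tag so set_language() can resolve it like the built-in ones.
_languages['en-AU'] = _AustralianEnglish
set_language('en-AU')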