mirror of https://github.com/leafspark/AutoGGUF
feat(ui): update display of properties and add certifi
- updated project files
- added certifi to backend download and update checking
- add and fix type hints
- small file formatting changes
- update formatting of KV pairs to be cleaner
- update tensor data formatting and remove redundant KV pairs property
- add human readable mappings from KV pairs into model properties
- update CUDA backend check for latest llama.cpp format
- use urllib globally
This commit is contained in:
parent 1381665d00
commit 7c2a0b7ec1
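The thread running through most of these hunks is one pattern: build an `ssl.SSLContext` from certifi's CA bundle and pass it to `urllib.request.urlopen`, replacing both bare `urlopen` calls and the one `requests.get` call. A minimal, self-contained sketch of that pattern (the endpoint is the same one the commit uses; the motivation — reliable certificate verification even where the system CA store is missing or stale — is a common one, not stated in the commit):

```python
import json
import ssl
import urllib.error
import urllib.request

import certifi

# certifi ships a current CA bundle; create_default_context uses it
# for certificate verification instead of the system store.
ssl_context = ssl.create_default_context(cafile=certifi.where())

url = "https://api.github.com/repos/leafspark/AutoGGUF/releases/latest"
req = urllib.request.Request(url)

try:
    with urllib.request.urlopen(req, context=ssl_context) as response:
        latest = json.loads(response.read().decode("utf-8"))
        print(latest.get("tag_name"))
except (urllib.error.URLError, urllib.error.HTTPError) as e:
    print(f"update check failed: {e}")
```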
```diff
@@ -4,10 +4,10 @@ ## Supported Versions

 | Version         | Supported          |
 |-----------------|--------------------|
-| stable (v1.9.x) | :white_check_mark: |
+| stable (v2.0.x) | :white_check_mark: |

 Beta versions are not officially supported and may contain unknown security vulnerabilities. Use them at your own risk.

 ## Reporting a Vulnerability

-Use the Issues tab, or for severe vulnerabilities please contact the maintainers via email.
+Use the Issues tab, or for severe vulnerabilities, please contact the maintainers via email.
```
```diff
@@ -6,8 +6,9 @@ safetensors~=0.5.3
 numpy<2.0.0
 torch~=2.7.0
 sentencepiece~=0.2.0
-setuptools~=80.4.0
-huggingface-hub~=0.31.1
+setuptools~=80.7.1
+huggingface-hub~=0.31.2
 transformers~=4.51.3
 fastapi~=0.115.12
 uvicorn~=0.34.2
+certifi~=2025.4.26
```
```diff
@@ -2,6 +2,8 @@
 import shutil
 import urllib.error
 import urllib.request
+import certifi
+import ssl
 from datetime import datetime
 from functools import partial, wraps
 from typing import List
```
```diff
@@ -1148,7 +1150,10 @@ def check_for_updates(self) -> None:
         url = "https://api.github.com/repos/leafspark/AutoGGUF/releases/latest"
         req = urllib.request.Request(url)

-        with urllib.request.urlopen(req) as response:
+        # Create SSL context with certifi certificates
+        ssl_context = ssl.create_default_context(cafile=certifi.where())
+
+        with urllib.request.urlopen(req, context=ssl_context) as response:
             if response.status != 200:
                 raise urllib.error.HTTPError(
                     url, response.status, "HTTP Error", response.headers, None
```
```diff
@@ -98,7 +98,7 @@ def mouseMoveEvent(self, event) -> None:
     def mouseReleaseEvent(self, event) -> None:
         self.pressing = False

-    def toggle_maximize(self):
+    def toggle_maximize(self) -> None:
         if self.isMaximized:
             self.parent.showNormal()
             if self.normal_size:
```
```diff
@@ -2,6 +2,8 @@
 import urllib.request
 import urllib.error
 import zipfile
+import ssl
+import certifi
 from PySide6.QtCore import QThread, Signal


```
```diff
@@ -19,7 +21,10 @@ def run(self) -> None:
         try:
             req = urllib.request.Request(self.url)

-            with urllib.request.urlopen(req) as response:
+            # Create SSL context with certifi certificates
+            ssl_context = ssl.create_default_context(cafile=certifi.where())
+
+            with urllib.request.urlopen(req, context=ssl_context) as response:
                 if response.status != 200:
                     raise urllib.error.HTTPError(
                         self.url, response.status, "HTTP Error", response.headers, None
```
```diff
@@ -22,6 +22,7 @@ def __init__(self, parent=None) -> None:

         self.key_input = QLineEdit()
         self.key_input.setPlaceholderText("Key")

+        # Set validator for key input (letters and dots only)
         key_validator = QRegularExpressionValidator(QRegularExpression(r"[A-Za-z.]+"))
         self.key_input.setValidator(key_validator)
```
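For reference, a `QRegularExpressionValidator` attached to a `QLineEdit` silently rejects any keystroke that would make the text stop matching the pattern, so keys stay GGUF-shaped as the user types. A standalone sketch of the same setup (the surrounding application scaffolding is illustrative, not from the commit):

```python
import sys

from PySide6.QtCore import QRegularExpression
from PySide6.QtGui import QRegularExpressionValidator
from PySide6.QtWidgets import QApplication, QLineEdit

app = QApplication(sys.argv)

key_input = QLineEdit()
key_input.setPlaceholderText("Key")

# Letters and dots only, matching GGUF-style keys such as "general.name"
key_validator = QRegularExpressionValidator(QRegularExpression(r"[A-Za-z.]+"))
key_input.setValidator(key_validator)

key_input.show()
sys.exit(app.exec())
```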
```diff
@@ -365,7 +365,7 @@ def __init__(self):
         self.ADDING_LORA_ADAPTER = "Adding LoRA Adapter..."
         self.DELETING_LORA_ADAPTER = "Deleting LoRA Adapter..."
         self.SELECT_LORA_ADAPTER_FILE = "Select LoRA Adapter File"
-        self.STARTING_LORA_EXPORT = "Starting LoRA export..."
+        self.STARTING_LORA_EXPORT = "Starting LoRA export"
         self.SELECT_OUTPUT_TYPE = "Select Output Type (GGUF or GGML)"
         self.BASE_MODEL = "Base Model"
         self.SELECT_BASE_MODEL_FILE = "Select Base Model File (GGUF)"
```
```diff
@@ -24,8 +24,21 @@ def __init__(self, model_info, parent=None) -> None:
     def format_model_info(self, model_info) -> str:
         html = "<h2>Model Information</h2>"
         html += f"<p><b>Architecture:</b> {model_info.get('architecture', 'N/A')}</p>"
-        html += f"<p><b>Quantization Type:</b> {model_info.get('quantization_type', 'N/A')}</p>"
-        html += f"<p><b>KV Pairs:</b> {model_info.get('kv_pairs', 'N/A')}</p>"
+
+        # Format quantization types
+        quant_types = model_info.get("quantization_type", [])
+        if quant_types:
+            # Clean up the format: remove "- type " prefix and join with " | "
+            formatted_types = []
+            for qtype in quant_types:
+                # Remove "- type " prefix if present
+                clean_type = qtype.replace("- type ", "").strip()
+                formatted_types.append(clean_type)
+            quant_display = " | ".join(formatted_types)
+        else:
+            quant_display = "N/A"
+
+        html += f"<p><b>Quantization Type:</b> {quant_display}</p>"
         html += f"<p><b>Tensors:</b> {model_info.get('tensors', 'N/A')}</p>"

         html += "<h3>Key-Value Pairs:</h3>"
```
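For a concrete sense of what the new formatting loop does, here is the transformation applied to made-up strings of the shape llama.cpp prints in its per-type tensor summary:

```python
# Hypothetical values patterned on llama.cpp's "- type" summary lines
quant_types = ["- type  f32:   65 tensors", "- type q4_K:  193 tensors"]

formatted_types = []
for qtype in quant_types:
    # Drop the "- type " prefix and outer whitespace
    formatted_types.append(qtype.replace("- type ", "").strip())

quant_display = " | ".join(formatted_types) if formatted_types else "N/A"
print(quant_display)  # f32:   65 tensors | q4_K:  193 tensors
```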
```diff
@@ -59,6 +59,34 @@ def run(self) -> None:
             self.error_signal.emit(str(e))

     def parse_model_info(self, line) -> None:
+        # Mapping of technical keys to human-readable names
+        key_mappings = {
+            "general.architecture": "Architecture",
+            "general.name": "Model Name",
+            "general.file_type": "File Type",
+            "general.quantization_version": "Quantization Version",
+            "llama.block_count": "Layers",
+            "llama.context_length": "Context Length",
+            "llama.embedding_length": "Embedding Size",
+            "llama.feed_forward_length": "Feed Forward Length",
+            "llama.attention.head_count": "Attention Heads",
+            "llama.attention.head_count_kv": "Key-Value Heads",
+            "llama.attention.layer_norm_rms_epsilon": "RMS Norm Epsilon",
+            "llama.rope.freq_base": "RoPE Frequency Base",
+            "llama.rope.dimension_count": "RoPE Dimensions",
+            "llama.vocab_size": "Vocabulary Size",
+            "tokenizer.ggml.model": "Tokenizer Model",
+            "tokenizer.ggml.pre": "Tokenizer Preprocessing",
+            "tokenizer.ggml.tokens": "Tokens",
+            "tokenizer.ggml.token_type": "Token Types",
+            "tokenizer.ggml.merges": "BPE Merges",
+            "tokenizer.ggml.bos_token_id": "Begin of Sequence Token ID",
+            "tokenizer.ggml.eos_token_id": "End of Sequence Token ID",
+            "tokenizer.chat_template": "Chat Template",
+            "tokenizer.ggml.padding_token_id": "Padding Token ID",
+            "tokenizer.ggml.unk_token_id": "Unknown Token ID",
+        }
+
         # Parse output for model information
         if "llama_model_loader: loaded meta data with" in line:
             parts = line.split()
```
```diff
@@ -66,10 +94,25 @@ def parse_model_info(self, line) -> None:
             self.model_info["tensors"] = parts[9]
         elif "general.architecture" in line:
             self.model_info["architecture"] = line.split("=")[-1].strip()
-        elif line.startswith("llama_model_loader: - kv"):
-            key = line.split(":")[2].strip()
-            value = line.split("=")[-1].strip()
-            self.model_info.setdefault("kv_data", {})[key] = value
+        elif line.startswith("llama_model_loader: - kv") and "=" in line:
+            # Split on '=' and take the parts
+            parts = line.split("=", 1)  # Split only on first '='
+            left_part = parts[0].strip()
+            value = parts[1].strip()
+
+            # Extract key and type from left part
+            # Format: "llama_model_loader: - kv N: key type"
+            kv_parts = left_part.split(":")
+            if len(kv_parts) >= 3:
+                key_type_part = kv_parts[2].strip()  # This is "key type"
+                key = key_type_part.rsplit(" ", 1)[
+                    0
+                ]  # Everything except last word (type)
+
+                # Use human-readable name if available, otherwise use original key
+                display_key = key_mappings.get(key, key)
+
+                self.model_info.setdefault("kv_data", {})[display_key] = value
         elif line.startswith("llama_model_loader: - type"):
             parts = line.split(":")
             if len(parts) > 1:
```
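Tracing the new kv branch with a sample loader line makes the split logic easier to follow. The sample text below is illustrative, patterned on llama.cpp's metadata output, and the mapping table is an excerpt:

```python
key_mappings = {"general.architecture": "Architecture"}  # excerpt of the full table

# Illustrative line in the "llama_model_loader: - kv N: key type = value" shape
line = "llama_model_loader: - kv   0:   general.architecture str   = llama"

if line.startswith("llama_model_loader: - kv") and "=" in line:
    left_part, value = (p.strip() for p in line.split("=", 1))
    kv_parts = left_part.split(":")
    if len(kv_parts) >= 3:
        # kv_parts[2] is "key type"; drop the trailing type token
        key = kv_parts[2].strip().rsplit(" ", 1)[0].strip()
        display_key = key_mappings.get(key, key)
        print(display_key, "=", value)  # Architecture = llama
```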
```diff
@@ -95,11 +95,8 @@ def show_task_context_menu(self, position) -> None:
-
     def show_task_properties(self, item) -> None:
         self.logger.debug(SHOWING_PROPERTIES_FOR_TASK.format(item.text()))
         task_item = self.task_list.itemWidget(item)
         for thread in self.quant_threads:
             if thread.log_file == task_item.log_file:
                 model_info_dialog = ModelInfoDialog(thread.model_info, self)
-
                 model_info_dialog.exec()
                 break
-
```
```diff
@@ -1,7 +1,7 @@
 import os
 import re
 import sys
-from typing import Any, List, TextIO, Union
+from typing import Any, IO, List, TextIO, Union

 from PySide6.QtWidgets import (
     QMessageBox,
```
```diff
@@ -86,9 +86,9 @@ def show_about(self) -> None:

 A tool for managing and converting GGUF models.
 This application is licensed under the Apache License 2.0.
-Copyright (c) 2025 leafspark.
+Copyright (c) 2024-2025 leafspark.
 It also utilizes llama.cpp, licensed under the MIT License.
-Copyright (c) 2023-2024 The ggml authors."""
+Copyright (c) 2023-2025 The ggml authors."""
         QMessageBox.about(self, "About AutoGGUF", about_text)

```
```diff
@@ -97,7 +97,7 @@ def ensure_directory(path) -> None:
         os.makedirs(path)


-def open_file_safe(file_path, mode="r") -> TextIO:
+def open_file_safe(file_path, mode="r") -> IO[Any]:
    encodings = ["utf-8", "latin-1", "ascii", "utf-16"]
    for encoding in encodings:
        try:
```
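The hunk shows only the signature change, but the visible lines (the encodings list and the `for`/`try`) suggest the familiar fallback-encoding loop. The switch from `TextIO` to `IO[Any]` makes sense because `open` in a binary mode returns a buffered byte stream, not a `TextIO`. A sketch of how such a helper is commonly written, under the assumption of text mode; the real body is not shown in this hunk:

```python
from typing import IO, Any


def open_file_safe(file_path, mode="r") -> IO[Any]:
    # Try each encoding until the file decodes cleanly (text-mode sketch).
    encodings = ["utf-8", "latin-1", "ascii", "utf-16"]
    for encoding in encodings:
        try:
            fh = open(file_path, mode, encoding=encoding)
            fh.read()  # force decoding errors to surface here
            fh.seek(0)
            return fh
        except UnicodeDecodeError:
            fh.close()
            continue
    raise ValueError(f"Could not decode {file_path} with any of: {encodings}")
```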
```diff
@@ -159,7 +159,9 @@ def update_cuda_backends(self) -> None:
         for item in os.listdir(llama_bin):
             item_path = os.path.join(llama_bin, item)
             if os.path.isdir(item_path) and "cudart-llama" not in item.lower():
-                if "cu1" in item.lower():  # Only include CUDA-capable backends
+                if (
+                    "cu1" in item.lower() or "cuda-1" in item.lower()
+                ):  # Only include CUDA-capable backends
                     self.backend_combo_cuda.addItem(item, userData=item_path)

         if self.backend_combo_cuda.count() == 0:
```
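The widened condition tracks a rename in llama.cpp's release packaging: older CUDA builds carried names containing strings like `cu11`/`cu12`, while newer archives appear to use a `cuda-12.x` style, so `"cu1"` alone no longer matches. A tiny predicate capturing the new logic; the folder names below are illustrative, not taken from the commit:

```python
def is_cuda_backend(item: str) -> bool:
    # Accept both the older "cu1x" and newer "cuda-1x" naming, while
    # still excluding the cudart runtime packages.
    name = item.lower()
    return "cudart-llama" not in name and ("cu1" in name or "cuda-1" in name)


# Hypothetical extracted backend folder names
for item in [
    "llama-bin-win-cuda-cu12.2.0-x64",
    "llama-bin-win-cuda-12.4-x64",
    "cudart-llama-bin-win-cu12",
]:
    print(item, "->", is_cuda_backend(item))  # True, True, False
```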
src/utils.py (26 changed lines)
```diff
@@ -1,6 +1,10 @@
 from typing import Any, Union

-import requests
+import urllib.request
+import urllib.error
+import json
+import ssl
+import certifi
 from PySide6.QtCore import Qt
 from PySide6.QtWidgets import QFileDialog, QInputDialog, QMenu
```
```diff
@@ -188,16 +192,28 @@ def refresh_releases(self) -> None:
             owner, repo = get_repo_from_env()
             url = f"https://api.github.com/repos/{owner}/{repo}/releases"

-            response = requests.get(url)
-            response.raise_for_status()
+            # Create SSL context with certifi certificates
+            ssl_context = ssl.create_default_context(cafile=certifi.where())
+
+            # Create request
+            req = urllib.request.Request(url)
+
+            # Make the request
+            with urllib.request.urlopen(req, context=ssl_context) as response:
+                if response.status != 200:
+                    raise urllib.error.HTTPError(
+                        url, response.status, "HTTP Error", response.headers, None
+                    )
+
+                releases = json.loads(response.read().decode("utf-8"))

-            releases = response.json()
             self.release_combo.clear()
             for release in releases:
                 self.release_combo.addItem(release["tag_name"], userData=release)
             self.release_combo.currentIndexChanged.connect(self.update_assets)
             self.update_assets()

         except ValueError as e:
             show_error(self.logger, f"Invalid repository configuration: {str(e)}")
-        except requests.exceptions.RequestException as e:
+        except (urllib.error.URLError, urllib.error.HTTPError) as e:
             show_error(self.logger, ERROR_FETCHING_RELEASES.format(str(e)))
```
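One nuance in the new `except` clause: `urllib.error.HTTPError` is a subclass of `urllib.error.URLError`, so catching `URLError` alone would already cover both; listing them together, as the diff does, is harmless and arguably clearer. Likewise, `urlopen` itself raises `HTTPError` for non-2xx responses, so the explicit `response.status` check is mostly a defensive double-check. A one-liner confirming the subclass relationship:

```python
import urllib.error

# HTTPError subclasses URLError, so the two-tuple in the new except
# clause is equivalent to catching URLError alone.
print(issubclass(urllib.error.HTTPError, urllib.error.URLError))  # True
```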