mirror of https://github.com/leafspark/AutoGGUF
Compare commits
163 Commits
Author | SHA1 | Date
---|---|---
*163 commit rows (4458ad1e58 through 32f29b9f7c); the author and date columns are empty in this mirror view.*
@ -0,0 +1,13 @@
AUTOGGUF_RESOLUTION=1650x1100
AUTOGGUF_THEME=
AUTOGGUF_CHECK_BACKEND=disabled
AUTOGGUF_CHECK_UPDATE=disabled
AUTOGGUF_SERVER_API_KEY=
AUTOGGUF_MODEL_DIR_NAME=models
AUTOGGUF_OUTPUT_DIR_NAME=quantized_models
AUTOGGUF_RESIZE_FACTOR=1.1
AUTOGGUF_SERVER=enabled
AUTOGGUF_SERVER_PORT=7001
AUTOGGUF_SERVER_API_KEY=
AUTOGGUF_LANGUAGE=en-US
AUTOGGUF_BACKEND_REPO=ggerganov/llama.cpp
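Each entry above is an ordinary environment variable, so the values can be read with nothing more than the standard library. A small illustrative sketch of how such settings might be consumed (the `get_env_setting` helper is an example, not AutoGGUF's actual code):

```python
import os

def get_env_setting(name: str, default: str) -> str:
    # Hypothetical helper: fall back to the documented default when the variable is unset or empty.
    value = os.environ.get(name, "").strip()
    return value if value else default

server_enabled = get_env_setting("AUTOGGUF_SERVER", "enabled") == "enabled"
server_port = int(get_env_setting("AUTOGGUF_SERVER_PORT", "7001"))
model_dir = get_env_setting("AUTOGGUF_MODEL_DIR_NAME", "models")
print(server_enabled, server_port, model_dir)
```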
@ -12,8 +12,8 @@ jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
      - uses: psf/black@stable
        with:
          options: "--check --verbose"
@ -18,18 +18,17 @@ jobs:
      matrix:
        os: [windows-latest, ubuntu-latest, macos-latest]
        arch: [x64]
        include:
          - os: windows-latest
            arch: x86
    runs-on: ${{ matrix.os }}
    outputs:
      artifact-names: ${{ steps.set-outputs.outputs.artifact-names }}

    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v2
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'
          python-version: '3.12'
          architecture: ${{ matrix.arch }}

      - name: Install dependencies
@ -61,23 +60,76 @@ jobs:
|
|||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
$distPath = if ("${{ github.event.inputs.build_type }}" -eq "RELEASE") { "build\release\dist" } else { "build\dev\dist" }
|
||||
New-Item -ItemType Directory -Force -Path "$distPath\src\gguf-py"
|
||||
Copy-Item -Path "src\gguf-py\*" -Destination "$distPath\src\gguf-py" -Recurse
|
||||
New-Item -ItemType Directory -Force -Path "$distPath\src\gguf"
|
||||
Copy-Item -Path "src\gguf\*" -Destination "$distPath\src\gguf" -Recurse
|
||||
Copy-Item -Path "src\convert_hf_to_gguf.py" -Destination "$distPath\src"
|
||||
Copy-Item -Path "src\convert_lora_to_gguf.py" -Destination "$distPath\src"
|
||||
Copy-Item -Path "src\convert_lora_to_ggml.py" -Destination "$distPath\src"
|
||||
Copy-Item -Path "src\quantize_to_fp8_dynamic.py" -Destination "$distPath\src"
|
||||
Copy-Item -Path ".env.example" -Destination "$distPath\"
|
||||
|
||||
- name: Copy additional files (Linux/macOS)
|
||||
if: matrix.os != 'windows-latest'
|
||||
run: |
|
||||
distPath=$(if [ "${{ github.event.inputs.build_type }}" = "RELEASE" ]; then echo "build/release/dist"; else echo "build/dev/dist"; fi)
|
||||
mkdir -p $distPath/src/gguf-py
|
||||
cp -R src/gguf-py/* $distPath/src/gguf-py/
|
||||
mkdir -p $distPath/src/gguf
|
||||
cp -R src/gguf/* $distPath/src/gguf/
|
||||
cp src/convert_hf_to_gguf.py $distPath/src/
|
||||
cp src/convert_lora_to_gguf.py $distPath/src/
|
||||
cp src/convert_lora_to_ggml.py $distPath/src/
|
||||
cp src/quantize_to_fp8_dynamic.py $distPath/src/
|
||||
cp .env.example $distPath/
|
||||
|
||||
- name: Set outputs for artifact name
|
||||
id: set-outputs
|
||||
run: echo "artifact-name=AutoGGUF-${{ matrix.os }}-${{ matrix.arch }}-${{ github.event.inputs.build_type }}-${{ github.sha }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Upload Artifact
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: AutoGGUF-${{ matrix.os }}-${{ matrix.arch }}-${{ github.event.inputs.build_type }}-${{ github.sha }}
|
||||
path: build/${{ github.event.inputs.build_type == 'RELEASE' && 'release' || 'dev' }}/dist
|
||||
|
||||
generate-checksums:
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: ./artifacts
|
||||
|
||||
- name: Generate SHA256 checksums for all artifacts
|
||||
run: |
|
||||
cd artifacts
|
||||
versionHash=$(echo ${{ github.sha }} | cut -c1-7)
|
||||
echo "# AutoGGUF Build Checksums" > ../checksums.txt
|
||||
echo "Build: ${{ github.event.inputs.build_type }}" >> ../checksums.txt
|
||||
echo "Commit: ${{ github.sha }}" >> ../checksums.txt
|
||||
echo "Date: $(date -u)" >> ../checksums.txt
|
||||
echo "" >> ../checksums.txt
|
||||
|
||||
# Find all artifact directories and generate checksums of their zip equivalents
|
||||
for artifact_dir in AutoGGUF-*-${{ github.event.inputs.build_type }}-${{ github.sha }}; do
|
||||
if [ -d "$artifact_dir" ]; then
|
||||
echo "Processing $artifact_dir..."
|
||||
cd "$artifact_dir"
|
||||
|
||||
# Create a temporary zip to calculate hash (simulating what GitHub creates)
|
||||
zip -r "../temp_${artifact_dir}.zip" .
|
||||
cd ..
|
||||
|
||||
# Generate SHA256 of the zip file
|
||||
hash=$(sha256sum "temp_${artifact_dir}.zip" | cut -d' ' -f1)
|
||||
echo "${hash} ${artifact_dir}.zip" >> ../checksums.txt
|
||||
|
||||
# Clean up the temporary zip
|
||||
rm "temp_${artifact_dir}.zip"
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Upload checksums
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: AutoGGUF-${{ github.sha }}-SHA256
|
||||
path: checksums.txt
|
||||
|
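The checksum job above zips each artifact directory and appends a `sha256sum`-style `<hash> <name>.zip` line to `checksums.txt`. A downloaded zip can be checked against that file with a few lines of Python; this is a sketch under the assumption that the file keeps the plain hash/name format shown above, and the artifact names below are placeholders:

```python
import hashlib
import sys

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Stream in chunks so large artifacts are not read into memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Example: python verify_checksum.py checksums.txt AutoGGUF-ubuntu-latest-x64-RELEASE-<commit>.zip
checksums_path, artifact = sys.argv[1], sys.argv[2]
expected = None
with open(checksums_path, encoding="utf-8") as fh:
    for line in fh:
        parts = line.split()
        if len(parts) == 2 and parts[1] == artifact:
            expected = parts[0]
print("OK" if expected == sha256_of(artifact) else "MISMATCH or not listed")
```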
|
|
@ -14,10 +14,10 @@ jobs:
  audit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v2
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'

@ -52,7 +52,7 @@ jobs:
          cat requirements.txt >> detailed_report.txt

      - name: Upload audit results
        uses: actions/upload-artifact@v2
        uses: actions/upload-artifact@v4
        with:
          name: pip-audit-report
          path: detailed_report.txt
@ -1,5 +1,4 @@
name: Pylint

on:
  push:
    paths:

@ -7,23 +6,23 @@ on:
  pull_request:
    paths:
      - '**.py'

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.8", "3.9", "3.10"]
        python-version: ["3.9", "3.10"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pylint PyQt6 psutil requests
          pip install $(grep -v "^torch" requirements.txt | tr '\n' ' ')
          pip install pylint
      - name: Analysing the code with pylint
        run: |
          pylint $(git ls-files '*.py') --disable=all --enable=E0001,E0100,E0101,E0102,E0103,E0104,E0105,E0107,E0108,E0110,E0111,E0112,E0113,E0114,E0115,E0116,E0117,E0118,E0202,E0203,E0211,E0213,E0236,E0237,E0238,E0239,E0240,E0241,E0301,E0302,E0303,E0401,E0402,E0701,E0702,E0703,E0704,E0710,E0711,E0712,E1003,E1101,E1102,E1111,E1120,E1121,E1123,E1124,E1125,E1126,E1127,E1128,E1129,E1130,E1131,E1132,E1133,E1134,E1135,E1136,E1137,E1138,E1139,E1200,E1201,E1205,E1206,E1300,E1301,E1302,E1303,E1304,E1305,E1306,E1310,E1700,E1701,W0311,W0312,W0611,W0612,W0613,W0702,W1401,W1402,C0123,C0200,C0325,C0411,C0412 --fail-under=5
@ -13,12 +13,12 @@ jobs:
  radon:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v4
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'
@ -13,17 +13,27 @@ __pycache__/

# Allow these files
!.pre-commit-config.yaml
!.env.example
!setup.py

# Allow src folder and its .py files
!src/
src/*
!src/*.py
!src/gguf
src/gguf/*
!src/gguf/*.py

# Allow docs folder and its .py files
!docs/
docs/*
!docs/*.py

# Allow plugins folder and its .py files
!plugins/
plugins/*
!plugins/*.py

# Allow assets folder, but only .svg, .png, .rc, .css, .iss and .ico files
!assets/
assets/*

@ -41,6 +51,3 @@ assets/*

# Don't ignore .gitignore
!.gitignore

# Allow PyPI setup script
!setup.py
188 CHANGELOG.md

@ -1,5 +1,167 @@
# Changelog

## [v2.0.1] - 2025-05-24

### Added
- Human readable mappings from KV pairs into model properties
- certifi library for backend download and update checking
- Automated checksums in CI process

### Changed
- Updated llama.cpp backend
- Improved backend UI, logging, and task handling
- Enhanced display of model properties and cleaner formatting of KV pairs
- Updated tensor data formatting and removed redundant KV pairs property
- Updated CUDA backend check for latest llama.cpp release format
- Global urllib usage implementation
- Updated README with more information about patches and updates
- Edited quick start instructions
- Small file formatting improvements

### Fixed
- Type hints corrections
- Build errors in CI
- `@upload-artifact` updated to v4

## [v2.0.0] - 2025-01-27

### Added
- Clipboard support for save/load preset functionality with shift-click option
- Support for shift-clicking to get quantization command
- AUTOGGUF_BACKEND_REPO environment variable for custom GitHub repository fetching
- New HF to GGUF conversion types: `tq1_0` and `tq2_0`

### Changed
- Updated multiple dependencies:
  - PySide6, PyTorch, Transformers, FastAPI, uvicorn, and other core libraries to their latest compatible versions
- Adjusted monitoring intervals from 0.2s to 0.5s
- Updated copyright year to 2025
- Bundled llama.cpp licensing text in About menu
- Removed x86 build matrix from CI
- Removed Import Model confirmation dialog

### Fixed
- Resolved PySide6 segfault issue
- Fixed error when deleting models from list
- Corrected incorrect menu bar name for Load Preset

## [v1.9.1] - 2024-10-13

### Added
- Support for specifying log directory name using AUTOGGUF_LOG_DIR_NAME environment variable
- Work in progress GGUF merge window
- Support for repository types in HF Transfer utility
- New `dequantize_gguf.py` script
- Support for MiniCPM3, RWKVv6, OLMoE, IBM Granite, and Jamba in llama.cpp convert scripts (conversion only)
- Add Nuitka build script for Linux

### Changed
- Updated Finnish and Russian localizations using Claude 3 Opus
- Improved layout of HF Upload window
- Updated gguf library from upstream
- Refactored code to use localizations for menubar
- Renamed imports_and_globals.py to globals.py
- Moved general functions verify_gguf and process_args to globals.py
- Created Plugins class for extensibility
- Updated dependencies:
  - huggingface-hub
  - fastapi (~=0.115.0)
  - setuptools (~=75.1.0)
  - pyside6 (~=6.7.3)
  - uvicorn (~=0.31.0)

### Fixed
- Corrected localization strings and file select types for GGUF merging
- Fix minor errors in build scripts

## [v1.9.0] - 2024-09-15

### Added
- Implemented Hugging Face (HF) upload functionality with GUI definitions
- Added RAM and CPU usage graphs to UI
- Input validation using wraps added to UI
- Right-click context menu added to the models list in UI
- Support for iMatrix generation tracking
- GGUF splitting feature added
- Japanese and German localizations updated

### Changed
- Refactored to move functions out of `AutoGGUF` to reduce bloat
- Localized GGUF split strings
- Optimized GGUF imports and renamed related modules
- Removed old `HFTransfer` class
- Adjusted logging strings and updated French and Dutch localizations
- Improved startup time by optimizing default configuration, disabling network fetches for backends/updates
- Removed `requests` and `python-dotenv` to reduce size
- Updated `fastapi` requirement from `~=0.112.2` to `~=0.114.2`
- Updated `torch` requirement from `~=2.4.0` to `~=2.4.1`
- Updated `setuptools` requirement from `~=74.0.0` to `~=74.1.2`
- Updated `safetensors` requirement from `~=0.4.4` to `~=0.4.5`
- Updated `huggingface-hub` requirement from `~=0.24.6` to `~=0.24.7`

### Fixed
- Adjusted indeterminate progress bar behavior
- Removed comments in `requirements.txt` and updated its formatting

## [v1.8.1] - 2024-09-04

### Added
- AutoFP8 quantization classes and window (currently WIP)
- Minimize/maximize buttons to title bar
- API key authentication support for the local server
- HuggingFace upload/download class
- OpenAPI docs for endpoints
- Added new showcase image

### Changed
- Replaced Flask with FastAPI and Uvicorn for improved performance
- Moved functions out of AutoGGUF.py into utils.py and TaskListItem.py
- Updated llama.cpp convert scripts
- Improved LoRA conversion process:
  - Allow specifying output path in arguments
  - Removed shutil.move operation
  - Increased max number of LoRA layers
- Changed default port to 7001
- Now binding to localhost (127.0.0.1) instead of 0.0.0.0
- Updated Spanish localizations
- Updated setuptools requirement from ~=68.2.0 to ~=74.0.0
- Updated .env.example with new configuration parameters

### Fixed
- Web page not found error
- Use of proper status in TaskListItem
- Passing of quant_threads and Logger to TaskListItem
- Improved window moving smoothness
- Prevention of moving window below taskbar
- Optimized imports in various files
- Remove aliased quant types

## [v1.8.0] - 2024-08-26

### Added
- .env.example file added
- Sha256 generation support added to build.yml
- Allow importing models from any directory on the system
- Added manual model import functionality
- Verification for manual imports and support for concatenated files
- Implemented plugins feature using importlib
- Configuration options for AUTOGGUF_MODEL_DIR_NAME, AUTOGGUF_OUTPUT_DIR_NAME, and AUTOGGUF_RESIZE_FACTOR added

### Changed
- Moved get helper functions to utils.py
- Added type hints
- Reformat TaskListItem.py for better readability
- Separate macOS and Linux runs in CI/CD
- Updated .gitignore for better file management
- Updated numpy requirement from <2.0.0 to <3.0.0

### Fixed
- Fixed sha256 file format and avoided overwriting
- Updated regex for progress tracking
- Arabic and French localizations fixed
- Only count valid backends instead of total backend combos
- Import missing modules

## [v1.7.2] - 2024-08-19

### Added
@ -98,7 +260,7 @@ ### Notes
- Fast build: Higher unzipped size (97MB), smaller download (38MB)
- Standard build: Created with PyInstaller, medium download and unzipped size (50MB), potentially slower

## [1.6.0] - 2024-08-08
## [v1.6.0] - 2024-08-08

### Changed
- Resolve licensing issues by using PySide6

@ -106,7 +268,7 @@ ### Changed
### Added
- Add GPU monitoring support for NVIDIA GPUs

## [1.5.1] - 2024-08-08
## [v1.5.1] - 2024-08-08

### Changed
- Refactor localizations to use them in HF conversion area

@ -118,7 +280,7 @@ ### Removed
### Added
- Support loading *.gguf file types

## [1.5.0] - 2024-08-06
## [v1.5.0] - 2024-08-06

### Changed
- Refactor localizations to use them in HF conversion area

@ -131,7 +293,7 @@ ### Added
### Fixed
- Fix scaling on low resolution screens, interface now scrolls

## [1.4.3] - 2024-08-05
## [v1.4.3] - 2024-08-05

### Changed
- Updated src file in release to be Black formatted

@ -144,7 +306,7 @@ ### Added
- Added model sharding management support
- Allow multiple quantization types to be selected and started simultaneously

## [1.4.2] - 2024-08-04
## [v1.4.2] - 2024-08-04

### Fixed
- Resolves bug where Base Model text was shown even when GGML type was selected

@ -153,13 +315,13 @@ ### Fixed
### Changed
- Minor repository changes

## [1.4.1] - 2024-08-04
## [v1.4.1] - 2024-08-04

### Added
- Dynamic KV Overrides (see wiki: AutoGGUF/wiki/Dynamic-KV-Overrides)
- Quantization commands are now printed and logged

## [1.4.0] - 2024-08-04
## [v1.4.0] - 2024-08-04

### Added
- LoRA Conversion:

@ -183,7 +345,7 @@ ### Added
- Currently includes src folder with conversion tools
- No console window popup

## [1.3.1] - 2024-08-04
## [v1.3.1] - 2024-08-04

### Added
- AUTOGGUF_CHECK_BACKEND environment variable to disable backend check on start

@ -191,7 +353,7 @@ ### Added
### Changed
- --onefile build with PyInstaller, _internal directory is no longer required

## [1.3.0] - 2024-08-03
## [v1.3.0] - 2024-08-03

### Added
- Support for new llama-imatrix parameters:

@ -213,7 +375,7 @@ ### Fixed
### Removed
- Duplicated functions

## [1.2.1] - 2024-08-03
## [v1.2.1] - 2024-08-03

### Added
- Refresh Models button

@ -222,13 +384,13 @@ ### Added
### Fixed
- iostream llama.cpp issue, quantized_models directory created on launch

## [1.2.0] - 2024-08-03
## [v1.2.0] - 2024-08-03

### Added
- More robust logging (find logs at latest_<timestamp>.log in logs folder)
- Localizations with support for 28 languages (machine translated using Gemini Experimental 0801)

## [1.1.0] - 2024-08-03
## [v1.1.0] - 2024-08-03

### Added
- Dynamic KV override functionality

@ -251,7 +413,7 @@ ### Added
### Fixed
- Issue where quantization errored with "AutoGGUF does not have x attribute"

## [1.0.0] - 2024-08-02
## [v1.0.0] - 2024-08-02

### Added
- Initial release
|
|
@ -2,8 +2,6 @@ # Contributing to AutoGGUF

First off, thanks for taking the time to contribute! 🎉👍

## How Can I Contribute?

### Reporting Bugs

- Use the issue tracker to report bugs

@ -15,17 +13,18 @@ ### Suggesting Enhancements
- Use the issue tracker to suggest enhancements
- Explain why this enhancement would be useful

### Your First Code Contribution
### Code Contributions

You can find issues labeled with "good first issue" in the Issues tab as a starting point. Code refactors and optimizations are also appreciated, although if there's a vulnerability please report it privately in the Security tab. For feature PRs, please open a discussion first to make sure your feature can be added and continuously maintained.

1. Fork the repo
2. Create your feature branch (`git checkout -b feature/AmazingFeature`)
3. Install pre-commit: (`pip install pre-commit`)
4. Set up the git hook scripts: (`pre-commit install`)
5. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
6. Push to the branch (`git push origin feature/AmazingFeature`)
7. Open a Pull Request
2. Clone your fork (`git clone https://github.com/your-username/AutoGGUF.git && cd AutoGGUF`)
3. Create your feature branch (`git checkout -b feature/AmazingFeature`)
5. Install pre-commit: (`pip install pre-commit`)
6. Set up the git hook scripts: (`pre-commit install`)
7. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
8. Push to the branch (`git push origin feature/AmazingFeature`)
9. Open a Pull Request on GitHub

## Styleguides
|
|
2 LICENSE

@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2024 leafspark
Copyright (c) 2024-2025 leafspark

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
|
|
106 README.md

@ -9,66 +9,85 @@ # AutoGGUF - automated GGUF model quantizer

<!-- Project Info -->
[](https://github.com/ggerganov/llama.cpp)

[]()
[](https://github.com/leafspark/AutoGGUF/blob/main/LICENSE)

<!-- Repository Stats -->

<!--  -->

<!-- Contribution -->
[](https://github.com/psf/black)
[](https://github.com/leafspark/AutoGGUF/issues)
[](https://github.com/psf/black)
[](https://github.com/leafspark/AutoGGUF/pulls)

AutoGGUF provides a graphical user interface for quantizing GGUF models using the llama.cpp library. It allows users to download different versions of llama.cpp, manage multiple backends, and perform quantization tasks with various options.
The most comprehensive GUI tool for GGUF model quantization. Stop wrestling with command lines - quantize, merge, and optimize your models with just a few clicks.

## Features

- Download and manage llama.cpp backends
- Select and quantize GGUF models
- Configure quantization parameters
- Monitor system resources during quantization
- Parallel quantization + imatrix generation
- LoRA conversion and merging
- Preset saving and loading
- 📩 Update and manage llama.cpp backends
- 🗃️ Download and quantize GGUF/safetensors models
- 📐 Configure quantization parameters
- 💻 Monitor system resources in real time during quantization
- ⏳ Parallel quantization + imatrix generation
- 🎉 LoRA conversion and merging
- 📁 Preset saving and loading
- 8️⃣ AutoFP8 quantization
- 🪓 GGUF splitting and merging
- 🌐 HTTP API for automation and monitoring

## Usage
## Why AutoGGUF?
- Fast: Saves time on manual configuration
- Simple: Clean UI, no terminal needed
- Powerful: Handles models of any size, limited only by your RAM
- Resource-aware: Optimized memory management and efficient UI library

### Cross-platform
1. Install dependencies:

## Quick Start

### Cross-platform (recommended)
1. `git clone https://github.com/leafspark/AutoGGUF`
2. `cd AutoGGUF`
3. Install dependencies:
```
pip install -r requirements.txt
```
2. Run the application:
4. Run the application:
```
python src/main.py
```
or use the `run.bat` script.

### Windows
macOS and Ubuntu builds are provided via GitHub Actions; you may download the binaries from the releases section.

### Windows (for the impatient)
Standard builds:
1. Download the latest release
2. Extract all files to a folder
3. Run `AutoGGUF-x64.exe`
4. Any necessary folders will be automatically created

Setup builds:
1. Download setup varient of latest release
1. Download the setup variant of the latest release
2. Extract all files to a folder
3. Run the setup program
4. The .GGUF extension will be registered with the program automatically
4. The .gguf extension will be registered with the program automatically
5. Run the program from the Start Menu or desktop shortcuts

After launching the program, you may access its local server at port 7001 (set `AUTOGGUF_SERVER` to "enabled" first).

### Verifying Releases

#### Linux/macOS:
```bash
gpg --import AutoGGUF-v1.5.0-prerel.asc
gpg --verify AutoGGUF-v1.5.0-Windows-avx2-prerel.zip.sig AutoGGUF-v1.5.0-Windows-avx2-prerel.zip
sha256sum -c AutoGGUF-v1.5.0-prerel.sha256
gpg --verify AutoGGUF-v1.9.1-Windows-avx2.zip.sig AutoGGUF-v1.9.1-Windows-avx2.zip
sha256sum -c AutoGGUF-v1.9.1.sha256
```

#### Windows (PowerShell):

@ -77,11 +96,11 @@ # Import the public key
gpg --import AutoGGUF-v1.5.0-prerel.asc

# Verify the signature
gpg --verify AutoGGUF-v1.5.0-Windows-avx2-prerel.zip.sig AutoGGUF-v1.5.0-Windows-avx2-prerel.zip
gpg --verify AutoGGUF-v1.9.1-Windows-avx2.zip.sig AutoGGUF-v1.9.1-Windows-avx2.zip

# Check SHA256
$fileHash = (Get-FileHash -Algorithm SHA256 AutoGGUF-v1.5.0-Windows-avx2-prerel.zip).Hash.ToLower()
$storedHash = (Get-Content AutoGGUF-v1.5.0-prerel.sha256 | Select-String AutoGGUF-v1.5.0-Windows-avx2-prerel.zip).Line.Split()[0]
$fileHash = (Get-FileHash -Algorithm SHA256 AutoGGUF-v1.9.1-Windows-avx2.zip).Hash.ToLower()
$storedHash = (Get-Content AutoGGUF-v1.9.1.sha256 | Select-String AutoGGUF-v1.9.1-Windows-avx2.zip).Line.Split()[0]
if ($fileHash -eq $storedHash) { "SHA256 Match" } else { "SHA256 Mismatch" }
```

@ -99,50 +118,53 @@ ### Cross-platform

### Windows
```bash
pip install -U pyinstaller
build RELEASE | DEV
```
Find the executable in `build/<type>/dist/AutoGGUF.exe`.
Find the executable in `build/<type>/dist/AutoGGUF-x64.exe`.

You can also use the slower build but faster executable script (Nuitka):
You can also use Nuitka, which may result in a slower build but a faster output executable:
```bash
build_optimized RELEASE | DEV
```

## Dependencies

Find them in `requirements.txt`.

## Localizations

View the list of supported languages at [AutoGGUF/wiki/Installation#configuration](https://github.com/leafspark/AutoGGUF/wiki/Installation#configuration) (LLM translated, except for English).

To use a specific language, set the `AUTOGGUF_LANGUAGE` environment variable to one of the listed language codes (note: some languages may not be fully supported yet, those will fall back to English).
Languages will be updated as soon as possible after an update, or as a part of the update.

## Known Issues
To use a specific language, set the `AUTOGGUF_LANGUAGE` environment variable to one of the listed language codes (note: some languages may not be fully supported yet, in which case the UI elements will fall back to English).

- None!
## Issues

- Some inconsistent logging and signal handling
- Missing or duplicated translations (priority)
- Buggy/incomplete API interfaces
- Code review and formatting (priority)

## Planned Features

- Time estimation for quantization
- Actual progress bar tracking
- Perplexity testing
- Web API and management (partially implemented in v1.6.2)
- ~~Themes~~ (added in v1.7.1)
- ~~Sleek UI menubar~~ (added in v1.7.1)
- [ ] Time estimation for quantization
- [ ] Quantization file size estimate
- [ ] Perplexity testing
- [ ] bitsandbytes support

## Troubleshooting
#### Project Status

AutoGGUF has now entered maintenance mode. It's considered stable and feature-complete for most use cases, so I'm not actively developing new features, but I'll continue to publish occasional builds, update dependencies regularly, and fix critical bugs as needed. If you encounter issues or have suggestions, feel free to open an issue.

## Support

- SSL module cannot be found error: Install OpenSSL or run from source using `python src/main.py` with the `run.bat` script (`pip install requests`)
- Check out the [Wiki](https://github.com/leafspark/AutoGGUF/wiki) for advanced usage and configuration

## Contributing

Fork the repo, make your changes, and ensure you have the latest commits when merging. Include a changelog of new features in your pull request description. Read `CONTRIBUTING.md` for more information.

## User Interface

## Stargazers

[](https://star-history.com/#leafspark/AutoGGUF&Date)

`Last Updated: May 24, 2025`
|
@ -4,8 +4,10 @@ ## Supported Versions

| Version | Supported |
|-----------------|--------------------|
| stable (v1.6.2) | :white_check_mark: |
| stable (v2.0.x) | :white_check_mark: |

Beta versions are not officially supported and may contain unknown security vulnerabilities. Use them at your own risk.

## Reporting a Vulnerability

Use the Issues tab.
Use the Issues tab, or for severe vulnerabilities, please contact the maintainers via email.
|
|
BIN assets/icon.RES (binary file not shown)

@ -1 +0,0 @@
IDI_ICON1 ICON "favicon.ico"
35 build.sh

@ -1,33 +1,20 @@
#!/bin/bash

if [ $# -eq 0 ]; then
    echo "Usage: $0 [RELEASE|DEV]"
    echo "Usage: build.sh [RELEASE|DEV]"
    exit 1
fi

BUILD_TYPE=$1
ICON_PATH="../../assets/favicon_large.png"
ASSETS_PATH="../../assets"
SRC_PATH="src/main.py"

case $BUILD_TYPE in
    RELEASE)
        OUTPUT_DIR="build/release"
        EXTRA_ARGS="--windowed"
        ;;
    DEV)
        OUTPUT_DIR="build/dev"
        EXTRA_ARGS=""
        ;;
    *)
        echo "Invalid build type. Use RELEASE or DEV."
        exit 1
        ;;
esac

echo "Building $BUILD_TYPE version..."

pyinstaller $EXTRA_ARGS --onefile --name=AutoGGUF --icon=$ICON_PATH --add-data "$ASSETS_PATH:assets" --distpath=$OUTPUT_DIR/dist --workpath=$OUTPUT_DIR/build --specpath=$OUTPUT_DIR $SRC_PATH
if [ "${1,,}" = "release" ]; then
    echo "Building RELEASE version..."
    pyinstaller --windowed --onefile --name=AutoGGUF --icon=../../assets/favicon_large.png --add-data "../../assets:assets" --distpath=build/release/dist --workpath=build/release/build --specpath=build/release src/main.py
elif [ "${1,,}" = "dev" ]; then
    echo "Building DEV version..."
    pyinstaller --onefile --name=AutoGGUF --icon=../../assets/favicon_large.png --add-data "../../assets:assets" --distpath=build/dev/dist --workpath=build/dev/build --specpath=build/dev src/main.py
else
    echo "Invalid argument. Use RELEASE or DEV."
    exit 1
fi

if [ $? -ne 0 ]; then
    echo "Build failed."
|
|
|
@ -1,7 +1,7 @@
@echo off

if "%1"=="" (
    echo Usage: build_fast.bat [RELEASE^|DEV]
    echo Usage: build_optimized.bat [RELEASE^|DEV]
    exit /b 1
)
|
||||
|
|
|
@ -0,0 +1,26 @@
#!/bin/bash

if [ -z "$1" ]; then
    echo "Usage: build_fast.sh [RELEASE|DEV]"
    exit 1
fi

COMMON_FLAGS="--standalone --enable-plugin=pyside6 --include-data-dir=assets=assets"

if [ "$1" == "RELEASE" ]; then
    echo "Building RELEASE version..."
    python -m nuitka $COMMON_FLAGS --windows-console-mode=disable --output-dir=build/release src/main.py --lto=yes
elif [ "$1" == "DEV" ]; then
    echo "Building DEV version..."
    python -m nuitka $COMMON_FLAGS --output-dir=build/dev src/main.py
else
    echo "Invalid argument. Use RELEASE or DEV."
    exit 1
fi

if [ $? -ne 0 ]; then
    echo "Build failed."
    exit 1
else
    echo "Build completed successfully."
fi
|
@ -1,29 +1,34 @@
|
|||
import importlib
|
||||
import json
|
||||
import re
|
||||
import shutil
|
||||
import os
|
||||
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
import requests
|
||||
from PySide6.QtCore import *
|
||||
from PySide6.QtGui import *
|
||||
from PySide6.QtWidgets import *
|
||||
from dotenv import load_dotenv
|
||||
|
||||
import lora_conversion
|
||||
import presets
|
||||
import ui_update
|
||||
import utils
|
||||
from CustomTitleBar import CustomTitleBar
|
||||
from GPUMonitor import GPUMonitor
|
||||
from KVOverrideEntry import KVOverrideEntry
|
||||
from Localizations import *
|
||||
from Logger import Logger
|
||||
from ModelInfoDialog import ModelInfoDialog
|
||||
from error_handling import show_error, handle_error
|
||||
from QuantizationThread import QuantizationThread
|
||||
from TaskListItem import TaskListItem
|
||||
from error_handling import handle_error, show_error
|
||||
from imports_and_globals import (
|
||||
ensure_directory,
|
||||
open_file_safe,
|
||||
resource_path,
|
||||
show_about,
|
||||
ensure_directory,
|
||||
)
|
||||
from Localizations import *
|
||||
import presets
|
||||
import ui_update
|
||||
import lora_conversion
|
||||
import utils
|
||||
|
||||
|
||||
class CustomTitleBar(QWidget):
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
import os
|
||||
import zipfile
|
||||
|
||||
import requests
|
||||
from PySide6.QtCore import QThread, Signal
|
||||
|
||||
|
||||
class DownloadThread(QThread):
|
||||
"""
|
||||
A QThread subclass for downloading and extracting zip files.
|
||||
|
||||
This thread downloads a file from a given URL, saves it to a specified path,
|
||||
extracts its contents if it's a zip file, and then removes the original zip file.
|
||||
|
||||
Signals:
|
||||
progress_signal (int): Emits the download progress as a percentage.
|
||||
finished_signal (str): Emits the path of the extracted directory upon successful completion.
|
||||
error_signal (str): Emits an error message if an exception occurs during the process.
|
||||
"""
|
||||
|
||||
def __init__(self, url: str, save_path: str) -> None:
|
||||
"""
|
||||
Initialize the DownloadThread.
|
||||
|
||||
Args:
|
||||
url (str): The URL of the file to download.
|
||||
save_path (str): The local path where the file will be saved.
|
||||
"""
|
||||
|
||||
def run(self) -> None:
|
||||
"""
|
||||
Execute the download, extraction, and cleanup process.
|
||||
|
||||
This method performs the following steps:
|
||||
1. Downloads the file from the specified URL.
|
||||
2. Saves the file to the specified path.
|
||||
3. Extracts the contents if it's a zip file.
|
||||
4. Removes the original zip file after extraction.
|
||||
5. Emits signals for progress updates, completion, or errors.
|
||||
|
||||
Raises:
|
||||
Exception: Any exception that occurs during the process is caught
|
||||
and emitted through the error_signal.
|
||||
"""
|
|
@ -0,0 +1,28 @@
|
|||
class ModelInfoDialog(QDialog):
|
||||
"""
|
||||
A dialog window for displaying model information.
|
||||
|
||||
This class creates a dialog that shows detailed information about a machine learning model,
|
||||
including its architecture, quantization type, and other relevant data.
|
||||
|
||||
Attributes:
|
||||
None
|
||||
|
||||
Args:
|
||||
model_info (dict): A dictionary containing the model's information.
|
||||
parent (QWidget, optional): The parent widget of this dialog. Defaults to None.
|
||||
"""
|
||||
|
||||
def format_model_info(self, model_info) -> str:
|
||||
"""
|
||||
Formats the model information into HTML for display.
|
||||
|
||||
This method takes the raw model information and converts it into a formatted HTML string,
|
||||
which can be displayed in the dialog's QTextEdit widget.
|
||||
|
||||
Args:
|
||||
model_info (dict): A dictionary containing the model's information.
|
||||
|
||||
Returns:
|
||||
str: Formatted HTML string containing the model information.
|
||||
"""
|
|
@ -0,0 +1,13 @@
class ExamplePlugin:
    def init(self, autogguf_instance):
        # This gets called after the plugin is loaded
        print("Plugin initialized")

    def __data__(self):
        return {
            "name": "ExamplePlugin",
            "description": "This is an example plugin.",
            "compatible_versions": ["*"],
            "author": "leafspark",
            "version": "v1.0.0",
        }
|
|
@ -1,11 +1,14 @@
psutil~=6.0.0
requests~=2.32.3
numpy<2.0.0
torch~=2.4.0
sentencepiece~=0.2.0
PyYAML~=6.0.2
pynvml~=11.5.3
PySide6~=6.7.2
flask~=3.0.3
python-dotenv~=1.0.1
safetensors~=0.4.4
psutil~=7.0.0
pynvml~=12.0.0
PySide6~=6.9.1
safetensors~=0.5.3
numpy<2.0.0
torch~=2.7.0
sentencepiece~=0.2.0
setuptools~=80.7.1
huggingface-hub~=0.33.1
transformers~=4.51.3
fastapi~=0.115.12
uvicorn~=0.34.2
certifi~=2025.4.26
29 run.sh

@ -1,6 +1,31 @@
#!/bin/sh

# Check if Python is installed
if ! command -v python3 >/dev/null 2>&1; then
    echo "Error: Python 3 is not installed or not in the PATH."
    echo "Please install Python 3 and try again."
    exit 1
fi

# Set environment variables
export PYTHONIOENCODING=utf-8
export AUTOGGUF_LANGUAGE=en-US
export AUTOGGUF_CHECK_BACKEND=disabled
python3 src/main.py

# Try to run main.py in the current directory
if [ -f "main.py" ]; then
    echo "Running main.py in the current directory..."
    python3 main.py
    exit 0
fi

# If main.py doesn't exist in the current directory, try src/main.py
if [ -f "src/main.py" ]; then
    echo "Running src/main.py..."
    python3 src/main.py
    exit 0
fi

# If neither file is found, display an error message
echo "Error: Neither main.py nor src/main.py found."
echo "Please make sure the script is in the correct directory."
exit 1
|
|
4 setup.py

@ -5,12 +5,12 @@

setup(
    name="AutoGGUF",
    version="v1.7.1",
    version="v2.0.1",
    packages=[""],
    url="https://github.com/leafspark/AutoGGUF",
    license="apache-2.0",
    author="leafspark",
    author_email="",
    author_email="leafspark@proton.me",
    description="automatically quant GGUF models",
    install_requires=required,
    entry_points={"console_scripts": ["autogguf-gui = main:main"]},
|
1339 src/AutoGGUF.py (file diff suppressed because it is too large)
|
@ -1,12 +1,9 @@
|
|||
from PySide6.QtCore import QPoint
|
||||
from PySide6.QtGui import QPixmap
|
||||
from PySide6.QtCore import QPoint, Qt
|
||||
from PySide6.QtWidgets import QHBoxLayout, QLabel, QMenuBar, QPushButton, QWidget
|
||||
|
||||
from imports_and_globals import resource_path
|
||||
|
||||
|
||||
class CustomTitleBar(QWidget):
|
||||
def __init__(self, parent=None):
|
||||
def __init__(self, parent=None) -> None:
|
||||
super().__init__(parent)
|
||||
self.parent = parent
|
||||
layout = QHBoxLayout(self)
|
||||
|
@ -46,7 +43,29 @@ def __init__(self, parent=None):
|
|||
"""
|
||||
)
|
||||
|
||||
# Enable mouse tracking for smoother movement
|
||||
self.setMouseTracking(True)
|
||||
|
||||
# Add maximize button
|
||||
self.maximize_button = QPushButton("□")
|
||||
self.maximize_button.setFixedSize(30, 30)
|
||||
self.maximize_button.setStyleSheet(
|
||||
"""
|
||||
QPushButton {
|
||||
border: none;
|
||||
background-color: transparent;
|
||||
padding: 2px;
|
||||
font-size: 15px;
|
||||
}
|
||||
QPushButton:hover {
|
||||
background-color: rgba(255, 255, 255, 0.1);
|
||||
}
|
||||
"""
|
||||
)
|
||||
self.maximize_button.clicked.connect(self.toggle_maximize)
|
||||
|
||||
layout.addWidget(self.minimize_button)
|
||||
layout.addWidget(self.maximize_button)
|
||||
layout.addWidget(self.close_button)
|
||||
|
||||
self.minimize_button.clicked.connect(self.parent.showMinimized)
|
||||
|
@ -54,22 +73,40 @@ def __init__(self, parent=None):
|
|||
|
||||
self.start = QPoint(0, 0)
|
||||
self.pressing = False
|
||||
self.isMaximized = False # Flag to track maximization state
|
||||
self.normal_size = None # Store the normal window size
|
||||
|
||||
def mousePressEvent(self, event):
|
||||
self.start = self.mapToGlobal(event.pos())
|
||||
self.pressing = True
|
||||
def mousePressEvent(self, event) -> None:
|
||||
if event.button() == Qt.LeftButton:
|
||||
self.start = event.globalPos() - self.parent.frameGeometry().topLeft()
|
||||
self.pressing = True
|
||||
|
||||
def mouseMoveEvent(self, event):
|
||||
def mouseMoveEvent(self, event) -> None:
|
||||
if self.pressing:
|
||||
end = self.mapToGlobal(event.pos())
|
||||
movement = end - self.start
|
||||
self.parent.setGeometry(
|
||||
self.parent.x() + movement.x(),
|
||||
self.parent.y() + movement.y(),
|
||||
self.parent.width(),
|
||||
self.parent.height(),
|
||||
)
|
||||
self.start = end
|
||||
new_pos = event.globalPos() - self.start
|
||||
screen = self.parent.screen()
|
||||
screen_geo = screen.availableGeometry()
|
||||
|
||||
def mouseReleaseEvent(self, event):
|
||||
# Check if the new position would put the titlebar below the taskbar
|
||||
if (
|
||||
new_pos.y() + self.parent.height() > screen_geo.bottom()
|
||||
): # Use screen_geo.bottom()
|
||||
new_pos.setY(screen_geo.bottom() - self.parent.height())
|
||||
|
||||
self.parent.move(new_pos)
|
||||
|
||||
def mouseReleaseEvent(self, event) -> None:
|
||||
self.pressing = False
|
||||
|
||||
def toggle_maximize(self) -> None:
|
||||
if self.isMaximized:
|
||||
self.parent.showNormal()
|
||||
if self.normal_size:
|
||||
self.parent.resize(self.normal_size)
|
||||
self.maximize_button.setText("□") # Change back to maximize symbol
|
||||
self.isMaximized = False
|
||||
else:
|
||||
self.normal_size = self.parent.size() # Store the current size
|
||||
self.parent.showMaximized()
|
||||
self.maximize_button.setText("❐") # Change to restore symbol
|
||||
self.isMaximized = True
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
import os
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
import zipfile
|
||||
|
||||
import requests
|
||||
from PySide6.QtCore import *
|
||||
import ssl
|
||||
import certifi
|
||||
from PySide6.QtCore import QThread, Signal
|
||||
|
||||
|
||||
class DownloadThread(QThread):
|
||||
|
@ -10,26 +12,38 @@ class DownloadThread(QThread):
|
|||
finished_signal = Signal(str)
|
||||
error_signal = Signal(str)
|
||||
|
||||
def __init__(self, url, save_path):
|
||||
def __init__(self, url, save_path) -> None:
|
||||
super().__init__()
|
||||
self.url = url
|
||||
self.save_path = save_path
|
||||
|
||||
def run(self):
|
||||
def run(self) -> None:
|
||||
try:
|
||||
response = requests.get(self.url, stream=True)
|
||||
response.raise_for_status()
|
||||
total_size = int(response.headers.get("content-length", 0))
|
||||
block_size = 8192
|
||||
downloaded = 0
|
||||
req = urllib.request.Request(self.url)
|
||||
|
||||
with open(self.save_path, "wb") as file:
|
||||
for data in response.iter_content(block_size):
|
||||
size = file.write(data)
|
||||
downloaded += size
|
||||
if total_size:
|
||||
progress = int((downloaded / total_size) * 100)
|
||||
self.progress_signal.emit(progress)
|
||||
# Create SSL context with certifi certificates
|
||||
ssl_context = ssl.create_default_context(cafile=certifi.where())
|
||||
|
||||
with urllib.request.urlopen(req, context=ssl_context) as response:
|
||||
if response.status != 200:
|
||||
raise urllib.error.HTTPError(
|
||||
self.url, response.status, "HTTP Error", response.headers, None
|
||||
)
|
||||
|
||||
total_size = int(response.headers.get("Content-Length", 0))
|
||||
block_size = 8192
|
||||
downloaded = 0
|
||||
|
||||
with open(self.save_path, "wb") as file:
|
||||
while True:
|
||||
data = response.read(block_size)
|
||||
if not data:
|
||||
break
|
||||
size = file.write(data)
|
||||
downloaded += size
|
||||
if total_size:
|
||||
progress = int((downloaded / total_size) * 100)
|
||||
self.progress_signal.emit(progress)
|
||||
|
||||
# Extract the downloaded zip file
|
||||
extract_dir = os.path.splitext(self.save_path)[0]
|
||||
|
|
|
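The new `DownloadThread.run` above swaps `requests` for `urllib` with a certifi-backed SSL context and streams the response in 8 KB blocks while emitting progress. The same approach, reduced to a standalone function outside of Qt, looks roughly like this sketch (the function name and progress printing are illustrative, not AutoGGUF code):

```python
import ssl
import urllib.request

import certifi  # listed in requirements.txt


def download(url: str, save_path: str, block_size: int = 8192) -> None:
    # Verify TLS against certifi's CA bundle, mirroring the approach in the diff above.
    ctx = ssl.create_default_context(cafile=certifi.where())
    req = urllib.request.Request(url)
    with urllib.request.urlopen(req, context=ctx) as response, open(save_path, "wb") as f:
        total = int(response.headers.get("Content-Length", 0))
        done = 0
        while True:
            chunk = response.read(block_size)
            if not chunk:
                break
            done += f.write(chunk)
            if total:
                # Print a simple percentage instead of emitting a Qt signal.
                print(f"{int(done / total * 100)}%", end="\r")
```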
@ -22,13 +22,15 @@
|
|||
VRAM_USAGE_OVER_TIME,
|
||||
NO_GPU_DETECTED,
|
||||
AMD_GPU_NOT_SUPPORTED,
|
||||
CPU_USAGE_OVER_TIME,
|
||||
RAM_USAGE_OVER_TIME,
|
||||
)
|
||||
|
||||
from ui_update import animate_bar
|
||||
|
||||
|
||||
class SimpleGraph(QGraphicsView):
|
||||
def __init__(self, title, parent=None):
|
||||
def __init__(self, title, parent=None) -> None:
|
||||
super().__init__(parent)
|
||||
self.setScene(QGraphicsScene(self))
|
||||
self.setRenderHint(QPainter.RenderHint.Antialiasing)
|
||||
|
@ -37,7 +39,7 @@ def __init__(self, title, parent=None):
|
|||
self.title = title
|
||||
self.data = []
|
||||
|
||||
def update_data(self, data):
|
||||
def update_data(self, data) -> None:
|
||||
self.data = data
|
||||
self.scene().clear()
|
||||
if not self.data:
|
||||
|
@ -65,13 +67,13 @@ def update_data(self, data):
|
|||
line.setPen(path)
|
||||
self.scene().addItem(line)
|
||||
|
||||
def resizeEvent(self, event):
|
||||
def resizeEvent(self, event) -> None:
|
||||
super().resizeEvent(event)
|
||||
self.update_data(self.data)
|
||||
|
||||
|
||||
class GPUMonitor(QWidget):
|
||||
def __init__(self, parent=None):
|
||||
def __init__(self, parent=None) -> None:
|
||||
super().__init__(parent)
|
||||
self.setMinimumHeight(30)
|
||||
self.setMaximumHeight(30)
|
||||
|
@ -93,7 +95,7 @@ def __init__(self, parent=None):
|
|||
|
||||
self.timer = QTimer(self)
|
||||
self.timer.timeout.connect(self.update_gpu_info)
|
||||
self.timer.start(200) # Update every 0.2 seconds
|
||||
self.timer.start(500) # Update every 0.5 seconds
|
||||
|
||||
self.gpu_data = []
|
||||
self.vram_data = []
|
||||
|
@ -125,17 +127,17 @@ def __init__(self, parent=None):
|
|||
if not self.handles:
|
||||
self.gpu_label.setText(NO_GPU_DETECTED)
|
||||
|
||||
def check_for_amd_gpu(self):
|
||||
def check_for_amd_gpu(self) -> None:
|
||||
# This is a placeholder. Implementing AMD GPU detection would require
|
||||
# platform-specific methods or additional libraries.
|
||||
self.gpu_label.setText(AMD_GPU_NOT_SUPPORTED)
|
||||
|
||||
def change_gpu(self, index):
|
||||
def change_gpu(self, index) -> None:
|
||||
self.current_gpu = index
|
||||
self.gpu_data.clear()
|
||||
self.vram_data.clear()
|
||||
|
||||
def update_gpu_info(self):
|
||||
def update_gpu_info(self) -> None:
|
||||
if self.handles:
|
||||
try:
|
||||
handle = self.handles[self.current_gpu]
|
||||
|
@ -165,11 +167,36 @@ def update_gpu_info(self):
|
|||
self.gpu_bar.setValue(0)
|
||||
self.gpu_label.setText(GPU_USAGE_FORMAT.format(0, 0, 0, 0))
|
||||
|
||||
def mouseDoubleClickEvent(self, event):
|
||||
def mouseDoubleClickEvent(self, event) -> None:
|
||||
if self.handles:
|
||||
self.show_detailed_stats()
|
||||
|
||||
def show_detailed_stats(self):
|
||||
def show_ram_graph(self, event) -> None:
|
||||
self.show_detailed_stats_std(RAM_USAGE_OVER_TIME, self.ram_data)
|
||||
|
||||
def show_cpu_graph(self, event) -> None:
|
||||
self.show_detailed_stats_std(CPU_USAGE_OVER_TIME, self.cpu_data)
|
||||
|
||||
def show_detailed_stats_std(self, title, data) -> None:
|
||||
dialog = QDialog(self)
|
||||
dialog.setWindowTitle(title)
|
||||
dialog.setMinimumSize(800, 600)
|
||||
|
||||
layout = QVBoxLayout(dialog)
|
||||
|
||||
graph = SimpleGraph(title)
|
||||
layout.addWidget(graph)
|
||||
|
||||
def update_graph_data() -> None:
|
||||
graph.update_data(data)
|
||||
|
||||
timer = QTimer(dialog)
|
||||
timer.timeout.connect(update_graph_data)
|
||||
timer.start(500) # Update every 0.5 seconds
|
||||
|
||||
dialog.exec()
|
||||
|
||||
def show_detailed_stats(self) -> None:
|
||||
dialog = QDialog(self)
|
||||
dialog.setWindowTitle(GPU_DETAILS)
|
||||
dialog.setMinimumSize(800, 600)
|
||||
|
@ -194,20 +221,20 @@ def show_detailed_stats(self):
|
|||
gpu_graph = SimpleGraph(GPU_USAGE_OVER_TIME)
|
||||
vram_graph = SimpleGraph(VRAM_USAGE_OVER_TIME)
|
||||
|
||||
def update_graph_data():
|
||||
def update_graph_data() -> None:
|
||||
gpu_graph.update_data(self.gpu_data)
|
||||
vram_graph.update_data(self.vram_data)
|
||||
|
||||
timer = QTimer(dialog)
|
||||
timer.timeout.connect(update_graph_data)
|
||||
timer.start(200) # Update every 0.2 seconds
|
||||
timer.start(500) # Update every 0.5 seconds
|
||||
|
||||
tab_widget.addTab(gpu_graph, GPU_USAGE_OVER_TIME)
|
||||
tab_widget.addTab(vram_graph, VRAM_USAGE_OVER_TIME)
|
||||
|
||||
dialog.exec()
|
||||
|
||||
def closeEvent(self, event):
|
||||
def closeEvent(self, event) -> None:
|
||||
if self.handles:
|
||||
pynvml.nvmlShutdown()
|
||||
super().closeEvent(event)
|
||||
|
|
|
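The `GPUMonitor` above polls an NVIDIA handle every 0.5 s through pynvml and feeds the readings into its graphs. Outside of Qt, the same utilization and VRAM numbers can be read directly; a minimal sketch (not AutoGGUF code, single GPU assumed):

```python
import pynvml

pynvml.nvmlInit()
try:
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)
    util = pynvml.nvmlDeviceGetUtilizationRates(handle)  # .gpu is a percentage
    mem = pynvml.nvmlDeviceGetMemoryInfo(handle)         # .used / .total in bytes
    print(f"GPU {util.gpu}% | VRAM {mem.used / mem.total:.0%}")
finally:
    pynvml.nvmlShutdown()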
@ -1,30 +1,35 @@
|
|||
from PySide6.QtWidgets import QWidget, QHBoxLayout, QLineEdit, QComboBox, QPushButton
|
||||
from PySide6.QtCore import Signal, QRegularExpression
|
||||
from PySide6.QtGui import QDoubleValidator, QIntValidator, QRegularExpressionValidator
|
||||
from datetime import datetime
|
||||
import time
|
||||
import locale
|
||||
import os
|
||||
import socket
|
||||
import platform
|
||||
import shutil
|
||||
import socket
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
import psutil
|
||||
from PySide6.QtCore import QRegularExpression, Signal
|
||||
from PySide6.QtGui import QDoubleValidator, QIntValidator, QRegularExpressionValidator
from PySide6.QtWidgets import QComboBox, QHBoxLayout, QLineEdit, QPushButton, QWidget

class KVOverrideEntry(QWidget):
deleted = Signal(QWidget)

def __init__(self, parent=None):
def __init__(self, parent=None) -> None:
super().__init__(parent)
layout = QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)

self.key_input = QLineEdit()
self.key_input.setPlaceholderText("Key")

# Set validator for key input (letters and dots only)
key_validator = QRegularExpressionValidator(QRegularExpression(r"[A-Za-z.]+"))
self.key_input.setValidator(key_validator)
layout.addWidget(self.key_input)

self.type_combo = QComboBox()
self.type_combo.addItems(["int", "str", "float"])
self.type_combo.addItems(["int", "str", "float", "u32", "i32"])
layout.addWidget(self.type_combo)

self.value_input = QLineEdit()

@ -42,12 +47,16 @@ def __init__(self, parent=None):

# Initialize validator
self.update_validator(self.type_combo.currentText())

def delete_clicked(self):
def delete_clicked(self) -> None:
self.deleted.emit(self)

def get_override_string(
self, model_name=None, quant_type=None, output_path=None
): # Add arguments
self,
model_name=None,
quant_type=None,
output_path=None,
quantization_parameters=None,
) -> str: # Add arguments
key = self.key_input.text()
type_ = self.type_combo.currentText()
value = self.value_input.text()

@ -61,8 +70,14 @@ def get_override_string(

"{system.hostname}": lambda: socket.gethostname(),
"{system.platform}": lambda: platform.system(),
"{system.python.version}": lambda: platform.python_version(),
"{system.time.milliseconds}": lambda: str(int(time.time() * 1000)),
"{system.date}": lambda: datetime.now().strftime("%Y-%m-%d"),
"{system.timezone}": lambda: time.tzname[time.daylight],
"{system.cpus}": lambda: str(os.cpu_count()),
"{system.memory.total}": lambda: str(psutil.virtual_memory().total),
"{system.memory.free}": lambda: str(psutil.virtual_memory().free),
"{system.filesystem.used}": lambda: str(shutil.disk_usage("/").used),
"{system.kernel.version}": lambda: platform.release(),
"{system.locale}": lambda: locale.getdefaultlocale()[0],
"{process.nice}": lambda: str(os.nice(0)),
"{model.name}": lambda: (
model_name if model_name is not None else "Unknown Model"
),

@ -72,6 +87,21 @@ def get_override_string(

"{output.path}": lambda: (
output_path if output_path is not None else "Unknown Output Path"
),
"{quant.kv}": lambda: (
quantization_parameters[0]
if quantization_parameters is not None
else False
),
"{quant.requantized}": lambda: (
quantization_parameters[1]
if quantization_parameters is not None
else False
),
"{quant.leave_output_tensor}": lambda: (
quantization_parameters[2]
if quantization_parameters is not None
else False
),
}

for param, func in dynamic_params.items():

@ -79,11 +109,11 @@ def get_override_string(

return f"{key}={type_}:{value}"

def get_raw_override_string(self):
def get_raw_override_string(self) -> str:
# Return the raw override string with placeholders intact
return f"{self.key_input.text()}={self.type_combo.currentText()}:{self.value_input.text()}"

def update_validator(self, type_):
def update_validator(self, type_) -> None:
if type_ == "int":
self.value_input.setValidator(QIntValidator())
elif type_ == "float":
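For context: get_override_string ultimately emits a `key=type:value` string (presumably handed to llama.cpp's KV-override mechanism), and any `{...}` placeholder in the value is resolved through the dynamic_params table shown above. The exact substitution line falls outside this hunk, so the following is only a minimal sketch of the idea, with a hypothetical resolve_placeholders helper and a trimmed-down placeholder table:

import platform
from datetime import datetime

# Trimmed stand-in for dynamic_params: each placeholder maps to a zero-argument callable.
dynamic_params = {
    "{system.platform}": lambda: platform.system(),
    "{system.date}": lambda: datetime.now().strftime("%Y-%m-%d"),
}

def resolve_placeholders(value: str) -> str:
    # Replace every known placeholder with whatever its callable returns right now.
    for param, func in dynamic_params.items():
        value = value.replace(param, str(func()))
    return value

key, type_, value = "general.source_date", "str", "{system.date}"
print(f"{key}={type_}:{resolve_placeholders(value)}")  # e.g. general.source_date=str:2025-01-01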
3484 src/Localizations.py
File diff suppressed because it is too large
@ -5,7 +5,7 @@

class Logger:
def __init__(self, name, log_dir):
def __init__(self, name, log_dir) -> None:
self.logger = logging.getLogger(name)
self.logger.setLevel(logging.DEBUG)

@ -34,17 +34,17 @@ def __init__(self, name, log_dir):

self.logger.addHandler(console_handler)
self.logger.addHandler(file_handler)

def debug(self, message):
def debug(self, message) -> None:
self.logger.debug(message)

def info(self, message):
def info(self, message) -> None:
self.logger.info(message)

def warning(self, message):
def warning(self, message) -> None:
self.logger.warning(message)

def error(self, message):
def error(self, message) -> None:
self.logger.error(message)

def critical(self, message):
def critical(self, message) -> None:
self.logger.critical(message)
@ -1,8 +1,8 @@

from PySide6.QtWidgets import *
from PySide6.QtWidgets import QVBoxLayout, QTextEdit, QDialog, QPushButton

class ModelInfoDialog(QDialog):
def __init__(self, model_info, parent=None):
def __init__(self, model_info, parent=None) -> None:
super().__init__(parent)
self.setWindowTitle("Model Information")
self.setGeometry(200, 200, 600, 400)

@ -21,11 +21,24 @@ def __init__(self, model_info, parent=None):

self.setLayout(layout)

def format_model_info(self, model_info):
def format_model_info(self, model_info) -> str:
html = "<h2>Model Information</h2>"
html += f"<p><b>Architecture:</b> {model_info.get('architecture', 'N/A')}</p>"
html += f"<p><b>Quantization Type:</b> {model_info.get('quantization_type', 'N/A')}</p>"
html += f"<p><b>KV Pairs:</b> {model_info.get('kv_pairs', 'N/A')}</p>"

# Format quantization types
quant_types = model_info.get("quantization_type", [])
if quant_types:
# Clean up the format: remove "- type " prefix and join with " | "
formatted_types = []
for qtype in quant_types:
# Remove "- type " prefix if present
clean_type = qtype.replace("- type ", "").strip()
formatted_types.append(clean_type)
quant_display = " | ".join(formatted_types)
else:
quant_display = "N/A"

html += f"<p><b>Quantization Type:</b> {quant_display}</p>"
html += f"<p><b>Tensors:</b> {model_info.get('tensors', 'N/A')}</p>"

html += "<h3>Key-Value Pairs:</h3>"
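To make the quantization-type cleanup above concrete, this is the transformation it performs on typical entries (the sample strings are illustrative; real values come from the llama.cpp loader output parsed in QuantizationThread):

quant_types = ["- type  f32: 65 tensors", "- type q4_K: 193 tensors"]
formatted_types = [q.replace("- type ", "").strip() for q in quant_types]
print(" | ".join(formatted_types))  # f32: 65 tensors | q4_K: 193 tensors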
@ -0,0 +1,81 @@

import importlib
import os
from typing import Any, Dict
from Localizations import *

class Plugins:

def load_plugins(self) -> Dict[str, Dict[str, Any]]:
plugins = {}
plugin_dir = "plugins"

if not os.path.exists(plugin_dir):
self.logger.info(PLUGINS_DIR_NOT_EXIST.format(plugin_dir))
return plugins

if not os.path.isdir(plugin_dir):
self.logger.warning(PLUGINS_DIR_NOT_DIRECTORY.format(plugin_dir))
return plugins

for file in os.listdir(plugin_dir):
if file.endswith(".py") and not file.endswith(".disabled.py"):
name = file[:-3]
path = os.path.join(plugin_dir, file)

try:
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)

for item_name in dir(module):
item = getattr(module, item_name)
if isinstance(item, type) and hasattr(item, "__data__"):
plugin_instance = item()
plugin_data = plugin_instance.__data__()

compatible_versions = plugin_data.get(
"compatible_versions", []
)
if (
"*" in compatible_versions
or AUTOGGUF_VERSION in compatible_versions
):
plugins[name] = {
"instance": plugin_instance,
"data": plugin_data,
}
self.logger.info(
PLUGIN_LOADED.format(
plugin_data["name"], plugin_data["version"]
)
)
else:
self.logger.warning(
PLUGIN_INCOMPATIBLE.format(
plugin_data["name"],
plugin_data["version"],
AUTOGGUF_VERSION,
", ".join(compatible_versions),
)
)
break
except Exception as e:
self.logger.error(PLUGIN_LOAD_FAILED.format(name, str(e)))

return plugins

def apply_plugins(self) -> None:
if not self.plugins:
self.logger.info(NO_PLUGINS_LOADED)
return

for plugin_name, plugin_info in self.plugins.items():
plugin_instance = plugin_info["instance"]
for attr_name in dir(plugin_instance):
if not attr_name.startswith("__") and attr_name != "init":
attr_value = getattr(plugin_instance, attr_name)
setattr(self, attr_name, attr_value)

if hasattr(plugin_instance, "init") and callable(plugin_instance.init):
plugin_instance.init(self)
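Based on the loader and apply_plugins above, a plugin is any class in plugins/*.py exposing a __data__() method; its non-dunder attributes are copied onto the application object and an optional init() hook receives that object after loading. A minimal plugin sketch consistent with that contract (the file name, version string, and method bodies are made up for illustration):

# plugins/hello_plugin.py (hypothetical example)
class HelloPlugin:
    def __data__(self):
        # "*" marks the plugin as compatible with any AUTOGGUF_VERSION
        return {
            "name": "HelloPlugin",
            "version": "0.1",
            "compatible_versions": ["*"],
        }

    def init(self, app) -> None:
        # Called once after loading; `app` is presumably the main AutoGGUF window object
        app.logger.info("HelloPlugin initialized")

    def say_hello(self) -> None:
        # Non-dunder attributes like this one are copied onto the app by apply_plugins()
        print("Hello from a plugin")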
@ -1,10 +1,12 @@

import os
import re
import signal
import subprocess

from PySide6.QtCore import *
from PySide6.QtCore import Signal, QThread

from imports_and_globals import open_file_safe
from Localizations import *
from globals import open_file_safe
from Localizations import IN_PROGRESS, COMPLETED

class QuantizationThread(QThread):

@ -15,7 +17,7 @@ class QuantizationThread(QThread):

error_signal = Signal(str)
model_info_signal = Signal(dict)

def __init__(self, command, cwd, log_file):
def __init__(self, command, cwd, log_file) -> None:
super().__init__()
self.command = command
self.cwd = cwd

@ -23,7 +25,7 @@ def __init__(self, command, cwd, log_file):

self.process = None
self.model_info = {}

def run(self):
def run(self) -> None:
try:
# Start the subprocess
self.process = subprocess.Popen(

@ -56,7 +58,35 @@ def run(self):

except Exception as e:
self.error_signal.emit(str(e))

def parse_model_info(self, line):
def parse_model_info(self, line) -> None:
# Mapping of technical keys to human-readable names
key_mappings = {
"general.architecture": "Architecture",
"general.name": "Model Name",
"general.file_type": "File Type",
"general.quantization_version": "Quantization Version",
"llama.block_count": "Layers",
"llama.context_length": "Context Length",
"llama.embedding_length": "Embedding Size",
"llama.feed_forward_length": "Feed Forward Length",
"llama.attention.head_count": "Attention Heads",
"llama.attention.head_count_kv": "Key-Value Heads",
"llama.attention.layer_norm_rms_epsilon": "RMS Norm Epsilon",
"llama.rope.freq_base": "RoPE Frequency Base",
"llama.rope.dimension_count": "RoPE Dimensions",
"llama.vocab_size": "Vocabulary Size",
"tokenizer.ggml.model": "Tokenizer Model",
"tokenizer.ggml.pre": "Tokenizer Preprocessing",
"tokenizer.ggml.tokens": "Tokens",
"tokenizer.ggml.token_type": "Token Types",
"tokenizer.ggml.merges": "BPE Merges",
"tokenizer.ggml.bos_token_id": "Begin of Sequence Token ID",
"tokenizer.ggml.eos_token_id": "End of Sequence Token ID",
"tokenizer.chat_template": "Chat Template",
"tokenizer.ggml.padding_token_id": "Padding Token ID",
"tokenizer.ggml.unk_token_id": "Unknown Token ID",
}

# Parse output for model information
if "llama_model_loader: loaded meta data with" in line:
parts = line.split()

@ -64,10 +94,25 @@ def parse_model_info(self, line):

self.model_info["tensors"] = parts[9]
elif "general.architecture" in line:
self.model_info["architecture"] = line.split("=")[-1].strip()
elif line.startswith("llama_model_loader: - kv"):
key = line.split(":")[2].strip()
value = line.split("=")[-1].strip()
self.model_info.setdefault("kv_data", {})[key] = value
elif line.startswith("llama_model_loader: - kv") and "=" in line:
# Split on '=' and take the parts
parts = line.split("=", 1) # Split only on first '='
left_part = parts[0].strip()
value = parts[1].strip()

# Extract key and type from left part
# Format: "llama_model_loader: - kv N: key type"
kv_parts = left_part.split(":")
if len(kv_parts) >= 3:
key_type_part = kv_parts[2].strip() # This is "key type"
key = key_type_part.rsplit(" ", 1)[
0
] # Everything except last word (type)

# Use human-readable name if available, otherwise use original key
display_key = key_mappings.get(key, key)

self.model_info.setdefault("kv_data", {})[display_key] = value
elif line.startswith("llama_model_loader: - type"):
parts = line.split(":")
if len(parts) > 1:

@ -77,7 +122,31 @@ def parse_model_info(self, line):

f"{quant_type}: {tensors} tensors"
)

def terminate(self):
def parse_progress(self, line, task_item, imatrix_chunks=None) -> None:
# Parses the output line for progress information and updates the task item.
match = re.search(r"\[\s*(\d+)\s*/\s*(\d+)\s*].*", line)

if match:
current = int(match.group(1))
total = int(match.group(2))
progress = int((current / total) * 100)
task_item.update_progress(progress)
else:
imatrix_match = re.search(
r"compute_imatrix: computing over (\d+) chunks with batch_size \d+",
line,
)
if imatrix_match:
imatrix_chunks = int(imatrix_match.group(1))
elif imatrix_chunks is not None:
if "save_imatrix: stored collected data" in line:
save_match = re.search(r"collected data after (\d+) chunks", line)
if save_match:
saved_chunks = int(save_match.group(1))
progress = int((saved_chunks / self.imatrix_chunks) * 100)
task_item.update_progress(progress)

def terminate(self) -> None:
# Terminate the subprocess if it's still running
if self.process:
os.kill(self.process.pid, signal.SIGTERM)
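The progress regex in parse_progress keys off llama.cpp's "[ current / total ]" counter lines; a quick self-contained check of that parsing (the sample line is illustrative, not copied from real output):

import re

line = "[  42/ 291]  blk.4.ffn_down.weight - quantizing to q4_K"
match = re.search(r"\[\s*(\d+)\s*/\s*(\d+)\s*].*", line)
if match:
    current, total = int(match.group(1)), int(match.group(2))
    print(int((current / total) * 100))  # 14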
@ -1,14 +1,52 @@

from typing import List

from PySide6.QtCore import *
from PySide6.QtGui import QAction
from PySide6.QtWidgets import *

from Localizations import (
DELETING_TASK,
CANCELLING_TASK,
CONFIRM_DELETION_TITLE,
CONFIRM_DELETION,
SHOWING_TASK_CONTEXT_MENU,
CANCELED,
CANCEL,
PROPERTIES,
COMPLETED,
SHOWING_PROPERTIES_FOR_TASK,
DELETE,
RESTART,
IN_PROGRESS,
ERROR,
RESTARTING_TASK,
)
from ModelInfoDialog import ModelInfoDialog
from QuantizationThread import QuantizationThread
from Logger import Logger
from error_handling import handle_error

class TaskListItem(QWidget):
def __init__(self, task_name, log_file, show_progress_bar=True, parent=None):
def __init__(
self,
task_name,
log_file,
show_progress_bar=True,
parent=None,
show_properties=False,
logger=Logger,
quant_threads=List[QuantizationThread],
) -> None:
super().__init__(parent)
self.quant_threads = quant_threads
self.task_name = task_name
self.log_file = log_file
self.logger = logger
self.show_properties = show_properties
self.status = "Pending"
layout = QHBoxLayout(self)

self.task_label = QLabel(task_name)
self.progress_bar = QProgressBar()
self.progress_bar.setRange(0, 100)

@ -28,34 +66,136 @@ def __init__(self, task_name, log_file, show_progress_bar=True, parent=None):

self.progress_timer.timeout.connect(self.update_progress)
self.progress_value = 0

def update_status(self, status):
def show_task_context_menu(self, position) -> None:
self.logger.debug(SHOWING_TASK_CONTEXT_MENU)
item = self.task_list.itemAt(position)
if item is not None:
context_menu = QMenu(self)

properties_action = QAction(PROPERTIES, self)
properties_action.triggered.connect(lambda: self.show_task_properties(item))
context_menu.addAction(properties_action)

task_item = self.task_list.itemWidget(item)
if task_item.status != COMPLETED:
cancel_action = QAction(CANCEL, self)
cancel_action.triggered.connect(lambda: self.cancel_task(item))
context_menu.addAction(cancel_action)

if task_item.status == CANCELED:
restart_action = QAction(RESTART, self)
restart_action.triggered.connect(lambda: self.restart_task(task_item))
context_menu.addAction(restart_action)

delete_action = QAction(DELETE, self)
delete_action.triggered.connect(lambda: self.delete_task(item))
context_menu.addAction(delete_action)

context_menu.exec(self.task_list.viewport().mapToGlobal(position))

def show_task_properties(self, item) -> None:
self.logger.debug(SHOWING_PROPERTIES_FOR_TASK.format(item.text()))
for thread in self.quant_threads:
model_info_dialog = ModelInfoDialog(thread.model_info, self)
model_info_dialog.exec()
break

def cancel_task(self, item) -> None:
# TODO: fix possibly buggy signal behavior
task_item = self.task_list.itemWidget(item)
if task_item:
task_name = task_item.task_name # Store the name before any changes
self.logger.info(CANCELLING_TASK.format(task_name))

# Find the thread and disconnect signals before terminating
for thread in self.quant_threads:
if thread.log_file == task_item.log_file:
# Disconnect all signals from this thread first
try:
thread.error_signal.disconnect() # Disconnect all error signal connections
thread.output_signal.disconnect() # Disconnect all output signal connections
except TypeError:
# No connections to disconnect
pass

# Now terminate the thread
thread.terminate()
self.quant_threads.remove(thread)
break

def delete_task(self, item) -> None:
task_item = self.task_list.itemWidget(item)
if not task_item:
return

task_name = task_item.task_name # Store task_name before deletion
self.logger.info(DELETING_TASK.format(task_name))

reply = QMessageBox.question(
self,
CONFIRM_DELETION_TITLE,
CONFIRM_DELETION,
QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No,
QMessageBox.StandardButton.No,
)

if reply == QMessageBox.StandardButton.Yes:
# Cancel the task first (which disconnects signals)
self.cancel_task(item)

# Now remove from list and delete
row = self.task_list.row(item)
self.task_list.takeItem(row)

# Delete the widget after removing from list
task_item.deleteLater()

def update_status(self, status) -> None:
self.status = status
self.status_label.setText(status)
if status == "In Progress":
if status == IN_PROGRESS:
# Only start timer if showing percentage progress
if self.progress_bar.isVisible():
self.progress_bar.setRange(0, 100)
self.progress_timer.start(100)
elif status == "Completed":
elif status == COMPLETED:
self.progress_timer.stop()
self.progress_bar.setValue(100)
elif status == "Canceled":
elif status == CANCELED:
self.progress_timer.stop()
self.progress_bar.setValue(0)

def set_error(self):
self.status = "Error"
self.status_label.setText("Error")
def set_error(self) -> None:
self.status = ERROR
self.status_label.setText(ERROR)
self.status_label.setStyleSheet("color: red;")
self.progress_bar.setRange(0, 100)
self.progress_timer.stop()

def update_progress(self, value=None):
def update_progress(self, value=None) -> None:
if value is not None:
# Update progress bar with specific value
self.progress_value = value
self.progress_bar.setValue(self.progress_value)
else:
# Increment progress bar for indeterminate progress
self.progress_value = (self.progress_value + 1) % 101
self.progress_bar.setValue(self.progress_value)
return

def restart_task(self, task_item) -> None:
self.logger.info(RESTARTING_TASK.format(task_item.task_name))
for thread in self.quant_threads:
if thread.log_file == task_item.log_file:
new_thread = QuantizationThread(
thread.command, thread.cwd, thread.log_file
)
self.quant_threads.append(new_thread)
new_thread.status_signal.connect(task_item.update_status)
new_thread.finished_signal.connect(
lambda: self.task_finished(new_thread, task_item)
)
new_thread.error_signal.connect(
lambda err: handle_error(self.logger, err, task_item)
)
new_thread.model_info_signal.connect(self.update_model_info)
new_thread.start()
task_item.update_status(IN_PROGRESS)
break
File diff suppressed because it is too large
@ -1,19 +1,17 @@

from __future__ import annotations

import logging
import json
import logging
import os
import struct
import sys
from pathlib import Path
from typing import Any, BinaryIO, Sequence
from typing import BinaryIO

import numpy as np
import torch

if "NO_LOCAL_GGUF" not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / "gguf-py" / "gguf"))
import gguf
from gguf.constants import *
from gguf.tensor_mapping import *

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("lora-to-gguf")

@ -51,25 +49,20 @@ def write_tensor_header(

fout.seek((fout.tell() + 31) & -32)

def pyinstaller_include():
# PyInstaller import
pass

if __name__ == "__main__":
if len(sys.argv) < 2:
logger.info(f"Usage: python {sys.argv[0]} <path> [arch]")
logger.info(f"Usage: python {sys.argv[0]} <path> <output_path> [arch]")
logger.info(
"Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
)
logger.info(
f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)"
f"Arch must be one of {list(MODEL_ARCH_NAMES.values())} (default: llama)"
)
sys.exit(1)

input_json = os.path.join(sys.argv[1], "adapter_config.json")
input_model = os.path.join(sys.argv[1], "adapter_model.bin")
output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
output_path = sys.argv[2]

if os.path.exists(input_model):
model = torch.load(input_model, map_location="cpu")

@ -80,16 +73,16 @@ def pyinstaller_include():

model = load_file(input_model, device="cpu")

arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
arch_name = sys.argv[3] if len(sys.argv) == 4 else "llama"

if arch_name not in gguf.MODEL_ARCH_NAMES.values():
if arch_name not in MODEL_ARCH_NAMES.values():
logger.error(f"Error: unsupported architecture {arch_name}")
sys.exit(1)

arch = list(gguf.MODEL_ARCH_NAMES.keys())[
list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)
arch = list(MODEL_ARCH_NAMES.keys())[
list(MODEL_ARCH_NAMES.values()).index(arch_name)
]
name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone
name_map = TensorNameMap(arch, 500)

with open(input_json, "r") as f:
params = json.load(f)
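The architecture lookup above inverts gguf's MODEL_ARCH_NAMES mapping to recover the enum key from the CLI architecture name; the same idiom in isolation, using a toy dictionary in place of the real one (which is keyed by MODEL_ARCH enum members):

MODEL_ARCH_NAMES = {0: "llama", 1: "falcon"}  # toy stand-in for gguf.MODEL_ARCH_NAMES

arch_name = "llama"
if arch_name not in MODEL_ARCH_NAMES.values():
    raise SystemExit(f"Error: unsupported architecture {arch_name}")

arch = list(MODEL_ARCH_NAMES.keys())[list(MODEL_ARCH_NAMES.values()).index(arch_name)]
print(arch)  # 0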
@ -18,18 +18,16 @@

SupportsIndex,
cast,
)
from transformers import AutoConfig

import torch

if TYPE_CHECKING:
from torch import Tensor

if "NO_LOCAL_GGUF" not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / "gguf-py"))
import gguf

# reuse model definitions from convert_hf_to_gguf.py
from convert_hf_to_gguf import LazyTorchTensor, Model
from convert_hf_to_gguf import LazyTorchTensor, ModelBase

logger = logging.getLogger("lora-to-gguf")

@ -242,17 +240,15 @@ def get_base_tensor_name(lora_tensor_name: str) -> str:

base_name = lora_tensor_name.replace("base_model.model.", "")
base_name = base_name.replace(".lora_A.weight", ".weight")
base_name = base_name.replace(".lora_B.weight", ".weight")
# models produced by mergekit-extract-lora have token embeddings in the adapter
base_name = base_name.replace(".lora_embedding_A", ".weight")
base_name = base_name.replace(".lora_embedding_B", ".weight")
return base_name

def pyinstaller_include():
# PyInstaller import
pass

def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Convert a huggingface PEFT LoRA adapter to a GGML compatible file"
description="Convert a Hugging Face PEFT LoRA adapter to a GGUF file"
)
parser.add_argument(
"--outfile",

@ -289,18 +285,28 @@ def parse_args() -> argparse.Namespace:

parser.add_argument(
"--base",
type=Path,
required=True,
help="directory containing base model file",
help="directory containing Hugging Face model config files (config.json, tokenizer.json) for the base model that the adapter is based on - only config is needed, actual model weights are not required. If base model is unspecified, it will be loaded from Hugging Face hub based on the adapter config",
)
parser.add_argument(
"--base-model-id",
type=str,
help="the model ID of the base model, if it is not available locally or in the adapter config. If specified, it will ignore --base and load the base model config from the Hugging Face hub (Example: 'meta-llama/Llama-3.2-1B-Instruct')",
)
parser.add_argument(
"lora_path",
type=Path,
help="directory containing LoRA adapter file",
help="directory containing Hugging Face PEFT LoRA config (adapter_model.json) and weights (adapter_model.safetensors or adapter_model.bin)",
)

return parser.parse_args()

def load_hparams_from_hf(hf_model_id: str) -> dict[str, Any]:
# normally, adapter does not come with base model config, we need to load it from AutoConfig
config = AutoConfig.from_pretrained(hf_model_id)
return config.to_dict()

if __name__ == "__main__":
args = parse_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

@ -315,8 +321,9 @@ def parse_args() -> argparse.Namespace:

ftype = ftype_map[args.outtype]

dir_base_model: Path = args.base
dir_base_model: Path | None = args.base
dir_lora: Path = args.lora_path
base_model_id: str | None = args.base_model_id
lora_config = dir_lora / "adapter_config.json"
input_model = dir_lora / "adapter_model.safetensors"

@ -335,12 +342,41 @@ def parse_args() -> argparse.Namespace:

input_model = os.path.join(dir_lora, "adapter_model.bin")
lora_model = torch.load(input_model, map_location="cpu", weights_only=True)

# load LoRA config
with open(lora_config, "r") as f:
lparams: dict[str, Any] = json.load(f)

# load base model
logger.info(f"Loading base model: {dir_base_model.name}")
hparams = Model.load_hparams(dir_base_model)
if base_model_id is not None:
logger.info(f"Loading base model from Hugging Face: {base_model_id}")
hparams = load_hparams_from_hf(base_model_id)
elif dir_base_model is None:
if "base_model_name_or_path" in lparams:
model_id = lparams["base_model_name_or_path"]
logger.info(f"Loading base model from Hugging Face: {model_id}")
try:
hparams = load_hparams_from_hf(model_id)
except OSError as e:
logger.error(f"Failed to load base model config: {e}")
logger.error(
"Please try downloading the base model and add its path to --base"
)
sys.exit(1)
else:
logger.error(
"'base_model_name_or_path' is not found in adapter_config.json"
)
logger.error(
"Base model config is required. Please download the base model and add its path to --base"
)
sys.exit(1)
else:
logger.info(f"Loading base model: {dir_base_model.name}")
hparams = ModelBase.load_hparams(dir_base_model)

with torch.inference_mode():
try:
model_class = Model.from_model_architecture(hparams["architectures"][0])
model_class = ModelBase.from_model_architecture(hparams["architectures"][0])
except NotImplementedError:
logger.error(f"Model {hparams['architectures'][0]} is not supported")
sys.exit(1)

@ -359,6 +395,9 @@ def __init__(

self.dir_model_card = dir_lora_model
self.lora_alpha = float(lora_alpha)

def set_vocab(self):
pass

def set_type(self):
self.gguf_writer.add_type(gguf.GGUFType.ADAPTER)
self.gguf_writer.add_string(gguf.Keys.Adapter.TYPE, "lora")

@ -367,7 +406,10 @@ def set_gguf_parameters(self):

self.gguf_writer.add_float32(
gguf.Keys.Adapter.LORA_ALPHA, self.lora_alpha
)
super().set_gguf_parameters()

def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
# Never add extra tensors (e.g. rope_freqs) for LoRA adapters
return ()

def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
tensor_map: dict[str, PartialLoraTensor] = {}

@ -376,14 +418,26 @@ def get_tensors(self) -> Iterator[tuple[str, Tensor]]:

if self.lazy:
tensor = LazyTorchTensor.from_eager(tensor)
base_name = get_base_tensor_name(name)
is_lora_a = ".lora_A.weight" in name
is_lora_b = ".lora_B.weight" in name
# note: mergekit-extract-lora also adds token embeddings to the adapter
is_lora_a = ".lora_A.weight" in name or ".lora_embedding_A" in name
is_lora_b = ".lora_B.weight" in name or ".lora_embedding_B" in name
if not is_lora_a and not is_lora_b:
if ".base_layer.weight" in name:
continue
# mergekit-extract-lora add these layernorm to the adapter, we need to keep them
if "_layernorm" in name or ".norm" in name:
yield (base_name, tensor)
continue
logger.error(
f"Unexpected name '{name}': Not a lora_A or lora_B tensor"
)
if ".embed_tokens.weight" in name or ".lm_head.weight" in name:
logger.error(
"Embeddings is present in the adapter. This can be due to new tokens added during fine tuning"
)
logger.error(
"Please refer to https://github.com/ggml-org/llama.cpp/pull/9948"
)
sys.exit(1)

if base_name in tensor_map:

@ -408,17 +462,34 @@ def get_tensors(self) -> Iterator[tuple[str, Tensor]]:

def modify_tensors(
self, data_torch: Tensor, name: str, bid: int | None
) -> Iterable[tuple[str, Tensor]]:
dest = super().modify_tensors(data_torch, name, bid)
dest = list(super().modify_tensors(data_torch, name, bid))
# some archs may have the same tensor for lm_head and output (tie word embeddings)
# in this case, adapters targeting lm_head will fail when using llama-export-lora
# therefore, we ignore them for now
# see: https://github.com/ggml-org/llama.cpp/issues/9065
if name == "lm_head.weight" and len(dest) == 0:
raise ValueError(
"lm_head is present in adapter, but is ignored in base model"
)
for dest_name, dest_data in dest:
# mergekit-extract-lora add these layernorm to the adapter
if "_norm" in dest_name:
assert dest_data.dim() == 1
yield (dest_name, dest_data)
continue

# otherwise, we must get the lora_A and lora_B tensors
assert isinstance(dest_data, LoraTorchTensor)
lora_a, lora_b = dest_data.get_lora_A_B()

# note: mergekit-extract-lora flip and transpose A and B
# here we only need to transpose token_embd.lora_a, see llm_build_inp_embd()
if "token_embd.weight" in dest_name:
lora_a = lora_a.T

yield (dest_name + ".lora_a", lora_a)
yield (dest_name + ".lora_b", lora_b)

with open(lora_config, "r") as f:
lparams: dict[str, Any] = json.load(f)

alpha: float = lparams["lora_alpha"]

model_instance = LoraModel(

@ -431,6 +502,7 @@ def modify_tensors(

dry_run=args.dry_run,
dir_lora_model=dir_lora,
lora_alpha=alpha,
hparams=hparams,
)

logger.info("Exporting model...")
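For readers following get_base_tensor_name above: mapping a PEFT adapter tensor name back to its base-model tensor name is plain string replacement; a small self-contained check (the sample name is typical PEFT output, not taken from the diff):

name = "base_model.model.model.layers.0.self_attn.q_proj.lora_A.weight"
base = name.replace("base_model.model.", "")
base = base.replace(".lora_A.weight", ".weight").replace(".lora_B.weight", ".weight")
base = base.replace(".lora_embedding_A", ".weight").replace(".lora_embedding_B", ".weight")
print(base)  # model.layers.0.self_attn.q_proj.weight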
@ -0,0 +1,105 @@

import argparse
import json
import sys
from pathlib import Path

import numpy as np
import torch
from safetensors.torch import save_file

import gguf

def dequantize_tensor(tensor):
if tensor.tensor_type in [
gguf.GGMLQuantizationType.F32,
gguf.GGMLQuantizationType.F16,
gguf.GGMLQuantizationType.BF16,
]:
return np.array(tensor.data)
else:
return tensor.data.astype(np.float32)

def gguf_to_safetensors(gguf_path, safetensors_path, metadata_path=None):
try:
reader = gguf.GGUFReader(gguf_path)
except Exception as e:
print(f"Error reading GGUF file: {e}", file=sys.stderr)
sys.exit(1)

tensors = {}
metadata = {}

for tensor in reader.tensors:
try:
dequantized_data = dequantize_tensor(tensor)
tensors[tensor.name] = torch.from_numpy(
dequantized_data.reshape(tuple(reversed(tensor.shape)))
)
except Exception as e:
print(f"Error processing tensor {tensor.name}: {e}", file=sys.stderr)
continue

for field_name, field in reader.fields.items():
if field.data:
metadata[field_name] = field.parts[field.data[0]].tolist()

try:
save_file(tensors, safetensors_path)
except Exception as e:
print(f"Error saving SafeTensors file: {e}", file=sys.stderr)
sys.exit(1)

decoded_metadata = {}
for key, value in metadata.items():
if isinstance(value, list) and all(isinstance(item, int) for item in value):
decoded_value = ""
for item in value:
if 48 <= item <= 57:
decoded_value += str(item - 48)
elif 32 <= item <= 126:
decoded_value += chr(item)
else:
decoded_value += str(item)
decoded_metadata[key] = decoded_value
else:
decoded_metadata[key] = value

if metadata_path:
try:
with open(metadata_path, "w") as f:
json.dump(decoded_metadata, f, indent=4)
except Exception as e:
print(f"Error saving metadata file: {e}", file=sys.stderr)

def main():
parser = argparse.ArgumentParser(description="Convert GGUF to SafeTensors format")
parser.add_argument("gguf_path", type=str, help="Path to the input GGUF file")
parser.add_argument(
"safetensors_path", type=str, help="Path to save the SafeTensors file"
)
parser.add_argument(
"--metadata_path",
type=str,
help="Optional path to save metadata as a JSON file",
)

args = parser.parse_args()

gguf_path = Path(args.gguf_path)
safetensors_path = Path(args.safetensors_path)
metadata_path = Path(args.metadata_path) if args.metadata_path else None

if not gguf_path.exists():
print(f"Error: GGUF file '{gguf_path}' does not exist.", file=sys.stderr)
sys.exit(1)

print(f"Converting {gguf_path} to {safetensors_path}")
gguf_to_safetensors(gguf_path, safetensors_path, metadata_path)
print("Conversion complete.")

if __name__ == "__main__":
main()
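Per the argparse setup above, the converter is invoked as `<script> gguf_path safetensors_path [--metadata_path meta.json]` (the script's filename is not shown in this diff). To make the metadata byte-decoding loop concrete, here is what it produces for a GGUF string field stored as a list of byte values (sample bytes chosen for illustration):

value = [108, 108, 97, 109, 97]  # the ASCII bytes of "llama"
decoded = ""
for item in value:
    if 48 <= item <= 57:        # ASCII digits become their numeric character
        decoded += str(item - 48)
    elif 32 <= item <= 126:     # other printable ASCII is decoded as a character
        decoded += chr(item)
    else:                       # anything else falls back to the raw integer
        decoded += str(item)
print(decoded)  # llama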
@ -1,13 +1,13 @@

from PySide6.QtWidgets import QMessageBox
from Localizations import *
from Localizations import ERROR_MESSAGE, ERROR, TASK_ERROR

def show_error(logger, message):
logger.error(ERROR_MESSAGE.format(message))
def show_error(logger, message) -> None:
logger.error(message)
QMessageBox.critical(None, ERROR, message)

def handle_error(logger, error_message, task_item):
def handle_error(logger, error_message, task_item) -> None:
logger.error(TASK_ERROR.format(error_message))
show_error(logger, error_message)
task_item.update_status(ERROR)
File diff suppressed because it is too large
@ -1,147 +0,0 @@
|
|||
from __future__ import annotations
|
||||
from typing import Callable, Sequence
|
||||
|
||||
from numpy.typing import DTypeLike
|
||||
|
||||
from .constants import GGML_QUANT_SIZES, GGMLQuantizationType
|
||||
from .lazy import LazyNumpyTensor
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
def quant_shape_to_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
|
||||
block_size, type_size = GGML_QUANT_SIZES[quant_type]
|
||||
if shape[-1] % block_size != 0:
|
||||
raise ValueError(
|
||||
f"Quantized tensor row size ({shape[-1]}) is not a multiple of {quant_type.name} block size ({block_size})"
|
||||
)
|
||||
return (*shape[:-1], shape[-1] // block_size * type_size)
|
||||
|
||||
|
||||
def quant_shape_from_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
|
||||
block_size, type_size = GGML_QUANT_SIZES[quant_type]
|
||||
if shape[-1] % type_size != 0:
|
||||
raise ValueError(
|
||||
f"Quantized tensor bytes per row ({shape[-1]}) is not a multiple of {quant_type.name} type size ({type_size})"
|
||||
)
|
||||
return (*shape[:-1], shape[-1] // type_size * block_size)
|
||||
|
||||
|
||||
# same as ggml_compute_fp32_to_bf16 in ggml-impl.h
|
||||
def __compute_fp32_to_bf16(n: np.ndarray) -> np.ndarray:
|
||||
n = n.astype(np.float32, copy=False).view(np.uint32)
|
||||
# force nan to quiet
|
||||
n = np.where(
|
||||
(n & 0x7FFFFFFF) > 0x7F800000,
|
||||
(n & np.uint32(0xFFFF0000)) | np.uint32(64 << 16),
|
||||
n,
|
||||
)
|
||||
# round to nearest even
|
||||
n = (np.uint64(n) + (0x7FFF + ((n >> 16) & 1))) >> 16
|
||||
return n.astype(np.uint16)
|
||||
|
||||
|
||||
# This is faster than np.vectorize and np.apply_along_axis because it works on more than one row at a time
|
||||
def __apply_over_grouped_rows(
|
||||
func: Callable[[np.ndarray], np.ndarray],
|
||||
arr: np.ndarray,
|
||||
otype: DTypeLike,
|
||||
oshape: tuple[int, ...],
|
||||
) -> np.ndarray:
|
||||
rows = arr.reshape((-1, arr.shape[-1]))
|
||||
osize = 1
|
||||
for dim in oshape:
|
||||
osize *= dim
|
||||
out = np.empty(shape=osize, dtype=otype)
|
||||
# compute over groups of 16 rows (arbitrary, but seems good for performance)
|
||||
n_groups = (rows.shape[0] // 16) or 1
|
||||
np.concatenate(
|
||||
[func(group).ravel() for group in np.array_split(rows, n_groups)],
|
||||
axis=0,
|
||||
out=out,
|
||||
)
|
||||
return out.reshape(oshape)
|
||||
|
||||
|
||||
def __quantize_bf16_array(n: np.ndarray) -> np.ndarray:
|
||||
return __apply_over_grouped_rows(
|
||||
__compute_fp32_to_bf16, arr=n, otype=np.uint16, oshape=n.shape
|
||||
)
|
||||
|
||||
|
||||
__quantize_bf16_lazy = LazyNumpyTensor._wrap_fn(
|
||||
__quantize_bf16_array, meta_noop=np.uint16
|
||||
)
|
||||
|
||||
|
||||
def quantize_bf16(n: np.ndarray):
|
||||
if type(n) is LazyNumpyTensor:
|
||||
return __quantize_bf16_lazy(n)
|
||||
else:
|
||||
return __quantize_bf16_array(n)
|
||||
|
||||
|
||||
__q8_block_size, __q8_type_size = GGML_QUANT_SIZES[GGMLQuantizationType.Q8_0]
|
||||
|
||||
|
||||
def can_quantize_to_q8_0(n: np.ndarray) -> bool:
|
||||
return n.shape[-1] % __q8_block_size == 0
|
||||
|
||||
|
||||
# round away from zero
|
||||
# ref: https://stackoverflow.com/a/59143326/22827863
|
||||
def np_roundf(n: np.ndarray) -> np.ndarray:
|
||||
a = abs(n)
|
||||
floored = np.floor(a)
|
||||
b = floored + np.floor(2 * (a - floored))
|
||||
return np.sign(n) * b
|
||||
|
||||
|
||||
def __quantize_q8_0_shape_change(s: tuple[int, ...]) -> tuple[int, ...]:
|
||||
return (*s[:-1], s[-1] // __q8_block_size * __q8_type_size)
|
||||
|
||||
|
||||
# Implementation of Q8_0 with bit-exact same results as reference implementation in ggml-quants.c
|
||||
def __quantize_q8_0_rows(n: np.ndarray) -> np.ndarray:
|
||||
shape = n.shape
|
||||
assert shape[-1] % __q8_block_size == 0
|
||||
|
||||
n_blocks = n.size // __q8_block_size
|
||||
|
||||
blocks = n.reshape((n_blocks, __q8_block_size)).astype(np.float32, copy=False)
|
||||
|
||||
d = abs(blocks).max(axis=1, keepdims=True) / 127
|
||||
with np.errstate(divide="ignore"):
|
||||
id = np.where(d == 0, 0, 1 / d)
|
||||
qs = np_roundf(blocks * id)
|
||||
|
||||
# (n_blocks, 2)
|
||||
d = d.astype(np.float16).view(np.uint8)
|
||||
# (n_blocks, block_size)
|
||||
qs = qs.astype(np.int8).view(np.uint8)
|
||||
|
||||
assert d.shape[1] + qs.shape[1] == __q8_type_size
|
||||
|
||||
return np.concatenate([d, qs], axis=1).reshape(__quantize_q8_0_shape_change(shape))
|
||||
|
||||
|
||||
def __quantize_q8_0_array(n: np.ndarray) -> np.ndarray:
|
||||
return __apply_over_grouped_rows(
|
||||
__quantize_q8_0_rows,
|
||||
arr=n,
|
||||
otype=np.uint8,
|
||||
oshape=__quantize_q8_0_shape_change(n.shape),
|
||||
)
|
||||
|
||||
|
||||
__quantize_q8_0_lazy = LazyNumpyTensor._wrap_fn(
|
||||
__quantize_q8_0_array,
|
||||
meta_noop=(np.uint8, __quantize_q8_0_shape_change),
|
||||
)
|
||||
|
||||
|
||||
def quantize_q8_0(data: np.ndarray):
|
||||
if type(data) is LazyNumpyTensor:
|
||||
return __quantize_q8_0_lazy(data)
|
||||
else:
|
||||
return __quantize_q8_0_array(data)
|
|
@ -1,633 +0,0 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import Sequence
|
||||
|
||||
from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES
|
||||
|
||||
class TensorNameMap:
|
||||
mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
|
||||
|
||||
MODEL_TENSOR.TOKEN_EMBD: (
|
||||
"gpt_neox.embed_in",
|
||||
"transformer.wte",
|
||||
"transformer.word_embeddings",
|
||||
"word_embeddings",
|
||||
"model.embed_tokens",
|
||||
"tok_embeddings",
|
||||
"embeddings.word_embeddings",
|
||||
"language_model.embedding.word_embeddings",
|
||||
"wte",
|
||||
"transformer.embd.wte",
|
||||
"model.tok_embeddings",
|
||||
"model.embedding",
|
||||
"backbone.embedding",
|
||||
"backbone.embeddings",
|
||||
"transformer.in_out_embed",
|
||||
"embedding.word_embeddings",
|
||||
"transformer.token_embeddings",
|
||||
"shared",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.TOKEN_TYPES: (
|
||||
"embeddings.token_type_embeddings",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.TOKEN_EMBD_NORM: (
|
||||
"word_embeddings_layernorm",
|
||||
"embeddings.LayerNorm",
|
||||
"emb_ln",
|
||||
"transformer.norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.POS_EMBD: (
|
||||
"transformer.wpe",
|
||||
"embeddings.position_embeddings",
|
||||
"wpe",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.OUTPUT: (
|
||||
"embed_out",
|
||||
"lm_head",
|
||||
"output",
|
||||
"word_embeddings_for_head",
|
||||
"lm_head.linear",
|
||||
"output_layer",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.OUTPUT_NORM: (
|
||||
"gpt_neox.final_layer_norm",
|
||||
"transformer.ln_f",
|
||||
"model.norm",
|
||||
"norm",
|
||||
"transformer.norm_f",
|
||||
"ln_f",
|
||||
"language_model.encoder.final_layernorm",
|
||||
"model.final_layernorm",
|
||||
"lm_head.ln",
|
||||
"model.norm_f",
|
||||
"backbone.norm_f",
|
||||
"transformer.rms_norm",
|
||||
"encoder.final_layernorm",
|
||||
"transformer.norm",
|
||||
"model.norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ROPE_FREQS: (
|
||||
"rope.freqs",
|
||||
"rotary_pos_emb.inv_freq",
|
||||
),
|
||||
}
|
||||
|
||||
block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
|
||||
|
||||
MODEL_TENSOR.ATTN_NORM: (
|
||||
"gpt_neox.layers.{bid}.input_layernorm",
|
||||
"transformer.h.{bid}.ln_1",
|
||||
"transformer.blocks.{bid}.norm_1",
|
||||
"transformer.h.{bid}.input_layernorm",
|
||||
"h.{bid}.input_layernorm",
|
||||
"transformer.h.{bid}.ln_mlp",
|
||||
"model.layers.{bid}.input_layernorm",
|
||||
"layers.{bid}.attention_norm",
|
||||
"language_model.encoder.layers.{bid}.input_layernorm",
|
||||
"model.layers.{bid}.ln1",
|
||||
"h.{bid}.ln_1",
|
||||
"transformer.h.{bid}.ln",
|
||||
"model.layers.layers.{bid}.norm",
|
||||
"model.layers.{bid}.attention_norm",
|
||||
"model.layers.{bid}.norm",
|
||||
"backbone.layers.{bid}.norm",
|
||||
"transformer.decoder_layer.{bid}.rms_norm",
|
||||
"transformer.blocks.{bid}.norm_attn_norm.norm_1",
|
||||
"encoder.layers.{bid}.input_layernorm",
|
||||
"transformer.layers.{bid}.attn_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_NORM_2: (
|
||||
"transformer.h.{bid}.ln_attn",
|
||||
"encoder.layer.{bid}.layer_norm_1",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_QKV: (
|
||||
"gpt_neox.layers.{bid}.attention.query_key_value",
|
||||
"transformer.h.{bid}.attn.c_attn",
|
||||
"transformer.blocks.{bid}.attn.Wqkv",
|
||||
"transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv",
|
||||
"transformer.h.{bid}.self_attention.query_key_value",
|
||||
"h.{bid}.self_attention.query_key_value",
|
||||
"language_model.encoder.layers.{bid}.self_attention.query_key_value",
|
||||
"model.layers.{bid}.self_attn.query_key_value",
|
||||
"h.{bid}.attn.c_attn",
|
||||
"transformer.h.{bid}.mixer.Wqkv",
|
||||
"encoder.layers.{bid}.attn.Wqkv",
|
||||
"model.layers.{bid}.self_attn.qkv_proj",
|
||||
"encoder.layers.{bid}.self_attention.query_key_value",
|
||||
"transformer.layers.{bid}.attn.qkv_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_Q: (
|
||||
"model.layers.{bid}.self_attn.q_proj",
|
||||
"layers.{bid}.attention.wq",
|
||||
"encoder.layer.{bid}.attention.self.query",
|
||||
"transformer.h.{bid}.attn.q_proj",
|
||||
"model.layers.layers.{bid}.self_attn.q_proj",
|
||||
"model.layers.{bid}.attention.wq",
|
||||
"transformer.decoder_layer.{bid}.multi_head_attention.query",
|
||||
"transformer.h.{bid}.attn.attention.q_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_K: (
|
||||
"model.layers.{bid}.self_attn.k_proj",
|
||||
"layers.{bid}.attention.wk",
|
||||
"encoder.layer.{bid}.attention.self.key",
|
||||
"transformer.h.{bid}.attn.k_proj",
|
||||
"transformer.h.{bid}.attn.k",
|
||||
"model.layers.layers.{bid}.self_attn.k_proj",
|
||||
"model.layers.{bid}.attention.wk",
|
||||
"transformer.decoder_layer.{bid}.multi_head_attention.key",
|
||||
"transformer.h.{bid}.attn.attention.k_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_V: (
|
||||
"model.layers.{bid}.self_attn.v_proj",
|
||||
"layers.{bid}.attention.wv",
|
||||
"encoder.layer.{bid}.attention.self.value",
|
||||
"transformer.h.{bid}.attn.v_proj",
|
||||
"transformer.h.{bid}.attn.v",
|
||||
"model.layers.layers.{bid}.self_attn.v_proj",
|
||||
"model.layers.{bid}.attention.wv",
|
||||
"transformer.decoder_layer.{bid}.multi_head_attention.value",
|
||||
"transformer.h.{bid}.attn.attention.v_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_OUT: (
|
||||
"gpt_neox.layers.{bid}.attention.dense",
|
||||
"transformer.h.{bid}.attn.c_proj",
|
||||
"transformer.blocks.{bid}.attn.out_proj",
|
||||
"transformer.h.{bid}.self_attention.dense",
|
||||
"h.{bid}.self_attention.dense",
|
||||
"model.layers.{bid}.self_attn.o_proj",
|
||||
"layers.{bid}.attention.wo",
|
||||
"encoder.layer.{bid}.attention.output.dense",
|
||||
"transformer.h.{bid}.attn.out_proj",
|
||||
"language_model.encoder.layers.{bid}.self_attention.dense",
|
||||
"model.layers.{bid}.self_attn.dense",
|
||||
"h.{bid}.attn.c_proj",
|
||||
"transformer.h.{bid}.mixer.out_proj",
|
||||
"model.layers.layers.{bid}.self_attn.o_proj",
|
||||
"model.layers.{bid}.attention.wo",
|
||||
"encoder.layers.{bid}.attn.out_proj",
|
||||
"transformer.decoder_layer.{bid}.multi_head_attention.linear",
|
||||
"transformer.blocks.{bid}.norm_attn_norm.attn.out_proj",
|
||||
"encoder.layers.{bid}.self_attention.dense",
|
||||
"transformer.layers.{bid}.attn.out_proj",
|
||||
"transformer.h.{bid}.attn.attention.out_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_OUT_NORM: (
|
||||
"encoder.layer.{bid}.attention.output.LayerNorm",
|
||||
"encoder.layers.{bid}.norm1",
|
||||
"transformer.decoder_layer.{bid}.rms_norm_1",
|
||||
"transformer.blocks.{bid}.norm_attn_norm.norm_2",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_POST_NORM: (
|
||||
"model.layers.{bid}.post_attention_layernorm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_ROT_EMBD: (
|
||||
"model.layers.{bid}.self_attn.rotary_emb.inv_freq",
|
||||
"layers.{bid}.attention.inner_attention.rope.freqs",
|
||||
"model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq",
|
||||
"transformer.h.{bid}.attn.rotary_emb.inv_freq",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_NORM: (
|
||||
"gpt_neox.layers.{bid}.post_attention_layernorm",
|
||||
"transformer.h.{bid}.ln_2",
|
||||
"h.{bid}.post_attention_layernorm",
|
||||
"transformer.blocks.{bid}.norm_2",
|
||||
"model.layers.{bid}.post_attention_layernorm",
|
||||
"layers.{bid}.ffn_norm",
|
||||
"language_model.encoder.layers.{bid}.post_attention_layernorm",
|
||||
"model.layers.{bid}.ln2",
|
||||
"h.{bid}.ln_2",
|
||||
"model.layers.{bid}.ffn_norm",
|
||||
"transformer.decoder_layer.{bid}.rms_norm_2",
|
||||
"encoder.layers.{bid}.post_attention_layernorm",
|
||||
"transformer.layers.{bid}.ffn_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_PRE_NORM: (
|
||||
"model.layers.{bid}.pre_feedforward_layernorm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_POST_NORM: (
|
||||
"model.layers.{bid}.post_feedforward_layernorm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_GATE_INP: (
|
||||
"layers.{bid}.feed_forward.gate",
|
||||
"model.layers.{bid}.block_sparse_moe.gate",
|
||||
"model.layers.{bid}.mlp.gate",
|
||||
"transformer.decoder_layer.{bid}.router",
|
||||
"transformer.blocks.{bid}.ffn.router.layer",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
|
||||
"model.layers.{bid}.mlp.shared_expert_gate",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_UP: (
|
||||
"gpt_neox.layers.{bid}.mlp.dense_h_to_4h",
|
||||
"transformer.h.{bid}.mlp.c_fc",
|
||||
"transformer.blocks.{bid}.ffn.up_proj",
|
||||
"transformer.h.{bid}.mlp.dense_h_to_4h",
|
||||
"h.{bid}.mlp.dense_h_to_4h",
|
||||
"model.layers.{bid}.mlp.up_proj",
|
||||
"layers.{bid}.feed_forward.w3",
|
||||
"encoder.layer.{bid}.intermediate.dense",
|
||||
"transformer.h.{bid}.mlp.fc_in",
|
||||
"transformer.h.{bid}.mlp.linear_3",
|
||||
"language_model.encoder.layers.{bid}.mlp.dense_h_to_4h",
|
||||
"model.layers.{bid}.mlp.dense_h_to_4h",
|
||||
"transformer.h.{bid}.mlp.w1",
|
||||
"h.{bid}.mlp.c_fc",
|
||||
"transformer.h.{bid}.mlp.fc1",
|
||||
"model.layers.{bid}.mlp.fc1",
|
||||
"model.layers.{bid}.mlp.gate_up_proj",
|
||||
"model.layers.layers.{bid}.mlp.up_proj",
|
||||
"model.layers.{bid}.feed_forward.w3",
|
||||
"encoder.layers.{bid}.mlp.fc11",
|
||||
"model.layers.{bid}.mlp.c_fc",
|
||||
"encoder.layer.{bid}.mlp.gated_layers_v",
|
||||
"model.layers.{bid}.residual_mlp.w3",
|
||||
"encoder.layers.{bid}.mlp.dense_h_to_4h",
|
||||
"transformer.h.{bid}.mlp.c_fc_1",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_UP_EXP: (
|
||||
"layers.{bid}.feed_forward.experts.w3",
|
||||
"transformer.decoder_layer.{bid}.moe.linear_v",
|
||||
"transformer.blocks.{bid}.ffn.experts.mlp.v1",
|
||||
"model.layers.{bid}.mlp.experts.up_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_UP_SHEXP: (
|
||||
"model.layers.{bid}.mlp.shared_expert.up_proj",
|
||||
"model.layers.{bid}.mlp.shared_experts.up_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_ACT: (
|
||||
"transformer.blocks.{bid}.ffn.act",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_GATE: (
|
||||
"model.layers.{bid}.mlp.gate_proj",
|
||||
"layers.{bid}.feed_forward.w1",
|
||||
"transformer.h.{bid}.mlp.w2",
|
||||
"transformer.h.{bid}.mlp.c_fc2",
|
||||
"model.layers.layers.{bid}.mlp.gate_proj",
|
||||
"model.layers.{bid}.feed_forward.w1",
|
||||
"encoder.layers.{bid}.mlp.fc12",
|
||||
"encoder.layer.{bid}.mlp.gated_layers_w",
|
||||
"transformer.h.{bid}.mlp.linear_1",
|
||||
"model.layers.{bid}.residual_mlp.w1",
|
||||
"transformer.h.{bid}.mlp.c_fc_0",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_GATE_EXP: (
|
||||
"layers.{bid}.feed_forward.experts.w1",
|
||||
"transformer.decoder_layer.{bid}.moe.linear",
|
||||
"transformer.blocks.{bid}.ffn.experts.mlp.w1",
|
||||
"model.layers.{bid}.mlp.experts.gate_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_GATE_SHEXP: (
|
||||
"model.layers.{bid}.mlp.shared_expert.gate_proj",
|
||||
"model.layers.{bid}.mlp.shared_experts.gate_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_DOWN: (
|
||||
"gpt_neox.layers.{bid}.mlp.dense_4h_to_h",
|
||||
"transformer.h.{bid}.mlp.c_proj",
|
||||
"transformer.blocks.{bid}.ffn.down_proj",
|
||||
"transformer.h.{bid}.mlp.dense_4h_to_h",
|
||||
"h.{bid}.mlp.dense_4h_to_h",
|
||||
"model.layers.{bid}.mlp.down_proj",
|
||||
"layers.{bid}.feed_forward.w2",
|
||||
"encoder.layer.{bid}.output.dense",
|
||||
"transformer.h.{bid}.mlp.fc_out",
|
||||
"language_model.encoder.layers.{bid}.mlp.dense_4h_to_h",
|
||||
"model.layers.{bid}.mlp.dense_4h_to_h",
|
||||
"h.{bid}.mlp.c_proj",
|
||||
"transformer.h.{bid}.mlp.fc2",
|
||||
"model.layers.{bid}.mlp.fc2",
|
||||
"model.layers.layers.{bid}.mlp.down_proj",
|
||||
"model.layers.{bid}.feed_forward.w2",
|
||||
"encoder.layers.{bid}.mlp.fc2",
|
||||
"model.layers.{bid}.mlp.c_proj",
|
||||
"encoder.layer.{bid}.mlp.wo",
|
||||
"transformer.layers.{bid}.ffn.proj_2",
|
||||
"model.layers.{bid}.residual_mlp.w2",
|
||||
"encoder.layer.{bid}.mlp.down_layer",
|
||||
"encoder.layers.{bid}.mlp.dense_4h_to_h",
|
||||
"model.layers.h.{bid}.mlp.c_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_DOWN_EXP: (
|
||||
"layers.{bid}.feed_forward.experts.w2",
|
||||
"transformer.decoder_layer.{bid}.moe.linear_1",
|
||||
"transformer.blocks.{bid}.ffn.experts.mlp.w2",
|
||||
"model.layers.{bid}.mlp.experts.down_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_DOWN_SHEXP: (
|
||||
"model.layers.{bid}.mlp.shared_expert.down_proj",
|
||||
"model.layers.{bid}.mlp.shared_experts.down_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_Q_NORM: (
|
||||
"language_model.encoder.layers.{bid}.self_attention.q_layernorm",
|
||||
"model.layers.{bid}.self_attn.q_layernorm",
|
||||
"model.layers.{bid}.self_attn.q_norm",
|
||||
"transformer.blocks.{bid}.attn.q_ln",
|
||||
"encoder.layer.{bid}.attention.self.layer_norm_q",
|
||||
"transformer.layers.{bid}.attn.q_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_K_NORM: (
|
||||
"language_model.encoder.layers.{bid}.self_attention.k_layernorm",
|
||||
"model.layers.{bid}.self_attn.k_layernorm",
|
||||
"model.layers.{bid}.self_attn.k_norm",
|
||||
"transformer.blocks.{bid}.attn.k_ln",
|
||||
"encoder.layer.{bid}.attention.self.layer_norm_k",
|
||||
"transformer.layers.{bid}.attn.k_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ROPE_FREQS: (
|
||||
"language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.LAYER_OUT_NORM: (
|
||||
"encoder.layer.{bid}.output.LayerNorm",
|
||||
"encoder.layers.{bid}.norm2",
|
||||
"transformer.decoder_layer.{bid}.rms_norm_3",
|
||||
"encoder.layer.{bid}.mlp.layernorm",
|
||||
"encoder.layer.{bid}.layer_norm_2"
|
||||
),
|
||||
|
||||
MODEL_TENSOR.SSM_IN: (
|
||||
"model.layers.{bid}.in_proj",
|
||||
"backbone.layers.{bid}.mixer.in_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.SSM_CONV1D: (
|
||||
"model.layers.{bid}.conv1d",
|
||||
"backbone.layers.{bid}.mixer.conv1d",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.SSM_X: (
|
||||
"model.layers.{bid}.x_proj",
|
||||
"backbone.layers.{bid}.mixer.x_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.SSM_DT: (
|
||||
"model.layers.{bid}.dt_proj",
|
||||
"backbone.layers.{bid}.mixer.dt_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.SSM_A: (
|
||||
"model.layers.{bid}.A_log",
|
||||
"backbone.layers.{bid}.mixer.A_log",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.SSM_D: (
|
||||
"model.layers.{bid}.D",
|
||||
"backbone.layers.{bid}.mixer.D",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.SSM_OUT: (
|
||||
"model.layers.{bid}.out_proj",
|
||||
"backbone.layers.{bid}.mixer.out_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_Q_A: (
|
||||
"model.layers.{bid}.self_attn.q_a_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_Q_B: (
|
||||
"model.layers.{bid}.self_attn.q_b_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_KV_A_MQA: (
|
||||
"model.layers.{bid}.self_attn.kv_a_proj_with_mqa",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_KV_B: (
|
||||
"model.layers.{bid}.self_attn.kv_b_proj",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_Q_A_NORM: (
|
||||
"model.layers.{bid}.self_attn.q_a_layernorm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_KV_A_NORM: (
|
||||
"model.layers.{bid}.self_attn.kv_a_layernorm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ATTN_SUB_NORM: (
|
||||
"model.layers.{bid}.self_attn.inner_attn_ln",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.FFN_SUB_NORM: (
|
||||
"model.layers.{bid}.mlp.ffn_layernorm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_ATTN_NORM: (
|
||||
"decoder.block.{bid}.layer.0.layer_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_ATTN_Q: (
|
||||
"decoder.block.{bid}.layer.0.SelfAttention.q",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_ATTN_K: (
|
||||
"decoder.block.{bid}.layer.0.SelfAttention.k",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_ATTN_V: (
|
||||
"decoder.block.{bid}.layer.0.SelfAttention.v",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_ATTN_OUT: (
|
||||
"decoder.block.{bid}.layer.0.SelfAttention.o",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_ATTN_REL_B: (
|
||||
"decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
|
||||
"decoder.block.{bid}.layer.1.layer_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.q",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_K: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.k",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_V: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.v",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.o",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_FFN_NORM: (
|
||||
"decoder.block.{bid}.layer.2.layer_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_FFN_GATE: (
|
||||
"decoder.block.{bid}.layer.2.DenseReluDense.wi_0",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_FFN_UP: (
|
||||
"decoder.block.{bid}.layer.2.DenseReluDense.wi",
|
||||
"decoder.block.{bid}.layer.2.DenseReluDense.wi_1",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_FFN_DOWN: (
|
||||
"decoder.block.{bid}.layer.2.DenseReluDense.wo",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.DEC_OUTPUT_NORM: (
|
||||
"decoder.final_layer_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_ATTN_NORM: (
|
||||
"encoder.block.{bid}.layer.0.layer_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_ATTN_Q: (
|
||||
"encoder.block.{bid}.layer.0.SelfAttention.q",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_ATTN_K: (
|
||||
"encoder.block.{bid}.layer.0.SelfAttention.k",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_ATTN_V: (
|
||||
"encoder.block.{bid}.layer.0.SelfAttention.v",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_ATTN_OUT: (
|
||||
"encoder.block.{bid}.layer.0.SelfAttention.o",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_ATTN_REL_B: (
|
||||
"encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_FFN_NORM: (
|
||||
"encoder.block.{bid}.layer.1.layer_norm",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_FFN_GATE: (
|
||||
"encoder.block.{bid}.layer.1.DenseReluDense.wi_0",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_FFN_UP: (
|
||||
"encoder.block.{bid}.layer.1.DenseReluDense.wi",
|
||||
"encoder.block.{bid}.layer.1.DenseReluDense.wi_1",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_FFN_DOWN: (
|
||||
"encoder.block.{bid}.layer.1.DenseReluDense.wo",
|
||||
),
|
||||
|
||||
MODEL_TENSOR.ENC_OUTPUT_NORM: (
|
||||
"encoder.final_layer_norm",
|
||||
),
|
||||
}
|
||||
|
||||
arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
|
||||
MODEL_ARCH.ARCTIC: {
|
||||
MODEL_TENSOR.FFN_NORM: (
|
||||
"model.layers.{bid}.residual_layernorm",
|
||||
),
|
||||
MODEL_TENSOR.FFN_NORM_EXP: (
|
||||
"model.layers.{bid}.post_attention_layernorm",
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
mapping: dict[str, tuple[MODEL_TENSOR, str]]
|
||||
|
||||
def __init__(self, arch: MODEL_ARCH, n_blocks: int):
|
||||
self.mapping = {}
|
||||
for tensor, keys in self.mappings_cfg.items():
|
||||
if tensor not in MODEL_TENSORS[arch]:
|
||||
continue
|
||||
tensor_name = TENSOR_NAMES[tensor]
|
||||
self.mapping[tensor_name] = (tensor, tensor_name)
|
||||
for key in keys:
|
||||
self.mapping[key] = (tensor, tensor_name)
|
||||
if arch in self.arch_block_mappings_cfg:
|
||||
self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
|
||||
for bid in range(n_blocks):
|
||||
for tensor, keys in self.block_mappings_cfg.items():
|
||||
if tensor not in MODEL_TENSORS[arch]:
|
||||
continue
|
||||
|
||||
tensor_name = TENSOR_NAMES[tensor].format(bid = bid)
|
||||
self.mapping[tensor_name] = (tensor, tensor_name)
|
||||
for key in keys:
|
||||
key = key.format(bid = bid)
|
||||
self.mapping[key] = (tensor, tensor_name)
|
||||
|
||||
def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
|
||||
result = self.mapping.get(key)
|
||||
if result is not None:
|
||||
return result
|
||||
for suffix in try_suffixes:
|
||||
if key.endswith(suffix):
|
||||
result = self.mapping.get(key[:-len(suffix)])
|
||||
if result is not None:
|
||||
return result[0], result[1] + suffix
|
||||
return None
|
||||
|
||||
def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
|
||||
result = self.get_type_and_name(key, try_suffixes = try_suffixes)
|
||||
if result is None:
|
||||
return None
|
||||
return result[1]
|
||||
|
||||
def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
|
||||
result = self.get_type_and_name(key, try_suffixes = try_suffixes)
|
||||
if result is None:
|
||||
return None
|
||||
return result[0]
|
||||
|
||||
def __getitem__(self, key: str) -> str:
|
||||
try:
|
||||
return self.mapping[key][1]
|
||||
except KeyError:
|
||||
raise KeyError(key)
|
||||
|
||||
def __contains__(self, key: str) -> bool:
|
||||
return key in self.mapping
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return repr(self.mapping)
|
||||
|
||||
def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
|
||||
return TensorNameMap(arch, n_blocks)
|
|
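A minimal usage sketch of the mapping above, not part of the diff: the map translates a source checkpoint tensor name such as "model.layers.0.self_attn.q_proj.weight" into its canonical GGUF name, with try_suffixes stripping and re-appending ".weight"/".bias" during lookup. The architecture value and expected output are illustrative assumptions.

# Illustrative only: assumes gguf-py is importable and MODEL_ARCH.LLAMA exists.
from gguf.constants import MODEL_ARCH
from gguf.tensor_mapping import get_tensor_name_map

name_map = get_tensor_name_map(MODEL_ARCH.LLAMA, n_blocks=32)
gguf_name = name_map.get_name(
    "model.layers.0.self_attn.q_proj.weight", try_suffixes=(".weight", ".bias")
)
print(gguf_name)  # e.g. "blk.0.attn_q.weight"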
@@ -1,101 +0,0 @@
from __future__ import annotations

from typing import Literal


def fill_templated_filename(filename: str, output_type: str | None) -> str:
    # Given a file name fill in any type templates e.g. 'some-model-name.{ftype}.gguf'
    ftype_lowercase: str = output_type.lower() if output_type is not None else ""
    ftype_uppercase: str = output_type.upper() if output_type is not None else ""
    return filename.format(
        ftype_lowercase,
        outtype=ftype_lowercase,
        ftype=ftype_lowercase,
        OUTTYPE=ftype_uppercase,
        FTYPE=ftype_uppercase,
    )


def model_weight_count_rounded_notation(
    model_params_count: int, min_digits: int = 2
) -> str:
    if model_params_count > 1e12:
        # Trillions Of Parameters
        scaled_model_params = model_params_count * 1e-12
        scale_suffix = "T"
    elif model_params_count > 1e9:
        # Billions Of Parameters
        scaled_model_params = model_params_count * 1e-9
        scale_suffix = "B"
    elif model_params_count > 1e6:
        # Millions Of Parameters
        scaled_model_params = model_params_count * 1e-6
        scale_suffix = "M"
    else:
        # Thousands Of Parameters
        scaled_model_params = model_params_count * 1e-3
        scale_suffix = "K"

    fix = max(min_digits - len(str(round(scaled_model_params)).lstrip("0")), 0)

    return f"{scaled_model_params:.{fix}f}{scale_suffix}"


def size_label(
    total_params: int, shared_params: int, expert_params: int, expert_count: int
) -> str:

    if expert_count > 0:
        pretty_size = model_weight_count_rounded_notation(
            abs(shared_params) + abs(expert_params), min_digits=2
        )
        size_class = f"{expert_count}x{pretty_size}"
    else:
        size_class = model_weight_count_rounded_notation(
            abs(total_params), min_digits=2
        )

    return size_class


def naming_convention(
    model_name: str | None,
    base_name: str | None,
    finetune_string: str | None,
    version_string: str | None,
    size_label: str | None,
    output_type: str | None,
    model_type: Literal["vocab", "LoRA"] | None = None,
) -> str:
    # Reference: https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#gguf-naming-convention

    if base_name is not None:
        name = base_name.strip().replace(" ", "-").replace("/", "-")
    elif model_name is not None:
        name = model_name.strip().replace(" ", "-").replace("/", "-")
    else:
        name = "ggml-model"

    parameters = f"-{size_label}" if size_label is not None else ""

    finetune = (
        f"-{finetune_string.strip().replace(' ', '-')}"
        if finetune_string is not None
        else ""
    )

    version = (
        f"-{version_string.strip().replace(' ', '-')}"
        if version_string is not None
        else ""
    )

    encoding = (
        f"-{output_type.strip().replace(' ', '-').upper()}"
        if output_type is not None
        else ""
    )

    kind = f"-{model_type.strip().replace(' ', '-')}" if model_type is not None else ""

    return f"{name}{parameters}{finetune}{version}{encoding}{kind}"
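For reference, a quick sketch (not part of the diff) of what these removed helpers produce; the inputs below are made-up examples and the expected outputs follow from the code above.

# Hypothetical inputs, shown only to illustrate the naming scheme implemented above.
print(model_weight_count_rounded_notation(8_030_000_000))   # "8.0B"
print(size_label(8_030_000_000, 0, 0, 0))                   # "8.0B"
print(naming_convention(None, "Mistral", "Instruct", "v0.2", "7B", "F16"))
# "Mistral-7B-Instruct-v0.2-F16"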
File diff suppressed because it is too large
@@ -1,7 +1,3 @@
# This file left for compatibility. If you want to use the GGUF API from Python
# then don't import gguf/gguf.py directly. If you're looking for examples, see the
# examples/ directory for gguf-py

import importlib
import sys
from pathlib import Path
@@ -1,11 +1,8 @@
#
# GGUF file reading/modification support. For API usage information,
# please see the files scripts/ for some fairly simple examples.
#
from __future__ import annotations

import logging
import os
import sys
from collections import OrderedDict
from typing import Any, Literal, NamedTuple, TypeVar, Union


@@ -15,7 +12,6 @@
from .quants import quant_shape_to_byte_shape

if __name__ == "__main__":
    import sys
    from pathlib import Path

    # Allow running file in package as a script.

@@ -28,6 +24,7 @@
    GGUF_VERSION,
    GGMLQuantizationType,
    GGUFValueType,
    GGUFEndian,
)

logger = logging.getLogger(__name__)

@@ -53,6 +50,52 @@ class ReaderField(NamedTuple):

    types: list[GGUFValueType] = []

    def contents(self, index_or_slice: int | slice = slice(None)) -> Any:
        if self.types:
            to_string = lambda x: str(x.tobytes(), encoding="utf-8")  # noqa: E731
            main_type = self.types[0]

            if main_type == GGUFValueType.ARRAY:
                sub_type = self.types[-1]

                if sub_type == GGUFValueType.STRING:
                    indices = self.data[index_or_slice]

                    if isinstance(index_or_slice, int):
                        return to_string(self.parts[indices])  # type: ignore
                    else:
                        return [to_string(self.parts[idx]) for idx in indices]  # type: ignore
                else:
                    # FIXME: When/if _get_field_parts() support multi-dimensional arrays, this must do so too

                    # Check if it's unsafe to perform slice optimization on data
                    # if any(True for idx in self.data if len(self.parts[idx]) != 1):
                    #     optim_slice = slice(None)
                    # else:
                    #     optim_slice = index_or_slice
                    #     index_or_slice = slice(None)

                    # if isinstance(optim_slice, int):
                    #     return self.parts[self.data[optim_slice]].tolist()[0]
                    # else:
                    #     return [pv for idx in self.data[optim_slice] for pv in self.parts[idx].tolist()][index_or_slice]

                    if isinstance(index_or_slice, int):
                        return self.parts[self.data[index_or_slice]].tolist()[0]
                    else:
                        return [
                            pv
                            for idx in self.data[index_or_slice]
                            for pv in self.parts[idx].tolist()
                        ]

            if main_type == GGUFValueType.STRING:
                return to_string(self.parts[-1])
            else:
                return self.parts[-1].tolist()[0]

        return None


class ReaderTensor(NamedTuple):
    name: str

@@ -103,12 +146,23 @@ def __init__(
            # If we get 0 here that means it's (probably) a GGUF file created for
            # the opposite byte order of the machine this script is running on.
            self.byte_order = "S"
            temp_version = temp_version.newbyteorder(self.byte_order)
            temp_version = temp_version.view(
                temp_version.dtype.newbyteorder(self.byte_order)
            )
        version = temp_version[0]
        if version not in READER_SUPPORTED_VERSIONS:
            raise ValueError(
                f"Sorry, file appears to be version {version} which we cannot handle"
            )
        if sys.byteorder == "little":
            # Host is little endian
            host_endian = GGUFEndian.LITTLE
            swapped_endian = GGUFEndian.BIG
        else:
            # Sorry PDP or other weird systems that don't use BE or LE.
            host_endian = GGUFEndian.BIG
            swapped_endian = GGUFEndian.LITTLE
        self.endianess = swapped_endian if self.byte_order == "S" else host_endian
        self.fields: OrderedDict[str, ReaderField] = OrderedDict()
        self.tensors: list[ReaderTensor] = []
        offs += self._push_field(

@@ -169,10 +223,11 @@ def _get(
        count = int(count)
        itemsize = int(np.empty([], dtype=dtype).itemsize)
        end_offs = offset + itemsize * count
        return (
            self.data[offset:end_offs]
            .view(dtype=dtype)[:count]
            .newbyteorder(override_order or self.byte_order)
        arr = self.data[offset:end_offs].view(dtype=dtype)[:count]
        return arr.view(
            arr.dtype.newbyteorder(
                self.byte_order if override_order is None else override_order
            )
        )

    def _push_field(self, field: ReaderField, skip_sum: bool = False) -> int:

@@ -219,6 +274,7 @@ def _get_field_parts(
        offs += int(alen.nbytes)
        aparts: list[npt.NDArray[Any]] = [raw_itype, alen]
        data_idxs: list[int] = []
        # FIXME: Handle multi-dimensional arrays properly instead of flattening
        for idx in range(alen[0]):
            curr_size, curr_parts, curr_idxs, curr_types = self._get_field_parts(
                offs, raw_itype[0]
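A minimal sketch (an assumption, not shown in this diff) of how the reader API above is typically exercised: open a GGUF file, then use ReaderField.contents() to decode a metadata value.

# Illustrative only: the path and printed value are placeholders.
from gguf.gguf_reader import GGUFReader

reader = GGUFReader("model.gguf")
arch_field = reader.get_field("general.architecture")
if arch_field is not None:
    print(arch_field.contents())  # whole decoded value, e.g. "llama"
print(len(reader.tensors))        # ReaderTensor entries parsed from the file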
@@ -26,6 +26,7 @@
    RopeScalingType,
    PoolingType,
    TokenType,
    ExpertGatingFuncType,
)

from .quants import quant_shape_from_byte_shape

@@ -641,6 +642,11 @@ def add_base_model_organization(self, source_id: int, organization: str) -> None
            Keys.General.BASE_MODEL_ORGANIZATION.format(id=source_id), organization
        )

    def add_base_model_description(self, source_id: int, description: str) -> None:
        self.add_string(
            Keys.General.BASE_MODEL_DESCRIPTION.format(id=source_id), description
        )

    def add_base_model_url(self, source_id: int, url: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_URL.format(id=source_id), url)

@@ -653,15 +659,46 @@ def add_base_model_uuid(self, source_id: int, uuid: str) -> None:
    def add_base_model_repo_url(self, source_id: int, repo_url: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_REPO_URL.format(id=source_id), repo_url)

    def add_dataset_count(self, source_count: int) -> None:
        self.add_uint32(Keys.General.DATASET_COUNT, source_count)

    def add_dataset_name(self, source_id: int, name: str) -> None:
        self.add_string(Keys.General.DATASET_NAME.format(id=source_id), name)

    def add_dataset_author(self, source_id: int, author: str) -> None:
        self.add_string(Keys.General.DATASET_AUTHOR.format(id=source_id), author)

    def add_dataset_version(self, source_id: int, version: str) -> None:
        self.add_string(Keys.General.DATASET_VERSION.format(id=source_id), version)

    def add_dataset_organization(self, source_id: int, organization: str) -> None:
        self.add_string(
            Keys.General.DATASET_ORGANIZATION.format(id=source_id), organization
        )

    def add_dataset_description(self, source_id: int, description: str) -> None:
        self.add_string(
            Keys.General.DATASET_DESCRIPTION.format(id=source_id), description
        )

    def add_dataset_url(self, source_id: int, url: str) -> None:
        self.add_string(Keys.General.DATASET_URL.format(id=source_id), url)

    def add_dataset_doi(self, source_id: int, doi: str) -> None:
        self.add_string(Keys.General.DATASET_DOI.format(id=source_id), doi)

    def add_dataset_uuid(self, source_id: int, uuid: str) -> None:
        self.add_string(Keys.General.DATASET_UUID.format(id=source_id), uuid)

    def add_dataset_repo_url(self, source_id: int, repo_url: str) -> None:
        self.add_string(Keys.General.DATASET_REPO_URL.format(id=source_id), repo_url)

    def add_tags(self, tags: Sequence[str]) -> None:
        self.add_array(Keys.General.TAGS, tags)

    def add_languages(self, languages: Sequence[str]) -> None:
        self.add_array(Keys.General.LANGUAGES, languages)

    def add_datasets(self, datasets: Sequence[str]) -> None:
        self.add_array(Keys.General.DATASETS, datasets)

    def add_tensor_data_layout(self, layout: str) -> None:
        self.add_string(Keys.LLM.TENSOR_DATA_LAYOUT.format(arch=self.arch), layout)

@@ -674,6 +711,21 @@ def add_context_length(self, length: int) -> None:
    def add_embedding_length(self, length: int) -> None:
        self.add_uint32(Keys.LLM.EMBEDDING_LENGTH.format(arch=self.arch), length)

    def add_features_length(self, length: int) -> None:
        self.add_uint32(Keys.LLM.FEATURES_LENGTH.format(arch=self.arch), length)

    def add_posnet_embedding_length(self, length: int) -> None:
        self.add_uint32(Keys.PosNet.EMBEDDING_LENGTH.format(arch=self.arch), length)

    def add_posnet_block_count(self, length: int) -> None:
        self.add_uint32(Keys.PosNet.BLOCK_COUNT.format(arch=self.arch), length)

    def add_convnext_embedding_length(self, length: int) -> None:
        self.add_uint32(Keys.ConvNext.EMBEDDING_LENGTH.format(arch=self.arch), length)

    def add_convnext_block_count(self, length: int) -> None:
        self.add_uint32(Keys.ConvNext.BLOCK_COUNT.format(arch=self.arch), length)

    def add_block_count(self, length: int) -> None:
        self.add_uint32(Keys.LLM.BLOCK_COUNT.format(arch=self.arch), length)

@@ -722,6 +774,12 @@ def add_key_length(self, length: int) -> None:
    def add_value_length(self, length: int) -> None:
        self.add_uint32(Keys.Attention.VALUE_LENGTH.format(arch=self.arch), length)

    def add_key_length_mla(self, length: int) -> None:
        self.add_uint32(Keys.Attention.KEY_LENGTH_MLA.format(arch=self.arch), length)

    def add_value_length_mla(self, length: int) -> None:
        self.add_uint32(Keys.Attention.VALUE_LENGTH_MLA.format(arch=self.arch), length)

    def add_max_alibi_bias(self, bias: float) -> None:
        self.add_float32(Keys.Attention.MAX_ALIBI_BIAS.format(arch=self.arch), bias)

@@ -749,12 +807,56 @@ def add_expert_shared_count(self, count: int) -> None:
    def add_expert_weights_scale(self, value: float) -> None:
        self.add_float32(Keys.LLM.EXPERT_WEIGHTS_SCALE.format(arch=self.arch), value)

    def add_expert_weights_norm(self, value: bool) -> None:
        self.add_bool(Keys.LLM.EXPERT_WEIGHTS_NORM.format(arch=self.arch), value)

    def add_expert_gating_func(self, value: ExpertGatingFuncType) -> None:
        self.add_uint32(Keys.LLM.EXPERT_GATING_FUNC.format(arch=self.arch), value.value)

    def add_moe_every_n_layers(self, value: int) -> None:
        self.add_uint32(Keys.LLM.MOE_EVERY_N_LAYERS.format(arch=self.arch), value)

    def add_swin_norm(self, value: bool) -> None:
        self.add_bool(Keys.LLM.SWIN_NORM.format(arch=self.arch), value)

    def add_rescale_every_n_layers(self, count: int) -> None:
        self.add_uint32(Keys.LLM.RESCALE_EVERY_N_LAYERS.format(arch=self.arch), count)

    def add_time_mix_extra_dim(self, dim: int) -> None:
        self.add_uint32(Keys.LLM.TIME_MIX_EXTRA_DIM.format(arch=self.arch), dim)

    def add_time_decay_extra_dim(self, dim: int) -> None:
        self.add_uint32(Keys.LLM.TIME_DECAY_EXTRA_DIM.format(arch=self.arch), dim)

    def add_residual_scale(self, value: float) -> None:
        self.add_float32(Keys.LLM.RESIDUAL_SCALE.format(arch=self.arch), value)

    def add_embedding_scale(self, value: float) -> None:
        self.add_float32(Keys.LLM.EMBEDDING_SCALE.format(arch=self.arch), value)

    def add_wkv_head_size(self, size: int) -> None:
        self.add_uint32(Keys.WKV.HEAD_SIZE.format(arch=self.arch), size)

    def add_token_shift_count(self, count: int) -> None:
        self.add_uint32(Keys.LLM.TOKEN_SHIFT_COUNT.format(arch=self.arch), count)

    def add_interleave_moe_layer_step(self, value: int) -> None:
        self.add_uint32(
            Keys.LLM.INTERLEAVE_MOE_LAYER_STEP.format(arch=self.arch), value
        )

    def add_layer_norm_eps(self, value: float) -> None:
        self.add_float32(Keys.Attention.LAYERNORM_EPS.format(arch=self.arch), value)

    def add_layer_norm_rms_eps(self, value: float) -> None:
        self.add_float32(Keys.Attention.LAYERNORM_RMS_EPS.format(arch=self.arch), value)

    def add_group_norm_eps(self, value: float) -> None:
        self.add_float32(Keys.Attention.GROUPNORM_EPS.format(arch=self.arch), value)

    def add_group_norm_groups(self, value: int) -> None:
        self.add_uint32(Keys.Attention.GROUPNORM_GROUPS.format(arch=self.arch), value)

    def add_causal_attention(self, value: bool) -> None:
        self.add_bool(Keys.Attention.CAUSAL.format(arch=self.arch), value)

@@ -764,18 +866,38 @@ def add_q_lora_rank(self, length: int) -> None:
    def add_kv_lora_rank(self, length: int) -> None:
        self.add_uint32(Keys.Attention.KV_LORA_RANK.format(arch=self.arch), length)

    def add_decay_lora_rank(self, length: int) -> None:
        self.add_uint32(Keys.Attention.DECAY_LORA_RANK.format(arch=self.arch), length)

    def add_iclr_lora_rank(self, length: int) -> None:
        self.add_uint32(Keys.Attention.ICLR_LORA_RANK.format(arch=self.arch), length)

    def add_value_residual_mix_lora_rank(self, length: int) -> None:
        self.add_uint32(
            Keys.Attention.VALUE_RESIDUAL_MIX_LORA_RANK.format(arch=self.arch), length
        )

    def add_gate_lora_rank(self, length: int) -> None:
        self.add_uint32(Keys.Attention.GATE_LORA_RANK.format(arch=self.arch), length)

    def add_relative_attn_buckets_count(self, value: int) -> None:
        self.add_uint32(Keys.Attention.REL_BUCKETS_COUNT.format(arch=self.arch), value)

    def add_sliding_window(self, value: int) -> None:
        self.add_uint32(Keys.Attention.SLIDING_WINDOW.format(arch=self.arch), value)

    def add_attention_scale(self, value: float) -> None:
        self.add_float32(Keys.Attention.SCALE.format(arch=self.arch), value)

    def add_pooling_type(self, value: PoolingType) -> None:
        self.add_uint32(Keys.LLM.POOLING_TYPE.format(arch=self.arch), value.value)

    def add_rope_dimension_count(self, count: int) -> None:
        self.add_uint32(Keys.Rope.DIMENSION_COUNT.format(arch=self.arch), count)

    def add_rope_dimension_sections(self, dims: Sequence[int]) -> None:
        self.add_array(Keys.Rope.DIMENSION_SECTIONS.format(arch=self.arch), dims)

    def add_rope_freq_base(self, value: float) -> None:
        self.add_float32(Keys.Rope.FREQ_BASE.format(arch=self.arch), value)

@@ -809,6 +931,9 @@ def add_ssm_state_size(self, value: int) -> None:
    def add_ssm_time_step_rank(self, value: int) -> None:
        self.add_uint32(Keys.SSM.TIME_STEP_RANK.format(arch=self.arch), value)

    def add_ssm_dt_b_c_rms(self, value: bool) -> None:
        self.add_bool(Keys.SSM.DT_B_C_RMS.format(arch=self.arch), value)

    def add_tokenizer_model(self, model: str) -> None:
        self.add_string(Keys.Tokenizer.MODEL, model)

@@ -849,9 +974,6 @@ def add_sep_token_id(self, id: int) -> None:
    def add_pad_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.PAD_ID, id)

    def add_cls_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.CLS_ID, id)

    def add_mask_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.MASK_ID, id)

@@ -903,18 +1025,65 @@ def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None:

        self.add_string(Keys.Tokenizer.CHAT_TEMPLATE, value)

    def add_prefix_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.PREFIX_ID, id)

    def add_suffix_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.SUFFIX_ID, id)

    def add_middle_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.MIDDLE_ID, id)

    def add_eot_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.EOT_ID, id)

    def add_eom_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.EOM_ID, id)

    # for vision models

    def add_vision_projection_dim(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.PROJECTION_DIM, value)

    def add_vision_has_vision_encoder(self, value: bool) -> None:
        self.add_bool(Keys.ClipVision.HAS_VISION_ENCODER, value)

    def add_vision_patch_size(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.PATCH_SIZE, value)

    def add_vision_embedding_length(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.EMBEDDING_LENGTH, value)

    def add_vision_feed_forward_length(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.FEED_FORWARD_LENGTH, value)

    def add_vision_block_count(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.BLOCK_COUNT, value)

    def add_vision_head_count(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.Attention.HEAD_COUNT, value)

    def add_vision_projector_type(self, value: str) -> None:
        self.add_string(Keys.ClipVision.PROJECTOR_TYPE, value)

    def add_vision_attention_layernorm_eps(self, value: float) -> None:
        self.add_float32(Keys.ClipVision.Attention.LAYERNORM_EPS, value)

    def add_vision_image_size(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.IMAGE_SIZE, value)

    def add_vision_image_mean(self, values: Sequence[float]) -> None:
        self.add_array(Keys.ClipVision.IMAGE_MEAN, values)

    def add_vision_image_std(self, values: Sequence[float]) -> None:
        self.add_array(Keys.ClipVision.IMAGE_STD, values)

    def add_vision_spatial_merge_size(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.SPATIAL_MERGE_SIZE, value)

    def add_vision_use_gelu(self, value: bool) -> None:
        self.add_bool(Keys.ClipVision.USE_GELU, value)

    def add_vision_use_silu(self, value: bool) -> None:
        self.add_bool(Keys.ClipVision.USE_SILU, value)

    def add_vision_projector_scale_factor(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.Projector.SCALE_FACTOR, value)

    def add_vision_n_wa_pattern(self, value: int) -> None:
        self.add_uint32(Keys.ClipVision.N_WA_PATTERN, value)

    def _pack(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> bytes:
        pack_prefix = ""
        if not skip_pack_prefix:
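A small usage sketch (not part of the diff; the file name, architecture string, and metadata values are made-up placeholders) of how these per-source setters are meant to be called, one indexed key group per base model or dataset:

# Illustrative only: assumes gguf-py is importable; values are placeholders.
from gguf import GGUFWriter

writer = GGUFWriter("out.gguf", arch="llama")
writer.add_base_model_count(1)
writer.add_base_model_name(0, "Mistral 7B v0.1")
writer.add_base_model_organization(0, "Mistral AI")
writer.add_dataset_count(1)
writer.add_dataset_name(0, "OpenOrca")
writer.add_dataset_repo_url(0, "https://huggingface.co/datasets/Open-Orca/OpenOrca")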
@@ -12,6 +12,7 @@


class LazyMeta(ABCMeta):

    def __new__(
        cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs
    ):

@@ -34,7 +35,7 @@ def __getattr__(self, name: str) -> Any:

        # need to make a builder for the wrapped wrapper to copy the name,
        # or else it fails with very cryptic error messages,
        # because somehow the same string would end up in every closure
        # because somehow the same string would end up in every closures
        def mk_wrap(op_name: str, *, meta_noop: bool = False):
            # need to wrap the wrapper to get self
            def wrapped_special_op(self, *args, **kwargs):

@@ -200,6 +201,27 @@ def wrapped_fn(*args, **kwargs):
                return cls(
                    meta=cls.eager_to_meta(res), args=args, kwargs=kwargs, func=fn
                )
            elif isinstance(res, tuple) and all(
                isinstance(t, cls._tensor_type) for t in res
            ):
                # share the evaluation between lazy tuple elements
                shared_args: list = [args, None]

                def eager_tuple_element(a: list[Any], i: int = 0, /, **kw) -> LazyBase:
                    assert len(a) == 2
                    if a[1] is None:
                        a[1] = fn(*a[0], **kw)
                    return a[1][i]

                return tuple(
                    cls(
                        meta=cls.eager_to_meta(res[i]),
                        args=(shared_args, i),
                        kwargs=kwargs,
                        func=eager_tuple_element,
                    )
                    for i in range(len(res))
                )
            else:
                del res  # not needed
                # non-tensor return likely relies on the contents of the args

@@ -254,6 +276,8 @@ def from_eager(cls, t: Any) -> Any:
class LazyNumpyTensor(LazyBase):
    _tensor_type = np.ndarray

    shape: tuple[int, ...]  # Makes the type checker happy in quants.py

    @classmethod
    def meta_with_dtype_and_shape(
        cls, dtype: DTypeLike, shape: tuple[int, ...]
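The eager_tuple_element change above shares one deferred computation across every element of a lazily wrapped tuple: whichever element is touched first runs the wrapped function once and caches the whole result. A standalone sketch of the same idea in plain Python (not the gguf classes):

# Minimal illustration of sharing one deferred evaluation between tuple elements.
def make_lazy_tuple(fn, args, n):
    shared = [args, None]  # [arguments, cached result]

    def element(i):
        if shared[1] is None:      # first access computes the whole tuple once
            shared[1] = fn(*shared[0])
        return shared[1][i]

    return tuple((lambda i=i: element(i)) for i in range(n))

a, b = make_lazy_tuple(lambda x: (x + 1, x * 2), (10,), 2)
print(a(), b())  # 11 20, and fn ran only once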
@ -41,7 +41,7 @@ class Metadata:
|
|||
base_models: Optional[list[dict]] = None
|
||||
tags: Optional[list[str]] = None
|
||||
languages: Optional[list[str]] = None
|
||||
datasets: Optional[list[str]] = None
|
||||
datasets: Optional[list[dict]] = None
|
||||
|
||||
@staticmethod
|
||||
def load(
|
||||
|
@ -50,7 +50,7 @@ def load(
|
|||
model_name: Optional[str] = None,
|
||||
total_params: int = 0,
|
||||
) -> Metadata:
|
||||
# This grabs as much contextual authorship metadata as possible from the model repository
|
||||
# This grabs as many contextual authorship metadata as possible from the model repository
|
||||
# making any conversion as required to match the gguf kv store metadata format
|
||||
# as well as giving users the ability to override any authorship metadata that may be incorrect
|
||||
|
||||
|
@ -126,13 +126,13 @@ def load(
|
|||
"general.base_models", metadata.base_models
|
||||
)
|
||||
|
||||
# Datasets is received here as an array of datasets
|
||||
metadata.datasets = metadata_override.get("general.datasets", metadata.datasets)
|
||||
|
||||
metadata.tags = metadata_override.get(Keys.General.TAGS, metadata.tags)
|
||||
metadata.languages = metadata_override.get(
|
||||
Keys.General.LANGUAGES, metadata.languages
|
||||
)
|
||||
metadata.datasets = metadata_override.get(
|
||||
Keys.General.DATASETS, metadata.datasets
|
||||
)
|
||||
|
||||
# Direct Metadata Override (via direct cli argument)
|
||||
if model_name is not None:
|
||||
|
@ -160,21 +160,41 @@ def load_model_card(model_path: Optional[Path] = None) -> dict[str, Any]:
|
|||
if not model_card_path.is_file():
|
||||
return {}
|
||||
|
||||
# The model card metadata is assumed to always be in YAML
|
||||
# The model card metadata is assumed to always be in YAML (frontmatter)
|
||||
# ref: https://github.com/huggingface/transformers/blob/a5c642fe7a1f25d3bdcd76991443ba6ff7ee34b2/src/transformers/modelcard.py#L468-L473
|
||||
yaml_content: str = ""
|
||||
with open(model_card_path, "r", encoding="utf-8") as f:
|
||||
if f.readline() == "---\n":
|
||||
raw = f.read().partition("---\n")[0]
|
||||
data = yaml.safe_load(raw)
|
||||
if isinstance(data, dict):
|
||||
return data
|
||||
else:
|
||||
logger.error(
|
||||
f"while reading YAML model card frontmatter, data is {type(data)} instead of dict"
|
||||
)
|
||||
return {}
|
||||
else:
|
||||
content = f.read()
|
||||
lines = content.splitlines()
|
||||
lines_yaml = []
|
||||
if len(lines) == 0:
|
||||
# Empty file
|
||||
return {}
|
||||
if len(lines) > 0 and lines[0] != "---":
|
||||
# No frontmatter
|
||||
return {}
|
||||
for line in lines[1:]:
|
||||
if line == "---":
|
||||
break # End of frontmatter
|
||||
else:
|
||||
lines_yaml.append(line)
|
||||
yaml_content = "\n".join(lines_yaml) + "\n"
|
||||
|
||||
# Quick hack to fix the Norway problem
|
||||
# https://hitchdev.com/strictyaml/why/implicit-typing-removed/
|
||||
yaml_content = yaml_content.replace("- no\n", '- "no"\n')
|
||||
|
||||
if yaml_content:
|
||||
data = yaml.safe_load(yaml_content)
|
||||
if isinstance(data, dict):
|
||||
return data
|
||||
else:
|
||||
logger.error(
|
||||
f"while reading YAML model card frontmatter, data is {type(data)} instead of dict"
|
||||
)
|
||||
return {}
|
||||
else:
|
||||
return {}
|
||||
|
||||
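A condensed sketch (an assumption, not the project's exact function) of the frontmatter handling added above: read the lines between the opening and closing "---" markers, quote bare "no" values to dodge YAML 1.1 implicit booleans (the "Norway problem"), then yaml.safe_load the result and keep it only if it is a dict.

# Illustrative helper; mirrors the approach above but is not the project's code.
import yaml

def read_frontmatter(text: str) -> dict:
    lines = text.splitlines()
    if not lines or lines[0] != "---":
        return {}                      # no frontmatter block
    body = []
    for line in lines[1:]:
        if line == "---":
            break                      # end of frontmatter
        body.append(line)
    content = "\n".join(body) + "\n"
    content = content.replace("- no\n", '- "no"\n')   # the "Norway problem" hack
    data = yaml.safe_load(content)
    return data if isinstance(data, dict) else {}

print(read_frontmatter("---\nlanguage:\n- no\n---\n# Model Card"))
# {'language': ['no']}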
@staticmethod
|
||||
def load_hf_parameters(model_path: Optional[Path] = None) -> dict[str, Any]:
|
||||
|
@ -228,7 +248,11 @@ def get_model_id_components(
|
|||
org_component, model_full_name_component = None, model_id
|
||||
|
||||
# Check if we erroneously matched against './' or '../' etc...
|
||||
if org_component is not None and org_component[0] == ".":
|
||||
if (
|
||||
org_component is not None
|
||||
and len(org_component) > 0
|
||||
and org_component[0] == "."
|
||||
):
|
||||
org_component = None
|
||||
|
||||
name_parts: list[str] = model_full_name_component.split("-")
|
||||
|
@ -387,27 +411,86 @@ def apply_metadata_heuristic(
|
|||
########################
|
||||
if model_card is not None:
|
||||
|
||||
if "model_name" in model_card and metadata.name is None:
|
||||
# Not part of huggingface model card standard but notice some model creator using it
|
||||
# such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF'
|
||||
metadata.name = model_card.get("model_name")
|
||||
def use_model_card_metadata(metadata_key: str, model_card_key: str):
|
||||
if (
|
||||
model_card_key in model_card
|
||||
and getattr(metadata, metadata_key, None) is None
|
||||
):
|
||||
setattr(metadata, metadata_key, model_card.get(model_card_key))
|
||||
|
||||
if "model_creator" in model_card and metadata.author is None:
|
||||
# Not part of huggingface model card standard but notice some model creator using it
|
||||
# such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF'
|
||||
metadata.author = model_card.get("model_creator")
|
||||
def use_array_model_card_metadata(metadata_key: str, model_card_key: str):
|
||||
# Note: Will append rather than replace if already exist
|
||||
tags_value = model_card.get(model_card_key, None)
|
||||
if tags_value is None:
|
||||
return
|
||||
|
||||
if "model_type" in model_card and metadata.basename is None:
|
||||
# Not part of huggingface model card standard but notice some model creator using it
|
||||
# such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF'
|
||||
metadata.basename = model_card.get("model_type")
|
||||
current_value = getattr(metadata, metadata_key, None)
|
||||
if current_value is None:
|
||||
current_value = []
|
||||
|
||||
if "base_model" in model_card:
|
||||
if isinstance(tags_value, str):
|
||||
current_value.append(tags_value)
|
||||
elif isinstance(tags_value, list):
|
||||
current_value.extend(tags_value)
|
||||
|
||||
setattr(metadata, metadata_key, current_value)
|
||||
|
||||
# LLAMA.cpp's direct internal convention
|
||||
# (Definitely not part of hugging face formal/informal standard)
|
||||
#########################################
|
||||
use_model_card_metadata("name", "name")
|
||||
use_model_card_metadata("author", "author")
|
||||
use_model_card_metadata("version", "version")
|
||||
use_model_card_metadata("organization", "organization")
|
||||
use_model_card_metadata("description", "description")
|
||||
use_model_card_metadata("finetune", "finetune")
|
||||
use_model_card_metadata("basename", "basename")
|
||||
use_model_card_metadata("size_label", "size_label")
|
||||
use_model_card_metadata("source_url", "url")
|
||||
use_model_card_metadata("source_doi", "doi")
|
||||
use_model_card_metadata("source_uuid", "uuid")
|
||||
use_model_card_metadata("source_repo_url", "repo_url")
|
||||
|
||||
# LLAMA.cpp's huggingface style convention
|
||||
# (Definitely not part of hugging face formal/informal standard... but with model_ appended to match their style)
|
||||
###########################################
|
||||
use_model_card_metadata("name", "model_name")
|
||||
use_model_card_metadata("author", "model_author")
|
||||
use_model_card_metadata("version", "model_version")
|
||||
use_model_card_metadata("organization", "model_organization")
|
||||
use_model_card_metadata("description", "model_description")
|
||||
use_model_card_metadata("finetune", "model_finetune")
|
||||
use_model_card_metadata("basename", "model_basename")
|
||||
use_model_card_metadata("size_label", "model_size_label")
|
||||
use_model_card_metadata("source_url", "model_url")
|
||||
use_model_card_metadata("source_doi", "model_doi")
|
||||
use_model_card_metadata("source_uuid", "model_uuid")
|
||||
use_model_card_metadata("source_repo_url", "model_repo_url")
|
||||
|
||||
# Hugging Face Direct Convention
|
||||
#################################
|
||||
|
||||
# Not part of huggingface model card standard but notice some model creator using it
|
||||
# such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF'
|
||||
use_model_card_metadata("name", "model_name")
|
||||
use_model_card_metadata("author", "model_creator")
|
||||
use_model_card_metadata("basename", "model_type")
|
||||
|
||||
if (
|
||||
"base_model" in model_card
|
||||
or "base_models" in model_card
|
||||
or "base_model_sources" in model_card
|
||||
):
|
||||
# This represents the parent models that this is based on
|
||||
# Example: stabilityai/stable-diffusion-xl-base-1.0. Can also be a list (for merges)
|
||||
# Example of merges: https://huggingface.co/EmbeddedLLM/Mistral-7B-Merge-14-v0.1/blob/main/README.md
|
||||
metadata_base_models = []
|
||||
base_model_value = model_card.get("base_model", None)
|
||||
base_model_value = model_card.get(
|
||||
"base_model",
|
||||
model_card.get(
|
||||
"base_models", model_card.get("base_model_sources", None)
|
||||
),
|
||||
)
|
||||
|
||||
if base_model_value is not None:
|
||||
if isinstance(base_model_value, str):
|
||||
|
@ -420,86 +503,195 @@ def apply_metadata_heuristic(
|
|||
|
||||
for model_id in metadata_base_models:
|
||||
# NOTE: model size of base model is assumed to be similar to the size of the current model
|
||||
(
|
||||
model_full_name_component,
|
||||
org_component,
|
||||
basename,
|
||||
finetune,
|
||||
version,
|
||||
size_label,
|
||||
) = Metadata.get_model_id_components(model_id, total_params)
|
||||
base_model = {}
|
||||
if model_full_name_component is not None:
|
||||
base_model["name"] = Metadata.id_to_title(
|
||||
model_full_name_component
|
||||
if isinstance(model_id, str):
|
||||
if (
|
||||
model_id.startswith("http://")
|
||||
or model_id.startswith("https://")
|
||||
or model_id.startswith("ssh://")
|
||||
):
|
||||
base_model["repo_url"] = model_id
|
||||
|
||||
# Check if Hugging Face ID is present in URL
|
||||
if "huggingface.co" in model_id:
|
||||
match = re.match(
|
||||
r"https?://huggingface.co/([^/]+/[^/]+)$", model_id
|
||||
)
|
||||
if match:
|
||||
model_id_component = match.group(1)
|
||||
(
|
||||
model_full_name_component,
|
||||
org_component,
|
||||
basename,
|
||||
finetune,
|
||||
version,
|
||||
size_label,
|
||||
) = Metadata.get_model_id_components(
|
||||
model_id_component, total_params
|
||||
)
|
||||
|
||||
# Populate model dictionary with extracted components
|
||||
if model_full_name_component is not None:
|
||||
base_model["name"] = Metadata.id_to_title(
|
||||
model_full_name_component
|
||||
)
|
||||
if org_component is not None:
|
||||
base_model["organization"] = (
|
||||
Metadata.id_to_title(org_component)
|
||||
)
|
||||
if version is not None:
|
||||
base_model["version"] = version
|
||||
|
||||
else:
|
||||
# Likely a Hugging Face ID
|
||||
(
|
||||
model_full_name_component,
|
||||
org_component,
|
||||
basename,
|
||||
finetune,
|
||||
version,
|
||||
size_label,
|
||||
) = Metadata.get_model_id_components(model_id, total_params)
|
||||
|
||||
# Populate model dictionary with extracted components
|
||||
if model_full_name_component is not None:
|
||||
base_model["name"] = Metadata.id_to_title(
|
||||
model_full_name_component
|
||||
)
|
||||
if org_component is not None:
|
||||
base_model["organization"] = Metadata.id_to_title(
|
||||
org_component
|
||||
)
|
||||
if version is not None:
|
||||
base_model["version"] = version
|
||||
if (
|
||||
org_component is not None
|
||||
and model_full_name_component is not None
|
||||
):
|
||||
base_model["repo_url"] = (
|
||||
f"https://huggingface.co/{org_component}/{model_full_name_component}"
|
||||
)
|
||||
|
||||
elif isinstance(model_id, dict):
|
||||
base_model = model_id
|
||||
|
||||
else:
|
||||
logger.error(
|
||||
f"base model entry '{str(model_id)}' not in a known format"
|
||||
)
|
||||
if org_component is not None:
|
||||
base_model["organization"] = Metadata.id_to_title(org_component)
|
||||
if version is not None:
|
||||
base_model["version"] = version
|
||||
if (
|
||||
org_component is not None
|
||||
and model_full_name_component is not None
|
||||
):
|
||||
base_model[
|
||||
"repo_url"
|
||||
] = f"https://huggingface.co/{org_component}/{model_full_name_component}"
|
||||
|
||||
metadata.base_models.append(base_model)
|
||||
|
||||
if "license" in model_card and metadata.license is None:
|
||||
metadata.license = model_card.get("license")
|
||||
if (
|
||||
"datasets" in model_card
|
||||
or "dataset" in model_card
|
||||
or "dataset_sources" in model_card
|
||||
):
|
||||
# This represents the datasets that this was trained from
|
||||
metadata_datasets = []
|
||||
dataset_value = model_card.get(
|
||||
"datasets",
|
||||
model_card.get("dataset", model_card.get("dataset_sources", None)),
|
||||
)
|
||||
|
||||
if "license_name" in model_card and metadata.license_name is None:
|
||||
metadata.license_name = model_card.get("license_name")
|
||||
|
||||
if "license_link" in model_card and metadata.license_link is None:
|
||||
metadata.license_link = model_card.get("license_link")
|
||||
|
||||
tags_value = model_card.get("tags", None)
|
||||
if tags_value is not None:
|
||||
|
||||
if metadata.tags is None:
|
||||
metadata.tags = []
|
||||
|
||||
if isinstance(tags_value, str):
|
||||
metadata.tags.append(tags_value)
|
||||
elif isinstance(tags_value, list):
|
||||
metadata.tags.extend(tags_value)
|
||||
|
||||
pipeline_tags_value = model_card.get("pipeline_tag", None)
|
||||
if pipeline_tags_value is not None:
|
||||
|
||||
if metadata.tags is None:
|
||||
metadata.tags = []
|
||||
|
||||
if isinstance(pipeline_tags_value, str):
|
||||
metadata.tags.append(pipeline_tags_value)
|
||||
elif isinstance(pipeline_tags_value, list):
|
||||
metadata.tags.extend(pipeline_tags_value)
|
||||
|
||||
language_value = model_card.get(
|
||||
"languages", model_card.get("language", None)
|
||||
)
|
||||
if language_value is not None:
|
||||
|
||||
if metadata.languages is None:
|
||||
metadata.languages = []
|
||||
|
||||
if isinstance(language_value, str):
|
||||
metadata.languages.append(language_value)
|
||||
elif isinstance(language_value, list):
|
||||
metadata.languages.extend(language_value)
|
||||
|
||||
dataset_value = model_card.get("datasets", model_card.get("dataset", None))
|
||||
if dataset_value is not None:
|
||||
if dataset_value is not None:
|
||||
if isinstance(dataset_value, str):
|
||||
metadata_datasets.append(dataset_value)
|
||||
elif isinstance(dataset_value, list):
|
||||
metadata_datasets.extend(dataset_value)
|
||||
|
||||
if metadata.datasets is None:
|
||||
metadata.datasets = []
|
||||
|
||||
if isinstance(dataset_value, str):
|
||||
metadata.datasets.append(dataset_value)
|
||||
elif isinstance(dataset_value, list):
|
||||
metadata.datasets.extend(dataset_value)
|
||||
for dataset_id in metadata_datasets:
|
||||
# NOTE: model size of base model is assumed to be similar to the size of the current model
|
||||
dataset = {}
|
||||
if isinstance(dataset_id, str):
|
||||
if dataset_id.startswith(("http://", "https://", "ssh://")):
|
||||
dataset["repo_url"] = dataset_id
|
||||
|
||||
# Check if Hugging Face ID is present in URL
|
||||
if "huggingface.co" in dataset_id:
|
||||
match = re.match(
|
||||
r"https?://huggingface.co/([^/]+/[^/]+)$",
|
||||
dataset_id,
|
||||
)
|
||||
if match:
|
||||
dataset_id_component = match.group(1)
|
||||
(
|
||||
dataset_name_component,
|
||||
org_component,
|
||||
basename,
|
||||
finetune,
|
||||
version,
|
||||
size_label,
|
||||
) = Metadata.get_model_id_components(
|
||||
dataset_id_component, total_params
|
||||
)
|
||||
|
||||
# Populate dataset dictionary with extracted components
|
||||
if dataset_name_component is not None:
|
||||
dataset["name"] = Metadata.id_to_title(
|
||||
dataset_name_component
|
||||
)
|
||||
if org_component is not None:
|
||||
dataset["organization"] = Metadata.id_to_title(
|
||||
org_component
|
||||
)
|
||||
if version is not None:
|
||||
dataset["version"] = version
|
||||
|
||||
else:
|
||||
# Likely a Hugging Face ID
|
||||
(
|
||||
dataset_name_component,
|
||||
org_component,
|
||||
basename,
|
||||
finetune,
|
||||
version,
|
||||
size_label,
|
||||
) = Metadata.get_model_id_components(
|
||||
dataset_id, total_params
|
||||
)
|
||||
|
||||
# Populate dataset dictionary with extracted components
|
||||
if dataset_name_component is not None:
|
||||
dataset["name"] = Metadata.id_to_title(
|
||||
dataset_name_component
|
||||
)
|
||||
if org_component is not None:
|
||||
dataset["organization"] = Metadata.id_to_title(
|
||||
org_component
|
||||
)
|
||||
if version is not None:
|
||||
dataset["version"] = version
|
||||
if (
|
||||
org_component is not None
|
||||
and dataset_name_component is not None
|
||||
):
|
||||
dataset["repo_url"] = (
|
||||
f"https://huggingface.co/{org_component}/{dataset_name_component}"
|
||||
)
|
||||
|
||||
elif isinstance(dataset_id, dict):
|
||||
dataset = dataset_id
|
||||
|
||||
else:
|
||||
logger.error(
|
||||
f"dataset entry '{str(dataset_id)}' not in a known format"
|
||||
)
|
||||
|
||||
metadata.datasets.append(dataset)
|
||||
|
||||
use_model_card_metadata("license", "license")
|
||||
use_model_card_metadata("license_name", "license_name")
|
||||
use_model_card_metadata("license_link", "license_link")
|
||||
|
||||
use_array_model_card_metadata("tags", "tags")
|
||||
use_array_model_card_metadata("tags", "pipeline_tag")
|
||||
|
||||
use_array_model_card_metadata("languages", "languages")
|
||||
use_array_model_card_metadata("languages", "language")
|
||||
|
||||
# Hugging Face Parameter Heuristics
|
||||
####################################
|
||||
|
@ -508,7 +700,7 @@ def apply_metadata_heuristic(
|
|||
|
||||
hf_name_or_path = hf_params.get("_name_or_path")
|
||||
if hf_name_or_path is not None and hf_name_or_path.count("/") <= 1:
|
||||
# Use _name_or_path only if it's actually a model name and not some computer path
|
||||
# Use _name_or_path only if its actually a model name and not some computer path
|
||||
# e.g. 'meta-llama/Llama-2-7b-hf'
|
||||
model_id = hf_name_or_path
|
||||
(
|
||||
|
@ -584,7 +776,10 @@ def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter):
|
|||
gguf_writer.add_size_label(self.size_label)
|
||||
|
||||
if self.license is not None:
|
||||
gguf_writer.add_license(self.license)
|
||||
if isinstance(self.license, list):
|
||||
gguf_writer.add_license(",".join(self.license))
|
||||
else:
|
||||
gguf_writer.add_license(self.license)
|
||||
if self.license_name is not None:
|
||||
gguf_writer.add_license_name(self.license_name)
|
||||
if self.license_link is not None:
|
||||
|
@ -621,6 +816,10 @@ def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter):
|
|||
gguf_writer.add_base_model_organization(
|
||||
key, base_model_entry["organization"]
|
||||
)
|
||||
if "description" in base_model_entry:
|
||||
gguf_writer.add_base_model_description(
|
||||
key, base_model_entry["description"]
|
||||
)
|
||||
if "url" in base_model_entry:
|
||||
gguf_writer.add_base_model_url(key, base_model_entry["url"])
|
||||
if "doi" in base_model_entry:
|
||||
|
@ -632,9 +831,33 @@ def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter):
|
|||
key, base_model_entry["repo_url"]
|
||||
)
|
||||
|
||||
if self.datasets is not None:
|
||||
gguf_writer.add_dataset_count(len(self.datasets))
|
||||
for key, dataset_entry in enumerate(self.datasets):
|
||||
if "name" in dataset_entry:
|
||||
gguf_writer.add_dataset_name(key, dataset_entry["name"])
|
||||
if "author" in dataset_entry:
|
||||
gguf_writer.add_dataset_author(key, dataset_entry["author"])
|
||||
if "version" in dataset_entry:
|
||||
gguf_writer.add_dataset_version(key, dataset_entry["version"])
|
||||
if "organization" in dataset_entry:
|
||||
gguf_writer.add_dataset_organization(
|
||||
key, dataset_entry["organization"]
|
||||
)
|
||||
if "description" in dataset_entry:
|
||||
gguf_writer.add_dataset_description(
|
||||
key, dataset_entry["description"]
|
||||
)
|
||||
if "url" in dataset_entry:
|
||||
gguf_writer.add_dataset_url(key, dataset_entry["url"])
|
||||
if "doi" in dataset_entry:
|
||||
gguf_writer.add_dataset_doi(key, dataset_entry["doi"])
|
||||
if "uuid" in dataset_entry:
|
||||
gguf_writer.add_dataset_uuid(key, dataset_entry["uuid"])
|
||||
if "repo_url" in dataset_entry:
|
||||
gguf_writer.add_dataset_repo_url(key, dataset_entry["repo_url"])
|
||||
|
||||
if self.tags is not None:
|
||||
gguf_writer.add_tags(self.tags)
|
||||
if self.languages is not None:
|
||||
gguf_writer.add_languages(self.languages)
|
||||
if self.datasets is not None:
|
||||
gguf_writer.add_datasets(self.datasets)
|
File diff suppressed because it is too large
@ -0,0 +1,884 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import Sequence
|
||||
|
||||
from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES
|
||||
|
||||
|
||||
class TensorNameMap:
|
||||
mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
|
||||
# Token embeddings
|
||||
MODEL_TENSOR.TOKEN_EMBD: (
|
||||
"gpt_neox.embed_in", # gptneox
|
||||
"transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais exaone
|
||||
"transformer.word_embeddings", # falcon
|
||||
"word_embeddings", # bloom
|
||||
"model.embed_tokens", # llama-hf nemotron olmoe olmo2 rwkv6qwen2 glm4-0414
|
||||
"tok_embeddings", # llama-pth
|
||||
"embeddings.word_embeddings", # bert nomic-bert
|
||||
"language_model.embedding.word_embeddings", # persimmon
|
||||
"wte", # gpt2
|
||||
"transformer.embd.wte", # phi2
|
||||
"model.tok_embeddings", # internlm2
|
||||
"model.embedding", # mamba-qbert
|
||||
"backbone.embedding", # mamba
|
||||
"backbone.embeddings", # mamba-hf
|
||||
"transformer.in_out_embed", # Grok
|
||||
"embedding.word_embeddings", # chatglm
|
||||
"transformer.token_embeddings", # openelm
|
||||
"shared", # t5
|
||||
"rwkv.embeddings", # rwkv6
|
||||
"model.embeddings", # rwkv7
|
||||
"model.word_embeddings", # bailingmoe
|
||||
"language_model.model.embed_tokens", # llama4
|
||||
),
|
||||
# Token type embeddings
|
||||
MODEL_TENSOR.TOKEN_TYPES: (
|
||||
"embeddings.token_type_embeddings", # bert nomic-bert
|
||||
),
|
||||
# Normalization of token embeddings
|
||||
MODEL_TENSOR.TOKEN_EMBD_NORM: (
|
||||
"word_embeddings_layernorm", # bloom
|
||||
"embeddings.LayerNorm", # bert
|
||||
"emb_ln", # nomic-bert
|
||||
"transformer.norm", # openelm
|
||||
"rwkv.blocks.0.pre_ln", # rwkv
|
||||
"rwkv.blocks.0.pre_ln", # rwkv6
|
||||
"model.pre_ln", # rwkv7
|
||||
"model.layers.0.pre_norm", # rwkv7
|
||||
"backbone.norm", # wavtokenizer
|
||||
),
|
||||
# Position embeddings
|
||||
MODEL_TENSOR.POS_EMBD: (
|
||||
"transformer.wpe", # gpt2
|
||||
"embeddings.position_embeddings", # bert
|
||||
"wpe", # gpt2
|
||||
),
|
||||
# Output
|
||||
MODEL_TENSOR.OUTPUT: (
|
||||
"embed_out", # gptneox
|
||||
"lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 phimoe
|
||||
"output", # llama-pth bloom internlm2
|
||||
"word_embeddings_for_head", # persimmon
|
||||
"lm_head.linear", # phi2
|
||||
"output_layer", # chatglm
|
||||
"head", # rwkv
|
||||
"head.out", # wavtokenizer
|
||||
"lm_head", # llama4
|
||||
),
|
||||
# Output norm
|
||||
MODEL_TENSOR.OUTPUT_NORM: (
|
||||
"gpt_neox.final_layer_norm", # gptneox
|
||||
"transformer.ln_f", # gpt2 gpt-j falcon jais exaone
|
||||
"model.norm", # llama-hf baichuan internlm2 olmoe olmo2 phimoe
|
||||
"norm", # llama-pth
|
||||
"transformer.norm_f", # mpt dbrx
|
||||
"ln_f", # refact bloom qwen gpt2
|
||||
"language_model.encoder.final_layernorm", # persimmon
|
||||
"model.final_layernorm", # persimmon
|
||||
"lm_head.ln", # phi2
|
||||
"model.norm_f", # mamba-qbert
|
||||
"backbone.norm_f", # mamba
|
||||
"transformer.rms_norm", # Grok
|
||||
"encoder.final_layernorm", # chatglm
|
||||
"transformer.norm", # openelm
|
||||
"model.norm", # nemotron
|
||||
"rwkv.ln_out", # rwkv6
|
||||
"model.ln_out", # rwkv7
|
||||
"backbone.final_layer_norm", # wavtokenizer
|
||||
"model.norm", # llama4
|
||||
),
|
||||
# Rope frequencies
|
||||
MODEL_TENSOR.ROPE_FREQS: (
|
||||
"rope.freqs", # llama-pth
|
||||
"rotary_pos_emb.inv_freq", # chatglm
|
||||
),
|
||||
MODEL_TENSOR.ROPE_FACTORS_LONG: (),
|
||||
MODEL_TENSOR.ROPE_FACTORS_SHORT: (),
|
||||
MODEL_TENSOR.CONV1D: ("backbone.embed",), # roberta
|
||||
}
|
||||
|
||||
block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
|
||||
# Attention norm
|
||||
MODEL_TENSOR.ATTN_NORM: (
|
||||
"gpt_neox.layers.{bid}.input_layernorm", # gptneox
|
||||
"transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen jais exaone
|
||||
"transformer.blocks.{bid}.norm_1", # mpt
|
||||
"transformer.h.{bid}.input_layernorm", # falcon7b
|
||||
"h.{bid}.input_layernorm", # bloom
|
||||
"transformer.h.{bid}.ln_mlp", # falcon40b
|
||||
"model.layers.{bid}.input_layernorm", # llama-hf nemotron olmoe phimoe
|
||||
"layers.{bid}.attention_norm", # llama-pth
|
||||
"language_model.encoder.layers.{bid}.input_layernorm", # persimmon
|
||||
"model.layers.{bid}.ln1", # yi
|
||||
"h.{bid}.ln_1", # gpt2
|
||||
"transformer.h.{bid}.ln", # phi2
|
||||
"model.layers.layers.{bid}.norm", # plamo
|
||||
"model.layers.{bid}.attention_norm", # internlm2
|
||||
"model.layers.{bid}.norm", # mamba-qbert
|
||||
"backbone.layers.{bid}.norm", # mamba
|
||||
"transformer.decoder_layer.{bid}.rms_norm", # Grok
|
||||
"transformer.blocks.{bid}.norm_attn_norm.norm_1", # dbrx
|
||||
"encoder.layers.{bid}.input_layernorm", # chatglm
|
||||
"transformer.layers.{bid}.attn_norm", # openelm
|
||||
"rwkv.blocks.{bid}.ln1", # rwkv6
|
||||
"model.layers.{bid}.ln1", # rwkv7
|
||||
"model.layers.{bid}.input_layernorm", # llama4
|
||||
),
|
||||
# Attention norm 2
|
||||
MODEL_TENSOR.ATTN_NORM_2: (
|
||||
"transformer.h.{bid}.ln_attn", # falcon40b
|
||||
"encoder.layer.{bid}.layer_norm_1", # jina-v2-code
|
||||
"rwkv.blocks.{bid}.ln2", # rwkv6
|
||||
"model.layers.{bid}.ln2", # rwkv7
|
||||
),
|
||||
# Attention query-key-value
|
||||
MODEL_TENSOR.ATTN_QKV: (
|
||||
"gpt_neox.layers.{bid}.attention.query_key_value", # gptneox
|
||||
"transformer.h.{bid}.attn.c_attn", # gpt2 qwen jais
|
||||
"transformer.blocks.{bid}.attn.Wqkv", # mpt
|
||||
"transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv", # dbrx
|
||||
"transformer.h.{bid}.self_attention.query_key_value", # falcon
|
||||
"h.{bid}.self_attention.query_key_value", # bloom
|
||||
"language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon
|
||||
"model.layers.{bid}.self_attn.query_key_value", # persimmon
|
||||
"h.{bid}.attn.c_attn", # gpt2
|
||||
"transformer.h.{bid}.mixer.Wqkv", # phi2
|
||||
"encoder.layers.{bid}.attn.Wqkv", # nomic-bert
|
||||
"model.layers.{bid}.self_attn.qkv_proj", # phi3
|
||||
"encoder.layers.{bid}.self_attention.query_key_value", # chatglm
|
||||
"transformer.layers.{bid}.attn.qkv_proj", # openelm
|
||||
),
|
||||
# Attention query
|
||||
MODEL_TENSOR.ATTN_Q: (
|
||||
"model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron olmoe olmo2 phimoe
|
||||
"model.layers.{bid}.self_attn.q_proj_no_perm", # llama-custom
|
||||
"layers.{bid}.attention.wq", # llama-pth
|
||||
"encoder.layer.{bid}.attention.self.query", # bert
|
||||
"transformer.h.{bid}.attn.q_proj", # gpt-j
|
||||
"model.layers.layers.{bid}.self_attn.q_proj", # plamo
|
||||
"model.layers.{bid}.attention.wq", # internlm2
|
||||
"transformer.decoder_layer.{bid}.multi_head_attention.query", # Grok
|
||||
"transformer.h.{bid}.attn.attention.q_proj", # exaone
|
||||
"model.layers.{bid}.self_attn.q_proj", # llama4
|
||||
),
|
||||
# Attention key
|
||||
MODEL_TENSOR.ATTN_K: (
|
||||
"model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron olmoe olmo2 phimoe
|
||||
"model.layers.{bid}.self_attn.k_proj_no_perm", # llama-custom
|
||||
"layers.{bid}.attention.wk", # llama-pth
|
||||
"encoder.layer.{bid}.attention.self.key", # bert
|
||||
"transformer.h.{bid}.attn.k_proj", # gpt-j
|
||||
"transformer.h.{bid}.attn.k", # refact
|
||||
"model.layers.layers.{bid}.self_attn.k_proj", # plamo
|
||||
"model.layers.{bid}.attention.wk", # internlm2
|
||||
"transformer.decoder_layer.{bid}.multi_head_attention.key", # Grok
|
||||
"transformer.h.{bid}.attn.attention.k_proj", # exaone
|
||||
"model.layers.{bid}.self_attn.k_proj", # llama4
|
||||
),
|
||||
# Attention value
|
||||
MODEL_TENSOR.ATTN_V: (
|
||||
"model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron olmoe olmo2 phimoe
|
||||
"layers.{bid}.attention.wv", # llama-pth
|
||||
"encoder.layer.{bid}.attention.self.value", # bert
|
||||
"transformer.h.{bid}.attn.v_proj", # gpt-j
|
||||
"transformer.h.{bid}.attn.v", # refact
|
||||
"model.layers.layers.{bid}.self_attn.v_proj", # plamo
|
||||
"model.layers.{bid}.attention.wv", # internlm2
|
||||
"transformer.decoder_layer.{bid}.multi_head_attention.value", # Grok
|
||||
"transformer.h.{bid}.attn.attention.v_proj", # exaone
|
||||
"model.layers.{bid}.self_attn.v_proj", # llama4
|
||||
),
|
||||
# Attention output
|
||||
MODEL_TENSOR.ATTN_OUT: (
|
||||
"gpt_neox.layers.{bid}.attention.dense", # gptneox
|
||||
"transformer.h.{bid}.attn.c_proj", # gpt2 refact qwen jais
|
||||
"transformer.blocks.{bid}.attn.out_proj", # mpt
|
||||
"transformer.h.{bid}.self_attention.dense", # falcon
|
||||
"h.{bid}.self_attention.dense", # bloom
|
||||
"model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron olmoe olmo2 phimoe
|
||||
"model.layers.{bid}.self_attn.linear_attn", # deci
|
||||
"layers.{bid}.attention.wo", # llama-pth
|
||||
"encoder.layer.{bid}.attention.output.dense", # bert
|
||||
"transformer.h.{bid}.attn.out_proj", # gpt-j
|
||||
"language_model.encoder.layers.{bid}.self_attention.dense", # persimmon
|
||||
"model.layers.{bid}.self_attn.dense", # persimmon
|
||||
"h.{bid}.attn.c_proj", # gpt2
|
||||
"transformer.h.{bid}.mixer.out_proj", # phi2
|
||||
"model.layers.layers.{bid}.self_attn.o_proj", # plamo
|
||||
"model.layers.{bid}.attention.wo", # internlm2
|
||||
"encoder.layers.{bid}.attn.out_proj", # nomic-bert
|
||||
"transformer.decoder_layer.{bid}.multi_head_attention.linear", # Grok
|
||||
"transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx
|
||||
"encoder.layers.{bid}.self_attention.dense", # chatglm
|
||||
"transformer.layers.{bid}.attn.out_proj", # openelm
|
||||
"transformer.h.{bid}.attn.attention.out_proj", # exaone
|
||||
"model.layers.{bid}.self_attn.o_proj", # llama4
|
||||
),
|
||||
# Attention output norm
|
||||
MODEL_TENSOR.ATTN_OUT_NORM: (
|
||||
"encoder.layer.{bid}.attention.output.LayerNorm", # bert
|
||||
"encoder.layers.{bid}.norm1", # nomic-bert
|
||||
"transformer.decoder_layer.{bid}.rms_norm_1", # Grok
|
||||
"transformer.blocks.{bid}.norm_attn_norm.norm_2", # dbrx
|
||||
),
|
||||
MODEL_TENSOR.ATTN_POST_NORM: (
|
||||
"model.layers.{bid}.post_attention_layernorm", # gemma2 olmo2 # ge
|
||||
"model.layers.{bid}.post_self_attn_layernorm", # glm-4-0414
|
||||
),
|
||||
# Rotary embeddings
|
||||
MODEL_TENSOR.ATTN_ROT_EMBD: (
|
||||
"model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf
|
||||
"layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth
|
||||
"model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo
|
||||
"transformer.h.{bid}.attn.rotary_emb.inv_freq", # codeshell
|
||||
),
|
||||
# Feed-forward norm
|
||||
MODEL_TENSOR.FFN_NORM: (
|
||||
"gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox
|
||||
"transformer.h.{bid}.ln_2", # gpt2 refact qwen jais exaone
|
||||
"h.{bid}.post_attention_layernorm", # bloom
|
||||
"transformer.blocks.{bid}.norm_2", # mpt
|
||||
"model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron olmoe phimoe
|
||||
"layers.{bid}.ffn_norm", # llama-pth
|
||||
"language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
|
||||
"model.layers.{bid}.ln2", # yi
|
||||
"h.{bid}.ln_2", # gpt2
|
||||
"model.layers.{bid}.ffn_norm", # internlm2
|
||||
"transformer.decoder_layer.{bid}.rms_norm_2", # Grok
|
||||
"encoder.layers.{bid}.post_attention_layernorm", # chatglm
|
||||
"transformer.layers.{bid}.ffn_norm", # openelm
|
||||
"model.layers.{bid}.post_attention_layernorm", # llama4
|
||||
),
|
||||
# Pre feed-forward norm
|
||||
MODEL_TENSOR.FFN_PRE_NORM: (
|
||||
"model.layers.{bid}.pre_feedforward_layernorm", # gemma2
|
||||
),
|
||||
# Post feed-forward norm
|
||||
MODEL_TENSOR.FFN_POST_NORM: (
|
||||
"model.layers.{bid}.post_feedforward_layernorm", # gemma2 olmo2
|
||||
"model.layers.{bid}.post_mlp_layernorm", # glm-4-0414
|
||||
),
|
||||
MODEL_TENSOR.FFN_GATE_INP: (
|
||||
"layers.{bid}.feed_forward.gate", # mixtral
|
||||
"model.layers.{bid}.block_sparse_moe.gate", # mixtral phimoe
|
||||
"model.layers.{bid}.mlp.gate", # qwen2moe olmoe
|
||||
"transformer.decoder_layer.{bid}.router", # Grok
|
||||
"transformer.blocks.{bid}.ffn.router.layer", # dbrx
|
||||
"model.layers.{bid}.block_sparse_moe.router.layer", # granitemoe
|
||||
"model.layers.{bid}.feed_forward.router", # llama4
|
||||
"encoder.layers.{bid}.mlp.router.layer", # nomic-bert-moe
|
||||
),
|
||||
MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
|
||||
"model.layers.{bid}.mlp.shared_expert_gate", # qwen2moe
|
||||
),
|
||||
MODEL_TENSOR.FFN_EXP_PROBS_B: (
|
||||
"model.layers.{bid}.mlp.gate.e_score_correction", # deepseek-v3
|
||||
),
|
||||
# Feed-forward up
|
||||
MODEL_TENSOR.FFN_UP: (
|
||||
"gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox
|
||||
"transformer.h.{bid}.mlp.c_fc", # gpt2 jais
|
||||
"transformer.blocks.{bid}.ffn.up_proj", # mpt
|
||||
"transformer.h.{bid}.mlp.dense_h_to_4h", # falcon
|
||||
"h.{bid}.mlp.dense_h_to_4h", # bloom
|
||||
"model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron olmo2
|
||||
"layers.{bid}.feed_forward.w3", # llama-pth
|
||||
"encoder.layer.{bid}.intermediate.dense", # bert
|
||||
"transformer.h.{bid}.mlp.fc_in", # gpt-j
|
||||
"transformer.h.{bid}.mlp.linear_3", # refact
|
||||
"language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
|
||||
"model.layers.{bid}.mlp.dense_h_to_4h", # persimmon
|
||||
"transformer.h.{bid}.mlp.w1", # qwen
|
||||
"h.{bid}.mlp.c_fc", # gpt2
|
||||
"transformer.h.{bid}.mlp.fc1", # phi2
|
||||
"model.layers.{bid}.mlp.fc1", # phi2
|
||||
"model.layers.{bid}.mlp.gate_up_proj", # phi3 glm-4-0414
|
||||
"model.layers.layers.{bid}.mlp.up_proj", # plamo
|
||||
"model.layers.{bid}.feed_forward.w3", # internlm2
|
||||
"encoder.layers.{bid}.mlp.fc11", # nomic-bert
|
||||
"encoder.layers.{bid}.mlp.fc1", # nomic-bert-moe
|
||||
"model.layers.{bid}.mlp.c_fc", # starcoder2
|
||||
"encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2
|
||||
"model.layers.{bid}.residual_mlp.w3", # arctic
|
||||
"encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm
|
||||
"transformer.h.{bid}.mlp.c_fc_1", # exaone
|
||||
"model.layers.{bid}.feed_forward.up_proj", # llama4
|
||||
),
|
||||
MODEL_TENSOR.FFN_UP_EXP: (
|
||||
"layers.{bid}.feed_forward.experts.w3", # mixtral (merged)
|
||||
"transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged)
|
||||
"transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx
|
||||
"model.layers.{bid}.mlp.experts.up_proj", # qwen2moe olmoe (merged)
|
||||
"model.layers.{bid}.block_sparse_moe.experts.w3", # phimoe (merged)
|
||||
"model.layers.{bid}.feed_forward.experts.up_proj", # llama4
|
||||
"encoder.layers.{bid}.mlp.experts.mlp.w1", # nomic-bert-moe
|
||||
),
|
||||
MODEL_TENSOR.FFN_UP_SHEXP: (
|
||||
"model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe
|
||||
"model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek deepseek2
|
||||
"model.layers.{bid}.feed_forward.shared_expert.up_proj", # llama4
|
||||
),
|
||||
# AWQ-activation gate
|
||||
MODEL_TENSOR.FFN_ACT: ("transformer.blocks.{bid}.ffn.act",), # mpt
|
||||
# Feed-forward gate
|
||||
MODEL_TENSOR.FFN_GATE: (
|
||||
"model.layers.{bid}.mlp.gate_proj", # llama-hf refact olmo2
|
||||
"layers.{bid}.feed_forward.w1", # llama-pth
|
||||
"transformer.h.{bid}.mlp.w2", # qwen
|
||||
"transformer.h.{bid}.mlp.c_fc2", # jais
|
||||
"model.layers.layers.{bid}.mlp.gate_proj", # plamo
|
||||
"model.layers.{bid}.feed_forward.w1", # internlm2
|
||||
"encoder.layers.{bid}.mlp.fc12", # nomic-bert
|
||||
"encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2
|
||||
"transformer.h.{bid}.mlp.linear_1", # refact
|
||||
"model.layers.{bid}.residual_mlp.w1", # arctic
|
||||
"transformer.h.{bid}.mlp.c_fc_0", # exaone
|
||||
"model.layers.{bid}.feed_forward.gate_proj", # llama4
|
||||
),
|
||||
MODEL_TENSOR.FFN_GATE_EXP: (
|
||||
"layers.{bid}.feed_forward.experts.w1", # mixtral (merged)
|
||||
"transformer.decoder_layer.{bid}.moe.linear", # Grok (merged)
|
||||
"transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx
|
||||
"model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe olmoe (merged)
|
||||
"model.layers.{bid}.block_sparse_moe.experts.w1", # phimoe (merged)
|
||||
"model.layers.{bid}.feed_forward.experts.gate_proj", # llama4
|
||||
),
|
||||
MODEL_TENSOR.FFN_GATE_SHEXP: (
|
||||
"model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe
|
||||
"model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek deepseek2
|
||||
"model.layers.{bid}.feed_forward.shared_expert.gate_proj", # llama4
|
||||
),
|
||||
# Feed-forward down
|
||||
MODEL_TENSOR.FFN_DOWN: (
|
||||
"gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox
|
||||
"transformer.h.{bid}.mlp.c_proj", # gpt2 refact qwen jais
|
||||
"transformer.blocks.{bid}.ffn.down_proj", # mpt
|
||||
"transformer.h.{bid}.mlp.dense_4h_to_h", # falcon
|
||||
"h.{bid}.mlp.dense_4h_to_h", # bloom
|
||||
"model.layers.{bid}.mlp.down_proj", # llama-hf nemotron olmo2
|
||||
"layers.{bid}.feed_forward.w2", # llama-pth
|
||||
"encoder.layer.{bid}.output.dense", # bert
|
||||
"transformer.h.{bid}.mlp.fc_out", # gpt-j
|
||||
"language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon
|
||||
"model.layers.{bid}.mlp.dense_4h_to_h", # persimmon
|
||||
"h.{bid}.mlp.c_proj", # gpt2
|
||||
"transformer.h.{bid}.mlp.fc2", # phi2
|
||||
"model.layers.{bid}.mlp.fc2", # phi2
|
||||
"model.layers.layers.{bid}.mlp.down_proj", # plamo
|
||||
"model.layers.{bid}.feed_forward.w2", # internlm2
|
||||
"encoder.layers.{bid}.mlp.fc2", # nomic-bert
|
||||
"model.layers.{bid}.mlp.c_proj", # starcoder2
|
||||
"encoder.layer.{bid}.mlp.wo", # jina-bert-v2
|
||||
"transformer.layers.{bid}.ffn.proj_2", # openelm
|
||||
"model.layers.{bid}.residual_mlp.w2", # arctic
|
||||
"encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2
|
||||
"encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm
|
||||
"model.layers.h.{bid}.mlp.c_proj", # exaone
|
||||
"model.layers.{bid}.feed_forward.down_proj", # llama4
|
||||
),
|
||||
MODEL_TENSOR.FFN_DOWN_EXP: (
|
||||
"layers.{bid}.feed_forward.experts.w2", # mixtral (merged)
|
||||
"transformer.decoder_layer.{bid}.moe.linear_1", # Grok (merged)
|
||||
"transformer.blocks.{bid}.ffn.experts.mlp.w2", # dbrx
|
||||
"model.layers.{bid}.mlp.experts.down_proj", # qwen2moe olmoe (merged)
|
||||
"model.layers.{bid}.block_sparse_moe.output_linear", # granitemoe
|
||||
"model.layers.{bid}.block_sparse_moe.experts.w2", # phimoe (merged)
|
||||
"model.layers.{bid}.feed_forward.experts.down_proj", # llama4
|
||||
"encoder.layers.{bid}.mlp.experts.mlp.w2", # nomic-bert-moe
|
||||
),
|
||||
MODEL_TENSOR.FFN_DOWN_SHEXP: (
|
||||
"model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe
|
||||
"model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek deepseek2
|
||||
"model.layers.{bid}.feed_forward.shared_expert.down_proj", # llama4
|
||||
"model.layers.{bid}.shared_mlp.output_linear", # granitemoe
|
||||
),
|
||||
MODEL_TENSOR.ATTN_Q_NORM: (
|
||||
"language_model.encoder.layers.{bid}.self_attention.q_layernorm",
|
||||
"model.layers.{bid}.self_attn.q_layernorm", # persimmon
|
||||
"model.layers.{bid}.self_attn.q_norm", # cohere olmoe chameleon olmo2
|
||||
"transformer.blocks.{bid}.attn.q_ln", # sea-lion
|
||||
"encoder.layer.{bid}.attention.self.layer_norm_q", # jina-bert-v2
|
||||
"transformer.layers.{bid}.attn.q_norm", # openelm
|
||||
),
|
||||
MODEL_TENSOR.ATTN_K_NORM: (
|
||||
"language_model.encoder.layers.{bid}.self_attention.k_layernorm",
|
||||
"model.layers.{bid}.self_attn.k_layernorm", # persimmon
|
||||
"model.layers.{bid}.self_attn.k_norm", # cohere olmoe chameleon olmo2
|
||||
"transformer.blocks.{bid}.attn.k_ln", # sea-lion
|
||||
"encoder.layer.{bid}.attention.self.layer_norm_k", # jina-bert-v2
|
||||
"transformer.layers.{bid}.attn.k_norm", # openelm
|
||||
),
|
||||
MODEL_TENSOR.ROPE_FREQS: (
|
||||
"language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq", # persimmon
|
||||
),
|
||||
MODEL_TENSOR.LAYER_OUT_NORM: (
|
||||
"encoder.layer.{bid}.output.LayerNorm", # bert
|
||||
"encoder.layers.{bid}.norm2", # nomic-bert
|
||||
"transformer.decoder_layer.{bid}.rms_norm_3", # Grok
|
||||
"encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2
|
||||
"encoder.layer.{bid}.layer_norm_2", # jina-v2-code
|
||||
),
|
||||
MODEL_TENSOR.SSM_IN: (
|
||||
"model.layers.{bid}.in_proj",
|
||||
"backbone.layers.{bid}.mixer.in_proj",
|
||||
),
|
||||
MODEL_TENSOR.SSM_CONV1D: (
|
||||
"model.layers.{bid}.conv1d",
|
||||
"backbone.layers.{bid}.mixer.conv1d",
|
||||
),
|
||||
MODEL_TENSOR.SSM_X: (
|
||||
"model.layers.{bid}.x_proj",
|
||||
"backbone.layers.{bid}.mixer.x_proj",
|
||||
),
|
||||
MODEL_TENSOR.SSM_DT: (
|
||||
"model.layers.{bid}.dt_proj",
|
||||
"backbone.layers.{bid}.mixer.dt_proj",
|
||||
),
|
||||
MODEL_TENSOR.SSM_A: (
|
||||
"model.layers.{bid}.A_log",
|
||||
"backbone.layers.{bid}.mixer.A_log",
|
||||
),
|
||||
MODEL_TENSOR.SSM_D: (
|
||||
"model.layers.{bid}.D",
|
||||
"backbone.layers.{bid}.mixer.D",
|
||||
),
|
||||
MODEL_TENSOR.SSM_OUT: (
|
||||
"model.layers.{bid}.out_proj",
|
||||
"backbone.layers.{bid}.mixer.out_proj",
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_W0: ("model.layers.{bid}.attention.w0",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_W1: (
|
||||
"rwkv.blocks.{bid}.attention.time_maa_w1", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_maa_w1", # rwkv6qwen2
|
||||
"model.layers.{bid}.attention.w1", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_W2: (
|
||||
"rwkv.blocks.{bid}.attention.time_maa_w2", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_maa_w2", # rwkv6qwen2
|
||||
"model.layers.{bid}.attention.w2", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_A0: ("model.layers.{bid}.attention.a0",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_A1: ("model.layers.{bid}.attention.a1",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_A2: ("model.layers.{bid}.attention.a2",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_V0: ("model.layers.{bid}.attention.v0",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_V1: ("model.layers.{bid}.attention.v1",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_V2: ("model.layers.{bid}.attention.v2",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_G1: ("model.layers.{bid}.attention.g1",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_G2: ("model.layers.{bid}.attention.g2",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_K_K: ("model.layers.{bid}.attention.k_k",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_K_A: ("model.layers.{bid}.attention.k_a",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_R_K: ("model.layers.{bid}.attention.r_k",), # rwkv7
|
||||
MODEL_TENSOR.TIME_MIX_LERP_X: (
|
||||
"rwkv.blocks.{bid}.attention.time_maa_x", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_maa_x", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_LERP_K: (
|
||||
"rwkv.blocks.{bid}.attention.time_maa_k", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_maa_k", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_LERP_V: (
|
||||
"rwkv.blocks.{bid}.attention.time_maa_v", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_maa_v", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_LERP_R: (
|
||||
"rwkv.blocks.{bid}.attention.time_maa_r", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_maa_r", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_LERP_G: (
|
||||
"rwkv.blocks.{bid}.attention.time_maa_g", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_maa_g", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_LERP_W: (
|
||||
"rwkv.blocks.{bid}.attention.time_maa_w", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_maa_w", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_FIRST: (
|
||||
"rwkv.blocks.{bid}.attention.time_faaaa", # rwkv6
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_DECAY: (
|
||||
"rwkv.blocks.{bid}.attention.time_decay", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_decay", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_DECAY_W1: (
|
||||
"rwkv.blocks.{bid}.attention.time_decay_w1", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_decay_w1", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_DECAY_W2: (
|
||||
"rwkv.blocks.{bid}.attention.time_decay_w2", # rwkv6
|
||||
"model.layers.{bid}.self_attn.time_decay_w2", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_KEY: (
|
||||
"rwkv.blocks.{bid}.attention.key", # rwkv6
|
||||
"model.layers.{bid}.self_attn.k_proj", # rwkv6qwen2
|
||||
"model.layers.{bid}.attention.key", # rwkv7
|
||||
"model.layers.{bid}.attention.k_proj", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_VALUE: (
|
||||
"rwkv.blocks.{bid}.attention.value", # rwkv6
|
||||
"model.layers.{bid}.self_attn.v_proj", # rwkv6qwen2
|
||||
"model.layers.{bid}.attention.value", # rwkv7
|
||||
"model.layers.{bid}.attention.v_proj", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_RECEPTANCE: (
|
||||
"rwkv.blocks.{bid}.attention.receptance", # rwkv6
|
||||
"model.layers.{bid}.self_attn.q_proj", # rwkv6qwen2
|
||||
"model.layers.{bid}.attention.receptance", # rwkv7
|
||||
"model.layers.{bid}.attention.r_proj", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_GATE: (
|
||||
"rwkv.blocks.{bid}.attention.gate", # rwkv6
|
||||
"model.layers.{bid}.self_attn.gate", # rwkv6qwen2
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_LN: (
|
||||
"rwkv.blocks.{bid}.attention.ln_x", # rwkv6
|
||||
"model.layers.{bid}.attention.ln_x", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.TIME_MIX_OUTPUT: (
|
||||
"rwkv.blocks.{bid}.attention.output", # rwkv6
|
||||
"model.layers.{bid}.self_attn.o_proj", # rwkv6qwen2
|
||||
"model.layers.{bid}.attention.output", # rwkv7
|
||||
"model.layers.{bid}.attention.o_proj", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.CHANNEL_MIX_LERP_K: (
|
||||
"rwkv.blocks.{bid}.feed_forward.time_maa_k", # rwkv6
|
||||
"model.layers.{bid}.feed_forward.x_k", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.CHANNEL_MIX_LERP_R: (
|
||||
"rwkv.blocks.{bid}.feed_forward.time_maa_r", # rwkv6
|
||||
),
|
||||
MODEL_TENSOR.CHANNEL_MIX_KEY: (
|
||||
"rwkv.blocks.{bid}.feed_forward.key", # rwkv6
|
||||
"model.layers.{bid}.feed_forward.key", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.CHANNEL_MIX_RECEPTANCE: (
|
||||
"rwkv.blocks.{bid}.feed_forward.receptance", # rwkv6
|
||||
),
|
||||
MODEL_TENSOR.CHANNEL_MIX_VALUE: (
|
||||
"rwkv.blocks.{bid}.feed_forward.value", # rwkv6
|
||||
"model.layers.{bid}.feed_forward.value", # rwkv7
|
||||
),
|
||||
MODEL_TENSOR.ATTN_Q_A: ("model.layers.{bid}.self_attn.q_a_proj",), # deepseek2
|
||||
MODEL_TENSOR.ATTN_Q_B: ("model.layers.{bid}.self_attn.q_b_proj",), # deepseek2
|
||||
MODEL_TENSOR.ATTN_KV_A_MQA: (
|
||||
"model.layers.{bid}.self_attn.kv_a_proj_with_mqa", # deepseek2
|
||||
),
|
||||
MODEL_TENSOR.ATTN_KV_B: (
|
||||
"model.layers.{bid}.self_attn.kv_b_proj", # deepseek2
|
||||
),
|
||||
MODEL_TENSOR.ATTN_K_B: ("model.layers.{bid}.self_attn.k_b_proj",), # deepseek2
|
||||
MODEL_TENSOR.ATTN_V_B: ("model.layers.{bid}.self_attn.v_b_proj",), # deepseek2
|
||||
MODEL_TENSOR.ATTN_Q_A_NORM: (
|
||||
"model.layers.{bid}.self_attn.q_a_layernorm", # deepseek2
|
||||
),
|
||||
MODEL_TENSOR.ATTN_KV_A_NORM: (
|
||||
"model.layers.{bid}.self_attn.kv_a_layernorm", # deepseek2
|
||||
),
|
||||
MODEL_TENSOR.ATTN_SUB_NORM: (
|
||||
"model.layers.{bid}.self_attn.inner_attn_ln", # bitnet
|
||||
),
|
||||
MODEL_TENSOR.FFN_SUB_NORM: ("model.layers.{bid}.mlp.ffn_layernorm",), # bitnet
|
||||
MODEL_TENSOR.DEC_ATTN_NORM: ("decoder.block.{bid}.layer.0.layer_norm",), # t5
|
||||
MODEL_TENSOR.DEC_ATTN_Q: ("decoder.block.{bid}.layer.0.SelfAttention.q",), # t5
|
||||
MODEL_TENSOR.DEC_ATTN_K: ("decoder.block.{bid}.layer.0.SelfAttention.k",), # t5
|
||||
MODEL_TENSOR.DEC_ATTN_V: ("decoder.block.{bid}.layer.0.SelfAttention.v",), # t5
|
||||
MODEL_TENSOR.DEC_ATTN_OUT: (
|
||||
"decoder.block.{bid}.layer.0.SelfAttention.o", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_ATTN_REL_B: (
|
||||
"decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
|
||||
"decoder.block.{bid}.layer.1.layer_norm", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.q", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_K: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.k", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_V: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.v", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.o", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
|
||||
"decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_FFN_NORM: ("decoder.block.{bid}.layer.2.layer_norm",), # t5
|
||||
MODEL_TENSOR.DEC_FFN_GATE: (
|
||||
"decoder.block.{bid}.layer.2.DenseReluDense.wi_0", # flan-t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_FFN_UP: (
|
||||
"decoder.block.{bid}.layer.2.DenseReluDense.wi", # t5
|
||||
"decoder.block.{bid}.layer.2.DenseReluDense.wi_1", # flan-t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_FFN_DOWN: (
|
||||
"decoder.block.{bid}.layer.2.DenseReluDense.wo", # t5
|
||||
),
|
||||
MODEL_TENSOR.DEC_OUTPUT_NORM: ("decoder.final_layer_norm",), # t5
|
||||
MODEL_TENSOR.ENC_ATTN_NORM: ("encoder.block.{bid}.layer.0.layer_norm",), # t5
|
||||
MODEL_TENSOR.ENC_ATTN_Q: ("encoder.block.{bid}.layer.0.SelfAttention.q",), # t5
|
||||
MODEL_TENSOR.ENC_ATTN_K: ("encoder.block.{bid}.layer.0.SelfAttention.k",), # t5
|
||||
MODEL_TENSOR.ENC_ATTN_V: ("encoder.block.{bid}.layer.0.SelfAttention.v",), # t5
|
||||
MODEL_TENSOR.ENC_ATTN_OUT: (
|
||||
"encoder.block.{bid}.layer.0.SelfAttention.o", # t5
|
||||
),
|
||||
MODEL_TENSOR.ENC_ATTN_REL_B: (
|
||||
"encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
|
||||
),
|
||||
MODEL_TENSOR.ENC_FFN_NORM: ("encoder.block.{bid}.layer.1.layer_norm",), # t5
|
||||
MODEL_TENSOR.ENC_FFN_GATE: (
|
||||
"encoder.block.{bid}.layer.1.DenseReluDense.wi_0", # flan-t5
|
||||
),
|
||||
MODEL_TENSOR.ENC_FFN_UP: (
|
||||
"encoder.block.{bid}.layer.1.DenseReluDense.wi", # t5
|
||||
"encoder.block.{bid}.layer.1.DenseReluDense.wi_1", # flan-t5
|
||||
),
|
||||
MODEL_TENSOR.ENC_FFN_DOWN: (
|
||||
"encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5
|
||||
),
|
||||
############################################################################
|
||||
# TODO: these do not belong to block_mappings_cfg - move them to mappings_cfg
|
||||
MODEL_TENSOR.ENC_OUTPUT_NORM: ("encoder.final_layer_norm",), # t5
|
||||
MODEL_TENSOR.CLS: (
|
||||
"classifier", # jina
|
||||
"classifier.dense", # roberta
|
||||
),
|
||||
MODEL_TENSOR.CLS_OUT: ("classifier.out_proj",), # roberta
|
||||
#############################################################################
|
||||
MODEL_TENSOR.CONVNEXT_DW: ("backbone.convnext.{bid}.dwconv",), # wavtokenizer
|
||||
MODEL_TENSOR.CONVNEXT_NORM: ("backbone.convnext.{bid}.norm",), # wavtokenizer
|
||||
MODEL_TENSOR.CONVNEXT_PW1: ("backbone.convnext.{bid}.pwconv1",), # wavtokenizer
|
||||
MODEL_TENSOR.CONVNEXT_PW2: ("backbone.convnext.{bid}.pwconv2",), # wavtokenizer
|
||||
MODEL_TENSOR.CONVNEXT_GAMMA: ("backbone.convnext.{bid}.gamma",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_CONV1: ("backbone.posnet.{bid}.conv1",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_CONV2: ("backbone.posnet.{bid}.conv2",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_NORM: ("backbone.posnet.{bid}.norm",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_NORM1: ("backbone.posnet.{bid}.norm1",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_NORM2: ("backbone.posnet.{bid}.norm2",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_ATTN_NORM: ("backbone.posnet.{bid}.norm",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_ATTN_Q: ("backbone.posnet.{bid}.q",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_ATTN_K: ("backbone.posnet.{bid}.k",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_ATTN_V: ("backbone.posnet.{bid}.v",), # wavtokenizer
|
||||
MODEL_TENSOR.POSNET_ATTN_OUT: (
|
||||
"backbone.posnet.{bid}.proj_out", # wavtokenizer
|
||||
),
|
||||
#############################################################################
|
||||
## Vision encoder
|
||||
MODEL_TENSOR.V_MMPROJ: (
|
||||
"multi_modal_projector.linear_{bid}",
|
||||
"visual.merger.mlp.{bid}", # qwen2vl
|
||||
),
|
||||
MODEL_TENSOR.V_MMPROJ_FC: (
|
||||
"model.connector.modality_projection.proj", # SmolVLM
|
||||
),
|
||||
MODEL_TENSOR.V_MMPROJ_MLP: (
|
||||
"model.mm_projector.mlp.mlp.{bid}",
|
||||
"mlp1.{bid}", # InternVL
|
||||
),
|
||||
MODEL_TENSOR.V_MMPROJ_PEG: ("model.mm_projector.peg.peg.{bid}",),
|
||||
MODEL_TENSOR.V_ENC_EMBD_CLS: (
|
||||
"vision_tower.vision_model.embeddings.class_embedding",
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_EMBD_PATCH: (
|
||||
"vision_tower.vision_model.embeddings.patch_embedding",
|
||||
"vpm.embeddings.patch_embedding",
|
||||
"model.vision_model.embeddings.patch_embedding", # SmolVLM
|
||||
"vision_tower.patch_conv", # pixtral
|
||||
"visual.patch_embed.proj", # qwen2vl
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_EMBD_POS: (
|
||||
"vision_tower.vision_model.embeddings.position_embedding",
|
||||
"vpm.embeddings.position_embedding",
|
||||
"model.vision_model.embeddings.position_embedding", # SmolVLM
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_ATTN_Q: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.self_attn.q_proj",
|
||||
"vpm.encoder.layers.{bid}.self_attn.q_proj",
|
||||
"model.vision_model.encoder.layers.{bid}.self_attn.q_proj", # SmolVLM
|
||||
"vision_tower.transformer.layers.{bid}.attention.q_proj", # pixtral
|
||||
"visual.blocks.{bid}.attn.q", # qwen2vl, generated
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_ATTN_Q_NORM: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.attn.q_norm", # InternVL
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_ATTN_K: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.self_attn.k_proj",
|
||||
"vpm.encoder.layers.{bid}.self_attn.k_proj",
|
||||
"model.vision_model.encoder.layers.{bid}.self_attn.k_proj", # SmolVLM
|
||||
"vision_tower.transformer.layers.{bid}.attention.k_proj", # pixtral
|
||||
"visual.blocks.{bid}.attn.k", # qwen2vl, generated
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_ATTN_K_NORM: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.attn.k_norm", # InternVL
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_ATTN_V: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.self_attn.v_proj",
|
||||
"vpm.encoder.layers.{bid}.self_attn.v_proj",
|
||||
"model.vision_model.encoder.layers.{bid}.self_attn.v_proj", # SmolVLM
|
||||
"vision_tower.transformer.layers.{bid}.attention.v_proj", # pixtral
|
||||
"visual.blocks.{bid}.attn.v", # qwen2vl, generated
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_INPUT_NORM: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.layer_norm1",
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.norm1", # InternVL
|
||||
"vpm.encoder.layers.{bid}.layer_norm1",
|
||||
"model.vision_model.encoder.layers.{bid}.layer_norm1", # SmolVLM
|
||||
"vision_tower.transformer.layers.{bid}.attention_norm", # pixtral
|
||||
"visual.blocks.{bid}.norm1", # qwen2vl
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_OUTPUT: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.self_attn.out_proj",
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.attn.proj", # InternVL
|
||||
"vpm.encoder.layers.{bid}.self_attn.out_proj",
|
||||
"model.vision_model.encoder.layers.{bid}.self_attn.out_proj", # SmolVLM
|
||||
"vision_tower.transformer.layers.{bid}.attention.o_proj", # pixtral
|
||||
"visual.blocks.{bid}.attn.proj", # qwen2vl
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_OUTPUT_NORM: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.layer_norm2",
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.norm2", # InternVL
|
||||
"vpm.encoder.layers.{bid}.layer_norm2",
|
||||
"model.vision_model.encoder.layers.{bid}.layer_norm2", # SmolVLM
|
||||
"vision_tower.transformer.layers.{bid}.ffn_norm", # pixtral
|
||||
"visual.blocks.{bid}.norm2", # qwen2vl
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_FFN_UP: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.mlp.fc1",
|
||||
"vpm.encoder.layers.{bid}.mlp.fc1",
|
||||
"model.vision_model.encoder.layers.{bid}.mlp.fc1", # SmolVLM, gemma3
|
||||
"vision_tower.transformer.layers.{bid}.feed_forward.up_proj", # pixtral
|
||||
"visual.blocks.{bid}.mlp.fc1", # qwen2vl
|
||||
"visual.blocks.{bid}.mlp.up_proj", # qwen2.5vl
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_FFN_GATE: (
|
||||
"vision_tower.transformer.layers.{bid}.feed_forward.gate_proj", # pixtral
|
||||
"visual.blocks.{bid}.mlp.gate_proj", # qwen2.5vl
|
||||
),
|
||||
MODEL_TENSOR.V_ENC_FFN_DOWN: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.mlp.fc2",
|
||||
"vpm.encoder.layers.{bid}.mlp.fc2",
|
||||
"model.vision_model.encoder.layers.{bid}.mlp.fc2", # SmolVLM, gemma3
|
||||
"vision_tower.transformer.layers.{bid}.feed_forward.down_proj", # pixtral
|
||||
"visual.blocks.{bid}.mlp.fc2", # qwen2vl
|
||||
"visual.blocks.{bid}.mlp.down_proj", # qwen2.5vl
|
||||
),
|
||||
MODEL_TENSOR.V_LAYER_SCALE_1: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.ls1", # InternVL
|
||||
),
|
||||
MODEL_TENSOR.V_LAYER_SCALE_2: (
|
||||
"vision_tower.vision_model.encoder.layers.{bid}.ls2", # InternVL
|
||||
),
|
||||
MODEL_TENSOR.V_PRE_NORM: (
|
||||
"vision_tower.vision_model.pre_layrnorm",
|
||||
"vision_tower.ln_pre", # pixtral
|
||||
),
|
||||
MODEL_TENSOR.V_POST_NORM: (
|
||||
"vision_tower.vision_model.post_layernorm",
|
||||
"model.vision_model.post_layernorm", # SmolVLM
|
||||
"visual.merger.ln_q", # qwen2vl
|
||||
),
|
||||
MODEL_TENSOR.V_MM_INP_PROJ: ("multi_modal_projector.mm_input_projection",),
|
||||
MODEL_TENSOR.V_MM_INP_NORM: ("multi_modal_projector.norm",),
|
||||
MODEL_TENSOR.V_MM_SOFT_EMB_NORM: ("multi_modal_projector.mm_soft_emb_norm",),
|
||||
MODEL_TENSOR.V_RESMPL_POS_EMBD_K: ("resampler.pos_embed_k",),
|
||||
MODEL_TENSOR.V_RESMPL_ATTN_Q: (
|
||||
"resampler.attn.in_proj_q", # tensor generated from resampler.attn.in_proj
|
||||
),
|
||||
MODEL_TENSOR.V_RESMPL_ATTN_K: (
|
||||
"resampler.attn.in_proj_k", # tensor generated from resampler.attn.in_proj
|
||||
),
|
||||
MODEL_TENSOR.V_RESMPL_ATTN_V: (
|
||||
"resampler.attn.in_proj_v", # tensor generated from resampler.attn.in_proj
|
||||
),
|
||||
MODEL_TENSOR.V_RESMPL_ATTN_OUT: ("resampler.attn.out_proj",),
|
||||
MODEL_TENSOR.V_RESMPL_KV: ("resampler.kv_proj",),
|
||||
MODEL_TENSOR.V_RESMPL_POST_NORM: ("resampler.ln_post",),
|
||||
MODEL_TENSOR.V_RESMPL_KV_NORM: ("resampler.ln_kv",),
|
||||
MODEL_TENSOR.V_RESMPL_Q_NORM: ("resampler.ln_q",),
|
||||
MODEL_TENSOR.V_RESMPL_PROJ: ("resampler.proj",),
|
||||
MODEL_TENSOR.V_RESMPL_QUERY: ("resampler.query",),
|
||||
MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK: (
|
||||
"v.token_embd.img_break", # for pixtral, this is a generated vector
|
||||
),
|
||||
MODEL_TENSOR.V_MM_PATCH_MERGER: (
|
||||
"multi_modal_projector.patch_merger.merging_layer", # mistral small 3.1
|
||||
),
|
||||
    }

    # architecture-specific block mappings
    arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
        MODEL_ARCH.ARCTIC: {
            MODEL_TENSOR.FFN_NORM: ("model.layers.{bid}.residual_layernorm",),
            MODEL_TENSOR.FFN_NORM_EXP: ("model.layers.{bid}.post_attention_layernorm",),
        },
    }

    mapping: dict[str, tuple[MODEL_TENSOR, str]]

    def __init__(self, arch: MODEL_ARCH, n_blocks: int):
        self.mapping = {}
        for tensor, keys in self.mappings_cfg.items():
            if tensor not in MODEL_TENSORS[arch]:
                continue
            tensor_name = TENSOR_NAMES[tensor]
            self.mapping[tensor_name] = (tensor, tensor_name)
            for key in keys:
                self.mapping[key] = (tensor, tensor_name)
        if arch in self.arch_block_mappings_cfg:
            self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
        for bid in range(n_blocks):
            for tensor, keys in self.block_mappings_cfg.items():
                if tensor not in MODEL_TENSORS[arch]:
                    continue

                tensor_name = TENSOR_NAMES[tensor].format(bid=bid)
                self.mapping[tensor_name] = (tensor, tensor_name)
                for key in keys:
                    key = key.format(bid=bid)
                    self.mapping[key] = (tensor, tensor_name)

    def get_type_and_name(
        self, key: str, try_suffixes: Sequence[str] = ()
    ) -> tuple[MODEL_TENSOR, str] | None:
        result = self.mapping.get(key)
        if result is not None:
            return result
        for suffix in try_suffixes:
            if key.endswith(suffix):
                result = self.mapping.get(key[: -len(suffix)])
                if result is not None:
                    return result[0], result[1] + suffix
        return None

    def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
        result = self.get_type_and_name(key, try_suffixes=try_suffixes)
        if result is None:
            return None
        return result[1]

    def get_type(
        self, key: str, try_suffixes: Sequence[str] = ()
    ) -> MODEL_TENSOR | None:
        result = self.get_type_and_name(key, try_suffixes=try_suffixes)
        if result is None:
            return None
        return result[0]

    def __getitem__(self, key: str) -> str:
        try:
            return self.mapping[key][1]
        except KeyError:
            raise KeyError(key)

    def __contains__(self, key: str) -> bool:
        return key in self.mapping

    def __repr__(self) -> str:
        return repr(self.mapping)


def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
    return TensorNameMap(arch, n_blocks)
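For orientation, the mapping above is normally consumed through get_tensor_name_map during conversion. A minimal usage sketch, assuming the gguf-py package layout and a LLaMA-style model (the tensor name and the expected output are illustrative):

from gguf.constants import MODEL_ARCH
from gguf.tensor_mapping import get_tensor_name_map

# Hypothetical lookup: resolve a Hugging Face tensor name to its GGUF name,
# tolerating a trailing ".weight"/".bias" suffix.
tmap = get_tensor_name_map(MODEL_ARCH.LLAMA, n_blocks=32)
gguf_name = tmap.get_name(
    "model.layers.0.self_attn.q_proj.weight", try_suffixes=(".weight", ".bias")
)
print(gguf_name)  # expected to be something like "blk.0.attn_q.weight"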
@ -0,0 +1,316 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Literal

import os
import json


def fill_templated_filename(filename: str, output_type: str | None) -> str:
    # Given a file name fill in any type templates e.g. 'some-model-name.{ftype}.gguf'
    ftype_lowercase: str = output_type.lower() if output_type is not None else ""
    ftype_uppercase: str = output_type.upper() if output_type is not None else ""
    return filename.format(
        ftype_lowercase,
        outtype=ftype_lowercase,
        ftype=ftype_lowercase,
        OUTTYPE=ftype_uppercase,
        FTYPE=ftype_uppercase,
    )


def model_weight_count_rounded_notation(
    model_params_count: int, min_digits: int = 2
) -> str:
    if model_params_count > 1e12:
        # Trillions Of Parameters
        scaled_model_params = model_params_count * 1e-12
        scale_suffix = "T"
    elif model_params_count > 1e9:
        # Billions Of Parameters
        scaled_model_params = model_params_count * 1e-9
        scale_suffix = "B"
    elif model_params_count > 1e6:
        # Millions Of Parameters
        scaled_model_params = model_params_count * 1e-6
        scale_suffix = "M"
    else:
        # Thousands Of Parameters
        scaled_model_params = model_params_count * 1e-3
        scale_suffix = "K"

    fix = max(min_digits - len(str(round(scaled_model_params)).lstrip("0")), 0)

    return f"{scaled_model_params:.{fix}f}{scale_suffix}"


def size_label(
    total_params: int, shared_params: int, expert_params: int, expert_count: int
) -> str:

    if expert_count > 0:
        pretty_size = model_weight_count_rounded_notation(
            abs(shared_params) + abs(expert_params), min_digits=2
        )
        size_class = f"{expert_count}x{pretty_size}"
    else:
        size_class = model_weight_count_rounded_notation(
            abs(total_params), min_digits=2
        )

    return size_class


def naming_convention(
    model_name: str | None,
    base_name: str | None,
    finetune_string: str | None,
    version_string: str | None,
    size_label: str | None,
    output_type: str | None,
    model_type: Literal["vocab", "LoRA"] | None = None,
) -> str:
    # Reference: https://github.com/ggml-org/ggml/blob/master/docs/gguf.md#gguf-naming-convention

    if base_name is not None:
        name = base_name.strip().replace(" ", "-").replace("/", "-")
    elif model_name is not None:
        name = model_name.strip().replace(" ", "-").replace("/", "-")
    else:
        name = "ggml-model"

    parameters = f"-{size_label}" if size_label is not None else ""

    finetune = (
        f"-{finetune_string.strip().replace(' ', '-')}"
        if finetune_string is not None
        else ""
    )

    version = (
        f"-{version_string.strip().replace(' ', '-')}"
        if version_string is not None
        else ""
    )

    encoding = (
        f"-{output_type.strip().replace(' ', '-').upper()}"
        if output_type is not None
        else ""
    )

    kind = f"-{model_type.strip().replace(' ', '-')}" if model_type is not None else ""

    return f"{name}{parameters}{finetune}{version}{encoding}{kind}"
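As a rough illustration of what naming_convention produces (every value below is made up):

# Hypothetical call; all arguments are illustrative.
print(
    naming_convention(
        model_name=None,
        base_name="Hermes 2 Pro",
        finetune_string="Instruct",
        version_string="v0.1",
        size_label="8B",
        output_type="q4_k_m",
    )
)  # -> "Hermes-2-Pro-8B-Instruct-v0.1-Q4_K_M"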
@dataclass
class RemoteTensor:
    dtype: str
    shape: tuple[int, ...]
    offset_start: int
    size: int
    url: str

    def data(self) -> bytearray:
        # TODO: handle request errors (maybe with limited retries?)
        # NOTE: using a bytearray, otherwise PyTorch complains the buffer is not writeable
        data = bytearray(
            SafetensorRemote.get_data_by_range(
                url=self.url, start=self.offset_start, size=self.size
            )
        )
        return data


class SafetensorRemote:
    """
    Utility class to handle remote safetensor files.
    This class is designed to work with Hugging Face model repositories.

    Example (one model has single safetensor file, the other has multiple):
        for model_id in ["ngxson/TEST-Tiny-Llama4", "Qwen/Qwen2.5-7B-Instruct"]:
            tensors = SafetensorRemote.get_list_tensors_hf_model(model_id)
            print(tensors)

    Example reading tensor data:
        tensors = SafetensorRemote.get_list_tensors_hf_model(model_id)
        for name, meta in tensors.items():
            dtype, shape, offset_start, size, remote_safetensor_url = meta
            # read the tensor data
            data = SafetensorRemote.get_data_by_range(remote_safetensor_url, offset_start, size)
            print(data)
    """

    BASE_DOMAIN = "https://huggingface.co"
    ALIGNMENT = 8  # bytes

    @classmethod
    def get_list_tensors_hf_model(cls, model_id: str) -> dict[str, RemoteTensor]:
        """
        Get list of tensors from a Hugging Face model repository.

        Returns a dictionary of tensor names and their metadata.
        Each tensor is represented as a tuple of (dtype, shape, offset_start, size, remote_safetensor_url)
        """
        # case 1: model has only one single model.safetensor file
        is_single_file = cls.check_file_exist(
            f"{cls.BASE_DOMAIN}/{model_id}/resolve/main/model.safetensors"
        )
        if is_single_file:
            url = f"{cls.BASE_DOMAIN}/{model_id}/resolve/main/model.safetensors"
            return cls.get_list_tensors(url)

        # case 2: model has multiple files
        index_url = (
            f"{cls.BASE_DOMAIN}/{model_id}/resolve/main/model.safetensors.index.json"
        )
        is_multiple_files = cls.check_file_exist(index_url)
        if is_multiple_files:
            # read the index file
            index_data = cls.get_data_by_range(index_url, 0)
            index_str = index_data.decode("utf-8")
            index_json = json.loads(index_str)
            assert (
                index_json.get("weight_map") is not None
            ), "weight_map not found in index file"
            weight_map = index_json["weight_map"]
            # get the list of files
            all_files = list(set(weight_map.values()))
            all_files.sort()  # make sure we load shard files in order
            # get the list of tensors
            tensors: dict[str, RemoteTensor] = {}
            for file in all_files:
                url = f"{cls.BASE_DOMAIN}/{model_id}/resolve/main/{file}"
                for key, val in cls.get_list_tensors(url).items():
                    tensors[key] = val
            return tensors

        raise ValueError(f"Model {model_id} does not have any safetensor files")

    @classmethod
    def get_list_tensors(cls, url: str) -> dict[str, RemoteTensor]:
        """
        Get list of tensors from a remote safetensor file.

        Returns a dictionary of tensor names and their metadata.
        Each tensor is represented as a tuple of (dtype, shape, offset_start, size)
        """
        metadata, data_start_offset = cls.get_metadata(url)
        res: dict[str, RemoteTensor] = {}

        for name, meta in metadata.items():
            if name == "__metadata__":
                continue
            if not isinstance(meta, dict):
                raise ValueError(f"Invalid metadata for tensor '{name}': {meta}")
            try:
                dtype = meta["dtype"]
                shape = meta["shape"]
                offset_start_relative, offset_end_relative = meta["data_offsets"]
                size = offset_end_relative - offset_start_relative
                offset_start = data_start_offset + offset_start_relative
                res[name] = RemoteTensor(
                    dtype=dtype,
                    shape=tuple(shape),
                    offset_start=offset_start,
                    size=size,
                    url=url,
                )
            except KeyError as e:
                raise ValueError(
                    f"Missing key in metadata for tensor '{name}': {e}, meta = {meta}"
                )

        return res

    @classmethod
    def get_metadata(cls, url: str) -> tuple[dict, int]:
        """
        Get JSON metadata from a remote safetensor file.

        Returns tuple of (metadata, data_start_offset)
        """
        # Request first 5MB of the file (hopefully enough for metadata)
        read_size = 5 * 1024 * 1024
        raw_data = cls.get_data_by_range(url, 0, read_size)

        # Parse header
        # First 8 bytes contain the metadata length as u64 little-endian
        if len(raw_data) < 8:
            raise ValueError("Not enough data to read metadata size")
        metadata_length = int.from_bytes(raw_data[:8], byteorder="little")

        # Calculate the data start offset
        data_start_offset = 8 + metadata_length
        alignment = SafetensorRemote.ALIGNMENT
        if data_start_offset % alignment != 0:
            data_start_offset += alignment - (data_start_offset % alignment)

        # Check if we have enough data to read the metadata
        if len(raw_data) < 8 + metadata_length:
            raise ValueError(
                f"Could not read complete metadata. Need {8 + metadata_length} bytes, got {len(raw_data)}"
            )

        # Extract metadata bytes and parse as JSON
        metadata_bytes = raw_data[8 : 8 + metadata_length]
        metadata_str = metadata_bytes.decode("utf-8")
        try:
            metadata = json.loads(metadata_str)
            return metadata, data_start_offset
        except json.JSONDecodeError as e:
            raise ValueError(f"Failed to parse safetensor metadata as JSON: {e}")

    @classmethod
    def get_data_by_range(cls, url: str, start: int, size: int = -1) -> bytes:
        """
        Get raw byte data from a remote file by range.
        If size is not specified, it will read the entire file.
        """
        import requests
        from urllib.parse import urlparse

        parsed_url = urlparse(url)
        if not parsed_url.scheme or not parsed_url.netloc:
            raise ValueError(f"Invalid URL: {url}")

        headers = cls._get_request_headers()
        if size > -1:
            headers["Range"] = f"bytes={start}-{start + size}"
        response = requests.get(url, allow_redirects=True, headers=headers)
        response.raise_for_status()

        # Get raw byte data
        return response.content[:size]

    @classmethod
    def check_file_exist(cls, url: str) -> bool:
        """
        Check if a file exists at the given URL.
        Returns True if the file exists, False otherwise.
        """
        import requests
        from urllib.parse import urlparse

        parsed_url = urlparse(url)
        if not parsed_url.scheme or not parsed_url.netloc:
            raise ValueError(f"Invalid URL: {url}")

        try:
            headers = cls._get_request_headers()
            headers["Range"] = "bytes=0-0"
            response = requests.head(url, allow_redirects=True, headers=headers)
            # Success (2xx) or redirect (3xx)
            return 200 <= response.status_code < 400
        except requests.RequestException:
            return False

    @classmethod
    def _get_request_headers(cls) -> dict[str, str]:
        """Prepare common headers for requests."""
        headers = {"User-Agent": "convert_hf_to_gguf"}
        if os.environ.get("HF_TOKEN"):
            headers["Authorization"] = f"Bearer {os.environ['HF_TOKEN']}"
        return headers
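For reference, get_metadata relies on the safetensors on-disk layout: an 8-byte little-endian length prefix, a JSON header of that length, then the tensor data. A minimal local-file sketch of the same parsing (illustrative, not part of the diff):

import json
import struct

def read_safetensors_header(path: str) -> tuple[dict, int]:
    # First 8 bytes: u64 little-endian length of the JSON header.
    with open(path, "rb") as f:
        (header_len,) = struct.unpack("<Q", f.read(8))
        header = json.loads(f.read(header_len).decode("utf-8"))
    return header, 8 + header_len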
@ -157,8 +157,36 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool:
            tokenizer = json.load(f)
        if self.load_merges:
            merges = tokenizer.get("model", {}).get("merges")
            if isinstance(merges, list) and merges and isinstance(merges[0], str):
                self.merges = merges
            if isinstance(merges, list) and merges:
                if isinstance(merges[0], str):
                    self.merges = merges
                elif (
                    isinstance(merges[0], list)
                    and len(merges[0]) == 2
                    and isinstance(merges[0][0], str)
                ):
                    # New format since transformers 4.45 to support spaces in merges
                    # ref: https://github.com/ggml-org/llama.cpp/issues/9692
                    # TODO: internally store as the new format instead of converting to old
                    if any(" " in s for pair in merges for s in pair):
                        logger.warning(
                            f'Spaces in merges detected, encoding as {chr(ord(" ") + 256)!r}'
                        )
                    self.merges = [
                        " ".join(
                            [
                                # ensure the spaces are properly encoded
                                "".join(
                                    chr(ord(c) + 256) if c == " " else c
                                    for c in part
                                )
                                for part in pair
                            ]
                        )
                        for pair in merges
                    ]
                else:
                    raise ValueError("Unknown tokenizer merges format")
            added_tokens = tokenizer.get("added_tokens", {})
        else:
            added_tokens = {}
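To make the space re-encoding above concrete, a small illustration (the merge pair is made up):

# Hypothetical merge pair from a transformers >= 4.45 tokenizer.json.
pair = ["Ġ", "a b"]
encoded = " ".join(
    "".join(chr(ord(c) + 256) if c == " " else c for c in part) for part in pair
)
print(encoded)  # "Ġ aĠb" -- the space inside "a b" becomes chr(ord(" ") + 256), i.e. "Ġ"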
@ -167,7 +195,12 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool:
|
|||
return True
|
||||
with open(tokenizer_config_file, encoding="utf-8") as f:
|
||||
tokenizer_config = json.load(f)
|
||||
chat_template = tokenizer_config.get("chat_template")
|
||||
chat_template_alt = None
|
||||
chat_template_file = path / "chat_template.json"
|
||||
if chat_template_file.is_file():
|
||||
with open(chat_template_file, encoding="utf-8") as f:
|
||||
chat_template_alt = json.load(f).get("chat_template")
|
||||
chat_template = tokenizer_config.get("chat_template", chat_template_alt)
|
||||
if chat_template is None or isinstance(chat_template, (str, list)):
|
||||
self.chat_template = chat_template
|
||||
else:
|
||||
|
@ -224,11 +257,8 @@ class Vocab(BaseVocab, Protocol):
|
|||
added_tokens_list: list[str]
|
||||
fname_tokenizer: Path
|
||||
|
||||
def __init__(self, base_path: Path):
|
||||
...
|
||||
|
||||
def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
|
||||
...
|
||||
def __init__(self, base_path: Path): ...
|
||||
def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: ...
|
||||
|
||||
|
||||
class NoVocab(BaseVocab):
|
|
@ -0,0 +1,123 @@
import os
import re
import sys
from typing import Any, IO, List, TextIO, Union

from PySide6.QtWidgets import (
    QMessageBox,
)

from Localizations import (
    DOTENV_FILE_NOT_FOUND,
    COULD_NOT_PARSE_LINE,
    ERROR_LOADING_DOTENV,
    AUTOGGUF_VERSION,
)


def verify_gguf(file_path) -> bool:
    try:
        with open(file_path, "rb") as f:
            magic = f.read(4)
            return magic == b"GGUF"
    except (FileNotFoundError, IOError, OSError):
        return False


def process_args(args: List[str]) -> bool:
    try:
        i = 1
        while i < len(args):
            key = (
                args[i][2:].replace("-", "_").upper()
            )  # Strip the first two '--' and replace '-' with '_'
            if i + 1 < len(args) and not args[i + 1].startswith("--"):
                value = args[i + 1]
                i += 2
            else:
                value = "enabled"
                i += 1
            os.environ[key] = value
        return True
    except Exception:
        return False


def load_dotenv(self=Any) -> None:
    if not os.path.isfile(".env"):
        self.logger.warning(DOTENV_FILE_NOT_FOUND)
        return

    try:
        with open(".env") as f:
            for line in f:
                # Strip leading/trailing whitespace
                line = line.strip()

                # Ignore comments and empty lines
                if not line or line.startswith("#"):
                    continue

                # Match key-value pairs (unquoted and quoted values)
                match = re.match(r"^([^=]+)=(.*)$", line)
                if not match:
                    self.logger.warning(COULD_NOT_PARSE_LINE.format(line))
                    continue

                key, value = match.groups()

                # Remove any surrounding quotes from the value
                if value.startswith(("'", '"')) and value.endswith(("'", '"')):
                    value = value[1:-1]

                # Decode escape sequences
                value = bytes(value, "utf-8").decode("unicode_escape")

                # Set the environment variable
                os.environ[key.strip()] = value.strip()
    except Exception as e:
        self.logger.error(ERROR_LOADING_DOTENV.format(e))


def show_about(self) -> None:
    about_text = f"""AutoGGUF

Version: {AUTOGGUF_VERSION}

A tool for managing and converting GGUF models.
This application is licensed under the Apache License 2.0.
Copyright (c) 2024-2025 leafspark.
It also utilizes llama.cpp, licensed under the MIT License.
Copyright (c) 2023-2025 The ggml authors."""
    QMessageBox.about(self, "About AutoGGUF", about_text)


def ensure_directory(path) -> None:
    if not os.path.exists(path):
        os.makedirs(path)


def open_file_safe(file_path, mode="r") -> IO[Any]:
    encodings = ["utf-8", "latin-1", "ascii", "utf-16"]
    for encoding in encodings:
        try:
            return open(file_path, mode, encoding=encoding)
        except UnicodeDecodeError:
            continue
    raise ValueError(
        f"Unable to open file {file_path} with any of the encodings: {encodings}"
    )


def resource_path(relative_path) -> Union[str, str, bytes]:
    if hasattr(sys, "_MEIPASS"):
        # PyInstaller path
        base_path = sys._MEIPASS
    elif "__compiled__" in globals():
        # Nuitka path
        base_path = os.path.dirname(sys.executable)
    else:
        # Regular Python path
        base_path = os.path.abspath(".")

    return os.path.join(base_path, relative_path)
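A quick sketch of how process_args maps CLI flags onto environment variables (the flags here are hypothetical):

# Hypothetical invocation: "--key value" pairs become env vars,
# and flags without a value default to "enabled".
import os
from globals import process_args  # assuming src/globals.py is importable as "globals"

process_args(["autogguf", "--model-dir-name", "models", "--verbose"])
print(os.environ["MODEL_DIR_NAME"])  # "models"
print(os.environ["VERBOSE"])  # "enabled"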
@ -1,81 +0,0 @@
|
|||
import os
|
||||
import sys
|
||||
import psutil
|
||||
import subprocess
|
||||
import time
|
||||
import signal
|
||||
import json
|
||||
import platform
|
||||
import requests
|
||||
import zipfile
|
||||
from datetime import datetime
|
||||
from PySide6.QtWidgets import (
|
||||
QApplication,
|
||||
QMainWindow,
|
||||
QVBoxLayout,
|
||||
QHBoxLayout,
|
||||
QWidget,
|
||||
QPushButton,
|
||||
QListWidget,
|
||||
QLineEdit,
|
||||
QLabel,
|
||||
QFileDialog,
|
||||
QProgressBar,
|
||||
QComboBox,
|
||||
QTextEdit,
|
||||
QCheckBox,
|
||||
QGroupBox,
|
||||
QFormLayout,
|
||||
QScrollArea,
|
||||
QSlider,
|
||||
QSpinBox,
|
||||
QListWidgetItem,
|
||||
QMessageBox,
|
||||
QDialog,
|
||||
QPlainTextEdit,
|
||||
QMenu,
|
||||
)
|
||||
from PySide6.QtCore import QTimer, Signal, QThread, Qt, QSize
|
||||
from PySide6.QtGui import QCloseEvent, QAction
|
||||
|
||||
from Localizations import *
|
||||
|
||||
|
||||
def show_about(self):
|
||||
about_text = (
|
||||
"AutoGGUF\n\n"
|
||||
f"Version: {AUTOGGUF_VERSION}\n\n"
|
||||
"A tool for managing and converting GGUF models."
|
||||
)
|
||||
QMessageBox.about(self, "About AutoGGUF", about_text)
|
||||
|
||||
|
||||
def ensure_directory(path):
|
||||
if not os.path.exists(path):
|
||||
os.makedirs(path)
|
||||
|
||||
|
||||
def open_file_safe(file_path, mode="r"):
|
||||
encodings = ["utf-8", "latin-1", "ascii", "utf-16"]
|
||||
for encoding in encodings:
|
||||
try:
|
||||
return open(file_path, mode, encoding=encoding)
|
||||
except UnicodeDecodeError:
|
||||
continue
|
||||
raise ValueError(
|
||||
f"Unable to open file {file_path} with any of the encodings: {encodings}"
|
||||
)
|
||||
|
||||
|
||||
def resource_path(relative_path):
|
||||
if hasattr(sys, "_MEIPASS"):
|
||||
# PyInstaller path
|
||||
base_path = sys._MEIPASS
|
||||
elif "__compiled__" in globals():
|
||||
# Nuitka path
|
||||
base_path = os.path.dirname(sys.executable)
|
||||
else:
|
||||
# Regular Python path
|
||||
base_path = os.path.abspath(".")
|
||||
|
||||
return os.path.join(base_path, relative_path)
|
|
@ -12,11 +12,11 @@
|
|||
from QuantizationThread import QuantizationThread
|
||||
from TaskListItem import TaskListItem
|
||||
from error_handling import handle_error, show_error
|
||||
from imports_and_globals import ensure_directory
|
||||
from globals import ensure_directory
|
||||
from Localizations import *
|
||||
|
||||
|
||||
def export_lora(self):
|
||||
def export_lora(self) -> None:
|
||||
self.logger.info(STARTING_LORA_EXPORT)
|
||||
try:
|
||||
model_path = self.export_lora_model.text()
|
||||
|
@ -98,7 +98,13 @@ def export_lora(self):
|
|||
show_error(self.logger, ERROR_STARTING_LORA_EXPORT.format(str(e)))
|
||||
|
||||
|
||||
def delete_lora_adapter_item(self, adapter_widget):
|
||||
def lora_conversion_finished(self, thread) -> None:
|
||||
self.logger.info(LORA_CONVERSION_FINISHED)
|
||||
if thread in self.quant_threads:
|
||||
self.quant_threads.remove(thread)
|
||||
|
||||
|
||||
def delete_lora_adapter_item(self, adapter_widget) -> None:
|
||||
self.logger.info(DELETING_LORA_ADAPTER)
|
||||
# Find the QListWidgetItem containing the adapter_widget
|
||||
for i in range(self.export_lora_adapters.count()):
|
||||
|
@ -108,14 +114,14 @@ def delete_lora_adapter_item(self, adapter_widget):
|
|||
break
|
||||
|
||||
|
||||
def browse_export_lora_model(self):
|
||||
def browse_export_lora_model(self) -> None:
|
||||
self.logger.info(BROWSING_FOR_EXPORT_LORA_MODEL_FILE)
|
||||
model_file, _ = QFileDialog.getOpenFileName(self, SELECT_MODEL_FILE, "", GGUF_FILES)
|
||||
if model_file:
|
||||
self.export_lora_model.setText(os.path.abspath(model_file))
|
||||
|
||||
|
||||
def browse_export_lora_output(self):
|
||||
def browse_export_lora_output(self) -> None:
|
||||
self.logger.info(BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE)
|
||||
output_file, _ = QFileDialog.getSaveFileName(
|
||||
self, SELECT_OUTPUT_FILE, "", GGUF_FILES
|
||||
|
@ -124,7 +130,7 @@ def browse_export_lora_output(self):
|
|||
self.export_lora_output.setText(os.path.abspath(output_file))
|
||||
|
||||
|
||||
def add_lora_adapter(self):
|
||||
def add_lora_adapter(self) -> None:
|
||||
self.logger.info(ADDING_LORA_ADAPTER)
|
||||
adapter_path, _ = QFileDialog.getOpenFileName(
|
||||
self, SELECT_LORA_ADAPTER_FILE, "", LORA_FILES
|
||||
|
@ -154,7 +160,7 @@ def add_lora_adapter(self):
|
|||
self.export_lora_adapters.setItemWidget(list_item, adapter_widget)
|
||||
|
||||
|
||||
def convert_lora(self):
|
||||
def convert_lora(self) -> None:
|
||||
self.logger.info(STARTING_LORA_CONVERSION)
|
||||
try:
|
||||
lora_input_path = self.lora_input.text()
|
||||
|
@ -179,7 +185,12 @@ def convert_lora(self):
|
|||
raise ValueError(BASE_MODEL_PATH_REQUIRED)
|
||||
command.extend(["--base", base_model_path])
|
||||
else: # Use old GGML parameters for GGML
|
||||
command = ["python", "src/convert_lora_to_ggml.py", lora_input_path]
|
||||
command = [
|
||||
"python",
|
||||
"src/convert_lora_to_ggml.py",
|
||||
lora_input_path,
|
||||
lora_output_path,
|
||||
]
|
||||
|
||||
logs_path = self.logs_input.text()
|
||||
ensure_directory(logs_path)
|
||||
|
@ -203,11 +214,7 @@ def convert_lora(self):
|
|||
self.task_list.setItemWidget(list_item, task_item)
|
||||
|
||||
thread.status_signal.connect(task_item.update_status)
|
||||
thread.finished_signal.connect(
|
||||
lambda: self.lora_conversion_finished(
|
||||
thread, lora_input_path, lora_output_path
|
||||
)
|
||||
)
|
||||
thread.finished_signal.connect(lambda: self.lora_conversion_finished(thread))
|
||||
thread.error_signal.connect(
|
||||
lambda err: handle_error(self.logger, err, task_item)
|
||||
)
|
||||
|
|
207
src/main.py
|
@ -1,62 +1,195 @@
|
|||
import os
|
||||
import sys
|
||||
import threading
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
from PySide6.QtCore import QTimer
|
||||
from PySide6.QtWidgets import QApplication
|
||||
from fastapi import FastAPI, Query, Depends, HTTPException, Security
|
||||
from fastapi.security.api_key import APIKeyHeader
|
||||
from pydantic import BaseModel, Field
|
||||
from uvicorn import Config, Server
|
||||
|
||||
from AutoGGUF import AutoGGUF
|
||||
from flask import Flask, jsonify
|
||||
from Localizations import AUTOGGUF_VERSION
|
||||
|
||||
server = Flask(__name__)
|
||||
app = FastAPI(
|
||||
title="AutoGGUF",
|
||||
description="API for AutoGGUF - automatically quant GGUF models",
|
||||
version=AUTOGGUF_VERSION,
|
||||
license_info={
|
||||
"name": "Apache 2.0",
|
||||
"url": "https://raw.githubusercontent.com/leafspark/AutoGGUF/main/LICENSE",
|
||||
},
|
||||
)
|
||||
|
||||
# Global variable to hold the window reference
|
||||
window = None
|
||||
|
||||
|
||||
def main():
|
||||
@server.route("/v1/models", methods=["GET"])
|
||||
def models():
|
||||
if window:
|
||||
return jsonify({"models": window.get_models_data()})
|
||||
return jsonify({"models": []})
|
||||
class ModelType(str, Enum):
|
||||
single = "single"
|
||||
sharded = "sharded"
|
||||
|
||||
@server.route("/v1/tasks", methods=["GET"])
|
||||
def tasks():
|
||||
if window:
|
||||
return jsonify({"tasks": window.get_tasks_data()})
|
||||
return jsonify({"tasks": []})
|
||||
|
||||
@server.route("/v1/health", methods=["GET"])
|
||||
def ping():
|
||||
return jsonify({"status": "alive"})
|
||||
class Model(BaseModel):
|
||||
name: str = Field(..., description="Name of the model")
|
||||
type: str = Field(..., description="Type of the model")
|
||||
path: str = Field(..., description="Path to the model file")
|
||||
size: Optional[int] = Field(None, description="Size of the model in bytes")
|
||||
|
||||
@server.route("/v1/backends", methods=["GET"])
|
||||
def get_backends():
|
||||
backends = []
|
||||
class Config:
|
||||
json_schema_extra = {
|
||||
"example": {
|
||||
"name": "Llama-3.1-8B-Instruct.fp16.gguf",
|
||||
"type": "single",
|
||||
"path": "Llama-3.1-8B-Instruct.fp16.gguf",
|
||||
"size": 13000000000,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class Task(BaseModel):
|
||||
# id: str = Field(..., description="Unique identifier for the task")
|
||||
status: str = Field(..., description="Current status of the task")
|
||||
progress: float = Field(..., description="Progress of the task as a percentage")
|
||||
|
||||
class Config:
|
||||
json_json_schema_extra = {
|
||||
"example": {"id": "task_123", "status": "running", "progress": 75.5}
|
||||
}
|
||||
|
||||
|
||||
class Backend(BaseModel):
|
||||
name: str = Field(..., description="Name of the backend")
|
||||
path: str = Field(..., description="Path to the backend executable")
|
||||
|
||||
|
||||
class Plugin(BaseModel):
|
||||
name: str = Field(..., description="Name of the plugin")
|
||||
version: str = Field(..., description="Version of the plugin")
|
||||
description: str = Field(..., description="Description of the plugin")
|
||||
author: str = Field(..., description="Author of the plugin")
|
||||
|
||||
|
||||
# API Key configuration
|
||||
API_KEY_NAME = "Authorization"
|
||||
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
|
||||
|
||||
|
||||
def get_api_key(
|
||||
api_key_header: str = Security(api_key_header),
|
||||
) -> Optional[str]:
|
||||
api_key_env = os.getenv("AUTOGGUF_SERVER_API_KEY")
|
||||
if not api_key_env:
|
||||
return None # No API key restriction if not set
|
||||
|
||||
api_keys = [
|
||||
key.strip() for key in api_key_env.split(",") if key.strip()
|
||||
] # Split by comma and strip whitespace
|
||||
|
||||
if api_key_header and api_key_header.startswith("Bearer "):
|
||||
api_key = api_key_header[len("Bearer ") :]
|
||||
if api_key in api_keys:
|
||||
return api_key
|
||||
|
||||
raise HTTPException(status_code=403, detail="Could not validate API key")
|
||||
|
||||
|
||||
@app.get(
    "/v1/models",
    response_model=List[Model],
    tags=["Models"],
    dependencies=[Depends(get_api_key)],
)
async def get_models(
    type: Optional[ModelType] = Query(None, description="Filter models by type")
) -> List[Model]:
    if window:
        models = window.get_models_data()
        if type:
            models = [m for m in models if m["type"] == type]

        return [Model(**m) for m in models]
    return []


@app.get(
    "/v1/tasks",
    response_model=List[Task],
    tags=["Tasks"],
    dependencies=[Depends(get_api_key)],
)
async def get_tasks() -> List[Task]:
    if window:
        return window.get_tasks_data()
    return []


@app.get("/v1/health", tags=["System"], dependencies=[Depends(get_api_key)])
async def health_check() -> dict:
    return {"status": "alive"}


@app.get(
    "/v1/backends",
    response_model=List[Backend],
    tags=["System"],
    dependencies=[Depends(get_api_key)],
)
async def get_backends() -> List[Backend]:
    backends = []
    if window:
        for i in range(window.backend_combo.count()):
            backends.append(
                {
                    "name": window.backend_combo.itemText(i),
                    "path": window.backend_combo.itemData(i),
                }
                Backend(
                    name=window.backend_combo.itemText(i),
                    path=window.backend_combo.itemData(i),
                )
            )
    return jsonify({"backends": backends})
    return backends

def run_flask():
    if os.environ.get("AUTOGGUF_SERVER", "").lower() == "true":
        server.run(
            host="0.0.0.0",
            port=int(os.environ.get("AUTOGGUF_SERVER_PORT", 5000)),
            debug=False,
            use_reloader=False,
        )

    app = QApplication(sys.argv)
    window = AutoGGUF()
@app.get(
    "/v1/plugins",
    response_model=List[Plugin],
    tags=["System"],
    dependencies=[Depends(get_api_key)],
)
async def get_plugins() -> List[Plugin]:
    if window:
        return [
            Plugin(**plugin_data["data"]) for plugin_data in window.plugins.values()
        ]
    return []


def run_uvicorn() -> None:
    if os.environ.get("AUTOGGUF_SERVER", "").lower() == "enabled":
        config = Config(
            app=app,
            host="127.0.0.1",
            port=int(os.environ.get("AUTOGGUF_SERVER_PORT", 7001)),
            log_level="info",
        )
        server = Server(config)
        server.run()


def main() -> None:
    global window
    qt_app = QApplication(sys.argv)
    window = AutoGGUF(sys.argv)
    window.show()
    # Start Flask in a separate thread after a short delay

    # Start Uvicorn in a separate thread after a short delay
    timer = QTimer()
    timer.singleShot(
        100, lambda: threading.Thread(target=run_flask, daemon=True).start()
        100, lambda: threading.Thread(target=run_uvicorn, daemon=True).start()
    )
    sys.exit(app.exec())

    sys.exit(qt_app.exec())


if __name__ == "__main__":
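The new FastAPI endpoints above are guarded by get_api_key, which reads a comma-separated key list from AUTOGGUF_SERVER_API_KEY and expects an Authorization: Bearer header. A minimal client-side sketch, assuming the server was left on its default 127.0.0.1:7001 bind and that a key "my-key" was configured (host, port and key are illustrative, only the /v1/* routes and the Bearer scheme come from the code above):

import requests

# Hypothetical client call against a locally running AutoGGUF instance.
resp = requests.get(
    "http://127.0.0.1:7001/v1/models",
    headers={"Authorization": "Bearer my-key"},
    params={"type": "single"},  # optional filter handled by get_models()
)
resp.raise_for_status()
print(resp.json())

When AUTOGGUF_SERVER_API_KEY is unset, get_api_key returns None and the same request works without the Authorization header.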
@@ -1,10 +1,21 @@
import json

from PySide6.QtWidgets import QFileDialog, QMessageBox
from Localizations import *
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QApplication, QFileDialog, QMessageBox
from Localizations import (
    SAVING_PRESET,
    SAVE_PRESET,
    JSON_FILES,
    PRESET_SAVED,
    PRESET_SAVED_TO,
    LOADING_PRESET,
    LOAD_PRESET,
    PRESET_LOADED,
    PRESET_LOADED_FROM,
)


def save_preset(self):
def save_preset(self) -> None:
    self.logger.info(SAVING_PRESET)
    preset = {
        "quant_types": [item.text() for item in self.quant_type.selectedItems()],
@@ -25,20 +36,40 @@ def save_preset(self):
        "extra_arguments": self.extra_arguments.text(),
    }

    file_name, _ = QFileDialog.getSaveFileName(self, SAVE_PRESET, "", JSON_FILES)
    if file_name:
        with open(file_name, "w") as f:
            json.dump(preset, f, indent=4)
        QMessageBox.information(self, PRESET_SAVED, PRESET_SAVED_TO.format(file_name))
        self.logger.info(PRESET_SAVED_TO.format(file_name))
    if not QApplication.keyboardModifiers() & Qt.ShiftModifier:
        file_name, _ = QFileDialog.getSaveFileName(self, SAVE_PRESET, "", JSON_FILES)
        if file_name:
            with open(file_name, "w") as f:
                json.dump(preset, f, indent=4)
            QMessageBox.information(
                self, PRESET_SAVED, PRESET_SAVED_TO.format(file_name)
            )
            self.logger.info(PRESET_SAVED_TO.format(file_name))
    else:
        clipboard = QApplication.clipboard()
        preset_str = json.dumps(preset, indent=1)
        clipboard.setText(preset_str)
        QMessageBox.information(self, PRESET_SAVED, "Preset copied to clipboard")
        self.logger.info("Preset copied to clipboard")


def load_preset(self):
def load_preset(self) -> None:
    self.logger.info(LOADING_PRESET)
    file_name, _ = QFileDialog.getOpenFileName(self, LOAD_PRESET, "", JSON_FILES)
    if file_name:
        with open(file_name, "r") as f:
            preset = json.load(f)

    try:
        if QApplication.keyboardModifiers() & Qt.ShiftModifier:
            clipboard = QApplication.clipboard()
            preset = json.loads(clipboard.text())
            source = "clipboard"
        else:
            file_name, _ = QFileDialog.getOpenFileName(
                self, LOAD_PRESET, "", JSON_FILES
            )
            if not file_name:
                return
            with open(file_name, "r") as f:
                preset = json.load(f)
            source = file_name

        self.quant_type.clearSelection()
        for quant_type in preset.get("quant_types", []):
@@ -69,6 +100,19 @@ def load_preset(self):
            self.add_kv_override(override)

        QMessageBox.information(
            self, PRESET_LOADED, PRESET_LOADED_FROM.format(file_name)
            self,
            PRESET_LOADED,
            PRESET_LOADED_FROM.format(
                source
                if not QApplication.keyboardModifiers() & Qt.ShiftModifier
                else "clipboard"
            ),
        )
        self.logger.info(PRESET_LOADED_FROM.format(file_name))
        self.logger.info(PRESET_LOADED_FROM.format(source))

    except json.JSONDecodeError:
        QMessageBox.critical(self, "Error", "Invalid JSON in clipboard")
        self.logger.error("Failed to parse JSON from clipboard")
    except Exception as e:
        QMessageBox.critical(self, "Error", f"Failed to load preset: {str(e)}")
        self.logger.error(f"Failed to load preset: {str(e)}")
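Only two of the preset keys are visible in this hunk; a minimal sketch of the dictionary save_preset serializes (remaining fields elided by the diff are omitted, values are illustrative):

preset = {
    "quant_types": ["Q4_K_M", "Q5_K_M"],          # selected quantization types (illustrative)
    "extra_arguments": "--leave-output-tensor",    # assumed extra llama.cpp argument, for illustration
}

Holding Shift while clicking Save or Load swaps the file dialog for the clipboard, using this same JSON shape.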
@@ -0,0 +1,559 @@
import copy
import gc
import re
import sys
from typing import List
from typing import Optional, Tuple

import torch
import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer


# https://github.com/neuralmagic/AutoFP8


class BaseQuantizeConfig:
    """Configuration for model quantization.

    Args:
        quant_method: Type/precision of quantization method to use.
            At the moment, this is just "fp8" which specifically means
            the fp8_e4m3 format in pytorch.
        activation_scheme: Choice of either "dynamic" or "static" quantization
            of activations. If "static", then calibration samples are required
            during quantization to produce accurate per-tensor scales for
            activations of Linear modules.
        ignore_patterns: List of patterns used to ignore layers. If a string
            starts with "re:", then everything afterward is used as python
            regex style matching i.e. re.search(), for each Linear layer.
            By default, "re:.*lm_head" is included to ignore the embedding
            Linear layer usually at the end of decoder LLMs
        kv_cache_quant_targets: Tuple of Linear module names to target for
            calibration of the output scales for KV cache quantization.
            Usually, these should be `("k_proj", "v_proj")`.
    """

    def __init__(
        self,
        quant_method: str = "fp8",
        activation_scheme: str = "static",
        ignore_patterns: List[str] = ["re:.*lm_head"],
        kv_cache_quant_targets: Optional[Tuple[str]] = None,
    ):
        if quant_method != "fp8":
            raise ValueError("Only FP8 quantization is supported.")
        if activation_scheme not in ["static", "dynamic"]:
            raise ValueError(
                "Invalid activation_scheme. Choose either 'static' or 'dynamic'."
            )
        self.quant_method = quant_method
        self.activation_scheme = activation_scheme
        self.ignore_patterns = ignore_patterns
        self.kv_cache_quant_targets = kv_cache_quant_targets
        self.ignored_layers = []
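A minimal sketch of constructing this config for static activation quantization with KV-cache scale calibration, using the k_proj/v_proj targets suggested by the docstring above:

# Assumes BaseQuantizeConfig from this file; values mirror the documented defaults.
quantize_config = BaseQuantizeConfig(
    quant_method="fp8",
    activation_scheme="static",
    ignore_patterns=["re:.*lm_head"],
    kv_cache_quant_targets=("k_proj", "v_proj"),
)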
# Class responsible for quantizing weights
class FP8DynamicLinear(torch.nn.Module):
    def __init__(
        self,
        weight: torch.Tensor,
        weight_scale: torch.Tensor,
        bias: torch.nn.Parameter,
    ):
        super().__init__()
        self.weight = torch.nn.Parameter(weight, requires_grad=False)
        self.weight_scale = torch.nn.Parameter(weight_scale, requires_grad=False)
        self.bias = bias

    def forward(self, x):
        qinput, x_scale = per_tensor_quantize(x)
        output = fp8_gemm(
            A=qinput,
            A_scale=x_scale,
            B=self.weight,
            B_scale=self.weight_scale,
            bias=self.bias,
            out_dtype=x.dtype,
        )
        return output


# Module responsible for taking already quantized weights, and recording input scales (and possibly output scales)
# using an activation observer
class FP8StaticLinearQuantizer(torch.nn.Module):
    def __init__(
        self,
        weight: torch.Tensor,
        weight_scale: torch.Tensor,
        bias: torch.nn.Parameter,
        quantize_output: bool = False,
    ):
        super().__init__()
        self.weight = torch.nn.Parameter(weight, requires_grad=False)
        self.weight_scale = torch.nn.Parameter(weight_scale, requires_grad=False)
        self.bias = bias
        self.input_scale = None
        self.output_scale = None
        self.quantize_output = quantize_output

    def forward(self, x):
        qinput, x_input_scale = per_tensor_quantize(x)
        if self.input_scale is None:
            self.input_scale = torch.nn.Parameter(x_input_scale, requires_grad=False)
        elif x_input_scale > self.input_scale:
            self.input_scale = torch.nn.Parameter(x_input_scale, requires_grad=False)
        output = fp8_gemm(
            A=qinput,
            A_scale=self.input_scale,
            B=self.weight,
            B_scale=self.weight_scale,
            bias=self.bias,
            out_dtype=x.dtype,
        )

        # Optionally, quantize output and record scale
        if self.quantize_output:
            qoutput, output_scale = per_tensor_quantize(output)
            if self.output_scale is None:
                self.output_scale = torch.nn.Parameter(
                    output_scale, requires_grad=False
                )
            elif output_scale > self.output_scale:
                self.output_scale = torch.nn.Parameter(
                    output_scale, requires_grad=False
                )
            output = qoutput.to(output.dtype) * output_scale

        return output


# Module responsible for representing the final checkpoint representation
class FP8StaticLinear(torch.nn.Module):
    def __init__(
        self,
        weight: torch.nn.Parameter,
        weight_scale: torch.nn.Parameter,
        bias: torch.nn.Parameter,
        input_scale: torch.nn.Parameter,
        output_scale: Optional[torch.nn.Parameter] = None,
    ):
        super().__init__()
        self.weight = weight
        self.weight_scale = weight_scale
        self.bias = bias
        self.input_scale = input_scale
        self.output_scale = output_scale

    def forward(self, x):
        qinput = static_per_tensor_quantize(x, self.input_scale)
        output = fp8_gemm(
            A=qinput,
            A_scale=self.input_scale,
            B=self.weight,
            B_scale=self.weight_scale,
            bias=self.bias,
            out_dtype=x.dtype,
        )

        if self.output_scale:
            qoutput = static_per_tensor_quantize(output, self.output_scale)
            output = qoutput.to(output.dtype) * self.output_scale

        return output


class AutoFP8ForCausalLM:
    def __init__(
        self,
        model: AutoModelForCausalLM,
        quantize_config: BaseQuantizeConfig,
    ):
        self.model = model
        self.model_type = self.model.config.model_type
        self.config = self.model.config

        # Gather the Linear module names that we want to ignore
        quantize_config.ignored_layers = get_layers_to_ignore(
            self.model, quantize_config.ignore_patterns
        )

        if quantize_config.kv_cache_quant_targets:
            kv_cache_quant_layers = get_kv_cache_quant_layers(
                self.model, quantize_config.kv_cache_quant_targets
            )
            if len(kv_cache_quant_layers) == 0:
                raise ValueError(
                    f"Could not find any kv cache layers using kv_cache_quant_targets={quantize_config.kv_cache_quant_targets}, please fix your argument."
                )
            quantize_config.kv_cache_quant_layers = kv_cache_quant_layers

        self.quantize_config = quantize_config

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: str,
        quantize_config: BaseQuantizeConfig,
        **model_init_kwargs,
    ):
        """Load the un-quantized pretrained model"""

        def skip(*args, **kwargs):
            pass

        torch.nn.init.kaiming_uniform_ = skip
        torch.nn.init.uniform_ = skip
        torch.nn.init.normal_ = skip

        # Parameters related to loading from Hugging Face Hub
        cache_dir = model_init_kwargs.pop("cache_dir", None)
        force_download = model_init_kwargs.pop("force_download", False)
        resume_download = model_init_kwargs.pop("resume_download", False)
        proxies = model_init_kwargs.pop("proxies", None)
        local_files_only = model_init_kwargs.pop("local_files_only", False)
        use_auth_token = model_init_kwargs.pop("use_auth_token", None)
        revision = model_init_kwargs.pop("revision", None)
        subfolder = model_init_kwargs.pop("subfolder", "")
        commit_hash = model_init_kwargs.pop("_commit_hash", None)

        cached_file_kwargs = {
            "cache_dir": cache_dir,
            "force_download": force_download,
            "proxies": proxies,
            "resume_download": resume_download,
            "local_files_only": local_files_only,
            "use_auth_token": use_auth_token,
            "revision": revision,
            "subfolder": subfolder,
            "_commit_hash": commit_hash,
        }

        torch.cuda.empty_cache()

        # Important defaults
        if "torch_dtype" not in model_init_kwargs:
            model_init_kwargs["torch_dtype"] = "auto"

        if "device_map" not in model_init_kwargs:
            model_init_kwargs["device_map"] = "auto"

        merged_kwargs = {**model_init_kwargs, **cached_file_kwargs}
        print("Loading model with the following kwargs:", merged_kwargs)
        model = AutoModelForCausalLM.from_pretrained(
            pretrained_model_name_or_path, **merged_kwargs
        )

        model_config = model.config.to_dict()
        seq_len_keys = ["max_position_embeddings", "seq_length", "n_positions"]
        if any(k in model_config for k in seq_len_keys):
            for key in seq_len_keys:
                if key in model_config:
                    model.seqlen = model_config[key]
                    break
        else:
            print("Can't get model's sequence length, setting to 2048.")
            model.seqlen = 2048
        model.eval()

        return cls(model, quantize_config)

    def quantize(self, calibration_tokens: Optional[torch.Tensor] = None):

        # Always quantize the weights as they do not require calibration data
        quantize_weights(self.model, self.quantize_config)

        if self.quantize_config.activation_scheme == "static":
            assert (
                calibration_tokens is not None
            ), "Calibration tokens required for activation quantization"

            def _prepare_calibration_data(calibration_tokens):
                if hasattr(calibration_tokens, "input_ids"):
                    return calibration_tokens.input_ids
                return calibration_tokens

            quantize_activations(
                self.model,
                self.quantize_config,
                _prepare_calibration_data(calibration_tokens),
            )

    def save_quantized(self, save_dir):
        save_quantized_model(
            self.model,
            quant_config=self.quantize_config,
            save_dir=save_dir,
        )


def cleanup_memory():
    gc.collect()
    torch.cuda.empty_cache()


def per_tensor_quantize(tensor: torch.Tensor) -> Tuple[torch.Tensor, float]:
    """Quantize a tensor using per-tensor static scaling factor.
    Args:
        tensor: The input tensor.
    """
    finfo = torch.finfo(torch.float8_e4m3fn)
    # Calculate the scale as dtype max divided by absmax.
    # Since .abs() creates a new tensor, we use aminmax to get
    # the min and max first and then calculate the absmax.
    if tensor.numel() == 0:
        # Deal with empty tensors (triggered by empty MoE experts)
        min_val, max_val = (
            torch.tensor(-16.0, dtype=tensor.dtype),
            torch.tensor(16.0, dtype=tensor.dtype),
        )
    else:
        min_val, max_val = tensor.aminmax()
    amax = torch.maximum(min_val.abs(), max_val.abs())
    scale = finfo.max / amax.clamp(min=1e-12)
    # Scale and clamp the tensor to bring it to
    # the representative range of float8 data type
    # (as default cast is unsaturated)
    qweight = (tensor * scale).clamp(min=finfo.min, max=finfo.max)
    # Return both float8 data and the inverse scale (as float),
    # as both are required as inputs to torch._scaled_mm
    qweight = qweight.to(torch.float8_e4m3fn)
    scale = scale.float().reciprocal()
    return qweight, scale


def static_per_tensor_quantize(tensor: torch.Tensor, inv_scale: float) -> torch.Tensor:
    finfo = torch.finfo(torch.float8_e4m3fn)
    qweight = (tensor / inv_scale).clamp(min=finfo.min, max=finfo.max)
    return qweight.to(torch.float8_e4m3fn)
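A small numeric illustration of the scaling performed above, assuming the usual float8_e4m3fn maximum of 448.0 (the tensor values are made up):

import torch

# Per-tensor scaling: amax = 2.0, so scale = 448.0 / 2.0 = 224.0 and the
# returned inverse scale is 1 / 224.0 (~0.00446).
t = torch.tensor([[0.5, -2.0], [1.0, 0.25]])
q, inv_scale = per_tensor_quantize(t)
print(inv_scale)                          # ~0.004464
print(q.to(torch.float32) * inv_scale)    # approximately recovers t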
def fp8_gemm(A, A_scale, B, B_scale, bias, out_dtype):
    if A.numel() == 0:
        # Deal with empty tensors (triggered by empty MoE experts)
        return torch.empty(size=(0, B.shape[0]), dtype=out_dtype, device=A.device)

    # TODO: Disable native fp8 gemm for now, always just dequantize
    # native_fp8_support = (
    #     torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 9)
    # )
    native_fp8_support = False
    if native_fp8_support:
        need_reshape = A.dim() == 3
        if need_reshape:
            batch_size = A.shape[0]
            A_input = A.reshape(-1, A.shape[-1])
        else:
            batch_size = None
            A_input = A
        output, _ = torch._scaled_mm(
            A_input,
            B.t(),
            out_dtype=out_dtype,
            scale_a=A_scale,
            scale_b=B_scale,
            bias=bias,
        )
        if need_reshape:
            output = output.reshape(
                batch_size, output.shape[0] // batch_size, output.shape[1]
            )
    else:
        output = torch.nn.functional.linear(
            A.to(out_dtype) * A_scale,
            B.to(out_dtype) * B_scale.to(out_dtype),
            bias=bias,
        )
    return output


def replace_module(model: AutoModelForCausalLM, name: str, new_module: torch.nn.Module):
    if "." in name:
        parent_name = name.rsplit(".", 1)[0]
        child_name = name[len(parent_name) + 1 :]
        parent = model.get_submodule(parent_name)
    else:
        parent_name = ""
        parent = model
        child_name = name
    setattr(parent, child_name, new_module)


def quantize_weights(
    model: AutoModelForCausalLM,
    quantize_config: BaseQuantizeConfig,
):
    named_modules = list(model.named_modules())
    for name, linear in tqdm.tqdm(named_modules, desc="Quantizing weights"):
        if (
            not isinstance(linear, torch.nn.Linear)
            or name in quantize_config.ignored_layers
        ):
            continue
        quant_weight, weight_scale = per_tensor_quantize(linear.weight)
        bias = copy.deepcopy(linear.bias) if linear.bias is not None else None
        quant_linear = FP8DynamicLinear(
            weight=quant_weight, weight_scale=weight_scale, bias=bias
        )
        replace_module(model, name, quant_linear)
        del linear.weight
        del linear.bias
        del linear
    cleanup_memory()


def quantize_activations(
    model: AutoModelForCausalLM,
    quantize_config: BaseQuantizeConfig,
    calibration_tokens,
):
    # Replace weight quantizer with a dynamic activation quantizer observer
    for name, dynamic_quant_linear in model.named_modules():
        if (
            not isinstance(dynamic_quant_linear, FP8DynamicLinear)
            or name in quantize_config.ignored_layers
        ):
            continue
        quantizer = FP8StaticLinearQuantizer(
            weight=dynamic_quant_linear.weight,
            weight_scale=dynamic_quant_linear.weight_scale,
            bias=dynamic_quant_linear.bias,
            quantize_output=(
                hasattr(quantize_config, "kv_cache_quant_layers")
                and name in quantize_config.kv_cache_quant_layers
            ),
        )
        replace_module(model, name, quantizer)
        del dynamic_quant_linear
    cleanup_memory()

    # Pass through calibration data to measure activation scales
    with torch.inference_mode():
        with tqdm.tqdm(
            total=calibration_tokens.shape[0], desc="Calibrating activation scales"
        ) as pbar:
            for row_idx in range(calibration_tokens.shape[0]):
                model(calibration_tokens[row_idx].reshape(1, -1))
                cleanup_memory()
                pbar.update(1)

    # Replace dynamic quantizer observer with StaticLinear for export
    for name, quantizer in model.named_modules():
        if (
            not isinstance(quantizer, FP8StaticLinearQuantizer)
            or name in quantize_config.ignored_layers
        ):
            continue
        static_proj = FP8StaticLinear(
            weight=quantizer.weight,
            weight_scale=quantizer.weight_scale,
            bias=quantizer.bias,
            input_scale=quantizer.input_scale,
            output_scale=quantizer.output_scale,
        )
        replace_module(model, name, static_proj)
        del quantizer
    cleanup_memory()

    # Post-process step for kv cache scales to take the k/v module
    # output_scale parameters, and store them in the parent attention
    # module as k_scale and v_scale
    if hasattr(quantize_config, "kv_cache_quant_layers"):
        # Assumes that list is ordered such that [layer0.k_proj, layer0.v_proj, layer1.k_proj, layer1.v_proj, ...]
        # so we make a list of tuples [(layer0.k_proj, layer0.v_proj), (layer1.k_proj, layer1.v_proj), ...]
        kv_proj_pairs = zip(*[iter(quantize_config.kv_cache_quant_layers)] * 2)
        for k_proj_name, v_proj_name in kv_proj_pairs:
            parent_module_name = ".".join(k_proj_name.split(".")[:-1])
            assert parent_module_name == ".".join(v_proj_name.split(".")[:-1])
            parent_module = dict(model.named_modules())[parent_module_name]

            k_proj = dict(model.named_modules())[k_proj_name]
            v_proj = dict(model.named_modules())[v_proj_name]

            parent_module.k_scale = torch.nn.Parameter(
                k_proj.output_scale, requires_grad=False
            )
            parent_module.v_scale = torch.nn.Parameter(
                v_proj.output_scale, requires_grad=False
            )

            # Remove output_scale from k_proj and v_proj
            k_proj.output_scale = None
            v_proj.output_scale = None
        cleanup_memory()


def save_quantized_model(
    model: AutoModelForCausalLM,
    quant_config: BaseQuantizeConfig,
    save_dir: str,
):
    print(model)
    print(f"Saving the model to {save_dir}")
    static_q_dict = {
        "quantization_config": {
            "quant_method": "fp8",
            "activation_scheme": quant_config.activation_scheme,
            "ignored_layers": quant_config.ignored_layers,
        }
    }
    if hasattr(quant_config, "kv_cache_quant_layers"):
        static_q_dict["quantization_config"]["kv_cache_scheme"] = "static"
    model.config.update(static_q_dict)
    model.save_pretrained(save_dir)
    tokenizer = AutoTokenizer.from_pretrained(model.config._name_or_path)
    tokenizer.save_pretrained(save_dir)


def get_layers_to_ignore(model, ignore_patterns) -> List[str]:
    ignored_layers = set()

    for name, linear in model.named_modules():
        if not isinstance(linear, torch.nn.Linear):
            continue

        for ignore_pattern in ignore_patterns:
            regex_prefix = "re:"
            if ignore_pattern.startswith(regex_prefix):
                # check if name matches regex and add to set if true
                regex_pattern = ignore_pattern[len(regex_prefix) :]
                if re.search(regex_pattern, name):
                    ignored_layers.add(name)
            else:
                # else, exact match
                if ignore_pattern == name:
                    ignored_layers.add(name)

    return list(ignored_layers)


def get_kv_cache_quant_layers(model, kv_cache_quant_targets: Tuple[str]) -> List[str]:
    kv_cache_quant_layers = []

    for name, linear in model.named_modules():
        if not isinstance(linear, torch.nn.Linear):
            continue

        for output_quant_target in kv_cache_quant_targets:
            if name.endswith(output_quant_target):
                kv_cache_quant_layers.append(name)

    return kv_cache_quant_layers


def quantize_to_fp8_dynamic(input_model_dir: str, output_model_dir: str) -> None:
    # Define quantization config with dynamic activation scales
    quantize_config = BaseQuantizeConfig(
        quant_method="fp8", activation_scheme="dynamic"
    )

    # Load the model, quantize, and save checkpoint
    model = AutoFP8ForCausalLM.from_pretrained(input_model_dir, quantize_config)
    # No examples for dynamic quantization
    model.quantize([])
    model.save_quantized(output_model_dir)


if __name__ == "__main__":
    quantize_to_fp8_dynamic(sys.argv[1], sys.argv[2])
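The diff does not show this new file's name; assuming it is importable as quantize_to_fp8_dynamic, a one-off conversion could be driven from Python like this (paths are illustrative):

from quantize_to_fp8_dynamic import quantize_to_fp8_dynamic  # assumed module/file name

# Reads an unquantized HF checkpoint and writes an FP8 (dynamic activation) copy
quantize_to_fp8_dynamic(
    "models/Llama-3.1-8B-Instruct",
    "quantized_models/Llama-3.1-8B-Instruct-FP8",
)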
src/ui_update.py (124 lines changed)

@@ -1,16 +1,100 @@
from typing import Tuple

import psutil
from PySide6.QtCore import QTimer
from PySide6.QtGui import Qt
from PySide6.QtWidgets import QFileDialog, QLabel

from Localizations import *
import psutil
from error_handling import show_error


def update_model_info(logger, self, model_info):
def resize_window(self, larger) -> None:
    factor = 1.1 if larger else 1 / 1.1
    current_width = self.width()
    current_height = self.height()
    new_width = int(current_width * factor)
    new_height = int(current_height * factor)
    self.resize(new_width, new_height)


def reset_size(self) -> None:
    self.resize(self.default_width, self.default_height)


def parse_resolution(self) -> Tuple[int, int]:
    res = os.environ.get("AUTOGGUF_RESOLUTION", "1650x1100")
    try:
        width, height = map(int, res.split("x"))
        if width <= 0 or height <= 0:
            raise ValueError
        return width, height
    except (ValueError, AttributeError):
        return 1650, 1100


def browse_base_model(self) -> None:
    self.logger.info(BROWSING_FOR_BASE_MODEL_FOLDER)  # Updated log message
    base_model_folder = QFileDialog.getExistingDirectory(self, SELECT_BASE_MODEL_FOLDER)
    if base_model_folder:
        self.base_model_path.setText(os.path.abspath(base_model_folder))


def browse_hf_model_input(self) -> None:
    self.logger.info(BROWSE_FOR_HF_MODEL_DIRECTORY)
    model_dir = QFileDialog.getExistingDirectory(self, SELECT_HF_MODEL_DIRECTORY)
    if model_dir:
        self.hf_model_input.setText(os.path.abspath(model_dir))


def browse_hf_outfile(self) -> None:
    self.logger.info(BROWSE_FOR_HF_TO_GGUF_OUTPUT)
    outfile, _ = QFileDialog.getSaveFileName(self, SELECT_OUTPUT_FILE, "", GGUF_FILES)
    if outfile:
        self.hf_outfile.setText(os.path.abspath(outfile))


def browse_imatrix_datafile(self) -> None:
    self.logger.info(BROWSING_FOR_IMATRIX_DATA_FILE)
    datafile, _ = QFileDialog.getOpenFileName(self, SELECT_DATA_FILE, "", ALL_FILES)
    if datafile:
        self.imatrix_datafile.setText(os.path.abspath(datafile))


def browse_imatrix_model(self) -> None:
    self.logger.info(BROWSING_FOR_IMATRIX_MODEL_FILE)
    model_file, _ = QFileDialog.getOpenFileName(self, SELECT_MODEL_FILE, "", GGUF_FILES)
    if model_file:
        self.imatrix_model.setText(os.path.abspath(model_file))


def browse_imatrix_output(self) -> None:
    self.logger.info(BROWSING_FOR_IMATRIX_OUTPUT_FILE)
    output_file, _ = QFileDialog.getSaveFileName(
        self, SELECT_OUTPUT_FILE, "", DAT_FILES
    )
    if output_file:
        self.imatrix_output.setText(os.path.abspath(output_file))


def create_label(self, text, tooltip) -> QLabel:
    label = QLabel(text)
    label.setToolTip(tooltip)
    return label


def toggle_gpu_offload_auto(self, state) -> None:
    is_auto = state == Qt.CheckState.Checked
    self.gpu_offload_slider.setEnabled(not is_auto)
    self.gpu_offload_spinbox.setEnabled(not is_auto)


def update_model_info(logger, model_info) -> None:
    logger.debug(UPDATING_MODEL_INFO.format(model_info))
    pass


def update_system_info(self):
def update_system_info(self) -> None:
    ram = psutil.virtual_memory()
    cpu = psutil.cpu_percent()

@@ -27,8 +111,16 @@ def update_system_info(self):
    )
    self.cpu_label.setText(CPU_USAGE_FORMAT.format(cpu))

    # Collect CPU and RAM usage data
    self.cpu_data.append(cpu)
    self.ram_data.append(ram.percent)

def animate_bar(self, bar, target_value):
    if len(self.cpu_data) > 60:
        self.cpu_data.pop(0)
        self.ram_data.pop(0)


def animate_bar(self, bar, target_value) -> None:
    current_value = bar.value()
    difference = target_value - current_value

@@ -42,7 +134,7 @@ def animate_bar(self, bar, target_value):
    timer.start(10)  # Adjust the interval for animation speed


def _animate_step(bar, target_value, step, timer):
def _animate_step(bar, target_value, step, timer) -> None:
    current_value = bar.value()
    new_value = current_value + step

@@ -55,11 +147,11 @@ def _animate_step(bar, target_value, step, timer):
    bar.setValue(new_value)


def update_download_progress(self, progress):
def update_download_progress(self, progress) -> None:
    self.download_progress.setValue(progress)


def update_cuda_backends(self):
def update_cuda_backends(self) -> None:
    self.logger.debug(UPDATING_CUDA_BACKENDS)
    self.backend_combo_cuda.clear()
    llama_bin = os.path.abspath("llama_bin")
@@ -67,7 +159,9 @@ def update_cuda_backends(self):
        for item in os.listdir(llama_bin):
            item_path = os.path.join(llama_bin, item)
            if os.path.isdir(item_path) and "cudart-llama" not in item.lower():
                if "cu1" in item.lower():  # Only include CUDA-capable backends
                if (
                    "cu1" in item.lower() or "cuda-1" in item.lower()
                ):  # Only include CUDA-capable backends
                    self.backend_combo_cuda.addItem(item, userData=item_path)

    if self.backend_combo_cuda.count() == 0:
@@ -77,23 +171,23 @@ def update_cuda_backends(self):
        self.backend_combo_cuda.setEnabled(True)


def update_threads_spinbox(self, value):
def update_threads_spinbox(self, value) -> None:
    self.threads_spinbox.setValue(value)


def update_threads_slider(self, value):
def update_threads_slider(self, value) -> None:
    self.threads_slider.setValue(value)


def update_gpu_offload_spinbox(self, value):
def update_gpu_offload_spinbox(self, value) -> None:
    self.gpu_offload_spinbox.setValue(value)


def update_gpu_offload_slider(self, value):
def update_gpu_offload_slider(self, value) -> None:
    self.gpu_offload_slider.setValue(value)


def update_cuda_option(self):
def update_cuda_option(self) -> None:
    self.logger.debug(UPDATING_CUDA_OPTIONS)
    asset = self.asset_combo.currentData()

@@ -113,7 +207,7 @@ def update_cuda_option(self):
        self.update_cuda_backends()


def update_assets(self):
def update_assets(self) -> None:
    self.logger.debug(UPDATING_ASSET_LIST)
    self.asset_combo.clear()
    release = self.release_combo.currentData()
@@ -128,6 +222,6 @@ def update_assets(self):
        self.update_cuda_option()


def update_base_model_visibility(self, index):
def update_base_model_visibility(self, index) -> None:
    is_gguf = self.lora_output_type_combo.itemText(index) == "GGUF"
    self.base_model_wrapper.setVisible(is_gguf)
src/utils.py (157 lines changed)

@@ -1,14 +1,104 @@
from PySide6.QtWidgets import QFileDialog
from typing import Any, Union

from error_handling import show_error
from Localizations import *
import requests
import urllib.request
import urllib.error
import json
import ssl
import certifi
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QFileDialog, QInputDialog, QMenu

from DownloadThread import DownloadThread
from imports_and_globals import ensure_directory
from Localizations import *
from error_handling import show_error
from globals import ensure_directory
from KVOverrideEntry import KVOverrideEntry


def browse_models(self):
def show_model_context_menu(self, position):
    item = self.model_tree.itemAt(position)
    if item:
        # Child of a sharded model or top-level item without children
        if item.parent() is not None or item.childCount() == 0:
            menu = QMenu()
            rename_action = menu.addAction(RENAME)
            delete_action = menu.addAction(DELETE)

            action = menu.exec(self.model_tree.viewport().mapToGlobal(position))
            if action == rename_action:
                self.rename_model(item)
            elif action == delete_action:
                self.delete_model(item)


def rename_model(self, item):
    old_name = item.text(0)
    new_name, ok = QInputDialog.getText(self, RENAME, f"New name for {old_name}:")
    if ok and new_name:
        old_path = os.path.join(self.models_input.text(), old_name)
        new_path = os.path.join(self.models_input.text(), new_name)
        try:
            os.rename(old_path, new_path)
            item.setText(0, new_name)
            self.logger.info(MODEL_RENAMED_SUCCESSFULLY.format(old_name, new_name))
        except Exception as e:
            show_error(self.logger, f"Error renaming model: {e}")


def add_kv_override(self, override_string=None) -> None:
    entry = KVOverrideEntry()
    entry.deleted.connect(self.remove_kv_override)
    if override_string:
        key, value = override_string.split("=")
        type_, val = value.split(":")
        entry.key_input.setText(key)
        entry.type_combo.setCurrentText(type_)
        entry.value_input.setText(val)
    self.kv_override_layout.addWidget(entry)
    self.kv_override_entries.append(entry)
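The override_string accepted above follows a KEY=TYPE:VALUE convention; a hedged example (the available type labels live in KVOverrideEntry's type combo, which this diff does not show):

# Hypothetical call from within the AutoGGUF window; "str" is assumed to be
# one of the labels offered by entry.type_combo.
self.add_kv_override("general.name=str:MyRenamedModel")
# -> key_input = "general.name", type_combo = "str", value_input = "MyRenamedModel"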
def remove_kv_override(self, entry) -> None:
    self.kv_override_layout.removeWidget(entry)
    self.kv_override_entries.remove(entry)
    entry.deleteLater()


def get_models_data(self) -> list[dict[str, Union[str, Any]]]:
    models = []
    root = self.model_tree.invisibleRootItem()
    child_count = root.childCount()
    for i in range(child_count):
        item = root.child(i)
        model_name = item.text(0)
        model_type = "sharded" if "sharded" in model_name.lower() else "single"
        model_path = item.data(0, Qt.ItemDataRole.UserRole)
        models.append({"name": model_name, "type": model_type, "path": model_path})
    return models


def get_tasks_data(self) -> list[dict[str, Union[int, Any]]]:
    tasks = []
    for i in range(self.task_list.count()):
        item = self.task_list.item(i)
        task_widget = self.task_list.itemWidget(item)
        if task_widget:
            tasks.append(
                {
                    "name": task_widget.task_name,
                    "status": task_widget.status,
                    "progress": (
                        task_widget.progress_bar.value()
                        if hasattr(task_widget, "progress_bar")
                        else 0
                    ),
                    "log_file": task_widget.log_file,
                }
            )
    return tasks
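For orientation, a sketch of the dictionaries these two helpers hand back to the /v1/models and /v1/tasks endpoints (keys mirror the appends above, values are illustrative):

models = [
    {"name": "Llama-3.1-8B-Instruct.fp16.gguf", "type": "single", "path": "/models/Llama-3.1-8B-Instruct.fp16.gguf"},
]
tasks = [
    {"name": "Quantizing Llama-3.1-8B to Q4_K_M", "status": "running", "progress": 42, "log_file": "logs/task.log"},
]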
def browse_models(self) -> None:
    self.logger.info(BROWSING_FOR_MODELS_DIRECTORY)
    models_path = QFileDialog.getExistingDirectory(self, SELECT_MODELS_DIRECTORY)
    if models_path:
@@ -17,7 +107,7 @@ def browse_models(self):
        self.load_models()


def browse_output(self):
def browse_output(self) -> None:
    self.logger.info(BROWSING_FOR_OUTPUT_DIRECTORY)
    output_path = QFileDialog.getExistingDirectory(self, SELECT_OUTPUT_DIRECTORY)
    if output_path:
@@ -25,7 +115,7 @@ def browse_output(self):
        ensure_directory(output_path)


def browse_logs(self):
def browse_logs(self) -> None:
    self.logger.info(BROWSING_FOR_LOGS_DIRECTORY)
    logs_path = QFileDialog.getExistingDirectory(self, SELECT_LOGS_DIRECTORY)
    if logs_path:
@@ -33,7 +123,7 @@ def browse_logs(self):
        ensure_directory(logs_path)


def browse_imatrix(self):
def browse_imatrix(self) -> None:
    self.logger.info(BROWSING_FOR_IMATRIX_FILE)
    imatrix_file, _ = QFileDialog.getOpenFileName(
        self, SELECT_IMATRIX_FILE, "", DAT_FILES
@@ -42,7 +132,7 @@ def browse_imatrix(self):
        self.imatrix.setText(os.path.abspath(imatrix_file))


def browse_lora_input(self):
def browse_lora_input(self) -> None:
    self.logger.info(BROWSING_FOR_LORA_INPUT_DIRECTORY)
    lora_input_path = QFileDialog.getExistingDirectory(
        self, SELECT_LORA_INPUT_DIRECTORY
@@ -52,7 +142,7 @@ def browse_lora_input(self):
        ensure_directory(lora_input_path)


def browse_lora_output(self):
def browse_lora_output(self) -> None:
    self.logger.info(BROWSING_FOR_LORA_OUTPUT_FILE)
    lora_output_file, _ = QFileDialog.getSaveFileName(
        self, SELECT_LORA_OUTPUT_FILE, "", GGUF_AND_BIN_FILES
@@ -61,7 +151,7 @@ def browse_lora_output(self):
        self.lora_output.setText(os.path.abspath(lora_output_file))


def download_llama_cpp(self):
def download_llama_cpp(self) -> None:
    self.logger.info(STARTING_LLAMACPP_DOWNLOAD)
    asset = self.asset_combo.currentData()
    if not asset:
@@ -83,18 +173,47 @@ def download_llama_cpp(self):
    self.download_progress.setValue(0)


def refresh_releases(self):
def get_repo_from_env() -> tuple[str, str]:
    repo = os.getenv("AUTOGGUF_BACKEND_REPO", "ggerganov/llama.cpp")

    if not repo or "/" not in repo:
        raise ValueError(INVALID_REPOSITORY_FORMAT)

    owner, repo_name = repo.split("/", 1)
    if not all(part.strip() for part in (owner, repo_name)):
        raise ValueError(REPO_CANNOT_BE_EMPTY)

    return owner, repo_name
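get_repo_from_env lets the release source be redirected through AUTOGGUF_BACKEND_REPO; a small sketch of how the owner/repo pair feeds the API URL built in refresh_releases below (the fork name is made up):

import os

# Hypothetical illustration: pointing AutoGGUF at a fork instead of the default repo.
os.environ["AUTOGGUF_BACKEND_REPO"] = "someuser/llama.cpp-fork"  # assumed fork
owner, repo = get_repo_from_env()
print(f"https://api.github.com/repos/{owner}/{repo}/releases")
# -> https://api.github.com/repos/someuser/llama.cpp-fork/releases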
def refresh_releases(self) -> None:
    self.logger.info(REFRESHING_LLAMACPP_RELEASES)
    try:
        response = requests.get(
            "https://api.github.com/repos/ggerganov/llama.cpp/releases"
        )
        response.raise_for_status()  # Raise an exception for bad status codes
        releases = response.json()
        owner, repo = get_repo_from_env()
        url = f"https://api.github.com/repos/{owner}/{repo}/releases"

        # Create SSL context with certifi certificates
        ssl_context = ssl.create_default_context(cafile=certifi.where())

        # Create request
        req = urllib.request.Request(url)

        # Make the request
        with urllib.request.urlopen(req, context=ssl_context) as response:
            if response.status != 200:
                raise urllib.error.HTTPError(
                    url, response.status, "HTTP Error", response.headers, None
                )

            releases = json.loads(response.read().decode("utf-8"))

        self.release_combo.clear()
        for release in releases:
            self.release_combo.addItem(release["tag_name"], userData=release)
        self.release_combo.currentIndexChanged.connect(self.update_assets)
        self.update_assets()
    except requests.exceptions.RequestException as e:

    except ValueError as e:
        show_error(self.logger, f"Invalid repository configuration: {str(e)}")
    except (urllib.error.URLError, urllib.error.HTTPError) as e:
        show_error(self.logger, ERROR_FETCHING_RELEASES.format(str(e)))