Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-30 21:34:36 +00:00)

Commit 9495d3982d: Merge branch 'master' into gg/flash-attn

@@ -1,5 +1,6 @@
 {
   lib,
+  glibc,
   config,
   stdenv,
   mkShell,
@@ -30,6 +31,11 @@
   useRocm ? config.rocmSupport,
   useVulkan ? false,
   llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake
+
+  # It's necessary to consistently use backendStdenv when building with CUDA support,
+  # otherwise we get libstdc++ errors downstream.
+  effectiveStdenv ? if useCuda then cudaPackages.backendStdenv else stdenv,
+  enableStatic ? effectiveStdenv.hostPlatform.isStatic
 }@inputs:

 let
@@ -41,10 +47,7 @@ let
     versionOlder
     ;

-  # It's necessary to consistently use backendStdenv when building with CUDA support,
-  # otherwise we get libstdc++ errors downstream.
   stdenv = throw "Use effectiveStdenv instead";
-  effectiveStdenv = if useCuda then cudaPackages.backendStdenv else inputs.stdenv;

   suffices =
     lib.optionals useBlas [ "BLAS" ]
@@ -167,6 +170,9 @@ effectiveStdenv.mkDerivation (
       # TODO: Replace with autoAddDriverRunpath
       # once https://github.com/NixOS/nixpkgs/pull/275241 has been merged
       cudaPackages.autoAddOpenGLRunpathHook
+    ]
+    ++ optionals (effectiveStdenv.hostPlatform.isGnu && enableStatic) [
+      glibc.static
     ];

     buildInputs =
@@ -181,7 +187,7 @@ effectiveStdenv.mkDerivation (
     [
       (cmakeBool "LLAMA_NATIVE" false)
       (cmakeBool "LLAMA_BUILD_SERVER" true)
-      (cmakeBool "BUILD_SHARED_LIBS" true)
+      (cmakeBool "BUILD_SHARED_LIBS" (!enableStatic))
       (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
       (cmakeBool "LLAMA_BLAS" useBlas)
       (cmakeBool "LLAMA_CLBLAST" useOpenCL)
@@ -190,6 +196,7 @@ effectiveStdenv.mkDerivation (
       (cmakeBool "LLAMA_METAL" useMetalKit)
       (cmakeBool "LLAMA_MPI" useMpi)
       (cmakeBool "LLAMA_VULKAN" useVulkan)
+      (cmakeBool "LLAMA_STATIC" enableStatic)
     ]
     ++ optionals useCuda [
       (
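
The flake change above drives static linking through ordinary CMake switches; outside Nix, a minimal sketch of an equivalent static configure (assuming a Linux host with a static glibc and the usual toolchain) might be:

```sh
# Hypothetical manual static build; the flag names come from the diff above,
# but the exact set required on a given system may differ.
mkdir -p build && cd build
cmake .. -DLLAMA_STATIC=ON -DBUILD_SHARED_LIBS=OFF -DLLAMA_NATIVE=OFF
cmake --build . --config Release -j "$(nproc)"
```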

.github/workflows/build.yml (vendored, 221 changed lines)

@@ -21,8 +21,123 @@ env:
   GGML_N_THREADS: 1

 jobs:
+  macOS-latest-cmake-arm64:
+    runs-on: macos-14
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v3
+
+      - name: Dependencies
+        id: depends
+        continue-on-error: true
+        run: |
+          brew update
+
+      - name: Build
+        id: cmake_build
+        run: |
+          sysctl -a
+          mkdir build
+          cd build
+          cmake -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL_EMBED_LIBRARY=ON -DLLAMA_CURL=ON ..
+          cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
+
+      - name: Test
+        id: cmake_test
+        run: |
+          cd build
+          ctest -L main --verbose --timeout 900
+
+      - name: Determine tag name
+        id: tag
+        shell: bash
+        run: |
+          BUILD_NUMBER="$(git rev-list --count HEAD)"
+          SHORT_HASH="$(git rev-parse --short=7 HEAD)"
+          if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+            echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+          else
+            SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+            echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Pack artifacts
+        id: pack_artifacts
+        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
+        run: |
+          cp LICENSE ./build/bin/
+          zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip ./build/bin/*
+
+      - name: Upload artifacts
+        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
+        uses: actions/upload-artifact@v3
+        with:
+          path: |
+            llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip
+
+  macOS-latest-cmake-x64:
+    runs-on: macos-latest
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v3
+
+      - name: Dependencies
+        id: depends
+        continue-on-error: true
+        run: |
+          brew update
+
+      - name: Build
+        id: cmake_build
+        run: |
+          sysctl -a
+          mkdir build
+          cd build
+          cmake -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL_EMBED_LIBRARY=ON -DLLAMA_CURL=ON ..
+          cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
+
+      - name: Test
+        id: cmake_test
+        run: |
+          cd build
+          ctest -L main --verbose --timeout 900
+
+      - name: Determine tag name
+        id: tag
+        shell: bash
+        run: |
+          BUILD_NUMBER="$(git rev-list --count HEAD)"
+          SHORT_HASH="$(git rev-parse --short=7 HEAD)"
+          if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+            echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+          else
+            SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+            echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Pack artifacts
+        id: pack_artifacts
+        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
+        run: |
+          cp LICENSE ./build/bin/
+          zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip ./build/bin/*
+
+      - name: Upload artifacts
+        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
+        uses: actions/upload-artifact@v3
+        with:
+          path: |
+            llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip
+
   ubuntu-focal-make:
     runs-on: ubuntu-20.04
+    env:
+      LLAMA_NODE_AVAILABLE: true
+      LLAMA_PYTHON_AVAILABLE: true
+
     steps:
       - name: Clone
@@ -35,6 +150,14 @@ jobs:
           sudo apt-get update
           sudo apt-get install build-essential gcc-8

+      - uses: actions/setup-node@v4
+        with:
+          node-version: "20"
+
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.11"
+
       - name: Build
         id: make_build
         env:
@@ -48,6 +171,28 @@ jobs:
          CC=gcc-8 make tests -j $(nproc)
          make test -j $(nproc)

+  ubuntu-focal-make-curl:
+    runs-on: ubuntu-20.04
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v3
+
+      - name: Dependencies
+        id: depends
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential gcc-8 libcurl4-openssl-dev
+
+      - name: Build
+        id: make_build
+        env:
+          LLAMA_FATAL_WARNINGS: 1
+          LLAMA_CURL: 1
+        run: |
+          CC=gcc-8 make -j $(nproc)
+
   ubuntu-latest-cmake:
     runs-on: ubuntu-latest

@@ -76,40 +221,40 @@ jobs:
          cd build
          ctest -L main --verbose --timeout 900

-  ubuntu-latest-cmake-sanitizer:
-    runs-on: ubuntu-latest
-
-    continue-on-error: true
-
-    strategy:
-      matrix:
-        sanitizer: [ADDRESS, THREAD, UNDEFINED]
-        build_type: [Debug, Release]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v3
-
-      - name: Dependencies
-        id: depends
-        run: |
-          sudo apt-get update
-          sudo apt-get install build-essential
-
-      - name: Build
-        id: cmake_build
-        run: |
-          mkdir build
-          cd build
-          cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
-          cmake --build . --config ${{ matrix.build_type }} -j $(nproc)
-
-      - name: Test
-        id: cmake_test
-        run: |
-          cd build
-          ctest -L main --verbose --timeout 900
+#  ubuntu-latest-cmake-sanitizer:
+#    runs-on: ubuntu-latest
+#
+#    continue-on-error: true
+#
+#    strategy:
+#      matrix:
+#        sanitizer: [ADDRESS, THREAD, UNDEFINED]
+#        build_type: [Debug, Release]
+#
+#    steps:
+#      - name: Clone
+#        id: checkout
+#        uses: actions/checkout@v3
+#
+#      - name: Dependencies
+#        id: depends
+#        run: |
+#          sudo apt-get update
+#          sudo apt-get install build-essential
+#
+#      - name: Build
+#        id: cmake_build
+#        run: |
+#          mkdir build
+#          cd build
+#          cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+#          cmake --build . --config ${{ matrix.build_type }} -j $(nproc)
+#
+#      - name: Test
+#        id: cmake_test
+#        run: |
+#          cd build
+#          ctest -L main --verbose --timeout 900

   ubuntu-latest-cmake-mpi:
     runs-on: ubuntu-latest
@@ -333,6 +478,7 @@ jobs:
          mkdir build
          cd build
          cmake -G Xcode .. \
+            -DLLAMA_METAL_EMBED_LIBRARY=ON \
            -DLLAMA_BUILD_EXAMPLES=OFF \
            -DLLAMA_BUILD_TESTS=OFF \
            -DLLAMA_BUILD_SERVER=OFF \
@@ -361,6 +507,7 @@ jobs:
          mkdir build
          cd build
          cmake -G Xcode .. \
+            -DLLAMA_METAL_EMBED_LIBRARY=ON \
            -DLLAMA_BUILD_EXAMPLES=OFF \
            -DLLAMA_BUILD_TESTS=OFF \
            -DLLAMA_BUILD_SERVER=OFF \
@@ -425,6 +572,8 @@ jobs:
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON'
          - build: 'vulkan'
            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_VULKAN=ON -DBUILD_SHARED_LIBS=ON'
+          - build: 'arm64'
+            defines: '-A ARM64 -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'

     steps:
       - name: Clone
@@ -520,7 +669,7 @@ jobs:
       - name: Test
         id: cmake_test
         # not all machines have native AVX-512
-        if: ${{ matrix.build != 'clblast' && matrix.build != 'kompute' && matrix.build != 'vulkan' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }}
+        if: ${{ matrix.build != 'arm64' && matrix.build != 'clblast' && matrix.build != 'kompute' && matrix.build != 'vulkan' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }}
         run: |
          cd build
          ctest -L main -C Release --verbose --timeout 900
@@ -722,6 +871,8 @@ jobs:
      - macOS-latest-cmake
      - windows-latest-cmake
      - windows-latest-cmake-cublas
+      - macOS-latest-cmake-arm64
+      - macOS-latest-cmake-x64

     steps:
       - name: Clone

.github/workflows/close-issue.yml (vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
+name: Close inactive issues
+on:
+  schedule:
+    - cron: "42 0 * * *"
+
+jobs:
+  close-issues:
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: actions/stale@v5
+        with:
+          exempt-issue-labels: "refactor,help wanted,good first issue,research"
+          days-before-issue-stale: 30
+          days-before-issue-close: 14
+          stale-issue-label: "stale"
+          close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
+          days-before-pr-stale: -1
+          days-before-pr-close: -1
+          operations-per-run: 1000
+          repo-token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/server.yml (vendored, 82 changed lines)

@@ -24,18 +24,15 @@ jobs:

     strategy:
       matrix:
-        sanitizer: [ADDRESS, THREAD, UNDEFINED]
-        build_type: [Debug, Release]
+        # TODO: temporary disabled due to linux kernel issues
+        #sanitizer: [ADDRESS, THREAD, UNDEFINED]
+        sanitizer: [UNDEFINED]
+        build_type: [Debug]
         include:
           - build_type: Release
             sanitizer: ""
-        exclude:
-          - build_type: Release
-            sanitizer: ADDRESS
-          - build_type: Release
-            sanitizer: THREAD
-          - build_type: Release
-            sanitizer: UNDEFINED
+            disabled_on_pr: true
+      fail-fast: false # While -DLLAMA_SANITIZE_THREAD=ON is broken

     container:
       image: ubuntu:latest
@@ -47,6 +44,8 @@ jobs:
       - name: Clone
         id: checkout
         uses: actions/checkout@v3
+        with:
+          fetch-depth: 0

       - name: Dependencies
         id: depends
@@ -58,7 +57,8 @@ jobs:
            cmake \
            python3-pip \
            wget \
-            psmisc
+            language-pack-en \
+            libcurl4-openssl-dev

       - name: Build
         id: cmake_build
@@ -68,6 +68,7 @@ jobs:
          cmake .. \
            -DLLAMA_NATIVE=OFF \
            -DLLAMA_BUILD_SERVER=ON \
+            -DLLAMA_CURL=ON \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
            -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON ;
          cmake --build . --config ${{ matrix.build_type }} -j $(nproc) --target server
@@ -79,13 +80,72 @@ jobs:

       - name: Tests
         id: server_integration_tests
+        if: ${{ !matrix.disabled_on_pr || !github.event.pull_request }}
         run: |
          cd examples/server/tests
          PORT=8888 ./tests.sh

       - name: Slow tests
         id: server_integration_tests_slow
-        if: ${{ github.event.schedule != '' && matrix.build_type == 'Release' || github.event.inputs.slow_tests == 'true' }}
+        if: ${{ (github.event.schedule || github.event.inputs.slow_tests == 'true') && matrix.build_type == 'Release' }}
         run: |
          cd examples/server/tests
          PORT=8888 ./tests.sh --stop --no-skipped --no-capture --tags slow
+
+
+  server-windows:
+    runs-on: windows-latest
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: libCURL
+        id: get_libcurl
+        env:
+          CURL_VERSION: 8.6.0_6
+        run: |
+          curl.exe -o $env:RUNNER_TEMP/curl.zip -L "https://curl.se/windows/dl-${env:CURL_VERSION}/curl-${env:CURL_VERSION}-win64-mingw.zip"
+          mkdir $env:RUNNER_TEMP/libcurl
+          tar.exe -xvf $env:RUNNER_TEMP/curl.zip --strip-components=1 -C $env:RUNNER_TEMP/libcurl
+
+      - name: Build
+        id: cmake_build
+        run: |
+          mkdir build
+          cd build
+          cmake .. -DLLAMA_CURL=ON -DCURL_LIBRARY="$env:RUNNER_TEMP/libcurl/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:RUNNER_TEMP/libcurl/include"
+          cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS} --target server
+
+      - name: Python setup
+        id: setup_python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Tests dependencies
+        id: test_dependencies
+        run: |
+          pip install -r examples/server/tests/requirements.txt
+
+      - name: Copy Libcurl
+        id: prepare_libcurl
+        run: |
+          cp $env:RUNNER_TEMP/libcurl/bin/libcurl-x64.dll ./build/bin/Release/libcurl-x64.dll
+
+      - name: Tests
+        id: server_integration_tests
+        if: ${{ !matrix.disabled_on_pr || !github.event.pull_request }}
+        run: |
+          cd examples/server/tests
+          behave.exe --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags llama.cpp
+
+      - name: Slow tests
+        id: server_integration_tests_slow
+        if: ${{ (github.event.schedule || github.event.inputs.slow_tests == 'true') && matrix.build_type == 'Release' }}
+        run: |
+          cd examples/server/tests
+          behave.exe --stop --no-skipped --no-capture --tags slow
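
The same integration suite can be run outside CI; a minimal sketch, assuming the server target has already been built with -DLLAMA_CURL=ON and Python 3 is available:

```sh
# Mirror of the workflow's test steps; the port number is arbitrary.
pip install -r examples/server/tests/requirements.txt
cd examples/server/tests
PORT=8888 ./tests.sh
```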

.github/workflows/tidy-post.yml (vendored, file deleted, 20 lines)

@@ -1,20 +0,0 @@
-name: clang-tidy review post comments
-
-on:
-  workflow_dispatch:
-    workflows: ["clang-tidy-review"]
-    types:
-      - completed
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: ZedThree/clang-tidy-review/post@v0.13.0
-        # lgtm_comment_body, max_comments, and annotations need to be set on the posting workflow in a split setup
-        with:
-          # adjust options as necessary
-          lgtm_comment_body: ''
-          annotations: false
-          max_comments: 25

.github/workflows/tidy-review.yml (vendored, file deleted, 23 lines)

@@ -1,23 +0,0 @@
-name: clang-tidy-review
-
-on:
-  pull_request:
-    branches:
-      - master
-
-jobs:
-  clang-tidy-review:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - uses: ZedThree/clang-tidy-review@v0.13.0
-        id: review
-        with:
-          lgtm_comment_body: ''
-          build_dir: build
-          cmake_command: cmake . -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=on
-          split_workflow: true
-
-      - uses: ZedThree/clang-tidy-review/upload@v0.13.0

.gitignore (vendored, 6 changed lines)

@@ -11,7 +11,10 @@
 *.gcda
 *.dot
 *.bat
+*.tmp
 *.metallib
+*.etag
+*.lastModified
 .DS_Store
 .build/
 .cache/
@@ -25,6 +28,8 @@
 .vscode/
 .idea/

+ggml-metal-embed.metal
+
 lcov-report/
 gcovr-report/

@@ -45,6 +50,7 @@ models-mnt
 /embedding
 /gguf
 /gguf-llama-simple
+/gritlm
 /imatrix
 /infill
 /libllama.so

@@ -99,6 +99,8 @@ option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some
 set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K")
 set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
     "llama: max. batch size for using peer access")
+option(LLAMA_CUDA_NO_PEER_COPY "llama: do not use peer to peer copies" OFF)
+option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
 option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF)
 option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF)
 option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
@@ -116,7 +118,9 @@ option(LLAMA_MPI "llama: use MPI"
 option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF)
 option(LLAMA_SYCL "llama: use SYCL" OFF)
 option(LLAMA_SYCL_F16 "llama: use 16 bit floats for sycl calculations" OFF)
+set(LLAMA_SYCL_TARGET "INTEL" CACHE STRING "llama: sycl target device")
 option(LLAMA_CPU_HBM "llama: use memkind for CPU HBM" OFF)
+set(LLAMA_SCHED_MAX_COPIES "4" CACHE STRING "llama: max input copies for pipeline parallelism")

 option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
 option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
@@ -146,6 +150,8 @@ set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
 include(CheckCXXCompilerFlag)

+add_compile_definitions(GGML_SCHED_MAX_COPIES=${LLAMA_SCHED_MAX_COPIES})
+
 # enable libstdc++ assertions for debug builds
 if (CMAKE_SYSTEM_NAME MATCHES "Linux")
     add_compile_definitions($<$<CONFIG:Debug>:_GLIBCXX_ASSERTIONS>)
@@ -196,35 +202,39 @@ if (LLAMA_METAL)
         add_compile_definitions(GGML_METAL_NDEBUG)
     endif()

-    # get full path to the file
-    #add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/")
+    # copy ggml-common.h and ggml-metal.metal to bin directory
+    configure_file(ggml-common.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COPYONLY)

-    # copy ggml-metal.metal to bin directory
     configure_file(ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY)

     if (LLAMA_METAL_EMBED_LIBRARY)
         enable_language(ASM)
         add_compile_definitions(GGML_METAL_EMBED_LIBRARY)

-        set(METALLIB_SOURCE "${CMAKE_SOURCE_DIR}/ggml-metal.metal")
+        set(METALLIB_COMMON "${CMAKE_CURRENT_SOURCE_DIR}/ggml-common.h")
+        set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")

         file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/autogenerated")
-        set(EMBED_METALLIB_ASSEMBLY "${CMAKE_BINARY_DIR}/autogenerated/ggml-embed-metallib.s")
+
+        # merge ggml-common.h and ggml-metal.metal into a single file
+        set(METALLIB_EMBED_ASM "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.s")
+        set(METALLIB_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.metal")

         add_custom_command(
-            OUTPUT ${EMBED_METALLIB_ASSEMBLY}
-            COMMAND echo ".section __DATA,__ggml_metallib" > ${EMBED_METALLIB_ASSEMBLY}
-            COMMAND echo ".globl _ggml_metallib_start" >> ${EMBED_METALLIB_ASSEMBLY}
-            COMMAND echo "_ggml_metallib_start:" >> ${EMBED_METALLIB_ASSEMBLY}
-            COMMAND echo ".incbin \\\"${METALLIB_SOURCE}\\\"" >> ${EMBED_METALLIB_ASSEMBLY}
-            COMMAND echo ".globl _ggml_metallib_end" >> ${EMBED_METALLIB_ASSEMBLY}
-            COMMAND echo "_ggml_metallib_end:" >> ${EMBED_METALLIB_ASSEMBLY}
-            DEPENDS ${METALLIB_SOURCE}
+            OUTPUT ${METALLIB_EMBED_ASM}
+            COMMAND echo "Embedding Metal library"
+            COMMAND sed -e '/\#include \"ggml-common.h\"/r ${METALLIB_COMMON}' -e '/\#include \"ggml-common.h\"/d' < ${METALLIB_SOURCE} > ${METALLIB_SOURCE_EMBED}
+            COMMAND echo ".section __DATA,__ggml_metallib" > ${METALLIB_EMBED_ASM}
+            COMMAND echo ".globl _ggml_metallib_start" >> ${METALLIB_EMBED_ASM}
+            COMMAND echo "_ggml_metallib_start:" >> ${METALLIB_EMBED_ASM}
+            COMMAND echo ".incbin \\\"${METALLIB_SOURCE_EMBED}\\\"" >> ${METALLIB_EMBED_ASM}
+            COMMAND echo ".globl _ggml_metallib_end" >> ${METALLIB_EMBED_ASM}
+            COMMAND echo "_ggml_metallib_end:" >> ${METALLIB_EMBED_ASM}
+            DEPENDS ggml-metal.metal ggml-common.h
             COMMENT "Generate assembly for embedded Metal library"
         )

-        set(GGML_SOURCES_METAL ${GGML_SOURCES_METAL} ${EMBED_METALLIB_ASSEMBLY})
-    endif()
+        set(GGML_SOURCES_METAL ${GGML_SOURCES_METAL} ${METALLIB_EMBED_ASM})
+    else()

         if (LLAMA_METAL_SHADER_DEBUG)
             # custom command to do the following:
             # xcrun -sdk macosx metal -fno-fast-math -c ggml-metal.metal -o ggml-metal.air
@@ -236,15 +246,18 @@ if (LLAMA_METAL)
             # note: unfortunately, we have to call it default.metallib instead of ggml.metallib
             # ref: https://github.com/ggerganov/whisper.cpp/issues/1720
             set(XC_FLAGS -fno-fast-math -fno-inline -g)
-            if (LLAMA_QKK_64)
-                set(XC_FLAGS ${XC_FLAGS} -DQK_K=64)
+        else()
+            set(XC_FLAGS -O3)
         endif()

         add_custom_command(
             OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
             COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air
             COMMAND xcrun -sdk macosx metallib ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
-            DEPENDS ggml-metal.metal
+            COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air
+            COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h
+            COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal
+            DEPENDS ggml-metal.metal ggml-common.h
             COMMENT "Compiling Metal kernels"
         )

@@ -252,7 +265,7 @@ if (LLAMA_METAL)
             ggml-metal ALL
             DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
         )
-    endif()
+    endif() # LLAMA_METAL_EMBED_LIBRARY

     set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS}
         ${FOUNDATION_LIBRARY}
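
The embed path above inlines ggml-common.h into the Metal source and wraps the result in a small assembly stub; a rough shell equivalent of what the custom command produces (file names as used in the diff, run from the source directory) is:

```sh
# Illustrative reproduction of the generated ggml-metal-embed.s; the real build
# drives these steps through the CMake custom command shown above.
sed -e '/#include "ggml-common.h"/r ggml-common.h' \
    -e '/#include "ggml-common.h"/d' < ggml-metal.metal > ggml-metal-embed.metal
{
  echo '.section __DATA,__ggml_metallib'
  echo '.globl _ggml_metallib_start'
  echo '_ggml_metallib_start:'
  echo '.incbin "ggml-metal-embed.metal"'
  echo '.globl _ggml_metallib_end'
  echo '_ggml_metallib_end:'
} > ggml-metal-embed.s
```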

@@ -375,6 +388,9 @@ if (LLAMA_CUBLAS)
     endif()
     add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
     add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${LLAMA_CUDA_PEER_MAX_BATCH_SIZE})
+    if (LLAMA_CUDA_NO_PEER_COPY)
+        add_compile_definitions(GGML_CUDA_NO_PEER_COPY)
+    endif()

     if (LLAMA_STATIC)
         if (WIN32)
@@ -519,6 +535,10 @@ if (LLAMA_HIPBLAS)
         add_compile_definitions(GGML_CUDA_FORCE_MMQ)
     endif()

+    if (LLAMA_CUDA_NO_PEER_COPY)
+        add_compile_definitions(GGML_CUDA_NO_PEER_COPY)
+    endif()
+
     add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
     add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
     add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
@@ -533,6 +553,10 @@ if (LLAMA_HIPBLAS)
 endif()

 if (LLAMA_SYCL)
+    if (NOT LLAMA_SYCL_TARGET MATCHES "^(INTEL|NVIDIA)$")
+        message(FATAL_ERROR "Invalid backend chosen, supported options are INTEL or NVIDIA")
+    endif()
+
     if ( NOT DEFINED ENV{ONEAPI_ROOT})
         message(FATAL_ERROR "Not detect ENV {ONEAPI_ROOT}, please install oneAPI & source it, like: source /opt/intel/oneapi/setvars.sh")
     endif()
@@ -554,6 +578,9 @@ if (LLAMA_SYCL)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl -L${MKLROOT}/lib")
+    if (LLAMA_SYCL_TARGET STREQUAL "NVIDIA")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl-targets=nvptx64-nvidia-cuda")
+    endif()

     set(GGML_HEADERS_SYCL ggml-sycl.h)
     set(GGML_SOURCES_SYCL ggml-sycl.cpp)
@@ -561,7 +588,11 @@ if (LLAMA_SYCL)
     if (WIN32)
         set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl sycl7 OpenCL mkl_sycl_blas_dll.lib mkl_intel_ilp64_dll.lib mkl_sequential_dll.lib mkl_core_dll.lib)
     else()
+        if (LLAMA_SYCL_TARGET STREQUAL "INTEL")
             set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread)
+        elseif (LLAMA_SYCL_TARGET STREQUAL "NVIDIA")
+            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl pthread m dl onemkl)
+        endif()
     endif()
 endif()

@@ -1128,6 +1159,8 @@ endif()
 add_library(llama
             llama.cpp
             llama.h
+            unicode.h
+            unicode.cpp
             )

 target_include_directories(llama PUBLIC .)
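
With the new LLAMA_SYCL_TARGET option a SYCL build can target NVIDIA hardware; a minimal configure sketch, assuming the oneAPI environment has been sourced and a cuBLAS-enabled oneMKL is available (see the README-sycl.md changes further down):

```sh
# Hypothetical out-of-tree configure for the NVIDIA SYCL target.
source /opt/intel/oneapi/setvars.sh
mkdir -p build && cd build
cmake .. -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA \
         -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
cmake --build . --config Release -j "$(nproc)"
```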

Makefile (65 changed lines)

@@ -2,14 +2,15 @@
 BUILD_TARGETS = \
 	main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
 	simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \
-	speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey tests/test-c.o
+	speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm tests/test-c.o

 # Binaries only useful for tests
 TEST_TARGETS = \
 	tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt \
 	tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama \
 	tests/test-tokenizer-0-falcon tests/test-tokenizer-1-llama tests/test-tokenizer-1-bpe tests/test-rope \
-	tests/test-backend-ops tests/test-model-load-cancel tests/test-autorelease
+	tests/test-backend-ops tests/test-model-load-cancel tests/test-autorelease \
+	tests/test-json-schema-to-grammar

 # Code coverage output files
 COV_TARGETS = *.gcno tests/*.gcno *.gcda tests/*.gcda *.gcov tests/*.gcov lcov-report gcovr-report
@@ -167,6 +168,10 @@ ifeq ($(UNAME_S),OpenBSD)
 	MK_CPPFLAGS += -D_BSD_SOURCE
 endif

+ifdef LLAMA_SCHED_MAX_COPIES
+	MK_CPPFLAGS += -DGGML_SCHED_MAX_COPIES=$(LLAMA_SCHED_MAX_COPIES)
+endif
+
 ifdef LLAMA_DEBUG
 	MK_CFLAGS += -O0 -g
 	MK_CXXFLAGS += -O0 -g
@@ -201,6 +206,10 @@ ifdef LLAMA_SERVER_VERBOSE
 	MK_CPPFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
 endif

+ifdef LLAMA_SERVER_SSL
+	MK_CPPFLAGS += -DCPPHTTPLIB_OPENSSL_SUPPORT
+	MK_LDFLAGS += -lssl -lcrypto
+endif
+
 ifdef LLAMA_CODE_COVERAGE
 	MK_CXXFLAGS += -fprofile-arcs -ftest-coverage -dumpbase ''
@@ -443,13 +452,13 @@ ifdef LLAMA_CUDA_PEER_MAX_BATCH_SIZE
 else
 	MK_NVCCFLAGS += -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128
 endif # LLAMA_CUDA_PEER_MAX_BATCH_SIZE
-#ifdef LLAMA_CUDA_CUBLAS
-# MK_NVCCFLAGS += -DGGML_CUDA_CUBLAS
-#endif # LLAMA_CUDA_CUBLAS
+ifdef LLAMA_CUDA_NO_PEER_COPY
+	MK_NVCCFLAGS += -DGGML_CUDA_NO_PEER_COPY
+endif # LLAMA_CUDA_NO_PEER_COPY
 ifdef LLAMA_CUDA_CCBIN
 	MK_NVCCFLAGS += -ccbin $(LLAMA_CUDA_CCBIN)
 endif
-ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
+ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml-common.h
 ifdef JETSON_EOL_MODULE_DETECT
 	$(NVCC) -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I/usr/local/cuda/targets/aarch64-linux/include -std=c++11 -O3 $(NVCCFLAGS) $(CPPFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@
 else
@@ -526,6 +535,9 @@ endif # LLAMA_HIP_UMA
 ifdef LLAMA_CUDA_FORCE_DMMV
 	HIPFLAGS += -DGGML_CUDA_FORCE_DMMV
 endif # LLAMA_CUDA_FORCE_DMMV
+ifdef LLAMA_CUDA_NO_PEER_COPY
+	HIPFLAGS += -DGGML_CUDA_NO_PEER_COPY
+endif # LLAMA_CUDA_NO_PEER_COPY
 OBJS += ggml-cuda.o
 ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
 	$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
@@ -545,17 +557,18 @@ endif
 endif # LLAMA_METAL

 ifdef LLAMA_METAL
-ggml-metal.o: ggml-metal.m ggml-metal.h
+ggml-metal.o: ggml-metal.m ggml-metal.h ggml.h
 	$(CC) $(CFLAGS) -c $< -o $@

 ifdef LLAMA_METAL_EMBED_LIBRARY
-ggml-metal-embed.o: ggml-metal.metal
+ggml-metal-embed.o: ggml-metal.metal ggml-common.h
 	@echo "Embedding Metal library"
+	@sed -e '/#include "ggml-common.h"/r ggml-common.h' -e '/#include "ggml-common.h"/d' < ggml-metal.metal > ggml-metal-embed.metal
 	$(eval TEMP_ASSEMBLY=$(shell mktemp))
 	@echo ".section __DATA, __ggml_metallib" > $(TEMP_ASSEMBLY)
 	@echo ".globl _ggml_metallib_start" >> $(TEMP_ASSEMBLY)
 	@echo "_ggml_metallib_start:" >> $(TEMP_ASSEMBLY)
-	@echo ".incbin \"$<\"" >> $(TEMP_ASSEMBLY)
+	@echo ".incbin \"ggml-metal-embed.metal\"" >> $(TEMP_ASSEMBLY)
 	@echo ".globl _ggml_metallib_end" >> $(TEMP_ASSEMBLY)
 	@echo "_ggml_metallib_end:" >> $(TEMP_ASSEMBLY)
 	@$(AS) $(TEMP_ASSEMBLY) -o $@
@@ -586,6 +599,11 @@ include scripts/get-flags.mk
 	CUDA_CXXFLAGS := $(BASE_CXXFLAGS) $(GF_CXXFLAGS) -Wno-pedantic
 endif

+ifdef LLAMA_CURL
+	override CXXFLAGS := $(CXXFLAGS) -DLLAMA_USE_CURL
+	override LDFLAGS := $(LDFLAGS) -lcurl
+endif
+
 #
 # Print build information
 #
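
The new LLAMA_CURL switch only appends -DLLAMA_USE_CURL and -lcurl; a minimal usage sketch, assuming the libcurl development headers are installed (libcurl4-openssl-dev in the CI job above):

```sh
# Build with libcurl-based model downloading enabled.
make clean
LLAMA_CURL=1 make -j "$(nproc)" main server
```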

@@ -626,12 +644,15 @@ ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h
 ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h
 	$(CC) $(CFLAGS) -c $< -o $@

-ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h
+ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h ggml-common.h
 	$(CC) $(CFLAGS) -c $< -o $@

-OBJS += ggml-alloc.o ggml-backend.o ggml-quants.o
+unicode.o: unicode.cpp unicode.h
+	$(CXX) $(CXXFLAGS) -c $< -o $@
+
+OBJS += ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o

-llama.o: llama.cpp ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h llama.h
+llama.o: llama.cpp unicode.h ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h llama.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@

 COMMON_H_DEPS = common/common.h common/sampling.h common/log.h
@@ -649,6 +670,9 @@ console.o: common/console.cpp common/console.h
 grammar-parser.o: common/grammar-parser.cpp common/grammar-parser.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@

+json-schema-to-grammar.o: common/json-schema-to-grammar.cpp common/json-schema-to-grammar.h
+	$(CXX) $(CXXFLAGS) -c $< -o $@
+
 train.o: common/train.cpp common/train.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@

@@ -720,19 +744,26 @@ embedding: examples/embedding/embedding.cpp ggml.o llama.o $(C
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

+gritlm: examples/gritlm/gritlm.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
 save-load-state: examples/save-load-state/save-load-state.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-server: examples/server/server.cpp examples/server/oai.hpp examples/server/utils.hpp examples/server/httplib.h examples/server/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp examples/llava/clip.cpp examples/llava/clip.h examples/llava/llava.h examples/llava/llava.cpp common/stb_image.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS)
+server: examples/server/server.cpp examples/server/utils.hpp examples/server/httplib.h common/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp json-schema-to-grammar.o common/stb_image.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
-	$(CXX) $(CXXFLAGS) -c examples/llava/clip.cpp -o $(call GET_OBJ_FILE, examples/llava/clip.cpp) -Wno-cast-qual
-	$(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h %.hpp $< examples/llava/clip.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/llava/clip.cpp) -o $@ $(LDFLAGS) $(LWINSOCK2)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Iexamples/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2)

 gguf: examples/gguf/gguf.cpp ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

+gguf-split: examples/gguf-split/gguf-split.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
 train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -841,6 +872,10 @@ tests/test-double-float: tests/test-double-float.cpp ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

+tests/test-json-schema-to-grammar: tests/test-json-schema-to-grammar.cpp json-schema-to-grammar.o ggml.o llama.o grammar-parser.o $(OBJS)
+	$(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
 tests/test-grad0: tests/test-grad0.cpp ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
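
To try just the newly added example binaries from the Makefile changes above, a quick sketch (assuming the default toolchain) is:

```sh
# Build only the new example targets introduced in this diff.
make -j "$(nproc)" gritlm gguf-split
```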

@@ -31,6 +31,7 @@ let package = Package(
         sources: [
             "ggml.c",
             "llama.cpp",
+            "unicode.cpp",
             "ggml-alloc.c",
             "ggml-backend.c",
             "ggml-quants.c",
|
156
README-sycl.md
156
README-sycl.md
@ -29,6 +29,8 @@ For Intel CPU, recommend to use llama.cpp for X86 (Intel MKL building).
|
|||||||
## News
|
## News
|
||||||
|
|
||||||
- 2024.3
|
- 2024.3
|
||||||
|
- A blog is published: **Run LLM on all Intel GPUs Using llama.cpp**: [intel.com](https://www.intel.com/content/www/us/en/developer/articles/technical/run-llm-on-all-gpus-using-llama-cpp-artical.html) or [medium.com](https://medium.com/@jianyu_neo/run-llm-on-all-intel-gpus-using-llama-cpp-fd2e2dcbd9bd).
|
||||||
|
- New base line is ready: [tag b2437](https://github.com/ggerganov/llama.cpp/tree/b2437).
|
||||||
- Support multiple cards: **--split-mode**: [none|layer]; not support [row], it's on developing.
|
- Support multiple cards: **--split-mode**: [none|layer]; not support [row], it's on developing.
|
||||||
- Support to assign main GPU by **--main-gpu**, replace $GGML_SYCL_DEVICE.
|
- Support to assign main GPU by **--main-gpu**, replace $GGML_SYCL_DEVICE.
|
||||||
- Support detecting all GPUs with level-zero and same top **Max compute units**.
|
- Support detecting all GPUs with level-zero and same top **Max compute units**.
|
||||||
@ -73,6 +75,29 @@ For iGPU, please make sure the shared memory from host memory is enough. For lla
|
|||||||
|
|
||||||
For dGPU, please make sure the device memory is enough. For llama-2-7b.Q4_0, recommend the device memory is 4GB+.
|
For dGPU, please make sure the device memory is enough. For llama-2-7b.Q4_0, recommend the device memory is 4GB+.
|
||||||
|
|
||||||
|
## Nvidia GPU
|
||||||
|
|
||||||
|
### Verified
|
||||||
|
|
||||||
|
|Intel GPU| Status | Verified Model|
|
||||||
|
|-|-|-|
|
||||||
|
|Ampere Series| Support| A100|
|
||||||
|
|
||||||
|
### oneMKL for CUDA
|
||||||
|
|
||||||
|
The current oneMKL release does not contain the oneMKL cuBlas backend.
|
||||||
|
As a result for Nvidia GPU's oneMKL must be built from source.
|
||||||
|
|
||||||
|
```
|
||||||
|
git clone https://github.com/oneapi-src/oneMKL
|
||||||
|
cd oneMKL
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake -G Ninja .. -DCMAKE_CXX_COMPILER=icpx -DCMAKE_C_COMPILER=icx -DENABLE_MKLGPU_BACKEND=OFF -DENABLE_MKLCPU_BACKEND=OFF -DENABLE_CUBLAS_BACKEND=ON
|
||||||
|
ninja
|
||||||
|
// Add paths as necessary
|
||||||
|
```
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
Note:
|
Note:
|
||||||
@ -91,7 +116,7 @@ You can choose between **F16** and **F32** build. F16 is faster for long-prompt
|
|||||||
# Or, for F32:
|
# Or, for F32:
|
||||||
docker build -t llama-cpp-sycl -f .devops/main-intel.Dockerfile .
|
docker build -t llama-cpp-sycl -f .devops/main-intel.Dockerfile .
|
||||||
|
|
||||||
# Note: you can also use the ".devops/main-server.Dockerfile", which compiles the "server" example
|
# Note: you can also use the ".devops/server-intel.Dockerfile", which compiles the "server" example
|
||||||
```
|
```
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
@ -186,6 +211,9 @@ source /opt/intel/oneapi/setvars.sh
|
|||||||
# Or, for FP32:
|
# Or, for FP32:
|
||||||
cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
|
cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
|
||||||
|
|
||||||
|
# For Nvidia GPUs
|
||||||
|
cmake .. -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
|
||||||
|
|
||||||
# Build example/main only
|
# Build example/main only
|
||||||
#cmake --build . --config Release --target main
|
#cmake --build . --config Release --target main
|
||||||
|
|
||||||
@ -228,16 +256,16 @@ Run without parameter:
|
|||||||
Check the ID in startup log, like:
|
Check the ID in startup log, like:
|
||||||
|
|
||||||
```
|
```
|
||||||
found 4 SYCL devices:
|
found 6 SYCL devices:
|
||||||
Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3,
|
| | | |Compute |Max compute|Max work|Max sub| |
|
||||||
max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
|
|ID| Device Type| Name|capability|units |group |group |Global mem size|
|
||||||
Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2,
|
|--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------|
|
||||||
max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280
|
| 0|[level_zero:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 1.3| 512| 1024| 32| 16225243136|
|
||||||
Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0,
|
| 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216|
|
||||||
max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280
|
| 2| [opencl:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 3.0| 512| 1024| 32| 16225243136|
|
||||||
Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0,
|
| 3| [opencl:gpu:1]| Intel(R) UHD Graphics 770| 3.0| 32| 512| 32| 53651849216|
|
||||||
max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
|
| 4| [opencl:cpu:0]| 13th Gen Intel(R) Core(TM) i7-13700K| 3.0| 24| 8192| 64| 67064815616|
|
||||||
|
| 5| [opencl:acc:0]| Intel(R) FPGA Emulation Device| 1.2| 24|67108864| 64| 67064815616|
|
||||||
```
|
```
|
||||||
|
|
||||||
|Attribute|Note|
|
|Attribute|Note|
|
||||||
@ -245,12 +273,35 @@ found 4 SYCL devices:
|
|||||||
|compute capability 1.3|Level-zero running time, recommended |
|
|compute capability 1.3|Level-zero running time, recommended |
|
||||||
|compute capability 3.0|OpenCL running time, slower than level-zero in most cases|
|
|compute capability 3.0|OpenCL running time, slower than level-zero in most cases|
|
||||||
|
|
||||||
4. Set device ID and execute llama.cpp
4. Device selection and execution of llama.cpp

Set device ID = 0 by **GGML_SYCL_DEVICE=0**
There are two device selection modes:

- Single device: Use one device assigned by the user.
- Multiple devices: Automatically choose the devices with the same largest Max compute units.

|Device selection|Parameter|
|-|-|
|Single device|--split-mode none --main-gpu DEVICE_ID|
|Multiple devices|--split-mode layer (default)|

Examples:

- Use device 0:

```sh
GGML_SYCL_DEVICE=0 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33
ZES_ENABLE_SYSMAN=1 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm none -mg 0
```

or run by script:

```sh
./examples/sycl/run_llama2.sh 0
```

- Use multiple devices:

```sh
ZES_ENABLE_SYSMAN=1 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm layer
```

or run by script:

@ -263,12 +314,18 @@ Note:

- By default, mmap is used to read the model file. In some cases, it causes a hang; use the **--no-mmap** parameter to disable mmap() and avoid this issue.

5. Check the device ID in output
5. Verify the device ID in output

Verify that the selected GPU is shown in the output, like:

```
Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device
detect 1 SYCL GPUs: [0] with top Max compute units:512
```

Or

```
use 1 SYCL GPUs: [0] with Max compute units:512
```

## Windows
@ -329,7 +386,7 @@ a. Download & install cmake for Windows: https://cmake.org/download/
b. Download & install mingw-w64 make for Windows provided by w64devkit

- Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
- Download the 1.19.0 version of [w64devkit](https://github.com/skeeto/w64devkit/releases/download/v1.19.0/w64devkit-1.19.0.zip).
- Extract `w64devkit` on your pc.
@ -404,15 +461,16 @@ build\bin\main.exe
Check the ID in startup log, like:

```
found 4 SYCL devices:
found 6 SYCL devices:
Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3,
| | | |Compute |Max compute|Max work|Max sub| |
max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
|ID| Device Type| Name|capability|units |group |group |Global mem size|
Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2,
|--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------|
max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280
| 0|[level_zero:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 1.3| 512| 1024| 32| 16225243136|
Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0,
| 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216|
max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280
| 2| [opencl:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 3.0| 512| 1024| 32| 16225243136|
Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0,
| 3| [opencl:gpu:1]| Intel(R) UHD Graphics 770| 3.0| 32| 512| 32| 53651849216|
max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
| 4| [opencl:cpu:0]| 13th Gen Intel(R) Core(TM) i7-13700K| 3.0| 24| 8192| 64| 67064815616|
| 5| [opencl:acc:0]| Intel(R) FPGA Emulation Device| 1.2| 24|67108864| 64| 67064815616|
```
@ -421,13 +479,31 @@ found 4 SYCL devices:
|compute capability 1.3|Level-zero running time, recommended |
|compute capability 3.0|OpenCL running time, slower than level-zero in most cases|
4. Set device ID and execute llama.cpp
4. Device selection and execution of llama.cpp

Set device ID = 0 by **set GGML_SYCL_DEVICE=0**
There are two device selection modes:

- Single device: Use one device assigned by the user.
- Multiple devices: Automatically choose the devices with the same largest Max compute units.

|Device selection|Parameter|
|-|-|
|Single device|--split-mode none --main-gpu DEVICE_ID|
|Multiple devices|--split-mode layer (default)|

Examples:

- Use device 0:

```
set GGML_SYCL_DEVICE=0
build\bin\main.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0
build\bin\main.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm none -mg 0
```

- Use multiple devices:

```
build\bin\main.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm layer
```

or run by script:
@ -440,11 +516,17 @@ Note:
- By default, mmap is used to read the model file. In some cases, it causes a hang; use the **--no-mmap** parameter to disable mmap() and avoid this issue.

5. Check the device ID in output
5. Verify the device ID in output

Verify that the selected GPU is shown in the output, like:

```
Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device
detect 1 SYCL GPUs: [0] with top Max compute units:512
```

Or

```
use 1 SYCL GPUs: [0] with Max compute units:512
```
## Environment Variable
@ -463,7 +545,6 @@ Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device
|Name|Value|Function|
|-|-|-|
|GGML_SYCL_DEVICE|0 (default) or 1|Set the device id used. Check the device ids by default running output|
|GGML_SYCL_DEBUG|0 (default) or 1|Enable log function by macro: GGML_SYCL_DEBUG|
|ZES_ENABLE_SYSMAN| 0 (default) or 1|Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory.<br>Recommended to use when --split-mode = layer|
@ -481,6 +562,9 @@ Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device
## Q&A

Note: please add the prefix **[SYCL]** to the issue title, so that we can check it as soon as possible.

- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`.

  The oneAPI running environment has not been enabled.
@ -512,4 +596,4 @@ Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device
## Todo

- Support multiple cards.
- Support row layer split for multiple card runs.
43 README.md
@ -10,15 +10,18 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
### Recent API changes

- [2024 Mar 13] Add `llama_synchronize()` + `llama_context_params.n_ubatch` https://github.com/ggerganov/llama.cpp/pull/6017
- [2024 Mar 8] `llama_kv_cache_seq_rm()` returns a `bool` instead of `void`, and new `llama_n_seq_max()` returns the upper limit of acceptable `seq_id` in batches (relevant when dealing with multiple sequences) https://github.com/ggerganov/llama.cpp/pull/5328
- [2024 Mar 4] Embeddings API updated https://github.com/ggerganov/llama.cpp/pull/5796
- [2024 Mar 3] `struct llama_context_params` https://github.com/ggerganov/llama.cpp/pull/5849

### Hot topics

- The `api_like_OAI.py` script has been removed - use `server` instead ([#5766](https://github.com/ggerganov/llama.cpp/issues/5766#issuecomment-1969037761))
- Support for chat templates: [Wiki (contributions welcome)](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template)
- Support for Gemma models: https://github.com/ggerganov/llama.cpp/pull/5631
- Non-linear quantization IQ4_NL: https://github.com/ggerganov/llama.cpp/pull/5590
- Looking for contributions to improve and maintain the `server` example: https://github.com/ggerganov/llama.cpp/issues/4216

- Fix major bug in Metal batched inference https://github.com/ggerganov/llama.cpp/pull/6225
- Multi-GPU pipeline parallelism support https://github.com/ggerganov/llama.cpp/pull/6017
- Looking for contributions to add Deepseek support: https://github.com/ggerganov/llama.cpp/issues/5981
- Quantization blind testing: https://github.com/ggerganov/llama.cpp/discussions/5962
- Initial Mamba support has been added: https://github.com/ggerganov/llama.cpp/pull/5328

----
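The API changes listed above are small but breaking. Below is a minimal sketch of how calling code adapts; it assumes the `llama.h` declarations referenced in the list (the exact signatures should be checked against the current header).

```cpp
// Sketch only: assumes the llama.h declarations referenced in the list above
// (llama_synchronize, llama_n_seq_max, bool-returning llama_kv_cache_seq_rm).
#include "llama.h"
#include <cstdio>

static void prune_sequence(llama_context * ctx, llama_seq_id seq) {
    // Since 2024 Mar 8, llama_kv_cache_seq_rm() reports whether removal
    // succeeded instead of returning void.
    if (!llama_kv_cache_seq_rm(ctx, seq, /*p0=*/0, /*p1=*/-1)) {
        fprintf(stderr, "seq %d could not be removed from the KV cache\n", seq);
    }

    // llama_n_seq_max() gives the upper limit of acceptable seq_id values.
    const uint32_t n_seq_max = llama_n_seq_max(ctx);
    fprintf(stderr, "this context accepts seq_id in [0, %u)\n", n_seq_max);

    // With the 2024 Mar 13 change, llama_synchronize() waits until all
    // pending backend work for this context has finished.
    llama_synchronize(ctx);
}
```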
@ -109,6 +112,8 @@ Typically finetunes of the base models below are supported as well.
- [x] [InternLM2](https://huggingface.co/models?search=internlm2)
- [x] [CodeShell](https://github.com/WisdomShell/codeshell)
- [x] [Gemma](https://ai.google.dev/gemma)
- [x] [Mamba](https://github.com/state-spaces/mamba)
- [x] [Command-R](https://huggingface.co/CohereForAI/c4ai-command-r-v01)

**Multimodal models:**
@ -130,6 +135,7 @@ Typically finetunes of the base models below are supported as well.
- Node.js: [withcatai/node-llama-cpp](https://github.com/withcatai/node-llama-cpp)
- JS/TS (llama.cpp server client): [lgrammel/modelfusion](https://modelfusion.dev/integration/model-provider/llamacpp)
- JavaScript/Wasm (works in browser): [tangledgroup/llama-cpp-wasm](https://github.com/tangledgroup/llama-cpp-wasm)
- Typescript/Wasm (nicer API, available on npm): [ngxson/wllama](https://github.com/ngxson/wllama)
- Ruby: [yoshoku/llama_cpp.rb](https://github.com/yoshoku/llama_cpp.rb)
- Rust (nicer API): [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp)
- Rust (more direct bindings): [utilityai/llama-cpp-rs](https://github.com/utilityai/llama-cpp-rs)
@ -160,6 +166,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT)
- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal)
- [pythops/tenere](https://github.com/pythops/tenere) (AGPL)
- [RecurseChat](https://recurse.chat/) (proprietary)
- [semperai/amica](https://github.com/semperai/amica)
- [withcatai/catai](https://github.com/withcatai/catai)
- [Mobile-Artificial-Intelligence/maid](https://github.com/Mobile-Artificial-Intelligence/maid) (MIT)
@ -901,6 +908,9 @@ First, install the essential packages for termux:
pkg install clang wget git cmake
```
Second, obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake:

You can execute the following commands on your computer to avoid downloading the NDK to your mobile. Of course, you can also do this in Termux.

```
$ mkdir build-android
$ cd build-android
@ -909,7 +919,28 @@ $ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROI
$ make
```
Install [termux](https://termux.dev/) on your device and run `termux-setup-storage` to get access to your SD card.
Finally, copy the `llama` binary and the model files to your device storage. Here is a demo of an interactive session running on Pixel 5 phone:
Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission:

(Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`)
```
$cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/
$cd /data/data/com.termux/files/home/bin
$chmod +x ./*
```

Download model [llama-2-7b-chat.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_M.gguf), and push it to `/sdcard/llama.cpp/`, then move it to `/data/data/com.termux/files/home/model/`

```
$mv /sdcard/llama.cpp/llama-2-7b-chat.Q4_K_M.gguf /data/data/com.termux/files/home/model/
```

Now, you can start chatting:
```
$cd /data/data/com.termux/files/home/bin
$./main -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml
```

Here is a demo of an interactive session running on Pixel 5 phone:

https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4
16 build.zig
@ -115,24 +115,26 @@ pub fn build(b: *std.build.Builder) !void {
    const ggml_alloc = make.obj("ggml-alloc", "ggml-alloc.c");
    const ggml_backend = make.obj("ggml-backend", "ggml-backend.c");
    const ggml_quants = make.obj("ggml-quants", "ggml-quants.c");
    const unicode = make.obj("unicode", "unicode.cpp");
    const llama = make.obj("llama", "llama.cpp");
    const buildinfo = make.obj("common", "common/build-info.cpp");
    const common = make.obj("common", "common/common.cpp");
    const console = make.obj("console", "common/console.cpp");
    const sampling = make.obj("sampling", "common/sampling.cpp");
    const grammar_parser = make.obj("grammar-parser", "common/grammar-parser.cpp");
    const json_schema_to_grammar = make.obj("json-schema-to-grammar", "common/json-schema-to-grammar.cpp");
    const train = make.obj("train", "common/train.cpp");
    const clip = make.obj("clip", "examples/llava/clip.cpp");
    const llava = make.obj("llava", "examples/llava/llava.cpp");

    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, console, grammar_parser });
    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, sampling, console, grammar_parser });
    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, train });
    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, train });

    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, grammar_parser, clip, llava });
    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, sampling, grammar_parser, json_schema_to_grammar, clip, llava });
    if (server.target.isWindows()) {
        server.linkSystemLibrary("ws2_32");
    }
@ -45,7 +45,8 @@ fi
if [ ! -z ${GG_BUILD_SYCL} ]; then
    if [ -z ${ONEAPI_ROOT} ]; then
        echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:\n source /opt/intel/oneapi/setvars.sh"
        echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:"
        echo "source /opt/intel/oneapi/setvars.sh"
        exit 1
    fi
@ -47,6 +47,8 @@ if (BUILD_SHARED_LIBS)
    set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
endif()

set(TARGET json-schema-to-grammar)
add_library(${TARGET} OBJECT json-schema-to-grammar.cpp json-schema-to-grammar.h)

set(TARGET common)
@ -60,6 +62,7 @@ add_library(${TARGET} STATIC
    console.cpp
    grammar-parser.h
    grammar-parser.cpp
    json.hpp
    train.h
    train.cpp
    )
@ -68,6 +71,17 @@ if (BUILD_SHARED_LIBS)
    set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
endif()

set(LLAMA_COMMON_EXTRA_LIBS build_info)

# Use curl to download model url
if (LLAMA_CURL)
    find_package(CURL REQUIRED)
    add_definitions(-DLLAMA_USE_CURL)
    include_directories(${CURL_INCLUDE_DIRS})
    find_library(CURL_LIBRARY curl REQUIRED)
    set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} ${CURL_LIBRARY})
endif ()

target_include_directories(${TARGET} PUBLIC .)
target_compile_features(${TARGET} PUBLIC cxx_std_11)
target_link_libraries(${TARGET} PRIVATE build_info PUBLIC llama)
target_link_libraries(${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama)
2318 common/common.cpp (file diff suppressed because it is too large)
@ -37,10 +37,13 @@ extern char const *LLAMA_COMMIT;
extern char const *LLAMA_COMPILER;
extern char const *LLAMA_BUILD_TARGET;

struct llama_control_vector_load_info;

int32_t get_num_physical_cores();

//
// CLI argument parsing
//
int32_t get_num_physical_cores();

struct gpt_params {
    uint32_t seed = LLAMA_DEFAULT_SEED; // RNG seed
@ -51,7 +54,8 @@ struct gpt_params {
    int32_t n_threads_batch_draft = -1;
    int32_t n_predict = -1;   // new tokens to predict
    int32_t n_ctx     = 512;  // context size
    int32_t n_batch   = 512;  // batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_batch   = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_ubatch  = 512;  // physical batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_keep    = 0;    // number of tokens to keep from initial prompt
    int32_t n_draft   = 5;    // number of tokens to draft during speculative decoding
    int32_t n_chunks  = -1;   // max number of chunks to process (-1 = unlimited)
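The `n_batch`/`n_ubatch` split above mirrors the new distinction between a logical batch (how many tokens may be submitted per decode call) and a physical micro-batch (how many are actually processed at once). A minimal sketch of how this maps onto the context parameters, assuming the `llama_context_params` fields introduced alongside this change:

```cpp
// Sketch only: field names assume the llama.h that accompanies this change.
#include "llama.h"

static llama_context_params make_ctx_params() {
    llama_context_params cparams = llama_context_default_params();

    cparams.n_ctx    = 512;  // context size
    cparams.n_batch  = 2048; // logical batch: max tokens per llama_decode() call
    cparams.n_ubatch = 512;  // physical batch: max tokens processed at once

    // The logical batch can be larger than the physical one; decoding
    // internally splits it into micro-batches of at most n_ubatch tokens.
    return cparams;
}
```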
@ -87,6 +91,9 @@ struct gpt_params {
    std::string model             = "models/7B/ggml-model-f16.gguf"; // model path
    std::string model_draft       = "";        // draft model for speculative decoding
    std::string model_alias       = "unknown"; // model alias
    std::string model_url         = "";        // model url to download
    std::string hf_repo           = "";        // HF repo
    std::string hf_file           = "";        // HF file
    std::string prompt            = "";
    std::string prompt_file       = "";        // store the external prompt file name
    std::string path_prompt_cache = "";        // path to file for saving/loading prompt eval state
@ -102,6 +109,11 @@ struct gpt_params {
    std::vector<std::tuple<std::string, float>> lora_adapter; // lora adapter path with user defined scale
    std::string lora_base = ""; // base model path for the lora adapter

    std::vector<llama_control_vector_load_info> control_vectors; // control vector with user defined scale

    int32_t control_vector_layer_start = -1; // layer range for control vector
    int32_t control_vector_layer_end   = -1; // layer range for control vector

    int ppl_stride      = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
    int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
                             // (which is more convenient to use for plotting)
@ -129,7 +141,7 @@ struct gpt_params {
    bool interactive_first = false; // wait for user input immediately
    bool multiline_input   = false; // reverse the usage of `\`
    bool simple_io         = false; // improves compatibility with subprocesses and limited consoles
    bool cont_batching     = false; // insert new sequences for decoding on-the-fly
    bool cont_batching     = true;  // insert new sequences for decoding on-the-fly

    bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
    bool ignore_eos       = false; // ignore generated EOS tokens
@ -182,6 +194,9 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
struct llama_model_params   llama_model_params_from_gpt_params  (const gpt_params & params);
struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);

struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const struct llama_model_params & params);
struct llama_model * llama_load_model_from_hf (const char * repo, const char * file, const char * path_model, const struct llama_model_params & params);

// Batch utils

void llama_batch_clear(struct llama_batch & batch);
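A minimal sketch of how the two new download helpers declared above might be called. The URL is a hypothetical placeholder, and the build is assumed to have `LLAMA_CURL` enabled (otherwise these helpers simply fail and return NULL):

```cpp
// Sketch only: uses the declarations above; names of the remote model and
// local cache paths are placeholders, not recommendations.
#include "common.h"
#include "llama.h"

static llama_model * fetch_model_example() {
    llama_model_params mparams = llama_model_default_params();

    // Direct URL download, cached at the given local path.
    llama_model * model = llama_load_model_from_url(
        "https://example.com/models/ggml-model-q4_0.gguf", // hypothetical URL
        "models/ggml-model-q4_0.gguf",
        mparams);

    if (model == NULL) {
        // Hugging Face variant: repo + file instead of a full URL.
        model = llama_load_model_from_hf(
            "TheBloke/Llama-2-7B-Chat-GGUF",       // repo
            "llama-2-7b-chat.Q4_K_M.gguf",         // file
            "models/llama-2-7b-chat.Q4_K_M.gguf",  // local path
            mparams);
    }
    return model;
}
```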
@ -260,3 +275,32 @@ void dump_kv_cache_view(const llama_kv_cache_view & view, int row_size = 80);
// Dump the KV cache view showing individual sequences in each cell (long output).
void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40);

//
// Embedding utils
//

void llama_embd_normalize(const float * inp, float * out, int n);

float llama_embd_similarity_cos(const float * embd1, const float * embd2, int n);

//
// Control vector utils
//

struct llama_control_vector_data {
    int n_embd;

    // stores data for layers [1, n_layer] where n_layer = data.size() / n_embd
    std::vector<float> data;
};

struct llama_control_vector_load_info {
    float strength;

    std::string fname;
};

// Load control vectors, scale each by strength, and add them together.
// On error, returns {-1, empty}
llama_control_vector_data llama_control_vector_load(const std::vector<llama_control_vector_load_info> & load_infos);
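A minimal sketch exercising the embedding and control-vector helpers declared above; the control-vector file name is a hypothetical placeholder:

```cpp
// Sketch only: llama_control_vector_load() is the helper added in this change
// and returns {-1, empty} on error; "control_vector.gguf" is a placeholder.
#include "common.h"
#include <cstdio>
#include <vector>

static void embed_and_control_example(int n_embd) {
    // Normalize two raw embeddings and compare them.
    std::vector<float> a(n_embd, 0.5f), b(n_embd, 0.25f);
    std::vector<float> na(n_embd), nb(n_embd);
    llama_embd_normalize(a.data(), na.data(), n_embd);
    llama_embd_normalize(b.data(), nb.data(), n_embd);
    printf("cosine similarity: %f\n", llama_embd_similarity_cos(na.data(), nb.data(), n_embd));

    // Load a control vector, scaled by 0.8, ready to be applied to a context.
    std::vector<llama_control_vector_load_info> infos = {
        { 0.8f, "control_vector.gguf" }, // hypothetical file name
    };
    llama_control_vector_data cvec = llama_control_vector_load(infos);
    if (cvec.n_embd == -1) {
        fprintf(stderr, "failed to load control vector\n");
    }
}
```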
@ -278,6 +278,22 @@ namespace grammar_parser {
        while (*pos) {
            pos = parse_rule(state, pos);
        }
        // Validate the state to ensure that all rules are defined
        for (const auto & rule : state.rules) {
            for (const auto & elem : rule) {
                if (elem.type == LLAMA_GRETYPE_RULE_REF) {
                    // Ensure that the rule at that location exists
                    if (elem.value >= state.rules.size() || state.rules[elem.value].empty()) {
                        // Get the name of the rule that is missing
                        for (const auto & kv : state.symbol_ids) {
                            if (kv.second == elem.value) {
                                throw std::runtime_error("Undefined rule identifier '" + kv.first + "'");
                            }
                        }
                    }
                }
            }
        }
        return state;
    } catch (const std::exception & err) {
        fprintf(stderr, "%s: error parsing grammar: %s\n", __func__, err.what());
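A minimal sketch of the behaviour added by this validation pass, assuming the existing `grammar_parser::parse()` entry point: a grammar that references an undefined rule is now rejected at parse time instead of failing later during sampling.

```cpp
// Sketch only: 'answer' is referenced but never defined, so the validation
// above makes parse() report "Undefined rule identifier 'answer'" and return
// an empty rule set (the error is printed by the existing catch block).
#include "grammar-parser.h"
#include <cstdio>

static void parse_bad_grammar_example() {
    const char * grammar_str =
        "root ::= \"yes \" answer\n"; // 'answer' is never defined

    grammar_parser::parse_state state = grammar_parser::parse(grammar_str);

    if (state.rules.empty()) {
        fprintf(stderr, "grammar rejected as expected\n");
    }
}
```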
|
721 common/json-schema-to-grammar.cpp (new file)
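The new `common/json-schema-to-grammar.cpp` below converts a JSON schema into a GBNF grammar. A minimal usage sketch, assuming the companion header declares `json_schema_to_grammar(const nlohmann::ordered_json &)` (as the examples use it); the schema shown is illustrative only:

```cpp
// Sketch only: assumes json-schema-to-grammar.h exposes
//   std::string json_schema_to_grammar(const nlohmann::ordered_json & schema);
#include "json-schema-to-grammar.h"
#include "json.hpp"
#include <cstdio>

int main() {
    nlohmann::ordered_json schema = nlohmann::ordered_json::parse(R"({
        "type": "object",
        "properties": {
            "name": { "type": "string" },
            "age":  { "type": "integer" }
        },
        "required": ["name"]
    })");

    // Print the generated GBNF grammar, which can then be passed to the
    // sampler via the existing --grammar machinery.
    const std::string grammar = json_schema_to_grammar(schema);
    printf("%s\n", grammar.c_str());
    return 0;
}
```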
@ -0,0 +1,721 @@
#include "json-schema-to-grammar.h"
#include <algorithm>
#include <fstream>
#include <map>
#include <regex>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using json = nlohmann::ordered_json;

const std::string SPACE_RULE = "\" \"?";

std::unordered_map<std::string, std::string> PRIMITIVE_RULES = {
|
||||||
|
{"boolean", "(\"true\" | \"false\") space"},
|
||||||
|
{"number", "(\"-\"? ([0-9] | [1-9] [0-9]*)) (\".\" [0-9]+)? ([eE] [-+]? [0-9]+)? space"},
|
||||||
|
{"integer", "(\"-\"? ([0-9] | [1-9] [0-9]*)) space"},
|
||||||
|
{"value", "object | array | string | number | boolean"},
|
||||||
|
{"object", "\"{\" space ( string \":\" space value (\",\" space string \":\" space value)* )? \"}\" space"},
|
||||||
|
{"array", "\"[\" space ( value (\",\" space value)* )? \"]\" space"},
|
||||||
|
{"uuid", "\"\\\"\" [0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F] "
|
||||||
|
"\"-\" [0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F] "
|
||||||
|
"\"-\" [0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F] "
|
||||||
|
"\"-\" [0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F] "
|
||||||
|
"\"-\" [0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F] \"\\\"\" space"},
|
||||||
|
{"string", " \"\\\"\" (\n"
|
||||||
|
" [^\"\\\\] |\n"
|
||||||
|
" \"\\\\\" ([\"\\\\/bfnrt] | \"u\" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F])\n"
|
||||||
|
" )* \"\\\"\" space"},
|
||||||
|
{"null", "\"null\" space"}
|
||||||
|
};
|
||||||
|
std::vector<std::string> OBJECT_RULE_NAMES = {"object", "array", "string", "number", "boolean", "null", "value"};
|
||||||
|
|
||||||
|
std::unordered_map<std::string, std::string> DATE_RULES = {
|
||||||
|
{"date", "[0-9] [0-9] [0-9] [0-9] \"-\" ( \"0\" [1-9] | \"1\" [0-2] ) \"-\" ( \"0\" [1-9] | [1-2] [0-9] | \"3\" [0-1] )"},
|
||||||
|
{"time", "([01] [0-9] | \"2\" [0-3]) \":\" [0-5] [0-9] \":\" [0-5] [0-9] ( \".\" [0-9] [0-9] [0-9] )? ( \"Z\" | ( \"+\" | \"-\" ) ( [01] [0-9] | \"2\" [0-3] ) \":\" [0-5] [0-9] )"},
|
||||||
|
{"date-time", "date \"T\" time"},
|
||||||
|
{"date-string", "\"\\\"\" date \"\\\"\" space"},
|
||||||
|
{"time-string", "\"\\\"\" time \"\\\"\" space"},
|
||||||
|
{"date-time-string", "\"\\\"\" date-time \"\\\"\" space"}
|
||||||
|
};
|
||||||
|
|
||||||
|
static bool is_reserved_name(const std::string & name) {
|
||||||
|
static std::unordered_set<std::string> RESERVED_NAMES;
|
||||||
|
if (RESERVED_NAMES.empty()) {
|
||||||
|
RESERVED_NAMES.insert("root");
|
||||||
|
for (const auto &p : PRIMITIVE_RULES) RESERVED_NAMES.insert(p.first);
|
||||||
|
for (const auto &p : DATE_RULES) RESERVED_NAMES.insert(p.first);
|
||||||
|
}
|
||||||
|
return RESERVED_NAMES.find(name) != RESERVED_NAMES.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::regex INVALID_RULE_CHARS_RE("[^a-zA-Z0-9-]+");
|
||||||
|
std::regex GRAMMAR_LITERAL_ESCAPE_RE("[\r\n\"]");
|
||||||
|
std::regex GRAMMAR_RANGE_LITERAL_ESCAPE_RE("[\r\n\"\\]\\-\\\\]");
|
||||||
|
std::unordered_map<char, std::string> GRAMMAR_LITERAL_ESCAPES = {
|
||||||
|
{'\r', "\\r"}, {'\n', "\\n"}, {'"', "\\\""}, {'-', "\\-"}, {']', "\\]"}
|
||||||
|
};
|
||||||
|
|
||||||
|
std::unordered_set<char> NON_LITERAL_SET = {'|', '.', '(', ')', '[', ']', '{', '}', '*', '+', '?'};
|
||||||
|
std::unordered_set<char> ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = {'[', ']', '(', ')', '|', '{', '}', '*', '+', '?'};
|
||||||
|
|
||||||
|
template <typename Iterator>
|
||||||
|
std::string join(Iterator begin, Iterator end, const std::string & separator) {
|
||||||
|
std::ostringstream result;
|
||||||
|
if (begin != end) {
|
||||||
|
result << *begin;
|
||||||
|
for (Iterator it = begin + 1; it != end; ++it) {
|
||||||
|
result << separator << *it;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::vector<std::string> split(const std::string & str, const std::string & delimiter) {
|
||||||
|
std::vector<std::string> tokens;
|
||||||
|
size_t start = 0;
|
||||||
|
size_t end = str.find(delimiter);
|
||||||
|
|
||||||
|
while (end != std::string::npos) {
|
||||||
|
tokens.push_back(str.substr(start, end - start));
|
||||||
|
start = end + delimiter.length();
|
||||||
|
end = str.find(delimiter, start);
|
||||||
|
}
|
||||||
|
|
||||||
|
tokens.push_back(str.substr(start));
|
||||||
|
|
||||||
|
return tokens;
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string repeat(const std::string & str, size_t n) {
|
||||||
|
if (n == 0) {
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string result;
|
||||||
|
result.reserve(str.length() * n);
|
||||||
|
|
||||||
|
for (size_t i = 0; i < n; ++i) {
|
||||||
|
result += str;
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string replacePattern(const std::string & input, const std::regex & regex, const std::function<std::string(const std::smatch &)> & replacement) {
|
||||||
|
std::smatch match;
|
||||||
|
std::string result;
|
||||||
|
|
||||||
|
std::string::const_iterator searchStart(input.cbegin());
|
||||||
|
std::string::const_iterator searchEnd(input.cend());
|
||||||
|
|
||||||
|
while (std::regex_search(searchStart, searchEnd, match, regex)) {
|
||||||
|
result.append(searchStart, searchStart + match.position());
|
||||||
|
result.append(replacement(match));
|
||||||
|
searchStart = match.suffix().first;
|
||||||
|
}
|
||||||
|
|
||||||
|
result.append(searchStart, searchEnd);
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string format_literal(const std::string & literal) {
|
||||||
|
std::string escaped = replacePattern(literal, GRAMMAR_LITERAL_ESCAPE_RE, [&](const std::smatch & match) {
|
||||||
|
char c = match.str()[0];
|
||||||
|
return GRAMMAR_LITERAL_ESCAPES.at(c);
|
||||||
|
});
|
||||||
|
return "\"" + escaped + "\"";
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class SchemaConverter {
|
||||||
|
private:
|
||||||
|
std::function<json(const std::string &)> _fetch_json;
|
||||||
|
bool _dotall;
|
||||||
|
std::map<std::string, std::string> _rules;
|
||||||
|
std::unordered_map<std::string, json> _refs;
|
||||||
|
std::unordered_set<std::string> _refs_being_resolved;
|
||||||
|
std::vector<std::string> _errors;
|
||||||
|
std::vector<std::string> _warnings;
|
||||||
|
|
||||||
|
std::string _add_rule(const std::string & name, const std::string & rule) {
|
||||||
|
std::string esc_name = regex_replace(name, INVALID_RULE_CHARS_RE, "-");
|
||||||
|
if (_rules.find(esc_name) == _rules.end() || _rules[esc_name] == rule) {
|
||||||
|
_rules[esc_name] = rule;
|
||||||
|
return esc_name;
|
||||||
|
} else {
|
||||||
|
int i = 0;
|
||||||
|
while (_rules.find(esc_name + std::to_string(i)) != _rules.end() && _rules[esc_name + std::to_string(i)] != rule) {
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
std::string key = esc_name + std::to_string(i);
|
||||||
|
_rules[key] = rule;
|
||||||
|
return key;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string _generate_union_rule(const std::string & name, const std::vector<json> & alt_schemas) {
|
||||||
|
std::vector<std::string> rules;
|
||||||
|
for (size_t i = 0; i < alt_schemas.size(); i++) {
|
||||||
|
rules.push_back(visit(alt_schemas[i], name + (name.empty() ? "alternative-" : "-") + std::to_string(i)));
|
||||||
|
}
|
||||||
|
return join(rules.begin(), rules.end(), " | ");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string _visit_pattern(const std::string & pattern, const std::string & name) {
|
||||||
|
if (!(pattern.front() == '^' && pattern.back() == '$')) {
|
||||||
|
_errors.push_back("Pattern must start with '^' and end with '$'");
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
std::string sub_pattern = pattern.substr(1, pattern.length() - 2);
|
||||||
|
std::unordered_map<std::string, std::string> sub_rule_ids;
|
||||||
|
|
||||||
|
size_t i = 0;
|
||||||
|
size_t length = sub_pattern.length();
|
||||||
|
|
||||||
|
using literal_or_rule = std::pair<std::string, bool>;
|
||||||
|
auto to_rule = [&](const literal_or_rule & ls) {
|
||||||
|
auto is_literal = ls.second;
|
||||||
|
auto s = ls.first;
|
||||||
|
return is_literal ? "\"" + s + "\"" : s;
|
||||||
|
};
|
||||||
|
std::function<literal_or_rule()> transform = [&]() -> literal_or_rule {
|
||||||
|
size_t start = i;
|
||||||
|
std::vector<literal_or_rule> seq;
|
||||||
|
|
||||||
|
auto get_dot = [&]() {
|
||||||
|
std::string rule;
|
||||||
|
if (_dotall) {
|
||||||
|
rule = "[\\U00000000-\\U0010FFFF]";
|
||||||
|
} else {
|
||||||
|
rule = "[\\U00000000-\\x09\\x0B\\x0C\\x0E-\\U0010FFFF]";
|
||||||
|
}
|
||||||
|
return _add_rule("dot", rule);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Joins the sequence, merging consecutive literals together.
|
||||||
|
auto join_seq = [&]() {
|
||||||
|
std::vector<literal_or_rule> ret;
|
||||||
|
|
||||||
|
std::string literal;
|
||||||
|
auto flush_literal = [&]() {
|
||||||
|
if (literal.empty()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
ret.push_back(std::make_pair(literal, true));
|
||||||
|
literal.clear();
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
|
||||||
|
for (const auto & item : seq) {
|
||||||
|
auto is_literal = item.second;
|
||||||
|
if (is_literal) {
|
||||||
|
literal += item.first;
|
||||||
|
} else {
|
||||||
|
flush_literal();
|
||||||
|
ret.push_back(item);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
flush_literal();
|
||||||
|
|
||||||
|
std::vector<std::string> results;
|
||||||
|
for (const auto & item : ret) {
|
||||||
|
results.push_back(to_rule(item));
|
||||||
|
}
|
||||||
|
return std::make_pair(join(results.begin(), results.end(), " "), false);
|
||||||
|
};
|
||||||
|
|
||||||
|
while (i < length) {
|
||||||
|
char c = sub_pattern[i];
|
||||||
|
if (c == '.') {
|
||||||
|
seq.push_back(std::make_pair(get_dot(), false));
|
||||||
|
i++;
|
||||||
|
} else if (c == '(') {
|
||||||
|
i++;
|
||||||
|
if (i < length) {
|
||||||
|
if (sub_pattern[i] == '?') {
|
||||||
|
_warnings.push_back("Unsupported pattern syntax");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
seq.push_back(std::make_pair("(" + to_rule(transform()) + ")", false));
|
||||||
|
} else if (c == ')') {
|
||||||
|
i++;
|
||||||
|
if (start > 0 && sub_pattern[start - 1] != '(') {
|
||||||
|
_errors.push_back("Unbalanced parentheses");
|
||||||
|
}
|
||||||
|
return join_seq();
|
||||||
|
} else if (c == '[') {
|
||||||
|
std::string square_brackets = std::string(1, c);
|
||||||
|
i++;
|
||||||
|
while (i < length && sub_pattern[i] != ']') {
|
||||||
|
if (sub_pattern[i] == '\\') {
|
||||||
|
square_brackets += sub_pattern.substr(i, 2);
|
||||||
|
i += 2;
|
||||||
|
} else {
|
||||||
|
square_brackets += sub_pattern[i];
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (i >= length) {
|
||||||
|
_errors.push_back("Unbalanced square brackets");
|
||||||
|
}
|
||||||
|
square_brackets += ']';
|
||||||
|
i++;
|
||||||
|
seq.push_back(std::make_pair(square_brackets, false));
|
||||||
|
} else if (c == '|') {
|
||||||
|
seq.push_back(std::make_pair("|", false));
|
||||||
|
i++;
|
||||||
|
} else if (c == '*' || c == '+' || c == '?') {
|
||||||
|
seq.back() = std::make_pair(to_rule(seq.back()) + c, false);
|
||||||
|
i++;
|
||||||
|
} else if (c == '{') {
|
||||||
|
std::string curly_brackets = std::string(1, c);
|
||||||
|
i++;
|
||||||
|
while (i < length && sub_pattern[i] != '}') {
|
||||||
|
curly_brackets += sub_pattern[i];
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
if (i >= length) {
|
||||||
|
_errors.push_back("Unbalanced curly brackets");
|
||||||
|
}
|
||||||
|
curly_brackets += '}';
|
||||||
|
i++;
|
||||||
|
auto nums = split(curly_brackets.substr(1, curly_brackets.length() - 2), ",");
|
||||||
|
int min_times = 0;
|
||||||
|
int max_times = std::numeric_limits<int>::max();
|
||||||
|
try {
|
||||||
|
if (nums.size() == 1) {
|
||||||
|
min_times = max_times = std::stoi(nums[0]);
|
||||||
|
} else if (nums.size() != 2) {
|
||||||
|
_errors.push_back("Wrong number of values in curly brackets");
|
||||||
|
} else {
|
||||||
|
if (!nums[0].empty()) {
|
||||||
|
min_times = std::stoi(nums[0]);
|
||||||
|
}
|
||||||
|
if (!nums[1].empty()) {
|
||||||
|
max_times = std::stoi(nums[1]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (const std::invalid_argument & e) {
|
||||||
|
_errors.push_back("Invalid number in curly brackets");
|
||||||
|
return std::make_pair("", false);
|
||||||
|
}
|
||||||
|
auto &last = seq.back();
|
||||||
|
auto &sub = last.first;
|
||||||
|
auto sub_is_literal = last.second;
|
||||||
|
|
||||||
|
if (min_times == 0 && max_times == std::numeric_limits<int>::max()) {
|
||||||
|
sub += "*";
|
||||||
|
} else if (min_times == 0 && max_times == 1) {
|
||||||
|
sub += "?";
|
||||||
|
} else if (min_times == 1 && max_times == std::numeric_limits<int>::max()) {
|
||||||
|
sub += "+";
|
||||||
|
} else {
|
||||||
|
if (!sub_is_literal) {
|
||||||
|
std::string & sub_id = sub_rule_ids[sub];
|
||||||
|
if (sub_id.empty()) {
|
||||||
|
sub_id = _add_rule(name + "-" + std::to_string(sub_rule_ids.size()), sub);
|
||||||
|
}
|
||||||
|
sub = sub_id;
|
||||||
|
}
|
||||||
|
std::string result;
|
||||||
|
if (sub_is_literal && min_times > 0) {
|
||||||
|
result = "\"" + repeat(sub.substr(1, sub.length() - 2), min_times) + "\"";
|
||||||
|
} else {
|
||||||
|
for (int j = 0; j < min_times; j++) {
|
||||||
|
if (j > 0) {
|
||||||
|
result += " ";
|
||||||
|
}
|
||||||
|
result += sub;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (min_times > 0 && min_times < max_times) {
|
||||||
|
result += " ";
|
||||||
|
}
|
||||||
|
if (max_times == std::numeric_limits<int>::max()) {
|
||||||
|
result += sub + "*";
|
||||||
|
} else {
|
||||||
|
for (int j = min_times; j < max_times; j++) {
|
||||||
|
if (j > min_times) {
|
||||||
|
result += " ";
|
||||||
|
}
|
||||||
|
result += sub + "?";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
seq.back().first = result;
|
||||||
|
seq.back().second = false;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
std::string literal;
|
||||||
|
auto is_non_literal = [&](char c) {
|
||||||
|
return NON_LITERAL_SET.find(c) != NON_LITERAL_SET.end();
|
||||||
|
};
|
||||||
|
while (i < length) {
|
||||||
|
if (sub_pattern[i] == '\\' && i < length - 1) {
|
||||||
|
char next = sub_pattern[i + 1];
|
||||||
|
if (ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS.find(next) != ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS.end()) {
|
||||||
|
i++;
|
||||||
|
literal += sub_pattern[i];
|
||||||
|
i++;
|
||||||
|
} else {
|
||||||
|
literal += sub_pattern.substr(i, 2);
|
||||||
|
i += 2;
|
||||||
|
}
|
||||||
|
} else if (sub_pattern[i] == '"') {
|
||||||
|
literal += "\\\"";
|
||||||
|
i++;
|
||||||
|
} else if (!is_non_literal(sub_pattern[i]) &&
|
||||||
|
(i == length - 1 || literal.empty() || sub_pattern[i + 1] == '.' || !is_non_literal(sub_pattern[i + 1]))) {
|
||||||
|
literal += sub_pattern[i];
|
||||||
|
i++;
|
||||||
|
} else {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!literal.empty()) {
|
||||||
|
seq.push_back(std::make_pair(literal, true));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return join_seq();
|
||||||
|
};
|
||||||
|
return _add_rule(name, "\"\\\"\" " + to_rule(transform()) + " \"\\\"\" space");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string _resolve_ref(const std::string & ref) {
|
||||||
|
std::string ref_name = ref.substr(ref.find_last_of('/') + 1);
|
||||||
|
if (_rules.find(ref_name) == _rules.end() && _refs_being_resolved.find(ref) == _refs_being_resolved.end()) {
|
||||||
|
_refs_being_resolved.insert(ref);
|
||||||
|
json resolved = _refs[ref];
|
||||||
|
ref_name = visit(resolved, ref_name);
|
||||||
|
_refs_being_resolved.erase(ref);
|
||||||
|
}
|
||||||
|
return ref_name;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string _build_object_rule(
|
||||||
|
const std::vector<std::pair<std::string, json>> & properties,
|
||||||
|
const std::unordered_set<std::string> & required,
|
||||||
|
const std::string & name,
|
||||||
|
const json & additional_properties)
|
||||||
|
{
|
||||||
|
std::vector<std::string> required_props;
|
||||||
|
std::vector<std::string> optional_props;
|
||||||
|
std::unordered_map<std::string, std::string> prop_kv_rule_names;
|
||||||
|
for (const auto & kv : properties) {
|
||||||
|
const auto &prop_name = kv.first;
|
||||||
|
const auto &prop_schema = kv.second;
|
||||||
|
|
||||||
|
std::string prop_rule_name = visit(prop_schema, name + (name.empty() ? "" : "-") + prop_name);
|
||||||
|
prop_kv_rule_names[prop_name] = _add_rule(
|
||||||
|
name + (name.empty() ? "" : "-") + prop_name + "-kv",
|
||||||
|
format_literal(json(prop_name).dump()) + " space \":\" space " + prop_rule_name
|
||||||
|
);
|
||||||
|
if (required.find(prop_name) != required.end()) {
|
||||||
|
required_props.push_back(prop_name);
|
||||||
|
} else {
|
||||||
|
optional_props.push_back(prop_name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (additional_properties.is_object() || (additional_properties.is_boolean() && additional_properties.get<bool>())) {
|
||||||
|
std::string sub_name = name + (name.empty() ? "" : "-") + "additional";
|
||||||
|
std::string value_rule = visit(additional_properties.is_object() ? additional_properties : json::object(), sub_name + "-value");
|
||||||
|
std::string kv_rule = _add_rule(sub_name + "-kv", _add_rule("string", PRIMITIVE_RULES.at("string")) + " \":\" space " + value_rule);
|
||||||
|
prop_kv_rule_names["*"] = kv_rule;
|
||||||
|
optional_props.push_back("*");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string rule = "\"{\" space ";
|
||||||
|
for (size_t i = 0; i < required_props.size(); i++) {
|
||||||
|
if (i > 0) {
|
||||||
|
rule += " \",\" space ";
|
||||||
|
}
|
||||||
|
rule += prop_kv_rule_names[required_props[i]];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!optional_props.empty()) {
|
||||||
|
rule += " (";
|
||||||
|
if (!required_props.empty()) {
|
||||||
|
rule += " \",\" space ( ";
|
||||||
|
}
|
||||||
|
|
||||||
|
std::function<std::string(const std::vector<std::string> &, bool)> get_recursive_refs = [&](const std::vector<std::string> & ks, bool first_is_optional) {
|
||||||
|
std::string res;
|
||||||
|
if (ks.empty()) {
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
std::string k = ks[0];
|
||||||
|
std::string kv_rule_name = prop_kv_rule_names[k];
|
||||||
|
if (k == "*") {
|
||||||
|
res = _add_rule(
|
||||||
|
name + (name.empty() ? "" : "-") + "additional-kvs",
|
||||||
|
kv_rule_name + " ( \",\" space " + kv_rule_name + " )*"
|
||||||
|
);
|
||||||
|
} else if (first_is_optional) {
|
||||||
|
res = "( \",\" space " + kv_rule_name + " )?";
|
||||||
|
} else {
|
||||||
|
res = kv_rule_name;
|
||||||
|
}
|
||||||
|
if (ks.size() > 1) {
|
||||||
|
res += " " + _add_rule(
|
||||||
|
name + (name.empty() ? "" : "-") + k + "-rest",
|
||||||
|
get_recursive_refs(std::vector<std::string>(ks.begin() + 1, ks.end()), true)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
};
|
||||||
|
|
||||||
|
for (size_t i = 0; i < optional_props.size(); i++) {
|
||||||
|
if (i > 0) {
|
||||||
|
rule += " | ";
|
||||||
|
}
|
||||||
|
rule += get_recursive_refs(std::vector<std::string>(optional_props.begin() + i, optional_props.end()), false);
|
||||||
|
}
|
||||||
|
if (!required_props.empty()) {
|
||||||
|
rule += " )";
|
||||||
|
}
|
||||||
|
rule += " )?";
|
||||||
|
}
|
||||||
|
|
||||||
|
rule += " \"}\" space";
|
||||||
|
|
||||||
|
return rule;
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
SchemaConverter(
|
||||||
|
const std::function<json(const std::string &)> & fetch_json,
|
||||||
|
bool dotall)
|
||||||
|
: _fetch_json(fetch_json), _dotall(dotall)
|
||||||
|
{
|
||||||
|
_rules["space"] = SPACE_RULE;
|
||||||
|
}
|
||||||
|
|
||||||
|
void resolve_refs(json & schema, const std::string & url) {
|
||||||
|
/*
|
||||||
|
* Resolves all $ref fields in the given schema, fetching any remote schemas,
|
||||||
|
* replacing each $ref with absolute reference URL and populates _refs with the
|
||||||
|
* respective referenced (sub)schema dictionaries.
|
||||||
|
*/
|
||||||
|
std::function<void(json &)> visit_refs = [&](json & n) {
|
||||||
|
if (n.is_array()) {
|
||||||
|
for (auto & x : n) {
|
||||||
|
visit_refs(x);
|
||||||
|
}
|
||||||
|
} else if (n.is_object()) {
|
||||||
|
if (n.contains("$ref")) {
|
||||||
|
std::string ref = n["$ref"];
|
||||||
|
if (_refs.find(ref) == _refs.end()) {
|
||||||
|
json target;
|
||||||
|
if (ref.find("https://") == 0) {
|
||||||
|
std::string base_url = ref.substr(0, ref.find('#'));
|
||||||
|
auto it = _refs.find(base_url);
|
||||||
|
if (it != _refs.end()) {
|
||||||
|
target = it->second;
|
||||||
|
} else {
|
||||||
|
// Fetch the referenced schema and resolve its refs
|
||||||
|
auto referenced = _fetch_json(ref);
|
||||||
|
resolve_refs(referenced, base_url);
|
||||||
|
_refs[base_url] = referenced;
|
||||||
|
}
|
||||||
|
if (ref.find('#') == std::string::npos || ref.substr(ref.find('#') + 1).empty()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} else if (ref.find("#/") == 0) {
|
||||||
|
target = schema;
|
||||||
|
n["$ref"] = url + ref;
|
||||||
|
ref = url + ref;
|
||||||
|
} else {
|
||||||
|
_errors.push_back("Unsupported ref: " + ref);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
std::string pointer = ref.substr(ref.find('#') + 1);
|
||||||
|
std::vector<std::string> tokens = split(pointer, "/");
|
||||||
|
for (size_t i = 1; i < tokens.size(); ++i) {
|
||||||
|
std::string sel = tokens[i];
|
||||||
|
if (target.is_null() || !target.contains(sel)) {
|
||||||
|
_errors.push_back("Error resolving ref " + ref + ": " + sel + " not in " + target.dump());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
target = target[sel];
|
||||||
|
}
|
||||||
|
_refs[ref] = target;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for (auto & kv : n.items()) {
|
||||||
|
visit_refs(kv.value());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
visit_refs(schema);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string _generate_constant_rule(const json & value) {
|
||||||
|
return format_literal(value.dump());
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string visit(const json & schema, const std::string & name) {
|
||||||
|
        json schema_type = schema.contains("type") ? schema["type"] : json();
        std::string schema_format = schema.contains("format") ? schema["format"].get<std::string>() : "";
        std::string rule_name = is_reserved_name(name) ? name + "-" : name.empty() ? "root" : name;

        if (schema.contains("$ref")) {
            return _add_rule(rule_name, _resolve_ref(schema["$ref"]));
        } else if (schema.contains("oneOf") || schema.contains("anyOf")) {
            std::vector<json> alt_schemas = schema.contains("oneOf") ? schema["oneOf"].get<std::vector<json>>() : schema["anyOf"].get<std::vector<json>>();
            return _add_rule(rule_name, _generate_union_rule(name, alt_schemas));
        } else if (schema_type.is_array()) {
            std::vector<json> schema_types;
            for (const auto & t : schema_type) {
                schema_types.push_back({{"type", t}});
            }
            return _add_rule(rule_name, _generate_union_rule(name, schema_types));
        } else if (schema.contains("const")) {
            return _add_rule(rule_name, _generate_constant_rule(schema["const"]));
        } else if (schema.contains("enum")) {
            std::vector<std::string> enum_values;
            for (const auto & v : schema["enum"]) {
                enum_values.push_back(_generate_constant_rule(v));
            }
            return _add_rule(rule_name, join(enum_values.begin(), enum_values.end(), " | "));
        } else if ((schema_type.is_null() || schema_type == "object")
                && (schema.contains("properties") ||
                    (schema.contains("additionalProperties") && schema["additionalProperties"] != true))) {
            std::unordered_set<std::string> required;
            if (schema.contains("required") && schema["required"].is_array()) {
                for (const auto & item : schema["required"]) {
                    if (item.is_string()) {
                        required.insert(item.get<std::string>());
                    }
                }
            }
            std::vector<std::pair<std::string, json>> properties;
            if (schema.contains("properties")) {
                for (const auto & prop : schema["properties"].items()) {
                    properties.emplace_back(prop.key(), prop.value());
                }
            }
            return _add_rule(rule_name,
                _build_object_rule(
                    properties, required, name,
                    schema.contains("additionalProperties") ? schema["additionalProperties"] : json()));
        } else if ((schema_type.is_null() || schema_type == "object") && schema.contains("allOf")) {
            std::unordered_set<std::string> required;
            std::vector<std::pair<std::string, json>> properties;
            std::string hybrid_name = name;
            std::function<void(const json &, bool)> add_component = [&](const json & comp_schema, bool is_required) {
                if (comp_schema.contains("$ref")) {
                    add_component(_refs[comp_schema["$ref"]], is_required);
                } else if (comp_schema.contains("properties")) {
                    for (const auto & prop : comp_schema["properties"].items()) {
                        properties.emplace_back(prop.key(), prop.value());
                        if (is_required) {
                            required.insert(prop.key());
                        }
                    }
                } else {
                    // todo warning
                }
            };
            for (auto & t : schema["allOf"]) {
                if (t.contains("anyOf")) {
                    for (auto & tt : t["anyOf"]) {
                        add_component(tt, false);
                    }
                } else {
                    add_component(t, true);
                }
            }
            return _add_rule(rule_name, _build_object_rule(properties, required, hybrid_name, json()));
        } else if ((schema_type.is_null() || schema_type == "array") && (schema.contains("items") || schema.contains("prefixItems"))) {
            json items = schema.contains("items") ? schema["items"] : schema["prefixItems"];
            if (items.is_array()) {
                std::string rule = "\"[\" space ";
                for (size_t i = 0; i < items.size(); i++) {
                    if (i > 0) {
                        rule += " \",\" space ";
                    }
                    rule += visit(items[i], name + (name.empty() ? "" : "-") + "tuple-" + std::to_string(i));
                }
                rule += " \"]\" space";
                return _add_rule(rule_name, rule);
            } else {
                std::string item_rule_name = visit(items, name + (name.empty() ? "" : "-") + "item");
                std::string list_item_operator = "( \",\" space " + item_rule_name + " )";
                std::string successive_items;
                int min_items = schema.contains("minItems") ? schema["minItems"].get<int>() : 0;
                json max_items_json = schema.contains("maxItems") ? schema["maxItems"] : json();
                int max_items = max_items_json.is_number_integer() ? max_items_json.get<int>() : -1;
                if (min_items > 0) {
                    successive_items += repeat(list_item_operator, min_items - 1);
                    min_items--;
                }
                if (max_items >= 0 && max_items > min_items) {
                    successive_items += repeat(list_item_operator + "?", max_items - min_items - 1);
                } else {
                    successive_items += list_item_operator + "*";
                }
                std::string rule;
                if (min_items == 0) {
                    rule = "\"[\" space ( " + item_rule_name + " " + successive_items + " )? \"]\" space";
                } else {
                    rule = "\"[\" space " + item_rule_name + " " + successive_items + " \"]\" space";
                }
                return _add_rule(rule_name, rule);
            }
        } else if ((schema_type.is_null() || schema_type == "string") && schema.contains("pattern")) {
            return _visit_pattern(schema["pattern"], rule_name);
        } else if ((schema_type.is_null() || schema_type == "string") && std::regex_match(schema_format, std::regex("^uuid[1-5]?$"))) {
            return _add_rule(rule_name == "root" ? "root" : schema_format, PRIMITIVE_RULES.at("uuid"));
        } else if ((schema_type.is_null() || schema_type == "string") && DATE_RULES.find(schema_format) != DATE_RULES.end()) {
            for (const auto & kv : DATE_RULES) {
                _add_rule(kv.first, kv.second);
            }
            return schema_format + "-string";
        } else if (schema.empty() || schema_type == "object") {
            for (const auto & n : OBJECT_RULE_NAMES) {
                _add_rule(n, PRIMITIVE_RULES.at(n));
            }
            return _add_rule(rule_name, "object");
        } else {
            if (!schema_type.is_string() || PRIMITIVE_RULES.find(schema_type.get<std::string>()) == PRIMITIVE_RULES.end()) {
                _errors.push_back("Unrecognized schema: " + schema.dump());
                return "";
            }
            // TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero
            return _add_rule(rule_name == "root" ? "root" : schema_type.get<std::string>(), PRIMITIVE_RULES.at(schema_type.get<std::string>()));
        }
    }

    void check_errors() {
        if (!_errors.empty()) {
            throw std::runtime_error("JSON schema conversion failed:\n" + join(_errors.begin(), _errors.end(), "\n"));
        }
        if (!_warnings.empty()) {
            fprintf(stderr, "WARNING: JSON schema conversion was incomplete: %s\n", join(_warnings.begin(), _warnings.end(), "; ").c_str());
        }
    }

    std::string format_grammar() {
        std::stringstream ss;
        for (const auto & kv : _rules) {
            ss << kv.first << " ::= " << kv.second << std::endl;
        }
        return ss.str();
    }
};

std::string json_schema_to_grammar(const json & schema) {
    SchemaConverter converter([](const std::string &) { return json::object(); }, /* dotall= */ false);
    auto copy = schema;
    converter.resolve_refs(copy, "input");
    converter.visit(copy, "");
    converter.check_errors();
    return converter.format_grammar();
}
|
common/json-schema-to-grammar.h (new file, 4 lines)
@@ -0,0 +1,4 @@
#pragma once
#include "json.hpp"

std::string json_schema_to_grammar(const nlohmann::ordered_json& schema);
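To see how this header is typically consumed, here is a minimal, hypothetical usage sketch (not part of the patch); it assumes json.hpp and the header above are on the include path and simply feeds a small object schema through json_schema_to_grammar:

// Hypothetical illustration only: convert a tiny JSON schema into a GBNF grammar string.
#include "json-schema-to-grammar.h"
#include "json.hpp"
#include <iostream>

int main() {
    // An object with one required string property.
    auto schema = nlohmann::ordered_json::parse(R"({
        "type": "object",
        "properties": { "name": { "type": "string" } },
        "required": ["name"]
    })");
    // format_grammar() emits one "<rule> ::= <body>" line per rule, with "root" as the entry point.
    std::cout << json_schema_to_grammar(schema) << std::endl;
    return 0;
}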
File diff suppressed because it is too large
@@ -297,7 +297,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
 #ifndef _MSC_VER
     #define LOG(...) LOG_IMPL(__VA_ARGS__, "")
 #else
-    #define LOG(str, ...) LOG_IMPL("%s" str, "", __VA_ARGS__, "")
+    #define LOG(str, ...) LOG_IMPL("%s" str, "", ##__VA_ARGS__, "")
 #endif
 
 // Main TEE macro.
@@ -311,7 +311,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
 #ifndef _MSC_VER
     #define LOG_TEE(...) LOG_TEE_IMPL(__VA_ARGS__, "")
 #else
-    #define LOG_TEE(str, ...) LOG_TEE_IMPL("%s" str, "", __VA_ARGS__, "")
+    #define LOG_TEE(str, ...) LOG_TEE_IMPL("%s" str, "", ##__VA_ARGS__, "")
 #endif
 
 // LOG macro variants with auto endline.
@@ -319,8 +319,8 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
     #define LOGLN(...) LOG_IMPL(__VA_ARGS__, "\n")
     #define LOG_TEELN(...) LOG_TEE_IMPL(__VA_ARGS__, "\n")
 #else
-    #define LOGLN(str, ...) LOG_IMPL("%s" str, "", __VA_ARGS__, "\n")
-    #define LOG_TEELN(str, ...) LOG_TEE_IMPL("%s" str, "", __VA_ARGS__, "\n")
+    #define LOGLN(str, ...) LOG_IMPL("%s" str, "", ##__VA_ARGS__, "\n")
+    #define LOG_TEELN(str, ...) LOG_TEE_IMPL("%s" str, "", ##__VA_ARGS__, "\n")
 #endif
 
 // INTERNAL, DO NOT USE
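The switch from __VA_ARGS__ to ##__VA_ARGS__ above matters when a logging macro is invoked with no variadic arguments: the `##` comma-pasting extension (which the branch changed here relies on) drops the dangling comma so the call still expands to valid code. A minimal standalone sketch under that assumption, independent of the patch's LOG_IMPL machinery:

// Standalone sketch of the comma-swallowing behaviour provided by the ## extension.
#include <cstdio>

#define MY_LOG(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)

int main() {
    MY_LOG("plain message");   // no varargs: ## removes the trailing comma
    MY_LOG("value = %d", 42);  // with varargs: expands as usual
    return 0;
}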
@@ -17,6 +17,13 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_
         return nullptr;
     }
 
+    // Ensure that there is a "root" node.
+    if (result->parsed_grammar.symbol_ids.find("root") == result->parsed_grammar.symbol_ids.end()) {
+        fprintf(stderr, "%s: grammar does not contain a 'root' symbol\n", __func__);
+        delete result;
+        return nullptr;
+    }
+
     std::vector<const llama_grammar_element *> grammar_rules(result->parsed_grammar.c_rules());
 
     result->grammar = llama_grammar_init(
@@ -32,13 +32,13 @@ typedef struct llama_sampling_params {
     float   dynatemp_range    = 0.00f; // 0.0 = disabled
     float   dynatemp_exponent = 1.00f; // controls how entropy maps to temperature in dynamic temperature sampler
     int32_t penalty_last_n    = 64;    // last n tokens to penalize (0 = disable penalty, -1 = context size)
-    float   penalty_repeat    = 1.10f; // 1.0 = disabled
+    float   penalty_repeat    = 1.00f; // 1.0 = disabled
     float   penalty_freq      = 0.00f; // 0.0 = disabled
     float   penalty_present   = 0.00f; // 0.0 = disabled
     int32_t mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
     float   mirostat_tau      = 5.00f; // target entropy
     float   mirostat_eta      = 0.10f; // learning rate
-    bool    penalize_nl       = true;  // consider newlines as a repeatable token
+    bool    penalize_nl       = false; // consider newlines as a repeatable token
 
     std::vector<llama_sampler_type> samplers_sequence = {
         llama_sampler_type::TOP_K,
@@ -36,8 +36,10 @@ class SentencePieceTokenTypes(IntEnum):
     UNUSED = 5
     BYTE = 6
 
 
 AnyModel = TypeVar("AnyModel", bound="type[Model]")
 
 
 class Model(ABC):
     _model_classes: dict[str, type[Model]] = {}
 
@@ -187,6 +189,7 @@ class Model(ABC):
     @classmethod
     def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
         assert names
 
         def func(modelcls: type[Model]):
             for name in names:
                 cls._model_classes[name] = modelcls
@@ -1631,7 +1634,7 @@ in chat mode so that the conversation can end normally.")
         self.post_write_tensors(tensor_map, name, data_torch)
 
 
-@Model.register("BertModel")
+@Model.register("BertModel", "CamembertModel")
 class BertModel(Model):
     model_arch = gguf.MODEL_ARCH.BERT
 
@@ -1844,6 +1847,141 @@ class StarCoder2Model(Model):
     model_arch = gguf.MODEL_ARCH.STARCODER2
 
 
+@Model.register("MambaForCausalLM", "MambaLMHeadModel")
+class MambaModel(Model):
+    model_arch = gguf.MODEL_ARCH.MAMBA
+
+    def set_vocab(self):
+        vocab_size = self.hparams["vocab_size"]
+        # Round vocab size to next multiple of 8
+        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
+        # pad using ceiling division
+        # ref: https://stackoverflow.com/a/17511341/22827863
+        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
+        self.hparams["vocab_size"] = vocab_size
+
+        if (self.dir_model / "tokenizer.json").is_file():
+            self._set_vocab_gpt2()
+        else:
+            # Use the GPT-NeoX tokenizer when no tokenizer files are present
+            tokenizer_path = Path(sys.path[0]) / "models" / "ggml-vocab-gpt-neox.gguf"
+            print(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
+            neox_reader = gguf.GGUFReader(tokenizer_path, "r")
+
+            field = neox_reader.get_field(gguf.Keys.Tokenizer.MODEL)
+            self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]))
+            field = neox_reader.get_field(gguf.Keys.Tokenizer.LIST)
+            self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])
+            field = neox_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
+            self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])
+            field = neox_reader.get_field(gguf.Keys.Tokenizer.MERGES)
+            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])
+            field = neox_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)
+            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
+            field = neox_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)
+            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
+            field = neox_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)
+            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
+
+    def set_gguf_parameters(self):
+        d_model = self.find_hparam(["hidden_size", "d_model"])
+        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
+        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
+        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
+        # ceiling division
+        # ref: https://stackoverflow.com/a/17511341/22827863
+        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
+        dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
+        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
+
+        # Fail early for models which don't have a block expansion factor of 2
+        assert d_inner == 2 * d_model
+
+        self.gguf_writer.add_name(self.dir_model.name)
+        self.gguf_writer.add_context_length(2**20) # arbitrary value; for those who use the default
+        self.gguf_writer.add_embedding_length(d_model)
+        self.gguf_writer.add_feed_forward_length(0) # unused, but seemingly required when loading
+        self.gguf_writer.add_head_count(0) # unused, but seemingly required when loading
+        self.gguf_writer.add_block_count(self.hparams["n_layer"])
+        self.gguf_writer.add_ssm_conv_kernel(d_conv)
+        self.gguf_writer.add_ssm_inner_size(d_inner)
+        self.gguf_writer.add_ssm_state_size(d_state)
+        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
+        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
+        self.gguf_writer.add_file_type(self.ftype)
+
+    def write_tensors(self):
+        block_count = self.hparams["n_layer"]
+        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+
+        tok_embd = None
+        tok_embd_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.TOKEN_EMBD] + ".weight"
+        output_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.OUTPUT] + ".weight"
+
+        for name, data_torch in self.get_tensors():
+            old_dtype = data_torch.dtype
+
+            # convert any unsupported data types to float32
+            if data_torch.dtype not in (torch.float16, torch.float32):
+                data_torch = data_torch.to(torch.float32)
+
+            # map tensor names
+            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
+            if new_name is None:
+                print(f"Can not map tensor {name!r}")
+                sys.exit()
+
+            if name.endswith(".A_log"):
+                print("A_log --> A ==> " + new_name)
+                data_torch = -torch.exp(data_torch)
+
+            # assuming token_embd.weight is seen before output.weight
+            if tok_embd is not None and new_name == output_name:
+                if torch.equal(tok_embd, data_torch):
+                    print(f"{output_name} is equivalent to {tok_embd_name}, omitting")
+                    continue
+            if new_name == tok_embd_name:
+                tok_embd = data_torch
+
+            data = data_torch.squeeze().numpy()
+
+            n_dims = len(data.shape)
+            data_dtype = data.dtype
+
+            # if f32 desired, convert any float16 to float32
+            if self.ftype == 0 and data_dtype == np.float16:
+                data = data.astype(np.float32)
+
+            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
+            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+                data = data.astype(np.float32)
+
+            # if f16 desired, convert big float32 2-dim weight tensors to float16
+            if self.ftype == 1 and data_dtype == np.float32 and new_name.removesuffix(".weight").endswith((".ssm_in", ".ssm_out", "token_embd", "output")) and n_dims == 2:
+                data = data.astype(np.float16)
+
+            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+
+            self.gguf_writer.add_tensor(new_name, data)
+
+
+@Model.register("CohereForCausalLM")
+class CommandR2Model(Model):
+    model_arch = gguf.MODEL_ARCH.COMMAND_R
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # max_position_embeddings = 8192 in config.json but model was actually
+        # trained on 128k context length
+        self.hparams["max_position_embeddings"] = self.hparams["model_max_length"]
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
+        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
+
+
 ###### CONVERSION LOGIC ######
convert.py (139 changed lines)
@@ -332,6 +332,9 @@ class Params:
 #
 
 class BpeVocab:
+    tokenizer_model = "gpt2"
+    name = "bpe"
+
     def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None) -> None:
         self.bpe_tokenizer = json.loads(open(str(fname_tokenizer), encoding="utf-8").read())
         if isinstance(self.bpe_tokenizer.get('model'), dict):
@@ -390,6 +393,9 @@ class BpeVocab:
 
 
 class SentencePieceVocab:
+    tokenizer_model = "llama"
+    name = "spm"
+
     def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None) -> None:
         self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
         added_tokens: dict[str, int]
@@ -453,6 +459,9 @@ class SentencePieceVocab:
 
 
 class HfVocab:
+    tokenizer_model = "llama"
+    name = "hfft"
+
     def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None = None) -> None:
         try:
             from transformers import AutoTokenizer
@@ -553,7 +562,15 @@ class HfVocab:
         return f"<HfVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
 
 
-Vocab: TypeAlias = "BpeVocab | SentencePieceVocab | HfVocab"
+class NoVocab:
+    tokenizer_model = "no_vocab"
+    name = "no_vocab"
+
+    def __repr__(self) -> str:
+        return "<NoVocab for a model without integrated vocabulary>"
+
+
+Vocab: TypeAlias = "BpeVocab | SentencePieceVocab | HfVocab | NoVocab"
 
 
 #
@@ -935,8 +952,10 @@ def check_vocab_size(params: Params, vocab: Vocab, pad_vocab: bool = False) -> N
     # Handle special case where the model's vocab size is not set
     if params.n_vocab == -1:
         raise ValueError(
-            f"The model's vocab size is set to -1 in params.json. Please update it manually. Maybe {vocab.vocab_size}?"
+            f"The model's vocab size is set to -1 in params.json. Please update it manually.{f' Maybe {vocab.vocab_size}?' if hasattr(vocab, 'vocab_size') else ''}"
         )
+    if isinstance(vocab, NoVocab):
+        return  # model has no vocab
 
     # Check for a vocab size mismatch
     if params.n_vocab == vocab.vocab_size:
@@ -977,6 +996,7 @@ class OutputFile:
         name = str(params.path_model.parent).split('/')[-1]
 
         self.gguf.add_name                (name)
+        self.gguf.add_vocab_size          (params.n_vocab)
         self.gguf.add_context_length      (params.n_ctx)
         self.gguf.add_embedding_length    (params.n_embd)
         self.gguf.add_block_count         (params.n_layer)
@@ -1013,21 +1033,9 @@ class OutputFile:
         if params.ftype is not None:
             self.gguf.add_file_type(params.ftype)
 
-    def handle_tokenizer_model(self, vocab: Vocab) -> str:
-        # Map the vocab types to the supported tokenizer models
-        tokenizer_model = {
-            SentencePieceVocab: "llama",
-            HfVocab: "llama",
-            BpeVocab: "gpt2",
-        }.get(type(vocab))
-
-        # Block if vocab type is not predefined
-        if tokenizer_model is None:
-            raise ValueError("Unknown vocab type: Not supported")
-
-        return tokenizer_model
-
     def extract_vocabulary_from_model(self, vocab: Vocab) -> tuple[list[bytes], list[float], list[gguf.TokenType]]:
+        assert not isinstance(vocab, NoVocab)
+
         tokens = []
         scores = []
         toktypes = []
@@ -1043,11 +1051,8 @@ class OutputFile:
         return tokens, scores, toktypes
 
     def add_meta_vocab(self, vocab: Vocab) -> None:
-        # Handle the tokenizer model
-        tokenizer_model = self.handle_tokenizer_model(vocab)
-
         # Ensure that tokenizer_model is added to the GGUF model
-        self.gguf.add_tokenizer_model(tokenizer_model)
+        self.gguf.add_tokenizer_model(vocab.tokenizer_model)
 
         # Extract model vocabulary for model conversion
         tokens, scores, toktypes = self.extract_vocabulary_from_model(vocab)
@@ -1074,6 +1079,26 @@ class OutputFile:
     def write_tensor_info(self) -> None:
         self.gguf.write_ti_data_to_file()
 
+    def write_tensor_data(self, ftype: GGMLFileType, model: LazyModel, concurrency: int) -> None:
+        ndarrays_inner = bounded_parallel_map(OutputFile.do_item, model.items(), concurrency=concurrency)
+        if ftype == GGMLFileType.MostlyQ8_0:
+            ndarrays = bounded_parallel_map(
+                OutputFile.maybe_do_quantize, ndarrays_inner, concurrency=concurrency, max_workers=concurrency,
+                use_processpool_executor=True,
+            )
+        else:
+            ndarrays = map(OutputFile.maybe_do_quantize, ndarrays_inner)
+
+        start = time.time()
+        for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
+            elapsed = time.time() - start
+            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
+            padi = len(str(len(model)))
+            print(
+                f"[{i + 1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}"
+            )
+            self.gguf.write_tensor_data(ndarray)
+
     def close(self) -> None:
         self.gguf.close()
 
@@ -1082,7 +1107,7 @@ class OutputFile:
         fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab,
         endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False,
     ) -> None:
-        check_vocab_size(params, vocab, pad_vocab = pad_vocab)
+        check_vocab_size(params, vocab, pad_vocab=pad_vocab)
 
         of = OutputFile(fname_out, endianess=endianess)
 
@@ -1120,6 +1145,9 @@ class OutputFile:
 
         # meta data
         of.add_meta_arch(params)
+        if isinstance(vocab, NoVocab):
+            of.gguf.add_tokenizer_model(vocab.tokenizer_model)
+        else:
             of.add_meta_vocab(vocab)
             of.add_meta_special_vocab(svocab)
 
@@ -1131,24 +1159,7 @@ class OutputFile:
         of.write_tensor_info()
 
         # tensor data
-        ndarrays_inner = bounded_parallel_map(OutputFile.do_item, model.items(), concurrency = concurrency)
-        if ftype == GGMLFileType.MostlyQ8_0:
-            ndarrays = bounded_parallel_map(
-                OutputFile.maybe_do_quantize, ndarrays_inner, concurrency=concurrency, max_workers=concurrency,
-                use_processpool_executor=True,
-            )
-        else:
-            ndarrays = map(OutputFile.maybe_do_quantize, ndarrays_inner)
-
-        start = time.time()
-        for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
-            elapsed = time.time() - start
-            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
-            padi = len(str(len(model)))
-            print(
-                f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}"
-            )
-            of.gguf.write_tensor_data(ndarray)
+        of.write_tensor_data(ftype, model, concurrency)
 
         of.close()
 
@@ -1156,9 +1167,9 @@ class OutputFile:
 def pick_output_type(model: LazyModel, output_type_str: str | None) -> GGMLFileType:
     wq_type = model[gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ATTN_Q].format(bid=0) + ".weight"].data_type
 
-    if output_type_str == "f32" or (output_type_str is None and wq_type == DT_F32):
+    if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)):
         return GGMLFileType.AllF32
-    if output_type_str == "f16" or (output_type_str is None and wq_type in (DT_F16, DT_BF16)):
+    if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16):
         return GGMLFileType.MostlyF16
     if output_type_str == "q8_0":
         return GGMLFileType.MostlyQ8_0
@@ -1309,8 +1320,8 @@ class VocabFactory:
                 return vtype, path
         raise FileNotFoundError(f"Could not find any of {[self._FILES[vt] for vt in vocab_types]}")
 
-    def _create_special_vocab(self, vocab: Vocab, vocabtype: str, model_parent_path: Path) -> gguf.SpecialVocab:
-        load_merges = vocabtype == "bpe"
+    def _create_special_vocab(self, vocab: Vocab, model_parent_path: Path) -> gguf.SpecialVocab:
+        load_merges = vocab.name == "bpe"
         n_vocab = vocab.vocab_size if hasattr(vocab, "vocab_size") else None
         return gguf.SpecialVocab(
             model_parent_path,
@@ -1319,30 +1330,34 @@ class VocabFactory:
             n_vocab=n_vocab,
         )
 
-    def load_vocab(self, vocab_types: list[str], model_parent_path: Path) -> tuple[Vocab, gguf.SpecialVocab]:
+    def _create_vocab_by_path(self, vocab_types: list[str]) -> Vocab:
         vocab_type, path = self._select_file(vocab_types)
         print(f"Loading vocab file {path!r}, type {vocab_type!r}")
 
         added_tokens_path = path.parent / "added_tokens.json"
-        vocab: Vocab
         if vocab_type == "bpe":
-            vocab = BpeVocab(
+            return BpeVocab(
                 path, added_tokens_path if added_tokens_path.exists() else None
             )
-        elif vocab_type == "spm":
-            vocab = SentencePieceVocab(
+        if vocab_type == "spm":
+            return SentencePieceVocab(
                 path, added_tokens_path if added_tokens_path.exists() else None
             )
-        elif vocab_type == "hfft":
-            vocab = HfVocab(
+        if vocab_type == "hfft":
+            return HfVocab(
                 path.parent, added_tokens_path if added_tokens_path.exists() else None
             )
-        else:
-            raise ValueError(vocab_type)
+        raise ValueError(vocab_type)
+
+    def load_vocab(self, vocab_types: list[str], model_parent_path: Path) -> tuple[Vocab, gguf.SpecialVocab]:
+        vocab: Vocab
+        if len(vocab_types) == 1 and "no_vocab" in vocab_types:
+            vocab = NoVocab()
+        else:
+            vocab = self._create_vocab_by_path(vocab_types)
         # FIXME: Respect --vocab-dir?
         special_vocab = self._create_special_vocab(
             vocab,
-            vocab_type,
             model_parent_path,
         )
         return vocab, special_vocab
@@ -1377,10 +1392,10 @@ def main(args_in: list[str] | None = None) -> None:
     # We currently only support Q8_0 output on little endian systems.
     output_choices.append("q8_0")
     parser = argparse.ArgumentParser(description="Convert a LLaMA model to a GGML compatible file")
-    parser.add_argument("--awq-path", type=Path, help="Path to scale awq cache file", default=None)
     parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model")
     parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
     parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
+    parser.add_argument("--no-vocab", action="store_true", help="store model without the vocab")
     parser.add_argument("--outtype", choices=output_choices, help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)")
     parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
     parser.add_argument("--vocab-type", help="vocab types to try in order, choose from 'spm', 'bpe', 'hfft' (default: spm,hfft)", default="spm,hfft")
@@ -1393,18 +1408,10 @@ def main(args_in: list[str] | None = None) -> None:
     parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing")
 
     args = parser.parse_args(args_in)
-    if args.awq_path:
-        sys.path.insert(1, str(Path(__file__).parent / 'awq-py'))
-        from awq.apply_awq import add_scale_weights  # type: ignore[import-not-found]
-        tmp_model_path = args.model / "weighted_model"
-        if tmp_model_path.is_dir():
-            print(f"{tmp_model_path} exists as a weighted model.")
-        else:
-            tmp_model_path.mkdir(parents=True, exist_ok=True)
-            print("Saving new weighted model ...")
-            add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path))
-            print(f"Saved weighted model at {tmp_model_path}.")
-        args.model = tmp_model_path
+    if args.no_vocab:
+        if args.vocab_only:
+            raise ValueError("no need to specify --vocab-only if using --no-vocab")
+        args.vocab_type = "no_vocab"
 
     if args.dump_single:
         model_plus = lazy_load_file(args.model)
@@ -1455,7 +1462,7 @@ def main(args_in: list[str] | None = None) -> None:
             print(f"Wrote {outfile}")
             return
 
-    if model_plus.vocab is not None and args.vocab_dir is None:
+    if model_plus.vocab is not None and args.vocab_dir is None and not args.no_vocab:
         vocab = model_plus.vocab
 
     print(f"Vocab info: {vocab}")
@@ -20,6 +20,8 @@ else()
     add_subdirectory(convert-llama2c-to-ggml)
     add_subdirectory(embedding)
     add_subdirectory(finetune)
+    add_subdirectory(gritlm)
+    add_subdirectory(gguf-split)
     add_subdirectory(infill)
     add_subdirectory(llama-bench)
     add_subdirectory(llava)
@@ -105,6 +105,9 @@ int main(int argc, char ** argv) {
     ctx_params.n_threads       = params.n_threads;
     ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
 
+    // ensure enough sequences are available
+    ctx_params.n_seq_max = *std::max_element(n_pl.begin(), n_pl.end());
+
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);
 
     if (ctx == NULL) {
@@ -135,6 +138,8 @@ int main(int argc, char ** argv) {
             LOG_TEE("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret);
             return false;
         }
+
+        llama_synchronize(ctx);
     }
 
     return true;
@@ -174,10 +179,10 @@ int main(int argc, char ** argv) {
 
             llama_batch_clear(batch);
 
-            const int n_tokens = is_pp_shared ? pp : pl*pp;
-
-            for (int i = 0; i < n_tokens; ++i) {
-                llama_batch_add(batch, 0, i, { 0 }, false);
+            for (int i = 0; i < pp; ++i) {
+                for (int j = 0; j < (is_pp_shared ? 1 : pl); ++j) {
+                    llama_batch_add(batch, 0, i, { j }, false);
+                }
             }
             batch.logits[batch.n_tokens - 1] = true;
 
@@ -192,7 +197,7 @@ int main(int argc, char ** argv) {
 
             if (is_pp_shared) {
                 for (int32_t i = 1; i < pl; ++i) {
-                    llama_kv_cache_seq_cp(ctx, 0, i, 0, pp);
+                    llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
                 }
             }
 
@@ -48,6 +48,8 @@ int main(int argc, char ** argv) {
         params.prompt = "Hello my name is";
     }
 
+    process_escapes(params.prompt);
+
     // init LLM
 
     llama_backend_init();
@@ -80,6 +82,7 @@ int main(int argc, char ** argv) {
     ctx_params.seed  = 1234;
     ctx_params.n_ctx = n_kv_req;
     ctx_params.n_batch = std::max(n_len, n_parallel);
+    ctx_params.n_seq_max = n_parallel;
     ctx_params.n_threads = params.n_threads;
     ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
 
@@ -132,7 +135,7 @@ int main(int argc, char ** argv) {
     // assign the system KV cache to all parallel sequences
     // this way, the parallel sequences will "reuse" the prompt tokens without having to copy them
     for (int32_t i = 1; i < n_parallel; ++i) {
-        llama_kv_cache_seq_cp(ctx, 0, i, 0, batch.n_tokens);
+        llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
    }
 
     if (n_parallel > 1) {
@@ -189,12 +189,10 @@ int main(int argc, char ** argv) {
 
     int32_t nelements = sizex*sizey;
 
-    std::vector<int64_t> hist_cur(1 << 4, 0);
-
     // Set up a the benchmark matrices
     // printf("Creating new tensor q11 & Running quantize\n");
     struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey);
-    ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements/m11->ne[0], m11->ne[0], hist_cur.data(), nullptr);
+    ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements/m11->ne[0], m11->ne[0], nullptr);
 
     // Set up a the compute graph
     // printf("Creating new tensor q31\n");
@@ -207,7 +205,7 @@ int main(int argc, char ** argv) {
     // Set up a second graph computation to make sure we override the CPU cache lines
     // printf("Creating new tensor q12 & Running quantize\n");
     struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey);
-    ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements/m12->ne[0], m12->ne[0], hist_cur.data(), nullptr);
+    ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements/m12->ne[0], m12->ne[0], nullptr);
 
     // printf("Creating new tensor q32\n");
     struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2);
@@ -19,18 +19,7 @@ static std::vector<std::string> split_lines(const std::string & s) {
 
 static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, int seq_id) {
     for (size_t i = 0; i < tokens.size(); i++) {
-        llama_batch_add(batch, tokens[i], i, { seq_id }, false);
-    }
-}
-
-static void normalize(float * vec, float * out, int n) {
-    float norm = 0;
-    for (int i = 0; i < n; i++) {
-        norm += vec[i] * vec[i];
-    }
-    norm = sqrt(norm);
-    for (int i = 0; i < n; i++) {
-        out[i] = vec[i] / norm;
+        llama_batch_add(batch, tokens[i], i, { seq_id }, i == tokens.size() - 1);
     }
 }
 
@@ -44,11 +33,23 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
         fprintf(stderr, "%s : failed to decode\n", __func__);
     }
 
-    // normalize on copy
-    for (int k = 0; k < n_seq; k++) {
-        float * emb = llama_get_embeddings_ith(ctx, k);
-        float * out = output + k * n_embd;
-        normalize(emb, out, n_embd);
+    for (int i = 0; i < batch.n_tokens; i++) {
+        if (!batch.logits[i]) {
+            continue;
+        }
+
+        // try to get sequence embeddings - supported only when pooling_type is not NONE
+        const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
+        if (embd == NULL) {
+            embd = llama_get_embeddings_ith(ctx, i);
+            if (embd == NULL) {
+                fprintf(stderr, "%s: failed to get embeddings for token %d\n", __func__, i);
+                continue;
+            }
+        }
+
+        float * out = output + batch.seq_id[i][0] * n_embd;
+        llama_embd_normalize(embd, out, n_embd);
     }
 }
 
@@ -106,18 +107,25 @@ int main(int argc, char ** argv) {
 
     // max batch size
     const uint64_t n_batch = params.n_batch;
-    GGML_ASSERT(params.n_batch == params.n_ctx);
+    GGML_ASSERT(params.n_batch >= params.n_ctx);
 
     // tokenize the prompts and trim
     std::vector<std::vector<int32_t>> inputs;
     for (const auto & prompt : prompts) {
-        auto inp = ::llama_tokenize(ctx, prompt, true);
+        auto inp = ::llama_tokenize(ctx, prompt, true, false);
         if (inp.size() > n_batch) {
             inp.resize(n_batch);
         }
         inputs.push_back(inp);
     }
 
+    // add eos if not present
+    for (auto & inp : inputs) {
+        if (inp.empty() || inp.back() != llama_token_eos(model)) {
+            inp.push_back(llama_token_eos(model));
+        }
+    }
+
     // tokenization stats
     if (params.verbose_prompt) {
         for (int i = 0; i < (int) inputs.size(); i++) {
@@ -132,7 +140,7 @@ int main(int argc, char ** argv) {
 
     // initialize batch
     const int n_prompts = prompts.size();
-    struct llama_batch batch = llama_batch_init(n_batch, 0, n_prompts);
+    struct llama_batch batch = llama_batch_init(n_batch, 0, 1);
 
     // allocate output
     const int n_embd = llama_n_embd(model);
@@ -145,6 +153,7 @@ int main(int argc, char ** argv) {
     for (int k = 0; k < n_prompts; k++) {
         // clamp to n_batch tokens
         auto & inp = inputs[k];
+
         const uint64_t n_toks = inp.size();
 
         // encode if at capacity
@@ -165,15 +174,26 @@ int main(int argc, char ** argv) {
     float * out = emb + p * n_embd;
     batch_decode(ctx, batch, out, s, n_embd);
 
-    // print first 3 embeddings
-    for (int j = 0; j < std::min(3, n_prompts); j++) {
-        fprintf(stderr, "embedding %d: ", j);
-        for (int i = 0; i < n_embd; i++) {
-            fprintf(stderr, "%f ", emb[j * n_embd + i]);
+    // print the first part of the embeddings
+    fprintf(stdout, "\n");
+    for (int j = 0; j < n_prompts; j++) {
+        fprintf(stdout, "embedding %d: ", j);
+        for (int i = 0; i < std::min(16, n_embd); i++) {
+            fprintf(stdout, "%9.6f ", emb[j * n_embd + i]);
         }
-        fprintf(stderr, "\n\n");
+        fprintf(stdout, "\n");
+    }
+
+    // print cosine similarity matrix
+    fprintf(stdout, "\n");
+    printf("cosine similarity matrix:\n\n");
+    for (int i = 0; i < n_prompts; i++) {
+        for (int j = 0; j < n_prompts; j++) {
+            float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
+            fprintf(stdout, "%6.2f ", sim);
+        }
+        fprintf(stdout, "\n");
     }
-    fprintf(stderr, "\n");
 
     // clean up
     llama_print_timings(ctx);
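The embedding example now L2-normalizes each sequence embedding and prints pairwise cosine similarities through the llama_embd_similarity_cos helper. As a self-contained illustration of the arithmetic involved (plain C++ with no llama.cpp dependency; it assumes the helpers in the patch compute the equivalent of the two functions below):

// Sketch of L2 normalization and cosine similarity as used conceptually by the embedding example.
#include <cmath>
#include <cstdio>
#include <vector>

// L2-normalize a vector: divide each component by the vector's Euclidean norm.
static void l2_normalize(const float * src, float * dst, int n) {
    double norm = 0.0;
    for (int i = 0; i < n; i++) norm += src[i] * src[i];
    norm = std::sqrt(norm);
    for (int i = 0; i < n; i++) dst[i] = norm > 0.0 ? (float)(src[i] / norm) : 0.0f;
}

// Cosine similarity of two vectors: dot(a, b) / (|a| * |b|).
static float cosine_similarity(const float * a, const float * b, int n) {
    double dot = 0.0, na = 0.0, nb = 0.0;
    for (int i = 0; i < n; i++) {
        dot += a[i] * b[i];
        na  += a[i] * a[i];
        nb  += b[i] * b[i];
    }
    return (float)(dot / (std::sqrt(na) * std::sqrt(nb)));
}

int main() {
    std::vector<float> a = {1.0f, 2.0f, 3.0f};
    std::vector<float> b = {2.0f, 4.0f, 6.0f};
    std::vector<float> an(3), bn(3);
    l2_normalize(a.data(), an.data(), 3);
    l2_normalize(b.data(), bn.data(), 3);
    // Parallel vectors give similarity ~1.0; for normalized inputs this reduces to a dot product.
    printf("cos(a, b) = %.2f\n", cosine_similarity(an.data(), bn.data(), 3));
    return 0;
}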
examples/gguf-split/CMakeLists.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
set(TARGET gguf-split)
add_executable(${TARGET} gguf-split.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)

examples/gguf-split/README.md (new file, 9 lines)
@@ -0,0 +1,9 @@
## GGUF split Example

CLI to split / merge GGUF files.

**Command line options:**

- `--split`: split GGUF to multiple GGUF, default operation.
- `--split-max-tensors`: maximum tensors in each split: default(128)
- `--merge`: merge multiple GGUF to a single GGUF.
examples/gguf-split/gguf-split.cpp (new file, 489 lines)
@@ -0,0 +1,489 @@
#include "llama.h"
#include "ggml.h"
#include "common.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <fstream>
#include <ios>
#include <string>
#include <vector>

#include <stdio.h>
#include <fcntl.h>
#include <string.h>

enum split_operation : uint8_t {
    SPLIT_OP_SPLIT,
    SPLIT_OP_MERGE,
};

static const char * const LLM_KV_GENERAL_SPLIT_I_SPLIT = "general.split";
static const char * const LLM_KV_GENERAL_SPLIT_N_SPLIT = "general.split_count";

static const int SPLIT_FILENAME_MAX = 256;

static const char * const SPLIT_FILENAME_FORMAT = "%s-%05d-of-%05d.gguf";

struct split_params {
    split_operation operation = SPLIT_OP_SPLIT;
    int n_split_tensors = 128;
    std::string input;
    std::string output;
};

static void split_print_usage(const char * executable) {
    const split_params default_params;
    printf("\n");
    printf("usage: %s [options] GGUF_IN GGUF_OUT\n", executable);
    printf("\n");
    printf("Apply a GGUF operation on IN to OUT.");
    printf("\n");
    printf("options:\n");
    printf("  -h, --help              show this help message and exit\n");
    printf("  --version               show version and build info\n");
    printf("  --split                 split GGUF to multiple GGUF (default)\n");
    printf("  --split-max-tensors     max tensors in each split: default(%d)\n", default_params.n_split_tensors);
    printf("  --merge                 merge multiple GGUF to a single GGUF\n");
    printf("\n");
}

static bool split_params_parse_ex(int argc, const char ** argv, split_params & params) {
    std::string arg;
    const std::string arg_prefix = "--";
    bool invalid_param = false;

    int arg_idx = 1;
    for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
        arg = argv[arg_idx];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }

        bool arg_found = false;
        if (arg == "-h" || arg == "--help") {
            split_print_usage(argv[0]);
            exit(0);
        }
        if (arg == "--version") {
            fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
            fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
            exit(0);
        }

        if (arg == "--merge") {
            arg_found = true;
            params.operation = SPLIT_OP_MERGE;
        }
        if (arg == "--split") {
            arg_found = true;
            params.operation = SPLIT_OP_SPLIT;
        }
        if (arg == "--split-max-tensors") {
            if (++arg_idx >= argc) {
                invalid_param = true;
                break;
            }
            arg_found = true;
            params.n_split_tensors = atoi(argv[arg_idx]);
        }

        if (!arg_found) {
            throw std::invalid_argument("error: unknown argument: " + arg);
        }
    }

    if (invalid_param) {
        throw std::invalid_argument("error: invalid parameter for argument: " + arg);
    }

    if (argc - arg_idx < 2) {
        printf("%s: bad arguments\n", argv[0]);
        split_print_usage(argv[0]);
        return false;
    }

    params.input = argv[arg_idx++];
    params.output = argv[arg_idx++];

    return true;
}

static bool split_params_parse(int argc, const char ** argv, split_params & params) {
    bool result = true;
    try {
        if (!split_params_parse_ex(argc, argv, params)) {
            split_print_usage(argv[0]);
            exit(1);
        }
    }
    catch (const std::invalid_argument & ex) {
        fprintf(stderr, "%s\n", ex.what());
        split_print_usage(argv[0]);
        exit(1);
    }
    return result;
}

static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}

static std::string split_file_name(const std::string & path, int i_split, int n_split) {
    char f_split[SPLIT_FILENAME_MAX] = {0};
    snprintf(f_split, sizeof(f_split), SPLIT_FILENAME_FORMAT, path.c_str(), i_split + 1, n_split);
    return std::string(f_split);
}

struct split_strategy {
    const split_params params;
    std::ifstream & f_input;
    struct gguf_context * ctx_gguf;
    struct ggml_context * ctx_meta = NULL;
    const int n_tensors;

    const int n_split;
    int i_split = 0;

    int i_tensor = 0;

    std::vector<uint8_t> read_data;

    struct gguf_context * ctx_out;
    std::ofstream fout;

    split_strategy(const split_params & params,
            std::ifstream & f_input,
            struct gguf_context * ctx_gguf,
            struct ggml_context * ctx_meta) :
        params(params),
        f_input(f_input),
        ctx_gguf(ctx_gguf),
        ctx_meta(ctx_meta),
        n_tensors(gguf_get_n_tensors(ctx_gguf)),
        n_split(std::ceil(1. * n_tensors / params.n_split_tensors)) {
        }

    bool should_split() const {
        return i_tensor < n_tensors && i_tensor % params.n_split_tensors == 0;
    }

    void split_start() {
        ctx_out = gguf_init_empty();

        // Save all metadata in first split only
        if (i_split == 0) {
            gguf_set_kv(ctx_out, ctx_gguf);
        }
        gguf_set_val_u8(ctx_out, LLM_KV_GENERAL_SPLIT_I_SPLIT, i_split);
        gguf_set_val_u8(ctx_out, LLM_KV_GENERAL_SPLIT_N_SPLIT, n_split);

        // populate the original tensors, so we get an initial metadata
        for (int i = i_split * params.n_split_tensors; i < n_tensors && i < (i_split + 1) * params.n_split_tensors; ++i) {
            struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i));
            gguf_add_tensor(ctx_out, meta);
        }

        auto split_name = split_file_name(params.output, i_split, n_split);

        fprintf(stderr, "%s: %s ...", __func__, split_name.c_str());
        fout = std::ofstream(split_name, std::ios::binary);
        fout.exceptions(std::ofstream::failbit); // fail fast on write errors

        auto meta_size = gguf_get_meta_size(ctx_out);

        // placeholder for the meta data
        ::zeros(fout, meta_size);

        i_split++;
    }

    void next_tensor() {
        const char * t_name = gguf_get_tensor_name(ctx_gguf, i_tensor);
        struct ggml_tensor * t = ggml_get_tensor(ctx_meta, t_name);
        auto n_bytes = ggml_nbytes(t);

        if (read_data.size() < n_bytes) {
            read_data.resize(n_bytes);
        }

        auto offset = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i_tensor);
        f_input.seekg(offset);
        f_input.read((char *)read_data.data(), n_bytes);

        t->data = read_data.data();

        // write tensor data + padding
        fout.write((const char *)t->data, n_bytes);
        zeros(fout, GGML_PAD(n_bytes, GGUF_DEFAULT_ALIGNMENT) - n_bytes);

        i_tensor++;
    }

    void split_end() {
        // go back to beginning of file and write the updated metadata
        fout.seekp(0);
        std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
        gguf_get_meta_data(ctx_out, data.data());
        fout.write((const char *)data.data(), data.size());

        fout.close();
        gguf_free(ctx_out);

        fprintf(stderr, "\033[3Ddone\n");
    }
};

static void gguf_split(const split_params & split_params) {
    struct ggml_context * ctx_meta = NULL;

    struct gguf_init_params params = {
        /*.no_alloc = */ true,
        /*.ctx      = */ &ctx_meta,
    };

    std::ifstream f_input(split_params.input.c_str(), std::ios::binary);
    if (!f_input.is_open()) {
        fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_params.input.c_str());
        exit(1);
    }

    auto * ctx_gguf = gguf_init_from_file(split_params.input.c_str(), params);
    if (!ctx_gguf) {
        fprintf(stderr, "%s: failed to load input GGUF from %s\n", __func__, split_params.input.c_str());
        exit(1);
    }

    split_strategy strategy(split_params, f_input, ctx_gguf, ctx_meta);
    fprintf(stderr, "%s: %s -> %s (%d tensors per file)\n",
            __func__, split_params.input.c_str(),
            split_file_name(split_params.output, strategy.i_split, strategy.n_split).c_str(),
            split_params.n_split_tensors);

    strategy.split_start();

    while (strategy.i_tensor < strategy.n_tensors) {
        strategy.next_tensor();
        if (strategy.should_split()) {
            strategy.split_end();
            strategy.split_start();
        }
    }
    strategy.split_end();

    gguf_free(ctx_gguf);
    f_input.close();

    fprintf(stderr, "%s: %d gguf split written with a total of %d tensors.\n",
            __func__, strategy.n_split, strategy.n_tensors);
}

static void gguf_merge(const split_params & split_params) {
    fprintf(stderr, "%s: %s -> %s\n",
            __func__, split_params.input.c_str(),
            split_params.output.c_str());
    int n_split = 1;
    int total_tensors = 0;

    auto * ctx_out = gguf_init_empty();
    std::ofstream fout(split_params.output.c_str(), std::ios::binary);
    fout.exceptions(std::ofstream::failbit); // fail fast on write errors

    std::vector<uint8_t> read_data;
    std::vector<ggml_context *> ctx_metas;
    std::vector<gguf_context *> ctx_ggufs;

    std::string split_prefix;

    // First pass to find KV and tensors metadata
    for (int i_split = 0; i_split < n_split; i_split++) {
        struct ggml_context * ctx_meta = NULL;

        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &ctx_meta,
        };

        auto split_name = split_params.input;
        if (i_split > 0) {
|
||||||
|
split_name = split_file_name(split_prefix, i_split, n_split);
|
||||||
|
}
|
||||||
|
fprintf(stderr, "%s: reading metadata %s ...", __func__, split_name.c_str());
|
||||||
|
|
||||||
|
auto * ctx_gguf = gguf_init_from_file(split_name.c_str(), params);
|
||||||
|
if (!ctx_gguf) {
|
||||||
|
fprintf(stderr, "\n%s: failed to load input GGUF from %s\n", __func__, split_params.input.c_str());
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
ctx_ggufs.push_back(ctx_gguf);
|
||||||
|
ctx_metas.push_back(ctx_meta);
|
||||||
|
|
||||||
|
if (i_split == 0) {
|
||||||
|
auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_GENERAL_SPLIT_N_SPLIT);
|
||||||
|
if (key_n_split < 0) {
|
||||||
|
fprintf(stderr,
|
||||||
|
"\n%s: input file does not contain %s metadata\n",
|
||||||
|
__func__,
|
||||||
|
LLM_KV_GENERAL_SPLIT_N_SPLIT);
|
||||||
|
gguf_free(ctx_gguf);
|
||||||
|
gguf_free(ctx_out);
|
||||||
|
fout.close();
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
n_split = gguf_get_val_u8(ctx_gguf, key_n_split);
|
||||||
|
if (n_split < 1) {
|
||||||
|
fprintf(stderr,
|
||||||
|
"\n%s: input file does not contain a valid split count %d\n",
|
||||||
|
__func__,
|
||||||
|
n_split);
|
||||||
|
gguf_free(ctx_gguf);
|
||||||
|
gguf_free(ctx_out);
|
||||||
|
fout.close();
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do not trigger merge if we try to merge again the output
|
||||||
|
gguf_set_val_u8(ctx_out, LLM_KV_GENERAL_SPLIT_N_SPLIT, 0);
|
||||||
|
|
||||||
|
// Set metadata from the first split
|
||||||
|
gguf_set_kv(ctx_out, ctx_gguf);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the file naming
|
||||||
|
{
|
||||||
|
int i_split_file = 0;
|
||||||
|
int n_split_file = 0;
|
||||||
|
const char * i_split_format = "-00000-of-00000.gguf";
|
||||||
|
|
||||||
|
if (split_name.size() < strlen(i_split_format)) {
|
||||||
|
fprintf(stderr, "\n%s: unexpected input file name: %s\n", __func__, split_params.input.c_str());
|
||||||
|
for (auto * _ctx_gguf : ctx_ggufs) {
|
||||||
|
gguf_free(_ctx_gguf);
|
||||||
|
}
|
||||||
|
gguf_free(ctx_out);
|
||||||
|
fout.close();
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
split_prefix = split_name.substr(0, split_name.size() - strlen(i_split_format));
|
||||||
|
|
||||||
|
const char * split_name_c_str = split_name.c_str();
|
||||||
|
int n_part = sscanf(&split_name_c_str[0] + split_prefix.size(), "-%d-of-%d", &i_split_file, &n_split_file);
|
||||||
|
|
||||||
|
if (n_part != 2 || i_split_file - 1 != i_split || n_split_file != n_split) {
|
||||||
|
fprintf(stderr, "\n%s: unexpected input file name: %s"
|
||||||
|
" i_split=%d i_split_file=%d"
|
||||||
|
" n_split=%d n_split_file=%d\n", __func__,
|
||||||
|
split_params.input.c_str(),
|
||||||
|
i_split, i_split_file,
|
||||||
|
n_split, n_split_file);
|
||||||
|
for (auto * _ctx_gguf : ctx_ggufs) {
|
||||||
|
gguf_free(_ctx_gguf);
|
||||||
|
}
|
||||||
|
gguf_free(ctx_out);
|
||||||
|
fout.close();
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
auto n_tensors = gguf_get_n_tensors(ctx_gguf);
|
||||||
|
for (int i_tensor = 0; i_tensor < n_tensors; i_tensor++) {
|
||||||
|
const char * t_name = gguf_get_tensor_name(ctx_gguf, i_tensor);
|
||||||
|
struct ggml_tensor * t = ggml_get_tensor(ctx_meta, t_name);
|
||||||
|
gguf_add_tensor(ctx_out, t);
|
||||||
|
}
|
||||||
|
total_tensors += n_tensors;
|
||||||
|
|
||||||
|
fprintf(stderr, "\033[3Ddone\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
// placeholder for the meta data
|
||||||
|
{
|
||||||
|
auto meta_size = gguf_get_meta_size(ctx_out);
|
||||||
|
::zeros(fout, meta_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write tensors data
|
||||||
|
for (int i_split = 0; i_split < n_split; i_split++) {
|
||||||
|
auto split_name = split_file_name(split_prefix, i_split, n_split);
|
||||||
|
std::ifstream f_input(split_name.c_str(), std::ios::binary);
|
||||||
|
if (!f_input.is_open()) {
|
||||||
|
fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_name.c_str());
|
||||||
|
for (auto * _ctx_gguf : ctx_ggufs) {
|
||||||
|
gguf_free(_ctx_gguf);
|
||||||
|
}
|
||||||
|
gguf_free(ctx_out);
|
||||||
|
fout.close();
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
fprintf(stderr, "%s: writing tensors %s ...", __func__, split_name.c_str());
|
||||||
|
|
||||||
|
auto * ctx_gguf = ctx_ggufs[i_split];
|
||||||
|
auto * ctx_meta = ctx_metas[i_split];
|
||||||
|
|
||||||
|
auto n_tensors = gguf_get_n_tensors(ctx_gguf);
|
||||||
|
for (int i_tensor = 0; i_tensor < n_tensors; i_tensor++) {
|
||||||
|
const char * t_name = gguf_get_tensor_name(ctx_gguf, i_tensor);
|
||||||
|
struct ggml_tensor * t = ggml_get_tensor(ctx_meta, t_name);
|
||||||
|
|
||||||
|
auto n_bytes = ggml_nbytes(t);
|
||||||
|
|
||||||
|
if (read_data.size() < n_bytes) {
|
||||||
|
read_data.resize(n_bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
auto offset = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i_tensor);
|
||||||
|
f_input.seekg(offset);
|
||||||
|
f_input.read((char *)read_data.data(), n_bytes);
|
||||||
|
|
||||||
|
// write tensor data + padding
|
||||||
|
fout.write((const char *)read_data.data(), n_bytes);
|
||||||
|
zeros(fout, GGML_PAD(n_bytes, GGUF_DEFAULT_ALIGNMENT) - n_bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
gguf_free(ctx_gguf);
|
||||||
|
ggml_free(ctx_meta);
|
||||||
|
f_input.close();
|
||||||
|
fprintf(stderr, "\033[3Ddone\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// go back to beginning of file and write the updated metadata
|
||||||
|
fout.seekp(0);
|
||||||
|
std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
|
||||||
|
gguf_get_meta_data(ctx_out, data.data());
|
||||||
|
fout.write((const char *)data.data(), data.size());
|
||||||
|
|
||||||
|
fout.close();
|
||||||
|
gguf_free(ctx_out);
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stderr, "%s: %s merged from %d split with %d tensors.\n",
|
||||||
|
__func__, split_params.output.c_str(), n_split, total_tensors);
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, const char ** argv) {
|
||||||
|
if (argc < 3) {
|
||||||
|
split_print_usage(argv[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
split_params params;
|
||||||
|
split_params_parse(argc, argv, params);
|
||||||
|
|
||||||
|
switch (params.operation) {
|
||||||
|
case SPLIT_OP_SPLIT: gguf_split(params);
|
||||||
|
break;
|
||||||
|
case SPLIT_OP_MERGE: gguf_merge(params);
|
||||||
|
break;
|
||||||
|
default:split_print_usage(argv[0]);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
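For readers who want the bookkeeping above in isolation, here is a minimal, hypothetical Python sketch of how shard names and tensor-to-shard assignment work. The `-%05d-of-%05d.gguf` suffix is an assumption inferred from the merge-side check (`"-00000-of-00000.gguf"` and `sscanf("-%d-of-%d", ...)`); the real format string is `SPLIT_FILENAME_FORMAT`, which is defined outside this excerpt.

```python
import math

# Assumed shard-name convention, mirroring split_file_name(); the real format
# string is SPLIT_FILENAME_FORMAT and may differ.
def split_file_name(prefix: str, i_split: int, n_split: int) -> str:
    return f"{prefix}-{i_split + 1:05d}-of-{n_split:05d}.gguf"

def plan_splits(tensor_names: list, prefix: str, n_split_tensors: int) -> dict:
    """Assign tensors to shards the same way split_strategy does:
    a new shard starts every n_split_tensors tensors."""
    n_split = math.ceil(len(tensor_names) / n_split_tensors)
    plan = {}
    for i, name in enumerate(tensor_names):
        shard = split_file_name(prefix, i // n_split_tensors, n_split)
        plan.setdefault(shard, []).append(name)
    return plan

# Example: 5 tensors, 2 per shard -> 3 shards
print(plan_splits([f"blk.{i}.attn_k.weight" for i in range(5)], "model", 2))
```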
@ -211,6 +211,7 @@ static bool gguf_ex_read_1(const std::string & fname) {
         for (int j = 0; j < ggml_nelements(cur); ++j) {
             if (data[j] != 100 + i) {
                 fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]);
+                gguf_free(ctx);
                 return false;
             }
         }
5 examples/gritlm/CMakeLists.txt Normal file
@ -0,0 +1,5 @@
set(TARGET gritlm)
add_executable(${TARGET} gritlm.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
62 examples/gritlm/README.md Normal file
@ -0,0 +1,62 @@
## Generative Representational Instruction Tuning (GRIT) Example
[gritlm] is a model which can generate embeddings as well as "normal" text
generation depending on the instructions in the prompt.

* Paper: https://arxiv.org/pdf/2402.09906.pdf

### Retrieval-Augmented Generation (RAG) use case
One use case for `gritlm` is to use it with RAG. Recall how RAG works: we take
the documents that we want to use as context to ground the large language
model (LLM), and we create token embeddings for them. We then store these
token embeddings in a vector database.

When we perform a query (prompt the LLM), we first create token embeddings
for the query and then search the vector database to retrieve the most
similar vectors, and return those documents so they can be passed to the LLM
as context. The query and the context are then passed to the LLM, which would
normally have to _again_ create token embeddings for the query. But because
gritlm is used, the embeddings from the first query can be cached and this
second embedding step does not have to be performed at all.

### Running the example
Download a Grit model:
```console
$ scripts/hf.sh --repo cohesionet/GritLM-7B_gguf --file gritlm-7b_q4_1.gguf
```

Run the example using the downloaded model:
```console
$ ./gritlm -m gritlm-7b_q4_1.gguf

Cosine similarity between "Bitcoin: A Peer-to-Peer Electronic Cash System" and "A purely peer-to-peer version of electronic cash w" is: 0.605
Cosine similarity between "Bitcoin: A Peer-to-Peer Electronic Cash System" and "All text-based language problems can be reduced to" is: 0.103
Cosine similarity between "Generative Representational Instruction Tuning" and "A purely peer-to-peer version of electronic cash w" is: 0.112
Cosine similarity between "Generative Representational Instruction Tuning" and "All text-based language problems can be reduced to" is: 0.547

Oh, brave adventurer, who dared to climb
The lofty peak of Mt. Fuji in the night,
When shadows lurk and ghosts do roam,
And darkness reigns, a fearsome sight.

Thou didst set out, with heart aglow,
To conquer this mountain, so high,
And reach the summit, where the stars do glow,
And the moon shines bright, up in the sky.

Through the mist and fog, thou didst press on,
With steadfast courage, and a steadfast will,
Through the darkness, thou didst not be gone,
But didst climb on, with a steadfast skill.

At last, thou didst reach the summit's crest,
And gazed upon the world below,
And saw the beauty of the night's best,
And felt the peace, that only nature knows.

Oh, brave adventurer, who dared to climb
The lofty peak of Mt. Fuji in the night,
Thou art a hero, in the eyes of all,
For thou didst conquer this mountain, so bright.
```

[gritlm]: https://github.com/ContextualAI/gritlm
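The retrieval step described in the README boils down to comparing normalized embeddings with cosine similarity (the quantity that `llama_embd_similarity_cos` reports in the output shown above). A minimal NumPy sketch of that ranking step, with made-up vectors standing in for the model's embeddings:

```python
import numpy as np

def cosine_sim(a: np.ndarray, b: np.ndarray) -> float:
    # cosine similarity = dot product divided by the product of the norms
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# stand-in embeddings; in the example they come from the GritLM model
query_emb = np.array([0.1, 0.9, 0.2])
doc_embs  = {"bitcoin": np.array([0.2, 0.8, 0.1]),
             "gritlm":  np.array([0.9, 0.1, 0.3])}

# pick the document whose embedding is closest to the query
best = max(doc_embs, key=lambda k: cosine_sim(query_emb, doc_embs[k]))
print(best, {k: round(cosine_sim(query_emb, v), 3) for k, v in doc_embs.items()})
```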
215 examples/gritlm/gritlm.cpp Normal file
@ -0,0 +1,215 @@
#include "common.h"
#include "llama.h"

#include <string>
#include <vector>

// #define GRIT_DEBUG

static std::vector<std::vector<float>> encode(llama_context * ctx, const std::vector<std::string> & sentences, const std::string & instruction) {
    std::vector<std::vector<float>> result;

    const llama_model * mdl = llama_get_model(ctx);

    llama_batch batch = llama_batch_init(llama_n_batch(ctx), 0, 1);

    for (uint64_t i = 0; i < sentences.size(); i++) {
        llama_batch_clear(batch);

        const std::string input_string = instruction + sentences[i];

        std::vector<llama_token> inputs = llama_tokenize(mdl, input_string, true, false);

        const int32_t n_toks = inputs.size();

        // GritLM seems to have EOS = ""
        // https://github.com/ContextualAI/gritlm/blob/92025b16534712b31b3c4aaaf069350e222bd5f8/gritlm/gritlm.py#L18
        // inputs.push_back(llama_token_eos(mdl));

        // we want to ignore instruction tokens for mean pooling
        const int32_t n_inst = llama_tokenize(mdl, instruction, true, false).size();

#ifdef GRIT_DEBUG
        // debug tokens - should be matching as referenced in the GritLM sample
        std::for_each(inputs.begin(), inputs.end(), [&ctx](llama_token t) {
            std::printf("[%u:%s]", t, llama_token_to_piece(ctx, t).c_str());
        });
        std::printf("\n");
#endif

        // add input to batch (this increments n_tokens)
        for (int32_t j = 0; j < n_toks; j++) {
            llama_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst);
        }

        // clear previous kv_cache values (irrelevant for embeddings)
        llama_kv_cache_clear(ctx);
        llama_set_causal_attn(ctx, false);

        // run model
        llama_decode(ctx, batch);

        // get embedding dimensions
        uint64_t n_embd = llama_n_embd(mdl);

        // allocate embedding output
        std::vector<float> emb_unorm(n_embd, 0.0f);

        // sum up all token embeddings
        for (int32_t k = n_inst; k < n_toks; k++) {
            float * emb = llama_get_embeddings_ith(ctx, k);
            for (uint64_t j = 0; j < n_embd; j++) {
                emb_unorm[j] += emb[j];
            }
        }

        // divide by number of tokens (mean pooling)
        {
            const uint64_t n_sent = n_toks - n_inst;

            for (uint64_t j = 0; j < n_embd; j++) {
                emb_unorm[j] /= n_sent;
            }
        }

        std::vector<float> emb_norm(emb_unorm.size());
        llama_embd_normalize(emb_unorm.data(), emb_norm.data(), n_embd);
        result.push_back(emb_norm);

#ifdef GRIT_DEBUG
        // print out emb_norm
        std::printf("embedding %ld: ", i);
        for (uint64_t j = 0; j < n_embd; j++) {
            std::printf("%.5f ", emb_norm[j]);
        }
        std::printf("\n\n");
#endif
    }

    llama_batch_free(batch);

    return result;
}

static std::string generate(llama_context * ctx, const std::string & prompt, bool stream) {
    std::string result;

    const llama_model * mdl = llama_get_model(ctx);
    llama_token eos_token = llama_token_eos(mdl);

    llama_kv_cache_clear(ctx);
    llama_set_causal_attn(ctx, true);
    llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1);

    std::vector<llama_token> inputs = llama_tokenize(mdl, prompt, false, true);
    int32_t i_current_token = 0;

    while (true) {
        llama_batch_clear(bat);
        auto n_inputs = (int32_t)inputs.size();
        for (int32_t i = 0; i < n_inputs; i++) {
            llama_batch_add(bat, inputs[i], i_current_token++, { 0 }, i == n_inputs - 1);
        }
        inputs.clear();

        llama_decode(ctx, bat);
        auto logits = llama_get_logits_ith(ctx, bat.n_tokens - 1);

        auto candidates = std::vector<llama_token_data>(llama_n_vocab(mdl));
        auto n_candidates = (int32_t)candidates.size();
        for (int32_t token = 0; token < n_candidates; token++) {
            candidates[token] = llama_token_data{ token, logits[token], 0.0f };
        }
        auto candidates_p = llama_token_data_array{ candidates.data(), candidates.size(), false };

        llama_token token = llama_sample_token_greedy(ctx, &candidates_p);
        if (token == eos_token) {
            break;
        }

        std::string piece = llama_token_to_piece(ctx, token);
        if (stream) {
            std::printf("%s", piece.c_str());
            std::fflush(stdout);
        }

        inputs.push_back(token);

        result += piece;
    }

    if (stream) {
        std::printf("\n");
    }

    llama_batch_free(bat);

    return result;
}

static std::string gritlm_instruction(const std::string & instruction) {
    return !instruction.empty() ? "<|user|>\n" + instruction + "\n<|embed|>\n" : "<|embed|>\n";
}

int main(int argc, char * argv[]) {
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

    llama_model_params mparams = llama_model_params_from_gpt_params(params);
    llama_context_params cparams = llama_context_params_from_gpt_params(params);

    llama_backend_init();

    llama_model * mdl = llama_load_model_from_file(params.model.c_str(), mparams);

    // create new context - set to embedding mode
    cparams.embeddings = true;
    llama_context * ctx = llama_new_context_with_model(mdl, cparams);

    // ### Embedding/Representation ###
    // samples taken from: https://github.com/ContextualAI/gritlm#basic
    {
        const std::string instruction = "Given a scientific paper title, retrieve the paper's abstract";

        const std::vector<std::string> queries = {
            "Bitcoin: A Peer-to-Peer Electronic Cash System",
            "Generative Representational Instruction Tuning",
        };

        const std::vector<std::string> documents = {
            "A purely peer-to-peer version of electronic cash would allow online payments to be sent directly from one party to another without going through a financial institution. Digital signatures provide part of the solution, but the main benefits are lost if a trusted third party is still required to prevent double-spending. We propose a solution to the double-spending problem using a peer-to-peer network. The network timestamps transactions by hashing them into an ongoing chain of hash-based proof-of-work, forming a record that cannot be changed without redoing the proof-of-work. The longest chain not only serves as proof of the sequence of events witnessed, but proof that it came from the largest pool of CPU power. As long as a majority of CPU power is controlled by nodes that are not cooperating to attack the network, they'll generate the longest chain and outpace attackers. The network itself requires minimal structure. Messages are broadcast on a best effort basis, and nodes can leave and rejoin the network at will, accepting the longest proof-of-work chain as proof of what happened while they were gone.",
            "All text-based language problems can be reduced to either generation or embedding. Current models only perform well at one or the other. We introduce generative representational instruction tuning (GRIT) whereby a large language model is trained to handle both generative and embedding tasks by distinguishing between them through instructions. Compared to other open models, our resulting GritLM 7B sets a new state of the art on the Massive Text Embedding Benchmark (MTEB) and outperforms all models up to its size on a range of generative tasks. By scaling up further, GritLM 8X7B outperforms all open generative language models that we tried while still being among the best embedding models. Notably, we find that GRIT matches training on only generative or embedding data, thus we can unify both at no performance loss. Among other benefits, the unification via GRIT speeds up Retrieval-Augmented Generation (RAG) by > 60% for long documents, by no longer requiring separate retrieval and generation models. Models, code, etc. are freely available at https://github.com/ContextualAI/gritlm.",
        };

        // No need to add instruction for retrieval documents
        const std::vector<std::vector<float>> d_rep = encode(ctx, documents, gritlm_instruction(""));
        const std::vector<std::vector<float>> q_rep = encode(ctx, queries, gritlm_instruction(instruction));

        const int n_embd = llama_n_embd(mdl);

        const float cosine_sim_q0_d0 = llama_embd_similarity_cos(q_rep[0].data(), d_rep[0].data(), n_embd);
        const float cosine_sim_q0_d1 = llama_embd_similarity_cos(q_rep[0].data(), d_rep[1].data(), n_embd);
        const float cosine_sim_q1_d0 = llama_embd_similarity_cos(q_rep[1].data(), d_rep[0].data(), n_embd);
        const float cosine_sim_q1_d1 = llama_embd_similarity_cos(q_rep[1].data(), d_rep[1].data(), n_embd);

        std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[0].c_str(), documents[0].c_str(), cosine_sim_q0_d0);
        std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[0].c_str(), documents[1].c_str(), cosine_sim_q0_d1);
        std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[1].c_str(), documents[0].c_str(), cosine_sim_q1_d0);
        std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[1].c_str(), documents[1].c_str(), cosine_sim_q1_d1);
    }

    // ### Generation ###
    // GritLM models are not finetuned with system prompts, as you can just include system-like instructions together with your user instruction
    {
        const std::string prompt = "<|user|>\nPlease write me a poem about my recent hike of Mt. Fuji at midnight in the style of Shakespeare.\n<|assistant|>\n";
        std::string response = generate(ctx, prompt, true);
    }

    llama_free(ctx);
    llama_free_model(mdl);
    llama_backend_free();

    return 0;
}
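The pooling logic in `encode()` above (sum the embeddings of the non-instruction tokens, divide by their count, then L2-normalize) can be sketched in a few lines of NumPy. This illustrates the same arithmetic, not the llama.cpp API; `token_embs` stands in for the per-token embeddings returned by `llama_get_embeddings_ith`.

```python
import numpy as np

def pool_and_normalize(token_embs: np.ndarray, n_inst: int) -> np.ndarray:
    """token_embs: (n_tokens, n_embd); the first n_inst rows are instruction tokens."""
    emb_unorm = token_embs[n_inst:].mean(axis=0)   # mean pooling over sentence tokens only
    return emb_unorm / np.linalg.norm(emb_unorm)   # L2 normalization (what llama_embd_normalize does)

# toy example: 5 tokens, 4-dim embeddings, first 2 tokens belong to the instruction
embs = np.random.rand(5, 4).astype(np.float32)
print(pool_and_normalize(embs, n_inst=2))
```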
@ -56,13 +56,31 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
     const struct ggml_tensor * src0 = t->src[0];
     const struct ggml_tensor * src1 = t->src[1];
+
+    std::string wname;
+    {
+        // remove any prefix and suffixes from the name
+        // CUDA0#blk.0.attn_k.weight#0 => blk.0.attn_k.weight
+        const char * p = strchr(src0->name, '#');
+        if (p != NULL) {
+            p = p + 1;
+            const char * q = strchr(p, '#');
+            if (q != NULL) {
+                wname = std::string(p, q - p);
+            } else {
+                wname = p;
+            }
+        } else {
+            wname = src0->name;
+        }
+    }

     // when ask is true, the scheduler wants to know if we are interested in data from this tensor
     // if we return true, a follow-up call will be made with ask=false in which we can do the actual collection
     if (ask) {
         if (t->op == GGML_OP_MUL_MAT_ID) return true; // collect all indirect matrix multiplications
         if (t->op != GGML_OP_MUL_MAT) return false;
         if (src1->ne[1] < 16 || src1->type != GGML_TYPE_F32) return false;
-        if (!(strncmp(src0->name, "blk.", 4) == 0 || (m_params.collect_output_weight && strcmp(src0->name, "output.weight") == 0))) return false;
+        if (!(wname.substr(0, 4) == "blk." || (m_params.collect_output_weight && wname == "output.weight"))) return false;
         return true;
     }

@ -94,12 +112,12 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
     // this is necessary to guarantee equal number of "ncall" for each tensor
     for (int ex = 0; ex < n_as; ++ex) {
         src0 = t->src[2 + ex];
-        auto& e = m_stats[src0->name];
+        auto& e = m_stats[wname];
         if (e.values.empty()) {
             e.values.resize(src1->ne[0], 0);
         }
         else if (e.values.size() != (size_t)src1->ne[0]) {
-            fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", src0->name, (int)e.values.size(), (int)src1->ne[0]);
+            fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]);
             exit(1); //GGML_ASSERT(false);
         }
         // NOTE: since we select top-k experts, the number of calls for the expert tensors will be k times larger
@ -107,7 +125,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
         //if (idx == t->src[0]->ne[0] - 1) ++e.ncall;
         ++e.ncall;
         if (m_params.verbosity > 1) {
-            printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, src0->name, ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type);
+            printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, wname.c_str(), ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type);
         }
         for (int row = 0; row < (int)src1->ne[1]; ++row) {
             const int excur = m_ids[row*n_as + idx];
@ -129,17 +147,17 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
             }
         }
     } else {
-        auto& e = m_stats[src0->name];
+        auto& e = m_stats[wname];
         if (e.values.empty()) {
             e.values.resize(src1->ne[0], 0);
         }
         else if (e.values.size() != (size_t)src1->ne[0]) {
-            fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", src0->name, (int)e.values.size(), (int)src1->ne[0]);
+            fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]);
             exit(1); //GGML_ASSERT(false);
         }
         ++e.ncall;
         if (m_params.verbosity > 1) {
-            printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, src0->name, ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type);
+            printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, wname.c_str(), ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type);
         }
         for (int row = 0; row < (int)src1->ne[1]; ++row) {
             const float * x = data + row * src1->ne[0];
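The new `wname` block above strips the backend wrapper that the scheduler can add around a tensor name (e.g. `CUDA0#blk.0.attn_k.weight#0`), so that imatrix statistics are keyed by the plain weight name. A small Python sketch of the same string handling, for illustration only:

```python
def strip_backend_wrapper(name: str) -> str:
    # "CUDA0#blk.0.attn_k.weight#0" -> "blk.0.attn_k.weight"
    # mirrors the strchr('#') logic: take the text between the first two '#', if present
    first = name.find('#')
    if first == -1:
        return name
    second = name.find('#', first + 1)
    return name[first + 1:second] if second != -1 else name[first + 1:]

assert strip_backend_wrapper("CUDA0#blk.0.attn_k.weight#0") == "blk.0.attn_k.weight"
assert strip_backend_wrapper("blk.0.attn_k.weight") == "blk.0.attn_k.weight"
```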
74 examples/json-schema-pydantic-example.py Normal file
@ -0,0 +1,74 @@
# Usage:
#! ./server -m some-model.gguf &
#! pip install pydantic
#! python json-schema-pydantic-example.py

from pydantic import BaseModel, TypeAdapter
from annotated_types import MinLen
from typing import Annotated, List, Optional
import json, requests

if True:

    def create_completion(*, response_model=None, endpoint="http://localhost:8080/v1/chat/completions", messages, **kwargs):
        '''
        Creates a chat completion using an OpenAI-compatible endpoint w/ JSON schema support
        (llama.cpp server, llama-cpp-python, Anyscale / Together...)

        The response_model param takes a type (+ supports Pydantic) and behaves just as w/ Instructor (see below)
        '''
        if response_model:
            type_adapter = TypeAdapter(response_model)
            schema = type_adapter.json_schema()
            messages = [{
                "role": "system",
                "content": f"You respond in JSON format with the following schema: {json.dumps(schema, indent=2)}"
            }] + messages
            response_format={"type": "json_object", "schema": schema}

        data = requests.post(endpoint, headers={"Content-Type": "application/json"},
                             json=dict(messages=messages, response_format=response_format, **kwargs)).json()
        if 'error' in data:
            raise Exception(data['error']['message'])

        content = data["choices"][0]["message"]["content"]
        return type_adapter.validate_json(content) if type_adapter else content

else:

    # This alternative branch uses Instructor + OpenAI client lib.
    # Instructor support streamed iterable responses, retry & more.
    # (see https://python.useinstructor.com/)
    #! pip install instructor openai
    import instructor, openai
    client = instructor.patch(
        openai.OpenAI(api_key="123", base_url="http://localhost:8080"),
        mode=instructor.Mode.JSON_SCHEMA)
    create_completion = client.chat.completions.create


if __name__ == '__main__':

    class QAPair(BaseModel):
        question: str
        concise_answer: str
        justification: str

    class PyramidalSummary(BaseModel):
        title: str
        summary: str
        question_answers: Annotated[List[QAPair], MinLen(2)]
        sub_sections: Optional[Annotated[List['PyramidalSummary'], MinLen(2)]]

    print("# Summary\n", create_completion(
        model="...",
        response_model=PyramidalSummary,
        messages=[{
            "role": "user",
            "content": f"""
                You are a highly efficient corporate document summarizer.
                Create a pyramidal summary of an imaginary internal document about our company processes
                (starting high-level, going down to each sub sections).
                Keep questions short, and answers even shorter (trivia / quizz style).
            """
        }]))
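For reference, this is roughly the raw request the helper above builds when a `response_model` is given; a hedged sketch against a locally running server, where the toy `Answer` model and the user question are placeholders, not part of the example file:

```python
import json, requests
from pydantic import BaseModel, TypeAdapter

class Answer(BaseModel):          # hypothetical toy model
    city: str
    population_millions: float

schema = TypeAdapter(Answer).json_schema()
payload = {
    "messages": [
        {"role": "system", "content": f"You respond in JSON format with the following schema: {json.dumps(schema)}"},
        {"role": "user", "content": "What is the largest city in France?"},
    ],
    # same response_format shape as in create_completion() above
    "response_format": {"type": "json_object", "schema": schema},
}
data = requests.post("http://localhost:8080/v1/chat/completions",
                     headers={"Content-Type": "application/json"}, json=payload).json()
print(TypeAdapter(Answer).validate_json(data["choices"][0]["message"]["content"]))
```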
@ -1,8 +1,10 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
import argparse
|
import argparse
|
||||||
|
import itertools
|
||||||
import json
|
import json
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
|
from typing import Any, Dict, List, Set, Tuple, Union
|
||||||
|
|
||||||
# whitespace is constrained to a single space char to prevent model "running away" in
|
# whitespace is constrained to a single space char to prevent model "running away" in
|
||||||
# whitespace. Also maybe improves generation quality?
|
# whitespace. Also maybe improves generation quality?
|
||||||
@ -12,26 +14,54 @@ PRIMITIVE_RULES = {
|
|||||||
'boolean': '("true" | "false") space',
|
'boolean': '("true" | "false") space',
|
||||||
'number': '("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? space',
|
'number': '("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? space',
|
||||||
'integer': '("-"? ([0-9] | [1-9] [0-9]*)) space',
|
'integer': '("-"? ([0-9] | [1-9] [0-9]*)) space',
|
||||||
|
'value' : 'object | array | string | number | boolean',
|
||||||
|
'object' : '"{" space ( string ":" space value ("," space string ":" space value)* )? "}" space',
|
||||||
|
'array' : '"[" space ( value ("," space value)* )? "]" space',
|
||||||
|
'uuid' : '"\\"" ' + ' "-" '.join('[0-9a-fA-F]' * n for n in [8, 4, 4, 4, 12]) + ' "\\"" space',
|
||||||
'string': r''' "\"" (
|
'string': r''' "\"" (
|
||||||
[^"\\] |
|
[^"\\] |
|
||||||
"\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F])
|
"\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F])
|
||||||
)* "\"" space ''',
|
)* "\"" space''',
|
||||||
'null': '"null" space',
|
'null': '"null" space',
|
||||||
}
|
}
|
||||||
|
OBJECT_RULE_NAMES = ['object', 'array', 'string', 'number', 'boolean', 'null', 'value']
|
||||||
|
|
||||||
|
# TODO: support "uri", "email" string formats
|
||||||
|
DATE_RULES = {
|
||||||
|
'date' : '[0-9] [0-9] [0-9] [0-9] "-" ( "0" [1-9] | "1" [0-2] ) "-" ( \"0\" [1-9] | [1-2] [0-9] | "3" [0-1] )',
|
||||||
|
'time' : '([01] [0-9] | "2" [0-3]) ":" [0-5] [0-9] ":" [0-5] [0-9] ( "." [0-9] [0-9] [0-9] )? ( "Z" | ( "+" | "-" ) ( [01] [0-9] | "2" [0-3] ) ":" [0-5] [0-9] )',
|
||||||
|
'date-time': 'date "T" time',
|
||||||
|
'date-string': '"\\"" date "\\"" space',
|
||||||
|
'time-string': '"\\"" time "\\"" space',
|
||||||
|
'date-time-string': '"\\"" date-time "\\"" space',
|
||||||
|
}
|
||||||
|
|
||||||
|
RESERVED_NAMES = set(["root", *PRIMITIVE_RULES.keys(), *DATE_RULES.keys()])
|
||||||
|
|
||||||
INVALID_RULE_CHARS_RE = re.compile(r'[^a-zA-Z0-9-]+')
|
INVALID_RULE_CHARS_RE = re.compile(r'[^a-zA-Z0-9-]+')
|
||||||
GRAMMAR_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"]')
|
GRAMMAR_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"]')
|
||||||
GRAMMAR_LITERAL_ESCAPES = {'\r': '\\r', '\n': '\\n', '"': '\\"'}
|
GRAMMAR_RANGE_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"\]\-\\]')
|
||||||
|
GRAMMAR_LITERAL_ESCAPES = {'\r': '\\r', '\n': '\\n', '"': '\\"', '-': '\\-', ']': '\\]'}
|
||||||
|
|
||||||
|
NON_LITERAL_SET = set('|.()[]{}*+?')
|
||||||
|
ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = set('[]()|{}*+?')
|
||||||
|
|
||||||
|
DATE_PATTERN = '[0-9]{4}-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1])'
|
||||||
|
TIME_PATTERN = '([01][0-9]|2[0-3])(:[0-5][0-9]){2}(\\.[0-9]{1,3})?(Z|[+-](([01][0-9]|2[0-3]):[0-5][0-9]))' # Cap millisecond precision w/ 3 digits
|
||||||
|
|
||||||
class SchemaConverter:
|
class SchemaConverter:
|
||||||
def __init__(self, prop_order):
|
def __init__(self, *, prop_order, allow_fetch, dotall, raw_pattern):
|
||||||
self._prop_order = prop_order
|
self._prop_order = prop_order
|
||||||
|
self._allow_fetch = allow_fetch
|
||||||
|
self._dotall = dotall
|
||||||
|
self._raw_pattern = raw_pattern
|
||||||
self._rules = {'space': SPACE_RULE}
|
self._rules = {'space': SPACE_RULE}
|
||||||
|
self._refs = {}
|
||||||
|
self._refs_being_resolved = set()
|
||||||
|
|
||||||
def _format_literal(self, literal):
|
def _format_literal(self, literal):
|
||||||
escaped = GRAMMAR_LITERAL_ESCAPE_RE.sub(
|
escaped = GRAMMAR_LITERAL_ESCAPE_RE.sub(
|
||||||
lambda m: GRAMMAR_LITERAL_ESCAPES.get(m.group(0)), json.dumps(literal)
|
lambda m: GRAMMAR_LITERAL_ESCAPES.get(m.group(0)), literal
|
||||||
)
|
)
|
||||||
return f'"{escaped}"'
|
return f'"{escaped}"'
|
||||||
|
|
||||||
@ -41,78 +71,420 @@ class SchemaConverter:
|
|||||||
key = esc_name
|
key = esc_name
|
||||||
else:
|
else:
|
||||||
i = 0
|
i = 0
|
||||||
while f'{esc_name}{i}' in self._rules:
|
while f'{esc_name}{i}' in self._rules and self._rules[f'{esc_name}{i}'] != rule:
|
||||||
i += 1
|
i += 1
|
||||||
key = f'{esc_name}{i}'
|
key = f'{esc_name}{i}'
|
||||||
self._rules[key] = rule
|
self._rules[key] = rule
|
||||||
return key
|
return key
|
||||||
|
|
||||||
|
def resolve_refs(self, schema: dict, url: str):
|
||||||
|
'''
|
||||||
|
Resolves all $ref fields in the given schema, fetching any remote schemas,
|
||||||
|
replacing $ref with absolute reference URL and populating self._refs with the
|
||||||
|
respective referenced (sub)schema dictionaries.
|
||||||
|
'''
|
||||||
|
def visit(n: dict):
|
||||||
|
if isinstance(n, list):
|
||||||
|
return [visit(x) for x in n]
|
||||||
|
elif isinstance(n, dict):
|
||||||
|
ref = n.get('$ref')
|
||||||
|
if ref is not None and ref not in self._refs:
|
||||||
|
if ref.startswith('https://'):
|
||||||
|
assert self._allow_fetch, 'Fetching remote schemas is not allowed (use --allow-fetch for force)'
|
||||||
|
import requests
|
||||||
|
|
||||||
|
frag_split = ref.split('#')
|
||||||
|
base_url = frag_split[0]
|
||||||
|
|
||||||
|
target = self._refs.get(base_url)
|
||||||
|
if target is None:
|
||||||
|
target = self.resolve_refs(requests.get(ref).json(), base_url)
|
||||||
|
self._refs[base_url] = target
|
||||||
|
|
||||||
|
if len(frag_split) == 1 or frag_split[-1] == '':
|
||||||
|
return target
|
||||||
|
elif ref.startswith('#/'):
|
||||||
|
target = schema
|
||||||
|
ref = f'{url}{ref}'
|
||||||
|
n['$ref'] = ref
|
||||||
|
else:
|
||||||
|
raise ValueError(f'Unsupported ref {ref}')
|
||||||
|
|
||||||
|
for sel in ref.split('#')[-1].split('/')[1:]:
|
||||||
|
assert target is not None and sel in target, f'Error resolving ref {ref}: {sel} not in {target}'
|
||||||
|
target = target[sel]
|
||||||
|
|
||||||
|
self._refs[ref] = target
|
||||||
|
else:
|
||||||
|
for v in n.values():
|
||||||
|
visit(v)
|
||||||
|
|
||||||
|
return n
|
||||||
|
return visit(schema)
|
||||||
|
|
||||||
|
def _generate_union_rule(self, name, alt_schemas):
|
||||||
|
return ' | '.join((
|
||||||
|
self.visit(alt_schema, f'{name}{"-" if name else "alternative-"}{i}')
|
||||||
|
for i, alt_schema in enumerate(alt_schemas)
|
||||||
|
))
|
||||||
|
|
||||||
|
def _visit_pattern(self, pattern, name):
|
||||||
|
'''
|
||||||
|
Transforms a regular expression pattern into a GBNF rule.
|
||||||
|
|
||||||
|
Input: https://json-schema.org/understanding-json-schema/reference/regular_expressions
|
||||||
|
Output: https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md
|
||||||
|
|
||||||
|
Unsupported features: negative/positive lookaheads, greedy/non-greedy modifiers.
|
||||||
|
|
||||||
|
Mostly a 1:1 translation, except for {x} / {x,} / {x,y} quantifiers for which
|
||||||
|
we define sub-rules to keep the output lean.
|
||||||
|
'''
|
||||||
|
|
||||||
|
assert pattern.startswith('^') and pattern.endswith('$'), 'Pattern must start with "^" and end with "$"'
|
||||||
|
pattern = pattern[1:-1]
|
||||||
|
sub_rule_ids = {}
|
||||||
|
|
||||||
|
i = 0
|
||||||
|
length = len(pattern)
|
||||||
|
|
||||||
|
def to_rule(s: Tuple[str, bool]) -> str:
|
||||||
|
(txt, is_literal) = s
|
||||||
|
return "\"" + txt + "\"" if is_literal else txt
|
||||||
|
|
||||||
|
def transform() -> Tuple[str, bool]:
|
||||||
|
'''
|
||||||
|
Parse a unit at index i (advancing it), and return its string representation + whether it's a literal.
|
||||||
|
'''
|
||||||
|
nonlocal i
|
||||||
|
nonlocal pattern
|
||||||
|
nonlocal sub_rule_ids
|
||||||
|
|
||||||
|
start = i
|
||||||
|
# For each component of this sequence, store its string representation and whether it's a literal.
|
||||||
|
# We only need a flat structure here to apply repetition operators to the last item, and
|
||||||
|
# to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially
|
||||||
|
# (GBNF's syntax is luckily very close to regular expressions!)
|
||||||
|
seq: list[Tuple[str, bool]] = []
|
||||||
|
|
||||||
|
def get_dot():
|
||||||
|
if self._dotall:
|
||||||
|
rule = '[\\U00000000-\\U0010FFFF]'
|
||||||
|
else:
|
||||||
|
# Accept any character... except \n and \r line break chars (\x0A and \xOD)
|
||||||
|
rule = '[\\U00000000-\\x09\\x0B\\x0C\\x0E-\\U0010FFFF]'
|
||||||
|
return self._add_rule(f'dot', rule)
|
||||||
|
|
||||||
|
def join_seq():
|
||||||
|
nonlocal seq
|
||||||
|
ret = []
|
||||||
|
for is_literal, g in itertools.groupby(seq, lambda x: x[1]):
|
||||||
|
if is_literal:
|
||||||
|
ret.append((''.join(x[0] for x in g), True))
|
||||||
|
else:
|
||||||
|
ret.extend(g)
|
||||||
|
if len(ret) == 1:
|
||||||
|
return ret[0]
|
||||||
|
return (' '.join(to_rule(x) for x in seq), False)
|
||||||
|
|
||||||
|
while i < length:
|
||||||
|
c = pattern[i]
|
||||||
|
if c == '.':
|
||||||
|
seq.append((get_dot(), False))
|
||||||
|
i += 1
|
||||||
|
elif c == '(':
|
||||||
|
i += 1
|
||||||
|
if i < length:
|
||||||
|
assert pattern[i] != '?', f'Unsupported pattern syntax "{pattern[i]}" at index {i} of /{pattern}/'
|
||||||
|
seq.append((f'({to_rule(transform())})', False))
|
||||||
|
elif c == ')':
|
||||||
|
i += 1
|
||||||
|
assert start > 0 and pattern[start-1] == '(', f'Unbalanced parentheses; start = {start}, i = {i}, pattern = {pattern}'
|
||||||
|
return join_seq()
|
||||||
|
elif c == '[':
|
||||||
|
square_brackets = c
|
||||||
|
i += 1
|
||||||
|
while i < length and pattern[i] != ']':
|
||||||
|
if pattern[i] == '\\':
|
||||||
|
square_brackets += pattern[i:i+2]
|
||||||
|
i += 2
|
||||||
|
else:
|
||||||
|
square_brackets += pattern[i]
|
||||||
|
i += 1
|
||||||
|
assert i < length, f'Unbalanced square brackets; start = {start}, i = {i}, pattern = {pattern}'
|
||||||
|
square_brackets += ']'
|
||||||
|
i += 1
|
||||||
|
seq.append((square_brackets, False))
|
||||||
|
elif c == '|':
|
||||||
|
seq.append(('|', False))
|
||||||
|
i += 1
|
||||||
|
elif c in ('*', '+', '?'):
|
||||||
|
seq[-1] = (to_rule(seq[-1]) + c, False)
|
||||||
|
i += 1
|
||||||
|
elif c == '{':
|
||||||
|
curly_brackets = c
|
||||||
|
i += 1
|
||||||
|
while i < length and pattern[i] != '}':
|
||||||
|
curly_brackets += pattern[i]
|
||||||
|
i += 1
|
||||||
|
assert i < length, f'Unbalanced curly brackets; start = {start}, i = {i}, pattern = {pattern}'
|
||||||
|
curly_brackets += '}'
|
||||||
|
i += 1
|
||||||
|
nums = [s.strip() for s in curly_brackets[1:-1].split(',')]
|
||||||
|
min_times = 0
|
||||||
|
max_times = None
|
||||||
|
try:
|
||||||
|
if len(nums) == 1:
|
||||||
|
min_times = int(nums[0])
|
||||||
|
max_times = min_times
|
||||||
|
else:
|
||||||
|
assert len(nums) == 2
|
||||||
|
min_times = int(nums[0]) if nums[0] else 0
|
||||||
|
max_times = int(nums[1]) if nums[1] else None
|
||||||
|
except ValueError:
|
||||||
|
raise ValueError(f'Invalid quantifier {curly_brackets} in /{pattern}/')
|
||||||
|
|
||||||
|
(sub, sub_is_literal) = seq[-1]
|
||||||
|
|
||||||
|
if min_times == 0 and max_times is None:
|
||||||
|
seq[-1] = (f'{sub}*', False)
|
||||||
|
elif min_times == 0 and max_times == 1:
|
||||||
|
seq[-1] = (f'{sub}?', False)
|
||||||
|
elif min_times == 1 and max_times is None:
|
||||||
|
seq[-1] = (f'{sub}+', False)
|
||||||
|
else:
|
||||||
|
if not sub_is_literal:
|
||||||
|
id = sub_rule_ids.get(sub)
|
||||||
|
if id is None:
|
||||||
|
id = self._add_rule(f'{name}-{len(sub_rule_ids) + 1}', sub)
|
||||||
|
sub_rule_ids[sub] = id
|
||||||
|
sub = id
|
||||||
|
|
||||||
|
seq[-1] = (
|
||||||
|
' '.join(
|
||||||
|
([f'"{sub[1:-1] * min_times}"'] if sub_is_literal else [sub] * min_times) +
|
||||||
|
([f'{sub}?'] * (max_times - min_times) if max_times is not None else [f'{sub}*'])),
|
||||||
|
False
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
literal = ''
|
||||||
|
while i < length:
|
||||||
|
if pattern[i] == '\\' and i < length - 1:
|
||||||
|
next = pattern[i + 1]
|
||||||
|
if next in ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS:
|
||||||
|
i += 1
|
||||||
|
literal += pattern[i]
|
||||||
|
i += 1
|
||||||
|
else:
|
||||||
|
literal += pattern[i:i+2]
|
||||||
|
i += 2
|
||||||
|
elif pattern[i] == '"' and not self._raw_pattern:
|
||||||
|
literal += '\\"'
|
||||||
|
i += 1
|
||||||
|
elif pattern[i] not in NON_LITERAL_SET and \
|
||||||
|
(i == length - 1 or literal == '' or pattern[i+1] == '.' or pattern[i+1] not in NON_LITERAL_SET):
|
||||||
|
literal += pattern[i]
|
||||||
|
i += 1
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
if literal:
|
||||||
|
seq.append((literal, True))
|
||||||
|
|
||||||
|
return join_seq()
|
||||||
|
|
||||||
|
return self._add_rule(
|
||||||
|
name,
|
||||||
|
to_rule(transform()) if self._raw_pattern \
|
||||||
|
else "\"\\\"\" " + to_rule(transform()) + " \"\\\"\" space")
|
||||||
|
|
||||||
|
|
||||||
|
def _resolve_ref(self, ref):
|
||||||
|
ref_name = ref.split('/')[-1]
|
||||||
|
if ref_name not in self._rules and ref not in self._refs_being_resolved:
|
||||||
|
self._refs_being_resolved.add(ref)
|
||||||
|
resolved = self._refs[ref]
|
||||||
|
ref_name = self.visit(resolved, ref_name)
|
||||||
|
self._refs_being_resolved.remove(ref)
|
||||||
|
return ref_name
|
||||||
|
|
||||||
|
def _generate_constant_rule(self, value):
|
||||||
|
return self._format_literal(json.dumps(value))
|
||||||
|
|
||||||
def visit(self, schema, name):
|
def visit(self, schema, name):
|
||||||
schema_type = schema.get('type')
|
schema_type = schema.get('type')
|
||||||
rule_name = name or 'root'
|
schema_format = schema.get('format')
|
||||||
|
rule_name = name + '-' if name in RESERVED_NAMES else name or 'root'
|
||||||
|
|
||||||
if 'oneOf' in schema or 'anyOf' in schema:
|
if (ref := schema.get('$ref')) is not None:
|
||||||
rule = ' | '.join((
|
return self._add_rule(rule_name, self._resolve_ref(ref))
|
||||||
self.visit(alt_schema, f'{name}{"-" if name else ""}{i}')
|
|
||||||
for i, alt_schema in enumerate(schema.get('oneOf') or schema['anyOf'])
|
elif 'oneOf' in schema or 'anyOf' in schema:
|
||||||
))
|
return self._add_rule(rule_name, self._generate_union_rule(name, schema.get('oneOf') or schema['anyOf']))
|
||||||
return self._add_rule(rule_name, rule)
|
|
||||||
|
elif isinstance(schema_type, list):
|
||||||
|
return self._add_rule(rule_name, self._generate_union_rule(name, [{'type': t} for t in schema_type]))
|
||||||
|
|
||||||
elif 'const' in schema:
|
elif 'const' in schema:
|
||||||
return self._add_rule(rule_name, self._format_literal(schema['const']))
|
return self._add_rule(rule_name, self._generate_constant_rule(schema['const']))
|
||||||
|
|
||||||
elif 'enum' in schema:
|
elif 'enum' in schema:
|
||||||
rule = ' | '.join((self._format_literal(v) for v in schema['enum']))
|
rule = ' | '.join((self._generate_constant_rule(v) for v in schema['enum']))
|
||||||
return self._add_rule(rule_name, rule)
|
return self._add_rule(rule_name, rule)
|
||||||
|
|
||||||
elif schema_type == 'object' and 'properties' in schema:
|
elif schema_type in (None, 'object') and \
|
||||||
# TODO: `required` keyword
|
('properties' in schema or \
|
||||||
prop_order = self._prop_order
|
('additionalProperties' in schema and schema['additionalProperties'] is not True)):
|
||||||
prop_pairs = sorted(
|
required = set(schema.get('required', []))
|
||||||
schema['properties'].items(),
|
properties = list(schema.get('properties', {}).items())
|
||||||
# sort by position in prop_order (if specified) then by key
|
return self._add_rule(rule_name, self._build_object_rule(properties, required, name, schema.get('additionalProperties')))
|
||||||
key=lambda kv: (prop_order.get(kv[0], len(prop_order)), kv[0]),
|
|
||||||
)
|
|
||||||
|
|
||||||
rule = '"{" space'
|
elif schema_type in (None, 'object') and 'allOf' in schema:
|
||||||
for i, (prop_name, prop_schema) in enumerate(prop_pairs):
|
required = set()
|
||||||
prop_rule_name = self.visit(prop_schema, f'{name}{"-" if name else ""}{prop_name}')
|
properties = []
|
||||||
if i > 0:
|
hybrid_name = name
|
||||||
rule += ' "," space'
|
def add_component(comp_schema, is_required):
|
||||||
rule += fr' {self._format_literal(prop_name)} space ":" space {prop_rule_name}'
|
if (ref := comp_schema.get('$ref')) is not None:
|
||||||
rule += ' "}" space'
|
comp_schema = self._refs[ref]
|
||||||
|
|
||||||
return self._add_rule(rule_name, rule)
|
if 'properties' in comp_schema:
|
||||||
|
for prop_name, prop_schema in comp_schema['properties'].items():
|
||||||
|
properties.append((prop_name, prop_schema))
|
||||||
|
if is_required:
|
||||||
|
required.add(prop_name)
|
||||||
|
|
||||||
elif schema_type == 'array' and 'items' in schema:
|
for t in schema['allOf']:
|
||||||
# TODO `prefixItems` keyword
|
if 'anyOf' in t:
|
||||||
item_rule_name = self.visit(schema['items'], f'{name}{"-" if name else ""}item')
|
for tt in t['anyOf']:
|
||||||
list_item_operator = f'("," space {item_rule_name})'
|
add_component(tt, is_required=False)
|
||||||
|
else:
|
||||||
|
add_component(t, is_required=True)
|
||||||
|
|
||||||
|
return self._add_rule(rule_name, self._build_object_rule(properties, required, hybrid_name, additional_properties=[]))
|
||||||
|
|
||||||
|
elif schema_type in (None, 'array') and ('items' in schema or 'prefixItems' in schema):
|
||||||
|
items = schema.get('items') or schema['prefixItems']
|
||||||
|
if isinstance(items, list):
|
||||||
|
return self._add_rule(
|
||||||
|
rule_name,
|
||||||
|
'"[" space ' +
|
||||||
|
' "," space '.join(
|
||||||
|
self.visit(item, f'{name}{"-" if name else ""}tuple-{i}')
|
||||||
|
for i, item in enumerate(items)) +
|
||||||
|
' "]" space')
|
||||||
|
else:
|
||||||
|
item_rule_name = self.visit(items, f'{name}{"-" if name else ""}item')
|
||||||
|
list_item_operator = f'( "," space {item_rule_name} )'
|
||||||
successive_items = ""
|
successive_items = ""
|
||||||
min_items = schema.get("minItems", 0)
|
min_items = schema.get("minItems", 0)
|
||||||
|
max_items = schema.get("maxItems")
|
||||||
if min_items > 0:
|
             if min_items > 0:
-                first_item = f"({item_rule_name})"
                 successive_items = list_item_operator * (min_items - 1)
                 min_items -= 1
-            else:
-                first_item = f"({item_rule_name})?"
-            max_items = schema.get("maxItems")
             if max_items is not None and max_items > min_items:
                 successive_items += (list_item_operator + "?") * (max_items - min_items - 1)
             else:
                 successive_items += list_item_operator + "*"
-            rule = f'"[" space {first_item} {successive_items} "]" space'
+            if min_items == 0:
+                rule = f'"[" space ( {item_rule_name} {successive_items} )? "]" space'
+            else:
+                rule = f'"[" space {item_rule_name} {successive_items} "]" space'
             return self._add_rule(rule_name, rule)
 
+        elif schema_type in (None, 'string') and 'pattern' in schema:
+            return self._visit_pattern(schema['pattern'], rule_name)
+
+        elif schema_type in (None, 'string') and re.match(r'^uuid[1-5]?$', schema_format or ''):
+            return self._add_rule(
+                'root' if rule_name == 'root' else schema_format,
+                PRIMITIVE_RULES['uuid']
+            )
+
+        elif schema_type in (None, 'string') and schema_format in DATE_RULES:
+            for t, r in DATE_RULES.items():
+                self._add_rule(t, r)
+            return schema_format + '-string'
+
+        elif (schema_type == 'object') or (len(schema) == 0):
+            for n in OBJECT_RULE_NAMES:
+                self._add_rule(n, PRIMITIVE_RULES[n])
+            return self._add_rule(rule_name, 'object')
+
         else:
             assert schema_type in PRIMITIVE_RULES, f'Unrecognized schema: {schema}'
+            # TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero
             return self._add_rule(
                 'root' if rule_name == 'root' else schema_type,
                 PRIMITIVE_RULES[schema_type]
             )
 
+    def _build_object_rule(self, properties: List[Tuple[str, Any]], required: Set[str], name: str, additional_properties: Union[bool, Any]):
+        prop_order = self._prop_order
+        # sort by position in prop_order (if specified) then by original order
+        sorted_props = [kv[0] for _, kv in sorted(enumerate(properties), key=lambda ikv: (prop_order.get(ikv[1][0], len(prop_order)), ikv[0]))]
+
+        prop_kv_rule_names = {}
+        for prop_name, prop_schema in properties:
+            prop_rule_name = self.visit(prop_schema, f'{name}{"-" if name else ""}{prop_name}')
+            prop_kv_rule_names[prop_name] = self._add_rule(
+                f'{name}{"-" if name else ""}{prop_name}-kv',
+                fr'{self._format_literal(json.dumps(prop_name))} space ":" space {prop_rule_name}'
+            )
+        required_props = [k for k in sorted_props if k in required]
+        optional_props = [k for k in sorted_props if k not in required]
+
+        if additional_properties == True or isinstance(additional_properties, dict):
+            sub_name = f'{name}{"-" if name else ""}additional'
+            value_rule = self.visit({} if additional_properties == True else additional_properties, f'{sub_name}-value')
+            prop_kv_rule_names["*"] = self._add_rule(
+                f'{sub_name}-kv',
+                self._add_rule('string', PRIMITIVE_RULES['string']) + f' ":" space {value_rule}'
+            )
+            optional_props.append("*")
+
+        rule = '"{" space '
+        rule += ' "," space '.join(prop_kv_rule_names[k] for k in required_props)
+
+        if optional_props:
+            rule += ' ('
+            if required_props:
+                rule += ' "," space ( '
+
+            def get_recursive_refs(ks, first_is_optional):
+                [k, *rest] = ks
+                kv_rule_name = prop_kv_rule_names[k]
+                if k == '*':
+                    res = self._add_rule(
+                        f'{name}{"-" if name else ""}additional-kvs',
+                        f'{kv_rule_name} ( "," space ' + kv_rule_name + ' )*'
+                    )
+                elif first_is_optional:
+                    res = f'( "," space {kv_rule_name} )?'
+                else:
+                    res = kv_rule_name
+                if len(rest) > 0:
+                    res += ' ' + self._add_rule(
+                        f'{name}{"-" if name else ""}{k}-rest',
+                        get_recursive_refs(rest, first_is_optional=True)
+                    )
+                return res
+
+            rule += ' | '.join(
+                get_recursive_refs(optional_props[i:], first_is_optional=False)
+                for i in range(len(optional_props))
+            )
+            if required_props:
+                rule += ' )'
+            rule += ' )?'
+
+        rule += ' "}" space'
+
+        return rule
+
     def format_grammar(self):
-        return '\n'.join((f'{name} ::= {rule}' for name, rule in self._rules.items()))
+        return '\n'.join(
+            f'{name} ::= {rule}'
+            for name, rule in sorted(self._rules.items(), key=lambda kv: kv[0])
+        )
 
 
 def main(args_in = None):
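The repetition logic in the array branch above is easiest to see with concrete `minItems`/`maxItems` values. Below is a small standalone sketch, not part of the converter, that reproduces just that branch; the `item_rule_name` argument and the `( "," space ... )` operator string are taken from the surrounding code, everything else is illustrative.

```python
# Standalone sketch of the min/max-items repetition logic shown above.
def array_rule(item_rule_name: str, min_items: int = 0, max_items=None) -> str:
    list_item_operator = f'( "," space {item_rule_name} )'
    successive_items = ""
    if min_items > 0:
        successive_items = list_item_operator * (min_items - 1)
        min_items -= 1
    if max_items is not None and max_items > min_items:
        successive_items += (list_item_operator + "?") * (max_items - min_items - 1)
    else:
        successive_items += list_item_operator + "*"
    if min_items == 0:
        return f'"[" space ( {item_rule_name} {successive_items} )? "]" space'
    return f'"[" space {item_rule_name} {successive_items} "]" space'

print(array_rule("item", min_items=2, max_items=3))
# -> "[" space item ( "," space item )( "," space item )? "]" space
```

For an array constrained to 2–3 items this emits one mandatory repetition followed by one optional repetition, which is exactly the shape of the generated GBNF rule.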
@@ -129,16 +501,47 @@ def main(args_in = None):
         type=lambda s: s.split(','),
         help='''
             comma-separated property names defining the order of precedence for object properties;
-            properties not specified here are given lower precedence than those that are, and are
-            sorted alphabetically
+            properties not specified here are given lower precedence than those that are, and
+            are kept in their original order from the schema. Required properties are always
+            given precedence over optional properties.
         '''
     )
+    parser.add_argument(
+        '--allow-fetch',
+        action='store_true',
+        default=False,
+        help='Whether to allow fetching referenced schemas over HTTPS')
+    parser.add_argument(
+        '--dotall',
+        action='store_true',
+        default=False,
+        help='Whether to treat dot (".") as matching all chars including line breaks in regular expression patterns')
+    parser.add_argument(
+        '--raw-pattern',
+        action='store_true',
+        default=False,
+        help='Treats string patterns as raw patterns w/o quotes (or quote escapes)')
+
     parser.add_argument('schema', help='file containing JSON schema ("-" for stdin)')
     args = parser.parse_args(args_in)
 
-    schema = json.load(sys.stdin if args.schema == '-' else open(args.schema))
-    prop_order = {name: idx for idx, name in enumerate(args.prop_order)}
-    converter = SchemaConverter(prop_order)
+    if args.schema.startswith('https://'):
+        url = args.schema
+        import requests
+        schema = requests.get(url).json()
+    elif args.schema == '-':
+        url = 'stdin'
+        schema = json.load(sys.stdin)
+    else:
+        url = f'file://{args.schema}'
+        with open(args.schema) as f:
+            schema = json.load(f)
+    converter = SchemaConverter(
+        prop_order={name: idx for idx, name in enumerate(args.prop_order)},
+        allow_fetch=args.allow_fetch,
+        dotall=args.dotall,
+        raw_pattern=args.raw_pattern)
+    schema = converter.resolve_refs(schema, url)
     converter.visit(schema, '')
     print(converter.format_grammar())
 
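Besides the CLI path in `main()` above, the converter can be driven programmatically with the same options. The sketch below assumes the `SchemaConverter` constructor arguments shown in the hunk; the import line is illustrative only, since the script ships under a hyphenated file name and may need to be loaded via `importlib` or copied into an importable module.

```python
# Hedged sketch: programmatic use of the converter, mirroring main() above.
from json_schema_to_grammar import SchemaConverter  # illustrative import path

schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
    "required": ["name"],
}

converter = SchemaConverter(
    prop_order={"name": 0, "age": 1},
    allow_fetch=False,   # do not follow remote $refs over HTTPS
    dotall=False,        # "." in patterns does not match newlines
    raw_pattern=False,   # patterns are treated as quoted JSON strings
)
schema = converter.resolve_refs(schema, "file://example.json")
converter.visit(schema, "")
print(converter.format_grammar())
```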
@@ -8,6 +8,7 @@
 #include <cstdio>
 #include <cstring>
 #include <ctime>
+#include <cstdlib>
 #include <iterator>
 #include <map>
 #include <numeric>
@@ -103,6 +104,7 @@ static std::string get_cpu_info() {
             }
         }
     }
+    fclose(f);
 }
 #endif
     // TODO: other platforms
@@ -112,10 +114,10 @@ static std::string get_cpu_info() {
 static std::string get_gpu_info() {
     std::string id;
 #ifdef GGML_USE_CUBLAS
-    int count = ggml_cuda_get_device_count();
+    int count = ggml_backend_cuda_get_device_count();
     for (int i = 0; i < count; i++) {
         char buf[128];
-        ggml_cuda_get_device_description(i, buf, sizeof(buf));
+        ggml_backend_cuda_get_device_description(i, buf, sizeof(buf));
         id += buf;
         if (i < count - 1) {
             id += "/";
@@ -164,6 +166,7 @@ struct cmd_params {
     std::vector<int> n_prompt;
     std::vector<int> n_gen;
     std::vector<int> n_batch;
+    std::vector<int> n_ubatch;
     std::vector<ggml_type> type_k;
     std::vector<ggml_type> type_v;
     std::vector<int> n_threads;
@@ -173,6 +176,7 @@ struct cmd_params {
    std::vector<bool> no_kv_offload;
    std::vector<std::vector<float>> tensor_split;
    std::vector<bool> use_mmap;
+   std::vector<bool> embeddings;
    int reps;
    bool verbose;
    output_formats output_format;
@@ -182,7 +186,8 @@ static const cmd_params cmd_params_defaults = {
    /* model         */ {"models/7B/ggml-model-q4_0.gguf"},
    /* n_prompt      */ {512},
    /* n_gen         */ {128},
-   /* n_batch       */ {512},
+   /* n_batch       */ {2048},
+   /* n_ubatch      */ {512},
    /* type_k        */ {GGML_TYPE_F16},
    /* type_v        */ {GGML_TYPE_F16},
    /* n_threads     */ {get_num_physical_cores()},
@@ -192,6 +197,7 @@ static const cmd_params cmd_params_defaults = {
    /* no_kv_offload */ {false},
    /* tensor_split  */ {std::vector<float>(llama_max_devices(), 0.0f)},
    /* use_mmap      */ {true},
+   /* embeddings    */ {false},
    /* reps          */ 5,
    /* verbose       */ false,
    /* output_format */ MARKDOWN
@@ -206,6 +212,7 @@ static void print_usage(int /* argc */, char ** argv) {
    printf("  -p, --n-prompt <n>                (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str());
    printf("  -n, --n-gen <n>                   (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
    printf("  -b, --batch-size <n>              (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str());
+   printf("  -ub N, --ubatch-size <n>          (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
    printf("  -ctk <t>, --cache-type-k <t>      (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
    printf("  -ctv <t>, --cache-type-v <t>      (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
    printf("  -t, --threads <n>                 (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
@@ -214,7 +221,8 @@ static void print_usage(int /* argc */, char ** argv) {
    printf("  -mg, --main-gpu <i>               (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
    printf("  -nkvo, --no-kv-offload <0|1>      (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
    printf("  -mmp, --mmap <0|1>                (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str());
-   printf("  -ts, --tensor_split <ts0/ts1/..>  (default: 0)\n");
+   printf("  -embd, --embeddings <0|1>         (default: %s)\n", join(cmd_params_defaults.embeddings, ",").c_str());
+   printf("  -ts, --tensor-split <ts0/ts1/..>  (default: 0)\n");
    printf("  -r, --repetitions <n>             (default: %d)\n", cmd_params_defaults.reps);
    printf("  -o, --output <csv|json|md|sql>    (default: %s)\n", output_format_str(cmd_params_defaults.output_format));
    printf("  -v, --verbose                     (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0");
@@ -241,6 +249,9 @@ static ggml_type ggml_type_from_name(const std::string & s) {
    if (s == "q5_1") {
        return GGML_TYPE_Q5_1;
    }
+   if (s == "iq4_nl") {
+       return GGML_TYPE_IQ4_NL;
+   }
 
    return GGML_TYPE_COUNT;
 }
@@ -294,6 +305,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
            }
            auto p = split<int>(argv[i], split_delim);
            params.n_batch.insert(params.n_batch.end(), p.begin(), p.end());
+       } else if (arg == "-ub" || arg == "--ubatch-size") {
+           if (++i >= argc) {
+               invalid_param = true;
+               break;
+           }
+           auto p = split<int>(argv[i], split_delim);
+           params.n_ubatch.insert(params.n_ubatch.end(), p.begin(), p.end());
        } else if (arg == "-ctk" || arg == "--cache-type-k") {
            if (++i >= argc) {
                invalid_param = true;
@@ -382,6 +400,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
            }
            auto p = split<bool>(argv[i], split_delim);
            params.use_mmap.insert(params.use_mmap.end(), p.begin(), p.end());
+       } else if (arg == "-embd" || arg == "--embeddings") {
+           if (++i >= argc) {
+               invalid_param = true;
+               break;
+           }
+           auto p = split<bool>(argv[i], split_delim);
+           params.embeddings.insert(params.embeddings.end(), p.begin(), p.end());
        } else if (arg == "-ts" || arg == "--tensor-split") {
            if (++i >= argc) {
                invalid_param = true;
@@ -445,6 +470,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
    if (params.n_prompt.empty())     { params.n_prompt = cmd_params_defaults.n_prompt; }
    if (params.n_gen.empty())        { params.n_gen = cmd_params_defaults.n_gen; }
    if (params.n_batch.empty())      { params.n_batch = cmd_params_defaults.n_batch; }
+   if (params.n_ubatch.empty())     { params.n_ubatch = cmd_params_defaults.n_ubatch; }
    if (params.type_k.empty())       { params.type_k = cmd_params_defaults.type_k; }
    if (params.type_v.empty())       { params.type_v = cmd_params_defaults.type_v; }
    if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; }
@@ -453,6 +479,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
    if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
    if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; }
    if (params.use_mmap.empty())     { params.use_mmap = cmd_params_defaults.use_mmap; }
+   if (params.embeddings.empty())   { params.embeddings = cmd_params_defaults.embeddings; }
    if (params.n_threads.empty())    { params.n_threads = cmd_params_defaults.n_threads; }
 
    return params;
@@ -463,6 +490,7 @@ struct cmd_params_instance {
    int n_prompt;
    int n_gen;
    int n_batch;
+   int n_ubatch;
    ggml_type type_k;
    ggml_type type_v;
    int n_threads;
@@ -472,6 +500,7 @@ struct cmd_params_instance {
    bool no_kv_offload;
    std::vector<float> tensor_split;
    bool use_mmap;
+   bool embeddings;
 
    llama_model_params to_llama_mparams() const {
        llama_model_params mparams = llama_model_default_params();
@@ -499,9 +528,11 @@ struct cmd_params_instance {
 
        cparams.n_ctx = n_prompt + n_gen;
        cparams.n_batch = n_batch;
+       cparams.n_ubatch = n_ubatch;
        cparams.type_k = type_k;
        cparams.type_v = type_v;
        cparams.offload_kqv = !no_kv_offload;
+       cparams.embeddings = embeddings;
 
        return cparams;
    }
@@ -517,7 +548,9 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
    for (const auto & mg : params.main_gpu)
    for (const auto & ts : params.tensor_split)
    for (const auto & mmp : params.use_mmap)
+   for (const auto & embd : params.embeddings)
    for (const auto & nb : params.n_batch)
+   for (const auto & nub : params.n_ubatch)
    for (const auto & tk : params.type_k)
    for (const auto & tv : params.type_v)
    for (const auto & nkvo : params.no_kv_offload)
@@ -531,6 +564,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
            /* .n_prompt     = */ n_prompt,
            /* .n_gen        = */ 0,
            /* .n_batch      = */ nb,
+           /* .n_ubatch     = */ nub,
            /* .type_k       = */ tk,
            /* .type_v       = */ tv,
            /* .n_threads    = */ nt,
@@ -540,6 +574,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
            /* .no_kv_offload= */ nkvo,
            /* .tensor_split = */ ts,
            /* .use_mmap     = */ mmp,
+           /* .embeddings   = */ embd,
        };
        instances.push_back(instance);
    }
@@ -553,6 +588,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
            /* .n_prompt     = */ 0,
            /* .n_gen        = */ n_gen,
            /* .n_batch      = */ nb,
+           /* .n_ubatch     = */ nub,
            /* .type_k       = */ tk,
            /* .type_v       = */ tv,
            /* .n_threads    = */ nt,
@@ -562,6 +598,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
            /* .no_kv_offload= */ nkvo,
            /* .tensor_split = */ ts,
            /* .use_mmap     = */ mmp,
+           /* .embeddings   = */ embd,
        };
        instances.push_back(instance);
    }
@@ -588,6 +625,7 @@ struct test {
    uint64_t model_size;
    uint64_t model_n_params;
    int n_batch;
+   int n_ubatch;
    int n_threads;
    ggml_type type_k;
    ggml_type type_v;
@@ -597,6 +635,7 @@ struct test {
    bool no_kv_offload;
    std::vector<float> tensor_split;
    bool use_mmap;
+   bool embeddings;
    int n_prompt;
    int n_gen;
    std::string test_time;
@@ -610,6 +649,7 @@ struct test {
        model_size = llama_model_size(lmodel);
        model_n_params = llama_model_n_params(lmodel);
        n_batch = inst.n_batch;
+       n_ubatch = inst.n_ubatch;
        n_threads = inst.n_threads;
        type_k = inst.type_k;
        type_v = inst.type_v;
@@ -619,6 +659,7 @@ struct test {
        no_kv_offload = inst.no_kv_offload;
        tensor_split = inst.tensor_split;
        use_mmap = inst.use_mmap;
+       embeddings = inst.embeddings;
        n_prompt = inst.n_prompt;
        n_gen = inst.n_gen;
        // RFC 3339 date-time format
@@ -687,10 +728,11 @@ struct test {
            "cuda", "opencl", "vulkan", "kompute", "metal", "sycl", "gpu_blas", "blas",
            "cpu_info", "gpu_info",
            "model_filename", "model_type", "model_size", "model_n_params",
-           "n_batch", "n_threads", "type_k", "type_v",
+           "n_batch", "n_ubatch",
+           "n_threads", "type_k", "type_v",
            "n_gpu_layers", "split_mode",
            "main_gpu", "no_kv_offload",
-           "tensor_split", "use_mmap",
+           "tensor_split", "use_mmap", "embeddings",
            "n_prompt", "n_gen", "test_time",
            "avg_ns", "stddev_ns",
            "avg_ts", "stddev_ts"
@@ -701,7 +743,8 @@ struct test {
    enum field_type {STRING, BOOL, INT, FLOAT};
 
    static field_type get_field_type(const std::string & field) {
-       if (field == "build_number" || field == "n_batch" || field == "n_threads" ||
+       if (field == "build_number" || field == "n_batch" || field == "n_ubatch" ||
+           field == "n_threads" ||
            field == "model_size" || field == "model_n_params" ||
            field == "n_gpu_layers" || field == "main_gpu" ||
            field == "n_prompt" || field == "n_gen" ||
@@ -710,7 +753,7 @@ struct test {
        }
        if (field == "cuda" || field == "opencl" || field == "vulkan" || field == "kompute" || field == "metal" ||
            field == "gpu_blas" || field == "blas" || field == "sycl" ||field == "f16_kv" || field == "no_kv_offload" ||
-           field == "use_mmap") {
+           field == "use_mmap" || field == "embeddings") {
            return BOOL;
        }
        if (field == "avg_ts" || field == "stddev_ts") {
@@ -741,10 +784,11 @@ struct test {
            std::to_string(metal), std::to_string(sycl), std::to_string(gpu_blas), std::to_string(blas),
            cpu_info, gpu_info,
            model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
-           std::to_string(n_batch), std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v),
+           std::to_string(n_batch), std::to_string(n_ubatch),
+           std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v),
            std::to_string(n_gpu_layers), split_mode_str(split_mode),
            std::to_string(main_gpu), std::to_string(no_kv_offload),
-           tensor_split_str, std::to_string(use_mmap),
+           tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
            std::to_string(n_prompt), std::to_string(n_gen), test_time,
            std::to_string(avg_ns()), std::to_string(stdev_ns()),
            std::to_string(avg_ts()), std::to_string(stdev_ts())
@@ -914,6 +958,9 @@ struct markdown_printer : public printer {
        if (field == "use_mmap") {
            return "mmap";
        }
+       if (field == "embeddings") {
+           return "embd";
+       }
        if (field == "tensor_split") {
            return "ts";
        }
@@ -936,6 +983,9 @@ struct markdown_printer : public printer {
        if (params.n_batch.size() > 1 || params.n_batch != cmd_params_defaults.n_batch) {
            fields.emplace_back("n_batch");
        }
+       if (params.n_ubatch.size() > 1 || params.n_ubatch != cmd_params_defaults.n_ubatch) {
+           fields.emplace_back("n_ubatch");
+       }
        if (params.type_k.size() > 1 || params.type_k != cmd_params_defaults.type_k) {
            fields.emplace_back("type_k");
        }
@@ -957,6 +1007,9 @@ struct markdown_printer : public printer {
        if (params.use_mmap.size() > 1 || params.use_mmap != cmd_params_defaults.use_mmap) {
            fields.emplace_back("use_mmap");
        }
+       if (params.embeddings.size() > 1 || params.embeddings != cmd_params_defaults.embeddings) {
+           fields.emplace_back("embeddings");
+       }
        fields.emplace_back("test");
        fields.emplace_back("t/s");
 
@@ -1072,25 +1125,40 @@ struct sql_printer : public printer {
 };
 
 static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
-   std::vector<llama_token> tokens(n_batch, llama_token_bos(llama_get_model(ctx)));
-   int n_processed = 0;
-
    llama_set_n_threads(ctx, n_threads, n_threads);
 
+   const llama_model * model = llama_get_model(ctx);
+   const int32_t n_vocab = llama_n_vocab(model);
+
+   std::vector<llama_token> tokens(n_batch);
+
+   int n_processed = 0;
+
    while (n_processed < n_prompt) {
        int n_tokens = std::min(n_prompt - n_processed, n_batch);
+       tokens[0] = n_processed == 0 && llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;
+       for (int i = 1; i < n_tokens; i++) {
+           tokens[i] = std::rand() % n_vocab;
+       }
        llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens, n_past + n_processed, 0));
        n_processed += n_tokens;
    }
+
+   llama_synchronize(ctx);
 }
 
 static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) {
-   llama_token token = llama_token_bos(llama_get_model(ctx));
-
    llama_set_n_threads(ctx, n_threads, n_threads);
 
+   const llama_model * model = llama_get_model(ctx);
+   const int32_t n_vocab = llama_n_vocab(model);
+
+   llama_token token = llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;
+
    for (int i = 0; i < n_gen; i++) {
        llama_decode(ctx, llama_batch_get_one(&token, 1, n_past + i, 0));
+       llama_synchronize(ctx);
+       token = std::rand() % n_vocab;
    }
 }
 
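The revised `test_prompt` above fills each batch with random token ids (with an optional BOS at the very start) instead of repeating the BOS token, and splits the prompt into `n_batch`-sized decode calls. A minimal Python rendering of that batching loop follows, with a stubbed `decode` callback standing in for `llama_decode`; the names and the BOS id are illustrative, not part of llama-bench.

```python
import random

def test_prompt(n_prompt: int, n_batch: int, n_vocab: int, add_bos: bool, decode) -> None:
    """Sketch of the benchmark's prompt loop: chunk the prompt into batches of
    at most n_batch random token ids and hand each chunk to decode()."""
    n_processed = 0
    while n_processed < n_prompt:
        n_tokens = min(n_prompt - n_processed, n_batch)
        tokens = [random.randrange(n_vocab) for _ in range(n_tokens)]
        if n_processed == 0 and add_bos:
            tokens[0] = 1  # BOS id, model-dependent (illustrative value)
        decode(tokens, n_past=n_processed)
        n_processed += n_tokens

# Example: a 512-token prompt with the new default n_batch of 2048 is a single call.
test_prompt(512, 2048, n_vocab=32000, add_bos=True,
            decode=lambda toks, n_past: print(f"decode {len(toks)} tokens at n_past={n_past}"))
```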
@@ -1179,7 +1247,8 @@ int main(int argc, char ** argv) {
 
        // warmup run
        if (t.n_prompt > 0) {
-           test_prompt(ctx, std::min(2, t.n_batch), 0, t.n_batch, t.n_threads);
+           //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
+           test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
        }
        if (t.n_gen > 0) {
            test_gen(ctx, 1, 0, t.n_threads);
@@ -1195,6 +1264,7 @@ int main(int argc, char ** argv) {
            if (t.n_gen > 0) {
                test_gen(ctx, t.n_gen, t.n_prompt, t.n_threads);
            }
+
            uint64_t t_ns = get_time_ns() - t_start;
            t.samples_ns.push_back(t_ns);
        }
@@ -33,6 +33,45 @@ jclass la_int_var;
 jmethodID la_int_var_value;
 jmethodID la_int_var_inc;
 
+std::string cached_token_chars;
+
+bool is_valid_utf8(const char * string) {
+    if (!string) {
+        return true;
+    }
+
+    const unsigned char * bytes = (const unsigned char *)string;
+    int num;
+
+    while (*bytes != 0x00) {
+        if ((*bytes & 0x80) == 0x00) {
+            // U+0000 to U+007F
+            num = 1;
+        } else if ((*bytes & 0xE0) == 0xC0) {
+            // U+0080 to U+07FF
+            num = 2;
+        } else if ((*bytes & 0xF0) == 0xE0) {
+            // U+0800 to U+FFFF
+            num = 3;
+        } else if ((*bytes & 0xF8) == 0xF0) {
+            // U+10000 to U+10FFFF
+            num = 4;
+        } else {
+            return false;
+        }
+
+        bytes += 1;
+        for (int i = 1; i < num; ++i) {
+            if ((*bytes & 0xC0) != 0x80) {
+                return false;
+            }
+            bytes += 1;
+        }
+    }
+
+    return true;
+}
+
 static void log_callback(ggml_log_level level, const char * fmt, void * data) {
     if (level == GGML_LOG_LEVEL_ERROR)     __android_log_print(ANDROID_LOG_ERROR, TAG, fmt, data);
     else if (level == GGML_LOG_LEVEL_INFO) __android_log_print(ANDROID_LOG_INFO, TAG, fmt, data);
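For reference, the byte-pattern check performed by the new `is_valid_utf8` helper is reproduced below as a self-contained Python sketch operating on `bytes`. The truncation case (a multi-byte sequence cut off at the end of the buffer) is exactly what the `cached_token_chars` buffer introduced here guards against when streaming token pieces.

```python
def is_valid_utf8(data: bytes) -> bool:
    """Python rendering of the same check: each lead byte announces a sequence
    length of 1-4 and every continuation byte must look like 0b10xxxxxx."""
    i = 0
    while i < len(data):
        b = data[i]
        if b & 0x80 == 0x00:        # U+0000..U+007F
            num = 1
        elif b & 0xE0 == 0xC0:      # U+0080..U+07FF
            num = 2
        elif b & 0xF0 == 0xE0:      # U+0800..U+FFFF
            num = 3
        elif b & 0xF8 == 0xF0:      # U+10000..U+10FFFF
            num = 4
        else:
            return False
        if i + num > len(data):
            return False            # sequence truncated mid-character
        if any(data[i + k] & 0xC0 != 0x80 for k in range(1, num)):
            return False
        i += num
    return True

assert is_valid_utf8("héllo".encode("utf-8"))
assert not is_valid_utf8("héllo".encode("utf-8")[:-1] + b"\xc3")  # dangling lead byte
```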
@@ -295,6 +334,8 @@ Java_com_example_llama_Llm_completion_1init(
     jint n_len
     ) {
 
+    cached_token_chars.clear();
+
     const auto text = env->GetStringUTFChars(jtext, 0);
     const auto context = reinterpret_cast<llama_context *>(context_pointer);
     const auto batch = reinterpret_cast<llama_batch *>(batch_pointer);
@@ -372,8 +413,16 @@ Java_com_example_llama_Llm_completion_1loop(
     }
 
     auto new_token_chars = llama_token_to_piece(context, new_token_id);
-    LOGi("new_token_chars: `%s`", new_token_chars.c_str());
-    auto new_token = env->NewStringUTF(new_token_chars.c_str());
+    cached_token_chars += new_token_chars;
+
+    jstring new_token = nullptr;
+    if (is_valid_utf8(cached_token_chars.c_str())) {
+        new_token = env->NewStringUTF(cached_token_chars.c_str());
+        LOGi("cached: %s, new_token_chars: `%s`, id: %d", cached_token_chars.c_str(), new_token_chars.c_str(), new_token_id);
+        cached_token_chars.clear();
+    } else {
+        new_token = env->NewStringUTF("");
+    }
+
     llama_batch_clear(*batch);
     llama_batch_add(*batch, new_token_id, n_cur, { 0 }, true);
@@ -71,7 +71,7 @@ class Llm {
         batch: Long,
         nLen: Int,
         ncur: IntVar
-    ): String
+    ): String?
 
     private external fun kv_cache_clear(context: Long)
 
@@ -115,7 +115,7 @@ class Llm {
             val ncur = IntVar(completion_init(state.context, state.batch, message, nlen))
             while (ncur.value <= nlen) {
                 val str = completion_loop(state.context, state.batch, nlen, ncur)
-                if (str.isEmpty()) {
+                if (str == null) {
                     break
                 }
                 emit(str)
@@ -221,6 +221,7 @@ actor LlamaContext {
         if llama_decode(context, batch) != 0 {
             print("llama_decode() failed during prompt")
         }
+        llama_synchronize(context)
 
         let t_pp_end = ggml_time_us()
 
@@ -240,6 +241,7 @@ actor LlamaContext {
             if llama_decode(context, batch) != 0 {
                 print("llama_decode() failed during text generation")
            }
+           llama_synchronize(context)
        }
 
        let t_tg_end = ggml_time_us()
@@ -1,11 +1,13 @@
 # MobileVLM
 
-Currently this implementation supports [MobileVLM-v1.7](https://huggingface.co/mtgv/MobileVLM-1.7B) variants.
+Currently this implementation supports [MobileVLM-1.7B](https://huggingface.co/mtgv/MobileVLM-1.7B) / [MobileVLM_V2-1.7B](https://huggingface.co/mtgv/MobileVLM_V2-1.7B) variants.
 
 for more information, please go to [Meituan-AutoML/MobileVLM](https://github.com/Meituan-AutoML/MobileVLM)
 
 The implementation is based on llava, and is compatible with llava and mobileVLM. The usage is basically same as llava.
 
+Notice: The overall process of model inference for both **MobileVLM** and **MobileVLM_V2** models is the same, but the process of model conversion is a little different. Therefore, using MobiVLM as an example, the different conversion step will be shown.
+
 ## Usage
 Build with cmake or run `make llava-cli` to build it.
 
@@ -34,7 +36,7 @@ git clone https://huggingface.co/openai/clip-vit-large-patch14-336
 python ./examples/llava/llava-surgery.py -m path/to/MobileVLM-1.7B
 ```
 
-3. Use `convert-image-encoder-to-gguf.py` with `--projector-type ldp` to convert the LLaVA image encoder to GGUF:
+3. Use `convert-image-encoder-to-gguf.py` with `--projector-type ldp` (for **V2** the arg is `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:
 
 ```sh
 python ./examples/llava/convert-image-encoder-to-gguf \
@@ -44,6 +46,14 @@ python ./examples/llava/convert-image-encoder-to-gguf \
     --projector-type ldp
 ```
 
+```sh
+python ./examples/llava/convert-image-encoder-to-gguf \
+    -m path/to/clip-vit-large-patch14-336 \
+    --llava-projector path/to/MobileVLM-1.7B_V2/llava.projector \
+    --output-dir path/to/MobileVLM-1.7B_V2 \
+    --projector-type ldpv2
+```
+
 4. Use `convert.py` to convert the LLaMA part of LLaVA to GGUF:
 
 ```sh
@@ -63,12 +63,20 @@ Now both the LLaMA part and the image encoder is in the `llava-v1.5-7b` directory.
 ```console
 git clone https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b
 ```
-2) Use `llava-surgery-v2.py` which also supports llava-1.5 variants pytorch as well as safetensor models:
+
+2) Install the required Python packages:
+
+```sh
+pip install -r examples/llava/requirements.txt
+```
+
+3) Use `llava-surgery-v2.py` which also supports llava-1.5 variants pytorch as well as safetensor models:
 ```console
 python examples/llava/llava-surgery-v2.py -C -m ../llava-v1.6-vicuna-7b/
 ```
 - you will find a llava.projector and a llava.clip file in your model directory
-3) Copy the llava.clip file into a subdirectory (like vit), rename it to pytorch_model.bin and add a fitting vit configuration to the directory:
+
+4) Copy the llava.clip file into a subdirectory (like vit), rename it to pytorch_model.bin and add a fitting vit configuration to the directory:
 ```console
 mkdir vit
 cp ../llava-v1.6-vicuna-7b/llava.clip vit/pytorch_model.bin
@@ -76,18 +84,18 @@ cp ../llava-v1.6-vicuna-7b/llava.projector vit/
 curl -s -q https://huggingface.co/cmp-nct/llava-1.6-gguf/raw/main/config_vit.json -o vit/config.json
 ```
 
-4) Create the visual gguf model:
+5) Create the visual gguf model:
 ```console
 python ./examples/llava/convert-image-encoder-to-gguf.py -m vit --llava-projector vit/llava.projector --output-dir vit --clip-model-is-vision
 ```
 - This is similar to llava-1.5, the difference is that we tell the encoder that we are working with the pure vision model part of CLIP
 
-5) Then convert the model to gguf format:
+6) Then convert the model to gguf format:
 ```console
 python ./convert.py ../llava-v1.6-vicuna-7b/ --skip-unknown
 ```
 
-6) And finally we can run the llava-cli using the 1.6 model version:
+7) And finally we can run the llava-cli using the 1.6 model version:
 ```console
 ./llava-cli -m ../llava-v1.6-vicuna-7b/ggml-model-f16.gguf --mmproj vit/mmproj-model-f16.gguf --image some-image.jpg -c 4096
 ```
@@ -119,6 +119,7 @@ static std::string format(const char * fmt, ...) {
 #define TN_LLAVA_PROJ        "mm.%d.%s"
 #define TN_MVLM_PROJ_MLP     "mm.model.mlp.%d.%s"
 #define TN_MVLM_PROJ_BLOCK   "mm.model.mb_block.%d.block.%d.%s"
+#define TN_MVLM_PROJ_PEG     "mm.model.peg.%d.%s"
 #define TN_IMAGE_NEWLINE     "model.image_newline"
 
 
@@ -126,12 +127,14 @@ enum projector_type {
     PROJECTOR_TYPE_MLP,
     PROJECTOR_TYPE_MLP_NORM,
     PROJECTOR_TYPE_LDP,
+    PROJECTOR_TYPE_LDPV2,
     PROJECTOR_TYPE_UNKNOWN,
 };
 
 static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
     { PROJECTOR_TYPE_MLP, "mlp" },
     { PROJECTOR_TYPE_LDP, "ldp" },
+    { PROJECTOR_TYPE_LDPV2, "ldpv2"},
 };
 
 
@@ -475,6 +478,14 @@ struct clip_vision_model {
     struct ggml_tensor * mm_model_block_2_block_2_0_w;
     struct ggml_tensor * mm_model_block_2_block_2_1_w;
     struct ggml_tensor * mm_model_block_2_block_2_1_b;
+
+    // MobileVLM_V2 projection
+    struct ggml_tensor * mm_model_mlp_0_w;
+    struct ggml_tensor * mm_model_mlp_0_b;
+    struct ggml_tensor * mm_model_mlp_2_w;
+    struct ggml_tensor * mm_model_mlp_2_b;
+    struct ggml_tensor * mm_model_peg_0_w;
+    struct ggml_tensor * mm_model_peg_0_b;
 };
 
 struct clip_ctx {
@@ -497,7 +508,6 @@ struct clip_ctx {
 
     // memory buffers to evaluate the model
     ggml_backend_buffer_t params_buffer  = NULL;
-    ggml_backend_buffer_t compute_buffer = NULL;
 
     ggml_backend_t backend       = NULL;
     ggml_gallocr_t compute_alloc = NULL;
@@ -808,6 +818,29 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         }
         embeddings = block_1;
     }
+    else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2)
+    {
+        int n_patch = 24;
+        struct ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
+        mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
+        mlp_0 = ggml_gelu(ctx0, mlp_0);
+        struct ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
+        mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
+        // mlp_2 ne = [2048, 576, 1, 1]
+        // // AVG Pool Layer 2*2, strides = 2
+        mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
+        // mlp_2 ne = [576, 2048, 1, 1]
+        mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
+        // mlp_2 ne [24, 24, 2048, 1]
+        mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
+        // weight ne = [3, 3, 2048, 1]
+        struct ggml_tensor * peg_0 = ggml_conv_depthwise_2d(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
+        peg_0 = ggml_add(ctx0, peg_0, mlp_2);
+        peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
+        peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
+        peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
+        embeddings = peg_0;
+    }
     else {
         GGML_ASSERT(false);
     }
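The shape bookkeeping in the LDPv2 branch above is easier to follow outside of ggml. Below is a hedged NumPy sketch of the same flow, using the sizes implied by the comments in the hunk (24×24 = 576 patches, 2048-dim hidden); note that ggml's `ne` lists dimensions in the reverse order of a row-major NumPy shape, and the 3×3 depthwise convolution (the "positional encoding generator") is stubbed out because only the shapes matter here.

```python
import numpy as np

n_patch, hidden = 24, 2048
mlp_2 = np.random.rand(576, hidden).astype(np.float32)    # ggml ne = [2048, 576]

x = mlp_2.T                                               # ggml_permute  -> ne = [576, 2048]
x = x.reshape(hidden, n_patch, n_patch)                   # ggml_reshape_4d -> ne = [24, 24, 2048]

# 2x2 average pooling with stride 2 (ggml_pool_2d, GGML_OP_POOL_AVG)
x = x.reshape(hidden, n_patch // 2, 2, n_patch // 2, 2).mean(axis=(2, 4))   # -> [12, 12, 2048]

# Stand-in for ggml_conv_depthwise_2d: same shape out, added back as a residual.
peg = x
x = peg + x

tokens = x.reshape(hidden, -1).T                          # ggml_reshape_3d + permute
print(tokens.shape)                                       # (144, 2048): 144 image tokens
```

The net effect is that the V2 projector reduces the 576 CLIP patch embeddings to 144 image tokens of width 2048 before they are handed to the language model.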
@@ -995,6 +1028,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     if (!new_clip->ctx_data) {
         fprintf(stderr, "%s: ggml_init() failed\n", __func__);
         clip_free(new_clip);
+        gguf_free(ctx);
         return nullptr;
     }
 
@@ -1002,6 +1036,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     if (!fin) {
         printf("cannot open model file for loading tensors\n");
         clip_free(new_clip);
+        gguf_free(ctx);
         return nullptr;
     }
 
@@ -1023,6 +1058,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         if (!fin) {
             printf("%s: failed to seek for tensor %s\n", __func__, name);
             clip_free(new_clip);
+            gguf_free(ctx);
             return nullptr;
         }
         int num_bytes = ggml_nbytes(cur);
@@ -1175,7 +1211,18 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         vision_model.mm_model_block_2_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
         vision_model.mm_model_block_2_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
         vision_model.mm_model_block_2_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
-    } else {
+    }
+    else if (new_clip->proj_type == PROJECTOR_TYPE_LDPV2)
+    {
+        // MobilVLM_V2 projection
+        vision_model.mm_model_mlp_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "weight"));
+        vision_model.mm_model_mlp_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "bias"));
+        vision_model.mm_model_mlp_2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "weight"));
+        vision_model.mm_model_mlp_2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "bias"));
+        vision_model.mm_model_peg_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "weight"));
+        vision_model.mm_model_peg_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "bias"));
+    }
+    else {
         std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type];
         throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
     }
@@ -1232,16 +1279,16 @@ struct clip_image_f32 * clip_image_f32_init() {
 
 void clip_image_u8_free(struct clip_image_u8  * img) { delete img; }
 void clip_image_f32_free(struct clip_image_f32 * img) { delete img; }
-void clip_image_u8_batch_free(struct clip_image_u8_batch & batch) {
-    if (batch.size > 0) {
-        delete[] batch.data;
-        batch.size = 0;
+void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) {
+    if (batch->size > 0) {
+        delete[] batch->data;
+        batch->size = 0;
     }
 }
-void clip_image_f32_batch_free(struct clip_image_f32_batch & batch) {
-    if (batch.size > 0) {
-        delete[] batch.data;
-        batch.size = 0;
+void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) {
+    if (batch->size > 0) {
+        delete[] batch->data;
+        batch->size = 0;
     }
 }
 
@@ -1494,7 +1541,7 @@ static std::vector<clip_image_u8*> divide_to_patches_u8(const clip_image_u8 & im
 
 // returns the normalized float tensor for llava-1.5, for spatial_unpad with anyres processing for llava-1.6 it returns the normalized image patch tensors as a vector
 // res_imgs memory is being allocated here, previous allocations will be freed if found
-bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch & res_imgs) {
+bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch * res_imgs) {
     bool pad_to_square = true;
     if (!ctx->has_vision_encoder) {
         printf("This gguf file seems to have no vision encoder\n");
@@ -1506,11 +1553,11 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, cli
         pad_to_square = false;
     }
     // free the previous res_imgs if any set
-    if (res_imgs.size > 0) {
+    if (res_imgs->size > 0) {
         clip_image_f32_batch_free(res_imgs);
     }
-    res_imgs.data = nullptr;
-    res_imgs.size = 0;
+    res_imgs->data = nullptr;
+    res_imgs->size = 0;
 
     // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
     // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
@@ -1565,11 +1612,11 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, cli
         bicubic_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square
         patches.insert(patches.begin(), image_original_resize);
         // clip_image_f32_batch_init(patches.size());
-        res_imgs.size = patches.size();
-        res_imgs.data = new clip_image_f32[res_imgs.size];
+        res_imgs->size = patches.size();
+        res_imgs->data = new clip_image_f32[res_imgs->size];
         int num=0;
         for (auto& patch : patches) {
-            normalize_image_u8_to_f32(patch, &res_imgs.data[num], ctx->image_mean, ctx->image_std);
+            normalize_image_u8_to_f32(patch, &res_imgs->data[num], ctx->image_mean, ctx->image_std);
            num++;
        }
 
@@ -1657,9 +1704,9 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, cli
    // }
    // res_imgs.push_back(res);
 
-   res_imgs.size = 1;
-   res_imgs.data = new clip_image_f32[res_imgs.size];
-   res_imgs.data[0] = *res;
+   res_imgs->size = 1;
+   res_imgs->data = new clip_image_f32[res_imgs->size];
+   res_imgs->data[0] = *res;
    clip_image_f32_free(res);
 
    return true;
@@ -1673,6 +1720,9 @@ void clip_free(clip_ctx * ctx) {
    ggml_free(ctx->ctx_data);
    gguf_free(ctx->ctx_gguf);
 
+   ggml_backend_buffer_free(ctx->params_buffer);
+   ggml_backend_free(ctx->backend);
+   ggml_gallocr_free(ctx->compute_alloc);
    delete ctx;
 }
 
@@ -1862,7 +1912,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
 
    std::vector<uint8_t> work(512);
    std::vector<float> conv_buf(512);
-   std::vector<int64_t> hist_all(1 << 4, 0);
    size_t total_size_org = 0;
    size_t total_size_new = 0;
 
@@ -1909,6 +1958,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
                break;
            default:
                printf("Please use an input file in f32 or f16\n");
+               gguf_free(ctx_out);
                return false;
        }
 
@@ -1917,48 +1967,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
            }
            new_data = work.data();
 
-           std::vector<int64_t> hist_cur(1 << 4, 0);
-
-           switch (new_type) {
-               case GGML_TYPE_Q4_0: {
-                   new_size = ggml_quantize_q4_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q4_1: {
-                   new_size = ggml_quantize_q4_1(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q5_0: {
-                   new_size = ggml_quantize_q5_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q5_1: {
-                   new_size = ggml_quantize_q5_1(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q8_0: {
-                   new_size = ggml_quantize_q8_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q2_K: {
-                   new_size = ggml_quantize_q2_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q3_K: {
-                   new_size = ggml_quantize_q3_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q4_K: {
-                   new_size = ggml_quantize_q4_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q5_K: {
-                   new_size = ggml_quantize_q5_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               case GGML_TYPE_Q6_K: {
-                   new_size = ggml_quantize_q6_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-               } break;
-               default: {
-                   fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, new_type);
-                   return false;
-               }
-           }
-
-           for (size_t j = 0; j < hist_cur.size(); ++j) {
-               hist_all[j] += hist_cur[j];
-           }
+           new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr);
        } else {
            new_type = cur->type;
            new_data = cur->data;
@@ -1993,17 +2002,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
    {
        printf("%s: original size  = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
        printf("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
-
-       int64_t sum_all = 0;
-       for (size_t i = 0; i < hist_all.size(); ++i) {
-           sum_all += hist_all[i];
-       }
-
-       printf("%s: hist: ", __func__);
-       for (size_t i = 0; i < hist_all.size(); ++i) {
-           printf("%5.3f ", hist_all[i] / (float)sum_all);
-       }
-       printf("\n");
    }
 
    return true;
@@ -2013,6 +2011,9 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
        return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
    }
+   if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
+       return ctx->vision_model.mm_model_peg_0_b->ne[0];
+   }
    if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
        return ctx->vision_model.mm_2_b->ne[0];
    }
@@ -60,8 +60,8 @@ CLIP_API struct clip_image_f32 * clip_image_f32_init();

 CLIP_API void clip_image_u8_free (struct clip_image_u8 * img);
 CLIP_API void clip_image_f32_free(struct clip_image_f32 * img);
-CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch & batch);
-CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch & batch);
+CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch * batch);
+CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch * batch);

 CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);

@@ -69,7 +69,7 @@ CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8
 CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);

 /** preprocess img and store the result in res_imgs, pad_to_square may be overriden to false depending on model configuration */
-CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch & res_imgs );
+CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs );

 CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);

@@ -1,6 +1,7 @@
 import argparse
 import os
 import json
+import re

 import torch
 import numpy as np
@@ -38,9 +39,11 @@ def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_llava: b
 def get_tensor_name(name: str) -> str:
     if "projection" in name:
         return name

     if "mm_projector" in name:
-        return name.replace("model.mm_projector", "mm")
+        name = name.replace("model.mm_projector", "mm")
+        name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1)
+        name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1)
+        return name

     return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln")

@@ -83,7 +86,7 @@ ap.add_argument("--clip-model-is-vision", action="store_true", required=False,
 ap.add_argument("--clip-model-is-openclip", action="store_true", required=False,
                 help="The clip model is from openclip (for ViT-SO400M type))")
 ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
-ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp", choices=["mlp", "ldp"], default="mlp")
+ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp, ldpv2", choices=["mlp", "ldp", "ldpv2"], default="mlp")
 ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
 # Example --image_mean 0.48145466 0.4578275 0.40821073 --image_std 0.26862954 0.26130258 0.27577711
 # Example --image_mean 0.5 0.5 0.5 --image_std 0.5 0.5 0.5
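For illustration, a standalone sketch of the renaming rules this hunk adds to `get_tensor_name`; the tensor names below are invented examples, not taken from a real LDPv2 checkpoint:

```python
import re

def get_tensor_name(name: str) -> str:
    # same rewrite rules as the hunk above, isolated for demonstration
    if "projection" in name:
        return name
    if "mm_projector" in name:
        name = name.replace("model.mm_projector", "mm")
        name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1)
        name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1)
        return name
    return name

print(get_tensor_name("model.mm_projector.mlp.mlp.0.weight"))  # -> mm.model.mlp.0.weight
print(get_tensor_name("model.mm_projector.peg.peg.0.bias"))    # -> mm.model.peg.0.bias
```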
@@ -223,7 +223,7 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli
     clip_image_f32_batch img_res_v;
     img_res_v.size = 0;
     img_res_v.data = nullptr;
-    if (!clip_image_preprocess(ctx_clip, img, img_res_v)) {
+    if (!clip_image_preprocess(ctx_clip, img, &img_res_v)) {
         fprintf(stderr, "%s: unable to preprocess image\n", __func__);
         delete[] img_res_v.data;
         return false;
@@ -29,9 +29,9 @@ struct llava_image_embed {
 };

 /** sanity check for clip <-> llava embed size match */
-LLAVA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip);
+LLAVA_API bool llava_validate_embed_size(const struct llama_context * ctx_llama, const struct clip_ctx * ctx_clip);

-LLAVA_API bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out);
+LLAVA_API bool llava_image_embed_make_with_clip_img(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out);

 /** build an image embed from image file bytes */
 LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
@@ -67,6 +67,7 @@ main.exe -m models\7B\ggml-model.bin --ignore-eos -n -1 --random-prompt
 In this section, we cover the most commonly used options for running the `main` program with the LLaMA models:

 - `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
+- `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (e.g https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf).
 - `-i, --interactive`: Run the program in interactive mode, allowing you to provide input directly and receive real-time responses.
 - `-ins, --instruct`: Run the program in instruction mode, which is particularly useful when working with Alpaca models.
 - `-n N, --n-predict N`: Set the number of tokens to predict when generating text. Adjusting this value can influence the length of the generated text.
@@ -878,6 +878,7 @@ int main(int argc, char ** argv) {
             const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
             const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
             const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);

             LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());

             embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
@@ -107,6 +107,9 @@ int main(int argc, char ** argv) {
     // number of simultaneous "clients" to simulate
     const int32_t n_clients = params.n_parallel;

+    // dedicate one sequence to the system prompt
+    params.n_parallel += 1;
+
     // requests to simulate
     const int32_t n_seq = params.n_sequences;

@@ -196,8 +199,8 @@ int main(int argc, char ** argv) {
         }

         // assign the system KV cache to all parallel sequences
-        for (int32_t i = 1; i < n_clients; ++i) {
-            llama_kv_cache_seq_cp(ctx, 0, i, 0, n_tokens_system);
+        for (int32_t i = 1; i <= n_clients; ++i) {
+            llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
         }

         LOG_TEE("\n");
@@ -221,15 +224,17 @@ int main(int argc, char ** argv) {

             client.i_batch = batch.n_tokens;

-            llama_batch_add(batch, client.sampled, n_tokens_system + client.n_prompt + client.n_decoded, { client.id }, true);
+            llama_batch_add(batch, client.sampled, n_tokens_system + client.n_prompt + client.n_decoded, { client.id + 1 }, true);

             client.n_decoded += 1;
         }

         if (batch.n_tokens == 0) {
             // all sequences have ended - clear the entire KV cache
-            for (int i = 0; i < n_clients; ++i) {
-                llama_kv_cache_seq_rm(ctx, i, n_tokens_system, -1);
+            for (int i = 1; i <= n_clients; ++i) {
+                llama_kv_cache_seq_rm(ctx, i, -1, -1);
+                // but keep the system prompt
+                llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
             }

             LOG_TEE("%s: clearing the KV cache\n", __func__);
@@ -255,7 +260,7 @@ int main(int argc, char ** argv) {
                 tokens_prompt = ::llama_tokenize(ctx, client.prompt, false);

                 for (size_t i = 0; i < tokens_prompt.size(); ++i) {
-                    llama_batch_add(batch, tokens_prompt[i], i + n_tokens_system, { client.id }, false);
+                    llama_batch_add(batch, tokens_prompt[i], i + n_tokens_system, { client.id + 1 }, false);
                 }

                 // extract the logits only for the last token
@@ -366,7 +371,8 @@ int main(int argc, char ** argv) {
                 }

                 // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
-                llama_kv_cache_seq_rm(ctx, client.id, n_tokens_system, -1);
+                llama_kv_cache_seq_rm(ctx, client.id + 1, -1, -1);
+                llama_kv_cache_seq_cp(ctx, 0, client.id + 1, -1, -1);

                 const auto t_main_end = ggml_time_us();

@@ -442,7 +442,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
     return {tokens, std::exp(nll / count), logit_history, prob_history};
 }

-static results_perplexity perplexity(llama_context * ctx, const gpt_params & params) {
+static results_perplexity perplexity(llama_context * ctx, const gpt_params & params, const int32_t n_ctx) {
     if (params.ppl_stride > 0) {
         return perplexity_v2(ctx, params);
     }
@@ -453,7 +453,6 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
     // BOS tokens will be added for each chunk before eval

     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
-    const int n_ctx = llama_n_ctx(ctx);

     std::ofstream logits_stream;
     if (!params.logits_file.empty()) {
@@ -499,13 +498,19 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
     double nll2 = 0.0;

     const int num_batches = (n_ctx + n_batch - 1) / n_batch;
+    const int n_seq = std::max(1, n_batch / n_ctx);

+    GGML_ASSERT(n_batch < n_ctx || n_batch % n_ctx == 0);
+    GGML_ASSERT(params.n_ctx == n_seq * n_ctx);

+    llama_batch batch = llama_batch_init(std::min(n_batch, n_ctx*n_seq), 0, 1);

     std::vector<float> logits;
     if (num_batches > 1) {
         logits.reserve((size_t)n_ctx * n_vocab);
     }

-    fprintf(stderr, "%s: calculating perplexity over %d chunks, batch_size=%d\n", __func__, n_chunk, n_batch);
+    fprintf(stderr, "%s: calculating perplexity over %d chunks, n_ctx=%d, batch_size=%d, n_seq=%d\n", __func__, n_chunk, n_ctx, n_batch, n_seq);

     std::vector<std::thread> workers(std::thread::hardware_concurrency() - 1);

@@ -518,54 +523,6 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
         log_probs.resize(n_ctx * nv);
     }

-    for (int i = 0; i < n_chunk; ++i) {
-        const int start = i * n_ctx;
-        const int end = start + n_ctx;
-
-        const auto t_start = std::chrono::high_resolution_clock::now();
-
-        // clear the KV cache
-        llama_kv_cache_clear(ctx);
-
-        for (int j = 0; j < num_batches; ++j) {
-            const int batch_start = start + j * n_batch;
-            const int batch_size = std::min(end - batch_start, n_batch);
-
-            // save original token and restore it after eval
-            const auto token_org = tokens[batch_start];
-
-            // add BOS token for the first batch of each chunk
-            if (add_bos && j == 0) {
-                tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
-            }
-
-            if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
-                fprintf(stderr, "%s : failed to eval\n", __func__);
-                return {tokens, -1, logit_history, prob_history};
-            }
-
-            // restore the original token in case it was set to BOS
-            tokens[batch_start] = token_org;
-
-            if (num_batches > 1) {
-                const auto * batch_logits = llama_get_logits(ctx);
-                logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
-            }
-        }
-
-        const auto t_end = std::chrono::high_resolution_clock::now();
-
-        if (i == 0) {
-            const float t_total = std::chrono::duration<float>(t_end - t_start).count();
-            fprintf(stderr, "%s: %.2f seconds per pass - ETA ", __func__, t_total);
-            int total_seconds = (int)(t_total * n_chunk);
-            if (total_seconds >= 60*60) {
-                fprintf(stderr, "%d hours ", total_seconds / (60*60));
-                total_seconds = total_seconds % (60*60);
-            }
-            fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
-        }
-
     // We get the logits for all the tokens in the context window (params.n_ctx)
     // from llama_eval above. Now, based on https://huggingface.co/docs/transformers/perplexity,
     // calculate the perplexity over the last half of the window (so the model always has
@@ -579,25 +536,99 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
     // last 256 tokens. Then, we split the input up into context window size chunks to
     // process the entire prompt.
     const int first = n_ctx/2;
-    const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
+    for (int i = 0; i < n_chunk; i += n_seq) {
+        const int start = i * n_ctx;
+        const int end = start + n_ctx;
+
+        const int n_seq_batch = std::min(n_seq, n_chunk - i);
+
+        const auto t_start = std::chrono::high_resolution_clock::now();
+
+        // clear the KV cache
+        llama_kv_cache_clear(ctx);
+
+        for (int j = 0; j < num_batches; ++j) {
+            const int batch_start = start + j * n_batch;
+            const int batch_size = std::min(end - batch_start, n_batch);
+
+            batch.n_tokens = 0;
+            for (int seq = 0; seq < n_seq_batch; seq++) {
+                int seq_start = batch_start + seq*n_ctx;
+
+                // save original token and restore it after eval
+                const auto token_org = tokens[seq_start];
+
+                // add BOS token for the first batch of each chunk
+                if (add_bos && j == 0) {
+                    tokens[seq_start] = llama_token_bos(llama_get_model(ctx));
+                }
+
+                for (int k = 0; k < batch_size; ++k) {
+                    const int idx = seq*n_ctx + k;
+                    batch.token[idx] = tokens[seq_start + k];
+                    batch.pos[idx] = j*n_batch + k;
+                    batch.n_seq_id[idx] = 1;
+                    batch.seq_id[idx][0] = seq;
+                    batch.logits[idx] = batch.pos[idx] >= first ? 1 : 0;
+                }
+                batch.n_tokens += batch_size;
+
+                // restore the original token in case it was set to BOS
+                tokens[seq_start] = token_org;
+            }
+
+            if (llama_decode(ctx, batch)) {
+                fprintf(stderr, "%s : failed to eval\n", __func__);
+                return {tokens, -1, logit_history, prob_history};
+            }
+
+            if (num_batches > 1) {
+                const auto * batch_logits = llama_get_logits(ctx);
+                logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
+            }
+        }
+
+        if (i == 0) {
+            llama_synchronize(ctx);
+            const auto t_end = std::chrono::high_resolution_clock::now();
+            const float t_total = std::chrono::duration<float>(t_end - t_start).count();
+            fprintf(stderr, "%s: %.2f seconds per pass - ETA ", __func__, t_total);
+            int total_seconds = (int)(t_total*n_chunk/n_seq);
+            if (total_seconds >= 60*60) {
+                fprintf(stderr, "%d hours ", total_seconds / (60*60));
+                total_seconds = total_seconds % (60*60);
+            }
+            fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
+        }
+
+        for (int seq = 0; seq < n_seq_batch; seq++) {
+            const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits_ith(ctx, seq*n_ctx);
+            llama_token * tokens_data = tokens.data() + start + seq*n_ctx + first;
             if (!params.logits_file.empty()) {
-                process_logits(logits_stream, n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
+                process_logits(logits_stream, n_vocab, all_logits + first*n_vocab,
+                               tokens_data, n_ctx - 1 - first,
                                workers, log_probs, nll, nll2);
             } else {
-                process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
-                               workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
+                process_logits(n_vocab, all_logits + first*n_vocab,
+                               tokens_data, n_ctx - 1 - first,
+                               workers, nll, nll2,
+                               logit_history.data() + start + seq*n_ctx + first,
+                               prob_history.data() + start + seq*n_ctx + first);
             }
             count += n_ctx - first - 1;

             // perplexity is e^(average negative log-likelihood)
             if (params.ppl_output_type == 0) {
-                printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
+                printf("[%d]%.4lf,", i + seq + 1, std::exp(nll / count));
             } else {
                 double av = nll/count;
                 double av2 = nll2/count - av*av;
                 if (av2 > 0) av2 = sqrt(av2/(count-1));
                 printf("%8d %.4lf %4lf %4lf\n", i*n_ctx, std::exp(nll / count), av, av2);
             }
+        }
         fflush(stdout);

         logits.clear();
@@ -615,6 +646,8 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
         printf("Unexpected negative standard deviation of log(prob)\n");
     }

+    llama_batch_free(batch);

     return {tokens, ppl, logit_history, prob_history};
 }

@@ -809,7 +842,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
     const int n_batch = params.n_batch;

     const int max_tasks_per_batch = 32;
-    const int max_seq = 4*max_tasks_per_batch;
+    const int max_seq = std::min(4*max_tasks_per_batch, (int) llama_n_seq_max(ctx));

     llama_batch batch = llama_batch_init(n_ctx, 0, max_seq);

@@ -1086,7 +1119,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
     const int n_batch = params.n_batch;

     const int max_tasks_per_batch = 128;
-    const int max_seq = 2*max_tasks_per_batch;
+    const int max_seq = std::min(2*max_tasks_per_batch, (int) llama_n_seq_max(ctx));

     llama_batch batch = llama_batch_init(n_ctx, 0, max_seq);

@@ -1438,7 +1471,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
     const int n_batch = params.n_batch;

     const int max_tasks_per_batch = 32;
-    const int max_seq = 4*max_tasks_per_batch;
+    const int max_seq = std::min(4*max_tasks_per_batch, (int) llama_n_seq_max(ctx));

     llama_batch batch = llama_batch_init(n_ctx, 0, max_seq);

@@ -1782,13 +1815,24 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
 int main(int argc, char ** argv) {
     gpt_params params;

-    params.n_batch = 512;
     if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }

     params.logits_all = true;

+    const int32_t n_ctx = params.n_ctx;
+
+    const bool ppl = !params.hellaswag && !params.winogrande && !params.multiple_choice && !params.kl_divergence;
+    if (ppl) {
+        int n_seq = std::max(1, params.n_batch / n_ctx);
+        int32_t n_kv = n_seq * n_ctx;
+        params.n_parallel = n_seq;
+        params.n_ctx = n_kv;
+        params.n_batch = std::min(params.n_batch, n_kv);
+    } else {
         params.n_batch = std::min(params.n_batch, params.n_ctx);
+    }

     if (params.ppl_stride > 0) {
         fprintf(stderr, "Will perform strided perplexity calculation -> adjusting context size from %d to %d\n",
@@ -1815,6 +1859,9 @@ int main(int argc, char ** argv) {
     llama_model * model;
     llama_context * ctx;

+    // ensure there's at least enough seq_ids for HellaSwag
+    params.n_parallel = std::max(4, params.n_parallel);

     // load the model and apply lora adapter, if any
     std::tie(model, ctx) = llama_init_from_gpt_params(params);
     if (model == NULL) {
@@ -1844,7 +1891,7 @@ int main(int argc, char ** argv) {
     } else if (params.kl_divergence) {
         kl_divergence(ctx, params);
     } else {
-        results = perplexity(ctx, params);
+        results = perplexity(ctx, params, n_ctx);
     }

     llama_print_timings(ctx);
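As a side note on the quantity the rewritten loop accumulates: perplexity is the exponential of the average negative log-likelihood of the scored tokens (here, the second half of each context window). A toy sketch with made-up probabilities, not part of the diff above:

```python
import math

# invented per-token probabilities the model assigned to the scored tokens
token_probs = [0.25, 0.10, 0.50, 0.05]

nll = -sum(math.log(p) for p in token_probs)  # accumulated negative log-likelihood
count = len(token_probs)                      # number of scored tokens
ppl = math.exp(nll / count)                   # perplexity = exp(average NLL)
print(f"perplexity over {count} tokens: {ppl:.4f}")
```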
examples/regex-to-grammar.py (new file, 20 lines)
@@ -0,0 +1,20 @@
+import json, subprocess, sys, os
+
+assert len(sys.argv) >= 2
+[_, pattern, *rest] = sys.argv
+
+print(subprocess.check_output(
+    [
+        "python",
+        os.path.join(
+            os.path.dirname(os.path.realpath(__file__)),
+            "json-schema-to-grammar.py"),
+        *rest,
+        "-",
+        "--raw-pattern",
+    ],
+    text=True,
+    input=json.dumps({
+        "type": "string",
+        "pattern": pattern,
+    }, indent=2)))
examples/server-embd.py (new file, 34 lines)
@@ -0,0 +1,34 @@
+import asyncio
+import requests
+import numpy as np
+
+n = 8
+
+result = []
+
+async def requests_post_async(*args, **kwargs):
+    return await asyncio.to_thread(requests.post, *args, **kwargs)
+
+async def main():
+    model_url = "http://127.0.0.1:6900"
+    responses: list[requests.Response] = await asyncio.gather(*[requests_post_async(
+        url= f"{model_url}/embedding",
+        json= {"content": str(0)*1024}
+    ) for i in range(n)])
+
+    for response in responses:
+        embedding = response.json()["embedding"]
+        print(embedding[-8:])
+        result.append(embedding)
+
+asyncio.run(main())
+
+# compute cosine similarity
+
+for i in range(n-1):
+    for j in range(i+1, n):
+        embedding1 = np.array(result[i])
+        embedding2 = np.array(result[j])
+        similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
+        print(f"Similarity between {i} and {j}: {similarity:.2f}")
@@ -1,12 +1,22 @@
 set(TARGET server)
 option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
+option(LLAMA_SERVER_SSL "Build SSL support for the server" OFF)
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-add_executable(${TARGET} server.cpp oai.hpp utils.hpp json.hpp httplib.h)
+add_executable(${TARGET}
+    server.cpp
+    utils.hpp
+    httplib.h
+    )
 install(TARGETS ${TARGET} RUNTIME)
 target_compile_definitions(${TARGET} PRIVATE
     SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
 )
-target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE common json-schema-to-grammar ${CMAKE_THREAD_LIBS_INIT})
+if (LLAMA_SERVER_SSL)
+    find_package(OpenSSL REQUIRED)
+    target_link_libraries(${TARGET} PRIVATE OpenSSL::SSL OpenSSL::Crypto)
+    target_compile_definitions(${TARGET} PRIVATE CPPHTTPLIB_OPENSSL_SUPPORT)
+endif()
 if (WIN32)
     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
 endif()
@@ -20,6 +20,7 @@ The project is under active development, and we are [looking for feedback and co
 - `-tb N, --threads-batch N`: Set the number of threads to use during batch and prompt processing. If not specified, the number of threads will be set to the number of threads used for generation.
 - `--threads-http N`: number of threads in the http server pool to process requests (default: `max(std::thread::hardware_concurrency() - 1, --parallel N + 2)`)
 - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
+- `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (e.g https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf).
 - `-a ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
 - `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096.
 - `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
@@ -42,7 +43,7 @@ see https://github.com/ggerganov/llama.cpp/issues/1437
 - `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
 - `--host`: Set the hostname or ip address to listen. Default `127.0.0.1`.
 - `--port`: Set the port to listen. Default: `8080`.
-- `--path`: path from which to serve static files (default examples/server/public)
+- `--path`: path from which to serve static files (default: disabled)
 - `--api-key`: Set an api key for request authorization. By default the server responds to every request. With an api key set, the requests must have the Authorization header set with the api key as Bearer token. May be used multiple times to enable multiple valid keys.
 - `--api-key-file`: path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access. May be used in conjunction with `--api-key`'s.
 - `--embedding`: Enable embedding extraction, Default: disabled.
@@ -59,6 +60,10 @@ see https://github.com/ggerganov/llama.cpp/issues/1437
 - `--log-disable`: Output logs to stdout only, default: enabled.
 - `--log-format FORMAT`: Define the log output to FORMAT: json or text (default: json)

+**If compiled with `LLAMA_SERVER_SSL=ON`**
+- `--ssl-key-file FNAME`: path to file a PEM-encoded SSL private key
+- `--ssl-cert-file FNAME`: path to file a PEM-encoded SSL certificate
+
 ## Build

 server is build alongside everything else from the root of the project
@@ -75,6 +80,28 @@ server is build alongside everything else from the root of the project
 cmake --build . --config Release
 ```

+## Build with SSL
+
+server can also be built with SSL support using OpenSSL 3
+
+- Using `make`:
+
+```bash
+# NOTE: For non-system openssl, use the following:
+#   CXXFLAGS="-I /path/to/openssl/include"
+#   LDFLAGS="-L /path/to/openssl/lib"
+make LLAMA_SERVER_SSL=true server
+```
+
+- Using `CMake`:
+
+```bash
+mkdir build
+cd build
+cmake .. -DLLAMA_SERVER_SSL=ON
+make server
+```
+
 ## Quick Start

 To get started right away, run the following command, making sure to use the correct path for the model you have:
@@ -97,10 +124,10 @@ You can consume the endpoints with Postman or NodeJS with axios library. You can
 ### Docker

 ```bash
-docker run -p 8080:8080 -v /path/to/models:/models ggerganov/llama.cpp:server -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080
+docker run -p 8080:8080 -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:server -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080

 # or, with CUDA:
-docker run -p 8080:8080 -v /path/to/models:/models --gpus all ggerganov/llama.cpp:server-cuda -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080 --n-gpu-layers 99
+docker run -p 8080:8080 -v /path/to/models:/models --gpus all ghcr.io/ggerganov/llama.cpp:server-cuda -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080 --n-gpu-layers 99
 ```

 ## Testing with CURL
@@ -169,7 +196,11 @@ node index.js

 *Options:*

-`prompt`: Provide the prompt for this completion as a string or as an array of strings or numbers representing tokens. Internally, the prompt is compared to the previous completion and only the "unseen" suffix is evaluated. If the prompt is a string or an array with the first element given as a string, a `bos` token is inserted in the front like `main` does.
+`prompt`: Provide the prompt for this completion as a string or as an array of strings or numbers representing tokens. Internally, if `cache_prompt` is `true`, the prompt is compared to the previous completion and only the "unseen" suffix is evaluated. A `BOS` token is inserted at the start, if all of the following conditions are true:
+
+- The prompt is a string or an array with the first element given as a string
+- The model's `tokenizer.ggml.add_bos_token` metadata is `true`
+- The system prompt is empty

 `temperature`: Adjust the randomness of the generated text (default: 0.8).

@@ -229,7 +260,7 @@ node index.js

 `image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "<BASE64_STRING>", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA.

-`slot_id`: Assign the completion task to an specific slot. If is -1 the task will be assigned to a Idle slot (default: -1)
+`id_slot`: Assign the completion task to an specific slot. If is -1 the task will be assigned to a Idle slot (default: -1)

 `cache_prompt`: Re-use previously cached prompt from the last request if possible. This may prevent re-caching the prompt from scratch. (default: false)

@@ -282,7 +313,7 @@ Notice that each `probs` is an array of length `n_probs`.

 `content`: Set the text to tokenize.

-Note that the special `BOS` token is not added in front of the text and also a space character is not inserted automatically as it is for `/completion`.
+Note that a special `BOS` token is never inserted.

 - **POST** `/detokenize`: Convert tokens to text.

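As a quick illustration of the `/tokenize` and `/detokenize` endpoints described above, a minimal sketch assuming a local server on port 8080 with no API key configured:

```python
import requests

base_url = "http://localhost:8080"  # adjust for your own server

tok = requests.post(f"{base_url}/tokenize", json={"content": "Hello, world!"}).json()
print(tok["tokens"])  # list of token ids; no BOS token is inserted

detok = requests.post(f"{base_url}/detokenize", json={"tokens": tok["tokens"]}).json()
print(detok["content"])  # should round-trip back to the original text
```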
@@ -436,7 +467,7 @@ Notice that each `probs` is an array of length `n_probs`.
     "next_token": {
         "has_next_token": true,
         "n_remain": -1,
-        "num_tokens_predicted": 0,
+        "n_decoded": 0,
         "stopped_eos": false,
         "stopped_limit": false,
         "stopped_word": false,
@@ -526,13 +557,55 @@ Run with bash:
 bash chat.sh
 ```

-### API like OAI
+### OAI-like API

-The HTTP server supports OAI-like API
+The HTTP server supports OAI-like API: https://github.com/openai/openai-openapi
+
+### API errors
+
+Server returns error in the same format as OAI: https://github.com/openai/openai-openapi
+
+Example of an error:
+
+```json
+{
+    "error": {
+        "code": 401,
+        "message": "Invalid API Key",
+        "type": "authentication_error"
+    }
+}
+```
+
+Apart from error types supported by OAI, we also have custom types that are specific to functionalities of llama.cpp:
+
+**When /metrics or /slots endpoint is disabled**
+
+```json
+{
+    "error": {
+        "code": 501,
+        "message": "This server does not support metrics endpoint.",
+        "type": "not_supported_error"
+    }
+}
+```
+
+**When the server receives invalid grammar via */completions endpoint**
+
+```json
+{
+    "error": {
+        "code": 400,
+        "message": "Failed to parse grammar",
+        "type": "invalid_request_error"
+    }
+}
+```

 ### Extending or building alternative Web Front End

-The default location for the static files is `examples/server/public`. You can extend the front end by running the server binary with `--path` set to `./your-directory` and importing `/completion.js` to get access to the llamaComplete() method.
+You can extend the front end by running the server binary with `--path` set to `./your-directory` and importing `/completion.js` to get access to the llamaComplete() method.

 Read the documentation in `/completion.js` to see convenient ways to access llama.

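A small sketch of calling the OAI-like endpoint and handling the error format documented above; the URL, model alias, and API key here are placeholders for your own setup:

```python
import requests

resp = requests.post(
    "http://localhost:8080/v1/chat/completions",
    headers={"Authorization": "Bearer sk-test"},  # only needed if --api-key was set
    json={
        "model": "my-model",
        "messages": [{"role": "user", "content": "Say hello in one word."}],
        "max_tokens": 16,
    },
)

body = resp.json()
if "error" in body:
    # e.g. {"code": 401, "message": "Invalid API Key", "type": "authentication_error"}
    err = body["error"]
    print(f"{err['type']} ({err['code']}): {err['message']}")
else:
    print(body["choices"][0]["message"]["content"])
```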
examples/server/bench/README.md (new file, 88 lines)
@@ -0,0 +1,88 @@
+### Server benchmark tools
+
+Benchmark is using [k6](https://k6.io/).
+
+##### Install k6
+
+Follow instruction from: https://k6.io/docs/get-started/installation/
+
+Example for ubuntu:
+```shell
+snap install k6
+```
+
+#### Download a dataset
+
+This dataset was originally proposed in [vLLM benchmarks](https://github.com/vllm-project/vllm/blob/main/benchmarks/README.md).
+
+```shell
+wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
+```
+
+#### Download a model
+Example for PHI-2
+
+```shell
+../../../scripts/hf.sh --repo ggml-org/models --file phi-2/ggml-model-q4_0.gguf
+```
+
+#### Start the server
+The server must answer OAI Chat completion requests on `http://localhost:8080/v1` or according to the environment variable `SERVER_BENCH_URL`.
+
+Example:
+```shell
+server --host localhost --port 8080 \
+  --model ggml-model-q4_0.gguf \
+  --cont-batching \
+  --metrics \
+  --parallel 8 \
+  --batch-size 512 \
+  --ctx-size 4096 \
+  --log-format text \
+  -ngl 33
+```
+
+#### Run the benchmark
+
+For 500 chat completions request with 8 concurrent users during maximum 10 minutes, run:
+```shell
+k6 run script.js --duration 10m --iterations 500 --vus 8
+```
+
+The benchmark values can be overridden with:
+- `SERVER_BENCH_URL` server url prefix for chat completions, default `http://localhost:8080/v1`
+- `SERVER_BENCH_N_PROMPTS` total prompts to randomly select in the benchmark, default `480`
+- `SERVER_BENCH_MODEL_ALIAS` model alias to pass in the completion request, default `my-model`
+- `SERVER_BENCH_MAX_TOKENS` max tokens to predict, default: `512`
+- `SERVER_BENCH_DATASET` path to the benchmark dataset file
+- `SERVER_BENCH_MAX_PROMPT_TOKENS` maximum prompt tokens to filter out in the dataset: default `1024`
+- `SERVER_BENCH_MAX_CONTEXT` maximum context size of the completions request to filter out in the dataset: prompt + predicted tokens, default `2048`
+
+Note: the local tokenizer is just a string space split, real number of tokens will differ.
+
+Or with [k6 options](https://k6.io/docs/using-k6/k6-options/reference/):
+
+```shell
+SERVER_BENCH_N_PROMPTS=500 k6 run script.js --duration 10m --iterations 500 --vus 8
+```
+
+To [debug http request](https://k6.io/docs/using-k6/http-debugging/) use `--http-debug="full"`.
+
+#### Metrics
+
+Following metrics are available computed from the OAI chat completions response `usage`:
+- `llamacpp_tokens_second` Trend of `usage.total_tokens / request duration`
+- `llamacpp_prompt_tokens` Trend of `usage.prompt_tokens`
+- `llamacpp_prompt_tokens_total_counter` Counter of `usage.prompt_tokens`
+- `llamacpp_completion_tokens` Trend of `usage.completion_tokens`
+- `llamacpp_completion_tokens_total_counter` Counter of `usage.completion_tokens`
+- `llamacpp_completions_truncated_rate` Rate of completions truncated, i.e. if `finish_reason === 'length'`
+- `llamacpp_completions_stop_rate` Rate of completions stopped by the model, i.e. if `finish_reason === 'stop'`
+
+The script will fail if too many completions are truncated, see `llamacpp_completions_truncated_rate`.
+
+K6 metrics might be compared against [server metrics](../README.md), with:
+
+```shell
+curl http://localhost:8080/metrics
+```
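As a rough companion to the `curl` command above, a hedged Python sketch that pulls `/metrics` and keeps only the llama.cpp series so they can be eyeballed next to the k6 output (assumes the server was started with `--metrics` on localhost:8080):

```python
import requests

text = requests.get("http://localhost:8080/metrics").text
for line in text.splitlines():
    if line.startswith("#"):
        continue  # skip Prometheus HELP/TYPE comment lines
    if "llamacpp" in line:
        print(line)
```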
120
examples/server/bench/script.js
Normal file
120
examples/server/bench/script.js
Normal file
@ -0,0 +1,120 @@
|
|||||||
|
import http from 'k6/http'
|
||||||
|
import {check, sleep} from 'k6'
|
||||||
|
import {SharedArray} from 'k6/data'
|
||||||
|
import {Counter, Rate, Trend} from 'k6/metrics'
|
||||||
|
import exec from 'k6/execution';
|
||||||
|
|
||||||
|
// Server chat completions prefix
|
||||||
|
const server_url = __ENV.SERVER_BENCH_URL ? __ENV.SERVER_BENCH_URL : 'http://localhost:8080/v1'
|
||||||
|
|
||||||
|
// Number of total prompts in the dataset - default 10m / 10 seconds/request * number of users
|
||||||
|
const n_prompt = __ENV.SERVER_BENCH_N_PROMPTS ? parseInt(__ENV.SERVER_BENCH_N_PROMPTS) : 600 / 10 * 8
|
||||||
|
|
||||||
|
// Model name to request
|
||||||
|
const model = __ENV.SERVER_BENCH_MODEL_ALIAS ? __ENV.SERVER_BENCH_MODEL_ALIAS : 'my-model'
|
||||||
|
|
||||||
|
// Dataset path
|
||||||
|
const dataset_path = __ENV.SERVER_BENCH_DATASET ? __ENV.SERVER_BENCH_DATASET : './ShareGPT_V3_unfiltered_cleaned_split.json'
|
||||||
|
|
||||||
|
// Max tokens to predict
|
||||||
|
const max_tokens = __ENV.SERVER_BENCH_MAX_TOKENS ? parseInt(__ENV.SERVER_BENCH_MAX_TOKENS) : 512

// Max prompt tokens
const n_prompt_tokens = __ENV.SERVER_BENCH_MAX_PROMPT_TOKENS ? parseInt(__ENV.SERVER_BENCH_MAX_PROMPT_TOKENS) : 1024

// Max slot context
const n_ctx_slot = __ENV.SERVER_BENCH_MAX_CONTEXT ? parseInt(__ENV.SERVER_BENCH_MAX_CONTEXT) : 2048

export function setup() {
    console.info(`Benchmark config: server_url=${server_url} n_prompt=${n_prompt} model=${model} dataset_path=${dataset_path} max_tokens=${max_tokens}`)
}

const data = new SharedArray('conversations', function () {
    const tokenizer = (message) => message.split(/[\s,'".?]/)

    return JSON.parse(open(dataset_path))
        // Filter out the conversations with less than 2 turns.
        .filter(data => data["conversations"].length >= 2)
        .filter(data => data["conversations"][0]["from"] === "human")
        .map(data => {
            return {
                prompt: data["conversations"][0]["value"],
                n_prompt_tokens: tokenizer(data["conversations"][0]["value"]).length,
                n_completion_tokens: tokenizer(data["conversations"][1]["value"]).length,
            }
        })
        // Filter out too short sequences
        .filter(conv => conv.n_prompt_tokens >= 4 && conv.n_completion_tokens >= 4)
        // Filter out too long sequences.
        .filter(conv => conv.n_prompt_tokens <= n_prompt_tokens && conv.n_prompt_tokens + conv.n_completion_tokens <= n_ctx_slot)
        // Keep only first n prompts
        .slice(0, n_prompt)
})

const llamacpp_prompt_tokens = new Trend('llamacpp_prompt_tokens')
const llamacpp_completion_tokens = new Trend('llamacpp_completion_tokens')
const llamacpp_tokens_second = new Trend('llamacpp_tokens_second')

const llamacpp_prompt_tokens_total_counter = new Counter('llamacpp_prompt_tokens_total_counter')
const llamacpp_completion_tokens_total_counter = new Counter('llamacpp_completion_tokens_total_counter')

const llamacpp_completions_truncated_rate = new Rate('llamacpp_completions_truncated_rate')
const llamacpp_completions_stop_rate = new Rate('llamacpp_completions_stop_rate')

export const options = {
    thresholds: {
        llamacpp_completions_truncated_rate: [
            // more than 80% of truncated input will abort the test
            {threshold: 'rate < 0.8', abortOnFail: true, delayAbortEval: '1m'},
        ],
    },
    duration: '10m',
    vus: 8,
}

export default function () {
    const conversation = data[exec.scenario.iterationInInstance % data.length]
    const payload = {
        "messages": [
            {
                "role": "system",
                "content": "You are ChatGPT, an AI assistant.",
            },
            {
                "role": "user",
                "content": conversation.prompt,
            }
        ],
        "model": model,
        "stream": false,
        "max_tokens": max_tokens
    }

    const body = JSON.stringify(payload)

    let res = http.post(`${server_url}/chat/completions`, body, {
        headers: {'Content-Type': 'application/json'},
        timeout: '300s'
    })

    check(res, {'success completion': (r) => r.status === 200})

    if (res.status === 200) {
        const completions = res.json()

        llamacpp_prompt_tokens.add(completions.usage.prompt_tokens)
        llamacpp_prompt_tokens_total_counter.add(completions.usage.prompt_tokens)

        llamacpp_completion_tokens.add(completions.usage.completion_tokens)
        llamacpp_completion_tokens_total_counter.add(completions.usage.completion_tokens)

        llamacpp_completions_truncated_rate.add(completions.choices[0].finish_reason === 'length')
        llamacpp_completions_stop_rate.add(completions.choices[0].finish_reason === 'stop')

        llamacpp_tokens_second.add(completions.usage.total_tokens / res.timings.duration * 1.e3)
    } else {
        console.error(`response: ${res.body} request=${payload}`)
    }

    sleep(0.3)
}
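For reference, the SharedArray filter above expects ShareGPT-style records; only the "conversations[*].from" and "conversations[*].value" fields are read by the filter/map chain. A minimal illustrative entry (the values here are invented, not taken from the dataset) would look like:

// Illustrative ShareGPT-style record; field names match what the script reads,
// the text values are made up for this example.
const exampleRecord = {
    "conversations": [
        { "from": "human", "value": "Explain how attention works in transformers." },
        { "from": "gpt",   "value": "Attention lets the model weight each input token when producing the next one..." }
    ]
}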
@ -26,8 +26,9 @@ const propOrder = grammarJsonSchemaPropOrder

 let grammar = null
 if (grammarJsonSchemaFile) {
-    const schema = JSON.parse(readFileSync(grammarJsonSchemaFile, 'utf-8'))
-    const converter = new SchemaConverter(propOrder)
+    let schema = JSON.parse(readFileSync(grammarJsonSchemaFile, 'utf-8'))
+    const converter = new SchemaConverter({prop_order: propOrder, allow_fetch: true})
+    schema = await converter.resolveRefs(schema, grammarJsonSchemaFile)
     converter.visit(schema, '')
     grammar = converter.formatGrammar()
 }
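A sketch of the new options-object API shown in the hunk above, under the assumption that it runs in a module context where top-level await is available; the schema literal and prop_order values are invented for illustration:

// Mirrors the updated call pattern: options object, then $ref resolution,
// then visit/formatGrammar as before.
const converter = new SchemaConverter({ prop_order: { name: 0, age: 1 }, allow_fetch: true })
let schema = { type: 'object', properties: { name: { type: 'string' }, age: { type: 'integer' } } }
schema = await converter.resolveRefs(schema, 'input')   // resolves any $ref nodes before grammar generation
converter.visit(schema, '')
console.log(converter.formatGrammar())                  // emits the generated GBNF grammar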
@ -231,255 +231,256 @@ unsigned char completion_js[] = {
    [ regenerated hex bytes of the embedded completion.js omitted; the readable completion.js changes appear in the @ -96,17 +96,17 @@ hunk below ]
 };
-unsigned int completion_js_len = 5782;
+size_t completion_js_len = 5796;
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,225 +0,0 @@
#pragma once

#include <string>
#include <vector>
#include <set>
#include <mutex>
#include <condition_variable>
#include <unordered_map>

#include "json.hpp"
#include "utils.hpp"

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::json;

inline static json oaicompat_completion_params_parse(
    const struct llama_model * model,
    const json &body, /* openai api json semantics */
    const std::string &chat_template)
{
    json llama_params;

    llama_params["__oaicompat"] = true;

    // Map OpenAI parameters to llama.cpp parameters
    //
    // For parameters that are defined by the OpenAI documentation (e.g.
    // temperature), we explicitly specify OpenAI's intended default; we
    // need to do that because sometimes OpenAI disagrees with llama.cpp
    //
    // https://platform.openai.com/docs/api-reference/chat/create
    llama_sampling_params default_sparams;
    llama_params["model"] = json_value(body, "model", std::string("unknown"));
    llama_params["prompt"] = format_chat(model, chat_template, body["messages"]);
    llama_params["cache_prompt"] = json_value(body, "cache_prompt", false);
    llama_params["temperature"] = json_value(body, "temperature", 0.0);
    llama_params["top_k"] = json_value(body, "top_k", default_sparams.top_k);
    llama_params["top_p"] = json_value(body, "top_p", 1.0);
    llama_params["n_predict"] = json_value(body, "max_tokens", -1);
    llama_params["logit_bias"] = json_value(body, "logit_bias", json::object());
    llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
    llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0);
    llama_params["seed"] = json_value(body, "seed", LLAMA_DEFAULT_SEED);
    llama_params["stream"] = json_value(body, "stream", false);
    llama_params["mirostat"] = json_value(body, "mirostat", default_sparams.mirostat);
    llama_params["mirostat_tau"] = json_value(body, "mirostat_tau", default_sparams.mirostat_tau);
    llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", default_sparams.mirostat_eta);
    llama_params["penalize_nl"] = json_value(body, "penalize_nl", default_sparams.penalize_nl);
    llama_params["typical_p"] = json_value(body, "typical_p", default_sparams.typical_p);
    llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", default_sparams.penalty_last_n);
    llama_params["ignore_eos"] = json_value(body, "ignore_eos", false);
    llama_params["tfs_z"] = json_value(body, "tfs_z", default_sparams.tfs_z);

    if (body.count("grammar") != 0) {
        llama_params["grammar"] = json_value(body, "grammar", json::object());
    }

    // Handle 'stop' field
    if (body.contains("stop") && body["stop"].is_string()) {
        llama_params["stop"] = json::array({body["stop"].get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Ensure there is ChatML-specific end sequence among stop words
    llama_params["stop"].push_back("<|im_end|>");

    return llama_params;
}

inline static json format_final_response_oaicompat(const json &request, const task_result &response, bool streaming = false)
{
    json result = response.result_json;

    bool stopped_word = result.count("stopped_word") != 0;
    bool stopped_eos = json_value(result, "stopped_eos", false);
    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
    int num_prompt_tokens = json_value(result, "tokens_evaluated", 0);
    std::string content = json_value(result, "content", std::string(""));

    std::string finish_reason = "length";
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }

    json choices =
        streaming ? json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"delta", json::object()}}})
                  : json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"message", json{{"content", content},
                                                       {"role", "assistant"}}}}});

    std::time_t t = std::time(0);

    json res =
        json{{"choices", choices},
             {"created", t},
             {"model",
              json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
             {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
             {"usage",
              json{{"completion_tokens", num_tokens_predicted},
                   {"prompt_tokens", num_prompt_tokens},
                   {"total_tokens", num_tokens_predicted + num_prompt_tokens}}},
             {"id", gen_chatcmplid()}};

    if (server_verbose) {
        res["__verbose"] = result;
    }

    if (result.contains("completion_probabilities")) {
        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
    }

    return res;
}

// return value is vector as there is one case where we might need to generate two responses
inline static std::vector<json> format_partial_response_oaicompat(const task_result &response) {
    json result = response.result_json;

    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
        return std::vector<json>({response.result_json});
    }

    bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

    bool stopped_word = json_value(result, "stopped_word", false);
    bool stopped_eos = json_value(result, "stopped_eos", false);
    bool stopped_limit = json_value(result, "stopped_limit", false);
    std::string content = json_value(result, "content", std::string(""));

    std::string finish_reason;
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }
    if (stopped_limit) {
        finish_reason = "length";
    }

    std::time_t t = std::time(0);

    json choices;

    if (!finish_reason.empty()) {
        choices = json::array({json{{"finish_reason", finish_reason},
                                    {"index", 0},
                                    {"delta", json::object()}}});
    } else {
        if (first) {
            if (content.empty()) {
                choices = json::array({json{{"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{{"role", "assistant"}}}}});
            } else {
                // We have to send this as two updates to conform to openai behavior
                json initial_ret = json{{"choices", json::array({json{
                                        {"finish_reason", nullptr},
                                        {"index", 0},
                                        {"delta", json{
                                            {"role", "assistant"}
                                        }}}})},
                            {"created", t},
                            {"id", gen_chatcmplid()},
                            {"model", modelname},
                            {"object", "chat.completion.chunk"}};

                json second_ret = json{
                            {"choices", json::array({json{{"finish_reason", nullptr},
                                                          {"index", 0},
                                                          {"delta", json{
                                                              {"content", content}}}
                                                          }})},
                            {"created", t},
                            {"id", gen_chatcmplid()},
                            {"model", modelname},
                            {"object", "chat.completion.chunk"}};

                return std::vector<json>({initial_ret, second_ret});
            }
        } else {
            // Some idiosyncrasy in task processing logic makes several trailing calls
            // with empty content, we ignore these at the calee site.
            if (content.empty()) {
                return std::vector<json>({json::object()});
            }

            choices = json::array({json{
                {"finish_reason", nullptr},
                {"index", 0},
                {"delta",
                 json{
                     {"content", content},
                 }},
            }});
        }
    }

    json ret = json{{"choices", choices},
                    {"created", t},
                    {"id", gen_chatcmplid()},
                    {"model", modelname},
                    {"object", "chat.completion.chunk"}};

    return std::vector<json>({ret});
}

inline static json format_embeddings_response_oaicompat(const json &request, const json &embeddings)
{
    json res =
        json{
            {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
            {"object", "list"},
            {"usage",
             json{{"prompt_tokens", 0},
                  {"total_tokens", 0}}},
            {"data", embeddings}
        };
    return res;
}
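For context, the helper deleted above translated an OpenAI-style chat request into llama.cpp's native fields; an illustrative request body it would have accepted (field names are taken from the mapping above, the concrete values are invented) looks like:

// Illustrative OpenAI-style request body; comments note the llama.cpp fields
// the removed helper mapped each parameter to.
const exampleRequest = {
    model: 'gpt-3.5-turbo-0613',
    messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user',   content: 'Hello!' }
    ],
    temperature: 0.0,   // -> llama_params["temperature"]
    top_p: 1.0,         // -> llama_params["top_p"]
    max_tokens: 128,    // -> llama_params["n_predict"]
    stop: ['###'],      // string or array; '<|im_end|>' was always appended as well
    stream: false
}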
@ -96,17 +96,17 @@ export async function* llama(prompt, params = {}, config = {}) {
          }
        }
        if (result.error) {
+          try {
            result.error = JSON.parse(result.error);
-            if (result.error.content.includes('slot unavailable')) {
+            if (result.error.message.includes('slot unavailable')) {
              // Throw an error to be caught by upstream callers
              throw new Error('slot unavailable');
            } else {
-              console.error(`llama.cpp error: ${result.error.content}`);
+              console.error(`llama.cpp error [${result.error.code} - ${result.error.type}]: ${result.error.message}`);
            }
+          } catch(e) {
+            console.error(`llama.cpp error ${result.error}`)
          }
-        }
-        if (result.error) {
-          result.error = JSON.parse(result.error);
-          console.error(`llama.cpp error: ${result.error.content}`);
        }
      }
    }
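The updated client code above now reads result.error.code, result.error.type and result.error.message. A hypothetical error payload shaped that way (the concrete values are invented for illustration) and its effect:

// Hypothetical server error object as the updated client expects it.
const exampleError = {
    code: 503,
    type: 'unavailable_error',
    message: 'slot unavailable'
}
// With the change above, the client would log:
//   llama.cpp error [503 - unavailable_error]: slot unavailable
// and, because the message contains 'slot unavailable', rethrow for upstream callers.
// Payloads that fail JSON.parse fall through to the new catch branch instead.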
@ -630,14 +630,16 @@

             const grammarJsonSchemaPropOrder = signal('')
             const updateGrammarJsonSchemaPropOrder = (el) => grammarJsonSchemaPropOrder.value = el.target.value
-            const convertJSONSchemaGrammar = () => {
+            const convertJSONSchemaGrammar = async () => {
               try {
-                const schema = JSON.parse(params.value.grammar)
-                const converter = new SchemaConverter(
-                  grammarJsonSchemaPropOrder.value
+                let schema = JSON.parse(params.value.grammar)
+                const converter = new SchemaConverter({
+                  prop_order: grammarJsonSchemaPropOrder.value
                     .split(',')
-                    .reduce((acc, cur, i) => ({ ...acc, [cur.trim()]: i }), {})
-                )
+                    .reduce((acc, cur, i) => ({ ...acc, [cur.trim()]: i }), {}),
+                  allow_fetch: true,
+                })
+                schema = await converter.resolveRefs(schema, 'input')
                 converter.visit(schema, '')
                 params.value = {
                   ...params.value,
File diff suppressed because one or more lines are too long
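The hunk that follows rewrites public/json-schema-to-grammar.mjs and, among other things, adds a resolveRefs pass for $ref nodes. A minimal schema that pass can now handle (invented purely for illustration; only the $ref mechanics come from the diff below):

// Invented example: a schema using a local $ref, which the new resolveRefs
// pass rewrites/inlines before grammar generation.
const schemaWithRef = {
    definitions: { name: { type: 'string' } },
    type: 'object',
    properties: { name: { $ref: '#/definitions/name' } }
}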
@ -1,112 +1,538 @@
// WARNING: This file was ported from json-schema-to-grammar.py, please fix bugs / add features there first.
const SPACE_RULE = '" "?';

const PRIMITIVE_RULES = {
  boolean: '("true" | "false") space',
  number: '("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? space',
  integer: '("-"? ([0-9] | [1-9] [0-9]*)) space',
  value: 'object | array | string | number | boolean',
  object: '"{" space ( string ":" space value ("," space string ":" space value)* )? "}" space',
  array: '"[" space ( value ("," space value)* )? "]" space',
  uuid: '"\\"" ' + [8, 4, 4, 4, 12].map(n => [...new Array(n)].map(_ => '[0-9a-fA-F]').join('')).join(' "-" ') + ' "\\"" space',
  string: ` "\\"" (
        [^"\\\\] |
        "\\\\" (["\\\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F])
      )* "\\"" space`,
  null: '"null" space',
};

const OBJECT_RULE_NAMES = ['object', 'array', 'string', 'number', 'boolean', 'null', 'value'];

// TODO: support "uri", "email" string formats
const DATE_RULES = {
  'date' : '[0-9] [0-9] [0-9] [0-9] "-" ( "0" [1-9] | "1" [0-2] ) "-" ( \"0\" [1-9] | [1-2] [0-9] | "3" [0-1] )',
  'time' : '([01] [0-9] | "2" [0-3]) ":" [0-5] [0-9] ":" [0-5] [0-9] ( "." [0-9] [0-9] [0-9] )? ( "Z" | ( "+" | "-" ) ( [01] [0-9] | "2" [0-3] ) ":" [0-5] [0-9] )',
  'date-time': 'date "T" time',
  'date-string': '"\\"" date "\\"" space',
  'time-string': '"\\"" time "\\"" space',
  'date-time-string': '"\\"" date-time "\\"" space',
};

const RESERVED_NAMES = {'root': true, ...PRIMITIVE_RULES, ...DATE_RULES};

const INVALID_RULE_CHARS_RE = /[^\dA-Za-z-]+/g;
const GRAMMAR_LITERAL_ESCAPE_RE = /[\n\r"]/g;
const GRAMMAR_RANGE_LITERAL_ESCAPE_RE = /[\n\r"\]\-\\]/g;
const GRAMMAR_LITERAL_ESCAPES = { '\r': '\\r', '\n': '\\n', '"': '\\"', '-': '\\-', ']': '\\]' };

const NON_LITERAL_SET = new Set('|.()[]{}*+?');
const ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = new Set('[]()|{}*+?');

export class SchemaConverter {
  constructor(options) {
    this._propOrder = options.prop_order || {};
    this._allowFetch = options.allow_fetch || false;
    this._dotall = options.dotall || false;
    this._rules = {'space': SPACE_RULE};
    this._refs = {};
    this._refsBeingResolved = new Set();
  }

  _formatLiteral(literal) {
    const escaped = literal.replace(
      GRAMMAR_LITERAL_ESCAPE_RE,
      m => GRAMMAR_LITERAL_ESCAPES[m]
    );
    return `"${escaped}"`;
  }

  _formatRangeChar(literal) {
    return JSON.stringify(literal).slice(1, -1).replace(
      GRAMMAR_RANGE_LITERAL_ESCAPE_RE,
      m => GRAMMAR_LITERAL_ESCAPES[m]
    );
  }

  _addRule(name, rule) {
    let escName = name.replace(INVALID_RULE_CHARS_RE, '-');
    let key = escName;

    if (escName in this._rules) {
      if (this._rules[escName] === rule) {
        return key;
      }

      let i = 0;
      while ((`${escName}${i}` in this._rules) && (this._rules[`${escName}${i}`] !== rule)) {
        i += 1;
      }
      key = `${escName}${i}`;
    }

    this._rules[key] = rule;
    return key;
  }

  async resolveRefs(schema, url) {
    const visit = async (n) => {
      if (Array.isArray(n)) {
        return Promise.all(n.map(visit));
      } else if (typeof n === 'object' && n !== null) {
        let ref = n.$ref;
        let target;
        if (ref !== undefined && !this._refs[ref]) {
          if (ref.startsWith('https://')) {
            if (!this._allowFetch) {
              throw new Error('Fetching remote schemas is not allowed (use --allow-fetch for force)');
            }
            const fetch = (await import('node-fetch')).default;

            const fragSplit = ref.split('#');
            const baseUrl = fragSplit[0];

            target = this._refs[baseUrl];
            if (!target) {
              target = await this.resolveRefs(await fetch(ref).then(res => res.json()), baseUrl);
              this._refs[baseUrl] = target;
            }

            if (fragSplit.length === 1 || fragSplit[fragSplit.length - 1] === '') {
              return target;
            }
          } else if (ref.startsWith('#/')) {
            target = schema;
            ref = `${url}${ref}`;
            n.$ref = ref;
          } else {
            throw new Error(`Unsupported ref ${ref}`);
          }

          const selectors = ref.split('#')[1].split('/').slice(1);
          for (const sel of selectors) {
            if (!target || !(sel in target)) {
              throw new Error(`Error resolving ref ${ref}: ${sel} not in ${JSON.stringify(target)}`);
            }
            target = target[sel];
          }

          this._refs[ref] = target;
        } else {
          await Promise.all(Object.values(n).map(visit));
        }
      }

      return n;
    };

    return visit(schema);
  }

  _generateUnionRule(name, altSchemas) {
    return altSchemas
      .map((altSchema, i) => this.visit(altSchema, `${name ?? ''}${name ? '-' : 'alternative-'}${i}`))
      .join(' | ');
  }

  _visitPattern(pattern, name) {
    if (!pattern.startsWith('^') || !pattern.endsWith('$')) {
      throw new Error('Pattern must start with "^" and end with "$"');
    }
    pattern = pattern.slice(1, -1);
    const subRuleIds = {};

    let i = 0;
    const length = pattern.length;

    const getDot = () => {
      let rule;
      if (this._dotall) {
        rule = '[\\U00000000-\\U0010FFFF]';
      } else {
        // Accept any character... except \n and \r line break chars (\x0A and \xOD)
        rule = '[\\U00000000-\\x09\\x0B\\x0C\\x0E-\\U0010FFFF]';
      }
      return this._addRule('dot', rule);
    };

    const toRule = ([s, isLiteral]) => isLiteral ? "\"" + s + "\"" : s;

    const transform = () => {
      const start = i;
      // For each component of this sequence, store its string representation and whether it's a literal.
      // We only need a flat structure here to apply repetition operators to the last item, and
      // to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially
      // (GBNF's syntax is luckily very close to regular expressions!)
      const seq = [];

      const joinSeq = () => {
        const ret = [];
        for (const [isLiteral, g] of groupBy(seq, x => x[1])) {
          if (isLiteral) {
            ret.push([[...g].map(x => x[0]).join(''), true]);
          } else {
            ret.push(...g);
          }
        }
        if (ret.length === 1) {
          return ret[0];
        }
        return [ret.map(x => toRule(x)).join(' '), false];
      };

      while (i < length) {
        const c = pattern[i];
        if (c === '.') {
          seq.push([getDot(), false]);
          i += 1;
        } else if (c === '(') {
          i += 1;
          if (i < length) {
            if (pattern[i] === '?') {
              throw new Error(`Unsupported pattern syntax "${pattern[i]}" at index ${i} of /${pattern}/`);
            }
          }
          seq.push([`(${toRule(transform())})`, false]);
        } else if (c === ')') {
          i += 1;
          if (start <= 0 || pattern[start - 1] !== '(') {
            throw new Error(`Unbalanced parentheses; start = ${start}, i = ${i}, pattern = ${pattern}`);
          }
          return joinSeq();
        } else if (c === '[') {
          let squareBrackets = c;
          i += 1;
          while (i < length && pattern[i] !== ']') {
            if (pattern[i] === '\\') {
              squareBrackets += pattern.slice(i, i + 2);
              i += 2;
            } else {
              squareBrackets += pattern[i];
              i += 1;
            }
          }
          if (i >= length) {
            throw new Error(`Unbalanced square brackets; start = ${start}, i = ${i}, pattern = ${pattern}`);
          }
          squareBrackets += ']';
          i += 1;
          seq.push([squareBrackets, false]);
        } else if (c === '|') {
          seq.push(['|', false]);
          i += 1;
        } else if (c === '*' || c === '+' || c === '?') {
          seq[seq.length - 1] = [toRule(seq[seq.length - 1]) + c, false];
          i += 1;
        } else if (c === '{') {
          let curlyBrackets = c;
          i += 1;
          while (i < length && pattern[i] !== '}') {
            curlyBrackets += pattern[i];
            i += 1;
          }
          if (i >= length) {
            throw new Error(`Unbalanced curly brackets; start = ${start}, i = ${i}, pattern = ${pattern}`);
          }
          curlyBrackets += '}';
          i += 1;
          const nums = curlyBrackets.slice(1, -1).split(',').map(s => s.trim());
          let minTimes, maxTimes;
          if (nums.length === 1) {
            minTimes = parseInt(nums[0], 10);
            maxTimes = minTimes;
          } else {
            if (nums.length !== 2) {
              throw new Error(`Invalid quantifier ${curlyBrackets}`);
            }
            minTimes = nums[0] ? parseInt(nums[0], 10) : 0;
            maxTimes = nums[1] ? parseInt(nums[1], 10) : Infinity;
          }

          let [sub, subIsLiteral] = seq[seq.length - 1];

          if (minTimes === 0 && maxTimes === Infinity) {
            seq[seq.length - 1] = [`${sub}*`, false];
          } else if (minTimes === 0 && maxTimes === 1) {
            seq[seq.length - 1] = [`${sub}?`, false];
          } else if (minTimes === 1 && maxTimes === Infinity) {
            seq[seq.length - 1] = [`${sub}+`, false];
          } else {
            if (!subIsLiteral) {
              let id = subRuleIds[sub];
              if (id === undefined) {
                id = this._addRule(`${name}-${Object.keys(subRuleIds).length + 1}`, sub);
                subRuleIds[sub] = id;
              }
              sub = id;
            }

            const repeatedSub = Array.from({ length: minTimes }, () => subIsLiteral ? `"${sub.slice(1, -1).repeat(minTimes)}"` : sub);
            const optionalSub = maxTimes !== undefined ? Array.from({ length: maxTimes - minTimes }, () => `${sub}?`) : [`${sub}*`];
            seq[seq.length - 1] = [repeatedSub.concat(optionalSub).join(' '), false];
          }
        } else {
          let literal = '';
          while (i < length) {
            if (pattern[i] === '\\' && i < length - 1) {
              const next = pattern[i + 1];
              if (ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS.has(next)) {
                i += 1;
                literal += pattern[i];
                i += 1;
              } else {
                literal += pattern.slice(i, i + 2);
                i += 2;
              }
            } else if (pattern[i] === '"') {
              literal += '\\"';
              i += 1;
            } else if (!NON_LITERAL_SET.has(pattern[i]) &&
                (i === length - 1 || literal === '' || pattern[i + 1] === '.' || !NON_LITERAL_SET.has(pattern[i+1]))) {
              literal += pattern[i];
              i += 1;
            } else {
              break;
            }
          }
          if (literal !== '') {
            seq.push([literal, true]);
          }
        }
      }

      return joinSeq();
    };

    return this._addRule(name, "\"\\\"\" " + toRule(transform()) + " \"\\\"\" space")
  }

  _resolveRef(ref) {
    let refName = ref.split('/').pop();
    if (!(refName in this._rules) && !this._refsBeingResolved.has(ref)) {
      this._refsBeingResolved.add(ref);
      const resolved = this._refs[ref];
      refName = this.visit(resolved, refName);
      this._refsBeingResolved.delete(ref);
    }
    return refName;
  }

  _generateConstantRule(value) {
    return this._formatLiteral(JSON.stringify(value));
  }

  visit(schema, name) {
    const schemaType = schema.type;
    const schemaFormat = schema.format;
    const ruleName = name in RESERVED_NAMES ? name + '-' : name == '' ? 'root' : name;

    const ref = schema.$ref;
    if (ref !== undefined) {
      return this._addRule(ruleName, this._resolveRef(ref));
    } else if (schema.oneOf || schema.anyOf) {
      return this._addRule(ruleName, this._generateUnionRule(name, schema.oneOf || schema.anyOf));
    } else if (Array.isArray(schemaType)) {
      return this._addRule(ruleName, this._generateUnionRule(name, schemaType.map(t => ({ type: t }))));
    } else if ('const' in schema) {
      return this._addRule(ruleName, this._generateConstantRule(schema.const));
    } else if ('enum' in schema) {
      const rule = schema.enum.map(v => this._generateConstantRule(v)).join(' | ');
      return this._addRule(ruleName, rule);
    } else if ((schemaType === undefined || schemaType === 'object') &&
               ('properties' in schema ||
                ('additionalProperties' in schema && schema.additionalProperties !== true))) {
      const required = new Set(schema.required || []);
      const properties = Object.entries(schema.properties ?? {});
      return this._addRule(ruleName, this._buildObjectRule(properties, required, name, schema.additionalProperties));
    } else if ((schemaType === undefined || schemaType === 'object') && 'allOf' in schema) {
      const required = new Set();
      const properties = [];
      const addComponent = (compSchema, isRequired) => {
        const ref = compSchema.$ref;
        if (ref !== undefined) {
          compSchema = this._refs[ref];
        }

        if ('properties' in compSchema) {
          for (const [propName, propSchema] of Object.entries(compSchema.properties)) {
            properties.push([propName, propSchema]);
            if (isRequired) {
              required.add(propName);
            }
          }
        }
      };

      for (const t of schema.allOf) {
        if ('anyOf' in t) {
          for (const tt of t.anyOf) {
            addComponent(tt, false);
          }
        } else {
          addComponent(t, true);
        }
      }

      return this._addRule(ruleName, this._buildObjectRule(properties, required, name, /* additionalProperties= */ false));
    } else if ((schemaType === undefined || schemaType === 'array') && ('items' in schema || 'prefixItems' in schema)) {
      const items = schema.items ?? schema.prefixItems;
      if (Array.isArray(items)) {
        return this._addRule(
          ruleName,
          '"[" space ' +
            items.map((item, i) => this.visit(item, `${name ?? ''}${name ? '-' : ''}tuple-${i}`)).join(' "," space ') +
            ' "]" space'
        );
      } else {
        const itemRuleName = this.visit(items, `${name ?? ''}${name ? '-' : ''}item`);
        const listItemOperator = `( "," space ${itemRuleName} )`;
        let successiveItems = '';
        let minItems = schema.minItems || 0;
        const maxItems = schema.maxItems;
        if (minItems > 0) {
          successiveItems = listItemOperator.repeat(minItems - 1);
          minItems--;
        }
        if (maxItems !== undefined && maxItems > minItems) {
          successiveItems += `${listItemOperator}?`.repeat(maxItems - minItems - 1);
        } else {
          successiveItems += `${listItemOperator}*`;
        }
        const rule = minItems === 0
          ? `"[" space ( ${itemRuleName} ${successiveItems} )? "]" space`
          : `"[" space ${itemRuleName} ${successiveItems} "]" space`;
        return this._addRule(ruleName, rule);
      }
    } else if ((schemaType === undefined || schemaType === 'string') && 'pattern' in schema) {
      return this._visitPattern(schema.pattern, ruleName);
    } else if ((schemaType === undefined || schemaType === 'string') && /^uuid[1-5]?$/.test(schema.format || '')) {
      return this._addRule(
        ruleName === 'root' ? 'root' : schemaFormat,
        PRIMITIVE_RULES['uuid'])
    } else if ((schemaType === undefined || schemaType === 'string') && schema.format in DATE_RULES) {
      for (const [t, r] of Object.entries(DATE_RULES)) {
        this._addRule(t, r);
      }
      return schemaFormat + '-string';
    } else if ((schemaType === 'object') || (Object.keys(schema).length === 0)) {
      for (const n of OBJECT_RULE_NAMES) {
        this._addRule(n, PRIMITIVE_RULES[n]);
      }
      return this._addRule(ruleName, 'object');
    } else {
      if (!(schemaType in PRIMITIVE_RULES)) {
        throw new Error(`Unrecognized schema: ${JSON.stringify(schema)}`);
      }
      // TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero
      return this._addRule(ruleName === 'root' ? 'root' : schemaType, PRIMITIVE_RULES[schemaType]);
    }
  }

  _buildObjectRule(properties, required, name, additionalProperties) {
    const propOrder = this._propOrder;
    // sort by position in prop_order (if specified) then by original order
    const sortedProps = properties.map(([k]) => k).sort((a, b) => {
      const orderA = propOrder[a] || Infinity;
      const orderB = propOrder[b] || Infinity;
      return orderA - orderB || properties.findIndex(([k]) => k === a) - properties.findIndex(([k]) => k === b);
    });

    const propKvRuleNames = {};
    for (const [propName, propSchema] of properties) {
      const propRuleName = this.visit(propSchema, `${name ?? ''}${name ? '-' : ''}${propName}`);
      propKvRuleNames[propName] = this._addRule(
        `${name ?? ''}${name ? '-' : ''}${propName}-kv`,
        `${this._formatLiteral(JSON.stringify(propName))} space ":" space ${propRuleName}`
      );
    }
    const requiredProps = sortedProps.filter(k => required.has(k));
    const optionalProps = sortedProps.filter(k => !required.has(k));

    if (typeof additionalProperties === 'object' || additionalProperties === true) {
      const subName = `${name ?? ''}${name ? '-' : ''}additional`;
      const valueRule = this.visit(additionalProperties === true ? {} : additionalProperties, `${subName}-value`);
      propKvRuleNames['*'] = this._addRule(
        `${subName}-kv`,
        `${this._addRule('string', PRIMITIVE_RULES['string'])} ":" space ${valueRule}`);
      optionalProps.push('*');
    }

    let rule = '"{" space ';
    rule += requiredProps.map(k => propKvRuleNames[k]).join(' "," space ');

    if (optionalProps.length > 0) {
      rule += ' (';
      if (requiredProps.length > 0) {
        rule += ' "," space ( ';
      }

      const getRecursiveRefs = (ks, firstIsOptional) => {
        const [k, ...rest] = ks;
        const kvRuleName = propKvRuleNames[k];
        let res;
        if (k === '*') {
          res = this._addRule(
            `${name ?? ''}${name ? '-' : ''}additional-kvs`,
            `${kvRuleName} ( "," space ` + kvRuleName + ` )*`
          )
        } else if (firstIsOptional) {
          res = `( "," space ${kvRuleName} )?`;
        } else {
          res = kvRuleName;
        }
        if (rest.length > 0) {
          res += ' ' + this._addRule(
            `${name ?? ''}${name ? '-' : ''}${k}-rest`,
            getRecursiveRefs(rest, true)
          );
        }
        return res;
      };

      rule += optionalProps.map((_, i) => getRecursiveRefs(optionalProps.slice(i), false)).join(' | ');
      if (requiredProps.length > 0) {
        rule += ' )';
      }
      rule += ' )?';
    }

    rule += ' "}" space';

    return rule;
  }

  formatGrammar() {
    let grammar = '';
    for (const [name, rule] of Object.entries(this._rules).sort(([a], [b]) => a.localeCompare(b))) {
      grammar += `${name} ::= ${rule}\n`;
    }
    return grammar;
  }
}

// Helper function to group elements by a key function
function* groupBy(iterable, keyFn) {
  let lastKey = null;
  let group = [];
  for (const element of iterable) {
    const key = keyFn(element);
    if (lastKey !== null && key !== lastKey) {
      yield [lastKey, group];
      group = [];
    }
    group.push(element);
    lastKey = key;
  }
  if (group.length > 0) {
    yield [lastKey, group];
  }
}
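For readers skimming this commit, here is a minimal usage sketch of the converter above, mirroring the call sequence the index.html fragment earlier in this diff uses (`resolveRefs` → `visit` → `formatGrammar`). The schema, property names, and prop_order values are hypothetical and serve only to illustrate the API shown in this file.

```js
import { SchemaConverter } from './json-schema-to-grammar.mjs';

// Hypothetical schema used only for illustration.
const schema = {
  type: 'object',
  properties: {
    answer: { type: 'string' },
    confidence: { type: 'number' },
  },
  required: ['answer'],
};

const converter = new SchemaConverter({
  prop_order: { answer: 0, confidence: 1 },  // controls key ordering in the object rule
  allow_fetch: true,                          // permit resolving remote https:// $refs
  dotall: false,
});

const resolved = await converter.resolveRefs(schema, 'input');
converter.visit(resolved, '');                // '' names the top-level rule "root"
console.log(converter.formatGrammar());       // GBNF grammar, one "name ::= rule" per line
```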
File diff suppressed because it is too large
@ -57,7 +57,7 @@ Feature or Scenario must be annotated with `@llama.cpp` to be included in the de
To run a scenario annotated with `@bug`, start:

```shell
-DEBUG=ON ./tests.sh --no-skipped --tags bug
+DEBUG=ON ./tests.sh --no-skipped --tags bug --stop
```

After changing logic in `steps.py`, ensure that `@bug` and `@wrong_usage` scenario are updated.
96 examples/server/tests/features/embeddings.feature Normal file
@ -0,0 +1,96 @@
@llama.cpp
@embeddings
Feature: llama.cpp server

  Background: Server startup
    Given a server listening on localhost:8080
    And a model url https://huggingface.co/ggml-org/models/resolve/main/bert-bge-small/ggml-model-f16.gguf
    And a model file ggml-model-f16.gguf
    And a model alias bert-bge-small
    And 42 as server seed
    And 2 slots
    And 1024 as batch size
    And 1024 as ubatch size
    And 2048 KV cache size
    And embeddings extraction
    Then the server is starting
    Then the server is healthy

  Scenario: Embedding
    When embeddings are computed for:
      """
      What is the capital of Bulgaria ?
      """
    Then embeddings are generated

  Scenario: OAI Embeddings compatibility
    Given a model bert-bge-small
    When an OAI compatible embeddings computation request for:
      """
      What is the capital of Spain ?
      """
    Then embeddings are generated

  Scenario: OAI Embeddings compatibility with multiple inputs
    Given a model bert-bge-small
    Given a prompt:
      """
      In which country Paris is located ?
      """
    And a prompt:
      """
      Is Madrid the capital of Spain ?
      """
    When an OAI compatible embeddings computation request for multiple inputs
    Then embeddings are generated

  Scenario: Multi users embeddings
    Given a prompt:
      """
      Write a very long story about AI.
      """
    And a prompt:
      """
      Write another very long music lyrics.
      """
    And a prompt:
      """
      Write a very long poem.
      """
    And a prompt:
      """
      Write a very long joke.
      """
    Given concurrent embedding requests
    Then the server is busy
    Then the server is idle
    Then all embeddings are generated

  Scenario: Multi users OAI compatibility embeddings
    Given a prompt:
      """
      In which country Paris is located ?
      """
    And a prompt:
      """
      Is Madrid the capital of Spain ?
      """
    And a prompt:
      """
      What is the biggest US city ?
      """
    And a prompt:
      """
      What is the capital of Bulgaria ?
      """
    And a model bert-bge-small
    Given concurrent OAI embedding requests
    Then the server is busy
    Then the server is idle
    Then all embeddings are generated

  Scenario: All embeddings should be the same
    Given 10 fixed prompts
    And a model bert-bge-small
    Given concurrent OAI embedding requests
    Then all embeddings are the same
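As an aside, a hedged sketch of the kind of request the OAI-compatibility scenarios above exercise against the server started in this feature's Background (localhost:8080, model alias bert-bge-small). The `/v1/embeddings` route and payload shape are assumed from the "OAI compatible embeddings computation request" wording, not spelled out in this diff.

```js
// Hypothetical client call; endpoint path and response shape follow the
// OpenAI embeddings API that these scenarios emulate.
const res = await fetch('http://localhost:8080/v1/embeddings', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'bert-bge-small',
    input: ['What is the capital of Bulgaria ?', 'Is Madrid the capital of Spain ?'],
  }),
});
const { data } = await res.json();
console.log(data.map(d => d.embedding.length)); // one embedding vector per input
```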
@ -1,16 +1,18 @@
 import os
+import signal
 import socket
-import subprocess
+import sys
 import time
+import traceback
 from contextlib import closing
-from signal import SIGKILL
+from subprocess import TimeoutExpired


 def before_scenario(context, scenario):
     context.debug = 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON'
     if context.debug:
-        print("DEBUG=ON\n")
-        print(f"\x1b[33;42mStarting new scenario: {scenario.name}!\x1b[0m\n")
+        print("DEBUG=ON")
+        print(f"\x1b[33;42mStarting new scenario: {scenario.name}!\x1b[0m")
     port = 8080
     if 'PORT' in os.environ:
         port = int(os.environ['PORT'])
@ -19,11 +21,12 @@ def before_scenario(context, scenario):


 def after_scenario(context, scenario):
-    if context.server_process is None:
-        return
-    if scenario.status == "failed":
-        if 'GITHUB_ACTIONS' in os.environ:
-            print(f"\x1b[33;101mSCENARIO FAILED: {scenario.name} server logs:\x1b[0m\n\n")
-            if os.path.isfile('llama.log'):
-                with closing(open('llama.log', 'r')) as f:
-                    for line in f:
+    try:
+        if 'server_process' not in context or context.server_process is None:
+            return
+        if scenario.status == "failed":
+            if 'GITHUB_ACTIONS' in os.environ:
+                print(f"\x1b[33;101mSCENARIO FAILED: {scenario.name} server logs:\x1b[0m\n")
+                if os.path.isfile('llama.log'):
+                    with closing(open('llama.log', 'r')) as f:
+                        for line in f:
@ -31,42 +34,38 @@ def after_scenario(context, scenario):
         if not is_server_listening(context.server_fqdn, context.server_port):
             print("\x1b[33;101mERROR: Server stopped listening\x1b[0m")

-    if not pid_exists(context.server_process.pid):
-        assert False, f"Server not running pid={context.server_process.pid} ..."
-
-    print(f"stopping server pid={context.server_process.pid} ...")
-    context.server_process.kill()
-    # Wait few for socket to free up
-    time.sleep(0.05)
-
-    attempts = 0
-    while is_server_listening(context.server_fqdn, context.server_port):
-        print(f"stopping server pid={context.server_process.pid} ...")
-        os.kill(context.server_process.pid, SIGKILL)
-        time.sleep(0.1)
-        attempts += 1
-        if attempts > 5:
-            print(f"Server dangling exits, killing all {context.server_path} ...")
-            process = subprocess.run(['killall', '-9', context.server_path],
-                                     stderr=subprocess.PIPE,
-                                     universal_newlines=True)
-            print(process)
+        if context.server_process.poll() is not None:
+            assert False, f"Server not running pid={context.server_process.pid} ..."
+
+        server_graceful_shutdown(context)  # SIGINT
+
+        try:
+            context.server_process.wait(0.5)
+        except TimeoutExpired:
+            print(f"server still alive after 500ms, force-killing pid={context.server_process.pid} ...")
+            context.server_process.kill()  # SIGKILL
+            context.server_process.wait()
+
+        while is_server_listening(context.server_fqdn, context.server_port):
+            time.sleep(0.1)
+    except Exception:
+        print("ignoring error in after_scenario:")
+        traceback.print_exc(file=sys.stdout)
+
+
+def server_graceful_shutdown(context):
+    print(f"shutting down server pid={context.server_process.pid} ...")
+    if os.name == 'nt':
+        interrupt = signal.CTRL_C_EVENT
+    else:
+        interrupt = signal.SIGINT
+    context.server_process.send_signal(interrupt)


 def is_server_listening(server_fqdn, server_port):
     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
         result = sock.connect_ex((server_fqdn, server_port))
-        return result == 0
-
-
-def pid_exists(pid):
-    """Check whether pid exists in the current process table."""
-    import errno
-    if pid < 0:
-        return False
-    try:
-        os.kill(pid, 0)
-    except OSError as e:
-        return e.errno == errno.EPERM
-    else:
-        return True
+        _is_server_listening = result == 0
+        if _is_server_listening:
+            print(f"server is listening on {server_fqdn}:{server_port}...")
+        return _is_server_listening
@ -6,10 +6,9 @@ Feature: Parallel
     Given a server listening on localhost:8080
     And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
     And 42 as server seed
-    And 512 as batch size
-    And 64 KV cache size
+    And 128 as batch size
+    And 256 KV cache size
     And 2 slots
-    And embeddings extraction
     And continuous batching
     Then the server is starting
     Then the server is healthy
@ -77,6 +76,7 @@ Feature: Parallel
       | disabled | 128 |
       | enabled | 64 |

+
   Scenario: Multi users with total number of tokens to predict exceeds the KV Cache size #3969
     Given a prompt:
       """
@ -99,48 +99,3 @@ Feature: Parallel
     Then the server is busy
     Then the server is idle
     Then all prompts are predicted
-
-  Scenario: Multi users embeddings
-    Given a prompt:
-      """
-      Write a very long story about AI.
-      """
-    And a prompt:
-      """
-      Write another very long music lyrics.
-      """
-    And a prompt:
-      """
-      Write a very long poem.
-      """
-    And a prompt:
-      """
-      Write a very long joke.
-      """
-    Given concurrent embedding requests
-    Then the server is busy
-    Then the server is idle
-    Then all embeddings are generated
-
-  Scenario: Multi users OAI compatibility embeddings
-    Given a prompt:
-      """
-      In which country Paris is located ?
-      """
-    And a prompt:
-      """
-      Is Madrid the capital of Spain ?
-      """
-    And a prompt:
-      """
-      What is the biggest US city ?
-      """
-    And a prompt:
-      """
-      What is the capital of Bulgaria ?
-      """
-    And a model tinyllama-2
-    Given concurrent OAI embedding requests
-    Then the server is busy
-    Then the server is idle
-    Then all embeddings are generated
|
@ -37,8 +37,25 @@ Feature: Security
       | llama.cpp | no |
       | hackme | raised |

+  Scenario Outline: OAI Compatibility (invalid response formats)
+    Given a system prompt test
+    And a user prompt test
+    And a response format <response_format>
+    And a model test
+    And 2 max tokens to predict
+    And streaming is disabled
+    Given an OAI compatible chat completions request with raised api error
+
+    Examples: Prompts
+      | response_format |
+      | {"type": "sound"} |
+      | {"type": "json_object", "schema": 123} |
+      | {"type": "json_object", "schema": {"type": 123}} |
+      | {"type": "json_object", "schema": {"type": "hiccup"}} |
+
+
   Scenario Outline: CORS Options
+    Given a user api key llama.cpp
     When an OPTIONS request is sent from <origin>
     Then CORS header <cors_header> is set to <cors_header_value>

@ -4,17 +4,17 @@ Feature: llama.cpp server

   Background: Server startup
     Given a server listening on localhost:8080
-    And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
+    And a model url https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories260K.gguf
+    And a model file stories260K.gguf
     And a model alias tinyllama-2
     And 42 as server seed
     # KV Cache corresponds to the total amount of tokens
     # that can be stored across all independent sequences: #4130
     # see --ctx-size and #5568
-    And 32 KV cache size
-    And 512 as batch size
-    And 1 slots
-    And embeddings extraction
-    And 32 server max tokens to predict
+    And 256 KV cache size
+    And 32 as batch size
+    And 2 slots
+    And 64 server max tokens to predict
     And prometheus compatible metrics exposed
     Then the server is starting
     Then the server is healthy
@ -23,17 +23,35 @@ Feature: llama.cpp server
     Then the server is ready
     And all slots are idle


   Scenario Outline: Completion
     Given a prompt <prompt>
     And <n_predict> max tokens to predict
     And a completion request with no api error
     Then <n_predicted> tokens are predicted matching <re_content>
+    And the completion is <truncated> truncated
+    And <n_prompt> prompt tokens are processed
     And prometheus metrics are exposed
+    And metric llamacpp:tokens_predicted is <n_predicted>

     Examples: Prompts
-      | prompt | n_predict | re_content | n_predicted |
-      | I believe the meaning of life is | 8 | (read\|going)+ | 8 |
-      | Write a joke about AI | 64 | (park\|friends\|scared\|always)+ | 32 |
+      | prompt | n_predict | re_content | n_prompt | n_predicted | truncated |
+      | I believe the meaning of life is | 8 | (read\|going)+ | 18 | 8 | not |
+      | Write a joke about AI from a very long prompt which will not be truncated | 256 | (princesses\|everyone\|kids\|Anna\|forest)+ | 46 | 64 | not |

+  Scenario: Completion prompt truncated
+    Given a prompt:
+      """
+      Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+      Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
+      Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
+      Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+      """
+    And a completion request with no api error
+    Then 64 tokens are predicted matching fun|Annaks|popcorns|pictry|bowl
+    And the completion is truncated
+    And 109 prompt tokens are processed


   Scenario Outline: OAI Compatibility
     Given a model <model>
@ -43,39 +61,30 @@ Feature: llama.cpp server
     And streaming is <enable_streaming>
     Given an OAI compatible chat completions request with no api error
     Then <n_predicted> tokens are predicted matching <re_content>
+    And <n_prompt> prompt tokens are processed
+    And the completion is <truncated> truncated

     Examples: Prompts
-      | model | system_prompt | user_prompt | max_tokens | re_content | n_predicted | enable_streaming |
-      | llama-2 | Book | What is the best book | 8 | (Mom\|what)+ | 8 | disabled |
-      | codellama70b | You are a coding assistant. | Write the fibonacci function in c++. | 64 | (thanks\|happy\|bird)+ | 32 | enabled |
+      | model | system_prompt | user_prompt | max_tokens | re_content | n_prompt | n_predicted | enable_streaming | truncated |
+      | llama-2 | Book | What is the best book | 8 | (Here\|what)+ | 77 | 8 | disabled | not |
+      | codellama70b | You are a coding assistant. | Write the fibonacci function in c++. | 128 | (thanks\|happy\|bird\|Annabyear)+ | -1 | 64 | enabled | |

-  Scenario: Embedding
-    When embeddings are computed for:
-      """
-      What is the capital of Bulgaria ?
-      """
-    Then embeddings are generated
-
-  Scenario: OAI Embeddings compatibility
-    Given a model tinyllama-2
-    When an OAI compatible embeddings computation request for:
-      """
-      What is the capital of Spain ?
-      """
-    Then embeddings are generated
-
-  Scenario: OAI Embeddings compatibility with multiple inputs
-    Given a model tinyllama-2
-    Given a prompt:
-      """
-      In which country Paris is located ?
-      """
-    And a prompt:
-      """
-      Is Madrid the capital of Spain ?
-      """
-    When an OAI compatible embeddings computation request for multiple inputs
-    Then embeddings are generated
+  Scenario Outline: OAI Compatibility w/ response format
+    Given a model test
+    And a system prompt test
+    And a user prompt test
+    And a response format <response_format>
+    And 10 max tokens to predict
+    Given an OAI compatible chat completions request with no api error
+    Then <n_predicted> tokens are predicted matching <re_content>
+
+    Examples: Prompts
+      | response_format | n_predicted | re_content |
+      | {"type": "json_object", "schema": {"const": "42"}} | 5 | "42" |
+      | {"type": "json_object", "schema": {"items": [{"type": "integer"}]}} | 10 | \[ -300 \] |
+      | {"type": "json_object"} | 10 | \{ " Jacky. |

   Scenario: Tokenize / Detokenize
     When tokenizing:
@ -5,11 +5,14 @@ import os
 import re
 import socket
 import subprocess
+import sys
+import threading
 import time
 from contextlib import closing
 from re import RegexFlag

 import aiohttp
+import numpy as np
 import openai
 from behave import step
 from behave.api.async_step import async_run_until_complete
@ -17,23 +20,33 @@ from huggingface_hub import hf_hub_download
 from prometheus_client import parser


-@step(u"a server listening on {server_fqdn}:{server_port}")
+@step("a server listening on {server_fqdn}:{server_port}")
 def step_server_config(context, server_fqdn, server_port):
     context.server_fqdn = server_fqdn
     context.server_port = int(server_port)
+    context.n_gpu_layer = None
     if 'PORT' in os.environ:
         context.server_port = int(os.environ['PORT'])
         print(f"$PORT set, overriding server port with to {context.server_port}")
+    if 'FQDN' in os.environ:
+        context.server_fqdn = os.environ['FQDN']
+        print(f"$FQDN set, overriding server fqdn with to {context.server_fqdn}")
+    if 'N_GPU_LAYERS' in os.environ:
+        context.n_gpu_layer = int(os.environ['N_GPU_LAYERS'])
+        print(f"$N_GPU_LAYERS set, overriding n_gpu_layer with to {context.n_gpu_layer}")
+
     context.base_url = f'http://{context.server_fqdn}:{context.server_port}'

     context.model_alias = None
+    context.model_file = None
+    context.model_url = None
     context.n_batch = None
+    context.n_ubatch = None
     context.n_ctx = None
     context.n_ga = None
     context.n_ga_w = None
-    context.n_gpu_layer = None
     context.n_predict = None
+    context.n_prompts = 0
     context.n_server_predict = None
     context.n_slots = None
     context.prompt_prefix = None
@ -46,30 +59,41 @@ def step_server_config(context, server_fqdn, server_port):
     context.seed = None
     context.server_seed = None
     context.user_api_key = None
+    context.response_format = None

     context.tasks_result = []
     context.concurrent_tasks = []
     context.prompts = []


-@step(u'a model file {hf_file} from HF repo {hf_repo}')
+@step('a model file {hf_file} from HF repo {hf_repo}')
 def step_download_hf_model(context, hf_file, hf_repo):
     context.model_file = hf_hub_download(repo_id=hf_repo, filename=hf_file)
     if context.debug:
-        print(f"model file: {context.model_file}\n")
+        print(f"model file: {context.model_file}")


-@step(u'a model alias {model_alias}')
+@step('a model file {model_file}')
+def step_model_file(context, model_file):
+    context.model_file = model_file
+
+
+@step('a model url {model_url}')
+def step_model_url(context, model_url):
+    context.model_url = model_url
+
+
+@step('a model alias {model_alias}')
 def step_model_alias(context, model_alias):
     context.model_alias = model_alias


-@step(u'{seed:d} as server seed')
+@step('{seed:d} as server seed')
 def step_seed(context, seed):
     context.server_seed = seed


-@step(u'{ngl:d} GPU offloaded layers')
+@step('{ngl:d} GPU offloaded layers')
 def step_n_gpu_layer(context, ngl):
     if 'N_GPU_LAYERS' in os.environ:
         new_ngl = int(os.environ['N_GPU_LAYERS'])
@ -79,59 +103,67 @@ def step_n_gpu_layer(context, ngl):
     context.n_gpu_layer = ngl


-@step(u'{n_ctx:d} KV cache size')
+@step('{n_ctx:d} KV cache size')
 def step_n_ctx(context, n_ctx):
     context.n_ctx = n_ctx


-@step(u'{n_slots:d} slots')
+@step('{n_slots:d} slots')
 def step_n_slots(context, n_slots):
     context.n_slots = n_slots


-@step(u'{n_predict:d} server max tokens to predict')
+@step('{n_predict:d} server max tokens to predict')
 def step_server_n_predict(context, n_predict):
     context.n_server_predict = n_predict


-@step(u'continuous batching')
+@step('continuous batching')
 def step_server_continuous_batching(context):
     context.server_continuous_batching = True


-@step(u'embeddings extraction')
+@step('embeddings extraction')
 def step_server_embeddings(context):
     context.server_embeddings = True


-@step(u'prometheus compatible metrics exposed')
+@step('prometheus compatible metrics exposed')
 def step_server_metrics(context):
     context.server_metrics = True


-@step(u"the server is starting")
+@step("the server is starting")
 def step_start_server(context):
     start_server_background(context)
     attempts = 0
+    max_attempts = 20
+    if 'GITHUB_ACTIONS' in os.environ:
+        max_attempts *= 2
+
+    addrs = socket.getaddrinfo(context.server_fqdn, context.server_port, type=socket.SOCK_STREAM)
+    family, typ, proto, _, sockaddr = addrs[0]
+
     while True:
-        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
-            result = sock.connect_ex((context.server_fqdn, context.server_port))
+        with closing(socket.socket(family, typ, proto)) as sock:
+            result = sock.connect_ex(sockaddr)
             if result == 0:
                 print("\x1b[33;46mserver started!\x1b[0m")
                 return
             attempts += 1
-            if attempts > 20:
+            if attempts > max_attempts:
                 assert False, "server not started"
             print(f"waiting for server to start, connect error code = {result}...")
             time.sleep(0.1)


-@step(u"the server is {expecting_status}")
+@step("the server is {expecting_status}")
 @async_run_until_complete
 async def step_wait_for_the_server_to_be_started(context, expecting_status):
     match expecting_status:
         case 'healthy':
-            await wait_for_health_status(context, context.base_url, 200, 'ok')
+            await wait_for_health_status(context, context.base_url, 200, 'ok',
+                                         timeout=30)

         case 'ready' | 'idle':
             await wait_for_health_status(context, context.base_url, 200, 'ok',
@ -155,7 +187,7 @@ async def step_wait_for_the_server_to_be_started(context, expecting_status):
             assert False, "unknown status"


-@step(u'all slots are {expected_slot_status_string}')
+@step('all slots are {expected_slot_status_string}')
 @async_run_until_complete
 async def step_all_slots_status(context, expected_slot_status_string):
     match expected_slot_status_string:
@ -171,7 +203,7 @@ async def step_all_slots_status(context, expected_slot_status_string):
     await request_slots_status(context, expected_slots)


-@step(u'a completion request with {api_error} api error')
+@step('a completion request with {api_error} api error')
 @async_run_until_complete
 async def step_request_completion(context, api_error):
     expect_api_error = api_error == 'raised'
@ -184,113 +216,148 @@ async def step_request_completion(context, api_error):
                                            user_api_key=context.user_api_key)
     context.tasks_result.append(completion)
     if context.debug:
-        print(f"Completion response: {completion}\n")
+        print(f"Completion response: {completion}")
     if expect_api_error:
         assert completion == 401, f"completion must be an 401 status code: {completion}"


-@step(u'{predicted_n:d} tokens are predicted matching {re_content}')
+@step('{predicted_n:d} tokens are predicted matching {re_content}')
 def step_n_tokens_predicted_with_content(context, predicted_n, re_content):
-    assert_n_tokens_predicted(context.tasks_result.pop(), predicted_n, re_content)
+    context.completion = context.tasks_result.pop()
+    assert_n_tokens_predicted(context.completion, predicted_n, re_content)


-@step(u'{predicted_n:d} tokens are predicted')
+@step('{predicted_n:d} tokens are predicted')
 def step_n_tokens_predicted(context, predicted_n):
-    assert_n_tokens_predicted(context.tasks_result.pop(), predicted_n)
+    context.completion = context.tasks_result.pop()
+    assert_n_tokens_predicted(context.completion, predicted_n)


-@step(u'a user prompt {user_prompt}')
+@step('the completion is truncated')
+def step_assert_completion_truncated(context):
+    step_assert_completion_truncated(context, '')
+
+
+@step('the completion is {truncated} truncated')
+def step_assert_completion_truncated(context, truncated):
+    truncated = truncated != "not"
+    assert context.completion['truncated'] == truncated, f'{context.completion}'
+
+
+@step('{n_prompt:d} prompt tokens are processed')
+def step_impl(context, n_prompt):
+    assert n_prompt < 0 or n_prompt == context.completion['timings']['prompt_n'], f"n_prompt={context.completion['timings']['prompt_n']}"
+
+
+@step('a user prompt {user_prompt}')
 def step_user_prompt(context, user_prompt):
     context.prompts.append(user_prompt)
+    context.n_prompts = len(context.prompts)


-@step(u'a system prompt {system_prompt}')
+@step('a system prompt {system_prompt}')
 def step_system_prompt(context, system_prompt):
     context.system_prompt = system_prompt


-@step(u'a model {model}')
+@step('a model {model}')
 def step_model(context, model):
     context.model = model


-@step(u'{max_tokens:d} max tokens to predict')
+@step('{max_tokens:d} max tokens to predict')
 def step_max_tokens(context, max_tokens):
     context.n_predict = max_tokens


-@step(u'streaming is {enable_streaming}')
+@step('a response format {response_format}')
+def step_response_format(context, response_format):
+    context.response_format = json.loads(response_format)
+
+
+@step('streaming is {enable_streaming}')
 def step_streaming(context, enable_streaming):
     context.enable_streaming = enable_streaming == 'enabled'


-@step(u'a user api key {user_api_key}')
+@step('a user api key {user_api_key}')
 def step_user_api_key(context, user_api_key):
     context.user_api_key = user_api_key


-@step(u'no user api key')
+@step('no user api key')
 def step_no_user_api_key(context):
     context.user_api_key = None


-@step(u'a user api key ')
+@step('a user api key ')
 def step_no_user_api_key_space(context):
     context.user_api_key = None


-@step(u'a server api key {server_api_key}')
+@step('a server api key {server_api_key}')
 def step_server_api_key(context, server_api_key):
     context.server_api_key = server_api_key


-@step(u'{n_junk:d} as number of junk')
+@step('{n_junk:d} as number of junk')
 def step_n_junk(context, n_junk):
     context.n_junk = n_junk


-@step(u'{n_batch:d} as batch size')
+@step('{n_batch:d} as batch size')
 def step_n_batch(context, n_batch):
     context.n_batch = n_batch


-@step(u'{seed:d} as seed')
+@step('{n_ubatch:d} as ubatch size')
+def step_n_ubatch(context, n_ubatch):
+    context.n_ubatch = n_ubatch
+
+
+@step('{seed:d} as seed')
 def step_seed(context, seed):
     context.seed = seed


-@step(u'a prefix prompt')
+@step('a prefix prompt')
 def step_prompt_prefix(context):
-    context.prompt_prefix = context.text
+    context.prompt_prefix = context_text(context)


-@step(u'a junk suffix prompt')
+@step('a junk suffix prompt')
 def step_prompt_junk_suffix(context):
-    context.prompt_junk_suffix = context.text
+    context.prompt_junk_suffix = context_text(context)


-@step(u'a suffix prompt')
+@step('a suffix prompt')
 def step_prompt_suffix(context):
-    context.prompt_suffix = context.text
+    context.prompt_suffix = context_text(context)


-@step(u'{n_ga:d} group attention factor'
-      u' to extend context size through self-extend')
+@step('{n_ga:d} group attention factor'
+      ' to extend context size through self-extend')
 def step_impl(context, n_ga):
     context.n_ga = n_ga


-@step(u'{n_ga_w:d} group attention width to extend context size through self-extend')
+@step('{n_ga_w:d} group attention width to extend context size through self-extend')
 def step_impl(context, n_ga_w):
     context.n_ga_w = n_ga_w


-@step(u'a passkey prompt template')
+@step('a passkey prompt template')
 def step_prompt_passkey(context):
-    context.prompt_passkey = context.text
+    context.prompt_passkey = context_text(context)


-@step(u'a "{passkey}" passkey challenge prompt with the passkey inserted every {i_pos:d} junk')
+@step('{n_prompts:d} fixed prompts')
+def step_fixed_prompts(context, n_prompts):
+    context.prompts.extend([str(0)*(context.n_batch if context.n_batch is not None else 512) for i in range(n_prompts)])
+    context.n_prompts = n_prompts
|
||||||
|
|
||||||
|
|
||||||
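A minimal sketch with made-up numbers: each fixed prompt is the character '0' repeated batch-size times, with 512 as the fallback when no batch size has been configured.

    n_prompts, n_batch = 3, None                  # hypothetical scenario values
    prompts = [str(0) * (n_batch if n_batch is not None else 512) for _ in range(n_prompts)]
    assert len(prompts) == 3 and len(prompts[0]) == 512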
|
@step('a "{passkey}" passkey challenge prompt with the passkey inserted every {i_pos:d} junk')
|
||||||
def step_prompt_passkey(context, passkey, i_pos):
|
def step_prompt_passkey(context, passkey, i_pos):
|
||||||
prompt = ""
|
prompt = ""
|
||||||
for i in range(context.n_junk):
|
for i in range(context.n_junk):
|
||||||
@ -299,15 +366,16 @@ def step_prompt_passkey(context, passkey, i_pos):
|
|||||||
prompt += context.prompt_junk_suffix
|
prompt += context.prompt_junk_suffix
|
||||||
if context.debug:
|
if context.debug:
|
||||||
passkey_highlight = "\x1b[33m" + passkey + "\x1b[0m"
|
passkey_highlight = "\x1b[33m" + passkey + "\x1b[0m"
|
||||||
print(f"Passkey challenge:\n```{prompt.replace(passkey, passkey_highlight)}```\n")
|
print(f"Passkey challenge:\n```{prompt.replace(passkey, passkey_highlight)}```")
|
||||||
context.prompts.append(context.prompt_prefix + prompt + context.prompt_suffix)
|
context.prompts.append(context.prompt_prefix + prompt + context.prompt_suffix)
|
||||||
|
context.n_prompts = len(context.prompts)
|
||||||
|
|
||||||
|
|
||||||
@step(u'an OAI compatible chat completions request with {api_error} api error')
|
@step('an OAI compatible chat completions request with {api_error} api error')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_oai_chat_completions(context, api_error):
|
async def step_oai_chat_completions(context, api_error):
|
||||||
if context.debug:
|
if context.debug:
|
||||||
print(f"Submitting OAI compatible completions request...\n")
|
print(f"Submitting OAI compatible completions request...")
|
||||||
expect_api_error = api_error == 'raised'
|
expect_api_error = api_error == 'raised'
|
||||||
completion = await oai_chat_completions(context.prompts.pop(),
|
completion = await oai_chat_completions(context.prompts.pop(),
|
||||||
context.system_prompt,
|
context.system_prompt,
|
||||||
@ -322,6 +390,9 @@ async def step_oai_chat_completions(context, api_error):
|
|||||||
enable_streaming=context.enable_streaming
|
enable_streaming=context.enable_streaming
|
||||||
if hasattr(context, 'enable_streaming') else None,
|
if hasattr(context, 'enable_streaming') else None,
|
||||||
|
|
||||||
|
response_format=context.response_format
|
||||||
|
if hasattr(context, 'response_format') else None,
|
||||||
|
|
||||||
seed=await completions_seed(context),
|
seed=await completions_seed(context),
|
||||||
|
|
||||||
user_api_key=context.user_api_key
|
user_api_key=context.user_api_key
|
||||||
@ -338,17 +409,19 @@ async def step_oai_chat_completions(context, api_error):
|
|||||||
print(f"Completion response: {completion}")
|
print(f"Completion response: {completion}")
|
||||||
|
|
||||||
|
|
||||||
@step(u'a prompt')
|
@step('a prompt')
|
||||||
def step_a_prompt(context):
|
def step_a_prompt(context):
|
||||||
context.prompts.append(context.text)
|
context.prompts.append(context_text(context))
|
||||||
|
context.n_prompts = len(context.prompts)
|
||||||
|
|
||||||
|
|
||||||
@step(u'a prompt {prompt}')
|
@step('a prompt {prompt}')
|
||||||
def step_a_prompt_prompt(context, prompt):
|
def step_a_prompt_prompt(context, prompt):
|
||||||
context.prompts.append(prompt)
|
context.prompts.append(prompt)
|
||||||
|
context.n_prompts = len(context.prompts)
|
||||||
|
|
||||||
|
|
||||||
@step(u'concurrent completion requests')
|
@step('concurrent completion requests')
|
||||||
@async_run_until_complete()
|
@async_run_until_complete()
|
||||||
async def step_concurrent_completion_requests(context):
|
async def step_concurrent_completion_requests(context):
|
||||||
await concurrent_requests(context,
|
await concurrent_requests(context,
|
||||||
@ -364,7 +437,7 @@ async def step_concurrent_completion_requests(context):
|
|||||||
'user_api_key') else None)
|
'user_api_key') else None)
|
||||||
|
|
||||||
|
|
||||||
@step(u'concurrent OAI completions requests')
|
@step('concurrent OAI completions requests')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_oai_chat_completions(context):
|
async def step_oai_chat_completions(context):
|
||||||
await concurrent_requests(context, oai_chat_completions,
|
await concurrent_requests(context, oai_chat_completions,
|
||||||
@ -379,12 +452,14 @@ async def step_oai_chat_completions(context):
|
|||||||
if hasattr(context, 'n_predict') else None,
|
if hasattr(context, 'n_predict') else None,
|
||||||
enable_streaming=context.enable_streaming
|
enable_streaming=context.enable_streaming
|
||||||
if hasattr(context, 'enable_streaming') else None,
|
if hasattr(context, 'enable_streaming') else None,
|
||||||
|
response_format=context.response_format
|
||||||
|
if hasattr(context, 'response_format') else None,
|
||||||
seed=await completions_seed(context),
|
seed=await completions_seed(context),
|
||||||
user_api_key=context.user_api_key
|
user_api_key=context.user_api_key
|
||||||
if hasattr(context, 'user_api_key') else None)
|
if hasattr(context, 'user_api_key') else None)
|
||||||
|
|
||||||
|
|
||||||
@step(u'concurrent OAI completions requests no v1')
|
@step('concurrent OAI completions requests no v1')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_oai_chat_completions(context):
|
async def step_oai_chat_completions(context):
|
||||||
await concurrent_requests(context, oai_chat_completions,
|
await concurrent_requests(context, oai_chat_completions,
|
||||||
@ -399,6 +474,8 @@ async def step_oai_chat_completions(context):
|
|||||||
if hasattr(context, 'n_predict') else None,
|
if hasattr(context, 'n_predict') else None,
|
||||||
enable_streaming=context.enable_streaming
|
enable_streaming=context.enable_streaming
|
||||||
if hasattr(context, 'enable_streaming') else None,
|
if hasattr(context, 'enable_streaming') else None,
|
||||||
|
response_format=context.response_format
|
||||||
|
if hasattr(context, 'response_format') else None,
|
||||||
seed=context.seed
|
seed=context.seed
|
||||||
if hasattr(context, 'seed') else
|
if hasattr(context, 'seed') else
|
||||||
context.server_seed
|
context.server_seed
|
||||||
@ -407,13 +484,13 @@ async def step_oai_chat_completions(context):
|
|||||||
if hasattr(context, 'user_api_key') else None)
|
if hasattr(context, 'user_api_key') else None)
|
||||||
|
|
||||||
|
|
||||||
@step(u'all prompts are predicted')
|
@step('all prompts are predicted')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_all_prompts_are_predicted(context):
|
async def step_all_prompts_are_predicted(context):
|
||||||
await all_prompts_are_predicted(context)
|
await all_prompts_are_predicted(context)
|
||||||
|
|
||||||
|
|
||||||
@step(u'all prompts are predicted with {n_expected_predicted:d} tokens')
|
@step('all prompts are predicted with {n_expected_predicted:d} tokens')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_all_prompts_are_predicted_with_n_tokens(context, n_expected_predicted):
|
async def step_all_prompts_are_predicted_with_n_tokens(context, n_expected_predicted):
|
||||||
await all_prompts_are_predicted(context, n_expected_predicted)
|
await all_prompts_are_predicted(context, n_expected_predicted)
|
||||||
@ -427,44 +504,68 @@ async def all_prompts_are_predicted(context, expected_predicted_n=None):
|
|||||||
assert len(context.concurrent_tasks) == 0, f"{len(context.concurrent_tasks)} pending requests"
|
assert len(context.concurrent_tasks) == 0, f"{len(context.concurrent_tasks)} pending requests"
|
||||||
|
|
||||||
|
|
||||||
@step(u'embeddings are computed for')
|
@step('embeddings are computed for')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_compute_embedding(context):
|
async def step_compute_embedding(context):
|
||||||
context.embeddings = await request_embedding(context.text, base_url=context.base_url)
|
context.n_prompts = 1
|
||||||
|
context.embeddings = await request_embedding(context_text(context), base_url=context.base_url)
|
||||||
|
|
||||||
|
|
||||||
@step(u'embeddings are generated')
|
@step('all embeddings are the same')
|
||||||
|
@async_run_until_complete
|
||||||
|
async def step_all_embeddings_are_the_same(context):
|
||||||
|
n_embedding_requests = await gather_tasks_results(context)
|
||||||
|
assert n_embedding_requests > 0
|
||||||
|
embeddings = []
|
||||||
|
for i in range(n_embedding_requests):
|
||||||
|
embedding = context.tasks_result.pop().pop()
|
||||||
|
embeddings.append(embedding)
|
||||||
|
assert_embeddings(embedding)
|
||||||
|
n = len(embeddings)
|
||||||
|
for i in range(n-1):
|
||||||
|
for j in range(i+1, n):
|
||||||
|
embedding1 = np.array(embeddings[i])
|
||||||
|
embedding2 = np.array(embeddings[j])
|
||||||
|
if context.debug:
|
||||||
|
print(f"embedding1: {embedding1[-8:]}")
|
||||||
|
print(f"embedding2: {embedding2[-8:]}")
|
||||||
|
similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
|
||||||
|
msg = f"Similarity between {i} and {j}: {similarity:.10f}"
|
||||||
|
if context.debug:
|
||||||
|
print(f"{msg}")
|
||||||
|
assert np.isclose(similarity, 1.0, rtol=1e-05, atol=1e-08, equal_nan=False), msg
|
||||||
|
|
||||||
|
|
||||||
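A minimal numeric sketch of the pairwise check above, using fabricated vectors: embeddings pointing in the same direction have a cosine similarity of 1.0, which is what np.isclose asserts for every pair of returned embeddings.

    import numpy as np

    embedding1 = np.array([0.1, 0.2, 0.3])        # fabricated embeddings
    embedding2 = np.array([0.2, 0.4, 0.6])        # same direction, different norm
    similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
    assert np.isclose(similarity, 1.0)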
|
@step('embeddings are generated')
|
||||||
def step_assert_embeddings(context):
|
def step_assert_embeddings(context):
|
||||||
if len(context.prompts) == 0:
|
assert context.n_prompts == len(context.embeddings), (f"unexpected response:\n"
|
||||||
assert_embeddings(context.embeddings)
|
f"context.n_prompts={context.n_prompts}\n"
|
||||||
else:
|
|
||||||
assert len(context.embeddings) == len(context.prompts), (f"unexpected response:\n"
|
|
||||||
f"context.prompts={context.prompts}\n"
|
|
||||||
f"context.embeddings={context.embeddings}")
|
f"context.embeddings={context.embeddings}")
|
||||||
for embedding in context.embeddings:
|
for embedding in context.embeddings:
|
||||||
context.prompts.pop()
|
|
||||||
assert_embeddings(embedding)
|
assert_embeddings(embedding)
|
||||||
|
|
||||||
|
|
||||||
@step(u'an OAI compatible embeddings computation request for')
|
@step('an OAI compatible embeddings computation request for')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_oai_compute_embeddings(context):
|
async def step_oai_compute_embeddings(context):
|
||||||
context.embeddings = await request_oai_embeddings(context.text,
|
context.n_prompts = 1
|
||||||
|
context.embeddings = await request_oai_embeddings(context_text(context),
|
||||||
base_url=context.base_url,
|
base_url=context.base_url,
|
||||||
user_api_key=context.user_api_key,
|
user_api_key=context.user_api_key,
|
||||||
model=context.model)
|
model=context.model)
|
||||||
|
|
||||||
|
|
||||||
@step(u'an OAI compatible embeddings computation request for multiple inputs')
|
@step('an OAI compatible embeddings computation request for multiple inputs')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_oai_compute_embeddings_multiple_inputs(context):
|
async def step_oai_compute_embeddings_multiple_inputs(context):
|
||||||
context.embeddings = await request_oai_embeddings(context.prompts,
|
context.embeddings = await request_oai_embeddings(context.prompts,
|
||||||
base_url=context.base_url,
|
base_url=context.base_url,
|
||||||
user_api_key=context.user_api_key,
|
user_api_key=context.user_api_key,
|
||||||
model=context.model)
|
model=context.model)
|
||||||
|
context.prompts.clear()
|
||||||
|
|
||||||
|
|
||||||
@step(u'concurrent embedding requests')
|
@step('concurrent embedding requests')
|
||||||
@async_run_until_complete()
|
@async_run_until_complete()
|
||||||
async def step_concurrent_embedding_requests(context):
|
async def step_concurrent_embedding_requests(context):
|
||||||
await concurrent_requests(context,
|
await concurrent_requests(context,
|
||||||
@ -473,7 +574,7 @@ async def step_concurrent_embedding_requests(context):
|
|||||||
base_url=context.base_url)
|
base_url=context.base_url)
|
||||||
|
|
||||||
|
|
||||||
@step(u'concurrent OAI embedding requests')
|
@step('concurrent OAI embedding requests')
|
||||||
@async_run_until_complete()
|
@async_run_until_complete()
|
||||||
async def step_concurrent_oai_embedding_requests(context):
|
async def step_concurrent_oai_embedding_requests(context):
|
||||||
await concurrent_requests(context,
|
await concurrent_requests(context,
|
||||||
@ -484,19 +585,19 @@ async def step_concurrent_oai_embedding_requests(context):
|
|||||||
model=context.model)
|
model=context.model)
|
||||||
|
|
||||||
|
|
||||||
@step(u'all embeddings are generated')
|
@step('all embeddings are generated')
|
||||||
@async_run_until_complete()
|
@async_run_until_complete()
|
||||||
async def all_embeddings_are_generated(context):
|
async def all_embeddings_are_generated(context):
|
||||||
n_embedding_requests = await gather_tasks_results(context)
|
n_embedding_requests = await gather_tasks_results(context)
|
||||||
assert n_embedding_requests > 0
|
assert n_embedding_requests == context.n_prompts
|
||||||
for i in range(n_embedding_requests):
|
for i in range(n_embedding_requests):
|
||||||
assert_embeddings(context.tasks_result.pop())
|
assert_embeddings(context.tasks_result.pop().pop())
|
||||||
|
|
||||||
|
|
||||||
@step(u'tokenizing')
|
@step('tokenizing')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_tokenize(context):
|
async def step_tokenize(context):
|
||||||
context.tokenized_text = context.text
|
context.tokenized_text = context_text(context)
|
||||||
async with aiohttp.ClientSession() as session:
|
async with aiohttp.ClientSession() as session:
|
||||||
async with session.post(f'{context.base_url}/tokenize',
|
async with session.post(f'{context.base_url}/tokenize',
|
||||||
json={
|
json={
|
||||||
@ -507,7 +608,7 @@ async def step_tokenize(context):
|
|||||||
context.tokens = tokenize_json['tokens']
|
context.tokens = tokenize_json['tokens']
|
||||||
|
|
||||||
|
|
||||||
@step(u'tokens can be detokenize')
|
@step('tokens can be detokenize')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_detokenize(context):
|
async def step_detokenize(context):
|
||||||
assert len(context.tokens) > 0
|
assert len(context.tokens) > 0
|
||||||
@ -522,22 +623,23 @@ async def step_detokenize(context):
|
|||||||
assert context.tokenized_text == detokenize_json['content'].strip()
|
assert context.tokenized_text == detokenize_json['content'].strip()
|
||||||
|
|
||||||
|
|
||||||
@step(u'an OPTIONS request is sent from {origin}')
|
@step('an OPTIONS request is sent from {origin}')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_options_request(context, origin):
|
async def step_options_request(context, origin):
|
||||||
async with aiohttp.ClientSession() as session:
|
async with aiohttp.ClientSession() as session:
|
||||||
|
headers = {'Authorization': f'Bearer {context.user_api_key}', 'Origin': origin}
|
||||||
async with session.options(f'{context.base_url}/v1/chat/completions',
|
async with session.options(f'{context.base_url}/v1/chat/completions',
|
||||||
headers={"Origin": origin}) as response:
|
headers=headers) as response:
|
||||||
assert response.status == 200
|
assert response.status == 200
|
||||||
context.options_response = response
|
context.options_response = response
|
||||||
|
|
||||||
|
|
||||||
@step(u'CORS header {cors_header} is set to {cors_header_value}')
|
@step('CORS header {cors_header} is set to {cors_header_value}')
|
||||||
def step_check_options_header_value(context, cors_header, cors_header_value):
|
def step_check_options_header_value(context, cors_header, cors_header_value):
|
||||||
assert context.options_response.headers[cors_header] == cors_header_value
|
assert context.options_response.headers[cors_header] == cors_header_value
|
||||||
|
|
||||||
|
|
||||||
@step(u'prometheus metrics are exposed')
|
@step('prometheus metrics are exposed')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_prometheus_metrics_exported(context):
|
async def step_prometheus_metrics_exported(context):
|
||||||
async with aiohttp.ClientSession() as session:
|
async with aiohttp.ClientSession() as session:
|
||||||
@ -547,16 +649,26 @@ async def step_prometheus_metrics_exported(context):
|
|||||||
metrics_raw = await metrics_response.text()
|
metrics_raw = await metrics_response.text()
|
||||||
metric_exported = False
|
metric_exported = False
|
||||||
if context.debug:
|
if context.debug:
|
||||||
print(f"/metrics answer:\n{metrics_raw}\n")
|
print(f"/metrics answer:\n{metrics_raw}")
|
||||||
|
context.metrics = {}
|
||||||
for metric in parser.text_string_to_metric_families(metrics_raw):
|
for metric in parser.text_string_to_metric_families(metrics_raw):
|
||||||
match metric.name:
|
match metric.name:
|
||||||
case "llamacpp:kv_cache_usage_ratio":
|
case "llamacpp:kv_cache_usage_ratio":
|
||||||
assert len(metric.samples) > 0
|
assert len(metric.samples) > 0
|
||||||
metric_exported = True
|
metric_exported = True
|
||||||
|
context.metrics[metric.name] = metric
|
||||||
|
assert int(metrics_response.headers["Process-Start-Time-Unix"]) > 0, "no header process start time"
|
||||||
assert metric_exported, "No metrics exported"
|
assert metric_exported, "No metrics exported"
|
||||||
|
|
||||||
|
|
||||||
@step(u'available models')
|
@step('metric {metric_name} is {metric_value:d}')
|
||||||
|
def step_assert_metric_value(context, metric_name, metric_value):
|
||||||
|
if metric_name not in context.metrics:
|
||||||
|
assert False, f"no metric {metric_name} in {context.metrics.keys()}"
|
||||||
|
assert context.metrics[metric_name].samples[0].value == metric_value, f"metric: {context.metrics[metric_name]}"
|
||||||
|
|
||||||
|
|
||||||
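A minimal sketch of how the stored context.metrics map is queried by the 'metric ... is ...' step, using a made-up exposition line parsed with prometheus_client:

    from prometheus_client.parser import text_string_to_metric_families

    metrics_raw = 'llamacpp:requests_processing 0\n'   # fabricated /metrics excerpt
    metrics = {m.name: m for m in text_string_to_metric_families(metrics_raw)}
    assert metrics['llamacpp:requests_processing'].samples[0].value == 0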
|
@step('available models')
|
||||||
def step_available_models(context):
|
def step_available_models(context):
|
||||||
# openai client always expects an api_key
|
# openai client always expects an api_key
|
||||||
openai.api_key = context.user_api_key if context.user_api_key is not None else 'nope'
|
openai.api_key = context.user_api_key if context.user_api_key is not None else 'nope'
|
||||||
@ -564,14 +676,14 @@ def step_available_models(context):
|
|||||||
context.models = openai.Model.list().data
|
context.models = openai.Model.list().data
|
||||||
|
|
||||||
|
|
||||||
@step(u'{n_model:d} models are supported')
|
@step('{n_model:d} models are supported')
|
||||||
def step_supported_models(context, n_model):
|
def step_supported_models(context, n_model):
|
||||||
if context.debug:
|
if context.debug:
|
||||||
print("server models available:", context.models)
|
print("server models available:", context.models)
|
||||||
assert len(context.models) == n_model
|
assert len(context.models) == n_model
|
||||||
|
|
||||||
|
|
||||||
@step(u'model {i_model:d} is {param} {preposition} {param_value}')
|
@step('model {i_model:d} is {param} {preposition} {param_value}')
|
||||||
def step_supported_models(context, i_model, param, preposition, param_value):
|
def step_supported_models(context, i_model, param, preposition, param_value):
|
||||||
assert i_model < len(context.models)
|
assert i_model < len(context.models)
|
||||||
model = context.models[i_model]
|
model = context.models[i_model]
|
||||||
@ -588,11 +700,11 @@ def step_supported_models(context, i_model, param, preposition, param_value):
|
|||||||
|
|
||||||
|
|
||||||
async def concurrent_requests(context, f_completion, *args, **kwargs):
|
async def concurrent_requests(context, f_completion, *args, **kwargs):
|
||||||
n_prompts = len(context.prompts)
|
context.n_prompts = len(context.prompts)
|
||||||
if context.debug:
|
if context.debug:
|
||||||
print(f"starting {n_prompts} concurrent completion requests...")
|
print(f"starting {context.n_prompts} concurrent completion requests...")
|
||||||
assert n_prompts > 0
|
assert context.n_prompts > 0
|
||||||
for prompt_no in range(n_prompts):
|
for prompt_no in range(context.n_prompts):
|
||||||
shifted_args = [context.prompts.pop(), *args]
|
shifted_args = [context.prompts.pop(), *args]
|
||||||
context.concurrent_tasks.append(asyncio.create_task(f_completion(*shifted_args, **kwargs)))
|
context.concurrent_tasks.append(asyncio.create_task(f_completion(*shifted_args, **kwargs)))
|
||||||
await asyncio.sleep(0.1)
|
await asyncio.sleep(0.1)
|
||||||
@ -646,6 +758,7 @@ async def oai_chat_completions(user_prompt,
|
|||||||
model=None,
|
model=None,
|
||||||
n_predict=None,
|
n_predict=None,
|
||||||
enable_streaming=None,
|
enable_streaming=None,
|
||||||
|
response_format=None,
|
||||||
seed=None,
|
seed=None,
|
||||||
user_api_key=None,
|
user_api_key=None,
|
||||||
expect_api_error=None):
|
expect_api_error=None):
|
||||||
@ -671,10 +784,13 @@ async def oai_chat_completions(user_prompt,
|
|||||||
"stream": enable_streaming,
|
"stream": enable_streaming,
|
||||||
"seed": seed
|
"seed": seed
|
||||||
}
|
}
|
||||||
|
if response_format is not None:
|
||||||
|
payload['response_format'] = response_format
|
||||||
completion_response = {
|
completion_response = {
|
||||||
'content': '',
|
'content': '',
|
||||||
'timings': {
|
'timings': {
|
||||||
'predicted_n': 0
|
'predicted_n': 0,
|
||||||
|
'prompt_n': 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if async_client:
|
if async_client:
|
||||||
@ -715,7 +831,8 @@ async def oai_chat_completions(user_prompt,
|
|||||||
completion_response = {
|
completion_response = {
|
||||||
'content': chat_completion_raw['choices'][0]['message'],
|
'content': chat_completion_raw['choices'][0]['message'],
|
||||||
'timings': {
|
'timings': {
|
||||||
'predicted_n': chat_completion_raw['usage']['completion_tokens']
|
'predicted_n': chat_completion_raw['usage']['completion_tokens'],
|
||||||
|
'prompt_n': chat_completion_raw['usage']['prompt_tokens']
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else:
|
else:
|
||||||
@ -729,9 +846,10 @@ async def oai_chat_completions(user_prompt,
|
|||||||
model=model,
|
model=model,
|
||||||
max_tokens=n_predict,
|
max_tokens=n_predict,
|
||||||
stream=enable_streaming,
|
stream=enable_streaming,
|
||||||
|
response_format=payload.get('response_format'),
|
||||||
seed=seed
|
seed=seed
|
||||||
)
|
)
|
||||||
except openai.error.APIError as e:
|
except openai.error.AuthenticationError as e:
|
||||||
if expect_api_error is not None and expect_api_error:
|
if expect_api_error is not None and expect_api_error:
|
||||||
return 401
|
return 401
|
||||||
else:
|
else:
|
||||||
@ -744,13 +862,16 @@ async def oai_chat_completions(user_prompt,
|
|||||||
if 'content' in delta:
|
if 'content' in delta:
|
||||||
completion_response['content'] += delta['content']
|
completion_response['content'] += delta['content']
|
||||||
completion_response['timings']['predicted_n'] += 1
|
completion_response['timings']['predicted_n'] += 1
|
||||||
|
completion_response['truncated'] = chunk.choices[0].finish_reason != 'stop'
|
||||||
else:
|
else:
|
||||||
assert len(chat_completion.choices) == 1
|
assert len(chat_completion.choices) == 1
|
||||||
completion_response = {
|
completion_response = {
|
||||||
'content': chat_completion.choices[0].message.content,
|
'content': chat_completion.choices[0].message.content,
|
||||||
'timings': {
|
'timings': {
|
||||||
'predicted_n': chat_completion.usage.completion_tokens
|
'predicted_n': chat_completion.usage.completion_tokens,
|
||||||
}
|
'prompt_n': chat_completion.usage.prompt_tokens
|
||||||
|
},
|
||||||
|
'truncated': chat_completion.choices[0].finish_reason != 'stop'
|
||||||
}
|
}
|
||||||
if debug:
|
if debug:
|
||||||
print("OAI response formatted to llama.cpp:", completion_response)
|
print("OAI response formatted to llama.cpp:", completion_response)
|
||||||
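Both the streaming and non-streaming paths now derive 'truncated' from the OpenAI finish_reason, so anything other than 'stop' (typically 'length' when the token limit is hit) counts as truncated; a tiny sketch with assumed values:

    for finish_reason, expected in (('stop', False), ('length', True)):
        assert (finish_reason != 'stop') == expected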
@ -765,7 +886,7 @@ async def request_embedding(content, base_url=None):
|
|||||||
}) as response:
|
}) as response:
|
||||||
assert response.status == 200
|
assert response.status == 200
|
||||||
response_json = await response.json()
|
response_json = await response.json()
|
||||||
return response_json['embedding']
|
return [response_json['embedding']]
|
||||||
|
|
||||||
|
|
||||||
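The single-prompt helper now wraps its result in a one-element list so callers can treat single and batched embedding responses uniformly; a sketch with a fabricated vector:

    response_json = {'embedding': [0.0, 0.1, 0.2]}   # fabricated server reply
    embeddings = [response_json['embedding']]
    assert len(embeddings) == 1 and len(embeddings[0]) == 3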
async def request_oai_embeddings(input,
|
async def request_oai_embeddings(input,
|
||||||
@ -775,6 +896,7 @@ async def request_oai_embeddings(input,
|
|||||||
user_api_key = user_api_key if user_api_key is not None else 'nope'
|
user_api_key = user_api_key if user_api_key is not None else 'nope'
|
||||||
if async_client:
|
if async_client:
|
||||||
origin = 'llama.cpp'
|
origin = 'llama.cpp'
|
||||||
|
headers=[]
|
||||||
if user_api_key is not None:
|
if user_api_key is not None:
|
||||||
headers = {'Authorization': f'Bearer {user_api_key}', 'Origin': origin}
|
headers = {'Authorization': f'Bearer {user_api_key}', 'Origin': origin}
|
||||||
async with aiohttp.ClientSession() as session:
|
async with aiohttp.ClientSession() as session:
|
||||||
@ -783,14 +905,21 @@ async def request_oai_embeddings(input,
|
|||||||
"input": input,
|
"input": input,
|
||||||
"model": model,
|
"model": model,
|
||||||
},
|
},
|
||||||
headers=headers) as response:
|
headers=headers,
|
||||||
|
timeout=3600) as response:
|
||||||
assert response.status == 200, f"received status code not expected: {response.status}"
|
assert response.status == 200, f"received status code not expected: {response.status}"
|
||||||
assert response.headers['Access-Control-Allow-Origin'] == origin
|
assert response.headers['Access-Control-Allow-Origin'] == origin
|
||||||
assert response.headers['Content-Type'] == "application/json; charset=utf-8"
|
assert response.headers['Content-Type'] == "application/json; charset=utf-8"
|
||||||
response_json = await response.json()
|
response_json = await response.json()
|
||||||
assert response_json['model'] == model, f"invalid model received: {response_json['model']}"
|
assert response_json['model'] == model, f"invalid model received: {response_json['model']}"
|
||||||
assert response_json['object'] == 'list'
|
assert response_json['object'] == 'list'
|
||||||
return response_json['data']
|
if isinstance(input, collections.abc.Sequence):
|
||||||
|
embeddings = []
|
||||||
|
for an_oai_embeddings in response_json['data']:
|
||||||
|
embeddings.append(an_oai_embeddings['embedding'])
|
||||||
|
else:
|
||||||
|
embeddings = [response_json['data']['embedding']]
|
||||||
|
return embeddings
|
||||||
else:
|
else:
|
||||||
openai.api_key = user_api_key
|
openai.api_key = user_api_key
|
||||||
openai.api_base = f'{base_url}/v1'
|
openai.api_base = f'{base_url}/v1'
|
||||||
@ -804,7 +933,7 @@ async def request_oai_embeddings(input,
|
|||||||
for an_oai_embeddings in oai_embeddings.data:
|
for an_oai_embeddings in oai_embeddings.data:
|
||||||
embeddings.append(an_oai_embeddings.embedding)
|
embeddings.append(an_oai_embeddings.embedding)
|
||||||
else:
|
else:
|
||||||
embeddings = oai_embeddings.data.embedding
|
embeddings = [oai_embeddings.data.embedding]
|
||||||
return embeddings
|
return embeddings
|
||||||
|
|
||||||
|
|
||||||
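A minimal sketch of the normalization added to the async OAI path, with fabricated payloads and a plain list check standing in for collections.abc.Sequence: a list input yields one vector per element of 'data', while a single input is wrapped so the helper always returns a list of vectors.

    def normalize(input, response_json):
        # assumed data layout, mirroring the branching introduced above
        if isinstance(input, list):
            return [d['embedding'] for d in response_json['data']]
        return [response_json['data']['embedding']]

    assert normalize(['a', 'b'], {'data': [{'embedding': [0.1]}, {'embedding': [0.2]}]}) == [[0.1], [0.2]]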
@ -826,18 +955,17 @@ def assert_n_tokens_predicted(completion_response, expected_predicted_n=None, re
|
|||||||
last_match = end
|
last_match = end
|
||||||
highlighted += content[last_match:]
|
highlighted += content[last_match:]
|
||||||
if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
|
if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
|
||||||
print(f"Checking completion response: {highlighted}\n")
|
print(f"Checking completion response: {highlighted}")
|
||||||
assert last_match > 0, f'/{re_content}/ must match ```{highlighted}```'
|
assert last_match > 0, f'/{re_content}/ must match ```{highlighted}```'
|
||||||
if expected_predicted_n and expected_predicted_n > 0:
|
if expected_predicted_n and expected_predicted_n > 0:
|
||||||
assert n_predicted == expected_predicted_n, (f'invalid number of tokens predicted:'
|
assert n_predicted == expected_predicted_n, (f'invalid number of tokens predicted:'
|
||||||
f' {n_predicted} <> {expected_predicted_n}')
|
f' {n_predicted} <> {expected_predicted_n}')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
async def gather_tasks_results(context):
|
async def gather_tasks_results(context):
|
||||||
n_tasks = len(context.concurrent_tasks)
|
n_tasks = len(context.concurrent_tasks)
|
||||||
if context.debug:
|
if context.debug:
|
||||||
print(f"Waiting for all {n_tasks} tasks results...\n")
|
print(f"Waiting for all {n_tasks} tasks results...")
|
||||||
for task_no in range(n_tasks):
|
for task_no in range(n_tasks):
|
||||||
context.tasks_result.append(await context.concurrent_tasks.pop())
|
context.tasks_result.append(await context.concurrent_tasks.pop())
|
||||||
n_completions = len(context.tasks_result)
|
n_completions = len(context.tasks_result)
|
||||||
@ -854,9 +982,12 @@ async def wait_for_health_status(context,
|
|||||||
slots_processing=None,
|
slots_processing=None,
|
||||||
expected_slots=None):
|
expected_slots=None):
|
||||||
if context.debug:
|
if context.debug:
|
||||||
print(f"Starting checking for health for expected_health_status={expected_health_status}\n")
|
print(f"Starting checking for health for expected_health_status={expected_health_status}")
|
||||||
interval = 0.5
|
interval = 0.5
|
||||||
counter = 0
|
counter = 0
|
||||||
|
if 'GITHUB_ACTIONS' in os.environ:
|
||||||
|
timeout *= 2
|
||||||
|
|
||||||
async with aiohttp.ClientSession() as session:
|
async with aiohttp.ClientSession() as session:
|
||||||
while True:
|
while True:
|
||||||
async with await session.get(f'{base_url}/health', params=params) as health_response:
|
async with await session.get(f'{base_url}/health', params=params) as health_response:
|
||||||
@ -899,6 +1030,8 @@ def assert_embeddings(embeddings):
|
|||||||
assert len(embeddings) > 0
|
assert len(embeddings) > 0
|
||||||
embeddings_computed = False
|
embeddings_computed = False
|
||||||
for emb in embeddings:
|
for emb in embeddings:
|
||||||
|
if not isinstance(emb, float):
|
||||||
|
assert False, f"Bad embeddings: {embeddings}"
|
||||||
if emb != 0:
|
if emb != 0:
|
||||||
embeddings_computed = True
|
embeddings_computed = True
|
||||||
assert embeddings_computed, f"Embeddings: {embeddings}"
|
assert embeddings_computed, f"Embeddings: {embeddings}"
|
||||||
@ -926,17 +1059,30 @@ async def completions_seed(context):
|
|||||||
else context.server_seed if hasattr(context, 'server_seed') else None
|
else context.server_seed if hasattr(context, 'server_seed') else None
|
||||||
|
|
||||||
|
|
||||||
|
def context_text(context):
|
||||||
|
return context.text.replace('\r', '')
|
||||||
|
|
||||||
|
|
||||||
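The new context_text helper strips carriage returns from behave docstrings, which otherwise leak in when feature files are checked out with CRLF line endings (for example on Windows runners); a minimal sketch:

    text = 'line one\r\nline two'                 # hypothetical docstring content with CRLF
    assert text.replace('\r', '') == 'line one\nline two'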
def start_server_background(context):
|
def start_server_background(context):
|
||||||
|
if os.name == 'nt':
|
||||||
|
context.server_path = '../../../build/bin/Release/server.exe'
|
||||||
|
else:
|
||||||
context.server_path = '../../../build/bin/server'
|
context.server_path = '../../../build/bin/server'
|
||||||
if 'LLAMA_SERVER_BIN_PATH' in os.environ:
|
if 'LLAMA_SERVER_BIN_PATH' in os.environ:
|
||||||
context.server_path = os.environ['LLAMA_SERVER_BIN_PATH']
|
context.server_path = os.environ['LLAMA_SERVER_BIN_PATH']
|
||||||
|
server_listen_addr = context.server_fqdn
|
||||||
server_args = [
|
server_args = [
|
||||||
'--host', context.server_fqdn,
|
'--host', server_listen_addr,
|
||||||
'--port', context.server_port,
|
'--port', context.server_port,
|
||||||
'--model', context.model_file
|
|
||||||
]
|
]
|
||||||
|
if context.model_file:
|
||||||
|
server_args.extend(['--model', context.model_file])
|
||||||
|
if context.model_url:
|
||||||
|
server_args.extend(['--model-url', context.model_url])
|
||||||
if context.n_batch:
|
if context.n_batch:
|
||||||
server_args.extend(['--batch-size', context.n_batch])
|
server_args.extend(['--batch-size', context.n_batch])
|
||||||
|
if context.n_ubatch:
|
||||||
|
server_args.extend(['--ubatch-size', context.n_ubatch])
|
||||||
if context.n_gpu_layer:
|
if context.n_gpu_layer:
|
||||||
server_args.extend(['--n-gpu-layers', context.n_gpu_layer])
|
server_args.extend(['--n-gpu-layers', context.n_gpu_layer])
|
||||||
if context.server_continuous_batching:
|
if context.server_continuous_batching:
|
||||||
@ -963,8 +1109,32 @@ def start_server_background(context):
|
|||||||
server_args.append('--verbose')
|
server_args.append('--verbose')
|
||||||
if 'SERVER_LOG_FORMAT_JSON' not in os.environ:
|
if 'SERVER_LOG_FORMAT_JSON' not in os.environ:
|
||||||
server_args.extend(['--log-format', "text"])
|
server_args.extend(['--log-format', "text"])
|
||||||
print(f"starting server with: {context.server_path} {server_args}\n")
|
print(f"starting server with: {context.server_path} {server_args}")
|
||||||
|
flags = 0
|
||||||
|
if 'nt' == os.name:
|
||||||
|
flags |= subprocess.DETACHED_PROCESS
|
||||||
|
flags |= subprocess.CREATE_NEW_PROCESS_GROUP
|
||||||
|
flags |= subprocess.CREATE_NO_WINDOW
|
||||||
|
|
||||||
|
pkwargs = {
|
||||||
|
'creationflags': flags,
|
||||||
|
'stdout': subprocess.PIPE,
|
||||||
|
'stderr': subprocess.PIPE
|
||||||
|
}
|
||||||
context.server_process = subprocess.Popen(
|
context.server_process = subprocess.Popen(
|
||||||
[str(arg) for arg in [context.server_path, *server_args]],
|
[str(arg) for arg in [context.server_path, *server_args]],
|
||||||
close_fds=True)
|
**pkwargs)
|
||||||
print(f"server pid={context.server_process.pid}")
|
|
||||||
|
def log_stdout(process):
|
||||||
|
for line in iter(process.stdout.readline, b''):
|
||||||
|
print(line.decode('utf-8'), end='')
|
||||||
|
thread_stdout = threading.Thread(target=log_stdout, args=(context.server_process,))
|
||||||
|
thread_stdout.start()
|
||||||
|
|
||||||
|
def log_stderr(process):
|
||||||
|
for line in iter(process.stderr.readline, b''):
|
||||||
|
print(line.decode('utf-8'), end='', file=sys.stderr)
|
||||||
|
thread_stderr = threading.Thread(target=log_stderr, args=(context.server_process,))
|
||||||
|
thread_stderr.start()
|
||||||
|
|
||||||
|
print(f"server pid={context.server_process.pid}, behave pid={os.getpid()}")
|
||||||
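A minimal sketch of the new child-process handling: on Windows the server is detached into its own process group, and stdout/stderr are piped and drained by small logging threads so the server log still reaches the behave output. The command argument below is a placeholder, not the real server invocation.

    import os, subprocess, sys, threading

    def spawn_and_log(cmd):                       # cmd is a placeholder command line
        flags = 0
        if os.name == 'nt':
            flags |= subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.CREATE_NO_WINDOW
        proc = subprocess.Popen(cmd, creationflags=flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        def drain(stream, sink):
            # forward each child output line to the parent's stdout/stderr
            for line in iter(stream.readline, b''):
                print(line.decode('utf-8'), end='', file=sink)

        threading.Thread(target=drain, args=(proc.stdout, sys.stdout), daemon=True).start()
        threading.Thread(target=drain, args=(proc.stderr, sys.stderr), daemon=True).start()
        return proc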
|
@ -1,5 +1,6 @@
|
|||||||
aiohttp~=3.9.3
|
aiohttp~=3.9.3
|
||||||
behave~=1.2.6
|
behave~=1.2.6
|
||||||
huggingface_hub~=0.20.3
|
huggingface_hub~=0.20.3
|
||||||
|
numpy~=1.24.4
|
||||||
openai~=0.25.0
|
openai~=0.25.0
|
||||||
prometheus-client~=0.20.0
|
prometheus-client~=0.20.0
|
||||||
|
@ -1,17 +1,29 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <string>
|
#include "llama.h"
|
||||||
#include <vector>
|
#include "common.h"
|
||||||
#include <set>
|
|
||||||
#include <mutex>
|
|
||||||
#include <condition_variable>
|
|
||||||
#include <unordered_map>
|
|
||||||
|
|
||||||
#include "json.hpp"
|
#include "json.hpp"
|
||||||
|
|
||||||
#include "../llava/clip.h"
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <sstream>
|
||||||
|
#include <random>
|
||||||
|
|
||||||
using json = nlohmann::json;
|
#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"
|
||||||
|
|
||||||
|
using json = nlohmann::ordered_json;
|
||||||
|
|
||||||
|
// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
|
||||||
|
enum error_type {
|
||||||
|
ERROR_TYPE_INVALID_REQUEST,
|
||||||
|
ERROR_TYPE_AUTHENTICATION,
|
||||||
|
ERROR_TYPE_SERVER,
|
||||||
|
ERROR_TYPE_NOT_FOUND,
|
||||||
|
ERROR_TYPE_PERMISSION,
|
||||||
|
ERROR_TYPE_UNAVAILABLE, // custom error
|
||||||
|
ERROR_TYPE_NOT_SUPPORTED, // custom error
|
||||||
|
};
|
||||||
|
|
||||||
extern bool server_verbose;
|
extern bool server_verbose;
|
||||||
extern bool server_log_json;
|
extern bool server_log_json;
|
||||||
@ -37,61 +49,13 @@ extern bool server_log_json;
|
|||||||
#define LOG_WARNING(MSG, ...) server_log("WARN", __func__, __LINE__, MSG, __VA_ARGS__)
|
#define LOG_WARNING(MSG, ...) server_log("WARN", __func__, __LINE__, MSG, __VA_ARGS__)
|
||||||
#define LOG_INFO( MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)
|
#define LOG_INFO( MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)
|
||||||
|
|
||||||
enum server_state {
|
template <typename T>
|
||||||
SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet
|
static T json_value(const json &body, const std::string &key, const T &default_value) {
|
||||||
SERVER_STATE_READY, // Server is ready and model is loaded
|
// Fallback null to default value
|
||||||
SERVER_STATE_ERROR // An error occurred, load_model failed
|
return body.contains(key) && !body.at(key).is_null()
|
||||||
};
|
? body.value(key, default_value)
|
||||||
|
: default_value;
|
||||||
enum task_type {
|
}
|
||||||
TASK_TYPE_COMPLETION,
|
|
||||||
TASK_TYPE_CANCEL,
|
|
||||||
TASK_TYPE_NEXT_RESPONSE,
|
|
||||||
TASK_TYPE_METRICS
|
|
||||||
};
|
|
||||||
|
|
||||||
struct task_server {
|
|
||||||
int id = -1; // to be filled by llama_server_queue
|
|
||||||
int target_id;
|
|
||||||
task_type type;
|
|
||||||
json data;
|
|
||||||
bool infill_mode = false;
|
|
||||||
bool embedding_mode = false;
|
|
||||||
int multitask_id = -1;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct task_result {
|
|
||||||
int id;
|
|
||||||
int multitask_id = -1;
|
|
||||||
bool stop;
|
|
||||||
bool error;
|
|
||||||
json result_json;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct task_multi {
|
|
||||||
int id;
|
|
||||||
std::set<int> subtasks_remaining{};
|
|
||||||
std::vector<task_result> results{};
|
|
||||||
};
|
|
||||||
|
|
||||||
// completion token output with probabilities
|
|
||||||
struct completion_token_output {
|
|
||||||
struct token_prob
|
|
||||||
{
|
|
||||||
llama_token tok;
|
|
||||||
float prob;
|
|
||||||
};
|
|
||||||
|
|
||||||
std::vector<token_prob> probs;
|
|
||||||
llama_token tok;
|
|
||||||
std::string text_to_send;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct token_translator {
|
|
||||||
llama_context * ctx;
|
|
||||||
std::string operator()(llama_token tok) const { return llama_token_to_piece(ctx, tok); }
|
|
||||||
std::string operator()(const completion_token_output &cto) const { return (*this)(cto.tok); }
|
|
||||||
};
|
|
||||||
|
|
||||||
static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra) {
|
static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra) {
|
||||||
std::stringstream ss_tid;
|
std::stringstream ss_tid;
|
||||||
@ -102,18 +66,18 @@ static inline void server_log(const char *level, const char *function, int line,
|
|||||||
};
|
};
|
||||||
|
|
||||||
if (server_log_json) {
|
if (server_log_json) {
|
||||||
log.merge_patch(
|
log.merge_patch( {
|
||||||
{
|
|
||||||
{"level", level},
|
{"level", level},
|
||||||
{"function", function},
|
{"function", function},
|
||||||
{"line", line},
|
{"line", line},
|
||||||
{"msg", message},
|
{"msg", message},
|
||||||
});
|
});
|
||||||
|
|
||||||
if (!extra.empty()) {
|
if (!extra.empty()) {
|
||||||
log.merge_patch(extra);
|
log.merge_patch(extra);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::cout << log.dump(-1, ' ', false, json::error_handler_t::replace) << "\n" << std::flush;
|
printf("%s\n", log.dump(-1, ' ', false, json::error_handler_t::replace).c_str());
|
||||||
} else {
|
} else {
|
||||||
char buf[1024];
|
char buf[1024];
|
||||||
snprintf(buf, 1024, "%4s [%24s] %s", level, function, message);
|
snprintf(buf, 1024, "%4s [%24s] %s", level, function, message);
|
||||||
@ -136,22 +100,13 @@ static inline void server_log(const char *level, const char *function, int line,
|
|||||||
}
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
// server utils
|
// chat template utils
|
||||||
//
|
//
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
static T json_value(const json &body, const std::string &key, const T &default_value) {
|
|
||||||
// Fallback null to default value
|
|
||||||
return body.contains(key) && !body.at(key).is_null()
|
|
||||||
? body.value(key, default_value)
|
|
||||||
: default_value;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
|
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
|
||||||
inline bool verify_custom_template(const std::string & tmpl) {
|
inline bool verify_custom_template(const std::string & tmpl) {
|
||||||
llama_chat_message chat[] = {{"user", "test"}};
|
llama_chat_message chat[] = {{"user", "test"}};
|
||||||
std::vector<char> buf(1);
|
int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
|
||||||
int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, buf.data(), buf.size());
|
|
||||||
return res >= 0;
|
return res >= 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,7 +118,7 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
|
|||||||
std::vector<llama_chat_message> chat(messages.size());
|
std::vector<llama_chat_message> chat(messages.size());
|
||||||
|
|
||||||
for (size_t i = 0; i < messages.size(); ++i) {
|
for (size_t i = 0; i < messages.size(); ++i) {
|
||||||
auto &curr_msg = messages[i];
|
const auto & curr_msg = messages[i];
|
||||||
str[i*2 + 0] = json_value(curr_msg, "role", std::string(""));
|
str[i*2 + 0] = json_value(curr_msg, "role", std::string(""));
|
||||||
str[i*2 + 1] = json_value(curr_msg, "content", std::string(""));
|
str[i*2 + 1] = json_value(curr_msg, "content", std::string(""));
|
||||||
alloc_size += str[i*2 + 1].length();
|
alloc_size += str[i*2 + 1].length();
|
||||||
@ -183,261 +138,13 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
|
|||||||
res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());
|
res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string formatted_chat(buf.data(), res);
|
const std::string formatted_chat(buf.data(), res);
|
||||||
|
|
||||||
LOG_VERBOSE("formatted_chat", {{"text", formatted_chat.c_str()}});
|
LOG_VERBOSE("formatted_chat", {{"text", formatted_chat.c_str()}});
|
||||||
|
|
||||||
return formatted_chat;
|
return formatted_chat;
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
|
||||||
// work queue utils
|
|
||||||
//
|
|
||||||
|
|
||||||
struct llama_server_queue {
|
|
||||||
int id = 0;
|
|
||||||
std::mutex mutex_tasks;
|
|
||||||
bool running;
|
|
||||||
// queues
|
|
||||||
std::vector<task_server> queue_tasks;
|
|
||||||
std::vector<task_server> queue_tasks_deferred;
|
|
||||||
std::vector<task_multi> queue_multitasks;
|
|
||||||
std::condition_variable condition_tasks;
|
|
||||||
// callback functions
|
|
||||||
std::function<void(task_server&)> callback_new_task;
|
|
||||||
std::function<void(task_multi&)> callback_finish_multitask;
|
|
||||||
std::function<void(void)> callback_run_slots;
|
|
||||||
|
|
||||||
// Add a new task to the end of the queue
|
|
||||||
int post(task_server task) {
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
|
||||||
if (task.id == -1) {
|
|
||||||
task.id = id++;
|
|
||||||
LOG_VERBOSE("new task id", {{"new_id", task.id}});
|
|
||||||
}
|
|
||||||
queue_tasks.push_back(std::move(task));
|
|
||||||
condition_tasks.notify_one();
|
|
||||||
return task.id;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a new task, but defer until one slot is available
|
|
||||||
void defer(task_server task) {
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
|
||||||
queue_tasks_deferred.push_back(std::move(task));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the next id for creating a new task
|
|
||||||
int get_new_id() {
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
|
||||||
int new_id = id++;
|
|
||||||
LOG_VERBOSE("new task id", {{"new_id", new_id}});
|
|
||||||
return new_id;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register function to process a new task
|
|
||||||
void on_new_task(std::function<void(task_server&)> callback) {
|
|
||||||
callback_new_task = callback;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register function to process a multitask when it is finished
|
|
||||||
void on_finish_multitask(std::function<void(task_multi&)> callback) {
|
|
||||||
callback_finish_multitask = callback;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register the function to be called when all slots data is ready to be processed
|
|
||||||
void on_run_slots(std::function<void(void)> callback) {
|
|
||||||
callback_run_slots = callback;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call when the state of one slot is changed
|
|
||||||
void notify_slot_changed() {
|
|
||||||
// move deferred tasks back to main loop
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
|
||||||
for (auto & task : queue_tasks_deferred) {
|
|
||||||
queue_tasks.push_back(std::move(task));
|
|
||||||
}
|
|
||||||
queue_tasks_deferred.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
// end the start_loop routine
|
|
||||||
void terminate() {
|
|
||||||
{
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
|
||||||
running = false;
|
|
||||||
}
|
|
||||||
condition_tasks.notify_all();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Main loop consists of these steps:
|
|
||||||
* - Wait until a new task arrives
|
|
||||||
* - Process the task (i.e. maybe copy data into slot)
|
|
||||||
* - Check if multitask is finished
|
|
||||||
* - Run all slots
|
|
||||||
*/
|
|
||||||
void start_loop() {
|
|
||||||
running = true;
|
|
||||||
while (true) {
|
|
||||||
LOG_VERBOSE("new task may arrive", {});
|
|
||||||
{
|
|
||||||
while (true)
|
|
||||||
{
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
|
||||||
if (queue_tasks.empty()) {
|
|
||||||
lock.unlock();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
task_server task = queue_tasks.front();
|
|
||||||
queue_tasks.erase(queue_tasks.begin());
|
|
||||||
lock.unlock();
|
|
||||||
LOG_VERBOSE("callback_new_task", {{"task_id", task.id}});
|
|
||||||
callback_new_task(task);
|
|
||||||
}
|
|
||||||
LOG_VERBOSE("update_multitasks", {});
|
|
||||||
// check if we have any finished multitasks
|
|
||||||
auto queue_iterator = queue_multitasks.begin();
|
|
||||||
while (queue_iterator != queue_multitasks.end())
|
|
||||||
{
|
|
||||||
if (queue_iterator->subtasks_remaining.empty())
|
|
||||||
{
|
|
||||||
// all subtasks done == multitask is done
|
|
||||||
task_multi current_multitask = *queue_iterator;
|
|
||||||
callback_finish_multitask(current_multitask);
|
|
||||||
// remove this multitask
|
|
||||||
queue_iterator = queue_multitasks.erase(queue_iterator);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
++queue_iterator;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// all tasks in the current loop are processed, slots data is now ready
|
|
||||||
LOG_VERBOSE("callback_run_slots", {});
|
|
||||||
callback_run_slots();
|
|
||||||
}
|
|
||||||
LOG_VERBOSE("wait for new task", {});
|
|
||||||
// wait for new task
|
|
||||||
{
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_tasks);
|
|
||||||
if (queue_tasks.empty()) {
|
|
||||||
if (!running) {
|
|
||||||
LOG_VERBOSE("ending start_loop", {});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
condition_tasks.wait(lock, [&]{
|
|
||||||
return (!queue_tasks.empty() || !running);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// functions to manage multitasks
|
|
||||||
//
|
|
||||||
|
|
||||||
// add a multitask by specifying the ids of all subtasks (a subtask is a task_server)
|
|
||||||
void add_multitask(int multitask_id, std::vector<int>& sub_ids)
|
|
||||||
{
|
|
||||||
std::lock_guard<std::mutex> lock(mutex_tasks);
|
|
||||||
task_multi multi;
|
|
||||||
multi.id = multitask_id;
|
|
||||||
std::copy(sub_ids.begin(), sub_ids.end(), std::inserter(multi.subtasks_remaining, multi.subtasks_remaining.end()));
|
|
||||||
queue_multitasks.push_back(multi);
|
|
||||||
}
|
|
||||||
|
|
||||||
// update the remaining subtasks, while appending results to multitask
|
|
||||||
void update_multitask(int multitask_id, int subtask_id, task_result& result)
|
|
||||||
{
|
|
||||||
std::lock_guard<std::mutex> lock(mutex_tasks);
|
|
||||||
for (auto& multitask : queue_multitasks)
|
|
||||||
{
|
|
||||||
if (multitask.id == multitask_id)
|
|
||||||
{
|
|
||||||
multitask.subtasks_remaining.erase(subtask_id);
|
|
||||||
multitask.results.push_back(result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct llama_server_response {
|
|
||||||
typedef std::function<void(int, int, task_result&)> callback_multitask_t;
|
|
||||||
callback_multitask_t callback_update_multitask;
|
|
||||||
// for keeping track of all tasks waiting for the result
|
|
||||||
std::set<int> waiting_task_ids;
|
|
||||||
// the main result queue
|
|
||||||
std::vector<task_result> queue_results;
|
|
||||||
std::mutex mutex_results;
|
|
||||||
std::condition_variable condition_results;
|
|
||||||
|
|
||||||
// add the task_id to the list of tasks waiting for response
|
|
||||||
void add_waiting_task_id(int task_id) {
|
|
||||||
LOG_VERBOSE("waiting for task id", {{"task_id", task_id}});
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_results);
|
|
||||||
waiting_task_ids.insert(task_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
// when the request is finished, we can remove task associated with it
|
|
||||||
void remove_waiting_task_id(int task_id) {
|
|
||||||
LOG_VERBOSE("remove waiting for task id", {{"task_id", task_id}});
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_results);
|
|
||||||
waiting_task_ids.erase(task_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function blocks the thread until there is a response for this task_id
|
|
||||||
task_result recv(int task_id) {
|
|
||||||
while (true)
|
|
||||||
{
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_results);
|
|
||||||
condition_results.wait(lock, [&]{
|
|
||||||
return !queue_results.empty();
|
|
||||||
});
|
|
||||||
|
|
||||||
for (int i = 0; i < (int) queue_results.size(); i++)
|
|
||||||
{
|
|
||||||
if (queue_results[i].id == task_id)
|
|
||||||
{
|
|
||||||
assert(queue_results[i].multitask_id == -1);
|
|
||||||
task_result res = queue_results[i];
|
|
||||||
queue_results.erase(queue_results.begin() + i);
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// should never reach here
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register the function to update multitask
|
|
||||||
void on_multitask_update(callback_multitask_t callback) {
|
|
||||||
callback_update_multitask = callback;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send a new result to a waiting task_id
|
|
||||||
void send(task_result result) {
|
|
||||||
std::unique_lock<std::mutex> lock(mutex_results);
|
|
||||||
LOG_VERBOSE("send new result", {{"task_id", result.id}});
|
|
||||||
for (auto& task_id : waiting_task_ids) {
|
|
||||||
// LOG_TEE("waiting task id %i \n", task_id);
|
|
||||||
// for now, tasks that have associated parent multitasks just get erased once multitask picks up the result
|
|
||||||
if (result.multitask_id == task_id)
|
|
||||||
{
|
|
||||||
LOG_VERBOSE("callback_update_multitask", {{"task_id", task_id}});
|
|
||||||
callback_update_multitask(task_id, result.id, result);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (result.id == task_id)
|
|
||||||
{
|
|
||||||
LOG_VERBOSE("queue_results.push_back", {{"task_id", task_id}});
|
|
||||||
queue_results.push_back(result);
|
|
||||||
condition_results.notify_all();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
//
// base64 utils (TODO: move to common in the future)
//
@@ -447,13 +154,11 @@ static const std::string base64_chars =
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

-static inline bool is_base64(uint8_t c)
-{
+static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

-static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string)
-{
+static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;
@@ -465,13 +170,10 @@ static inline std::vector<uint8_t> base64_decode(const std::string & encoded_str

    std::vector<uint8_t> ret;

-    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_]))
-    {
+    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
-        if (i == 4)
-        {
-            for (i = 0; i <4; i++)
-            {
+        if (i == 4) {
+            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

@@ -479,23 +181,20 @@ static inline std::vector<uint8_t> base64_decode(const std::string & encoded_str
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];

-            for (i = 0; (i < 3); i++)
-            {
+            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

-    if (i)
-    {
-        for (j = i; j <4; j++)
-        {
+    if (i) {
+        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

-        for (j = 0; j <4; j++)
-        {
+        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

@@ -503,8 +202,7 @@ static inline std::vector<uint8_t> base64_decode(const std::string & encoded_str
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];

-        for (j = 0; (j < i - 1); j++)
-        {
+        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }
@@ -516,8 +214,7 @@ static inline std::vector<uint8_t> base64_decode(const std::string & encoded_str
// random string / id
//

-static std::string random_string()
-{
+static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
@@ -532,10 +229,10 @@ static std::string random_string()
    return result;
}

-static std::string gen_chatcmplid()
-{
+static std::string gen_chatcmplid() {
    std::stringstream chatcmplid;
    chatcmplid << "chatcmpl-" << random_string();

    return chatcmplid.str();
}

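base64_decode above turns a base64 payload back into raw bytes; in the server this kind of helper is used for things like base64-encoded image data in requests. A tiny, hypothetical caller, assuming the function above is in scope (the payload literal is made up and simply encodes "hello"):

    // Hypothetical usage of base64_decode; nothing here is part of the llama.cpp sources.
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
        const std::string payload = "aGVsbG8=";                         // base64 for "hello"
        const std::vector<uint8_t> bytes = base64_decode(payload);      // declared above

        printf("decoded %zu bytes: %.*s\n", bytes.size(), (int) bytes.size(), (const char *) bytes.data());
        return 0;
    }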
@@ -543,91 +240,378 @@ static std::string gen_chatcmplid()
// other common utils
//

-static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
-{
+static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
-    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
-    {
-    }
+    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}
    return i;
}

-static bool ends_with(const std::string &str, const std::string &suffix)
-{
-    return str.size() >= suffix.size() &&
-           0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
+static bool ends_with(const std::string & str, const std::string & suffix) {
+    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

-static size_t find_partial_stop_string(const std::string &stop,
-                                       const std::string &text)
-{
-    if (!text.empty() && !stop.empty())
-    {
+static size_t find_partial_stop_string(const std::string &stop, const std::string &text) {
+    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
-        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
-        {
-            if (stop[char_index] == text_last_char)
-            {
+        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
+            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
-                if (ends_with(text, current_partial))
-                {
+                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}

// TODO: reuse llama_detokenize
template <class Iter>
-static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
-{
+static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
-    for (; begin != end; ++begin)
-    {
+    for (; begin != end; ++begin) {
        ret += llama_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
-static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
-{
+static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);

    // if the size is 1 and first bit is 1, meaning it's a partial character
    // (size > 1 meaning it's already a known token)
-    if (out.size() == 1 && (out[0] & 0x80) == 0x80)
-    {
+    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

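find_partial_stop_string reports where a prefix of a stop word begins at the very end of the generated text, so streaming code can hold those characters back until it knows whether a full stop sequence is coming. A small, hypothetical check of that behaviour, assuming the function above is in scope:

    // Hypothetical illustration of find_partial_stop_string; not part of the llama.cpp sources.
    // With stop = "###" and text = "foo ##", the trailing "##" is a prefix of the stop word
    // and starts at index 4, so 4 is returned; with no partial match, npos is returned.
    #include <cassert>
    #include <string>

    int main() {
        const std::string stop = "###";

        assert(find_partial_stop_string(stop, "foo ##")  == 4);
        assert(find_partial_stop_string(stop, "foo bar") == std::string::npos);
        return 0;
    }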
+struct completion_token_output {
+    llama_token tok;
+    std::string text_to_send;
+
+    struct token_prob {
+        llama_token tok;
+        float prob;
+    };
+
+    std::vector<token_prob> probs;
+};
+
// convert a vector of completion_token_output to json
-static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> &probs)
-{
+static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();
-    for (const auto &prob : probs)
-    {
+    for (const auto & prob : probs) {
        json probs_for_token = json::array();
-        for (const auto &p : prob.probs)
-        {
-            std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
-            probs_for_token.push_back(json
-            {
+        for (const auto & p : prob.probs) {
+            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
+            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob",    p.prob},
            });
        }
-        std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
-        out.push_back(json{
+        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
+        out.push_back(json {
            {"content", tok_str},
            {"probs",   probs_for_token},
        });
    }

    return out;
}

+//
+// OAI utils
+//
+
+static json oaicompat_completion_params_parse(
+    const struct llama_model * model,
+    const json & body, /* openai api json semantics */
+    const std::string & chat_template) {
+    json llama_params;
+
+    llama_params["__oaicompat"] = true;
+
+    // Map OpenAI parameters to llama.cpp parameters
+    //
+    // For parameters that are defined by the OpenAI documentation (e.g.
+    // temperature), we explicitly specify OpenAI's intended default; we
+    // need to do that because sometimes OpenAI disagrees with llama.cpp
+    //
+    // https://platform.openai.com/docs/api-reference/chat/create
+    llama_sampling_params default_sparams;
+    llama_params["model"]             = json_value(body, "model", std::string("unknown"));
+    llama_params["prompt"]            = format_chat(model, chat_template, body["messages"]);
+    llama_params["cache_prompt"]      = json_value(body, "cache_prompt", false);
+    llama_params["temperature"]       = json_value(body, "temperature", 0.0);
+    llama_params["top_k"]             = json_value(body, "top_k", default_sparams.top_k);
+    llama_params["top_p"]             = json_value(body, "top_p", 1.0);
+    llama_params["n_predict"]         = json_value(body, "max_tokens", -1);
+    llama_params["logit_bias"]        = json_value(body, "logit_bias", json::object());
+    llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
+    llama_params["presence_penalty"]  = json_value(body, "presence_penalty", 0.0);
+    llama_params["seed"]              = json_value(body, "seed", LLAMA_DEFAULT_SEED);
+    llama_params["stream"]            = json_value(body, "stream", false);
+    llama_params["mirostat"]          = json_value(body, "mirostat", default_sparams.mirostat);
+    llama_params["mirostat_tau"]      = json_value(body, "mirostat_tau", default_sparams.mirostat_tau);
+    llama_params["mirostat_eta"]      = json_value(body, "mirostat_eta", default_sparams.mirostat_eta);
+    llama_params["penalize_nl"]       = json_value(body, "penalize_nl", default_sparams.penalize_nl);
+    llama_params["typical_p"]         = json_value(body, "typical_p", default_sparams.typical_p);
+    llama_params["repeat_last_n"]     = json_value(body, "repeat_last_n", default_sparams.penalty_last_n);
+    llama_params["ignore_eos"]        = json_value(body, "ignore_eos", false);
+    llama_params["tfs_z"]             = json_value(body, "tfs_z", default_sparams.tfs_z);
+    llama_params["n_keep"]            = json_value(body, "n_keep", 0);
+
+    if (body.contains("grammar")) {
+        llama_params["grammar"] = json_value(body, "grammar", json::object());
+    }
+
+    if (body.contains("response_format")) {
+        auto response_format = json_value(body, "response_format", json::object());
+        if (response_format.contains("type")) {
+            if (response_format["type"] == "json_object") {
+                llama_params["json_schema"] = json_value(response_format, "schema", json::object());
+            } else {
+                throw std::runtime_error("response_format type not supported: " + response_format["type"].dump());
+            }
+        }
+    }
+
+    // Handle 'stop' field
+    if (body.contains("stop") && body["stop"].is_string()) {
+        llama_params["stop"] = json::array({body["stop"].get<std::string>()});
+    } else {
+        llama_params["stop"] = json_value(body, "stop", json::array());
+    }
+
+    // Ensure there is ChatML-specific end sequence among stop words
+    llama_params["stop"].push_back("<|im_end|>");
+
+    return llama_params;
+}
+
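The mapping above boils down to looking each OpenAI field up in the request body and substituting an explicit default when it is absent, sometimes under a different llama.cpp name (max_tokens becomes n_predict). A self-contained sketch of that idea using nlohmann::json; the json_value helper here is a simplified stand-in for the server's helper of the same name, and the request body is invented:

    // Sketch of the "explicit default" mapping; not the server's actual helper.
    #include <nlohmann/json.hpp>
    #include <cstdio>
    #include <string>

    using json = nlohmann::json;

    template <typename T>
    static T json_value(const json & body, const std::string & key, const T & default_value) {
        // fall back to an explicit default when the client omits the field
        return body.contains(key) && !body.at(key).is_null() ? body.at(key).get<T>() : default_value;
    }

    int main() {
        // a minimal OpenAI-style request body (hypothetical)
        json body = json::parse(R"({"model": "gpt-3.5-turbo", "max_tokens": 64, "stream": true})");

        json llama_params;
        llama_params["model"]       = json_value(body, "model", std::string("unknown"));
        llama_params["n_predict"]   = json_value(body, "max_tokens", -1);   // OpenAI name -> llama.cpp name
        llama_params["temperature"] = json_value(body, "temperature", 0.0); // explicit default when omitted
        llama_params["stream"]      = json_value(body, "stream", false);

        printf("%s\n", llama_params.dump(2).c_str());
        return 0;
    }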
+static json format_final_response_oaicompat(const json & request, json result, const std::string & completion_id, bool streaming = false) {
+    bool stopped_word        = result.count("stopped_word") != 0;
+    bool stopped_eos         = json_value(result, "stopped_eos", false);
+    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
+    int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
+    std::string content      = json_value(result, "content", std::string(""));
+
+    std::string finish_reason = "length";
+    if (stopped_word || stopped_eos) {
+        finish_reason = "stop";
+    }
+
+    json choices =
+        streaming ? json::array({json{{"finish_reason", finish_reason},
+                                        {"index", 0},
+                                        {"delta", json::object()}}})
+                  : json::array({json{{"finish_reason", finish_reason},
+                                        {"index", 0},
+                                        {"message", json{{"content", content},
+                                                         {"role", "assistant"}}}}});
+
+    std::time_t t = std::time(0);
+
+    json res = json {
+        {"choices", choices},
+        {"created", t},
+        {"model",
+            json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
+        {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
+        {"usage", json {
+            {"completion_tokens", num_tokens_predicted},
+            {"prompt_tokens",     num_prompt_tokens},
+            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
+        }},
+        {"id", completion_id}
+    };
+
+    if (server_verbose) {
+        res["__verbose"] = result;
+    }
+
+    if (result.contains("completion_probabilities")) {
+        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
+    }
+
+    return res;
+}
+
+// return value is vector as there is one case where we might need to generate two responses
+static std::vector<json> format_partial_response_oaicompat(json result, const std::string & completion_id) {
+    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
+        return std::vector<json>({result});
+    }
+
+    bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
+    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));
+
+    bool stopped_word   = json_value(result, "stopped_word", false);
+    bool stopped_eos    = json_value(result, "stopped_eos", false);
+    bool stopped_limit  = json_value(result, "stopped_limit", false);
+    std::string content = json_value(result, "content", std::string(""));
+
+    std::string finish_reason;
+    if (stopped_word || stopped_eos) {
+        finish_reason = "stop";
+    }
+    if (stopped_limit) {
+        finish_reason = "length";
+    }
+
+    std::time_t t = std::time(0);
+
+    json choices;
+
+    if (!finish_reason.empty()) {
+        choices = json::array({json{{"finish_reason", finish_reason},
+                                    {"index", 0},
+                                    {"delta", json::object()}}});
+    } else {
+        if (first) {
+            if (content.empty()) {
+                choices = json::array({json{{"finish_reason", nullptr},
+                                            {"index", 0},
+                                            {"delta", json{{"role", "assistant"}}}}});
+            } else {
+                // We have to send this as two updates to conform to openai behavior
+                json initial_ret = json{{"choices", json::array({json{
+                                            {"finish_reason", nullptr},
+                                            {"index", 0},
+                                            {"delta", json{
+                                                {"role", "assistant"}
+                                            }}}})},
+                            {"created", t},
+                            {"id", completion_id},
+                            {"model", modelname},
+                            {"object", "chat.completion.chunk"}};
+
+                json second_ret = json{
+                            {"choices", json::array({json{{"finish_reason", nullptr},
+                                                          {"index", 0},
+                                                          {"delta", json{
+                                                              {"content", content}}}
+                                                          }})},
+                            {"created", t},
+                            {"id", completion_id},
+                            {"model", modelname},
+                            {"object", "chat.completion.chunk"}};
+
+                return std::vector<json>({initial_ret, second_ret});
+            }
+        } else {
+            // Some idiosyncrasy in task processing logic makes several trailing calls
+            // with empty content, we ignore these at the calee site.
+            if (content.empty()) {
+                return std::vector<json>({json::object()});
+            }
+
+            choices = json::array({json{
+                {"finish_reason", nullptr},
+                {"index", 0},
+                {"delta",
+                    json{
+                        {"content", content},
+                    }},
+            }});
+        }
+    }
+
+    json ret = json {
+        {"choices", choices},
+        {"created", t},
+        {"id",      completion_id},
+        {"model",   modelname},
+        {"object",  "chat.completion.chunk"}
+    };
+
+    return std::vector<json>({ret});
+}
+
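For the first streamed token, the code above deliberately emits two chunks, one announcing the assistant role and one carrying the content, because that is the shape OpenAI streaming clients expect. A hypothetical view of what those two chunks could look like on the wire; the id, model name and timestamp here are made up:

    // Illustration only: the two chunks a client might see for a first token "Hello".
    #include <cstdio>

    int main() {
        // chunk 1: announces the assistant role, no content yet
        const char * first_chunk =
            R"({"choices":[{"delta":{"role":"assistant"},"finish_reason":null,"index":0}],)"
            R"("created":1710000000,"id":"chatcmpl-demo","model":"demo","object":"chat.completion.chunk"})";

        // chunk 2: carries the actual token text
        const char * second_chunk =
            R"({"choices":[{"delta":{"content":"Hello"},"finish_reason":null,"index":0}],)"
            R"("created":1710000000,"id":"chatcmpl-demo","model":"demo","object":"chat.completion.chunk"})";

        printf("%s\n%s\n", first_chunk, second_chunk);
        return 0;
    }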
+static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
+    json data = json::array();
+    int i = 0;
+    for (auto & elem : embeddings) {
+        data.push_back(json{
+            {"embedding", json_value(elem, "embedding", json::array())},
+            {"index",     i++},
+            {"object",    "embedding"}
+        });
+    }
+
+    json res = json {
+        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
+        {"object", "list"},
+        {"usage", json {
+            {"prompt_tokens", 0},
+            {"total_tokens",  0}
+        }},
+        {"data", data}
+    };
+
+    return res;
+}
+
+static json format_tokenizer_response(const std::vector<llama_token> & tokens) {
+    return json {
+        {"tokens", tokens}
+    };
+}
+
+static json format_detokenized_response(const std::string & content) {
+    return json {
+        {"content", content}
+    };
+}
+
+static json format_error_response(const std::string & message, const enum error_type type) {
+    std::string type_str;
+    int code = 500;
+    switch (type) {
+        case ERROR_TYPE_INVALID_REQUEST:
+            type_str = "invalid_request_error";
+            code = 400;
+            break;
+        case ERROR_TYPE_AUTHENTICATION:
+            type_str = "authentication_error";
+            code = 401;
+            break;
+        case ERROR_TYPE_NOT_FOUND:
+            type_str = "not_found_error";
+            code = 404;
+            break;
+        case ERROR_TYPE_SERVER:
+            type_str = "server_error";
+            code = 500;
+            break;
+        case ERROR_TYPE_PERMISSION:
+            type_str = "permission_error";
+            code = 403;
+            break;
+        case ERROR_TYPE_NOT_SUPPORTED:
+            type_str = "not_supported_error";
+            code = 501;
+            break;
+        case ERROR_TYPE_UNAVAILABLE:
+            type_str = "unavailable_error";
+            code = 503;
+            break;
+    }
+    return json {
+        {"code", code},
+        {"message", message},
+        {"type", type_str},
+    };
+}
@@ -226,7 +226,7 @@ int main(int argc, char ** argv) {

            while (active_seqs.size() > 0) {
                // randomly select a sequence to verify from active sequences
-                std::uniform_int_distribution<u_int> u_int_dist(0, active_seqs.size() - 1);
+                std::uniform_int_distribution<unsigned int> u_int_dist(0, active_seqs.size() - 1);
                int s = *std::next(active_seqs.begin(), u_int_dist(rng));
                if (i_dft >= (int) drafts[s].tokens.size()) {
                    drafts[s].active = false;
@@ -13,8 +13,11 @@ source /opt/intel/oneapi/setvars.sh
#for FP32
cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx

-#build example/main only
+#build example/main
#cmake --build . --config Release --target main

+#build example/llama-bench
+#cmake --build . --config Release --target llama-bench
+
#build all binary
cmake --build . --config Release -v
@@ -9,18 +9,28 @@ source /opt/intel/oneapi/setvars.sh

if [ $# -gt 0 ]; then
    GGML_SYCL_DEVICE=$1
+    GGML_SYCL_SINGLE_GPU=1
else
    GGML_SYCL_DEVICE=0
fi
-echo "use $GGML_SYCL_DEVICE as main GPU"
#export GGML_SYCL_DEBUG=1


#ZES_ENABLE_SYSMAN=1, Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory. Recommended to use when --split-mode = layer.

-#use all GPUs with same max compute units
-ZES_ENABLE_SYSMAN=1 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0
+if [ $GGML_SYCL_SINGLE_GPU -eq 1 ]; then
+    echo "use $GGML_SYCL_DEVICE as main GPU"
+    #use signle GPU only
+    ZES_ENABLE_SYSMAN=1 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0 -mg $GGML_SYCL_DEVICE -sm none
+else
+    #use multiple GPUs with same max compute units
+    ZES_ENABLE_SYSMAN=1 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0
+fi

#use main GPU only
#ZES_ENABLE_SYSMAN=1 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0 -mg $GGML_SYCL_DEVICE -sm none

+#use multiple GPUs with same max compute units
+#ZES_ENABLE_SYSMAN=1 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0
@@ -6,8 +6,6 @@ set INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"
@call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force


-set GGML_SYCL_DEVICE=0
-rem set GGML_SYCL_DEBUG=1
.\build\bin\main.exe -m models\llama-2-7b.Q4_0.gguf -p %INPUT2% -n 400 -e -ngl 33 -s 0

@@ -711,6 +711,7 @@ static bool load_checkpoint_file(const char * filename, struct my_llama_model *

    load_checkpoint_gguf(fctx, f_ggml_ctx, model, train);

+    gguf_free(fctx);
    return true;
}

examples/ts-type-to-grammar.sh (new executable file)
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# ./examples/ts-type-to-grammar.sh "{a:string,b:string,c?:string}"
+# python examples/json-schema-to-grammar.py https://json.schemastore.org/tsconfig.json
+#
+set -euo pipefail
+
+readonly type="$1"
+
+# Create a temporary directory
+TMPDIR=""
+trap 'rm -fR "$TMPDIR"' EXIT
+TMPDIR=$(mktemp -d)
+
+DTS_FILE="$TMPDIR/type.d.ts"
+SCHEMA_FILE="$TMPDIR/schema.json"
+
+echo "export type MyType = $type" > "$DTS_FILE"
+
+# This is a fork of typescript-json-schema, actively maintained as of March 2024:
+# https://github.com/vega/ts-json-schema-generator
+npx ts-json-schema-generator --unstable --no-top-ref --path "$DTS_FILE" --type MyType -e none > "$SCHEMA_FILE"
+
+# Alternative, not actively maintained as of March 2024:
+# https://github.com/YousefED/typescript-json-schema
+# npx typescript-json-schema --defaultProps --required "$DTS_FILE" MyType | tee "$SCHEMA_FILE" >&2
+
+./examples/json-schema-to-grammar.py "$SCHEMA_FILE"
@@ -20,11 +20,11 @@
    },
    "nixpkgs": {
      "locked": {
-        "lastModified": 1709237383,
-        "narHash": "sha256-cy6ArO4k5qTx+l5o+0mL9f5fa86tYUX3ozE1S+Txlds=",
+        "lastModified": 1710451336,
+        "narHash": "sha256-pP86Pcfu3BrAvRO7R64x7hs+GaQrjFes+mEPowCfkxY=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-        "rev": "1536926ef5621b09bba54035ae2bb6d806d72ac8",
+        "rev": "d691274a972b3165335d261cc4671335f5c67de9",
        "type": "github"
      },
      "original": {

ggml-alloc.c
@@ -61,7 +61,6 @@ static bool ggml_op_can_inplace(enum ggml_op op) {
    }
}

-// TODO: GGML_PAD ?
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
    assert(alignment && !(alignment & (alignment - 1))); // power of 2
    size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
@@ -69,25 +68,14 @@ static size_t aligned_offset(const void * buffer, size_t offset, size_t alignmen
}

// tallocr
-struct ggml_tallocr {
-    ggml_backend_buffer_t buffer;
-    void * base;
-    size_t alignment;
-    size_t offset;
-};
-
-ggml_tallocr_t ggml_tallocr_new(ggml_backend_buffer_t buffer) {
-    ggml_tallocr_t talloc = malloc(sizeof(struct ggml_tallocr));
-    if (talloc == NULL) {
-        return NULL;
-    }
-
+struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) {
    void * base = ggml_backend_buffer_get_base(buffer);
    size_t align = ggml_backend_buffer_get_alignment(buffer);

    assert(align && !(align & (align - 1))); // power of 2

-    *talloc = (struct ggml_tallocr) {
+    struct ggml_tallocr talloc = (struct ggml_tallocr) {
        /*.buffer    = */ buffer,
        /*.base      = */ base,
        /*.alignment = */ align,
@@ -96,11 +84,7 @@ ggml_tallocr_t ggml_tallocr_new(ggml_backend_buffer_t buffer) {
    return talloc;
}

-void ggml_tallocr_free(ggml_tallocr_t talloc) {
-    free(talloc);
-}
-
-void ggml_tallocr_alloc(ggml_tallocr_t talloc, struct ggml_tensor * tensor) {
+void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) {
    size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
    size = GGML_PAD(size, talloc->alignment);

@@ -354,12 +338,16 @@ struct hash_node {
    bool allocated;
};

-//
struct tensor_alloc {
    size_t offset;
    size_t size_max; // 0 = pre-allocated, unused, or view
};

+struct leaf_alloc {
+    int buffer_id;
+    struct tensor_alloc leaf;
+};
+
struct node_alloc {
    int buffer_id;
    struct tensor_alloc dst;
@@ -378,7 +366,7 @@ struct ggml_gallocr {
    struct node_alloc * node_allocs; // [n_nodes]
    int n_nodes;

-    struct tensor_alloc * leaf_allocs; // [n_leafs]
+    struct leaf_alloc * leaf_allocs; // [n_leafs]
    int n_leafs;
};

@@ -543,17 +531,28 @@ static int get_node_buffer_id(const int * node_buffer_ids, int i) {
    return node_buffer_ids ? node_buffer_ids[i] : 0;
}

-static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids) {
+static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    // clear hash tables
    memset(galloc->hash_set.keys, 0, galloc->hash_set.size * sizeof(struct ggml_tensor *));
    memset(galloc->hash_values,   0, galloc->hash_set.size * sizeof(struct hash_node));

+    // allocate leafs
+    // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
+    for (int i = 0; i < graph->n_leafs; i++) {
+        struct ggml_tensor * leaf = graph->leafs[i];
+        ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
+    }
+
    // count number of children and views
-    // allocate all graph inputs and leafs first to avoid overwriting them
+    // allocate other graph inputs and leafs first to avoid overwriting them
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];

-        if (ggml_is_view(node)) {
+        // TODO: better way to add external dependencies
+        // GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
+        // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
+        // itself is never used and should not be considered a dependency
+        if (ggml_is_view(node) && node->op != GGML_OP_NONE) {
            struct ggml_tensor * view_src = node->view_src;
            ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
        }
@@ -570,26 +569,13 @@ static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgr

            ggml_gallocr_hash_get(galloc, src)->n_children += 1;

-            // allocate explicit inputs and leafs
-            if (src->flags & GGML_TENSOR_FLAG_INPUT || src->op == GGML_OP_NONE) {
+            // allocate explicit inputs
+            if (src->flags & GGML_TENSOR_FLAG_INPUT) {
                ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
            }
        }
    }

-    // allocate the remaining leafs that are unused on the graph
-    // these are effectively static tensors that the application is not using in the graph, but may still want to allocate for other purposes
-    for (int i = 0; i < graph->n_leafs; i++) {
-        struct ggml_tensor * leaf = graph->leafs[i];
-        struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
-
-        if (hn->n_children == 0) {
-            assert(!hn->allocated);
-            // since buffer ids are only given for nodes, these leafs are always allocated in the first buffer
-            ggml_gallocr_allocate_node(galloc, leaf, 0);
-        }
-    }
-
    // allocate tensors
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
@@ -652,7 +638,7 @@ static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgr
    }
}

-bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids) {
+bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    size_t hash_size = graph->visited_hash_table.size;

    // initialize hash table
@@ -676,7 +662,7 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
    }

    // allocate in hash table
-    ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids);
+    ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);

    // set the node_allocs from the hash table
    if (galloc->n_nodes < graph->n_nodes) {
@@ -711,15 +697,16 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
    }
    if (galloc->n_leafs < graph->n_leafs) {
        free(galloc->leaf_allocs);
-        galloc->leaf_allocs = calloc(sizeof(struct tensor_alloc), graph->n_leafs);
+        galloc->leaf_allocs = calloc(sizeof(galloc->leaf_allocs[0]), graph->n_leafs);
        GGML_ASSERT(galloc->leaf_allocs != NULL);
    }
    galloc->n_leafs = graph->n_leafs;
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
-        galloc->leaf_allocs[i].offset = hn->offset;
-        galloc->leaf_allocs[i].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
+        galloc->leaf_allocs[i].buffer_id = hn->buffer_id;
+        galloc->leaf_allocs[i].leaf.offset = hn->offset;
+        galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
    }

    // reallocate buffers if needed
@@ -727,7 +714,8 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
        size_t cur_size = galloc->buffers[i] ? ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
        size_t new_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);

-        if (new_size > cur_size) {
+        // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
+        if (new_size > cur_size || galloc->buffers[i] == NULL) {
#ifndef NDEBUG
            fprintf(stderr, "%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
#endif
@@ -744,30 +732,30 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
}

bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) {
-    return ggml_gallocr_reserve_n(galloc, graph, NULL);
+    return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
}

-static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id, struct tensor_alloc * tensor_alloc) {
-    assert(node->data || node->view_src || ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], node) <= tensor_alloc->size_max);
+static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, int buffer_id, struct tensor_alloc * tensor_alloc) {
+    assert(tensor->data || tensor->view_src || ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);

-    if (node->view_src != NULL) {
-        if (node->buffer == NULL) {
+    if (tensor->view_src != NULL) {
+        if (tensor->buffer == NULL) {
            assert(tensor_alloc->offset == SIZE_MAX);
-            if (node->view_src->buffer == NULL) {
+            if (tensor->view_src->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
-            ggml_backend_view_init(galloc->buffers[buffer_id], node);
+            ggml_backend_view_init(galloc->buffers[buffer_id], tensor);
        }
    } else {
-        if (node->data == NULL) {
+        if (tensor->data == NULL) {
            assert(tensor_alloc->offset != SIZE_MAX);
-            assert(ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], node) <= tensor_alloc->size_max);
+            assert(ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
            void * base = ggml_backend_buffer_get_base(galloc->buffers[buffer_id]);
            void * addr = (char *)base + tensor_alloc->offset;
-            ggml_backend_tensor_alloc(galloc->buffers[buffer_id], node, addr);
+            ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr);
        } else {
-            if (node->buffer == NULL) {
+            if (tensor->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
@@ -843,13 +831,18 @@ bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph)

    // reset buffers
    for (int i = 0; i < galloc->n_buffers; i++) {
-        // zero size buffers are not allocated
        if (galloc->buffers[i] != NULL) {
            ggml_backend_buffer_reset(galloc->buffers[i]);
        }
    }

    // allocate the graph tensors from the previous assignments
+    // leafs
+    for (int i = 0; i < graph->n_leafs; i++) {
+        struct ggml_tensor * leaf = graph->leafs[i];
+        struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
+        ggml_gallocr_init_tensor(galloc, leaf, leaf_alloc->buffer_id, &leaf_alloc->leaf);
+    }
    // nodes
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
@@ -863,12 +856,6 @@ bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph)
        }
        ggml_gallocr_init_tensor(galloc, node, node_alloc->buffer_id, &node_alloc->dst);
    }
-    // leafs
-    for (int i = 0; i < graph->n_leafs; i++) {
-        struct ggml_tensor * leaf = graph->leafs[i];
-        struct tensor_alloc * leaf_alloc = &galloc->leaf_allocs[i];
-        ggml_gallocr_init_tensor(galloc, leaf, 0, leaf_alloc);
-    }

    return true;
}
@@ -900,12 +887,12 @@ static bool alloc_tensor_range(struct ggml_context * ctx,
        return false;
    }

-    struct ggml_tallocr * tallocr = ggml_tallocr_new(buffer);
+    struct ggml_tallocr tallocr = ggml_tallocr_new(buffer);

    for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) {
        if (t->data == NULL) {
            if (t->view_src == NULL) {
-                ggml_tallocr_alloc(tallocr, t);
+                ggml_tallocr_alloc(&tallocr, t);
            } else if (t->buffer == NULL) {
                ggml_backend_view_init(buffer, t);
            }
@@ -917,8 +904,6 @@ static bool alloc_tensor_range(struct ggml_context * ctx,
        }
    }

-    ggml_tallocr_free(tallocr);
-
    *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1));
    (*buffers)[(*n_buffers)++] = buffer;

ggml-alloc.h
@@ -11,11 +11,15 @@ typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
typedef struct ggml_backend * ggml_backend_t;

// Tensor allocator
-typedef struct ggml_tallocr * ggml_tallocr_t;
+struct ggml_tallocr {
+    ggml_backend_buffer_t buffer;
+    void * base;
+    size_t alignment;
+    size_t offset;
+};

-GGML_API ggml_tallocr_t ggml_tallocr_new(ggml_backend_buffer_t buffer);
-GGML_API void ggml_tallocr_free(ggml_tallocr_t talloc);
-GGML_API void ggml_tallocr_alloc(ggml_tallocr_t talloc, struct ggml_tensor * tensor);
+GGML_API struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer);
+GGML_API void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor);

// Graph allocator
/*
@@ -50,7 +54,11 @@ GGML_API void ggml_gallocr_free(ggml_gallocr_t galloc);
// not strictly required for single buffer usage: ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed
// returns false if the buffer allocation failed
GGML_API bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph);
-GGML_API bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids);
+GGML_API bool ggml_gallocr_reserve_n(
+    ggml_gallocr_t galloc,
+    struct ggml_cgraph * graph,
+    const int * node_buffer_ids,
+    const int * leaf_buffer_ids);

// automatic reallocation if the topology changes when using a single buffer
// returns false if using multiple buffers and a re-allocation is needed (call ggml_gallocr_reserve_n first to set the node buffers)
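Per the declarations above, ggml_tallocr is now a plain value instead of a heap-allocated handle, so there is no ggml_tallocr_free anymore, and ggml_gallocr_reserve_n grew a leaf_buffer_ids argument. A hedged usage sketch of the new signatures; the demo_* wrappers are made up, and the buffer, tensor, allocator and graph arguments are assumed to come from the caller:

    // Sketch only, based on the declarations above; error handling is omitted.
    #include "ggml-alloc.h"

    static void demo_alloc_one(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
        // the allocator now lives on the stack - nothing to free afterwards
        struct ggml_tallocr tallocr = ggml_tallocr_new(buffer);

        // allocation takes the allocator by pointer
        ggml_tallocr_alloc(&tallocr, tensor);
    }

    static bool demo_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
        // passing NULL for both id arrays keeps the single-buffer behaviour,
        // mirroring what ggml_gallocr_reserve does internally in the diff above
        return ggml_gallocr_reserve_n(galloc, graph, /*node_buffer_ids =*/ NULL, /*leaf_buffer_ids =*/ NULL);
    }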
@@ -86,31 +86,48 @@ extern "C" {
        // (optional) asynchronous tensor data access
        void (*GGML_CALL set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
        void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
-        bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst);
+        bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst);

        // (optional) complete all pending operations
        void (*GGML_CALL synchronize)(ggml_backend_t backend);

-        // compute graph with a plan
+        // compute graph with a plan (not used currently)
        ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
        void (*GGML_CALL graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-        void (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);

+        // compute graph with a plan
+        enum ggml_status (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
        // compute graph without a plan (async)
-        bool (*GGML_CALL graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+        enum ggml_status (*GGML_CALL graph_compute) (ggml_backend_t backend, struct ggml_cgraph * cgraph);

        // check if the backend supports an operation
        bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);

+        // check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
+        // these should be expensive operations with large batch sizes that may benefit from running on this backend
+        // even if the weight has to be copied from the CPU temporarily
+        bool (*GGML_CALL offload_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+
+        // (optional) event synchronization
+        ggml_backend_event_t (*GGML_CALL event_new)         (ggml_backend_t backend);
+        void                 (*GGML_CALL event_free)        (ggml_backend_event_t event);
+        void                 (*GGML_CALL event_record)      (ggml_backend_event_t event);
+        void                 (*GGML_CALL event_wait)        (ggml_backend_t backend, ggml_backend_event_t event);
+        void                 (*GGML_CALL event_synchronize) (ggml_backend_event_t event);
    };

    struct ggml_backend {
        ggml_guid_t guid;

        struct ggml_backend_i iface;

        ggml_backend_context_t context;
    };

+    struct ggml_backend_event {
+        ggml_backend_t backend;
+        void * context;
+    };
+
    //
    // Backend registry
    //

ggml-backend.c (diff too large to display)
@@ -9,6 +9,7 @@ extern "C" {

    typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t;
    typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
+    typedef struct ggml_backend_event * ggml_backend_event_t;
    typedef struct ggml_backend * ggml_backend_t;
    typedef void * ggml_backend_graph_plan_t;

@@ -66,16 +67,30 @@ extern "C" {

    GGML_API void ggml_backend_synchronize(ggml_backend_t backend);

-    GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+    GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph);

    GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-    GGML_API void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-    GGML_API bool ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
-    GGML_API bool ggml_backend_supports_op (ggml_backend_t backend, const struct ggml_tensor * op);
+    GGML_API enum ggml_status ggml_backend_graph_plan_compute (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+    GGML_API enum ggml_status ggml_backend_graph_compute      (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+    GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+    GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op);
+    GGML_API bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op);

    // tensor copy between different backends
    GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);
-    GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); // automatic fallback to sync copy
+
+    // asynchronous copy
+    // the copy is performed after all the currently queued operations in backend_src
+    // backend_dst will wait for the copy to complete before performing other operations
+    // automatic fallback to sync copy if async is not supported
+    GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst);
+
+    // events
+    GGML_API ggml_backend_event_t ggml_backend_event_new        (ggml_backend_t backend);
+    GGML_API void                 ggml_backend_event_free       (ggml_backend_event_t event);
+    GGML_API void                 ggml_backend_event_record     (ggml_backend_event_t event);
+    GGML_API void                 ggml_backend_event_synchronize(ggml_backend_event_t event);
+    GGML_API void                 ggml_backend_event_wait       (ggml_backend_t backend, ggml_backend_event_t event); // wait async on event

    //
    // CPU backend
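The new event and async-compute entry points above suggest a CUDA-event-like model: queue work asynchronously, record an event, and wait on it from the host or from another backend. This is only a sketch based on those declarations; the exact synchronization semantics live in ggml-backend.c, whose diff is not shown here, and everything named demo_* is made up:

    // Hedged sketch of the async compute + event API declared above.
    #include "ggml-backend.h"

    static void demo_pipeline(ggml_backend_t backend_gpu, ggml_backend_t backend_cpu,
                              struct ggml_cgraph * graph,
                              struct ggml_tensor * src, struct ggml_tensor * dst) {
        ggml_backend_event_t event = ggml_backend_event_new(backend_gpu);

        // queue the graph on the GPU backend without blocking the calling thread
        ggml_backend_graph_compute_async(backend_gpu, graph);

        // copy a result tensor to the CPU backend after the queued work in backend_gpu
        ggml_backend_tensor_copy_async(backend_gpu, backend_cpu, src, dst);

        // record the point we want to wait for, then block the host until it is reached
        ggml_backend_event_record(event);
        ggml_backend_event_synchronize(event);

        ggml_backend_event_free(event);
    }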
@@ -122,27 +137,31 @@ extern "C" {
    /*
    Example usage:

-        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, num_backends);
-        // sched is initialized with measure allocators and cannot be used until allocated with a measure graph
-
-        // initialize buffers from a measure graph
-        measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed
-
-        // in build_graph:
-        build_graph(...) {
+        // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be asigned
+        // preferrably to run on the same backend as the buffer
+        ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+
+        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);
+
+        // initialize buffers from a max size graph (optional)
+        reserve_graph = build_graph(sched, max_batch_size);
+
        // manually assign nodes to a backend (optional, should not be needed in most cases)
        struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
-        ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
-        }
+        ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);

-        // allocate backend buffers from measure graph
-        ggml_backend_sched_init_measure(sched, measure_graph);
-
-        // the scheduler is now ready to compute graphs
+        ggml_backend_sched_reserve(sched, reserve_graph);

        // compute
        graph = build_graph(sched);
        ggml_backend_sched_graph_compute(sched, graph);
+
+        // if there are graph inputs:
+        ggml_backend_sched_reset(sched);
+        ggml_backend_sched_alloc_graph(sched, graph);
+        ggml_backend_tensor_set(input_tensor, ...);
+        ggml_backend_sched_graph_compute(sched, graph);
+    }
    */

    struct ggml_backend_sched;
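The updated example in the comment above corresponds to roughly the following flow. This is a hedged sketch: build_demo_graph() and the backend handles are assumed to be supplied by the application, and GGML_DEFAULT_GRAPH_SIZE is the constant referenced in the comment.

    // Sketch of the scheduler flow described in the comment above; demo_* names are made up.
    #include "ggml-backend.h"

    struct ggml_cgraph * build_demo_graph(int n_batch); // assumed application helper

    static void demo_sched(ggml_backend_t backend_gpu, ggml_backend_t backend_cpu) {
        ggml_backend_t backends[2] = { backend_gpu, backend_cpu };

        // no custom buffer types, default graph size, no parallel copies
        ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, 2, GGML_DEFAULT_GRAPH_SIZE, false);

        // optional: reserve buffers for the largest graph we expect to run
        struct ggml_cgraph * reserve_graph = build_demo_graph(/*n_batch =*/ 512);
        ggml_backend_sched_reserve(sched, reserve_graph);

        // per iteration: reset, allocate, set inputs, compute
        struct ggml_cgraph * graph = build_demo_graph(/*n_batch =*/ 32);
        ggml_backend_sched_reset(sched);
        ggml_backend_sched_alloc_graph(sched, graph);
        // ggml_backend_tensor_set(input_tensor, data, 0, size);  // fill graph inputs here
        ggml_backend_sched_graph_compute(sched, graph);

        ggml_backend_sched_free(sched);
    }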
@@ -157,20 +176,26 @@ extern "C" {
     typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
 
     // Initialize a backend scheduler
-    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size);
+    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
 
     // Initialize backend buffers from a measure graph
     GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
 
     // Get the number of splits of the last graph
     GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);
+    GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched);
 
     GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend);
 
-    GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
-    GGML_API ggml_backend_t ggml_backend_sched_get_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);
+    GGML_API void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
+    GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);
 
     // Allocate and compute graph on the backend scheduler
-    GGML_API bool ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched);
 
     // Reset all assignments and allocators - must be called before changing the node backends
     GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);
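The declarations above replace the old measure-graph flow (ggml_backend_sched_init_measure) with ggml_backend_sched_reserve / ggml_backend_sched_alloc_graph and add an async compute path. A minimal sketch, not part of the diff, assuming a hypothetical build_graph helper plus caller-provided backends and input tensor; error handling is omitted:

    #include "ggml.h"
    #include "ggml-backend.h"

    static void run_once(ggml_backend_t * backends, int n_backends,
                         struct ggml_cgraph * (*build_graph)(void),
                         struct ggml_tensor * input, const void * data, size_t nbytes) {
        // default buffer types, one graph copy, no parallel backend execution
        ggml_backend_sched_t sched =
            ggml_backend_sched_new(backends, NULL, n_backends, GGML_DEFAULT_GRAPH_SIZE, false);

        // reserve compute buffers from a worst-case graph
        ggml_backend_sched_reserve(sched, build_graph());

        // per-iteration flow when the graph has inputs
        struct ggml_cgraph * graph = build_graph();
        ggml_backend_sched_reset(sched);                 // clear previous assignments
        ggml_backend_sched_alloc_graph(sched, graph);    // allocate before setting inputs
        ggml_backend_tensor_set(input, data, 0, nbytes); // the input tensor now has a buffer

        enum ggml_status status = ggml_backend_sched_graph_compute_async(sched, graph);
        ggml_backend_sched_synchronize(sched);           // wait for the async compute to finish
        (void) status;

        ggml_backend_sched_free(sched);
    }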
 ggml-common.h | 1830 lines (new file; diff suppressed because it is too large)
 ggml-cuda.cu  | 5095 lines changed (diff suppressed because it is too large)
 ggml-cuda.h   |   21 lines changed
@@ -17,29 +17,17 @@ extern "C" {
 
 #define GGML_CUDA_MAX_DEVICES 16
 
-// Always success. To check if CUDA is actually loaded, use `ggml_cublas_loaded`.
-GGML_API GGML_CALL void ggml_init_cublas(void);
-
-// Returns `true` if there are available CUDA devices and cublas loads successfully; otherwise, it returns `false`.
-GGML_API GGML_CALL bool ggml_cublas_loaded(void);
-
-GGML_API GGML_CALL void * ggml_cuda_host_malloc(size_t size);
-GGML_API GGML_CALL void ggml_cuda_host_free(void * ptr);
-
-GGML_API GGML_CALL bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-GGML_API GGML_CALL bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);
-
-GGML_API GGML_CALL int ggml_cuda_get_device_count(void);
-GGML_API GGML_CALL void ggml_cuda_get_device_description(int device, char * description, size_t description_size);
-
 // backend API
 GGML_API GGML_CALL ggml_backend_t ggml_backend_cuda_init(int device);
 
 GGML_API GGML_CALL bool ggml_backend_is_cuda(ggml_backend_t backend);
 
+// device buffer
 GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
 
 // split tensor buffer that splits matrices by rows across multiple devices
 GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split);
 
 // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
 GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
 
@@ -47,6 +35,9 @@ GGML_API GGML_CALL int ggml_backend_cuda_get_device_count(void);
 GGML_API GGML_CALL void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
 GGML_API GGML_CALL void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
 
+GGML_API GGML_CALL bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
+GGML_API GGML_CALL void ggml_backend_cuda_unregister_host_buffer(void * buffer);
+
 #ifdef __cplusplus
 }
 #endif
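The second hunk adds host-buffer registration to the CUDA backend. A minimal sketch, not part of the diff, of how a caller might pin an existing allocation; the buffer size and usage here are made up, and registration is allowed to fail:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include "ggml-backend.h"
    #include "ggml-cuda.h"

    int main(void) {
        if (ggml_backend_cuda_get_device_count() == 0) {
            fprintf(stderr, "no CUDA devices\n");
            return 1;
        }

        ggml_backend_t backend = ggml_backend_cuda_init(0); // device 0
        if (backend == NULL) {
            fprintf(stderr, "failed to initialize the CUDA backend\n");
            return 1;
        }

        size_t size = 32u * 1024u * 1024u;
        void * host_buf = malloc(size);

        // page-lock an existing host allocation so tensor set/get can use faster
        // pinned-memory copies; plain copies are used if this returns false
        bool pinned = ggml_backend_cuda_register_host_buffer(host_buf, size);

        // ... stage tensor data through host_buf here ...

        if (pinned) {
            ggml_backend_cuda_unregister_host_buffer(host_buf);
        }
        free(host_buf);
        ggml_backend_free(backend);
        return 0;
    }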
@@ -53,26 +53,30 @@ extern "C" {
 //
 #include <arm_neon.h>
 
+typedef __fp16 ggml_fp16_internal_t;
+
 #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
 #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
 
 #define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
 
 static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
-    __fp16 tmp;
+    ggml_fp16_internal_t tmp;
     memcpy(&tmp, &h, sizeof(ggml_fp16_t));
     return (float)tmp;
 }
 
 static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
     ggml_fp16_t res;
-    __fp16 tmp = f;
+    ggml_fp16_internal_t tmp = f;
     memcpy(&res, &tmp, sizeof(ggml_fp16_t));
     return res;
 }
 
 #else
 
+typedef uint16_t ggml_fp16_internal_t;
+
 #ifdef __wasm_simd128__
 #include <wasm_simd128.h>
 #else
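The change above routes both conversions through a ggml_fp16_internal_t typedef (__fp16 on ARM NEON, uint16_t elsewhere). The standalone sketch below, not part of the diff and not using the ggml headers, shows the same memcpy-based bit reinterpretation; it assumes __fp16 is available only when __ARM_NEON is defined:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #if defined(__ARM_NEON)
    typedef __fp16   fp16_internal_t; // hardware half-precision type
    typedef uint16_t fp16_bits_t;     // storage type, same role as ggml_fp16_t

    static inline float fp16_to_fp32(fp16_bits_t h) {
        fp16_internal_t tmp;
        memcpy(&tmp, &h, sizeof(h)); // reinterpret the stored 16 bits as __fp16
        return (float) tmp;
    }

    static inline fp16_bits_t fp32_to_fp16(float f) {
        fp16_internal_t tmp = (fp16_internal_t) f; // hardware rounding to half precision
        fp16_bits_t res;
        memcpy(&res, &tmp, sizeof(res));
        return res;
    }

    int main(void) {
        float x = 3.14159f;
        fp16_bits_t h = fp32_to_fp16(x);
        printf("%f -> 0x%04x -> %f\n", x, (unsigned) h, fp16_to_fp32(h));
        return 0;
    }
    #else
    int main(void) {
        puts("__fp16 is not available on this target");
        return 0;
    }
    #endif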
@@ -1927,10 +1927,10 @@ static ggml_backend_buffer_type_t ggml_backend_kompute_get_default_buffer_type(ggml_backend_t backend) {
     return ggml_backend_kompute_buffer_type(ctx->device);
 }
 
-static bool ggml_backend_kompute_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+static ggml_status ggml_backend_kompute_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
     auto * ctx = static_cast<ggml_kompute_context *>(backend->context);
     ggml_vk_graph_compute(ctx, cgraph);
-    return true;
+    return GGML_STATUS_SUCCESS;
 }
 
 static bool ggml_backend_kompute_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
@@ -1951,6 +1951,12 @@ static struct ggml_backend_i kompute_backend_i = {
     /* .graph_plan_compute = */ NULL,
     /* .graph_compute = */ ggml_backend_kompute_graph_compute,
     /* .supports_op = */ ggml_backend_kompute_supports_op,
+    /* .offload_op = */ NULL,
+    /* .event_new = */ NULL,
+    /* .event_free = */ NULL,
+    /* .event_record = */ NULL,
+    /* .event_wait = */ NULL,
+    /* .event_synchronize = */ NULL,
 };
 
 static ggml_guid_t ggml_backend_kompute_guid() {
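With graph compute now returning enum ggml_status instead of bool, callers can distinguish failure modes. A minimal sketch, not part of the diff: GGML_STATUS_SUCCESS appears in the hunk above, while the other enumerators are assumed from ggml.h at the same revision.

    #include <stdbool.h>
    #include <stdio.h>
    #include "ggml.h"
    #include "ggml-backend.h"

    static bool compute_or_report(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
        enum ggml_status status = ggml_backend_sched_graph_compute(sched, graph);
        switch (status) {
            case GGML_STATUS_SUCCESS:
                return true;
            case GGML_STATUS_ABORTED: // assumed enumerator: a callback aborted the graph
                fprintf(stderr, "graph compute aborted\n");
                return false;
            default:
                fprintf(stderr, "graph compute failed (status %d)\n", (int) status);
                return false;
        }
    }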
Some files were not shown because too many files have changed in this diff.