Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-30 21:34:36 +00:00)

Commit 3773e1afe7: Merge branch 'master' of https://github.com/ggerganov/llama.cpp into ceb/nomic-vulkan
@@ -15,6 +15,7 @@ RUN apt-get update && \
     apt-get install -y build-essential python3 python3-pip git

 COPY requirements.txt requirements.txt
+COPY requirements requirements

 RUN pip install --upgrade pip setuptools wheel \
     && pip install -r requirements.txt
@@ -24,6 +24,7 @@ ARG ROCM_DOCKER_ARCH=\
     gfx1102

 COPY requirements.txt requirements.txt
+COPY requirements requirements

 RUN pip install --upgrade pip setuptools wheel \
     && pip install -r requirements.txt
@@ -6,6 +6,7 @@ RUN apt-get update && \
     apt-get install -y build-essential python3 python3-pip git

 COPY requirements.txt requirements.txt
+COPY requirements requirements

 RUN pip install --upgrade pip setuptools wheel \
     && pip install -r requirements.txt
@@ -24,6 +24,7 @@ ARG ROCM_DOCKER_ARCH=\
     gfx1102

 COPY requirements.txt requirements.txt
+COPY requirements requirements

 RUN pip install --upgrade pip setuptools wheel \
     && pip install -r requirements.txt
.devops/nix/apps.nix (new file, 22 lines)
@@ -0,0 +1,22 @@
{
  perSystem =
    { config, lib, ... }:
    {
      apps =
        let
          inherit (config.packages) default;
          binaries = [
            "llama"
            "llama-embedding"
            "llama-server"
            "quantize"
            "train-text-from-scratch"
          ];
          mkApp = name: {
            type = "app";
            program = "${default}/bin/${name}";
          };
        in
        lib.genAttrs binaries mkApp;
    };
}
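The binaries listed above become flake apps; how the flake wires them into its outputs is outside this diff, but assuming the usual `apps` attribute, running one could look like this sketch (hypothetical usage, not taken from the commit):

    # run the bundled server binary via the flake in the current directory
    nix run .#llama-server

    # or the main CLI, which package.nix installs as "llama"
    nix run .#llama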
.devops/nix/devshells.nix (new file, 13 lines)
@@ -0,0 +1,13 @@
{
  perSystem =
    { config, lib, ... }:
    {
      devShells =
        lib.concatMapAttrs
          (name: package: {
            ${name} = package.passthru.shell;
            ${name + "-extra"} = package.passthru.shell-extra;
          })
          config.packages;
    };
}
.devops/nix/jetson-support.nix (new file, 39 lines)
@@ -0,0 +1,39 @@
{ inputs, ... }:
{
  perSystem =
    {
      config,
      system,
      lib,
      pkgsCuda,
      ...
    }:
    {
      legacyPackages =
        let
          caps.llamaPackagesXavier = "7.2";
          caps.llamaPackagesOrin = "8.7";
          caps.llamaPackagesTX2 = "6.2";
          caps.llamaPackagesNano = "5.3";

          pkgsFor =
            cap:
            import inputs.nixpkgs {
              inherit system;
              config = {
                cudaSupport = true;
                cudaCapabilities = [ cap ];
                cudaEnableForwardCompat = false;
                inherit (pkgsCuda.config) allowUnfreePredicate;
              };
            };
        in
        builtins.mapAttrs (name: cap: (pkgsFor cap).callPackage ./scope.nix { }) caps;

      packages = lib.optionalAttrs (system == "aarch64-linux") {
        jetson-xavier = config.legacyPackages.llamaPackagesXavier.llama-cpp;
        jetson-orin = config.legacyPackages.llamaPackagesOrin.llama-cpp;
        jetson-nano = config.legacyPackages.llamaPackagesNano.llama-cpp;
      };
    };
}
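Assuming the flake exposes the `packages` set defined above, one of the Jetson builds could be requested roughly as follows (a sketch; the flake reference and host setup are assumptions, and these outputs only exist on aarch64-linux):

    # hypothetical: build the CUDA-capability-8.7 package for Jetson Orin
    nix build .#jetson-orin
    # the other variants defined above are jetson-xavier and jetson-nano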
.devops/nix/nixpkgs-instances.nix (new file, 35 lines)
@@ -0,0 +1,35 @@
{ inputs, ... }:
{
  # The _module.args definitions are passed on to modules as arguments. E.g.
  # the module `{ pkgs ... }: { /* config */ }` implicitly uses
  # `_module.args.pkgs` (defined in this case by flake-parts).
  perSystem =
    { system, ... }:
    {
      _module.args = {
        pkgsCuda = import inputs.nixpkgs {
          inherit system;
          # Ensure dependencies use CUDA consistently (e.g. that openmpi, ucc,
          # and ucx are built with CUDA support)
          config.cudaSupport = true;
          config.allowUnfreePredicate =
            p:
            builtins.all
              (
                license:
                license.free
                || builtins.elem license.shortName [
                  "CUDA EULA"
                  "cuDNN EULA"
                ]
              )
              (p.meta.licenses or [ p.meta.license ]);
        };
        # Ensure dependencies use ROCm consistently
        pkgsRocm = import inputs.nixpkgs {
          inherit system;
          config.rocmSupport = true;
        };
      };
    };
}
.devops/nix/package.nix (new file, 265 lines)
@@ -0,0 +1,265 @@
{
  lib,
  config,
  stdenv,
  mkShell,
  cmake,
  ninja,
  pkg-config,
  git,
  python3,
  mpi,
  openblas, # TODO: Use the generic `blas` so users could switch between alternative implementations
  cudaPackages,
  darwin,
  rocmPackages,
  clblast,
  useBlas ? builtins.all (x: !x) [
    useCuda
    useMetalKit
    useOpenCL
    useRocm
  ],
  useCuda ? config.cudaSupport,
  useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin && !useOpenCL,
  useMpi ? false, # Increases the runtime closure size by ~700M
  useOpenCL ? false,
  useRocm ? config.rocmSupport,
  llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake
}@inputs:

let
  inherit (lib)
    cmakeBool
    cmakeFeature
    optionals
    strings
    versionOlder
    ;

  # It's necessary to consistently use backendStdenv when building with CUDA support,
  # otherwise we get libstdc++ errors downstream.
  stdenv = throw "Use effectiveStdenv instead";
  effectiveStdenv = if useCuda then cudaPackages.backendStdenv else inputs.stdenv;

  suffices =
    lib.optionals useBlas [ "BLAS" ]
    ++ lib.optionals useCuda [ "CUDA" ]
    ++ lib.optionals useMetalKit [ "MetalKit" ]
    ++ lib.optionals useMpi [ "MPI" ]
    ++ lib.optionals useOpenCL [ "OpenCL" ]
    ++ lib.optionals useRocm [ "ROCm" ];

  pnameSuffix =
    strings.optionalString (suffices != [ ])
      "-${strings.concatMapStringsSep "-" strings.toLower suffices}";
  descriptionSuffix =
    strings.optionalString (suffices != [ ])
      ", accelerated with ${strings.concatStringsSep ", " suffices}";

  # TODO: package the Python in this repository in a Nix-like way.
  # It'd be nice to migrate to buildPythonPackage, as well as ensure this repo
  # is PEP 517-compatible, and ensure the correct .dist-info is generated.
  # https://peps.python.org/pep-0517/
  llama-python = python3.withPackages (
    ps: [
      ps.numpy
      ps.sentencepiece
    ]
  );

  # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime
  llama-python-extra = python3.withPackages (
    ps: [
      ps.numpy
      ps.sentencepiece
      ps.torchWithoutCuda
      ps.transformers
    ]
  );

  # apple_sdk is supposed to choose sane defaults, no need to handle isAarch64
  # separately
  darwinBuildInputs =
    with darwin.apple_sdk.frameworks;
    [
      Accelerate
      CoreVideo
      CoreGraphics
    ]
    ++ optionals useMetalKit [ MetalKit ];

  cudaBuildInputs = with cudaPackages; [
    cuda_cccl.dev # <nv/target>

    # A temporary hack for reducing the closure size, remove once cudaPackages
    # have stopped using lndir: https://github.com/NixOS/nixpkgs/issues/271792
    cuda_cudart.dev
    cuda_cudart.lib
    cuda_cudart.static
    libcublas.dev
    libcublas.lib
    libcublas.static
  ];

  rocmBuildInputs = with rocmPackages; [
    clr
    hipblas
    rocblas
  ];
in

effectiveStdenv.mkDerivation (
  finalAttrs: {
    pname = "llama-cpp${pnameSuffix}";
    version = llamaVersion;

    src = lib.cleanSourceWith {
      filter =
        name: type:
        !(builtins.any (_: _) [
          (lib.hasSuffix ".nix" name) # Ignore *.nix files when computing outPaths
          (name == "README.md") # Ignore *.md changes whe computing outPaths
          (lib.hasPrefix "." name) # Skip hidden files and directories
        ]);
      src = lib.cleanSource ../../.;
    };

    postPatch = ''
      substituteInPlace ./ggml-metal.m \
        --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"

      # TODO: Package up each Python script or service appropriately.
      # If we were to migrate to buildPythonPackage and prepare the `pyproject.toml`,
      # we could make those *.py into setuptools' entrypoints
      substituteInPlace ./*.py --replace "/usr/bin/env python" "${llama-python}/bin/python"
    '';

    nativeBuildInputs =
      [
        cmake
        ninja
        pkg-config
        git
      ]
      ++ optionals useCuda [
        cudaPackages.cuda_nvcc

        # TODO: Replace with autoAddDriverRunpath
        # once https://github.com/NixOS/nixpkgs/pull/275241 has been merged
        cudaPackages.autoAddOpenGLRunpathHook
      ];

    buildInputs =
      optionals effectiveStdenv.isDarwin darwinBuildInputs
      ++ optionals useCuda cudaBuildInputs
      ++ optionals useMpi [ mpi ]
      ++ optionals useOpenCL [ clblast ]
      ++ optionals useRocm rocmBuildInputs;

    cmakeFlags =
      [
        (cmakeBool "LLAMA_NATIVE" true)
        (cmakeBool "LLAMA_BUILD_SERVER" true)
        (cmakeBool "BUILD_SHARED_LIBS" true)
        (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
        (cmakeBool "LLAMA_BLAS" useBlas)
        (cmakeBool "LLAMA_CLBLAST" useOpenCL)
        (cmakeBool "LLAMA_CUBLAS" useCuda)
        (cmakeBool "LLAMA_HIPBLAS" useRocm)
        (cmakeBool "LLAMA_METAL" useMetalKit)
        (cmakeBool "LLAMA_MPI" useMpi)
      ]
      ++ optionals useCuda [
        (
          with cudaPackages.flags;
          cmakeFeature "CMAKE_CUDA_ARCHITECTURES" (
            builtins.concatStringsSep ";" (map dropDot cudaCapabilities)
          )
        )
      ]
      ++ optionals useRocm [
        (cmakeFeature "CMAKE_C_COMPILER" "hipcc")
        (cmakeFeature "CMAKE_CXX_COMPILER" "hipcc")

        # Build all targets supported by rocBLAS. When updating search for TARGET_LIST_ROCM
        # in https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/CMakeLists.txt
        # and select the line that matches the current nixpkgs version of rocBLAS.
        # Should likely use `rocmPackages.clr.gpuTargets`.
        "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
      ]
      ++ optionals useMetalKit [ (lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1") ]
      ++ optionals useBlas [ (lib.cmakeFeature "LLAMA_BLAS_VENDOR" "OpenBLAS") ];

    # TODO(SomeoneSerge): It's better to add proper install targets at the CMake level,
    # if they haven't been added yet.
    postInstall = ''
      mv $out/bin/main $out/bin/llama
      mv $out/bin/server $out/bin/llama-server
      mkdir -p $out/include
      cp $src/llama.h $out/include/
    '';

    # Define the shells here, but don't add in the inputsFrom to avoid recursion.
    passthru = {
      inherit
        useBlas
        useCuda
        useMetalKit
        useMpi
        useOpenCL
        useRocm
        ;

      shell = mkShell {
        name = "shell-${finalAttrs.finalPackage.name}";
        description = "contains numpy and sentencepiece";
        buildInputs = [ llama-python ];
        inputsFrom = [ finalAttrs.finalPackage ];
      };

      shell-extra = mkShell {
        name = "shell-extra-${finalAttrs.finalPackage.name}";
        description = "contains numpy, sentencepiece, torchWithoutCuda, and transformers";
        buildInputs = [ llama-python-extra ];
        inputsFrom = [ finalAttrs.finalPackage ];
      };
    };

    meta = {
      # Configurations we don't want even the CI to evaluate. Results in the
      # "unsupported platform" messages. This is mostly a no-op, because
      # cudaPackages would've refused to evaluate anyway.
      badPlatforms = optionals (useCuda || useOpenCL) lib.platforms.darwin;

      # Configurations that are known to result in build failures. Can be
      # overridden by importing Nixpkgs with `allowBroken = true`.
      broken = (useMetalKit && !effectiveStdenv.isDarwin);

      description = "Inference of LLaMA model in pure C/C++${descriptionSuffix}";
      homepage = "https://github.com/ggerganov/llama.cpp/";
      license = lib.licenses.mit;

      # Accommodates `nix run` and `lib.getExe`
      mainProgram = "llama";

      # These people might respond, on the best effort basis, if you ping them
      # in case of Nix-specific regressions or for reviewing Nix-specific PRs.
      # Consider adding yourself to this list if you want to ensure this flake
      # stays maintained and you're willing to invest your time. Do not add
      # other people without their consent. Consider removing people after
      # they've been unreachable for long periods of time.

      # Note that lib.maintainers is defined in Nixpkgs, but you may just add
      # an attrset following the same format as in
      # https://github.com/NixOS/nixpkgs/blob/f36a80e54da29775c78d7eff0e628c2b4e34d1d7/maintainers/maintainer-list.nix
      maintainers = with lib.maintainers; [
        philiptaron
        SomeoneSerge
      ];

      # Extend `badPlatforms` instead
      platforms = lib.platforms.all;
    };
  }
)
.devops/nix/scope.nix (new file, 12 lines)
@@ -0,0 +1,12 @@
{
  lib,
  newScope,
  llamaVersion ? "0.0.0",
}:

lib.makeScope newScope (
  self: {
    inherit llamaVersion;
    llama-cpp = self.callPackage ./package.nix { };
  }
)
.github/ISSUE_TEMPLATE/bug.md (177 lines changed)
@@ -6,179 +6,4 @@ assignees: ''

 ---

-# Prerequisites
-
-Please answer the following questions for yourself before submitting an issue.
-
-- [ ] I am running the latest code. Development is very rapid so there are no tagged versions as of now.
-- [ ] I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md).
-- [ ] I [searched using keywords relevant to my issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/filtering-and-searching-issues-and-pull-requests) to make sure that I am creating a new issue that is not already open (or closed).
-- [ ] I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new bug or useful enhancement to share.
-
-# Expected Behavior
-
-Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do.
-
-# Current Behavior
-
-Please provide a detailed written description of what `llama.cpp` did, instead.
-
-# Environment and Context
-
-Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except for under certain specific conditions.
-
-* Physical (or virtual) hardware you are using, e.g. for Linux:
-
-`$ lscpu`
-
-* Operating System, e.g. for Linux:
-
-`$ uname -a`
-
-* SDK version, e.g. for Linux:
-
-```
-$ python3 --version
-$ make --version
-$ g++ --version
-```
-
-# Failure Information (for bugs)
-
-Please help provide information about the failure / bug.
-
-# Steps to Reproduce
-
-Please provide detailed steps for reproducing the issue. We are not sitting in front of your screen, so the more detail the better.
-
-1. step 1
-2. step 2
-3. step 3
-4. etc.
-
-# Failure Logs
-
-Please include any relevant log snippets or files. If it works under one configuration but not under another, please provide logs for both configurations and their corresponding outputs so it is easy to see where behavior changes.
-
-Also, please try to **avoid using screenshots** if at all possible. Instead, copy/paste the console output and use [Github's markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) to cleanly format your logs for easy readability.
-
-Example environment info:
-```
-llama.cpp$ git log | head -1
-commit 2af23d30434a677c6416812eea52ccc0af65119c
-
-llama.cpp$ lscpu | egrep "AMD|Flags"
-Vendor ID: AuthenticAMD
-Model name: AMD Ryzen Threadripper 1950X 16-Core Processor
-Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid amd_dcm aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb hw_pstate ssbd ibpb vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 xsaves clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif overflow_recov succor smca sme sev
-Virtualization: AMD-V
-
-llama.cpp$ python3 --version
-Python 3.10.9
-
-llama.cpp$ pip list | egrep "torch|numpy|sentencepiece"
-numpy 1.24.2
-numpydoc 1.5.0
-sentencepiece 0.1.97
-torch 1.13.1
-torchvision 0.14.1
-
-llama.cpp$ make --version | head -1
-GNU Make 4.3
-
-$ md5sum ./models/65B/ggml-model-q4_0.bin
-dbdd682cce80e2d6e93cefc7449df487 ./models/65B/ggml-model-q4_0.bin
-```
-
-Example run with the Linux command [perf](https://www.brendangregg.com/perf.html)
-```
-llama.cpp$ perf stat ./main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p "Please close your issue when it has been answered."
-main: seed = 1679149377
-llama_model_load: loading model from './models/65B/ggml-model-q4_0.bin' - please wait ...
-llama_model_load: n_vocab = 32000
-llama_model_load: n_ctx = 512
-llama_model_load: n_embd = 8192
-llama_model_load: n_mult = 256
-llama_model_load: n_head = 64
-llama_model_load: n_layer = 80
-llama_model_load: n_rot = 128
-llama_model_load: f16 = 2
-llama_model_load: n_ff = 22016
-llama_model_load: n_parts = 8
-llama_model_load: ggml ctx size = 41477.73 MB
-llama_model_load: memory_size = 2560.00 MB, n_mem = 40960
-llama_model_load: loading model part 1/8 from './models/65B/ggml-model-q4_0.bin'
-llama_model_load: .......................................................................................... done
-llama_model_load: model size = 4869.09 MB / num tensors = 723
-llama_model_load: loading model part 2/8 from './models/65B/ggml-model-q4_0.bin.1'
-llama_model_load: .......................................................................................... done
-llama_model_load: model size = 4869.09 MB / num tensors = 723
-llama_model_load: loading model part 3/8 from './models/65B/ggml-model-q4_0.bin.2'
-llama_model_load: .......................................................................................... done
-llama_model_load: model size = 4869.09 MB / num tensors = 723
-llama_model_load: loading model part 4/8 from './models/65B/ggml-model-q4_0.bin.3'
-llama_model_load: .......................................................................................... done
-llama_model_load: model size = 4869.09 MB / num tensors = 723
-llama_model_load: loading model part 5/8 from './models/65B/ggml-model-q4_0.bin.4'
-llama_model_load: .......................................................................................... done
-llama_model_load: model size = 4869.09 MB / num tensors = 723
-llama_model_load: loading model part 6/8 from './models/65B/ggml-model-q4_0.bin.5'
-llama_model_load: .......................................................................................... done
-llama_model_load: model size = 4869.09 MB / num tensors = 723
-llama_model_load: loading model part 7/8 from './models/65B/ggml-model-q4_0.bin.6'
-llama_model_load: .......................................................................................... done
-llama_model_load: model size = 4869.09 MB / num tensors = 723
-llama_model_load: loading model part 8/8 from './models/65B/ggml-model-q4_0.bin.7'
-llama_model_load: .......................................................................................... done
-llama_model_load: model size = 4869.09 MB / num tensors = 723
-
-system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
-
-main: prompt: 'Please close your issue when it has been answered.'
-main: number of tokens in prompt = 11
-1 -> ''
-12148 -> 'Please'
-3802 -> ' close'
-596 -> ' your'
-2228 -> ' issue'
-746 -> ' when'
-372 -> ' it'
-756 -> ' has'
-1063 -> ' been'
-7699 -> ' answered'
-29889 -> '.'
-
-sampling parameters: temp = 0.800000, top_k = 40, top_p = 0.950000, repeat_last_n = 64, repeat_penalty = 1.300000
-
-
-Please close your issue when it has been answered.
-@duncan-donut: I'm trying to figure out what kind of "support" you need for this script and why, exactly? Is there a question about how the code works that hasn't already been addressed in one or more comments below this ticket, or are we talking something else entirely like some sorta bugfixing job because your server setup is different from mine??
-I can understand if your site needs to be running smoothly and you need help with a fix of sorts but there should really be nothing wrong here that the code itself could not handle. And given that I'm getting reports about how it works perfectly well on some other servers, what exactly are we talking? A detailed report will do wonders in helping us get this resolved for ya quickly so please take your time and describe the issue(s) you see as clearly & concisely as possible!!
-@duncan-donut: I'm not sure if you have access to cPanel but you could try these instructions. It is worth a shot! Let me know how it goes (or what error message, exactly!) when/if ya give that code a go? [end of text]
-
-
-main: mem per token = 71159620 bytes
-main: load time = 19309.95 ms
-main: sample time = 168.62 ms
-main: predict time = 223895.61 ms / 888.47 ms per token
-main: total time = 246406.42 ms
-
-Performance counter stats for './main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p Please close your issue when it has been answered.':
-
-3636882.89 msec task-clock # 14.677 CPUs utilized
-13509 context-switches # 3.714 /sec
-2436 cpu-migrations # 0.670 /sec
-10476679 page-faults # 2.881 K/sec
-13133115082869 cycles # 3.611 GHz (16.77%)
-29314462753 stalled-cycles-frontend # 0.22% frontend cycles idle (16.76%)
-10294402631459 stalled-cycles-backend # 78.39% backend cycles idle (16.74%)
-23479217109614 instructions # 1.79 insn per cycle
-                                        # 0.44 stalled cycles per insn (16.76%)
-2353072268027 branches # 647.002 M/sec (16.77%)
-1998682780 branch-misses # 0.08% of all branches (16.76%)
-
-247.802177522 seconds time elapsed
-
-3618.573072000 seconds user
-18.491698000 seconds sys
-```
+Please include information about your system, the steps to reproduce the bug, and the version of llama.cpp that you are using. If possible, please provide a minimal code example that reproduces the bug.
.github/workflows/build.yml (1 line changed)
@@ -515,7 +515,6 @@ jobs:
       - name: Build Xcode project
         run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build
-

 # freeBSD-latest:
 #   runs-on: macos-12
 #   steps:
.github/workflows/docker.yml (34 lines changed)
@@ -52,6 +52,36 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}

+      # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example
+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@main
+        with:
+          # this might remove tools that are actually needed,
+          # if set to "true" but frees about 6 GB
+          tool-cache: false
+
+          # all of these default to true, but feel free to set to
+          # "false" if necessary for your workflow
+          android: true
+          dotnet: true
+          haskell: true
+          large-packages: true
+          docker-images: true
+          swap-storage: true
+
+      - name: Determine tag name
+        id: tag
+        shell: bash
+        run: |
+          BUILD_NUMBER="$(git rev-list --count HEAD)"
+          SHORT_HASH="$(git rev-parse --short=7 HEAD)"
+          if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+            echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+          else
+            SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+            echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+          fi
+
       - name: Build and push Docker image (versioned)
         if: github.event_name == 'push'
         uses: docker/build-push-action@v4
@@ -59,7 +89,7 @@ jobs:
           context: .
           push: true
           platforms: ${{ matrix.config.platforms }}
-          tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
+          tags: "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
           file: ${{ matrix.config.dockerfile }}

       - name: Build and push Docker image (tagged)
@@ -68,5 +98,5 @@ jobs:
           context: .
           push: ${{ github.event_name == 'push' }}
           platforms: ${{ matrix.config.platforms }}
-          tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}"
+          tags: "ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }},ghcr.io/${{ github.repository_owner }}/llama.cpp:${{ matrix.config.tag }}-${{ steps.tag.outputs.name }}"
          file: ${{ matrix.config.dockerfile }}
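For illustration only: with the "Determine tag name" step above, a build on master with, say, 1742 commits would set name=b1742, while a build of branch feat/x at short hash abc1234 would set name=feat-x-b1742-abc1234 (the numbers here are made up). The slash-to-dash conversion comes from the tr call:

    # hypothetical values, mirroring the workflow's logic
    SAFE_NAME=$(echo "feat/x" | tr '/' '-')   # -> feat-x
    echo "name=${SAFE_NAME}-b1742-abc1234"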
.github/workflows/nix-ci.yml (new file, 112 lines)
@@ -0,0 +1,112 @@
name: Nix CI

on:
  workflow_dispatch: # allows manual triggering
  push:
    branches:
      - master
    paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', '**/*.sh', '**/*.py', '**/*.nix']
  pull_request:
    types: [opened, synchronize, reopened]
    paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', '**/*.sh', '**/*.py', '**/*.nix']

jobs:
  nix-eval:
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, macos-latest ]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@v9
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          extra-conf: |
            extra-substituters = https://${{ vars.CACHIX_NAME }}.cachix.org https://cuda-maintainers.cachix.org
            extra-trusted-public-keys = ${{ vars.CACHIX_PUBLIC_KEY }} cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E=
      - uses: DeterminateSystems/magic-nix-cache-action@v2
        with:
          upstream-cache: https://${{ matrix.cachixName }}.cachix.org
      - name: List all flake outputs
        run: nix flake show --all-systems
      - name: Show all output paths
        run: >
          nix run github:nix-community/nix-eval-jobs
          -- --gc-roots-dir gcroot
          --flake
          ".#packages.$(nix eval --raw --impure --expr builtins.currentSystem)"
  nix-build:
    if: ${{ vars.CACHIX_NAME != '' }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, macos-latest ]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@v9
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          extra-conf: |
            extra-substituters = https://${{ vars.CACHIX_NAME }}.cachix.org https://cuda-maintainers.cachix.org
            extra-trusted-public-keys = ${{ vars.CACHIX_PUBLIC_KEY }} cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E=
      - uses: DeterminateSystems/magic-nix-cache-action@v2
        with:
          upstream-cache: https://${{ matrix.cachixName }}.cachix.org
      - name: Set-up cachix to push the results to
        uses: cachix/cachix-action@v13
        with:
          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
          name: ${{ vars.CACHIX_NAME }}
      - name: Build
        run: >
          nix run github:Mic92/nix-fast-build
          -- --skip-cached --no-nom
          --flake
          ".#checks.$(nix eval --raw --impure --expr builtins.currentSystem)"
  nix-build-aarch64:
    if: ${{ vars.CACHIX_NAME != '' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install QEMU
        # Copy-paste from https://github.com/orgs/community/discussions/8305#discussioncomment-5888654
        run: |
          sudo apt-get install -y qemu-user-static qemu-system-aarch64
          sudo usermod -a -G kvm $USER
      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@v9
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          extra-conf: |
            extra-platforms = aarch64-linux
            extra-system-features = nixos-test kvm
            extra-substituters = https://${{ vars.CACHIX_NAME }}.cachix.org https://cuda-maintainers.cachix.org
            extra-trusted-public-keys = ${{ vars.CACHIX_PUBLIC_KEY }} cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E=
      - uses: DeterminateSystems/magic-nix-cache-action@v2
        with:
          upstream-cache: https://${{ matrix.cachixName }}.cachix.org
      - name: Set-up cachix to push the results to
        uses: cachix/cachix-action@v13
        with:
          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
          name: ${{ vars.CACHIX_NAME }}
      - name: Show all output paths
        run: >
          nix run github:nix-community/nix-eval-jobs
          -- --gc-roots-dir gcroot
          --flake
          ".#packages.aarch64-linux"
      - name: Build
        run: >
          nix run github:Mic92/nix-fast-build
          -- --skip-cached --no-nom
          --systems aarch64-linux
          --flake
          ".#checks.aarch64-linux"
.github/workflows/nix-flake-update.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
name: update-flake-lock
on:
  workflow_dispatch:
  schedule:
    - cron: '0 0 * * 0' # runs weekly on Sunday at 00:00

jobs:
  lockfile:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@main
      - name: Update flake.lock
        uses: DeterminateSystems/update-flake-lock@main
        with:
          pr-title: "nix: update flake.lock"
          pr-labels: |
            nix
          pr-reviewers: philiptaron,SomeoneSerge
          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/nix-publish-flake.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
# Make the flake discoverable on https://flakestry.dev and https://flakehub.com/flakes
name: "Publish a flake to flakestry & flakehub"
on:
  push:
    tags:
      - "*"
  workflow_dispatch:
    inputs:
      tag:
        description: "The existing tag to publish"
        type: "string"
        required: true
jobs:
  flakestry-publish:
    runs-on: ubuntu-latest
    permissions:
      id-token: "write"
      contents: "read"
    steps:
      - uses: flakestry/flakestry-publish@main
        with:
          version: "${{ inputs.tag || github.ref_name }}"
  flakehub-publish:
    runs-on: "ubuntu-latest"
    permissions:
      id-token: "write"
      contents: "read"
    steps:
      - uses: "actions/checkout@v4"
        with:
          ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}"
      - uses: "DeterminateSystems/nix-installer-action@main"
      - uses: "DeterminateSystems/flakehub-push@main"
        with:
          visibility: "public"
          tag: "${{ inputs.tag }}"
.github/workflows/python-check-requirements.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
name: Python check requirements.txt

on:
  push:
    paths:
      - 'scripts/check-requirements.sh'
      - 'convert*.py'
      - 'requirements.txt'
      - 'requirements/*.txt'
  pull_request:
    paths:
      - 'scripts/check-requirements.sh'
      - 'convert*.py'
      - 'requirements.txt'
      - 'requirements/*.txt'

jobs:
  python-check-requirements:
    runs-on: ubuntu-latest
    name: check-requirements
    steps:
      - name: Check out source repository
        uses: actions/checkout@v3
      - name: Set up Python environment
        uses: actions/setup-python@v4
        with:
          python-version: "3.11"
      - name: Run check-requirements.sh script
        run: bash scripts/check-requirements.sh nocleanup
.gitignore (2 lines added)
@@ -48,8 +48,10 @@ models-mnt
 /llama-bench
 /llava-cli
 /lookahead
+/lookup
 /main
 /metal
+/passkey
 /perplexity
 /q8dot
 /quantize
CMakeLists.txt
@@ -95,6 +95,7 @@ option(LLAMA_HIP_UMA "llama: use HIP unified memory arch
 option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
 option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT})
 option(LLAMA_METAL_NDEBUG "llama: disable Metal debugging" OFF)
+option(LLAMA_METAL_SHADER_DEBUG "llama: compile Metal with -fno-fast-math" OFF)
 option(LLAMA_KOMPUTE "llama: use Kompute" OFF)
 option(LLAMA_MPI "llama: use MPI" OFF)
 option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF)
@@ -174,6 +175,35 @@ if (LLAMA_METAL)
     # copy ggml-metal.metal to bin directory
     configure_file(ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY)
+
+    if (LLAMA_METAL_SHADER_DEBUG)
+        # custom command to do the following:
+        #   xcrun -sdk macosx metal -fno-fast-math -c ggml-metal.metal -o ggml-metal.air
+        #   xcrun -sdk macosx metallib ggml-metal.air -o default.metallib
+        #
+        # note: this is the only way I found to disable fast-math in Metal. it's ugly, but at least it works
+        #       disabling fast math is needed in order to pass tests/test-backend-ops
+        # note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1
+        # note: unfortunately, we have to call it default.metallib instead of ggml.metallib
+        #       ref: https://github.com/ggerganov/whisper.cpp/issues/1720
+        set(XC_FLAGS -fno-fast-math -fno-inline -g)
+        if (LLAMA_QKK_64)
+            set(XC_FLAGS ${XC_FLAGS} -DQK_K=64)
+        endif()
+
+        add_custom_command(
+            OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+            COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air
+            COMMAND xcrun -sdk macosx metallib ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+            DEPENDS ggml-metal.metal
+            COMMENT "Compiling Metal kernels"
+        )
+
+        add_custom_target(
+            ggml-metal ALL
+            DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+        )
+    endif()

     set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS}
         ${FOUNDATION_LIBRARY}
         ${METAL_FRAMEWORK}
@@ -201,7 +231,11 @@ if (LLAMA_BLAS)
     if (${LLAMA_BLAS_VENDOR} MATCHES "Generic")
         pkg_check_modules(DepBLAS REQUIRED blas)
     elseif (${LLAMA_BLAS_VENDOR} MATCHES "OpenBLAS")
+        # As of openblas v0.3.22, the 64-bit is named openblas64.pc
+        pkg_check_modules(DepBLAS openblas64)
+        if (NOT DepBLAS_FOUND)
         pkg_check_modules(DepBLAS REQUIRED openblas)
+        endif()
     elseif (${LLAMA_BLAS_VENDOR} MATCHES "FLAME")
         pkg_check_modules(DepBLAS REQUIRED blis)
     elseif (${LLAMA_BLAS_VENDOR} MATCHES "ATLAS")
@@ -303,6 +337,8 @@ if (LLAMA_CUBLAS)
         set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
     endif()

+    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver)
+
     if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
         # 52 == lowest CUDA 12 standard
         # 60 == f16 CUDA intrinsics
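One way to exercise the new option from the hunks above would be a configure along these lines (a sketch; only LLAMA_METAL and LLAMA_METAL_SHADER_DEBUG are taken from this diff, the rest is ordinary CMake usage):

    # compile the Metal shaders with -fno-fast-math so tests/test-backend-ops passes
    cmake -B build -DLLAMA_METAL=ON -DLLAMA_METAL_SHADER_DEBUG=ON
    cmake --build build --config Release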
Makefile (39 lines changed)
@@ -2,7 +2,7 @@
 BUILD_TARGETS = \
     main quantize quantize-stats perplexity embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
     simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \
-    speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead tests/test-c.o
+    speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey tests/test-c.o

 # Binaries only useful for tests
 TEST_TARGETS = \
@@ -282,8 +282,17 @@ endif
 ifneq ($(filter aarch64%,$(UNAME_M)),)
     # Apple M1, M2, etc.
     # Raspberry Pi 3, 4, Zero 2 (64-bit)
+    # Nvidia Jetson
     MK_CFLAGS += -mcpu=native
     MK_CXXFLAGS += -mcpu=native
+    JETSON_RELEASE_INFO = $(shell jetson_release)
+    ifdef JETSON_RELEASE_INFO
+        ifneq ($(filter TX2%,$(JETSON_RELEASE_INFO)),)
+            JETSON_EOL_MODULE_DETECT = 1
+            CC = aarch64-unknown-linux-gnu-gcc
+            cxx = aarch64-unknown-linux-gnu-g++
+        endif
+    endif
 endif

 ifneq ($(filter armv6%,$(UNAME_M)),)
@@ -357,15 +366,16 @@ ifdef LLAMA_BLIS
 endif # LLAMA_BLIS

 ifdef LLAMA_CUBLAS
-    MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
-    MK_LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
+    MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include -I/usr/local/cuda/targets/aarch64-linux/include
+    MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib -L/usr/lib/wsl/lib
     OBJS += ggml-cuda.o
-    MK_NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math
+    MK_NVCCFLAGS = -use_fast_math
+ifndef JETSON_EOL_MODULE_DETECT
+    MK_NVCCFLAGS += --forward-unknown-to-host-compiler
+endif # JETSON_EOL_MODULE_DETECT
 ifdef LLAMA_DEBUG
     MK_NVCCFLAGS += -lineinfo
-endif
+endif # LLAMA_DEBUG

 ifdef LLAMA_CUDA_NVCC
     NVCC = $(LLAMA_CUDA_NVCC)
 else
@@ -417,7 +427,11 @@ ifdef LLAMA_CUDA_CCBIN
     MK_NVCCFLAGS += -ccbin $(LLAMA_CUDA_CCBIN)
 endif
 ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
+ifdef JETSON_EOL_MODULE_DETECT
+    $(NVCC) -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I/usr/local/cuda/targets/aarch64-linux/include -std=c++11 -O3 $(NVCCFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@
+else
     $(NVCC) $(BASE_CXXFLAGS) $(NVCCFLAGS) -Wno-pedantic -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@
+endif # JETSON_EOL_MODULE_DETECT
 endif # LLAMA_CUBLAS

 ifdef LLAMA_CLBLAST
@@ -452,6 +466,9 @@ ifdef LLAMA_HIPBLAS
     LLAMA_CUDA_MMV_Y ?= 1
     LLAMA_CUDA_KQUANTS_ITER ?= 2
     MK_CPPFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
+ifdef LLAMA_HIP_UMA
+    MK_CPPFLAGS += -DGGML_HIP_UMA
+endif # LLAMA_HIP_UMA
     MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
     MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas
     HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS))
@@ -606,7 +623,7 @@ save-load-state: examples/save-load-state/save-load-state.cpp ggml.o llama.o $(C
 server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS)
     $(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS) $(LWINSOCK2) -Wno-cast-qual

-gguf: examples/gguf/gguf.cpp ggml.o llama.o $(OBJS)
+gguf: examples/gguf/gguf.cpp ggml.o $(OBJS)
     $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

 train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)
@@ -645,6 +662,12 @@ parallel: examples/parallel/parallel.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 lookahead: examples/lookahead/lookahead.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
     $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

+lookup: examples/lookup/lookup.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+    $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
+
+passkey: examples/passkey/passkey.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+    $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
+
 ifdef LLAMA_METAL
 metal: examples/metal/metal.cpp ggml.o $(OBJS)
     $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
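On a Jetson board the detection added above kicks in automatically when `jetson_release` is on the PATH, switching to the aarch64 GNU toolchain on EOL modules (TX2/Nano). A build might then simply be (a sketch; the flags are the usual cuBLAS ones, nothing Jetson-specific is needed on the command line):

    # hypothetical invocation on a Jetson with the CUDA toolkit installed
    make LLAMA_CUBLAS=1 -j$(nproc)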
Package.swift
@@ -13,21 +13,17 @@ let package = Package(
     products: [
         .library(name: "llama", targets: ["llama"]),
     ],
+    dependencies: [
+        .package(url: "https://github.com/ggerganov/ggml.git", .branch("master"))
+    ],
     targets: [
         .target(
             name: "llama",
+            dependencies: ["ggml"],
             path: ".",
-            exclude: [],
+            exclude: ["ggml-metal.metal"],
             sources: [
-                "ggml.c",
                 "llama.cpp",
-                "ggml-alloc.c",
-                "ggml-backend.c",
-                "ggml-quants.c",
-                "ggml-metal.m",
-            ],
-            resources: [
-                .process("ggml-metal.metal")
             ],
             publicHeadersPath: "spm-headers",
             cSettings: [
38
README.md
38
README.md
@ -10,6 +10,7 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
|
|||||||
|
|
||||||
### Hot topics
|
### Hot topics
|
||||||
|
|
||||||
|
- New SOTA quantized models, including pure 2-bits: https://huggingface.co/ikawrakow
|
||||||
- Collecting Apple Silicon performance stats:
|
- Collecting Apple Silicon performance stats:
|
||||||
- M-series: https://github.com/ggerganov/llama.cpp/discussions/4167
|
- M-series: https://github.com/ggerganov/llama.cpp/discussions/4167
|
||||||
- A-series: https://github.com/ggerganov/llama.cpp/discussions/4508
|
- A-series: https://github.com/ggerganov/llama.cpp/discussions/4508
|
||||||
@ -102,6 +103,8 @@ as the main playground for developing new features for the [ggml](https://github
|
|||||||
- [x] [Deepseek models](https://huggingface.co/models?search=deepseek-ai/deepseek)
|
- [x] [Deepseek models](https://huggingface.co/models?search=deepseek-ai/deepseek)
|
||||||
- [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen)
|
- [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen)
|
||||||
- [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral)
|
- [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral)
|
||||||
|
- [x] [PLaMo-13B](https://github.com/ggerganov/llama.cpp/pull/3557)
|
||||||
|
- [x] [GPT-2](https://huggingface.co/gpt2)
|
||||||
|
|
||||||
**Multimodal models:**
|
**Multimodal models:**
|
||||||
|
|
||||||
@ -116,6 +119,7 @@ as the main playground for developing new features for the [ggml](https://github
|
|||||||
- Python: [abetlen/llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
|
- Python: [abetlen/llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
|
||||||
- Go: [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp)
|
- Go: [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp)
|
||||||
- Node.js: [withcatai/node-llama-cpp](https://github.com/withcatai/node-llama-cpp)
|
- Node.js: [withcatai/node-llama-cpp](https://github.com/withcatai/node-llama-cpp)
|
||||||
|
- JS/TS (llama.cpp server client): [lgrammel/modelfusion](https://modelfusion.dev/integration/model-provider/llamacpp)
|
||||||
- Ruby: [yoshoku/llama_cpp.rb](https://github.com/yoshoku/llama_cpp.rb)
|
- Ruby: [yoshoku/llama_cpp.rb](https://github.com/yoshoku/llama_cpp.rb)
|
||||||
- Rust: [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp)
|
- Rust: [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp)
|
||||||
- C#/.NET: [SciSharp/LLamaSharp](https://github.com/SciSharp/LLamaSharp)
|
- C#/.NET: [SciSharp/LLamaSharp](https://github.com/SciSharp/LLamaSharp)
|
||||||
@ -123,6 +127,7 @@ as the main playground for developing new features for the [ggml](https://github
|
|||||||
- Clojure: [phronmophobic/llama.clj](https://github.com/phronmophobic/llama.clj)
|
- Clojure: [phronmophobic/llama.clj](https://github.com/phronmophobic/llama.clj)
|
||||||
- React Native: [mybigday/llama.rn](https://github.com/mybigday/llama.rn)
|
- React Native: [mybigday/llama.rn](https://github.com/mybigday/llama.rn)
|
||||||
- Java: [kherud/java-llama.cpp](https://github.com/kherud/java-llama.cpp)
|
- Java: [kherud/java-llama.cpp](https://github.com/kherud/java-llama.cpp)
|
||||||
|
- Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig)
|
||||||
|
|
||||||
**UI:**
|
**UI:**
|
||||||
|
|
||||||
@ -131,6 +136,8 @@ as the main playground for developing new features for the [ggml](https://github

- [withcatai/catai](https://github.com/withcatai/catai)
- [semperai/amica](https://github.com/semperai/amica)
- [psugihara/FreeChat](https://github.com/psugihara/FreeChat)
- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal)
- [iohub/collama](https://github.com/iohub/coLLaMA)

---
@ -381,20 +388,37 @@ Building the program with BLAS support may lead to some performance improvements

  Check [BLIS.md](docs/BLIS.md) for more information.

- #### Intel oneMKL

  - Using manual oneAPI installation:
    By default, `LLAMA_BLAS_VENDOR` is set to `Generic`, so if you have already sourced the Intel environment script and pass `-DLLAMA_BLAS=ON` to cmake, the MKL version of BLAS is selected automatically. Otherwise, install oneAPI and follow the steps below:
    ```bash
    mkdir build
    cd build
    source /opt/intel/oneapi/setvars.sh # You can skip this step if you are in the oneapi-runtime docker image; it is only required for a manual installation
    cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_NATIVE=ON
    cmake --build . --config Release
    ```

  - Using oneAPI docker image:
    If you do not want to source the environment variables and install oneAPI manually, you can also build the code using the Intel docker container: [oneAPI-runtime](https://hub.docker.com/r/intel/oneapi-runtime)

  Building with the oneAPI compilers makes the avx_vnni instruction set available on Intel processors that do not support avx512 and avx512_vnni.

  Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information.

- #### cuBLAS

  This provides BLAS acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).

  For Jetson users: if you have a Jetson Orin, you can try this: [Official Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an older board (Nano/TX2), some additional steps are needed before compiling.
- Using `make`:
|
- Using `make`:
|
||||||
```bash
|
```bash
|
||||||
make LLAMA_CUBLAS=1
|
make LLAMA_CUBLAS=1
|
||||||
@ -439,7 +463,13 @@ Building the program with BLAS support may lead to some performance improvements
    && cmake --build build -- -j 16
  ```
  On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON`.
  However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
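  For reference, a minimal CMake configure sketch with UMA enabled could look like the following; the `-S`/`-B` layout and compiler setup are assumptions based on the surrounding instructions, not part of this change:
  ```bash
  # hypothetical example: HIP build with unified memory enabled
  cmake -S . -B build -DLLAMA_HIPBLAS=ON -DLLAMA_HIP_UMA=ON \
      && cmake --build build -- -j 16
  ```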

  - Using `make` (example for target gfx1030, build with 16 CPU threads):
    ```bash
    make -j16 LLAMA_HIPBLAS=1 LLAMA_HIP_UMA=1 AMDGPU_TARGETS=gfx1030
    ```

  - Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU):
```bash
|
```bash
|
||||||
set PATH=%HIP_PATH%\bin;%PATH%
|
set PATH=%HIP_PATH%\bin;%PATH%
|
||||||
116	awq-py/README.md	Normal file
@ -0,0 +1,116 @@

# AWQ: Activation-aware Weight Quantization for LLM - version adapted for llama.cpp
[[Paper](https://arxiv.org/abs/2306.00978)][[Original Repo](https://github.com/mit-han-lab/llm-awq)][[Easy-to-use Repo](https://github.com/casper-hansen/AutoAWQ)]

**Supported models:**

- [x] LLaMA
- [x] LLaMA 2
- [x] MPT
- [x] Mistral AI v0.1
- [ ] Bloom
- [ ] Mixtral MoE

**TODO:**
- [x] Update version to work with both MPT and MPT-AWQ models
- [ ] Add OPT model
- [ ] Add Bloom model
- [ ] Add Mixtral MoE
- [ ] Support w3, w2

## Contents

- [Install](#install)
- [Convert](#convert)
- [Quantize](#quantize)
- [Test](#test)
- [Benchmark](#benchmark)
- [Results](#results)

## Install
Install the requirements:
```bash
pip install -r requirements.txt
```
Get the pre-computed AWQ search results for multiple model families, including LLaMA, LLaMA2, MPT, and OPT:
```bash
git clone https://huggingface.co/datasets/mit-han-lab/awq-model-zoo awq_cache
```

## Convert
Example for a LLaMA model:
```bash
# For llama7b and llama2 models
python convert.py models/llama-7b/ --awq-path awq_cache/llama-7b-w4-g128.pt --outfile models/llama_7b_fp16.gguf
# For mistral and mpt models
python convert-hf-to-gguf.py models/mpt-7b/ --awq-path awq_cache/llama-7b-w4-g128.pt --outfile models/mpt_7b_fp16.gguf
```

## Quantize
```bash
# We only benchmark and confirm the results on the q4_0, q4_1, and q2_k quantization types.
./quantize models/llama_7b_fp16.gguf models/llama_7b_q4_0.gguf q4_0
```

## Test
```bash
# For all models.
./build/bin/main -m models/llama_7b_q4_0.gguf -n 128 --prompt "Once upon a time"
```

## Benchmark
The perplexity measurements in the tables below are done against the `wikitext2` test dataset (https://paperswithcode.com/dataset/wikitext-2), with a context length of 512.
```bash
# For llama, llama2, and mistral models.
./perplexity -m models/llama_7b_q4_0.gguf -f datasets/wikitext-2-raw/wiki.test.raw
```

## Results
Results are run on OpenBLAS (CPU) and cuBLAS (GPU) for fair comparison.
We use three llama.cpp quantization types to work with our version: q4_0, q4_1, and q2_k.

### Llama 7B (Build with OpenBLAS)

| Model        | Measure      |    F16 |   Q4_0 |   Q4_1 |   Q2_K |
|-------------:|--------------|-------:|-------:|-------:|-------:|
|Llama 7B      | perplexity   | 5.9066 | 6.1214 | 6.0643 | 6.5808 |
|Llama 7B      | file size    |  12.9G |   3.5G |   3.9G |   2.7G |
|Llama 7B      | bits/weight  |   16.0 |    4.5 |    5.0 |    2.6 |
|AWQ-LLama 7B  | perplexity   | 5.9175 | 6.0252 | 5.9987 | 6.3692 |
|AWQ-LLama 7B  | file size    |  12.9G |   3.5G |   3.9G |   2.7G |
|AWQ-LLama 7B  | bits/weight  |   16.0 |    4.5 |    5.0 |    2.6 |

### Llama2 7B (Build with CuBLAS)

| Model         | Measure      |    F16 |   Q4_0 |   Q4_1 |   Q2_K |
|--------------:|--------------|-------:|-------:|-------:|-------:|
|Llama2 7B      | perplexity   | 5.8664 | 6.0260 | 6.0656 | 6.4496 |
|Llama2 7B      | file size    |  12.9G |   3.5G |   3.9G |   2.7G |
|Llama2 7B      | bits/weight  |   16.0 |    4.5 |    5.0 |    2.6 |
|AWQ-LLama2 7B  | perplexity   | 5.8801 | 6.0054 | 5.9849 | 6.3650 |
|AWQ-LLama2 7B  | file size    |  12.9G |   3.5G |   3.9G |   2.7G |
|AWQ-LLama2 7B  | bits/weight  |   16.0 |    4.5 |    5.0 |    2.6 |

### Mistral 7B v0.1 (Build with CuBLAS)

| Model          | Measure      |    F16 |   Q4_0 |   Q4_1 |   Q2_K |
|---------------:|--------------|-------:|-------:|-------:|-------:|
|Mistral 7B      | perplexity   | 5.6931 | 5.8202 | 5.8268 | 6.1645 |
|Mistral 7B      | file size    |  14.5G |   4.1G |   4.5G |   3.1G |
|Mistral 7B      | bits/weight  |   16.0 |    4.5 |    5.0 |    2.6 |
|AWQ-Mistral 7B  | perplexity   | 5.6934 | 5.8020 | 5.7691 | 6.0426 |
|AWQ-Mistral 7B  | file size    |  14.5G |   4.1G |   4.5G |   3.1G |
|AWQ-Mistral 7B  | bits/weight  |   16.0 |    4.5 |    5.0 |    2.6 |

### MPT 7B (Build with OpenBLAS)

| Model      | Measure      |    F16 |   Q4_0 |   Q4_1 |    Q2_K |
|-----------:|--------------|-------:|-------:|-------:|--------:|
|MPT 7B      | perplexity   | 8.4369 | 8.7956 | 8.6265 | 11.4913 |
|MPT 7B      | file size    |  13.7G |   3.9G |   4.3G |    2.8G |
|MPT 7B      | bits/weight  |   16.0 |    4.5 |    5.0 |     2.6 |
|AWQ-MPT 7B  | perplexity   | 8.4944 | 8.7053 | 8.6750 | 10.2873 |
|AWQ-MPT 7B  | file size    |  13.7G |   3.9G |   4.3G |    2.8G |
|AWQ-MPT 7B  | bits/weight  |   16.0 |    4.5 |    5.0 |     2.6 |
254	awq-py/awq/apply_awq.py	Normal file
@ -0,0 +1,254 @@
|
|||||||
|
"""
|
||||||
|
Implements the AWQ for llama.cpp use cases.
|
||||||
|
Original paper: https://arxiv.org/abs/2306.00978
|
||||||
|
|
||||||
|
This code is based on versions of the AWQ implementation found in the following repositories:
|
||||||
|
* https://github.com/mit-han-lab/llm-awq
|
||||||
|
* https://github.com/casper-hansen/AutoAWQ
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import torch
|
||||||
|
import torch.nn as nn
|
||||||
|
|
||||||
|
from transformers import AutoModelForCausalLM, AutoConfig
|
||||||
|
from transformers.models.bloom.modeling_bloom import BloomGelu
|
||||||
|
from transformers.models.llama.modeling_llama import LlamaRMSNorm
|
||||||
|
from transformers.activations import GELUActivation
|
||||||
|
|
||||||
|
|
||||||
|
class ScaledActivation(nn.Module):
|
||||||
|
"""
|
||||||
|
ScaledActivation module wraps an existing activation function and applies a
|
||||||
|
scale factor to its output.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
module (nn.Module): The activation function to be scaled.
|
||||||
|
scales (torch.Tensor): A tensor of size (num_features,) containing the initial
|
||||||
|
scale factors for each feature.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
torch.Tensor: The scaled output of the activation function.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, module, scales):
|
||||||
|
super().__init__()
|
||||||
|
self.act = module
|
||||||
|
self.scales = nn.Parameter(scales.data)
|
||||||
|
|
||||||
|
def forward(self, x):
|
||||||
|
return self.act(x) / self.scales.view(1, 1, -1).to(x.device)
|
||||||
|
|
||||||
|
|
||||||
|
def set_op_by_name(layer, name, new_module):
|
||||||
|
"""
|
||||||
|
Set the new module for given module's name.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
layer (nn.Module): The layer in which to replace the submodule.
|
||||||
|
name (str): The path to the submodule to be replaced, using dot notation
|
||||||
|
to access nested modules.
|
||||||
|
new_module (nn.Module): The new module to replace the existing one.
|
||||||
|
"""
|
||||||
|
levels = name.split(".")
|
||||||
|
if len(levels) > 1:
|
||||||
|
mod_ = layer
|
||||||
|
for l_idx in range(len(levels) - 1):
|
||||||
|
if levels[l_idx].isdigit():
|
||||||
|
mod_ = mod_[int(levels[l_idx])]
|
||||||
|
else:
|
||||||
|
mod_ = getattr(mod_, levels[l_idx])
|
||||||
|
setattr(mod_, levels[-1], new_module)
|
||||||
|
else:
|
||||||
|
setattr(layer, name, new_module)
|
||||||
|
|
||||||
|
|
||||||
|
def get_op_by_name(module, op_name):
|
||||||
|
"""
|
||||||
|
Retrieves a submodule within a given layer based on its name.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
module (nn.Module): The layer containing the submodule to find.
|
||||||
|
op_name (str): The name of the submodule.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
nn.Module: The requested submodule found within the given layer.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If the specified submodule cannot be found within the layer.
|
||||||
|
"""
|
||||||
|
for name, m in module.named_modules():
|
||||||
|
if name == op_name:
|
||||||
|
return m
|
||||||
|
raise ValueError(f"Cannot find op {op_name} in module {module}")
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def scale_ln_fcs(ln, fcs, scales):
|
||||||
|
"""
|
||||||
|
Scales the weights of a LayerNorm and a list of fully-connected layers proportionally.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
ln (nn.LayerNorm): The LayerNorm module to be scaled.
|
||||||
|
fcs (List[nn.Linear]): A list of fully-connected layers to be scaled.
|
||||||
|
scales (torch.Tensor): A 1D tensor of size (num_features,).
|
||||||
|
"""
|
||||||
|
|
||||||
|
if not isinstance(fcs, list):
|
||||||
|
fcs = [fcs]
|
||||||
|
|
||||||
|
scales = scales.to(ln.weight.device)
|
||||||
|
|
||||||
|
ln.weight.div_(scales)
|
||||||
|
if hasattr(ln, "bias") and ln.bias is not None:
|
||||||
|
ln.bias.div_(scales)
|
||||||
|
|
||||||
|
for fc in fcs:
|
||||||
|
fc.weight.mul_(scales.view(1, -1))
|
||||||
|
|
||||||
|
for p in ln.parameters():
|
||||||
|
assert torch.isnan(p).sum() == 0
|
||||||
|
for fc in fcs:
|
||||||
|
for p in fc.parameters():
|
||||||
|
assert torch.isnan(p).sum() == 0
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def scale_fc_fc(fc1, fc2, scales):
|
||||||
|
"""
|
||||||
|
Scales the weights of two fully-connected layers in a specific pattern.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
fc1 (nn.Linear): The first fully-connected layer to be scaled.
|
||||||
|
fc2 (nn.Linear): The second fully-connected layer to be scaled.
|
||||||
|
scales (torch.Tensor): A 1D tensor of size (num_features,).
|
||||||
|
"""
|
||||||
|
assert isinstance(fc1, nn.Linear)
|
||||||
|
assert isinstance(fc2, nn.Linear)
|
||||||
|
|
||||||
|
scales = scales.to(fc1.weight.device)
|
||||||
|
|
||||||
|
fc1.weight[-scales.size(0):].div_(scales.view(-1, 1))
|
||||||
|
if fc1.bias is not None:
|
||||||
|
fc1.bias.div_(scales.view(-1))
|
||||||
|
|
||||||
|
fc2.weight.mul_(scales.view(1, -1))
|
||||||
|
|
||||||
|
for p in fc1.parameters():
|
||||||
|
assert torch.isnan(p).sum() == 0
|
||||||
|
for p in fc2.parameters():
|
||||||
|
assert torch.isnan(p).sum() == 0
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def scale_gelu_fc(gelu, fc, scales):
|
||||||
|
"""
|
||||||
|
Scales the weight of a GELU activation and a fully-connected layer proportionally.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
gelu (Union[nn.GELU, BloomGelu, GELUActivation]): The GELU activation module to be scaled.
|
||||||
|
fc (nn.Linear): The fully-connected layer to be scaled.
|
||||||
|
scales (torch.Tensor): A 1D tensor of size (num_features,).
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
TypeError: If the `gelu` module is not of type `nn.GELU`, `BloomGelu`, or `GELUActivation`.
|
||||||
|
TypeError: If the `fc` module is not of type `nn.Linear`.
|
||||||
|
"""
|
||||||
|
assert isinstance(gelu, (nn.GELU, BloomGelu, GELUActivation))
|
||||||
|
assert isinstance(fc, nn.Linear)
|
||||||
|
|
||||||
|
fc.weight.mul_(scales.view(1, -1).to(fc.weight.device))
|
||||||
|
|
||||||
|
for p in fc.parameters():
|
||||||
|
assert torch.isnan(p).sum() == 0
|
||||||
|
|
||||||
|
|
||||||
|
def apply_scale(module, scales_list, input_feat_dict=None):
|
||||||
|
"""
|
||||||
|
Applies different scaling strategies to layers based on their type and hierarchy within a given module.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
module (nn.Module): The module containing the layers to be scaled.
|
||||||
|
scales_list (List[Tuple[str, List[str], torch.Tensor]]): A list of tuples containing:
|
||||||
|
* prev_op_name (str): The name of the preceding operation or module,
|
||||||
|
relative to which the layers to be scaled are located.
|
||||||
|
* layer_names (List[str]): A list of names of the layers to be scaled, relative to the preceding operation.
|
||||||
|
* scales (torch.Tensor): A 1D tensor of size (num_features,) containing the scaling factors for each feature.
|
||||||
|
input_feat_dict (Optional[Dict[str, torch.Tensor]]): A dictionary mapping layer names to their corresponding
|
||||||
|
input features (optional).
|
||||||
|
"""
|
||||||
|
for prev_op_name, layer_names, scales in scales_list:
|
||||||
|
prev_op = get_op_by_name(module, prev_op_name)
|
||||||
|
layers = [get_op_by_name(module, name) for name in layer_names]
|
||||||
|
|
||||||
|
prev_op.cuda()
|
||||||
|
for layer in layers:
|
||||||
|
layer.cuda()
|
||||||
|
scales.cuda()
|
||||||
|
|
||||||
|
if isinstance(prev_op, nn.Linear):
|
||||||
|
assert len(layers) == 1
|
||||||
|
scale_fc_fc(prev_op, layers[0], scales)
|
||||||
|
elif isinstance(prev_op, (nn.LayerNorm, LlamaRMSNorm)) or "rmsnorm" in str(prev_op.__class__).lower():
|
||||||
|
scale_ln_fcs(prev_op, layers, scales)
|
||||||
|
elif isinstance(prev_op, (nn.GELU, BloomGelu, GELUActivation)):
|
||||||
|
new_module = ScaledActivation(prev_op, scales)
|
||||||
|
set_op_by_name(module, prev_op_name, new_module)
|
||||||
|
scale_gelu_fc(prev_op, layers[0], scales)
|
||||||
|
else:
|
||||||
|
raise NotImplementedError(f"prev_op {type(prev_op)} not supported yet!")
|
||||||
|
|
||||||
|
# apply the scaling to input feat if given; prepare it for clipping
|
||||||
|
if input_feat_dict is not None:
|
||||||
|
for layer_name in layer_names:
|
||||||
|
inp = input_feat_dict[layer_name]
|
||||||
|
inp.div_(scales.view(1, -1).to(inp.device))
|
||||||
|
|
||||||
|
prev_op.cpu()
|
||||||
|
for layer in layers:
|
||||||
|
layer.cpu()
|
||||||
|
scales.cpu()
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def apply_clip(module, clip_list):
|
||||||
|
"""
|
||||||
|
Applies element-wise clipping to the weight of a specific layer within a given module.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
module (nn.Module): The module containing the layer to be clipped.
|
||||||
|
clip_list (List[Tuple[str, torch.Tensor]]): A list of tuples containing:
|
||||||
|
* name (str): The name of the layer to be clipped, relative to the root of the module.
|
||||||
|
* max_val (torch.Tensor): A 1D or 2D tensor defining the upper bound for each element of the layer's weight.
|
||||||
|
"""
|
||||||
|
for name, max_val in clip_list:
|
||||||
|
layer = get_op_by_name(module, name)
|
||||||
|
layer.cuda()
|
||||||
|
max_val = max_val.to(layer.weight.device)
|
||||||
|
org_shape = layer.weight.shape
|
||||||
|
layer.weight.data = layer.weight.data.reshape(*max_val.shape[:2], -1)
|
||||||
|
layer.weight.data = torch.clamp(layer.weight.data, -max_val, max_val)
|
||||||
|
layer.weight.data = layer.weight.data.reshape(org_shape)
|
||||||
|
layer.cpu()
|
||||||
|
|
||||||
|
|
||||||
|
def add_scale_weights(model_path, scale_path, tmp_path):
|
||||||
|
"""
|
||||||
|
Adds pre-computed Activation Weight Quantization (AWQ) results to a model,
|
||||||
|
including scaling factors and clipping bounds.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model_path (str): Path to the pre-trained model to be equipped with AWQ.
|
||||||
|
scale_path (str): Path to the AWQ scale factors (.pt file).
|
||||||
|
tmp_path (str): Path to the temporary directory where the equipped model will be saved.
|
||||||
|
"""
|
||||||
|
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(
|
||||||
|
model_path, config=config, trust_remote_code=True
|
||||||
|
)
|
||||||
|
model.eval()
|
||||||
|
awq_results = torch.load(str(scale_path), map_location="cpu")
|
||||||
|
apply_scale(model, awq_results["scale"])
|
||||||
|
apply_clip(model, awq_results["clip"])
|
||||||
|
model.save_pretrained(str(tmp_path))
|
||||||
|
os.system(f"cp {str(model_path)}/tokenizer* {str(tmp_path)}")
|
2	awq-py/requirements.txt	Normal file
@ -0,0 +1,2 @@

torch>=2.1.1
transformers>=4.32.0
10	ci/run.sh
@ -30,6 +30,12 @@ sd=`dirname $0`
cd $sd/../
SRC=`pwd`

CMAKE_EXTRA=""

if [ ! -z ${GG_BUILD_METAL} ]; then
    CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_METAL_SHADER_DEBUG=ON"
fi

## helpers

# download a file if it does not exist or if it is outdated
@ -81,7 +87,7 @@ function gg_run_ctest_debug {

    set -e

    (time cmake -DCMAKE_BUILD_TYPE=Debug ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log

    (time ctest --output-on-failure -E test-opt ) 2>&1 | tee -a $OUT/${ci}-ctest.log
@ -109,7 +115,7 @@ function gg_run_ctest_release {

    set -e

    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log

    if [ -z ${GG_BUILD_LOW_PERF} ]; then
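For context, a sketch of how the new `GG_BUILD_METAL` switch might be exercised locally; the positional output/mount arguments follow the usual CI invocation and are assumptions here, not part of this change:
```bash
# assumed local CI run with the Metal shader-debug configuration enabled
GG_BUILD_METAL=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
```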
|
@ -65,4 +65,4 @@ endif()
|
|||||||
|
|
||||||
target_include_directories(${TARGET} PUBLIC .)
|
target_include_directories(${TARGET} PUBLIC .)
|
||||||
target_compile_features(${TARGET} PUBLIC cxx_std_11)
|
target_compile_features(${TARGET} PUBLIC cxx_std_11)
|
||||||
target_link_libraries(${TARGET} PRIVATE llama build_info)
|
target_link_libraries(${TARGET} PRIVATE build_info PUBLIC llama)
|
||||||
|
@ -220,6 +220,20 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
params.n_ctx = std::stoi(argv[i]);
|
params.n_ctx = std::stoi(argv[i]);
|
||||||
|
} else if (arg == "--grp-attn-n" || arg == "-gan") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
params.grp_attn_n = std::stoi(argv[i]);
|
||||||
|
} else if (arg == "--grp-attn-w" || arg == "-gaw") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
params.grp_attn_w = std::stoi(argv[i]);
|
||||||
} else if (arg == "--rope-freq-base") {
|
} else if (arg == "--rope-freq-base") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
@ -904,6 +918,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
|
|||||||
printf(" Not recommended since this is both slower and uses more VRAM.\n");
|
printf(" Not recommended since this is both slower and uses more VRAM.\n");
|
||||||
#endif // GGML_USE_CUBLAS
|
#endif // GGML_USE_CUBLAS
|
||||||
#endif
|
#endif
|
||||||
|
printf(" -gan N, --grp-attn-n N\n");
|
||||||
|
printf(" group-attention factor (default: %d)\n", params.grp_attn_n);
|
||||||
|
printf(" -gaw N, --grp-attn-w N\n");
|
||||||
|
printf(" group-attention width (default: %.1f)\n", (double)params.grp_attn_w);
|
||||||
printf(" --verbose-prompt print prompt before generation\n");
|
printf(" --verbose-prompt print prompt before generation\n");
|
||||||
printf(" -dkvc, --dump-kv-cache\n");
|
printf(" -dkvc, --dump-kv-cache\n");
|
||||||
printf(" verbose print of the KV cache\n");
|
printf(" verbose print of the KV cache\n");
|
||||||
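For context, a hypothetical `main` invocation using the new group-attention options added above; the model path, prompt, and values are placeholders, not taken from this change:
```bash
# assumed example: self-extend the context window with group attention
./main -m models/llama-7b/ggml-model-q4_0.gguf -c 8192 --grp-attn-n 4 --grp-attn-w 512 -p "Once upon a time"
```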
@ -1394,6 +1412,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
|
|||||||
fprintf(stream, "build_number: %d\n", LLAMA_BUILD_NUMBER);
|
fprintf(stream, "build_number: %d\n", LLAMA_BUILD_NUMBER);
|
||||||
fprintf(stream, "cpu_has_arm_fma: %s\n", ggml_cpu_has_arm_fma() ? "true" : "false");
|
fprintf(stream, "cpu_has_arm_fma: %s\n", ggml_cpu_has_arm_fma() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_avx: %s\n", ggml_cpu_has_avx() ? "true" : "false");
|
fprintf(stream, "cpu_has_avx: %s\n", ggml_cpu_has_avx() ? "true" : "false");
|
||||||
|
fprintf(stream, "cpu_has_avx_vnni: %s\n", ggml_cpu_has_avx_vnni() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_avx2: %s\n", ggml_cpu_has_avx2() ? "true" : "false");
|
fprintf(stream, "cpu_has_avx2: %s\n", ggml_cpu_has_avx2() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_avx512: %s\n", ggml_cpu_has_avx512() ? "true" : "false");
|
fprintf(stream, "cpu_has_avx512: %s\n", ggml_cpu_has_avx512() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_avx512_vbmi: %s\n", ggml_cpu_has_avx512_vbmi() ? "true" : "false");
|
fprintf(stream, "cpu_has_avx512_vbmi: %s\n", ggml_cpu_has_avx512_vbmi() ? "true" : "false");
|
||||||
|
@ -51,7 +51,7 @@ struct gpt_params {
|
|||||||
int32_t n_ctx = 512; // context size
|
int32_t n_ctx = 512; // context size
|
||||||
int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
|
int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
|
||||||
int32_t n_keep = 0; // number of tokens to keep from initial prompt
|
int32_t n_keep = 0; // number of tokens to keep from initial prompt
|
||||||
int32_t n_draft = 16; // number of tokens to draft during speculative decoding
|
int32_t n_draft = 8; // number of tokens to draft during speculative decoding
|
||||||
int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
|
int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
|
||||||
int32_t n_parallel = 1; // number of parallel sequences to decode
|
int32_t n_parallel = 1; // number of parallel sequences to decode
|
||||||
int32_t n_sequences = 1; // number of sequences to decode
|
int32_t n_sequences = 1; // number of sequences to decode
|
||||||
@ -62,6 +62,8 @@ struct gpt_params {
|
|||||||
int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
|
int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
|
||||||
float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
|
float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
|
||||||
int32_t n_beams = 0; // if non-zero then use beam search of given width.
|
int32_t n_beams = 0; // if non-zero then use beam search of given width.
|
||||||
|
int32_t grp_attn_n = 1; // group-attention factor
|
||||||
|
int32_t grp_attn_w = 512; // group-attention width
|
||||||
float rope_freq_base = 0.0f; // RoPE base frequency
|
float rope_freq_base = 0.0f; // RoPE base frequency
|
||||||
float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
|
float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
|
||||||
float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
|
float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
|
||||||
@ -240,3 +242,4 @@ void dump_kv_cache_view(const llama_kv_cache_view & view, int row_size = 80);
|
|||||||
|
|
||||||
// Dump the KV cache view showing individual sequences in each cell (long output).
|
// Dump the KV cache view showing individual sequences in each cell (long output).
|
||||||
void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
|
void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
|
||||||
|
|
||||||
|
@ -149,11 +149,12 @@ static void sampler_queue(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
llama_token llama_sampling_sample(
|
static llama_token llama_sampling_sample_impl(
|
||||||
struct llama_sampling_context * ctx_sampling,
|
struct llama_sampling_context * ctx_sampling,
|
||||||
struct llama_context * ctx_main,
|
struct llama_context * ctx_main,
|
||||||
struct llama_context * ctx_cfg,
|
struct llama_context * ctx_cfg,
|
||||||
const int idx) {
|
const int idx,
|
||||||
|
bool is_resampling) { // Add a parameter to indicate if we are resampling
|
||||||
const llama_sampling_params & params = ctx_sampling->params;
|
const llama_sampling_params & params = ctx_sampling->params;
|
||||||
|
|
||||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));
|
const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));
|
||||||
@ -173,8 +174,17 @@ llama_token llama_sampling_sample(
|
|||||||
|
|
||||||
llama_token id = 0;
|
llama_token id = 0;
|
||||||
|
|
||||||
|
// Get a pointer to the logits
|
||||||
float * logits = llama_get_logits_ith(ctx_main, idx);
|
float * logits = llama_get_logits_ith(ctx_main, idx);
|
||||||
|
|
||||||
|
// Declare original_logits at the beginning of the function scope
|
||||||
|
std::vector<float> original_logits;
|
||||||
|
|
||||||
|
if (!is_resampling) {
|
||||||
|
// Only make a copy of the original logits if we are not in the resampling phase, not sure if I actually have to do this.
|
||||||
|
original_logits = std::vector<float>(logits, logits + llama_n_vocab(llama_get_model(ctx_main)));
|
||||||
|
}
|
||||||
|
|
||||||
// apply params.logit_bias map
|
// apply params.logit_bias map
|
||||||
for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
|
for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
|
||||||
logits[it->first] += it->second;
|
logits[it->first] += it->second;
|
||||||
@ -193,12 +203,14 @@ llama_token llama_sampling_sample(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// apply penalties
|
// apply penalties
|
||||||
if (!prev.empty()) {
|
const auto& penalty_tokens = params.use_penalty_prompt_tokens ? params.penalty_prompt_tokens : prev;
|
||||||
|
const int penalty_tokens_used_size = std::min((int)penalty_tokens.size(), penalty_last_n);
|
||||||
|
if (penalty_tokens_used_size) {
|
||||||
const float nl_logit = logits[llama_token_nl(llama_get_model(ctx_main))];
|
const float nl_logit = logits[llama_token_nl(llama_get_model(ctx_main))];
|
||||||
|
|
||||||
llama_sample_repetition_penalties(ctx_main, &cur_p,
|
llama_sample_repetition_penalties(ctx_main, &cur_p,
|
||||||
prev.data() + prev.size() - penalty_last_n,
|
penalty_tokens.data() + penalty_tokens.size() - penalty_tokens_used_size,
|
||||||
penalty_last_n, penalty_repeat, penalty_freq, penalty_present);
|
penalty_tokens_used_size, penalty_repeat, penalty_freq, penalty_present);
|
||||||
|
|
||||||
if (!penalize_nl) {
|
if (!penalize_nl) {
|
||||||
for (size_t idx = 0; idx < cur_p.size; idx++) {
|
for (size_t idx = 0; idx < cur_p.size; idx++) {
|
||||||
@ -210,7 +222,8 @@ llama_token llama_sampling_sample(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ctx_sampling->grammar != NULL) {
|
// If we are in the resampling phase, apply grammar checks before sampling logic
|
||||||
|
if (is_resampling && ctx_sampling->grammar != NULL) {
|
||||||
llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
|
llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -252,9 +265,40 @@ llama_token llama_sampling_sample(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (ctx_sampling->grammar != NULL && !is_resampling) {
|
||||||
|
// Create an array with a single token data element for the sampled id
|
||||||
|
llama_token_data single_token_data = {id, logits[id], 0.0f};
|
||||||
|
llama_token_data_array single_token_data_array = { &single_token_data, 1, false };
|
||||||
|
|
||||||
|
// Apply grammar constraints to the single token
|
||||||
|
llama_sample_grammar(ctx_main, &single_token_data_array, ctx_sampling->grammar);
|
||||||
|
|
||||||
|
// Check if the token is valid according to the grammar by seeing if its logit has been set to -INFINITY
|
||||||
|
bool is_valid = single_token_data_array.data[0].logit != -INFINITY;
|
||||||
|
|
||||||
|
// If the token is not valid according to the grammar, perform resampling
|
||||||
|
if (!is_valid) {
|
||||||
|
LOG("Resampling because token %d: '%s' does not meet grammar rules\n", id, llama_token_to_piece(ctx_main, id).c_str());
|
||||||
|
|
||||||
|
// Restore logits from the copy
|
||||||
|
std::copy(original_logits.begin(), original_logits.end(), logits);
|
||||||
|
|
||||||
|
return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, true); // Pass true for is_resampling
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return id;
|
return id;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
llama_token llama_sampling_sample(
|
||||||
|
struct llama_sampling_context * ctx_sampling,
|
||||||
|
struct llama_context * ctx_main,
|
||||||
|
struct llama_context * ctx_cfg,
|
||||||
|
const int idx) {
|
||||||
|
// Call the implementation function with is_resampling set to false by default
|
||||||
|
return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, false);
|
||||||
|
}
|
||||||
|
|
||||||
void llama_sampling_accept(
|
void llama_sampling_accept(
|
||||||
struct llama_sampling_context * ctx_sampling,
|
struct llama_sampling_context * ctx_sampling,
|
||||||
struct llama_context * ctx_main,
|
struct llama_context * ctx_main,
|
||||||
|
@ -36,6 +36,9 @@ typedef struct llama_sampling_params {
|
|||||||
float cfg_scale = 1.f; // how strong is guidance
|
float cfg_scale = 1.f; // how strong is guidance
|
||||||
|
|
||||||
std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
|
std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
|
||||||
|
|
||||||
|
std::vector<llama_token> penalty_prompt_tokens;
|
||||||
|
bool use_penalty_prompt_tokens = false;
|
||||||
} llama_sampling_params;
|
} llama_sampling_params;
|
||||||
|
|
||||||
// general sampler context
|
// general sampler context
|
||||||
|
@ -1107,7 +1107,7 @@ void print_common_train_usage(int /*argc*/, char ** /*argv*/, const struct train
|
|||||||
fprintf(stderr, " --sample-start STR Sets the starting point for samples after the specified pattern. If empty use every token position as sample start. (default '%s')\n", params->sample_start.c_str());
|
fprintf(stderr, " --sample-start STR Sets the starting point for samples after the specified pattern. If empty use every token position as sample start. (default '%s')\n", params->sample_start.c_str());
|
||||||
fprintf(stderr, " --include-sample-start Include the sample start in the samples. (default off)\n");
|
fprintf(stderr, " --include-sample-start Include the sample start in the samples. (default off)\n");
|
||||||
fprintf(stderr, " --escape process sample start escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
|
fprintf(stderr, " --escape process sample start escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
|
||||||
fprintf(stderr, " --overlapping-samples Samples my overlap, will include sample-start of second and following samples. When off, samples will end at begin of next sample. (default off)\n");
|
fprintf(stderr, " --overlapping-samples Samples may overlap, will include sample-start of second and following samples. When off, samples will end at begin of next sample. (default off)\n");
|
||||||
fprintf(stderr, " --fill-with-next-samples Samples shorter than context length will be followed by the next (shuffled) samples. (default off)\n");
|
fprintf(stderr, " --fill-with-next-samples Samples shorter than context length will be followed by the next (shuffled) samples. (default off)\n");
|
||||||
fprintf(stderr, " --separate-with-eos When fill-with-next-samples, insert end-of-sequence token between samples.%s\n", params->separate_with_eos ? " (default)" : "");
|
fprintf(stderr, " --separate-with-eos When fill-with-next-samples, insert end-of-sequence token between samples.%s\n", params->separate_with_eos ? " (default)" : "");
|
||||||
fprintf(stderr, " --separate-with-bos When fill-with-next-samples, insert begin-of-sequence token between samples.%s\n", params->separate_with_bos ? " (default)" : "");
|
fprintf(stderr, " --separate-with-bos When fill-with-next-samples, insert begin-of-sequence token between samples.%s\n", params->separate_with_bos ? " (default)" : "");
|
||||||
|
@ -46,7 +46,7 @@ class Model:
|
|||||||
self.part_names = self._get_part_names()
|
self.part_names = self._get_part_names()
|
||||||
self.hparams = Model.load_hparams(self.dir_model)
|
self.hparams = Model.load_hparams(self.dir_model)
|
||||||
self.model_arch = self._get_model_architecture()
|
self.model_arch = self._get_model_architecture()
|
||||||
self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess)
|
self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=False)
|
||||||
|
|
||||||
def set_vocab(self):
|
def set_vocab(self):
|
||||||
self._set_vocab_gpt2()
|
self._set_vocab_gpt2()
|
||||||
@ -182,8 +182,12 @@ class Model:
|
|||||||
return QwenModel
|
return QwenModel
|
||||||
if model_architecture == "MixtralForCausalLM":
|
if model_architecture == "MixtralForCausalLM":
|
||||||
return MixtralModel
|
return MixtralModel
|
||||||
|
if model_architecture == "GPT2LMHeadModel":
|
||||||
|
return GPT2Model
|
||||||
if model_architecture == "PhiForCausalLM":
|
if model_architecture == "PhiForCausalLM":
|
||||||
return Phi2Model
|
return Phi2Model
|
||||||
|
if model_architecture == "PlamoForCausalLM":
|
||||||
|
return PlamoModel
|
||||||
return Model
|
return Model
|
||||||
|
|
||||||
def _is_model_safetensors(self) -> bool:
|
def _is_model_safetensors(self) -> bool:
|
||||||
@ -223,8 +227,12 @@ class Model:
|
|||||||
return gguf.MODEL_ARCH.QWEN
|
return gguf.MODEL_ARCH.QWEN
|
||||||
if arch == "MixtralForCausalLM":
|
if arch == "MixtralForCausalLM":
|
||||||
return gguf.MODEL_ARCH.LLAMA
|
return gguf.MODEL_ARCH.LLAMA
|
||||||
|
if arch == "GPT2LMHeadModel":
|
||||||
|
return gguf.MODEL_ARCH.GPT2
|
||||||
if arch == "PhiForCausalLM":
|
if arch == "PhiForCausalLM":
|
||||||
return gguf.MODEL_ARCH.PHI2
|
return gguf.MODEL_ARCH.PHI2
|
||||||
|
if arch == "PlamoForCausalLM":
|
||||||
|
return gguf.MODEL_ARCH.PLAMO
|
||||||
|
|
||||||
raise NotImplementedError(f'Architecture "{arch}" not supported!')
|
raise NotImplementedError(f'Architecture "{arch}" not supported!')
|
||||||
|
|
||||||
@ -234,7 +242,7 @@ class Model:
|
|||||||
tokens: list[bytearray] = []
|
tokens: list[bytearray] = []
|
||||||
toktypes: list[int] = []
|
toktypes: list[int] = []
|
||||||
|
|
||||||
from transformers import AutoTokenizer # type: ignore[attr-defined]
|
from transformers import AutoTokenizer
|
||||||
tokenizer = AutoTokenizer.from_pretrained(dir_model)
|
tokenizer = AutoTokenizer.from_pretrained(dir_model)
|
||||||
vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
|
vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
|
||||||
assert max(tokenizer.vocab.values()) < vocab_size
|
assert max(tokenizer.vocab.values()) < vocab_size
|
||||||
@ -460,6 +468,10 @@ class MPTModel(Model):
|
|||||||
data = data_torch.squeeze().numpy()
|
data = data_torch.squeeze().numpy()
|
||||||
|
|
||||||
# map tensor names
|
# map tensor names
|
||||||
|
if "scales" in name:
|
||||||
|
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales"))
|
||||||
|
new_name = new_name.replace("scales", "act.scales")
|
||||||
|
else:
|
||||||
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
|
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
|
||||||
if new_name is None:
|
if new_name is None:
|
||||||
print(f"Can not map tensor {name!r}")
|
print(f"Can not map tensor {name!r}")
|
||||||
@ -844,7 +856,7 @@ class StableLMModel(Model):
|
|||||||
hparams = self.hparams
|
hparams = self.hparams
|
||||||
block_count = hparams["num_hidden_layers"]
|
block_count = hparams["num_hidden_layers"]
|
||||||
|
|
||||||
self.gguf_writer.add_name(dir_model.name)
|
self.gguf_writer.add_name(self.dir_model.name)
|
||||||
self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
|
self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
|
||||||
self.gguf_writer.add_embedding_length(hparams["hidden_size"])
|
self.gguf_writer.add_embedding_length(hparams["hidden_size"])
|
||||||
self.gguf_writer.add_block_count(block_count)
|
self.gguf_writer.add_block_count(block_count)
|
||||||
@ -890,7 +902,7 @@ class QwenModel(Model):
|
|||||||
tokens: list[bytearray] = []
|
tokens: list[bytearray] = []
|
||||||
toktypes: list[int] = []
|
toktypes: list[int] = []
|
||||||
|
|
||||||
from transformers import AutoTokenizer # type: ignore[attr-defined]
|
from transformers import AutoTokenizer
|
||||||
tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
|
tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
|
||||||
vocab_size = hparams["vocab_size"]
|
vocab_size = hparams["vocab_size"]
|
||||||
assert max(tokenizer.get_vocab().values()) < vocab_size
|
assert max(tokenizer.get_vocab().values()) < vocab_size
|
||||||
@ -985,6 +997,68 @@ class QwenModel(Model):
|
|||||||
self.gguf_writer.add_tensor(new_name, data)
|
self.gguf_writer.add_tensor(new_name, data)
|
||||||
|
|
||||||
|
|
||||||
|
class GPT2Model(Model):
|
||||||
|
def set_gguf_parameters(self):
|
||||||
|
self.gguf_writer.add_name(self.dir_model.name)
|
||||||
|
self.gguf_writer.add_block_count(self.hparams["n_layer"])
|
||||||
|
self.gguf_writer.add_context_length(self.hparams["n_ctx"])
|
||||||
|
self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
|
||||||
|
self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
|
||||||
|
self.gguf_writer.add_head_count(self.hparams["n_head"])
|
||||||
|
self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
|
||||||
|
self.gguf_writer.add_file_type(self.ftype)
|
||||||
|
|
||||||
|
def write_tensors(self):
|
||||||
|
block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
|
||||||
|
tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
|
||||||
|
|
||||||
|
for name, data_torch in self.get_tensors():
|
||||||
|
# we don't need these
|
||||||
|
if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias")):
|
||||||
|
continue
|
||||||
|
|
||||||
|
if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")):
|
||||||
|
data_torch = data_torch.transpose(1, 0)
|
||||||
|
|
||||||
|
old_dtype = data_torch.dtype
|
||||||
|
|
||||||
|
# convert any unsupported data types to float32
|
||||||
|
if data_torch.dtype not in (torch.float16, torch.float32):
|
||||||
|
data_torch = data_torch.to(torch.float32)
|
||||||
|
|
||||||
|
data = data_torch.squeeze().numpy()
|
||||||
|
|
||||||
|
# map tensor names
|
||||||
|
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
|
||||||
|
if new_name is None:
|
||||||
|
print(f"Can not map tensor {name!r}")
|
||||||
|
sys.exit()
|
||||||
|
|
||||||
|
n_dims = len(data.shape)
|
||||||
|
data_dtype = data.dtype
|
||||||
|
|
||||||
|
# if f32 desired, convert any float16 to float32
|
||||||
|
if self.ftype == 0 and data_dtype == np.float16:
|
||||||
|
data = data.astype(np.float32)
|
||||||
|
|
||||||
|
# TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
|
||||||
|
if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
|
||||||
|
data = data.astype(np.float32)
|
||||||
|
|
||||||
|
# if f16 desired, convert any float32 2-dim weight tensors to float16
|
||||||
|
if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
|
||||||
|
data = data.astype(np.float16)
|
||||||
|
|
||||||
|
print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
|
||||||
|
|
||||||
|
self.gguf_writer.add_tensor(new_name, data)
|
||||||
|
|
||||||
|
# note: GPT2 output is tied to (same as) wte in original model
|
||||||
|
if new_name == "token_embd.weight":
|
||||||
|
print(f"output.weight, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
|
||||||
|
self.gguf_writer.add_tensor("output.weight", data)
|
||||||
|
|
||||||
|
|
||||||
class Phi2Model(Model):
|
class Phi2Model(Model):
|
||||||
def set_gguf_parameters(self):
|
def set_gguf_parameters(self):
|
||||||
block_count = self.hparams["n_layer"]
|
block_count = self.hparams["n_layer"]
|
||||||
@ -1002,15 +1076,98 @@ class Phi2Model(Model):
|
|||||||
self.gguf_writer.add_add_bos_token(False)
|
self.gguf_writer.add_add_bos_token(False)
|
||||||
|
|
||||||
|
|
||||||
|
class PlamoModel(Model):
|
||||||
|
def set_vocab(self):
|
||||||
|
self._set_vocab_sentencepiece()
|
||||||
|
|
||||||
|
def set_gguf_parameters(self):
|
||||||
|
hparams = self.hparams
|
||||||
|
block_count = hparams["num_hidden_layers"]
|
||||||
|
|
||||||
|
self.gguf_writer.add_name("PLaMo")
|
||||||
|
self.gguf_writer.add_context_length(4096) # not in config.json
|
||||||
|
self.gguf_writer.add_embedding_length(hparams["hidden_size"])
|
||||||
|
self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
|
||||||
|
self.gguf_writer.add_block_count(block_count)
|
||||||
|
self.gguf_writer.add_head_count(hparams["num_attention_heads"])
|
||||||
|
self.gguf_writer.add_head_count_kv(5) # hparams["num_key_value_heads"]) is wrong
|
||||||
|
self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
|
||||||
|
|
||||||
|
def shuffle_attn_q_weight(self, data_torch):
|
||||||
|
assert data_torch.size() == (5120, 5120)
|
||||||
|
data_torch = data_torch.reshape(8, 5, 128, 5120)
|
||||||
|
data_torch = torch.permute(data_torch, (1, 0, 2, 3))
|
||||||
|
data_torch = torch.reshape(data_torch, (5120, 5120))
|
||||||
|
return data_torch
|
||||||
|
|
||||||
|
def shuffle_attn_output_weight(self, data_torch):
|
||||||
|
assert data_torch.size() == (5120, 5120)
|
||||||
|
data_torch = data_torch.reshape(5120, 8, 5, 128)
|
||||||
|
data_torch = torch.permute(data_torch, (0, 2, 1, 3))
|
||||||
|
data_torch = torch.reshape(data_torch, (5120, 5120))
|
||||||
|
return data_torch
|
||||||
|
|
||||||
|
def write_tensors(self):
|
||||||
|
block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers"))
|
||||||
|
tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
|
||||||
|
|
||||||
|
for name, data_torch in self.get_tensors():
|
||||||
|
if "self_attn.rotary_emb.inv_freq" in name:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# map tensor names
|
||||||
|
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
|
||||||
|
if new_name is None:
|
||||||
|
print(f"Can not map tensor {name!r}")
|
||||||
|
sys.exit()
|
||||||
|
|
||||||
|
# shuffle for broadcasting of gqa in ggml_mul_mat
|
||||||
|
if new_name.endswith("attn_q.weight"):
|
||||||
|
data_torch = self.shuffle_attn_q_weight(data_torch)
|
||||||
|
elif new_name.endswith("attn_output.weight"):
|
||||||
|
data_torch = self.shuffle_attn_output_weight(data_torch)
|
||||||
|
|
||||||
|
old_dtype = data_torch.dtype
|
||||||
|
|
||||||
|
# convert any unsupported data types to float32
|
||||||
|
if data_torch.dtype not in (torch.float16, torch.float32):
|
||||||
|
data_torch = data_torch.to(torch.float32)
|
||||||
|
|
||||||
|
data = data_torch.squeeze().numpy()
|
||||||
|
|
||||||
|
n_dims = len(data.shape)
|
||||||
|
data_dtype = data.dtype
|
||||||
|
|
||||||
|
# if f32 desired, convert any float16 to float32
|
||||||
|
if self.ftype == 0 and data_dtype == np.float16:
|
||||||
|
data = data.astype(np.float32)
|
||||||
|
|
||||||
|
# TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
|
||||||
|
if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
|
||||||
|
data = data.astype(np.float32)
|
||||||
|
|
||||||
|
# if f16 desired, convert any float32 2-dim weight tensors to float16
|
||||||
|
if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
|
||||||
|
data = data.astype(np.float16)
|
||||||
|
|
||||||
|
print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
|
||||||
|
|
||||||
|
self.gguf_writer.add_tensor(new_name, data)
|
||||||
|
|
||||||
|
|
||||||
###### CONVERSION LOGIC ######
|
###### CONVERSION LOGIC ######
|
||||||
|
|
||||||
|
|
||||||
def parse_args() -> argparse.Namespace:
|
def parse_args() -> argparse.Namespace:
|
||||||
parser = argparse.ArgumentParser(description="Convert a huggingface model to a GGML compatible file")
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Convert a huggingface model to a GGML compatible file")
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--vocab-only", action="store_true",
|
"--vocab-only", action="store_true",
|
||||||
help="extract only the vocab",
|
help="extract only the vocab",
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--awq-path", type=Path, default=None,
|
||||||
|
help="Path to scale awq cache file")
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--outfile", type=Path,
|
"--outfile", type=Path,
|
||||||
help="path to write to; default: based on input",
|
help="path to write to; default: based on input",
|
||||||
@ -1028,9 +1185,24 @@ def parse_args() -> argparse.Namespace:
|
|||||||
return parser.parse_args()
|
return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
args = parse_args()
|
args = parse_args()
|
||||||
|
|
||||||
dir_model = args.model
|
dir_model = args.model
|
||||||
|
|
||||||
|
if args.awq_path:
|
||||||
|
sys.path.insert(1, str(Path(__file__).parent / 'awq-py'))
|
||||||
|
from awq.apply_awq import add_scale_weights
|
||||||
|
tmp_model_path = args.model / "weighted_model"
|
||||||
|
dir_model = tmp_model_path
|
||||||
|
if tmp_model_path.is_dir():
|
||||||
|
print(f"{tmp_model_path} exists as a weighted model.")
|
||||||
|
else:
|
||||||
|
tmp_model_path.mkdir(parents=True, exist_ok=True)
|
||||||
|
print("Saving new weighted model ...")
|
||||||
|
add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path))
|
||||||
|
print(f"Saved weighted model at {tmp_model_path}.")
|
||||||
|
|
||||||
if not dir_model.is_dir():
|
if not dir_model.is_dir():
|
||||||
print(f'Error: {args.model} is not a directory', file=sys.stderr)
|
print(f'Error: {args.model} is not a directory', file=sys.stderr)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
@ -1068,3 +1240,7 @@ with torch.inference_mode():
|
|||||||
model_instance.write()
|
model_instance.write()
|
||||||
|
|
||||||
print(f"Model successfully exported to '{fname_out}'")
|
print(f"Model successfully exported to '{fname_out}'")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
|
@ -47,6 +47,7 @@ def write_tensor_header(fout: BinaryIO, name: str, shape: Sequence[int], data_ty
|
|||||||
fout.seek((fout.tell() + 31) & -32)
|
fout.seek((fout.tell() + 31) & -32)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
if len(sys.argv) < 2:
|
if len(sys.argv) < 2:
|
||||||
print(f"Usage: python {sys.argv[0]} <path> [arch]")
|
print(f"Usage: python {sys.argv[0]} <path> [arch]")
|
||||||
print(
|
print(
|
||||||
|
1	convert-persimmon-to-gguf.py	Normal file → Executable file
@ -1,3 +1,4 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
import torch
|
import torch
|
||||||
import os
|
import os
|
||||||
from pprint import pprint
|
from pprint import pprint
|
||||||
|
825	convert.py
File diff suppressed because it is too large
@ -31,8 +31,10 @@ else()
|
|||||||
add_subdirectory(quantize-stats)
|
add_subdirectory(quantize-stats)
|
||||||
add_subdirectory(save-load-state)
|
add_subdirectory(save-load-state)
|
||||||
add_subdirectory(simple)
|
add_subdirectory(simple)
|
||||||
|
add_subdirectory(passkey)
|
||||||
add_subdirectory(speculative)
|
add_subdirectory(speculative)
|
||||||
add_subdirectory(lookahead)
|
add_subdirectory(lookahead)
|
||||||
|
add_subdirectory(lookup)
|
||||||
add_subdirectory(train-text-from-scratch)
|
add_subdirectory(train-text-from-scratch)
|
||||||
if (LLAMA_METAL)
|
if (LLAMA_METAL)
|
||||||
add_subdirectory(metal)
|
add_subdirectory(metal)
|
||||||
|
@ -575,10 +575,7 @@ static struct ggml_tensor * forward(
|
|||||||
|
|
||||||
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
||||||
// KQ_scaled shape [n_past + N, N, n_head, 1]
|
// KQ_scaled shape [n_past + N, N, n_head, 1]
|
||||||
struct ggml_tensor * KQ_scaled =
|
struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, 1.0f/sqrtf(float(n_embd)/n_head));
|
||||||
ggml_scale(ctx0,
|
|
||||||
KQ,
|
|
||||||
ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
|
|
||||||
|
|
||||||
// KQ_masked = mask_past(KQ_scaled)
|
// KQ_masked = mask_past(KQ_scaled)
|
||||||
// KQ_masked shape [n_past + N, N, n_head, 1]
|
// KQ_masked shape [n_past + N, N, n_head, 1]
|
||||||
@ -844,10 +841,7 @@ static struct ggml_tensor * forward_batch(
|
|||||||
|
|
||||||
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
||||||
// KQ_scaled shape [n_past + N, N, n_head, n_batch]
|
// KQ_scaled shape [n_past + N, N, n_head, n_batch]
|
||||||
struct ggml_tensor * KQ_scaled =
|
struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, 1.0f/sqrtf(float(n_embd)/n_head));
|
||||||
ggml_scale(ctx0,
|
|
||||||
KQ,
|
|
||||||
ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
|
|
||||||
assert_shape_4d(KQ_scaled, n_past + N, N, n_head, n_batch);
|
assert_shape_4d(KQ_scaled, n_past + N, N, n_head, n_batch);
|
||||||
|
|
||||||
// KQ_masked = mask_past(KQ_scaled)
|
// KQ_masked = mask_past(KQ_scaled)
|
||||||
@ -1131,10 +1125,7 @@ static struct ggml_tensor * forward_lora(
|
|||||||
|
|
||||||
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
||||||
// KQ_scaled shape [n_past + N, N, n_head, 1]
|
// KQ_scaled shape [n_past + N, N, n_head, 1]
|
||||||
struct ggml_tensor * KQ_scaled =
|
struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, 1.0f/sqrtf(float(n_embd)/n_head));
|
||||||
ggml_scale(ctx0,
|
|
||||||
KQ,
|
|
||||||
ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
|
|
||||||
|
|
||||||
// KQ_masked = mask_past(KQ_scaled)
|
// KQ_masked = mask_past(KQ_scaled)
|
||||||
// KQ_masked shape [n_past + N, N, n_head, 1]
|
// KQ_masked shape [n_past + N, N, n_head, 1]
|
||||||
|
61	examples/base-translate.sh	Executable file
@ -0,0 +1,61 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Few-shot translation example.
|
||||||
|
# Requires a base model (i.e. no fine-tuned or instruct models).
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
#
|
||||||
|
# cd llama.cpp
|
||||||
|
# make -j
|
||||||
|
#
|
||||||
|
# ./examples/base-translate.sh <model-base> "<text>" [extra-main-args]
|
||||||
|
#
|
||||||
|
|
||||||
|
if [ $# -lt 2 ]; then
|
||||||
|
echo "Usage: ./base-translate.sh <model-base> \"<text>\" [extra-main-args]"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
eargs=""
|
||||||
|
if [ $# -gt 2 ]; then
|
||||||
|
eargs="${@:3}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
ftmp="__llama.cpp_example_tmp__.txt"
|
||||||
|
trap "rm -f $ftmp" EXIT
|
||||||
|
|
||||||
|
echo "Translate from English to French:
|
||||||
|
|
||||||
|
===
|
||||||
|
|
||||||
|
sea otter, peppermint, plush girafe:
|
||||||
|
|
||||||
|
sea otter => loutre de mer
|
||||||
|
peppermint => menthe poivrée
|
||||||
|
plush girafe => girafe peluche
|
||||||
|
|
||||||
|
===
|
||||||
|
|
||||||
|
violin
|
||||||
|
|
||||||
|
violin => violon
|
||||||
|
|
||||||
|
===
|
||||||
|
|
||||||
|
phone, computer, mouse, keyboard:
|
||||||
|
|
||||||
|
phone => téléphone
|
||||||
|
computer => ordinateur
|
||||||
|
mouse => souris
|
||||||
|
keyboard => clavier
|
||||||
|
|
||||||
|
===
|
||||||
|
" > $ftmp
|
||||||
|
|
||||||
|
echo "$2
|
||||||
|
" >> $ftmp
|
||||||
|
|
||||||
|
model=$1
|
||||||
|
|
||||||
|
# generate the most likely continuation until the string "===" is found
|
||||||
|
./main -m $model -f $ftmp -n 64 --temp 0 --repeat-penalty 1.0 --no-penalize-nl -r "===" $eargs
|
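For reference, a hypothetical invocation of the script added above. The model path below is a placeholder (any base, non-instruct GGUF model should work), and anything after the quoted text is forwarded to `./main`:

```sh
# placeholder model path; substitute any base (non-instruct) GGUF model
./examples/base-translate.sh ./models/open-llama-7b-v2-q4_0.gguf "Good morning, how are you?"

# extra arguments after the text are passed through to ./main,
# e.g. GPU offload if your build supports it:
./examples/base-translate.sh ./models/open-llama-7b-v2-q4_0.gguf "Good morning" -ngl 33
```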
@@ -69,6 +69,7 @@ int main(int argc, char ** argv) {
 
     std::vector<llama_token> tokens_list;
     tokens_list = ::llama_tokenize(model, params.prompt, true);
 
     const int n_kv_req = tokens_list.size() + (n_len - tokens_list.size())*n_parallel;
 
     // initialize the context
@@ -309,7 +309,7 @@ static struct ggml_cgraph * build_graph_lora(
 ) {
     struct ggml_tensor * ab = ggml_mul_mat(ctx, lora_a, lora_b);
     if (scaling != 1.0f) {
-        ab = ggml_scale(ctx, ab, ggml_new_f32(ctx, scaling));
+        ab = ggml_scale(ctx, ab, scaling);
     }
     struct ggml_tensor * res = ggml_add_inplace(ctx, tensor, ab);
 
@@ -61,7 +61,7 @@ For example to apply 40% of the 'shakespeare' LORA adapter, 80% of the 'bible' L
   --lora lora-open-llama-3b-v2-q8_0-yet-another-one-LATEST.bin
 ```
 
-The scale numbers don't need to add up to one, and you can also use numbers greater than 1 to further increase the influence of an adapter. But making the values to big will sometimes result in worse output. Play around to find good values.
+The scale numbers don't need to add up to one, and you can also use numbers greater than 1 to further increase the influence of an adapter. But making the values too big will sometimes result in worse output. Play around to find good values.
 
 Gradient checkpointing reduces the memory requirements by ~50% but increases the runtime.
 If you have enough RAM, you can make finetuning a bit faster by disabling checkpointing with `--no-checkpointing`.
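As the README excerpt above notes, adapter scales are free parameters. A hedged sketch of the command it describes; the adapter file names are placeholders modelled on the one shown in the context line, and the two-argument `--lora-scaled FNAME SCALE` form is assumed from the surrounding README:

```sh
# sketch only: file names are placeholders; scales do not have to sum to 1,
# but very large values tend to degrade the output
./main -m open-llama-3b-v2-q8_0.gguf \
  --lora-scaled lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin 0.4 \
  --lora-scaled lora-open-llama-3b-v2-q8_0-bible-LATEST.bin 0.8 \
  --lora lora-open-llama-3b-v2-q8_0-yet-another-one-LATEST.bin
```

Likewise, appending `--no-checkpointing` to a finetune run trades the ~50% memory saving of gradient checkpointing for a shorter runtime, as described above.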
@@ -3,15 +3,9 @@
 #include "llama.h"
 #include "common.h"
 #include "train.h"
-#include <unordered_map>
 #include <vector>
-#include <cassert>
-#include <climits>
 #include <cstring>
-#include <cstdarg>
 #include <ctime>
-#include <random>
-#include <stdexcept>
 #include <algorithm>
 #include <string>
 
@@ -612,6 +606,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     const int n_rot = hparams.n_embd_head();
     const int n_embd_head = hparams.n_embd_head();
     const int n_embd_gqa = hparams.n_embd_gqa();
 
     const float rms_norm_eps = hparams.f_norm_rms_eps;
     const float rope_freq_base = hparams.rope_freq_base;
     const float rope_freq_scale = hparams.rope_freq_scale;
@@ -680,10 +675,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
         checkpoints.push_back(t01);
     }
 
-    struct ggml_tensor * kv_scale = NULL;
-    if (!enable_flash_attn) {
-        kv_scale = ggml_new_f32(ctx, 1.0f/sqrtf(float(n_embd)/n_head));
-    }
+    const float kv_scale = 1.0f/sqrtf(float(n_embd)/n_head);
 
     for (int il = 0; il < n_layer; ++il) {
         struct my_llama_layer & layer = model->layers[il];
@@ -781,32 +773,32 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     // make sure some tensors are not reallocated by inserting new temporary nodes depending on them
     int n_leafs_before = gb->n_leafs;
     int n_nodes_before = gb->n_nodes;
-    struct ggml_tensor * one = ggml_new_f32(ctx, 1.0f);
+
     // output tensors
-    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, one));
-    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, one));
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, 1.0f));
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, 1.0f));
     // input gradient
-    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one));
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, 1.0f));
     GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL);
     ggml_allocr_alloc(alloc, t36->grad);
     // KQ_pos
-    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, one));
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, 1.0f));
 
     // make sure base model tensors data cannot be used in viewable operations
-    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, one));
-    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->norm, one));
-    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->output, one));
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, 1.0f));
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->norm, 1.0f));
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->output, 1.0f));
     for (int il = 0; il < n_layer; ++il) {
         struct my_llama_layer & layer = model->layers[il];
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.attention_norm, one));
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_norm, one));
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wq, one));
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wk, one));
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wv, one));
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wo, one));
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w1, one));
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w2, one));
-        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w3, one));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.attention_norm, 1.0f));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_norm, 1.0f));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wq, 1.0f));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wk, 1.0f));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wv, 1.0f));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wo, 1.0f));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w1, 1.0f));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w2, 1.0f));
+        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w3, 1.0f));
     }
 
     // allocating checkpoints in one block to reduce memory fragmentation
@@ -1,5 +1,5 @@
 set(TARGET gguf)
 add_executable(${TARGET} gguf.cpp)
 install(TARGETS ${TARGET} RUNTIME)
-target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
@@ -1,5 +1,4 @@
 #include "ggml.h"
-#include "llama.h"
 
 #include <cstdio>
 #include <cinttypes>
@@ -138,6 +138,7 @@ struct cmd_params {
     std::vector<int> n_threads;
     std::vector<int> n_gpu_layers;
     std::vector<int> main_gpu;
+    std::vector<bool> no_kv_offload;
     std::vector<bool> mul_mat_q;
     std::vector<std::array<float, LLAMA_MAX_DEVICES>> tensor_split;
     int reps;
@@ -155,6 +156,7 @@ static const cmd_params cmd_params_defaults = {
     /* n_threads */ {get_num_physical_cores()},
     /* n_gpu_layers */ {99},
     /* main_gpu */ {0},
+    /* no_kv_offload */ {false},
     /* mul_mat_q */ {true},
     /* tensor_split */ {{}},
     /* reps */ 5,
@@ -176,6 +178,7 @@ static void print_usage(int /* argc */, char ** argv) {
     printf(" -t, --threads <n> (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
     printf(" -ngl, --n-gpu-layers <n> (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
     printf(" -mg, --main-gpu <i> (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
+    printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
     printf(" -mmq, --mul-mat-q <0|1> (default: %s)\n", join(cmd_params_defaults.mul_mat_q, ",").c_str());
     printf(" -ts, --tensor_split <ts0/ts1/..> \n");
     printf(" -r, --repetitions <n> (default: %d)\n", cmd_params_defaults.reps);
@@ -309,6 +312,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
                 break;
             }
             params.main_gpu = split<int>(argv[i], split_delim);
+        } else if (arg == "-nkvo" || arg == "--no-kv-offload") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            auto p = split<bool>(argv[i], split_delim);
+            params.no_kv_offload.insert(params.no_kv_offload.end(), p.begin(), p.end());
         } else if (arg == "-mmq" || arg == "--mul-mat-q") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -383,6 +393,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
     if (params.type_v.empty()) { params.type_v = cmd_params_defaults.type_v; }
     if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; }
     if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; }
+    if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
     if (params.mul_mat_q.empty()) { params.mul_mat_q = cmd_params_defaults.mul_mat_q; }
     if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; }
     if (params.n_threads.empty()) { params.n_threads = cmd_params_defaults.n_threads; }
@@ -400,6 +411,7 @@ struct cmd_params_instance {
     int n_threads;
     int n_gpu_layers;
     int main_gpu;
+    bool no_kv_offload;
     bool mul_mat_q;
     std::array<float, LLAMA_MAX_DEVICES> tensor_split;
 
@@ -428,6 +440,7 @@ struct cmd_params_instance {
         cparams.type_k = type_k;
         cparams.type_v = type_v;
         cparams.mul_mat_q = mul_mat_q;
+        cparams.offload_kqv = !no_kv_offload;
 
         return cparams;
     }
@@ -444,6 +457,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances_int(const cmd_p
     for (const auto & tk : params.type_k)
     for (const auto & tv : params.type_v)
     for (const auto & mmq : params.mul_mat_q)
+    for (const auto & nkvo : params.no_kv_offload)
     for (const auto & nt : params.n_threads) {
         cmd_params_instance instance = {
             /* .model = */ m,
@@ -455,6 +469,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances_int(const cmd_p
             /* .n_threads = */ nt,
             /* .n_gpu_layers = */ nl,
             /* .main_gpu = */ mg,
+            /* .no_kv_offload= */ nkvo,
             /* .mul_mat_q = */ mmq,
             /* .tensor_split = */ ts,
         };
@@ -476,6 +491,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
     for (const auto & tk : params.type_k)
     for (const auto & tv : params.type_v)
     for (const auto & mmq : params.mul_mat_q)
+    for (const auto & nkvo : params.no_kv_offload)
     for (const auto & nt : params.n_threads) {
         for (const auto & n_prompt : params.n_prompt) {
             if (n_prompt == 0) {
@@ -491,6 +507,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .n_threads = */ nt,
                 /* .n_gpu_layers = */ nl,
                 /* .main_gpu = */ mg,
+                /* .no_kv_offload= */ nkvo,
                 /* .mul_mat_q = */ mmq,
                 /* .tensor_split = */ ts,
             };
@@ -511,6 +528,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .n_threads = */ nt,
                 /* .n_gpu_layers = */ nl,
                 /* .main_gpu = */ mg,
+                /* .no_kv_offload= */ nkvo,
                 /* .mul_mat_q = */ mmq,
                 /* .tensor_split = */ ts,
             };
@@ -559,6 +577,7 @@ struct test {
     ggml_type type_v;
     int n_gpu_layers;
     int main_gpu;
+    bool no_kv_offload;
    bool mul_mat_q;
     std::array<float, LLAMA_MAX_DEVICES> tensor_split;
     int n_prompt;
@@ -579,6 +598,7 @@ struct test {
        type_v = inst.type_v;
        n_gpu_layers = inst.n_gpu_layers;
        main_gpu = inst.main_gpu;
+       no_kv_offload = inst.no_kv_offload;
        mul_mat_q = inst.mul_mat_q;
        tensor_split = inst.tensor_split;
        n_prompt = inst.n_prompt;
@@ -640,7 +660,8 @@ struct test {
        "cpu_info", "gpu_info",
        "model_filename", "model_type", "model_size", "model_n_params",
        "n_batch", "n_threads", "type_k", "type_v",
-       "n_gpu_layers", "main_gpu", "mul_mat_q", "tensor_split",
+       "n_gpu_layers", "main_gpu", "no_kv_offload",
+       "mul_mat_q", "tensor_split",
        "n_prompt", "n_gen", "test_time",
        "avg_ns", "stddev_ns",
        "avg_ts", "stddev_ts"
@@ -659,7 +680,7 @@ struct test {
            return INT;
        }
        if (field == "cuda" || field == "opencl" || field == "metal" || field == "gpu_blas" || field == "blas" ||
-           field == "f16_kv" || field == "mul_mat_q") {
+           field == "f16_kv" || field == "no_kv_offload" || field == "mul_mat_q") {
            return BOOL;
        }
        if (field == "avg_ts" || field == "stddev_ts") {
@@ -690,7 +711,8 @@ struct test {
            cpu_info, gpu_info,
            model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
            std::to_string(n_batch), std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v),
-           std::to_string(n_gpu_layers), std::to_string(main_gpu), std::to_string(mul_mat_q), tensor_split_str,
+           std::to_string(n_gpu_layers), std::to_string(main_gpu), std::to_string(no_kv_offload),
+           std::to_string(mul_mat_q), tensor_split_str,
            std::to_string(n_prompt), std::to_string(n_gen), test_time,
            std::to_string(avg_ns()), std::to_string(stdev_ns()),
            std::to_string(avg_ts()), std::to_string(stdev_ts())
@@ -851,6 +873,9 @@ struct markdown_printer : public printer {
        if (field == "mul_mat_q") {
            return "mmq";
        }
+       if (field == "no_kv_offload") {
+           return "nkvo";
+       }
        if (field == "tensor_split") {
            return "ts";
        }
@@ -885,6 +910,9 @@ struct markdown_printer : public printer {
        if (params.mul_mat_q.size() > 1 || params.mul_mat_q != cmd_params_defaults.mul_mat_q) {
            fields.push_back("mul_mat_q");
        }
+       if (params.no_kv_offload.size() > 1 || params.no_kv_offload != cmd_params_defaults.no_kv_offload) {
+           fields.push_back("no_kv_offload");
+       }
        if (params.tensor_split.size() > 1 || params.tensor_split != cmd_params_defaults.tensor_split) {
            fields.push_back("tensor_split");
        }
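The new `-nkvo` / `--no-kv-offload` option added above can be swept like any other llama-bench parameter. A minimal sketch, assuming the usual `-m` model flag and a placeholder model path:

```sh
# sketch only: model path is a placeholder
# 0 = KV cache offloading enabled (default), 1 = disabled; both runs are reported
./llama-bench -m models/7B/ggml-model-q4_0.gguf -ngl 99 -nkvo 0,1
```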
@@ -1,7 +1,12 @@
-# llama.swiftui
+# llama.cpp/examples/llama.swiftui
 
-Local inference of llama.cpp on an iPhone.
-So far I only tested with starcoder 1B model, but it can most likely handle 7B models as well.
+Local inference of llama.cpp on an iPhone. This is a sample app that can be used as a starting
+point for more advanced projects.
 
+For usage instructions and performance stats, check the following discussion: https://github.com/ggerganov/llama.cpp/discussions/4508
+
+![image](https://github.com/ggerganov/llama.cpp/assets/1991296/2b40284f-8421-47a2-b634-74eece09a299)
+
+Video demonstration:
 
 https://github.com/bachittle/llama.cpp/assets/39804642/e290827a-4edb-4093-9642-2a5e399ec545
 
@@ -1,6 +1,5 @@
 import Foundation
-// import llama
+import llama
 
 enum LlamaError: Error {
     case couldNotInitializeContext
@@ -159,7 +158,7 @@ actor LlamaContext {
             new_token_id = llama_sample_token_greedy(context, &candidates_p)
         }
 
-        if new_token_id == llama_token_eos(context) || n_cur == n_len {
+        if new_token_id == llama_token_eos(model) || n_cur == n_len {
             print("\n")
             let new_token_str = String(cString: temporary_invalid_cchars + [0])
             temporary_invalid_cchars.removeAll()
@@ -1,5 +0,0 @@
-//
-// Use this file to import your target's public headers that you would like to expose to Swift.
-//
-
-#import "llama.h"
@@ -7,51 +7,32 @@
 	objects = {
 
 /* Begin PBXBuildFile section */
-		542376082B0D9BFB008E6A1C /* ggml-quants.c in Sources */ = {isa = PBXBuildFile; fileRef = 542376072B0D9BFB008E6A1C /* ggml-quants.c */; settings = {COMPILER_FLAGS = "-O3"; }; };
-		5423760B2B0D9C4B008E6A1C /* ggml-backend.c in Sources */ = {isa = PBXBuildFile; fileRef = 5423760A2B0D9C4B008E6A1C /* ggml-backend.c */; settings = {COMPILER_FLAGS = "-O3"; }; };
-		542378792ACE3F3500834A7B /* ggml-metal.metal in Resources */ = {isa = PBXBuildFile; fileRef = 549479C82AC9E10B00E0F78B /* ggml-metal.metal */; };
-		542EA09D2AC8723900A8AEE9 /* ggml.c in Sources */ = {isa = PBXBuildFile; fileRef = 542EA09B2AC8723900A8AEE9 /* ggml.c */; settings = {COMPILER_FLAGS = "-DGGML_USE_ACCELERATE -DGGML_USE_METAL -DGGML_USE_K_QUANTS -O3"; }; };
-		542EA0A02AC8725700A8AEE9 /* ggml-alloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */; settings = {COMPILER_FLAGS = "-O3"; }; };
-		542EA0A32AC8729100A8AEE9 /* llama.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 542EA0A12AC8729100A8AEE9 /* llama.cpp */; settings = {COMPILER_FLAGS = "-DGGML_USE_K_QUANTS -DGGML_USE_METAL -O3"; }; };
 		549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 549479CA2AC9E16000E0F78B /* Metal.framework */; };
-		549479CD2AC9E42A00E0F78B /* ggml-metal.m in Sources */ = {isa = PBXBuildFile; fileRef = 549479C52AC9E0F200E0F78B /* ggml-metal.m */; settings = {COMPILER_FLAGS = "-fno-objc-arc -DGGML_SWIFT -DGGML_USE_METAL -O3"; }; };
 		7FA3D2B32B2EA2F600543F92 /* DownloadButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */; };
 		8A1C83772AC328BD0096AF73 /* llama_swiftuiApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */; };
 		8A1C83792AC328BD0096AF73 /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A1C83782AC328BD0096AF73 /* ContentView.swift */; };
 		8A1C837B2AC328BE0096AF73 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A1C837A2AC328BE0096AF73 /* Assets.xcassets */; };
-		8A1C837E2AC328BE0096AF73 /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */; };
 		8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8A39BE092AC7601000BFEB40 /* Accelerate.framework */; };
 		8A3F84242AC4C891005E2EE8 /* models in Resources */ = {isa = PBXBuildFile; fileRef = 8A3F84232AC4C891005E2EE8 /* models */; };
 		8A907F332AC7138A006146EA /* LibLlama.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A907F322AC7134E006146EA /* LibLlama.swift */; };
 		8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */; };
+		DF810E132B4A5BA200301144 /* llama in Frameworks */ = {isa = PBXBuildFile; productRef = DF810E122B4A5BA200301144 /* llama */; };
+		F1FE20E22B465ECA00B45541 /* LoadCustomButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = F1FE20E12B465EC900B45541 /* LoadCustomButton.swift */; };
 /* End PBXBuildFile section */
 
 /* Begin PBXFileReference section */
-		542376062B0D9BEA008E6A1C /* ggml-quants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-quants.h"; path = "../../ggml-quants.h"; sourceTree = "<group>"; };
-		542376072B0D9BFB008E6A1C /* ggml-quants.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-quants.c"; path = "../../ggml-quants.c"; sourceTree = "<group>"; };
-		542376092B0D9C40008E6A1C /* ggml-backend.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "ggml-backend.h"; path = "../../ggml-backend.h"; sourceTree = "<group>"; };
-		5423760A2B0D9C4B008E6A1C /* ggml-backend.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-backend.c"; path = "../../ggml-backend.c"; sourceTree = "<group>"; };
-		542EA09B2AC8723900A8AEE9 /* ggml.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = ggml.c; path = ../../ggml.c; sourceTree = "<group>"; };
-		542EA09C2AC8723900A8AEE9 /* ggml.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ggml.h; path = ../../ggml.h; sourceTree = "<group>"; };
-		542EA09E2AC8725700A8AEE9 /* ggml-alloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-alloc.h"; path = "../../ggml-alloc.h"; sourceTree = "<group>"; };
-		542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-alloc.c"; path = "../../ggml-alloc.c"; sourceTree = "<group>"; };
-		542EA0A12AC8729100A8AEE9 /* llama.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = llama.cpp; path = ../../llama.cpp; sourceTree = "<group>"; };
-		542EA0A22AC8729100A8AEE9 /* llama.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = llama.h; path = ../../llama.h; sourceTree = "<group>"; };
-		549479C52AC9E0F200E0F78B /* ggml-metal.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "ggml-metal.m"; path = "../../ggml-metal.m"; sourceTree = "<group>"; };
-		549479C62AC9E0F200E0F78B /* ggml-metal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-metal.h"; path = "../../ggml-metal.h"; sourceTree = "<group>"; };
-		549479C82AC9E10B00E0F78B /* ggml-metal.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; name = "ggml-metal.metal"; path = "../../ggml-metal.metal"; sourceTree = "<group>"; };
 		549479CA2AC9E16000E0F78B /* Metal.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Metal.framework; path = System/Library/Frameworks/Metal.framework; sourceTree = SDKROOT; };
 		7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DownloadButton.swift; sourceTree = "<group>"; };
-		8A08D20A2AC73B1500FE6CD4 /* bridging-header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "bridging-header.h"; sourceTree = "<group>"; };
 		8A1C83732AC328BD0096AF73 /* llama.swiftui.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = llama.swiftui.app; sourceTree = BUILT_PRODUCTS_DIR; };
 		8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = llama_swiftuiApp.swift; sourceTree = "<group>"; };
 		8A1C83782AC328BD0096AF73 /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = "<group>"; };
 		8A1C837A2AC328BE0096AF73 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; };
-		8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = "<group>"; };
 		8A39BE092AC7601000BFEB40 /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; };
 		8A3F84232AC4C891005E2EE8 /* models */ = {isa = PBXFileReference; lastKnownFileType = folder; name = models; path = llama.swiftui/Resources/models; sourceTree = "<group>"; };
 		8A907F322AC7134E006146EA /* LibLlama.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LibLlama.swift; sourceTree = "<group>"; };
 		8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LlamaState.swift; sourceTree = "<group>"; };
+		DF2D2FE72B4A59BE00FCB72D /* llama.cpp */ = {isa = PBXFileReference; lastKnownFileType = wrapper; name = llama.cpp; path = ../..; sourceTree = "<group>"; };
+		F1FE20E12B465EC900B45541 /* LoadCustomButton.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LoadCustomButton.swift; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -59,6 +40,7 @@
 			isa = PBXFrameworksBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
+				DF810E132B4A5BA200301144 /* llama in Frameworks */,
 				549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */,
 				8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */,
 			);
@@ -67,30 +49,10 @@
 /* End PBXFrameworksBuildPhase section */
 
 /* Begin PBXGroup section */
-		8A08D1F62AC7383900FE6CD4 /* llama.cpp */ = {
-			isa = PBXGroup;
-			children = (
-				5423760A2B0D9C4B008E6A1C /* ggml-backend.c */,
-				542376092B0D9C40008E6A1C /* ggml-backend.h */,
-				542376062B0D9BEA008E6A1C /* ggml-quants.h */,
-				542376072B0D9BFB008E6A1C /* ggml-quants.c */,
-				549479C82AC9E10B00E0F78B /* ggml-metal.metal */,
-				549479C62AC9E0F200E0F78B /* ggml-metal.h */,
-				549479C52AC9E0F200E0F78B /* ggml-metal.m */,
-				542EA09B2AC8723900A8AEE9 /* ggml.c */,
-				542EA09C2AC8723900A8AEE9 /* ggml.h */,
-				542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */,
-				542EA09E2AC8725700A8AEE9 /* ggml-alloc.h */,
-				542EA0A12AC8729100A8AEE9 /* llama.cpp */,
-				542EA0A22AC8729100A8AEE9 /* llama.h */,
-			);
-			name = llama.cpp;
-			sourceTree = "<group>";
-		};
 		8A1C836A2AC328BD0096AF73 = {
 			isa = PBXGroup;
 			children = (
-				8A08D1F62AC7383900FE6CD4 /* llama.cpp */,
+				DF2D2FE72B4A59BE00FCB72D /* llama.cpp */,
 				8A907F312AC7134E006146EA /* llama.cpp.swift */,
 				8A3F84232AC4C891005E2EE8 /* models */,
 				8A1C83752AC328BD0096AF73 /* llama.swiftui */,
@@ -115,19 +77,10 @@
 				8A9F7C4A2AC332BF008AE1EA /* UI */,
 				8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */,
 				8A1C837A2AC328BE0096AF73 /* Assets.xcassets */,
-				8A1C837C2AC328BE0096AF73 /* Preview Content */,
 			);
 			path = llama.swiftui;
 			sourceTree = "<group>";
 		};
-		8A1C837C2AC328BE0096AF73 /* Preview Content */ = {
-			isa = PBXGroup;
-			children = (
-				8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */,
-			);
-			path = "Preview Content";
-			sourceTree = "<group>";
-		};
 		8A39BE082AC7601000BFEB40 /* Frameworks */ = {
 			isa = PBXGroup;
 			children = (
@@ -155,7 +108,6 @@
 		8A907F312AC7134E006146EA /* llama.cpp.swift */ = {
 			isa = PBXGroup;
 			children = (
-				8A08D20A2AC73B1500FE6CD4 /* bridging-header.h */,
 				8A907F322AC7134E006146EA /* LibLlama.swift */,
 			);
 			path = llama.cpp.swift;
@@ -166,6 +118,7 @@
 			children = (
 				7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */,
 				8A1C83782AC328BD0096AF73 /* ContentView.swift */,
+				F1FE20E12B465EC900B45541 /* LoadCustomButton.swift */,
 			);
 			path = UI;
 			sourceTree = "<group>";
@@ -195,6 +148,7 @@
 			);
 			name = llama.swiftui;
 			packageProductDependencies = (
+				DF810E122B4A5BA200301144 /* llama */,
 			);
 			productName = llama.swiftui;
 			productReference = 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */;
@@ -241,9 +195,7 @@
 			isa = PBXResourcesBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
-				542378792ACE3F3500834A7B /* ggml-metal.metal in Resources */,
 				8A3F84242AC4C891005E2EE8 /* models in Resources */,
-				8A1C837E2AC328BE0096AF73 /* Preview Assets.xcassets in Resources */,
 				8A1C837B2AC328BE0096AF73 /* Assets.xcassets in Resources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -255,17 +207,12 @@
 			isa = PBXSourcesBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
-				542376082B0D9BFB008E6A1C /* ggml-quants.c in Sources */,
-				549479CD2AC9E42A00E0F78B /* ggml-metal.m in Sources */,
-				542EA09D2AC8723900A8AEE9 /* ggml.c in Sources */,
+				F1FE20E22B465ECA00B45541 /* LoadCustomButton.swift in Sources */,
 				8A907F332AC7138A006146EA /* LibLlama.swift in Sources */,
-				542EA0A32AC8729100A8AEE9 /* llama.cpp in Sources */,
 				8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */,
 				8A1C83792AC328BD0096AF73 /* ContentView.swift in Sources */,
 				8A1C83772AC328BD0096AF73 /* llama_swiftuiApp.swift in Sources */,
 				7FA3D2B32B2EA2F600543F92 /* DownloadButton.swift in Sources */,
-				542EA0A02AC8725700A8AEE9 /* ggml-alloc.c in Sources */,
-				5423760B2B0D9C4B008E6A1C /* ggml-backend.c in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -395,11 +342,9 @@
 			isa = XCBuildConfiguration;
 			buildSettings = {
 				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
-				ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
 				CLANG_ENABLE_MODULES = YES;
 				CODE_SIGN_STYLE = Automatic;
 				CURRENT_PROJECT_VERSION = 1;
-				DEVELOPMENT_ASSET_PATHS = "\"llama.swiftui/Preview Content\"";
 				DEVELOPMENT_TEAM = STLSG3FG8Q;
 				ENABLE_PREVIEWS = YES;
 				GENERATE_INFOPLIST_FILE = YES;
@@ -416,11 +361,12 @@
 				MARKETING_VERSION = 1.0;
 				PRODUCT_BUNDLE_IDENTIFIER = "com.bachittle.llama-swift";
 				PRODUCT_NAME = "$(TARGET_NAME)";
+				SUPPORTED_PLATFORMS = "iphoneos iphonesimulator xros xrsimulator";
+				SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = NO;
 				SWIFT_EMIT_LOC_STRINGS = YES;
-				SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h";
 				SWIFT_OPTIMIZATION_LEVEL = "-Onone";
 				SWIFT_VERSION = 5.0;
-				TARGETED_DEVICE_FAMILY = "1,2";
+				TARGETED_DEVICE_FAMILY = "1,2,7";
 			};
 			name = Debug;
 		};
@@ -428,11 +374,9 @@
 			isa = XCBuildConfiguration;
 			buildSettings = {
 				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
-				ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
 				CLANG_ENABLE_MODULES = YES;
 				CODE_SIGN_STYLE = Automatic;
 				CURRENT_PROJECT_VERSION = 1;
-				DEVELOPMENT_ASSET_PATHS = "\"llama.swiftui/Preview Content\"";
 				DEVELOPMENT_TEAM = STLSG3FG8Q;
 				ENABLE_PREVIEWS = YES;
 				GENERATE_INFOPLIST_FILE = YES;
@@ -449,10 +393,11 @@
 				MARKETING_VERSION = 1.0;
 				PRODUCT_BUNDLE_IDENTIFIER = "com.bachittle.llama-swift";
 				PRODUCT_NAME = "$(TARGET_NAME)";
+				SUPPORTED_PLATFORMS = "iphoneos iphonesimulator xros xrsimulator";
+				SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = NO;
 				SWIFT_EMIT_LOC_STRINGS = YES;
-				SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h";
 				SWIFT_VERSION = 5.0;
-				TARGETED_DEVICE_FAMILY = "1,2";
+				TARGETED_DEVICE_FAMILY = "1,2,7";
 			};
 			name = Release;
 		};
@@ -478,6 +423,13 @@
 		defaultConfigurationName = Release;
 	};
 /* End XCConfigurationList section */
 
+/* Begin XCSwiftPackageProductDependency section */
+		DF810E122B4A5BA200301144 /* llama */ = {
+			isa = XCSwiftPackageProductDependency;
+			productName = llama;
+		};
+/* End XCSwiftPackageProductDependency section */
 	};
 	rootObject = 8A1C836B2AC328BD0096AF73 /* Project object */;
 }
@@ -1,11 +0,0 @@
-{
-  "colors" : [
-    {
-      "idiom" : "universal"
-    }
-  ],
-  "info" : {
-    "author" : "xcode",
-    "version" : 1
-  }
-}
@@ -4,6 +4,7 @@ import Foundation
 class LlamaState: ObservableObject {
     @Published var messageLog = ""
     @Published var cacheCleared = false
+    let NS_PER_S = 1_000_000_000.0
 
     private var llamaContext: LlamaContext?
     private var defaultModelUrl: URL? {
@@ -20,12 +21,12 @@ class LlamaState: ObservableObject {
     }
 
     func loadModel(modelUrl: URL?) throws {
-        messageLog += "Loading model...\n"
         if let modelUrl {
+            messageLog += "Loading model...\n"
             llamaContext = try LlamaContext.create_context(path: modelUrl.path())
             messageLog += "Loaded model \(modelUrl.lastPathComponent)\n"
         } else {
-            messageLog += "Could not locate model\n"
+            messageLog += "Load a model from the list below\n"
         }
     }
 
@@ -34,15 +35,29 @@ class LlamaState: ObservableObject {
             return
         }
 
+        let t_start = DispatchTime.now().uptimeNanoseconds
         await llamaContext.completion_init(text: text)
+        let t_heat_end = DispatchTime.now().uptimeNanoseconds
+        let t_heat = Double(t_heat_end - t_start) / NS_PER_S
+
         messageLog += "\(text)"
 
-        while await llamaContext.n_cur <= llamaContext.n_len {
+        while await llamaContext.n_cur < llamaContext.n_len {
             let result = await llamaContext.completion_loop()
             messageLog += "\(result)"
         }
+
+        let t_end = DispatchTime.now().uptimeNanoseconds
+        let t_generation = Double(t_end - t_heat_end) / NS_PER_S
+        let tokens_per_second = Double(await llamaContext.n_len) / t_generation
+
         await llamaContext.clear()
-        messageLog += "\n\ndone\n"
+        messageLog += """
+            \n
+            Done
+            Heat up took \(t_heat)s
+            Generated \(tokens_per_second) t/s\n
+            """
     }
 
     func bench() async {
@@ -56,10 +71,10 @@ class LlamaState: ObservableObject {
         messageLog += await llamaContext.model_info() + "\n"
 
         let t_start = DispatchTime.now().uptimeNanoseconds
-        await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up
+        let _ = await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up
         let t_end = DispatchTime.now().uptimeNanoseconds
 
-        let t_heat = Double(t_end - t_start) / 1_000_000_000.0
+        let t_heat = Double(t_end - t_start) / NS_PER_S
         messageLog += "Heat up time: \(t_heat) seconds, please wait...\n"
 
         // if more than 5 seconds, then we're probably running on a slow device
@@ -1,6 +0,0 @@
-{
-  "info" : {
-    "author" : "xcode",
-    "version" : 1
-  }
-}
@@ -42,46 +42,27 @@ struct ContentView: View {
                 Button("Send") {
                     sendText()
                 }
-                .padding(8)
-                .background(Color.blue)
-                .foregroundColor(.white)
-                .cornerRadius(8)
 
                 Button("Bench") {
                     bench()
                 }
-                .padding(8)
-                .background(Color.blue)
-                .foregroundColor(.white)
-                .cornerRadius(8)
 
                 Button("Clear") {
                     clear()
                 }
-                .padding(8)
-                .background(Color.blue)
-                .foregroundColor(.white)
-                .cornerRadius(8)
 
                 Button("Copy") {
                     UIPasteboard.general.string = llamaState.messageLog
                 }
-                .padding(8)
-                .background(Color.blue)
-                .foregroundColor(.white)
-                .cornerRadius(8)
-            }
+            }.buttonStyle(.bordered)
 
-            VStack {
+            VStack(alignment: .leading) {
                 DownloadButton(
                     llamaState: llamaState,
                     modelName: "TinyLlama-1.1B (Q4_0, 0.6 GiB)",
                     modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q4_0.gguf?download=true",
                     filename: "tinyllama-1.1b-1t-openorca.Q4_0.gguf"
                 )
-                .font(.system(size: 12))
-                .padding(.top, 4)
-                .frame(maxWidth: .infinity, alignment: .leading)
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -89,7 +70,6 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q8_0.gguf?download=true",
                     filename: "tinyllama-1.1b-1t-openorca.Q8_0.gguf"
                 )
-                .font(.system(size: 12))
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -97,8 +77,6 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/tinyllama-1.1b/ggml-model-f16.gguf?download=true",
                     filename: "tinyllama-1.1b-f16.gguf"
                 )
-                .font(.system(size: 12))
-                .frame(maxWidth: .infinity, alignment: .leading)
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -106,7 +84,6 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf?download=true",
                     filename: "phi-2-q4_0.gguf"
                 )
-                .font(.system(size: 12))
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -114,8 +91,6 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q8_0.gguf?download=true",
                     filename: "phi-2-q8_0.gguf"
                 )
-                .font(.system(size: 12))
-                .frame(maxWidth: .infinity, alignment: .leading)
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -123,15 +98,17 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_0.gguf?download=true",
                     filename: "mistral-7b-v0.1.Q4_0.gguf"
                 )
-                .font(.system(size: 12))
 
                 Button("Clear downloaded models") {
                     ContentView.cleanupModelCaches()
                     llamaState.cacheCleared = true
                 }
-                .padding(8)
-                .font(.system(size: 12))
+                LoadCustomButton(llamaState: llamaState)
             }
+            .padding(.top, 4)
+            .font(.system(size: 12))
+            .frame(maxWidth: .infinity, alignment: .leading)
         }
         .padding()
     }
@@ -93,7 +93,7 @@ struct DownloadButton: View {
                     print("Error: \(err.localizedDescription)")
                 }
             }) {
-                Text("\(modelName) (Downloaded)")
+                Text("Load \(modelName)")
             }
         } else {
             Text("Unknown status")
@@ -0,0 +1,44 @@
+ import SwiftUI
+ import UniformTypeIdentifiers
+
+ struct LoadCustomButton: View {
+ @ObservedObject private var llamaState: LlamaState
+ @State private var showFileImporter = false
+
+ init(llamaState: LlamaState) {
+ self.llamaState = llamaState
+ }
+
+ var body: some View {
+ VStack {
+ Button(action: {
+ showFileImporter = true
+ }) {
+ Text("Load Custom Model")
+ }
+ }
+ .fileImporter(
+ isPresented: $showFileImporter,
+ allowedContentTypes: [UTType(filenameExtension: "gguf", conformingTo: .data)!],
+ allowsMultipleSelection: false
+ ) { result in
+ switch result {
+ case .success(let files):
+ files.forEach { file in
+ let gotAccess = file.startAccessingSecurityScopedResource()
+ if !gotAccess { return }
+
+ do {
+ try llamaState.loadModel(modelUrl: file.absoluteURL)
+ } catch let err {
+ print("Error: \(err.localizedDescription)")
+ }
+
+ file.stopAccessingSecurityScopedResource()
+ }
+ case .failure(let error):
+ print(error)
+ }
+ }
+ }
+ }
@@ -25,6 +25,7 @@ endif()
  if (NOT MSVC)
  target_compile_options(llava PRIVATE -Wno-cast-qual) # stb_image.h
  endif()

  if(TARGET BUILD_INFO)
  add_dependencies(llava BUILD_INFO)
  endif()
@@ -32,5 +33,5 @@ endif()
  set(TARGET llava-cli)
  add_executable(llava-cli llava-cli.cpp)
  install(TARGETS llava-cli RUNTIME)
- target_link_libraries(llava-cli PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
+ target_link_libraries(llava-cli PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
  target_compile_features(llava PRIVATE cxx_std_11)
@@ -16,12 +16,19 @@
  #include "clip.h"
  #include "ggml.h"
  #include "ggml-alloc.h"
+ #include "ggml-backend.h"
+
+ #ifdef GGML_USE_CUBLAS
+ #include "ggml-cuda.h"
+ #endif
+
+ #ifdef GGML_USE_METAL
+ #include "ggml-metal.h"
+ #endif
+
  #define STB_IMAGE_IMPLEMENTATION
  #include "stb_image.h"

- #define CLIP_DEBUG

  static std::string format(const char * fmt, ...) {
  va_list ap;
  va_list ap2;
@@ -119,26 +126,30 @@ static struct ggml_tensor * get_tensor(struct ggml_context * ctx, const std::str
  }

  static std::string get_ftype(int ftype) {
- switch (ftype) {
- case 0:
- return "f32";
- case 1:
- return "f16";
- case 2:
- return "q4_0";
- case 3:
- return "q4_1";
- case 6:
- return "q5_0";
- case 7:
- return "q5_1";
- case 8:
- return "q8_0";
- default:
- throw std::runtime_error(format("%s: Unrecognized file type: %d\n", __func__, ftype));
- }
+ return ggml_type_name(static_cast<ggml_type>(ftype));
  }

+ //
+ // image data
+ //
+
+ // RGB uint8 image
+ struct clip_image_u8 {
+ int nx;
+ int ny;
+
+ std::vector<uint8_t> buf;
+ };
+
+ // RGB float32 image (NHWC)
+ // Memory layout: RGBRGBRGB...
+ struct clip_image_f32 {
+ int nx;
+ int ny;
+
+ std::vector<float> buf;
+ };
+
  //
  // clip layers
  //
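Note on the get_ftype() change in the hunk above: the hand-written switch over file-type codes is replaced by a single lookup through ggml's own type-name table. A minimal C++ sketch of the equivalence, assuming only ggml.h's public ggml_type_name(); the helper name ftype_to_name is illustrative, not part of the patch. One behavioural difference worth keeping in mind: the old switch threw on unrecognized values, while the new form relies on the caller passing a valid ggml_type.

    #include <cstdio>
    #include <string>
    #include "ggml.h"

    // Same idea as the new get_ftype(): defer to ggml's type table instead of
    // hard-coding names. The removed switch returned "f32" for 0, "f16" for 1,
    // "q4_0" for 2, "q4_1" for 3, "q5_0" for 6, "q5_1" for 7 and "q8_0" for 8,
    // which is what ggml_type_name() yields for those enum values.
    static std::string ftype_to_name(int ftype) {
        return ggml_type_name(static_cast<ggml_type>(ftype));
    }

    int main() {
        printf("%s %s %s\n",
               ftype_to_name(0).c_str(),   // "f32"
               ftype_to_name(1).c_str(),   // "f16"
               ftype_to_name(8).c_str());  // "q8_0"
    }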
@@ -196,39 +207,31 @@ struct clip_vision_model {
  struct ggml_tensor * mm_2_b;
  };

- // Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
- struct clip_buffer {
- uint8_t * data = NULL;
- size_t size = 0;
-
- void resize(size_t size) {
- delete[] data;
- data = new uint8_t[size];
- this->size = size;
- }
-
- ~clip_buffer() { delete[] data; }
- };

  struct clip_ctx {
  bool has_text_encoder = false;
  bool has_vision_encoder = false;
  bool has_llava_projector = false;

  struct clip_vision_model vision_model;

  float image_mean[3];
  float image_std[3];
  bool use_gelu = false;
  int32_t ftype = 1;
- struct ggml_context * ctx;
  struct gguf_context * ctx_gguf;
+ struct ggml_context * ctx_data;
+
+ std::vector<uint8_t> buf_compute_meta;
+
  // memory buffers to evaluate the model
- clip_buffer buf_compute;
- clip_buffer buf_alloc;
- ggml_allocr * alloc = NULL;
+ ggml_backend_buffer_t params_buffer = NULL;
+ ggml_backend_buffer_t compute_buffer = NULL;
+ ggml_backend_t backend = NULL;
+ ggml_allocr * compute_alloc = NULL;
  };

- static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_image_f32_batch * imgs) {
+ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs) {
  if (!ctx->has_vision_encoder) {
  printf("This gguf file seems to have no vision encoder\n");
  return nullptr;
@ -253,24 +256,20 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
GGML_ASSERT(batch_size == 1);
|
GGML_ASSERT(batch_size == 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
const auto & buf_compute = ctx->buf_compute;
|
|
||||||
|
|
||||||
struct ggml_init_params params = {
|
struct ggml_init_params params = {
|
||||||
/*.mem_size =*/ buf_compute.size,
|
/*.mem_size =*/ ctx->buf_compute_meta.size(),
|
||||||
/*.mem_buffer =*/ buf_compute.data,
|
/*.mem_buffer =*/ ctx->buf_compute_meta.data(),
|
||||||
/*.no_alloc =*/ false,
|
/*.no_alloc =*/ true,
|
||||||
};
|
};
|
||||||
|
|
||||||
params.no_alloc = true;
|
|
||||||
|
|
||||||
struct ggml_context * ctx0 = ggml_init(params);
|
struct ggml_context * ctx0 = ggml_init(params);
|
||||||
struct ggml_cgraph * gf = ggml_new_graph(ctx0);
|
struct ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||||
|
|
||||||
struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size, image_size, 3, batch_size);
|
struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size, image_size, 3, batch_size);
|
||||||
ggml_allocr_alloc(ctx->alloc, inp_raw);
|
ggml_allocr_alloc(ctx->compute_alloc, inp_raw);
|
||||||
|
|
||||||
if (!ggml_allocr_is_measure(ctx->alloc)) {
|
if (!ggml_allocr_is_measure(ctx->compute_alloc)) {
|
||||||
float * data = (float *)ggml_get_data(inp_raw);
|
float * data = (float *)malloc(ggml_nbytes(inp_raw));
|
||||||
|
|
||||||
for (size_t i = 0; i < imgs->size; i++) {
|
for (size_t i = 0; i < imgs->size; i++) {
|
||||||
const int nx = imgs->data[i].nx;
|
const int nx = imgs->data[i].nx;
|
||||||
@ -283,12 +282,14 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
for (int k = 0; k < 3; k++) {
|
for (int k = 0; k < 3; k++) {
|
||||||
for (int y = 0; y < ny; y++) {
|
for (int y = 0; y < ny; y++) {
|
||||||
for (int x = 0; x < nx; x++) {
|
for (int x = 0; x < nx; x++) {
|
||||||
data[(b * 3 * n) + k * n + y * nx + x] = imgs->data[b].data[3 * (y * nx + x) + k];
|
data[(b * 3 * n) + k * n + y * nx + x] = imgs->data[b].buf[3 * (y * nx + x) + k];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
ggml_backend_tensor_set(inp_raw, data, 0, ggml_nbytes(inp_raw));
|
||||||
|
free(data);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
|
struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
|
||||||
@ -298,42 +299,39 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
|
|
||||||
// concat class_embeddings and patch_embeddings
|
// concat class_embeddings and patch_embeddings
|
||||||
struct ggml_tensor * embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
|
struct ggml_tensor * embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
|
||||||
ggml_allocr_alloc(ctx->alloc, embeddings);
|
ggml_allocr_alloc(ctx->compute_alloc, embeddings);
|
||||||
if (!ggml_allocr_is_measure(ctx->alloc)) {
|
if (!ggml_allocr_is_measure(ctx->compute_alloc)) {
|
||||||
ggml_set_zero(embeddings);
|
void* zero_mem = malloc(ggml_nbytes(embeddings));
|
||||||
|
memset(zero_mem, 0, ggml_nbytes(embeddings));
|
||||||
|
ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings));
|
||||||
|
free(zero_mem);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor * temp = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, 1, batch_size);
|
embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
|
||||||
ggml_allocr_alloc(ctx->alloc, temp);
|
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
|
||||||
|
|
||||||
embeddings = ggml_acc(ctx0, embeddings, ggml_repeat(ctx0, model.class_embedding, temp), embeddings->nb[1],
|
embeddings = ggml_acc(ctx0, embeddings, inp,
|
||||||
embeddings->nb[2], embeddings->nb[3], 0);
|
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
|
||||||
embeddings =
|
|
||||||
ggml_acc(ctx0, embeddings, inp, embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
|
|
||||||
|
|
||||||
struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
|
struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
|
||||||
ggml_allocr_alloc(ctx->alloc, positions);
|
ggml_allocr_alloc(ctx->compute_alloc, positions);
|
||||||
if (!ggml_allocr_is_measure(ctx->alloc)) {
|
if (!ggml_allocr_is_measure(ctx->compute_alloc)) {
|
||||||
|
int* positions_data = (int*)malloc(ggml_nbytes(positions));
|
||||||
for (int i = 0; i < num_positions; i++) {
|
for (int i = 0; i < num_positions; i++) {
|
||||||
ggml_set_i32_1d(positions, i, i);
|
positions_data[i] = i;
|
||||||
}
|
}
|
||||||
|
ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
|
||||||
|
free(positions_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
embeddings =
|
embeddings =
|
||||||
ggml_add(ctx0, embeddings, ggml_repeat(ctx0, ggml_get_rows(ctx0, model.position_embeddings, positions), embeddings));
|
ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));
|
||||||
|
|
||||||
// pre-layernorm
|
// pre-layernorm
|
||||||
{
|
{
|
||||||
embeddings = ggml_norm(ctx0, embeddings, eps);
|
embeddings = ggml_norm(ctx0, embeddings, eps);
|
||||||
|
|
||||||
embeddings = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.pre_ln_w, embeddings), embeddings),
|
embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
|
||||||
ggml_repeat(ctx0, model.pre_ln_b, embeddings));
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
|
|
||||||
ggml_allocr_alloc(ctx->alloc, KQ_scale);
|
|
||||||
if (!ggml_allocr_is_measure(ctx->alloc)) {
|
|
||||||
ggml_set_f32(KQ_scale, 1.0f / sqrt((float)d_head));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// loop over layers
|
// loop over layers
|
||||||
@ -346,30 +344,30 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
{
|
{
|
||||||
cur = ggml_norm(ctx0, cur, eps);
|
cur = ggml_norm(ctx0, cur, eps);
|
||||||
|
|
||||||
cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_1_w, cur), cur),
|
cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w),
|
||||||
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
model.layers[il].ln_1_b);
|
||||||
}
|
}
|
||||||
|
|
||||||
// self-attention
|
// self-attention
|
||||||
{
|
{
|
||||||
|
|
||||||
struct ggml_tensor * Q =
|
struct ggml_tensor * Q =
|
||||||
ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].q_b, cur), ggml_mul_mat(ctx0, model.layers[il].q_w, cur));
|
ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b);
|
||||||
|
|
||||||
Q = ggml_scale_inplace(ctx0, Q, KQ_scale);
|
Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head));
|
||||||
Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size);
|
Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size);
|
||||||
Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
|
Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
|
||||||
Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size);
|
Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size);
|
||||||
|
|
||||||
struct ggml_tensor * K =
|
struct ggml_tensor * K =
|
||||||
ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].k_b, cur), ggml_mul_mat(ctx0, model.layers[il].k_w, cur));
|
ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b);
|
||||||
|
|
||||||
K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
|
K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
|
||||||
K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
|
K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
|
||||||
K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
|
K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
|
||||||
|
|
||||||
struct ggml_tensor * V =
|
struct ggml_tensor * V =
|
||||||
ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].v_b, cur), ggml_mul_mat(ctx0, model.layers[il].v_w, cur));
|
ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b);
|
||||||
|
|
||||||
V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
|
V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
|
||||||
V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
|
V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
|
||||||
@ -385,7 +383,7 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
}
|
}
|
||||||
|
|
||||||
// attention output
|
// attention output
|
||||||
cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].o_b, cur), ggml_mul_mat(ctx0, model.layers[il].o_w, cur));
|
cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);
|
||||||
|
|
||||||
// re-add the layer input, e.g., residual
|
// re-add the layer input, e.g., residual
|
||||||
cur = ggml_add(ctx0, cur, embeddings);
|
cur = ggml_add(ctx0, cur, embeddings);
|
||||||
@ -396,12 +394,11 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
{
|
{
|
||||||
cur = ggml_norm(ctx0, cur, eps);
|
cur = ggml_norm(ctx0, cur, eps);
|
||||||
|
|
||||||
cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_2_w, cur), cur),
|
cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);
|
||||||
ggml_repeat(ctx0, model.layers[il].ln_2_b, cur));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
|
cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
|
||||||
cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].ff_i_b, cur), cur);
|
cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
|
||||||
|
|
||||||
if (ctx->use_gelu) {
|
if (ctx->use_gelu) {
|
||||||
cur = ggml_gelu_inplace(ctx0, cur);
|
cur = ggml_gelu_inplace(ctx0, cur);
|
||||||
@ -410,7 +407,7 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
}
|
}
|
||||||
|
|
||||||
cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
|
cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
|
||||||
cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].ff_o_b, cur), cur);
|
cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);
|
||||||
|
|
||||||
// residual 2
|
// residual 2
|
||||||
cur = ggml_add(ctx0, embeddings, cur);
|
cur = ggml_add(ctx0, embeddings, cur);
|
||||||
@ -423,23 +420,26 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
|
embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
|
||||||
|
|
||||||
struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
|
struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
|
||||||
ggml_allocr_alloc(ctx->alloc, patches);
|
ggml_allocr_alloc(ctx->compute_alloc, patches);
|
||||||
if (!ggml_allocr_is_measure(ctx->alloc)) {
|
if (!ggml_allocr_is_measure(ctx->compute_alloc)) {
|
||||||
for (int i = 0; i < num_patches; ++i) {
|
int* patches_data = (int*)malloc(ggml_nbytes(patches));
|
||||||
ggml_set_i32_1d(patches, i, i+1);
|
for (int i = 0; i < num_patches; i++) {
|
||||||
|
patches_data[i] = i + 1;
|
||||||
}
|
}
|
||||||
|
ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
|
||||||
|
free(patches_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
embeddings = ggml_get_rows(ctx0, embeddings, patches);
|
embeddings = ggml_get_rows(ctx0, embeddings, patches);
|
||||||
|
|
||||||
// mm projection 0
|
// mm projection 0
|
||||||
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
|
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
|
||||||
embeddings = ggml_add(ctx0, ggml_repeat(ctx0, model.mm_0_b, embeddings), embeddings);
|
embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
|
||||||
|
|
||||||
embeddings = ggml_gelu(ctx0, embeddings);
|
embeddings = ggml_gelu(ctx0, embeddings);
|
||||||
|
|
||||||
embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
|
embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
|
||||||
embeddings = ggml_add(ctx0, ggml_repeat(ctx0, model.mm_2_b, embeddings), embeddings);
|
embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
|
||||||
}
|
}
|
||||||
|
|
||||||
// build the graph
|
// build the graph
|
||||||
@ -452,7 +452,6 @@ static ggml_cgraph * clip_image_build_graph(const clip_ctx * ctx, const clip_ima
|
|||||||
|
|
||||||
// read and create ggml_context containing the tensors and their data
|
// read and create ggml_context containing the tensors and their data
|
||||||
struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
||||||
|
|
||||||
struct ggml_context * meta = NULL;
|
struct ggml_context * meta = NULL;
|
||||||
|
|
||||||
struct gguf_init_params params = {
|
struct gguf_init_params params = {
|
||||||
@ -485,7 +484,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||||||
printf("%s: ftype: %s\n", __func__, ftype_str.c_str());
|
printf("%s: ftype: %s\n", __func__, ftype_str.c_str());
|
||||||
printf("\n");
|
printf("\n");
|
||||||
}
|
}
|
||||||
|
const int n_tensors = gguf_get_n_tensors(ctx);
|
||||||
// kv
|
// kv
|
||||||
if (verbosity >= 3) {
|
if (verbosity >= 3) {
|
||||||
const int n_kv = gguf_get_n_kv(ctx);
|
const int n_kv = gguf_get_n_kv(ctx);
|
||||||
@ -499,28 +498,41 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// data
|
// data
|
||||||
size_t ctx_size = 0;
|
size_t buffer_size = 0;
|
||||||
{
|
{
|
||||||
const int n_tensors = gguf_get_n_tensors(ctx);
|
|
||||||
|
|
||||||
for (int i = 0; i < n_tensors; ++i) {
|
for (int i = 0; i < n_tensors; ++i) {
|
||||||
const char * name = gguf_get_tensor_name(ctx, i);
|
const char * name = gguf_get_tensor_name(ctx, i);
|
||||||
const size_t offset = gguf_get_tensor_offset(ctx, i);
|
const size_t offset = gguf_get_tensor_offset(ctx, i);
|
||||||
|
|
||||||
struct ggml_tensor * cur = ggml_get_tensor(meta, name);
|
struct ggml_tensor * cur = ggml_get_tensor(meta, name);
|
||||||
ctx_size += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
|
|
||||||
size_t tensor_size = ggml_nbytes(cur);
|
size_t tensor_size = ggml_nbytes(cur);
|
||||||
size_t padded_size = ggml_nbytes_pad(cur);
|
buffer_size += tensor_size;
|
||||||
ctx_size += padded_size;
|
|
||||||
if (verbosity >= 3) {
|
if (verbosity >= 3) {
|
||||||
printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, padded_size=%zu, offset=%zu\n", __func__, i,
|
printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu\n", __func__, i,
|
||||||
ggml_n_dims(cur), cur->name, tensor_size, padded_size, offset);
|
ggml_n_dims(cur), cur->name, tensor_size, offset);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
buffer_size += n_tensors * 128 /* CLIP PADDING */;
|
||||||
|
|
||||||
clip_ctx * new_clip = new clip_ctx;
|
clip_ctx * new_clip = new clip_ctx;
|
||||||
|
|
||||||
|
#ifdef GGML_USE_CUBLAS
|
||||||
|
new_clip->backend = ggml_backend_cuda_init(0);
|
||||||
|
printf("%s: CLIP using CUDA backend\n", __func__);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef GGML_USE_METAL
|
||||||
|
new_clip->backend = ggml_backend_metal_init();
|
||||||
|
printf("%s: CLIP using Metal backend\n", __func__);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
if (!new_clip->backend) {
|
||||||
|
new_clip->backend = ggml_backend_cpu_init();
|
||||||
|
printf("%s: CLIP using CPU backend\n", __func__);
|
||||||
|
}
|
||||||
|
|
||||||
// model size and capabilities
|
// model size and capabilities
|
||||||
{
|
{
|
||||||
int idx = get_key_idx(ctx, KEY_HAS_TEXT_ENC);
|
int idx = get_key_idx(ctx, KEY_HAS_TEXT_ENC);
|
||||||
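The load path in the hunk above now picks a ggml backend at compile time and falls back to the CPU when no accelerator was built in. A condensed C++ sketch of that selection logic, using only the calls that appear in this hunk (ggml_backend_cuda_init, ggml_backend_metal_init, ggml_backend_cpu_init); error handling is omitted and the helper name is illustrative.

    #include <cstdio>
    #include "ggml-backend.h"
    #ifdef GGML_USE_CUBLAS
    #include "ggml-cuda.h"
    #endif
    #ifdef GGML_USE_METAL
    #include "ggml-metal.h"
    #endif

    // Pick the first accelerator that was compiled in, otherwise use the CPU backend.
    static ggml_backend_t init_clip_backend(void) {
        ggml_backend_t backend = NULL;
    #ifdef GGML_USE_CUBLAS
        backend = ggml_backend_cuda_init(0); // device 0
    #endif
    #ifdef GGML_USE_METAL
        backend = ggml_backend_metal_init();
    #endif
        if (!backend) {
            backend = ggml_backend_cpu_init();
            printf("using CPU backend\n");
        }
        return backend;
    }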
@ -545,21 +557,24 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||||||
printf("%s: text_encoder: %d\n", __func__, new_clip->has_text_encoder);
|
printf("%s: text_encoder: %d\n", __func__, new_clip->has_text_encoder);
|
||||||
printf("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder);
|
printf("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder);
|
||||||
printf("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector);
|
printf("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector);
|
||||||
printf("%s: model size: %.2f MB\n", __func__, (ctx_size / 1024.0 / 1024.0));
|
printf("%s: model size: %.2f MB\n", __func__, buffer_size / 1024.0 / 1024.0);
|
||||||
printf("%s: metadata size: %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0);
|
printf("%s: metadata size: %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
printf("%s: params backend buffer size = % 6.2f MB (%i tensors)\n", __func__, buffer_size / (1024.0 * 1024.0), n_tensors);
|
||||||
|
|
||||||
// load tensors
|
// load tensors
|
||||||
{
|
{
|
||||||
|
std::vector<uint8_t> read_buf;
|
||||||
struct ggml_init_params params = {
|
struct ggml_init_params params = {
|
||||||
/*.mem_size =*/ ctx_size,
|
/*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(),
|
||||||
/*.mem_buffer =*/ NULL,
|
/*.mem_buffer =*/ NULL,
|
||||||
/*.no_alloc =*/ false,
|
/*.no_alloc =*/ true,
|
||||||
};
|
};
|
||||||
|
|
||||||
new_clip->ctx = ggml_init(params);
|
new_clip->ctx_data = ggml_init(params);
|
||||||
if (!new_clip->ctx) {
|
if (!new_clip->ctx_data) {
|
||||||
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
|
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
|
||||||
clip_free(new_clip);
|
clip_free(new_clip);
|
||||||
return nullptr;
|
return nullptr;
|
||||||
@ -572,13 +587,21 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
const int n_tensors = gguf_get_n_tensors(ctx);
|
// add tensors to context
|
||||||
for (int i = 0; i < n_tensors; ++i) {
|
for (int i = 0; i < n_tensors; ++i) {
|
||||||
const char * name = gguf_get_tensor_name(ctx, i);
|
const char * name = gguf_get_tensor_name(ctx, i);
|
||||||
struct ggml_tensor * t = ggml_get_tensor(meta, name);
|
struct ggml_tensor * t = ggml_get_tensor(meta, name);
|
||||||
struct ggml_tensor * cur = ggml_dup_tensor(new_clip->ctx, t);
|
struct ggml_tensor * cur = ggml_dup_tensor(new_clip->ctx_data, t);
|
||||||
ggml_set_name(cur, name);
|
ggml_set_name(cur, name);
|
||||||
|
}
|
||||||
|
|
||||||
|
// alloc memory and offload data
|
||||||
|
new_clip->params_buffer = ggml_backend_alloc_buffer(new_clip->backend, buffer_size);
|
||||||
|
ggml_allocr* alloc = ggml_allocr_new_from_buffer(new_clip->params_buffer);
|
||||||
|
for (int i = 0; i < n_tensors; ++i) {
|
||||||
|
const char * name = gguf_get_tensor_name(ctx, i);
|
||||||
|
struct ggml_tensor * cur = ggml_get_tensor(new_clip->ctx_data, name);
|
||||||
|
ggml_allocr_alloc(alloc, cur);
|
||||||
const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
|
const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
|
||||||
fin.seekg(offset, std::ios::beg);
|
fin.seekg(offset, std::ios::beg);
|
||||||
if (!fin) {
|
if (!fin) {
|
||||||
@ -586,10 +609,18 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||||||
clip_free(new_clip);
|
clip_free(new_clip);
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
int num_bytes = ggml_nbytes(cur);
|
||||||
fin.read(reinterpret_cast<char *>(cur->data), ggml_nbytes(t));
|
if (ggml_backend_buffer_is_host(new_clip->params_buffer)) {
|
||||||
|
// for the CPU and Metal backend, we can read directly into the tensor
|
||||||
|
fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
|
||||||
|
} else {
|
||||||
|
// read into a temporary buffer first, then copy to device memory
|
||||||
|
read_buf.resize(num_bytes);
|
||||||
|
fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
|
||||||
|
ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
ggml_allocr_free(alloc);
|
||||||
fin.close();
|
fin.close();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -625,35 +656,35 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||||||
printf("v_n_layer %d\n", hparams.n_layer);
|
printf("v_n_layer %d\n", hparams.n_layer);
|
||||||
}
|
}
|
||||||
|
|
||||||
vision_model.patch_embeddings = get_tensor(new_clip->ctx, TN_PATCH_EMBD);
|
vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
|
||||||
vision_model.class_embedding = get_tensor(new_clip->ctx, TN_CLASS_EMBD);
|
vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
|
||||||
vision_model.position_embeddings = get_tensor(new_clip->ctx, format(TN_POS_EMBD, "v"));
|
vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
|
||||||
vision_model.pre_ln_w = get_tensor(new_clip->ctx, format(TN_LN_PRE, "v", "weight"));
|
vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
|
||||||
vision_model.pre_ln_b = get_tensor(new_clip->ctx, format(TN_LN_PRE, "v", "bias"));
|
vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
|
||||||
vision_model.mm_0_w = get_tensor(new_clip->ctx, format(TN_LLAVA_PROJ, 0, "weight"));
|
vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight"));
|
||||||
vision_model.mm_0_b = get_tensor(new_clip->ctx, format(TN_LLAVA_PROJ, 0, "bias"));
|
vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias"));
|
||||||
vision_model.mm_2_w = get_tensor(new_clip->ctx, format(TN_LLAVA_PROJ, 2, "weight"));
|
vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
|
||||||
vision_model.mm_2_b = get_tensor(new_clip->ctx, format(TN_LLAVA_PROJ, 2, "bias"));
|
vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
|
||||||
|
|
||||||
vision_model.layers.resize(hparams.n_layer);
|
vision_model.layers.resize(hparams.n_layer);
|
||||||
for (int il = 0; il < hparams.n_layer; ++il) {
|
for (int il = 0; il < hparams.n_layer; ++il) {
|
||||||
auto & layer = vision_model.layers[il];
|
auto & layer = vision_model.layers[il];
|
||||||
layer.k_w = get_tensor(new_clip->ctx, format(TN_ATTN_K, "v", il, "weight"));
|
layer.k_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "weight"));
|
||||||
layer.q_w = get_tensor(new_clip->ctx, format(TN_ATTN_Q, "v", il, "weight"));
|
layer.q_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "weight"));
|
||||||
layer.v_w = get_tensor(new_clip->ctx, format(TN_ATTN_V, "v", il, "weight"));
|
layer.v_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "weight"));
|
||||||
layer.o_w = get_tensor(new_clip->ctx, format(TN_ATTN_OUTPUT, "v", il, "weight"));
|
layer.o_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "weight"));
|
||||||
layer.ln_1_w = get_tensor(new_clip->ctx, format(TN_LN_1, "v", il, "weight"));
|
layer.ln_1_w = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "weight"));
|
||||||
layer.ln_2_w = get_tensor(new_clip->ctx, format(TN_LN_2, "v", il, "weight"));
|
layer.ln_2_w = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "weight"));
|
||||||
layer.ff_i_w = get_tensor(new_clip->ctx, format(TN_FFN_DOWN, "v", il, "weight"));
|
layer.ff_i_w = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "weight"));
|
||||||
layer.ff_o_w = get_tensor(new_clip->ctx, format(TN_FFN_UP, "v", il, "weight"));
|
layer.ff_o_w = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "weight"));
|
||||||
layer.k_b = get_tensor(new_clip->ctx, format(TN_ATTN_K, "v", il, "bias"));
|
layer.k_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "bias"));
|
||||||
layer.q_b = get_tensor(new_clip->ctx, format(TN_ATTN_Q, "v", il, "bias"));
|
layer.q_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "bias"));
|
||||||
layer.v_b = get_tensor(new_clip->ctx, format(TN_ATTN_V, "v", il, "bias"));
|
layer.v_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "bias"));
|
||||||
layer.o_b = get_tensor(new_clip->ctx, format(TN_ATTN_OUTPUT, "v", il, "bias"));
|
layer.o_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "bias"));
|
||||||
layer.ln_1_b = get_tensor(new_clip->ctx, format(TN_LN_1, "v", il, "bias"));
|
layer.ln_1_b = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "bias"));
|
||||||
layer.ln_2_b = get_tensor(new_clip->ctx, format(TN_LN_2, "v", il, "bias"));
|
layer.ln_2_b = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "bias"));
|
||||||
layer.ff_i_b = get_tensor(new_clip->ctx, format(TN_FFN_DOWN, "v", il, "bias"));
|
layer.ff_i_b = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "bias"));
|
||||||
layer.ff_o_b = get_tensor(new_clip->ctx, format(TN_FFN_UP, "v", il, "bias"));
|
layer.ff_o_b = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "bias"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -663,43 +694,43 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||||||
|
|
||||||
// measure mem requirement and allocate
|
// measure mem requirement and allocate
|
||||||
{
|
{
|
||||||
static const size_t tensor_alignment = 32;
|
new_clip->buf_compute_meta.resize(GGML_DEFAULT_GRAPH_SIZE * ggml_tensor_overhead() + ggml_graph_overhead());
|
||||||
new_clip->buf_compute.resize(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead());
|
new_clip->compute_alloc = ggml_allocr_new_measure_from_backend(new_clip->backend);
|
||||||
new_clip->alloc = ggml_allocr_new_measure(tensor_alignment);
|
|
||||||
clip_image_f32_batch batch;
|
clip_image_f32_batch batch;
|
||||||
batch.size = 1;
|
batch.size = 1;
|
||||||
ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch);
|
ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch);
|
||||||
size_t alloc_size = ggml_allocr_alloc_graph(new_clip->alloc, gf) + tensor_alignment;
|
size_t compute_memory_buffer_size = ggml_allocr_alloc_graph(new_clip->compute_alloc, gf);
|
||||||
ggml_allocr_free(new_clip->alloc);
|
ggml_allocr_free(new_clip->compute_alloc);
|
||||||
new_clip->buf_alloc.resize(alloc_size);
|
new_clip->compute_buffer = ggml_backend_alloc_buffer(new_clip->backend, compute_memory_buffer_size);
|
||||||
new_clip->alloc = ggml_allocr_new(new_clip->buf_alloc.data, new_clip->buf_alloc.size, tensor_alignment);
|
new_clip->compute_alloc = ggml_allocr_new_from_buffer(new_clip->compute_buffer);
|
||||||
|
|
||||||
printf("%s: total allocated memory: %.2f MB\n", __func__, (new_clip->buf_compute.size + alloc_size)/1024.0/1024.0);
|
printf("%s: compute allocated memory: %.2f MB\n", __func__, compute_memory_buffer_size /1024.0/1024.0);
|
||||||
}
|
}
|
||||||
|
|
||||||
return new_clip;
|
return new_clip;
|
||||||
}
|
}
|
||||||
|
|
||||||
clip_image_u8 * make_clip_image_u8() {
|
struct clip_image_u8 * clip_image_u8_init() {
|
||||||
auto img = new clip_image_u8();
|
return new clip_image_u8();
|
||||||
return img;
|
|
||||||
}
|
}
|
||||||
clip_image_f32 * make_clip_image_f32() { return new clip_image_f32(); }
|
|
||||||
|
|
||||||
void clip_image_u8_free(clip_image_u8 * img) { if (img->data) { delete[] img->data; } delete img; }
|
struct clip_image_f32 * clip_image_f32_init() {
|
||||||
void clip_image_f32_free(clip_image_f32 * img) { if (img->data) { delete[] img->data; } delete img; }
|
return new clip_image_f32();
|
||||||
|
}
|
||||||
|
|
||||||
|
void clip_image_u8_free (struct clip_image_u8 * img) { delete img; }
|
||||||
|
void clip_image_f32_free(struct clip_image_f32 * img) { delete img; }
|
||||||
|
|
||||||
static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) {
|
static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) {
|
||||||
img->nx = nx;
|
img->nx = nx;
|
||||||
img->ny = ny;
|
img->ny = ny;
|
||||||
img->size = nx * ny * 3;
|
img->buf.resize(3 * nx * ny);
|
||||||
img->data = new uint8_t[img->size]();
|
memcpy(img->buf.data(), data, img->buf.size());
|
||||||
memcpy(img->data, data, img->size);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
|
bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
|
||||||
int nx, ny, nc;
|
int nx, ny, nc;
|
||||||
auto data = stbi_load(fname, &nx, &ny, &nc, 3);
|
auto * data = stbi_load(fname, &nx, &ny, &nc, 3);
|
||||||
if (!data) {
|
if (!data) {
|
||||||
fprintf(stderr, "%s: failed to load image '%s'\n", __func__, fname);
|
fprintf(stderr, "%s: failed to load image '%s'\n", __func__, fname);
|
||||||
return false;
|
return false;
|
||||||
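The "measure mem requirement and allocate" block in the hunk above follows ggml's measure-then-allocate idiom: run the graph once against a measuring allocator to learn how much compute memory it needs, then allocate a real backend buffer of that size and create the working allocator from it. A minimal sketch under that assumption, using only the allocator calls shown in this diff; in clip.cpp the graph is built while compute_alloc points at the measuring allocator, which this sketch glosses over by taking the graph as a parameter.

    #include "ggml.h"
    #include "ggml-alloc.h"
    #include "ggml-backend.h"

    // Sketch: size the compute buffer by doing a dry run of the graph.
    static ggml_allocr * reserve_compute_buffer(ggml_backend_t backend,
                                                ggml_cgraph * gf,
                                                ggml_backend_buffer_t * out_buffer) {
        // 1. measuring allocator: records sizes, allocates nothing for real
        ggml_allocr * measure = ggml_allocr_new_measure_from_backend(backend);
        size_t compute_size = ggml_allocr_alloc_graph(measure, gf);
        ggml_allocr_free(measure);

        // 2. real buffer of the measured size, plus an allocator over it
        *out_buffer = ggml_backend_alloc_buffer(backend, compute_size);
        return ggml_allocr_new_from_buffer(*out_buffer);
    }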
@ -711,7 +742,7 @@ bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
|
|||||||
|
|
||||||
bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
|
bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
|
||||||
int nx, ny, nc;
|
int nx, ny, nc;
|
||||||
auto data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
|
auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
|
||||||
if (!data) {
|
if (!data) {
|
||||||
fprintf(stderr, "%s: failed to decode image bytes\n", __func__);
|
fprintf(stderr, "%s: failed to decode image bytes\n", __func__);
|
||||||
return false;
|
return false;
|
||||||
@ -723,7 +754,7 @@ bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length
|
|||||||
|
|
||||||
// normalize: x = (x - mean) / std
|
// normalize: x = (x - mean) / std
|
||||||
// TODO: implement bicubic interpolation instead of linear.
|
// TODO: implement bicubic interpolation instead of linear.
|
||||||
bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32 * res, const bool pad2square) {
|
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32 * res, const bool pad2square) {
|
||||||
if (!ctx->has_vision_encoder) {
|
if (!ctx->has_vision_encoder) {
|
||||||
printf("This gguf file seems to have no vision encoder\n");
|
printf("This gguf file seems to have no vision encoder\n");
|
||||||
return false;
|
return false;
|
||||||
@ -732,18 +763,17 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
|
|||||||
// the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
|
// the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
|
||||||
// see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
|
// see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
|
||||||
|
|
||||||
clip_image_u8 * temp = make_clip_image_u8(); // we will keep the input image data here temporarily
|
clip_image_u8 * temp = clip_image_u8_init(); // we will keep the input image data here temporarily
|
||||||
if (pad2square && img->nx != img->ny) {
|
if (pad2square && img->nx != img->ny) {
|
||||||
int longer_side = std::max(img->nx, img->ny);
|
int longer_side = std::max(img->nx, img->ny);
|
||||||
temp->nx = longer_side;
|
temp->nx = longer_side;
|
||||||
temp->ny = longer_side;
|
temp->ny = longer_side;
|
||||||
temp->size = 3 * longer_side * longer_side;
|
temp->buf.resize(3 * longer_side * longer_side);
|
||||||
temp->data = new uint8_t[temp->size]();
|
const uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA
|
||||||
uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA
|
|
||||||
|
|
||||||
// fill with background color
|
// fill with background color
|
||||||
for (size_t i = 0; i < temp->size; i++) {
|
for (size_t i = 0; i < temp->buf.size(); i++) {
|
||||||
temp->data[i] = bc[i % 3];
|
temp->buf[i] = bc[i % 3];
|
||||||
}
|
}
|
||||||
|
|
||||||
// copy from the input image
|
// copy from the input image
|
||||||
@ -751,17 +781,16 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
|
|||||||
for (int x = 0; x < img->nx; x++) {
|
for (int x = 0; x < img->nx; x++) {
|
||||||
const int i = 3 * (y * img->nx + x);
|
const int i = 3 * (y * img->nx + x);
|
||||||
const int j = 3 * (y * temp->nx + x);
|
const int j = 3 * (y * temp->nx + x);
|
||||||
temp->data[j] = img->data[i];
|
temp->buf[j] = img->buf[i];
|
||||||
temp->data[j+1] = img->data[i+1];
|
temp->buf[j+1] = img->buf[i+1];
|
||||||
temp->data[j+2] = img->data[i+2];
|
temp->buf[j+2] = img->buf[i+2];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
temp->nx = img->nx;
|
temp->nx = img->nx;
|
||||||
temp->ny = img->ny;
|
temp->ny = img->ny;
|
||||||
temp->size = img->size;
|
temp->buf.resize(img->buf.size());
|
||||||
temp->data = new uint8_t[temp->size]();
|
memcpy(temp->buf.data(), img->buf.data(), temp->buf.size());
|
||||||
memcpy(&temp->data[0], &img->data[0], temp->size); // copy
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const int nx = temp->nx;
|
const int nx = temp->nx;
|
||||||
@ -772,8 +801,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
|
|||||||
|
|
||||||
res->nx = nx2;
|
res->nx = nx2;
|
||||||
res->ny = ny2;
|
res->ny = ny2;
|
||||||
res->size = 3 * nx2 * ny2;
|
res->buf.resize(3 * nx2 * ny2);
|
||||||
res->data = new float[res->size]();
|
|
||||||
|
|
||||||
const float scale = std::max(nx, ny) / (float)ctx->vision_model.hparams.image_size;
|
const float scale = std::max(nx, ny) / (float)ctx->vision_model.hparams.image_size;
|
||||||
|
|
||||||
@ -804,10 +832,10 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
|
|||||||
const int j10 = 3 * (y1 * nx + x0) + c;
|
const int j10 = 3 * (y1 * nx + x0) + c;
|
||||||
const int j11 = 3 * (y1 * nx + x1) + c;
|
const int j11 = 3 * (y1 * nx + x1) + c;
|
||||||
|
|
||||||
const float v00 = temp->data[j00];
|
const float v00 = temp->buf[j00];
|
||||||
const float v01 = temp->data[j01];
|
const float v01 = temp->buf[j01];
|
||||||
const float v10 = temp->data[j10];
|
const float v10 = temp->buf[j10];
|
||||||
const float v11 = temp->data[j11];
|
const float v11 = temp->buf[j11];
|
||||||
|
|
||||||
const float v0 = v00 * (1.0f - dx) + v01 * dx;
|
const float v0 = v00 * (1.0f - dx) + v01 * dx;
|
||||||
const float v1 = v10 * (1.0f - dx) + v11 * dx;
|
const float v1 = v10 * (1.0f - dx) + v11 * dx;
|
||||||
@ -818,7 +846,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
|
|||||||
|
|
||||||
const int i = 3 * (y * nx3 + x) + c;
|
const int i = 3 * (y * nx3 + x) + c;
|
||||||
|
|
||||||
res->data[i] = ((float(v2) / 255.0f) - m3[c]) / s3[c];
|
res->buf[i] = ((float(v2) / 255.0f) - m3[c]) / s3[c];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -828,12 +856,13 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
|
|||||||
}
|
}
|
||||||
|
|
||||||
void clip_free(clip_ctx * ctx) {
|
void clip_free(clip_ctx * ctx) {
|
||||||
ggml_free(ctx->ctx);
|
ggml_free(ctx->ctx_data);
|
||||||
gguf_free(ctx->ctx_gguf);
|
gguf_free(ctx->ctx_gguf);
|
||||||
|
|
||||||
delete ctx;
|
delete ctx;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool clip_image_encode(const clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
|
bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
|
||||||
if (!ctx->has_vision_encoder) {
|
if (!ctx->has_vision_encoder) {
|
||||||
printf("This gguf file seems to have no vision encoder\n");
|
printf("This gguf file seems to have no vision encoder\n");
|
||||||
return false;
|
return false;
|
||||||
@ -845,8 +874,7 @@ bool clip_image_encode(const clip_ctx * ctx, const int n_threads, clip_image_f32
|
|||||||
return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
|
return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool clip_image_batch_encode(const clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec) {
|
bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec) {
|
||||||
|
|
||||||
if (!ctx->has_vision_encoder) {
|
if (!ctx->has_vision_encoder) {
|
||||||
printf("This gguf file seems to have no vision encoder\n");
|
printf("This gguf file seems to have no vision encoder\n");
|
||||||
return false;
|
return false;
|
||||||
@ -858,29 +886,29 @@ bool clip_image_batch_encode(const clip_ctx * ctx, const int n_threads, const cl
|
|||||||
}
|
}
|
||||||
|
|
||||||
// reset alloc buffer to clean the memory from previous invocations
|
// reset alloc buffer to clean the memory from previous invocations
|
||||||
ggml_allocr_reset(ctx->alloc);
|
ggml_allocr_reset(ctx->compute_alloc);
|
||||||
|
|
||||||
// build the inference graph
|
// build the inference graph
|
||||||
ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
|
ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
|
||||||
ggml_allocr_alloc_graph(ctx->alloc, gf);
|
ggml_allocr_alloc_graph(ctx->compute_alloc, gf);
|
||||||
|
|
||||||
struct ggml_cplan plan = ggml_graph_plan(gf, n_threads);
|
if (ggml_backend_is_cpu(ctx->backend)) {
|
||||||
if (plan.work_size > 0) {
|
ggml_backend_cpu_set_n_threads(ctx->backend, n_threads);
|
||||||
plan.work_data = (uint8_t *)malloc(plan.work_size);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ggml_graph_compute(gf, &plan);
|
#ifdef GGML_USE_METAL
|
||||||
|
if (ggml_backend_is_metal(ctx->backend)) {
|
||||||
|
ggml_backend_metal_set_n_cb(ctx->backend, n_threads);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
ggml_backend_graph_compute(ctx->backend, gf);
|
||||||
|
|
||||||
// the last node is the embedding tensor
|
// the last node is the embedding tensor
|
||||||
struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 1];
|
struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 1];
|
||||||
|
|
||||||
// copy the embeddings to the location passed by the user
|
// copy the embeddings to the location passed by the user
|
||||||
memcpy(vec, ggml_get_data_f32(embeddings), ggml_nbytes(embeddings));
|
ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));
|
||||||
|
|
||||||
if (plan.work_size > 0) {
|
|
||||||
free(plan.work_data);
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -888,32 +916,15 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
|
|||||||
|
|
||||||
ggml_type type = GGML_TYPE_Q4_1;
|
ggml_type type = GGML_TYPE_Q4_1;
|
||||||
|
|
||||||
switch (itype) {
|
assert(itype < GGML_TYPE_COUNT);
|
||||||
case 2:
|
type = static_cast<ggml_type>(itype);
|
||||||
type = GGML_TYPE_Q4_0;
|
|
||||||
break;
|
auto * ctx_clip = clip_model_load(fname_inp, 2);
|
||||||
case 3:
|
|
||||||
type = GGML_TYPE_Q4_1;
|
|
||||||
break;
|
|
||||||
case 6:
|
|
||||||
type = GGML_TYPE_Q5_0;
|
|
||||||
break;
|
|
||||||
case 7:
|
|
||||||
type = GGML_TYPE_Q5_1;
|
|
||||||
break;
|
|
||||||
case 8:
|
|
||||||
type = GGML_TYPE_Q8_0;
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype);
|
|
||||||
return false;
|
|
||||||
};
|
|
||||||
|
|
||||||
auto ctx_clip = clip_model_load(fname_inp, 2);
|
|
||||||
const auto & ctx_src = ctx_clip->ctx_gguf;
|
const auto & ctx_src = ctx_clip->ctx_gguf;
|
||||||
const auto & ctx_data = ctx_clip->ctx;
|
const auto & ctx_data = ctx_clip->ctx_data;
|
||||||
|
|
||||||
auto ctx_out = gguf_init_empty();
|
auto * ctx_out = gguf_init_empty();
|
||||||
gguf_set_kv(ctx_out, ctx_src);
|
gguf_set_kv(ctx_out, ctx_src);
|
||||||
gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
|
gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
|
||||||
gguf_set_val_u32(ctx_out, "general.file_type", itype);
|
gguf_set_val_u32(ctx_out, "general.file_type", itype);
|
||||||
@ -966,6 +977,10 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
|
|||||||
|
|
||||||
if (quantize) {
|
if (quantize) {
|
||||||
new_type = type;
|
new_type = type;
|
||||||
|
if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) {
|
||||||
|
new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type
|
||||||
|
// fprintf(stderr, "%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type));
|
||||||
|
}
|
||||||
const size_t n_elms = ggml_nelements(cur);
|
const size_t n_elms = ggml_nelements(cur);
|
||||||
float * f32_data;
|
float * f32_data;
|
||||||
|
|
||||||
@ -1010,6 +1025,21 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
|
|||||||
case GGML_TYPE_Q8_0: {
|
case GGML_TYPE_Q8_0: {
|
||||||
new_size = ggml_quantize_q8_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
|
new_size = ggml_quantize_q8_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
|
||||||
} break;
|
} break;
|
||||||
|
case GGML_TYPE_Q2_K: {
|
||||||
|
new_size = ggml_quantize_q2_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
|
||||||
|
} break;
|
||||||
|
case GGML_TYPE_Q3_K: {
|
||||||
|
new_size = ggml_quantize_q3_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
|
||||||
|
} break;
|
||||||
|
case GGML_TYPE_Q4_K: {
|
||||||
|
new_size = ggml_quantize_q4_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
|
||||||
|
} break;
|
||||||
|
case GGML_TYPE_Q5_K: {
|
||||||
|
new_size = ggml_quantize_q5_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
|
||||||
|
} break;
|
||||||
|
case GGML_TYPE_Q6_K: {
|
||||||
|
new_size = ggml_quantize_q6_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
|
||||||
|
} break;
|
||||||
default: {
|
default: {
|
||||||
fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, new_type);
|
fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, new_type);
|
||||||
return false;
|
return false;
|
||||||
@@ -35,31 +35,14 @@ struct clip_vision_hparams {
  float eps;
  };

- /** load mmproj model */
- CLIP_API struct clip_ctx * clip_model_load(const char * fname, const int verbosity);
- /** free mmproj model */
+ CLIP_API struct clip_ctx * clip_model_load(const char * fname, int verbosity);
  CLIP_API void clip_free(struct clip_ctx * ctx);

- size_t clip_embd_nbytes(const struct clip_ctx * ctx);
- int clip_n_patches(const struct clip_ctx * ctx);
- int clip_n_mmproj_embd(const struct clip_ctx * ctx);
+ CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx);
+ CLIP_API int clip_n_patches (const struct clip_ctx * ctx);
+ CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx);

- // RGB uint8 image
- struct clip_image_u8 {
- int nx;
- int ny;
- uint8_t * data = NULL;
- size_t size;
- };
-
- // RGB float32 image (NHWC)
- // Memory layout: RGBRGBRGB...
- struct clip_image_f32 {
- int nx;
- int ny;
- float * data = NULL;
- size_t size;
- };

  struct clip_image_u8_batch {
  struct clip_image_u8 * data;
@@ -71,21 +54,22 @@ struct clip_image_f32_batch {
  size_t size;
  };

- struct clip_image_u8 * make_clip_image_u8();
- struct clip_image_f32 * make_clip_image_f32();
- CLIP_API void clip_image_u8_free(clip_image_u8 * img);
- CLIP_API void clip_image_f32_free(clip_image_f32 * img);
+ CLIP_API struct clip_image_u8 * clip_image_u8_init ();
+ CLIP_API struct clip_image_f32 * clip_image_f32_init();
+ CLIP_API void clip_image_u8_free (struct clip_image_u8 * img);
+ CLIP_API void clip_image_f32_free(struct clip_image_f32 * img);

  CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);

  /** interpret bytes as an image file with length bytes_length, and use the result to populate img */
  CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);

- bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square);
- bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec);
- bool clip_image_batch_encode(const struct clip_ctx * ctx, const int n_threads, const struct clip_image_f32_batch * imgs,
- float * vec);
+ CLIP_API bool clip_image_preprocess (struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, bool pad2square);
+ CLIP_API bool clip_image_encode (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
+ CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);

- bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype);
+ CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype);

  #ifdef __cplusplus
  }
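For reference, the revised public API above is used roughly as follows. This is a hedged end-to-end sketch assembled only from the declarations in this header; the file paths and the thread count are placeholders, not values from the patch.

    #include <cstdio>
    #include <vector>
    #include "clip.h"

    int main() {
        // placeholders: adjust paths and thread count for your setup
        struct clip_ctx * ctx = clip_model_load("mmproj-model-f16.gguf", /*verbosity*/ 1);
        if (!ctx) return 1;

        struct clip_image_u8  * img  = clip_image_u8_init();
        struct clip_image_f32 * imgf = clip_image_f32_init();

        if (!clip_image_load_from_file("image.jpg", img) ||
            !clip_image_preprocess(ctx, img, imgf, /*pad2square*/ true)) {
            return 1;
        }

        // clip_embd_nbytes() reports the embedding size in bytes
        std::vector<float> embd(clip_embd_nbytes(ctx) / sizeof(float));
        clip_image_encode(ctx, /*n_threads*/ 4, imgf, embd.data());
        printf("got %zu embedding floats\n", embd.size());

        clip_image_f32_free(imgf);
        clip_image_u8_free(img);
        clip_free(ctx);
        return 0;
    }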
@@ -39,73 +39,11 @@ static bool eval_string(struct llama_context * ctx_llama, const char* str, int n
     return true;
 }

-// TODO: use common/sampling.h
-static llama_token sample_id(llama_context * ctx_llama, gpt_params & params) {
-    auto & sparams = params.sparams;
-
-    // out of user input, sample next token
-    const float temp = sparams.temp;
-    const int32_t top_k = sparams.top_k <= 0 ? llama_n_vocab(llama_get_model(ctx_llama)) : sparams.top_k;
-    const float top_p = sparams.top_p;
-    const float tfs_z = sparams.tfs_z;
-    const float typical_p = sparams.typical_p;
-    // const int32_t repeat_last_n = sparams.repeat_last_n < 0 ? n_ctx : sparams.repeat_last_n;
-    // const float repeat_penalty = sparams.repeat_penalty;
-    // const float alpha_presence = sparams.presence_penalty;
-    // const float alpha_frequency = sparams.frequency_penalty;
-    const int mirostat = sparams.mirostat;
-    const float mirostat_tau = sparams.mirostat_tau;
-    const float mirostat_eta = sparams.mirostat_eta;
-    // const bool penalize_nl = sparams.penalize_nl;
-
-    llama_token id = 0;
-    {
-        auto logits = llama_get_logits(ctx_llama);
-        auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama));
-
-        // Apply params.logit_bias map
-        for (auto it = sparams.logit_bias.begin(); it != sparams.logit_bias.end(); it++) {
-            logits[it->first] += it->second;
-        }
-
-        std::vector<llama_token_data> candidates;
-        candidates.reserve(n_vocab);
-        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
-            candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
-        }
-
-        llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
-
-        if (temp <= 0) {
-            // Greedy sampling
-            id = llama_sample_token_greedy(ctx_llama, &candidates_p);
-        } else {
-            if (mirostat == 1) {
-                static float mirostat_mu = 2.0f * mirostat_tau;
-                const int mirostat_m = 100;
-                llama_sample_temp(ctx_llama, &candidates_p, temp);
-                id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
-            } else if (mirostat == 2) {
-                static float mirostat_mu = 2.0f * mirostat_tau;
-                llama_sample_temp(ctx_llama, &candidates_p, temp);
-                id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
-            } else {
-                // Temperature sampling
-                llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1);
-                llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1);
-                llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1);
-                llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1);
-                llama_sample_temp(ctx_llama, &candidates_p, temp);
-                id = llama_sample_token(ctx_llama, &candidates_p);
-            }
-        }
-    }
-
-    return id;
-}
-
-static const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) {
-    int id = sample_id(ctx_llama, params);
+static const char * sample(struct llama_sampling_context * ctx_sampling,
+                           struct llama_context * ctx_llama,
+                           int * n_past) {
+    const llama_token id = llama_sampling_sample(ctx_sampling, ctx_llama, NULL);
+    llama_sampling_accept(ctx_sampling, ctx_llama, id, true);
     static std::string ret;
     if (id == llama_token_eos(llama_get_model(ctx_llama))) {
         ret = "</s>";
@@ -174,8 +112,8 @@ struct llava_context {
 };

 static void show_additional_info(int /*argc*/, char ** argv) {
-    printf("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
-    printf("  note: a lower temperature value like 0.1 is recommended for better quality.\n");
+    fprintf(stderr, "\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
+    fprintf(stderr, "  note: a lower temperature value like 0.1 is recommended for better quality.\n");
 }

 static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params) {
@@ -185,7 +123,7 @@ static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_para
     auto prompt = params->prompt;
     if (prompt_contains_image(prompt)) {
         if (!params->image.empty()) {
-            printf("using base64 encoded image instead of command line image path\n");
+            fprintf(stderr, "using base64 encoded image instead of command line image path\n");
         }
         embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->n_threads, prompt);
         if (!embed) {
@@ -217,16 +155,19 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_

     // generate the response

-    printf("\n");
+    fprintf(stderr, "\n");

+    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);

     for (int i = 0; i < max_tgt_len; i++) {
-        const char * tmp = sample(ctx_llava->ctx_llama, *params, &n_past);
+        const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
         if (strcmp(tmp, "</s>") == 0) break;

         printf("%s", tmp);
         fflush(stdout);
     }

+    llama_sampling_free(ctx_sampling);
     printf("\n");
 }

@@ -302,6 +243,9 @@ int main(int argc, char ** argv) {
     }

     auto image_embed = load_image(ctx_llava, &params);
+    if (!image_embed) {
+        return 1;
+    }

     // process the prompt
     process_prompt(ctx_llava, image_embed, &params, params.prompt);

@@ -10,7 +10,7 @@
 #include "base64.hpp"

 static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos) {
-    clip_image_f32 * img_res = make_clip_image_f32();
+    clip_image_f32 * img_res = clip_image_f32_init();
     if (!clip_image_preprocess(ctx_clip, img, img_res, /*pad2square =*/ true)) {
         fprintf(stderr, "%s: unable to preprocess image\n", __func__);
         clip_image_f32_free(img_res);
@@ -86,7 +86,7 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_
 }

 LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
-    clip_image_u8 * img = make_clip_image_u8();
+    clip_image_u8 * img = clip_image_u8_init();
     if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) {
         clip_image_u8_free(img);
         fprintf(stderr, "%s: can't load image from bytes, is it a valid image?", __func__);
 examples/lookup/CMakeLists.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
+set(TARGET lookup)
+add_executable(${TARGET} lookup.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
 examples/lookup/README.md (new file, 13 lines)
@@ -0,0 +1,13 @@
+# llama.cpp/examples/lookup
+
+Demonstration of Prompt Lookup Decoding
+
+https://github.com/apoorvumang/prompt-lookup-decoding
+
+The key parameters for lookup decoding are `ngram_min`, `ngram_max` and `n_draft`. The first two determine the size of the ngrams to search for in the prompt for a match. The latter specifies how many subsequent tokens to draft if a match is found.
+
+More info:
+
+https://github.com/ggerganov/llama.cpp/pull/4484
+https://github.com/ggerganov/llama.cpp/issues/4226
+
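To make the interplay of `ngram_min`, `ngram_max` and `n_draft` concrete, here is a minimal, self-contained sketch of the lookup step. This is not the example's actual code (the `lookup_draft` name and signature are illustrative only): it scans the tokens seen so far for an earlier occurrence of the most recent n-gram and, if one is found, drafts the tokens that followed it.

```cpp
#include <cstdint>
#include <vector>

// Sketch only: try the largest n-gram size first, look for an earlier
// occurrence of the most recent n-gram in `inp`, and draft the tokens
// that followed that occurrence.
static std::vector<int32_t> lookup_draft(const std::vector<int32_t> & inp,
                                         int ngram_min, int ngram_max, int n_draft) {
    const int n = (int) inp.size();
    for (int ngram_size = ngram_max; ngram_size >= ngram_min; --ngram_size) {
        if (n < 2*ngram_size) continue;
        const int32_t * ngram = &inp[n - ngram_size];   // the most recent n-gram
        for (int i = 0; i + 2*ngram_size <= n; ++i) {    // earlier, non-overlapping occurrence
            bool match = true;
            for (int j = 0; j < ngram_size; ++j) {
                if (inp[i + j] != ngram[j]) { match = false; break; }
            }
            const int start = i + ngram_size;
            if (match && start + n_draft <= n) {
                // the continuation of the earlier occurrence becomes the draft
                return std::vector<int32_t>(inp.begin() + start, inp.begin() + start + n_draft);
            }
        }
    }
    return {}; // no match: nothing to draft, fall back to normal decoding
}
```

The actual implementation in `lookup.cpp` below performs the same scan in-place over `inp`, pushes the drafted tokens into a target-model batch, and keeps them only for as long as they continue to match what the target model samples.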
 examples/lookup/lookup.cpp (new file, 230 lines)
@@ -0,0 +1,230 @@
#include "common.h"
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include <cmath>
|
||||||
|
#include <cstdio>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
int main(int argc, char ** argv){
|
||||||
|
gpt_params params;
|
||||||
|
|
||||||
|
if (!gpt_params_parse(argc, argv, params)) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// max/min n-grams size to search for in prompt
|
||||||
|
const int ngram_max = 4;
|
||||||
|
const int ngram_min = 1;
|
||||||
|
|
||||||
|
// length of the candidate / draft sequence, if match is found
|
||||||
|
const int n_draft = params.n_draft;
|
||||||
|
|
||||||
|
const bool dump_kv_cache = params.dump_kv_cache;
|
||||||
|
|
||||||
|
#ifndef LOG_DISABLE_LOGS
|
||||||
|
log_set_target(log_filename_generator("lookup", "log"));
|
||||||
|
LOG_TEE("Log start\n");
|
||||||
|
log_dump_cmdline(argc, argv);
|
||||||
|
#endif // LOG_DISABLE_LOGS
|
||||||
|
|
||||||
|
// init llama.cpp
|
||||||
|
llama_backend_init(params.numa);
|
||||||
|
|
||||||
|
llama_model * model = NULL;
|
||||||
|
llama_context * ctx = NULL;
|
||||||
|
|
||||||
|
// load the model
|
||||||
|
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
||||||
|
|
||||||
|
// tokenize the prompt
|
||||||
|
const bool add_bos = llama_should_add_bos_token(model);
|
||||||
|
LOG("add_bos tgt: %d\n", add_bos);
|
||||||
|
|
||||||
|
std::vector<llama_token> inp;
|
||||||
|
inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
|
||||||
|
|
||||||
|
const int max_context_size = llama_n_ctx(ctx);
|
||||||
|
const int max_tokens_list_size = max_context_size - 4;
|
||||||
|
|
||||||
|
if ((int) inp.size() > max_tokens_list_size) {
|
||||||
|
fprintf(stderr, "%s: error: prompt too long (%d tokens, max %d)\n", __func__, (int) inp.size(), max_tokens_list_size);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stderr, "\n\n");
|
||||||
|
|
||||||
|
for (auto id : inp) {
|
||||||
|
fprintf(stderr, "%s", llama_token_to_piece(ctx, id).c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
fflush(stderr);
|
||||||
|
|
||||||
|
const int n_input = inp.size();
|
||||||
|
|
||||||
|
const auto t_enc_start = ggml_time_us();
|
||||||
|
|
||||||
|
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
|
||||||
|
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
|
||||||
|
|
||||||
|
const auto t_enc_end = ggml_time_us();
|
||||||
|
|
||||||
|
int n_predict = 0;
|
||||||
|
int n_drafted = 0;
|
||||||
|
int n_accept = 0;
|
||||||
|
|
||||||
|
int n_past = inp.size();
|
||||||
|
|
||||||
|
bool has_eos = false;
|
||||||
|
|
||||||
|
struct llama_sampling_context * ctx_sampling = llama_sampling_init(params.sparams);
|
||||||
|
|
||||||
|
std::vector<llama_token> draft;
|
||||||
|
|
||||||
|
llama_batch batch_tgt = llama_batch_init(params.n_ctx, 0, 1);
|
||||||
|
|
||||||
|
// debug
|
||||||
|
struct llama_kv_cache_view kvc_view = llama_kv_cache_view_init(ctx, 1);
|
||||||
|
|
||||||
|
const auto t_dec_start = ggml_time_us();
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
// debug
|
||||||
|
if (dump_kv_cache) {
|
||||||
|
llama_kv_cache_view_update(ctx, &kvc_view);
|
||||||
|
dump_kv_cache_view_seqs(kvc_view, 40);
|
||||||
|
}
|
||||||
|
|
||||||
|
// print current draft sequence
|
||||||
|
LOG("drafted %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, draft).c_str());
|
||||||
|
|
||||||
|
int i_dft = 0;
|
||||||
|
while (true) {
|
||||||
|
// sample from the target model
|
||||||
|
llama_token id = llama_sampling_sample(ctx_sampling, ctx, NULL, i_dft);
|
||||||
|
|
||||||
|
llama_sampling_accept(ctx_sampling, ctx, id, true);
|
||||||
|
|
||||||
|
const std::string token_str = llama_token_to_piece(ctx, id);
|
||||||
|
|
||||||
|
if (!params.use_color) {
|
||||||
|
printf("%s", token_str.c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (id == llama_token_eos(model)) {
|
||||||
|
has_eos = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
++n_predict;
|
||||||
|
|
||||||
|
// check if the target token matches the draft
|
||||||
|
if (i_dft < (int) draft.size() && id == draft[i_dft]) {
|
||||||
|
LOG("the sampled target token matches the %dth drafted token (%d, '%s') - accepted\n", i_dft, id, token_str.c_str());
|
||||||
|
++n_accept;
|
||||||
|
++n_past;
|
||||||
|
++i_dft;
|
||||||
|
inp.push_back(id);
|
||||||
|
|
||||||
|
if (params.use_color) {
|
||||||
|
// color accepted draft token
|
||||||
|
printf("\033[34m%s\033[0m", token_str.c_str());
|
||||||
|
fflush(stdout);
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (params.use_color) {
|
||||||
|
printf("%s", token_str.c_str());
|
||||||
|
}
|
||||||
|
fflush(stdout);
|
||||||
|
|
||||||
|
|
||||||
|
LOG("the sampled target token (%d, '%s') did not match, or we ran out of drafted tokens\n", id, token_str.c_str());
|
||||||
|
|
||||||
|
draft.clear();
|
||||||
|
draft.push_back(id);
|
||||||
|
inp.push_back(id);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((params.n_predict > 0 && n_predict > params.n_predict) || has_eos) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// KV cache management
|
||||||
|
// clean the cache of draft tokens that weren't accepted
|
||||||
|
llama_kv_cache_seq_rm(ctx, 0, n_past, -1);
|
||||||
|
|
||||||
|
llama_batch_clear(batch_tgt);
|
||||||
|
llama_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);
|
||||||
|
|
||||||
|
// generate n_pred tokens through prompt lookup
|
||||||
|
auto prompt_lookup = [&]() -> void {
|
||||||
|
int inp_size = inp.size();
|
||||||
|
for (int ngram_size = ngram_max ; ngram_size > ngram_min; --ngram_size){
|
||||||
|
const llama_token * ngram = &inp[inp_size - ngram_size];
|
||||||
|
|
||||||
|
for (int i = 0; i <= (int) inp_size - (ngram_size * 2); ++i) {
|
||||||
|
bool match = true;
|
||||||
|
for (int j = 0; j < ngram_size; ++j) {
|
||||||
|
if (inp[i + j] != ngram[j]) {
|
||||||
|
match = false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (match) {
|
||||||
|
const int startIdx = i + ngram_size;
|
||||||
|
const int endIdx = startIdx + n_draft;
|
||||||
|
if (endIdx < inp_size) {
|
||||||
|
for (int j = startIdx; j < endIdx; ++j) {
|
||||||
|
LOG(" - draft candidate %d: %d\n", j, inp[j]);
|
||||||
|
draft.push_back(inp[j]);
|
||||||
|
llama_batch_add(batch_tgt, inp[j], n_past + (j - startIdx) + 1, { 0 }, true);
|
||||||
|
++n_drafted;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
prompt_lookup();
|
||||||
|
|
||||||
|
llama_decode(ctx, batch_tgt);
|
||||||
|
++n_past;
|
||||||
|
|
||||||
|
draft.erase(draft.begin());
|
||||||
|
}
|
||||||
|
|
||||||
|
auto t_dec_end = ggml_time_us();
|
||||||
|
|
||||||
|
LOG_TEE("\n\n");
|
||||||
|
|
||||||
|
LOG_TEE("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
|
||||||
|
LOG_TEE("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f));
|
||||||
|
|
||||||
|
LOG_TEE("\n");
|
||||||
|
LOG_TEE("n_draft = %d\n", n_draft);
|
||||||
|
LOG_TEE("n_predict = %d\n", n_predict);
|
||||||
|
LOG_TEE("n_drafted = %d\n", n_drafted);
|
||||||
|
LOG_TEE("n_accept = %d\n", n_accept);
|
||||||
|
LOG_TEE("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
|
||||||
|
|
||||||
|
LOG_TEE("\ntarget:\n");
|
||||||
|
llama_print_timings(ctx);
|
||||||
|
|
||||||
|
llama_sampling_free(ctx_sampling);
|
||||||
|
llama_batch_free(batch_tgt);
|
||||||
|
|
||||||
|
llama_free(ctx);
|
||||||
|
llama_free_model(model);
|
||||||
|
|
||||||
|
llama_backend_free();
|
||||||
|
|
||||||
|
fprintf(stderr, "\n\n");
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
@@ -7,28 +7,13 @@ find_package(Llama 0.0.1 REQUIRED)
 # Bake common functionality in with target. Because applications
 # using the relocatable Llama package should be outside of the
 # source tree, main-cmake-pkg pretends the dependencies are built-in.

 set(_common_path "${CMAKE_CURRENT_LIST_DIR}/../../common")
-add_library(common OBJECT
-    ${_common_path}/common.h
-    ${_common_path}/common.cpp
-    ${_common_path}/console.h
-    ${_common_path}/console.cpp
-    ${_common_path}/grammar-parser.h
-    ${_common_path}/grammar-parser.cpp
-    ${_common_path}/sampling.h
-    ${_common_path}/sampling.cpp
+add_library(common OBJECT)
+file(GLOB _common_files
+    "${_common_path}/*.h"
+    "${_common_path}/*.cpp"
 )
+target_sources(common PRIVATE ${_common_files})

-# WARNING: because build-info.h is auto-generated, it will only
-# be available after the user has built the llama.cpp sources.
-#
-configure_file(${_common_path}/../build-info.h
-    ${CMAKE_CURRENT_BINARY_DIR}/build-info.h
-    COPYONLY)
-
-target_include_directories(common PUBLIC ${LLAMA_INCLUDE_DIR}
-    ${CMAKE_CURRENT_BINARY_DIR})

 # If the common project was part of "main-cmake-pkg" the transient
 # defines would automatically be attached. Because the common func-
@@ -447,6 +447,21 @@ int main(int argc, char ** argv) {
     LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
     LOG_TEE("sampling order: \n%s\n", llama_sampling_order_print(sparams).c_str());
     LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
+
+    // group-attention state
+    // number of grouped KV tokens so far (used only if params.grp_attn_n > 1)
+    int ga_i = 0;
+
+    const int ga_n = params.grp_attn_n;
+    const int ga_w = params.grp_attn_w;
+
+    if (ga_n != 1) {
+        GGML_ASSERT(ga_n > 0 && "grp_attn_n must be positive"); // NOLINT
+        GGML_ASSERT(ga_w % ga_n == 0 && "grp_attn_w must be a multiple of grp_attn_n"); // NOLINT
+        //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of grp_attn_w"); // NOLINT
+        //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
+        LOG_TEE("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
+    }
     LOG_TEE("\n\n");

     if (params.interactive) {
@@ -508,7 +523,8 @@ int main(int argc, char ** argv) {
             fflush(stdout);
         }

-        // infinite text generation via context swapping
+        if (ga_n == 1) {
+            // infinite text generation via context shifting
             // if we run out of context:
             // - take the n_keep first tokens from the original prompt (via n_past)
             // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
@@ -540,6 +556,29 @@ int main(int argc, char ** argv) {
                 LOG("clear session path\n");
                 path_session.clear();
             }
+        } else {
+            // context extension via Self-Extend
+            while (n_past >= ga_i + ga_w) {
+                const int ib = (ga_n*ga_i)/ga_w;
+                const int bd = (ga_w/ga_n)*(ga_n - 1);
+                const int dd = (ga_w/ga_n) - ib*bd - ga_w;
+
+                LOG("\n");
+                LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i, n_past, ib*bd, ga_i + ib*bd, n_past + ib*bd);
+                LOG("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n);
+                LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd);
+
+                llama_kv_cache_seq_shift(ctx, 0, ga_i, n_past, ib*bd);
+                llama_kv_cache_seq_div  (ctx, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n);
+                llama_kv_cache_seq_shift(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd);
+
+                n_past -= bd;
+
+                ga_i += ga_w/ga_n;
+
+                LOG("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", n_past + bd, n_past, ga_i);
+            }
+        }
+
         // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
         if (n_session_consumed < (int) session_tokens.size()) {
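As a hand-computed illustration of the Self-Extend arithmetic added above (values chosen only for the example): with grp_attn_n = 2 and grp_attn_w = 512, the first time n_past reaches 512 with ga_i = 0 we get ib = (2*0)/512 = 0, bd = (512/2)*(2-1) = 256, and dd = (512/2) - 0 - 512 = -256. The three KV-cache operations then shift [0, 512) by 0, divide the positions in [0, 512) by 2, and shift the (empty) tail by -256; n_past drops to 512 - 256 = 256 and ga_i advances to 256, so the first 512 tokens now occupy 256 grouped positions and generation can continue past the trained context.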
 examples/passkey/CMakeLists.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
+set(TARGET passkey)
+add_executable(${TARGET} passkey.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)

 examples/passkey/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
+# llama.cpp/example/passkey
+
+See the following PRs for more info:
+
+- https://github.com/ggerganov/llama.cpp/pull/3856
+- https://github.com/ggerganov/llama.cpp/pull/4810
+
+### Usage
+
+```bash
+make -j && ./passkey ./models/llama-7b-v2/ggml-model-f16.gguf 250
+```
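Per the usage string in `passkey.cpp` below, the positional arguments are `MODEL_PATH N_JUNK N_GRP I_POS SEED`; omitted ones fall back to the defaults in the code (250 junk repetitions, no SelfExtend, random passkey position and seed). So, as a hypothetical variation of the README command, `make -j && ./passkey ./models/llama-7b-v2/ggml-model-f16.gguf 250 4` would keep 250 junk repetitions but enable LongLM SelfExtend with a group factor of 4.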
 examples/passkey/passkey.cpp (new file, 296 lines)
@@ -0,0 +1,296 @@
#include "common.h"
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include <cmath>
|
||||||
|
#include <cstdio>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
int main(int argc, char ** argv) {
|
||||||
|
gpt_params params;
|
||||||
|
|
||||||
|
if (argc == 1 || argv[1][0] == '-') {
|
||||||
|
printf("usage: %s MODEL_PATH N_JUNK N_GRP I_POS SEED\n" , argv[0]);
|
||||||
|
return 1 ;
|
||||||
|
}
|
||||||
|
|
||||||
|
int seed = -1;
|
||||||
|
|
||||||
|
int n_junk = 250; // number of times to repeat the junk text
|
||||||
|
int n_keep = 32; // number of tokens in the prompt prefix
|
||||||
|
int n_grp = 1; // if more than 1 - perform LongLM SelfExtend
|
||||||
|
int i_pos = -1; // position of the passkey in the junk text
|
||||||
|
|
||||||
|
if (argc >= 2) {
|
||||||
|
params.model = argv[1];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (argc >= 3) {
|
||||||
|
n_junk = std::stoi(argv[2]);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (argc >= 4) {
|
||||||
|
n_grp = std::stoi(argv[3]);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (argc >= 5) {
|
||||||
|
i_pos = std::stoi(argv[4]);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (argc >= 6) {
|
||||||
|
seed = std::stoi(argv[5]);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (seed == -1) {
|
||||||
|
seed = time(NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
srand(seed);
|
||||||
|
|
||||||
|
if (i_pos == -1) {
|
||||||
|
i_pos = rand() % n_junk;
|
||||||
|
}
|
||||||
|
|
||||||
|
const std::string prompt_prefix = "There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.";
|
||||||
|
const std::string prompt_suffix = " What is the pass key? The pass key is";
|
||||||
|
|
||||||
|
// generate junk text
|
||||||
|
params.prompt = prompt_prefix;
|
||||||
|
|
||||||
|
const int passkey = rand() % 50000 + 1;
|
||||||
|
|
||||||
|
for (int i = 0; i < n_junk; i++) {
|
||||||
|
if (i % n_junk == i_pos) {
|
||||||
|
params.prompt += " The pass key is " + std::to_string(passkey) + ". Remember it. " + std::to_string(passkey) + " is the pass key.";
|
||||||
|
}
|
||||||
|
|
||||||
|
params.prompt += " The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again.";
|
||||||
|
}
|
||||||
|
|
||||||
|
params.prompt += prompt_suffix;
|
||||||
|
|
||||||
|
// init LLM
|
||||||
|
|
||||||
|
llama_backend_init(params.numa);
|
||||||
|
|
||||||
|
// initialize the model
|
||||||
|
|
||||||
|
llama_model_params model_params = llama_model_default_params();
|
||||||
|
|
||||||
|
model_params.n_gpu_layers = 99; // offload all layers to the GPU
|
||||||
|
|
||||||
|
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
|
||||||
|
|
||||||
|
if (model == NULL) {
|
||||||
|
fprintf(stderr , "%s: error: unable to load model\n" , __func__);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// initialize the context
|
||||||
|
|
||||||
|
llama_context_params ctx_params = llama_context_default_params();
|
||||||
|
|
||||||
|
ctx_params.seed = seed;
|
||||||
|
ctx_params.n_ctx = llama_n_ctx_train(model)*n_grp + n_keep;
|
||||||
|
ctx_params.n_batch = 512;
|
||||||
|
ctx_params.n_threads = params.n_threads;
|
||||||
|
ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
|
||||||
|
|
||||||
|
GGML_ASSERT(ctx_params.n_batch % n_grp == 0 && "n_batch must be divisible by n_grp");
|
||||||
|
|
||||||
|
llama_context * ctx = llama_new_context_with_model(model, ctx_params);
|
||||||
|
|
||||||
|
if (ctx == NULL) {
|
||||||
|
fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// tokenize the prompt
|
||||||
|
std::vector<llama_token> tokens_list;
|
||||||
|
tokens_list = ::llama_tokenize(ctx, params.prompt, true);
|
||||||
|
|
||||||
|
// tokenize the prefix and use it as a sink
|
||||||
|
const int n_tokens_prefix = ::llama_tokenize(ctx, prompt_prefix, true).size();
|
||||||
|
|
||||||
|
const int n_tokens_all = tokens_list.size();
|
||||||
|
|
||||||
|
// we leave a margin of 16 tokens for the generated text - it should contain just the passkey
|
||||||
|
const int n_predict = 16;
|
||||||
|
|
||||||
|
// total length of the sequences including the prompt
|
||||||
|
const int n_len = n_tokens_all + n_predict;
|
||||||
|
|
||||||
|
const int n_ctx = llama_n_ctx(ctx) - n_keep;
|
||||||
|
const int n_kv_req = llama_n_ctx(ctx);
|
||||||
|
const int n_batch = ctx_params.n_batch;
|
||||||
|
const int n_batch_grp = ctx_params.n_batch/n_grp;
|
||||||
|
|
||||||
|
LOG_TEE("\n%s: n_len = %d, n_ctx = %d, n_kv_req = %d, n_grp = %d, n_batch = %d\n", __func__, n_len, n_ctx, n_kv_req, n_grp, n_batch);
|
||||||
|
|
||||||
|
// print the prompt token-by-token
|
||||||
|
|
||||||
|
LOG_TEE("\n");
|
||||||
|
LOG_TEE("prefix tokens: %d\n", n_tokens_prefix);
|
||||||
|
LOG_TEE("prompt tokens: %d\n", n_tokens_all);
|
||||||
|
//LOG_TEE("prompt: %s\n", params.prompt.c_str());
|
||||||
|
|
||||||
|
llama_batch batch = llama_batch_init(512, 0, 1);
|
||||||
|
|
||||||
|
int n_past = 0;
|
||||||
|
|
||||||
|
// fill the KV cache
|
||||||
|
for (int i = 0; i < n_ctx; i += n_batch) {
|
||||||
|
if (i > 0 && n_grp > 1) {
|
||||||
|
// if SelfExtend is enabled, we compress the position from the last batch by a factor of n_grp
|
||||||
|
const int ib = i/n_batch - 1;
|
||||||
|
const int bd = n_batch_grp*(n_grp - 1);
|
||||||
|
|
||||||
|
llama_kv_cache_seq_shift(ctx, 0, n_past - n_batch, n_past, ib*bd);
|
||||||
|
llama_kv_cache_seq_div (ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp);
|
||||||
|
|
||||||
|
n_past -= bd;
|
||||||
|
}
|
||||||
|
|
||||||
|
llama_batch_clear(batch);
|
||||||
|
|
||||||
|
for (int j = 0; j < n_batch && i + j < n_tokens_all; j++) {
|
||||||
|
llama_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (i + n_batch >= n_tokens_all) {
|
||||||
|
batch.logits[batch.n_tokens - 1] = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (llama_decode(ctx, batch) != 0) {
|
||||||
|
LOG_TEE("%s: llama_decode() failed\n", __func__);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_TEE("%s: processed: [%6d, %6d)\n", __func__, i, std::min(i + n_batch, n_tokens_all));
|
||||||
|
|
||||||
|
if (i + n_batch >= n_tokens_all) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = n_ctx; i < n_tokens_all; i += n_batch) {
|
||||||
|
const int n_discard = n_batch;
|
||||||
|
|
||||||
|
LOG_TEE("%s: shifting KV cache with %d\n", __func__, n_discard);
|
||||||
|
|
||||||
|
llama_kv_cache_seq_rm (ctx, 0, n_keep , n_keep + n_discard);
|
||||||
|
llama_kv_cache_seq_shift(ctx, 0, n_keep + n_discard, n_ctx, -n_discard);
|
||||||
|
|
||||||
|
n_past -= n_discard;
|
||||||
|
|
||||||
|
llama_batch_clear(batch);
|
||||||
|
|
||||||
|
for (int j = 0; j < n_batch && i + j < n_tokens_all; j++) {
|
||||||
|
llama_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (i + n_batch >= n_tokens_all) {
|
||||||
|
batch.logits[batch.n_tokens - 1] = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (llama_decode(ctx, batch) != 0) {
|
||||||
|
LOG_TEE("%s: llama_decode() failed\n", __func__);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_TEE("%s: processed: [%6d, %6d)\n", __func__, i, std::min(i + n_batch, n_tokens_all));
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
const int n_discard = n_past - n_ctx + n_predict;
|
||||||
|
|
||||||
|
if (n_discard > 0) {
|
||||||
|
LOG_TEE("%s: shifting KV cache with %d to free space for the answer\n", __func__, n_discard);
|
||||||
|
|
||||||
|
llama_kv_cache_seq_rm (ctx, 0, n_keep , n_keep + n_discard);
|
||||||
|
llama_kv_cache_seq_shift(ctx, 0, n_keep + n_discard, n_ctx, -n_discard);
|
||||||
|
|
||||||
|
n_past -= n_discard;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_TEE("\n");
|
||||||
|
LOG_TEE("%s: passkey = %d, inserted at position %d / %d (token pos: ~%d)\n", __func__, passkey, i_pos, n_junk, (i_pos * n_tokens_all) / n_junk);
|
||||||
|
LOG_TEE("\n");
|
||||||
|
|
||||||
|
// main loop
|
||||||
|
|
||||||
|
int n_cur = n_tokens_all;
|
||||||
|
int n_decode = 0;
|
||||||
|
|
||||||
|
LOG_TEE("%s", prompt_suffix.c_str());
|
||||||
|
fflush(stdout);
|
||||||
|
|
||||||
|
const auto t_main_start = ggml_time_us();
|
||||||
|
|
||||||
|
while (n_cur <= n_len) {
|
||||||
|
// sample the next token
|
||||||
|
{
|
||||||
|
auto n_vocab = llama_n_vocab(model);
|
||||||
|
auto * logits = llama_get_logits_ith(ctx, batch.n_tokens - 1);
|
||||||
|
|
||||||
|
std::vector<llama_token_data> candidates;
|
||||||
|
candidates.reserve(n_vocab);
|
||||||
|
|
||||||
|
for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
|
||||||
|
candidates.emplace_back(llama_token_data{ token_id, logits[token_id], 0.0f });
|
||||||
|
}
|
||||||
|
|
||||||
|
llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
|
||||||
|
|
||||||
|
// sample the most likely token
|
||||||
|
const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
|
||||||
|
|
||||||
|
// is it an end of stream?
|
||||||
|
if (new_token_id == llama_token_eos(model) || n_cur == n_len) {
|
||||||
|
LOG_TEE("\n");
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_TEE("%s", llama_token_to_piece(ctx, new_token_id).c_str());
|
||||||
|
fflush(stdout);
|
||||||
|
|
||||||
|
n_decode += 1;
|
||||||
|
|
||||||
|
// prepare the next batch
|
||||||
|
llama_batch_clear(batch);
|
||||||
|
|
||||||
|
// push this new token for next evaluation
|
||||||
|
llama_batch_add(batch, new_token_id, n_past++, { 0 }, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
n_cur += 1;
|
||||||
|
|
||||||
|
// evaluate the current batch with the transformer model
|
||||||
|
if (llama_decode(ctx, batch)) {
|
||||||
|
fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_TEE("\n");
|
||||||
|
|
||||||
|
const auto t_main_end = ggml_time_us();
|
||||||
|
|
||||||
|
LOG_TEE("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
|
||||||
|
__func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));
|
||||||
|
|
||||||
|
llama_print_timings(ctx);
|
||||||
|
|
||||||
|
fprintf(stderr, "\n");
|
||||||
|
|
||||||
|
llama_batch_free(batch);
|
||||||
|
|
||||||
|
llama_free(ctx);
|
||||||
|
llama_free_model(model);
|
||||||
|
|
||||||
|
llama_backend_free();
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
@@ -6,7 +6,7 @@ install(TARGETS ${TARGET} RUNTIME)
 target_compile_definitions(${TARGET} PRIVATE
     SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
 )
-target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
 if (WIN32)
     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
 endif()
@@ -23,6 +23,7 @@ Command line options:
 - `--host`: Set the hostname or ip address to listen. Default `127.0.0.1`.
 - `--port`: Set the port to listen. Default: `8080`.
 - `--path`: path from which to serve static files (default examples/server/public)
+- `--api-key`: Set an api key for request authorization. By default the server responds to every request. With an api key set, the requests must have the Authorization header set with the api key as Bearer token (see the example request after this list).
 - `--embedding`: Enable embedding extraction, Default: disabled.
 - `-np N`, `--parallel N`: Set the number of slots for process requests (default: 1)
 - `-cb`, `--cont-batching`: enable continuous batching (a.k.a dynamic batching) (default: disabled)
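For instance, assuming the server's completion endpoint at `/completion` and the default host and port documented above, a client started with `--api-key MY_SECRET_KEY` would pass the key as a Bearer token (the prompt and `n_predict` values here are only placeholders):

```sh
curl --request POST http://127.0.0.1:8080/completion \
    --header "Authorization: Bearer MY_SECRET_KEY" \
    --header "Content-Type: application/json" \
    --data '{"prompt": "Once upon a time", "n_predict": 32}'
```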
@@ -148,6 +149,8 @@ node index.js

 `frequency_penalty`: Repeat alpha frequency penalty (default: 0.0, 0.0 = disabled);

+`penalty_prompt`: This will replace the `prompt` for the purpose of the penalty evaluation. Can be either `null`, a string or an array of numbers representing tokens (default: `null` = use the original `prompt`).
+
 `mirostat`: Enable Mirostat sampling, controlling perplexity during text generation (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0).

 `mirostat_tau`: Set the Mirostat target entropy, parameter tau (default: 5.0).
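A hypothetical request body using the new `penalty_prompt` field might look like the following, where the token ids are placeholders and `frequency_penalty` is just one of the ordinary completion parameters documented above; the penalties are then evaluated against the supplied tokens instead of the original `prompt`:

```json
{
  "prompt": "Write a haiku about autumn.",
  "frequency_penalty": 0.5,
  "penalty_prompt": [101, 2027, 305]
}
```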
@@ -164,37 +167,7 @@ node index.js

 `n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token (default: 0)

-`image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:` In this case, `[img-12]` will be replaced by the embeddings of the image id 12 in the following `image_data` array: `{..., "image_data": [{"data": "<BASE64_STRING>", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA.
+`image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "<BASE64_STRING>", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA.

-*Result JSON:*
-
-Note: When using streaming mode (`stream`) only `content` and `stop` will be returned until end of completion.
-
-`content`: Completion result as a string (excluding `stopping_word` if any). In case of streaming mode, will contain the next token as a string.
-
-`stop`: Boolean for use with `stream` to check whether the generation has stopped (Note: This is not related to stopping words array `stop` from input options)
-
-`generation_settings`: The provided options above excluding `prompt` but including `n_ctx`, `model`
-
-`model`: The path to the model loaded with `-m`
-
-`prompt`: The provided `prompt`
-
-`stopped_eos`: Indicating whether the completion has stopped because it encountered the EOS token
-
-`stopped_limit`: Indicating whether the completion stopped because `n_predict` tokens were generated before stop words or EOS was encountered
-
-`stopped_word`: Indicating whether the completion stopped due to encountering a stopping word from `stop` JSON array provided
-
-`stopping_word`: The stopping word encountered which stopped the generation (or "" if not stopped due to a stopping word)
-
-`timings`: Hash of timing information about the completion such as the number of tokens `predicted_per_second`
-
-`tokens_cached`: Number of tokens from the prompt which could be re-used from previous completion (`n_past`)
-
-`tokens_evaluated`: Number of tokens evaluated in total from the prompt
-
-`truncated`: Boolean indicating if the context size was exceeded during generation, i.e. the number of tokens provided in the prompt (`tokens_evaluated`) plus tokens generated (`tokens predicted`) exceeded the context size (`n_ctx`)
-
 `slot_id`: Assign the completion task to an specific slot. If is -1 the task will be assigned to a Idle slot (default: -1)

@@ -202,6 +175,45 @@ node index.js

 `system_prompt`: Change the system prompt (initial prompt of all slots), this is useful for chat applications. [See more](#change-system-prompt-on-runtime)

+### Result JSON:
+
+* Note: When using streaming mode (`stream`) only `content` and `stop` will be returned until end of completion.
+
+- `completion_probabilities`: An array of token probabilities for each completion. The array's length is `n_predict`. Each item in the array has the following structure:
+
+```
+{
+  "content": "<the token selected by the model>",
+  "probs": [
+    {
+      "prob": float,
+      "tok_str": "<most likely token>"
+    },
+    {
+      "prob": float,
+      "tok_str": "<second most likely token>"
+    },
+    ...
+  ]
+},
+```
+Notice that each `probs` is an array of length `n_probs`.
+
+- `content`: Completion result as a string (excluding `stopping_word` if any). In case of streaming mode, will contain the next token as a string.
+- `stop`: Boolean for use with `stream` to check whether the generation has stopped (Note: This is not related to stopping words array `stop` from input options)
+- `generation_settings`: The provided options above excluding `prompt` but including `n_ctx`, `model`
+- `model`: The path to the model loaded with `-m`
+- `prompt`: The provided `prompt`
+- `stopped_eos`: Indicating whether the completion has stopped because it encountered the EOS token
+- `stopped_limit`: Indicating whether the completion stopped because `n_predict` tokens were generated before stop words or EOS was encountered
+- `stopped_word`: Indicating whether the completion stopped due to encountering a stopping word from `stop` JSON array provided
+- `stopping_word`: The stopping word encountered which stopped the generation (or "" if not stopped due to a stopping word)
+- `timings`: Hash of timing information about the completion such as the number of tokens `predicted_per_second`
+- `tokens_cached`: Number of tokens from the prompt which could be re-used from previous completion (`n_past`)
+- `tokens_evaluated`: Number of tokens evaluated in total from the prompt
+- `truncated`: Boolean indicating if the context size was exceeded during generation, i.e. the number of tokens provided in the prompt (`tokens_evaluated`) plus tokens generated (`tokens predicted`) exceeded the context size (`n_ctx`)
+
 - **POST** `/tokenize`: Tokenize a given text.

     *Options:*
@@ -222,6 +234,8 @@ node index.js

 `content`: Set the text to process.

+`image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `content`. You can determine the place of the image in the content as in the following: `Image: [img-21].\nCaption: This is a picture of a house`. In this case, `[img-21]` will be replaced by the embeddings of the image with id `21` in the following `image_data` array: `{..., "image_data": [{"data": "<BASE64_STRING>", "id": 21}]}`. Use `image_data` only with multimodal models, e.g., LLaVA.
+
 - **POST** `/infill`: For code infilling. Takes a prefix and a suffix and returns the predicted completion as stream.

     *Options:*
@@ -74,355 +74,376 @@ unsigned char completion_js[] = {
 [auto-generated hex dump of the embedded completion.js omitted; the decoded change appears to extend the fetch request headers with `...(params.api_key ? {'Authorization': `Bearer ${params.api_key}`} : {})`, so the built-in web UI sends the configured API key as a Bearer token]
|
||||||
0x75, 0x6c, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f,
|
0x68, 0x65, 0x72, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x67, 0x65, 0x78, 0x20, 0x3d, 0x20,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73,
|
||||||
0x2f, 0x5e, 0x28, 0x5c, 0x53, 0x2b, 0x29, 0x3a, 0x5c, 0x73, 0x28, 0x2e,
|
0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x73, 0x74, 0x6f,
|
||||||
0x2a, 0x29, 0x24, 0x2f, 0x67, 0x6d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
0x70, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72,
|
||||||
0x20, 0x6c, 0x69, 0x6e, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x69, 0x6e,
|
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x67,
|
||||||
0x65, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65,
|
||||||
0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x61, 0x74, 0x63,
|
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
||||||
0x68, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x65, 0x78,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x65, 0x63, 0x28, 0x6c, 0x69, 0x6e, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
0x20, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x61,
|
0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20,
|
||||||
0x74, 0x63, 0x68, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5b,
|
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
|
||||||
0x6d, 0x61, 0x74, 0x63, 0x68, 0x5b, 0x31, 0x5d, 0x5d, 0x20, 0x3d, 0x20,
|
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x6d, 0x61, 0x74, 0x63, 0x68, 0x5b, 0x32, 0x5d, 0x0a, 0x20, 0x20, 0x20,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x69,
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x6e, 0x63, 0x65, 0x20, 0x77, 0x65, 0x20, 0x6b, 0x6e, 0x6f, 0x77, 0x20,
|
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x61,
|
||||||
0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x6c, 0x6c, 0x61, 0x6d,
|
0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x61, 0x2e, 0x63, 0x70, 0x70, 0x2c, 0x20, 0x6c, 0x65, 0x74, 0x27, 0x73,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b,
|
||||||
0x20, 0x6a, 0x75, 0x73, 0x74, 0x20, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65,
|
|
||||||
0x20, 0x74, 0x68, 0x65, 0x20, 0x6a, 0x73, 0x6f, 0x6e, 0x20, 0x69, 0x6e,
|
|
||||||
0x20, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75,
|
|
||||||
0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x7b, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72,
|
|
||||||
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d,
|
|
||||||
0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28,
|
|
||||||
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x29,
|
|
||||||
0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x2b, 0x3d,
|
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61,
|
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x20,
|
0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f,
|
0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
||||||
0x2f, 0x20, 0x79, 0x69, 0x65, 0x6c, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x79, 0x69, 0x65, 0x6c,
|
0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x3d,
|
||||||
0x64, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x0a, 0x20,
|
0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f,
|
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72,
|
||||||
0x2f, 0x20, 0x69, 0x66, 0x20, 0x77, 0x65, 0x20, 0x67, 0x6f, 0x74, 0x20,
|
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x61, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
|
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x65,
|
||||||
0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
|
0x72, 0x72, 0x6f, 0x72, 0x28, 0x60, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e,
|
||||||
0x2c, 0x20, 0x77, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x72,
|
0x63, 0x70, 0x70, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x3a, 0x20, 0x24,
|
||||||
0x65, 0x61, 0x6b, 0x20, 0x68, 0x65, 0x72, 0x65, 0x0a, 0x20, 0x20, 0x20,
|
0x7b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20,
|
0x72, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x7d, 0x60, 0x29,
|
||||||
0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61,
|
0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x2e, 0x73, 0x74, 0x6f, 0x70, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61,
|
0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, 0x68, 0x20,
|
||||||
|
0x28, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66,
|
||||||
|
0x20, 0x28, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x21, 0x3d, 0x3d,
|
||||||
|
0x20, 0x27, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72,
|
||||||
|
0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63,
|
||||||
|
0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72,
|
||||||
|
0x28, 0x22, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x20, 0x65, 0x72, 0x72, 0x6f,
|
||||||
|
0x72, 0x3a, 0x20, 0x22, 0x2c, 0x20, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x72, 0x6f,
|
||||||
|
0x77, 0x20, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x66,
|
||||||
|
0x69, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e,
|
||||||
|
0x61, 0x62, 0x6f, 0x72, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d,
|
||||||
|
0x0a, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x63,
|
||||||
|
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f,
|
||||||
|
0x2f, 0x20, 0x43, 0x61, 0x6c, 0x6c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61,
|
||||||
|
0x2c, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x6e, 0x20,
|
||||||
|
0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
|
||||||
|
0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x63, 0x61,
|
||||||
|
0x6e, 0x20, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x20,
|
||||||
|
0x74, 0x6f, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x45, 0x78, 0x61,
|
||||||
|
0x6d, 0x70, 0x6c, 0x65, 0x3a, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x7b, 0x20,
|
||||||
|
0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61,
|
||||||
|
0x72, 0x67, 0x65, 0x74, 0x20, 0x7d, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20,
|
||||||
|
0x27, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x2e, 0x6a, 0x73, 0x27, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x6e,
|
||||||
|
0x20, 0x3d, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x45, 0x76, 0x65, 0x6e,
|
||||||
|
0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x28, 0x70, 0x72, 0x6f, 0x6d,
|
||||||
|
0x70, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f,
|
||||||
|
0x6e, 0x6e, 0x2e, 0x61, 0x64, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c,
|
||||||
|
0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x28, 0x22, 0x6d, 0x65, 0x73,
|
||||||
|
0x73, 0x61, 0x67, 0x65, 0x22, 0x2c, 0x20, 0x28, 0x63, 0x68, 0x75, 0x6e,
|
||||||
|
0x6b, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x2f, 0x2f, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
|
||||||
|
0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b,
|
||||||
|
0x2e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
|
||||||
|
0x65, 0x6e, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x7d,
|
||||||
|
0x29, 0x0a, 0x2f, 0x2f, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x20,
|
||||||
|
0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x45,
|
||||||
|
0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x3d,
|
||||||
|
0x20, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61,
|
||||||
|
0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x2c, 0x20, 0x63,
|
||||||
|
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x29, 0x20,
|
||||||
|
0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74,
|
||||||
|
0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
|
||||||
|
0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x45, 0x76, 0x65, 0x6e, 0x74,
|
||||||
|
0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
||||||
|
0x28, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e,
|
||||||
|
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63,
|
||||||
|
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61,
|
||||||
|
0x69, 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68,
|
||||||
|
0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61,
|
||||||
|
0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72,
|
||||||
|
0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29,
|
||||||
|
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66,
|
||||||
|
0x20, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61,
|
||||||
|
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x2b, 0x3d, 0x20, 0x63,
|
||||||
|
0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x63, 0x6f,
|
||||||
|
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67,
|
||||||
|
0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45,
|
||||||
|
0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20, 0x43, 0x75, 0x73,
|
||||||
|
0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x22, 0x6d, 0x65,
|
||||||
|
0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65,
|
||||||
|
0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e,
|
||||||
|
0x64, 0x61, 0x74, 0x61, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x69, 0x66, 0x20, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61,
|
||||||
0x74, 0x61, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
|
0x74, 0x61, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
|
||||||
0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20,
|
0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20,
|
||||||
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
|
0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69,
|
||||||
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
|
0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28,
|
||||||
0x73, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64,
|
0x6e, 0x65, 0x77, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76,
|
||||||
0x61, 0x74, 0x61, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
|
0x65, 0x6e, 0x74, 0x28, 0x22, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
|
||||||
0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b,
|
0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a,
|
||||||
0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x20,
|
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
|
||||||
0x3d, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x7d, 0x29, 0x29, 0x3b,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62,
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
|
||||||
0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x20,
|
0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65,
|
||||||
0x63, 0x61, 0x74, 0x63, 0x68, 0x20, 0x28, 0x65, 0x29, 0x20, 0x7b, 0x0a,
|
0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x65, 0x2e, 0x6e, 0x61,
|
0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x22, 0x74, 0x69, 0x6d, 0x69,
|
||||||
0x6d, 0x65, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x27, 0x41, 0x62, 0x6f, 0x72,
|
0x6e, 0x67, 0x73, 0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65, 0x74, 0x61,
|
||||||
0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20,
|
0x69, 0x6c, 0x3a, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65,
|
0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x7d,
|
||||||
0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x22, 0x6c, 0x6c, 0x61, 0x6d,
|
0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a,
|
||||||
0x61, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x3a, 0x20, 0x22, 0x2c, 0x20,
|
0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76,
|
||||||
0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20,
|
0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69,
|
||||||
0x20, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x65, 0x3b, 0x0a, 0x20,
|
0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28,
|
||||||
0x20, 0x7d, 0x0a, 0x20, 0x20, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x6c, 0x79,
|
0x6e, 0x65, 0x77, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76,
|
||||||
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72,
|
0x65, 0x6e, 0x74, 0x28, 0x22, 0x64, 0x6f, 0x6e, 0x65, 0x22, 0x2c, 0x20,
|
||||||
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x28,
|
0x7b, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x7b, 0x20,
|
||||||
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x72, 0x65,
|
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x7d, 0x20, 0x7d, 0x29,
|
||||||
0x74, 0x75, 0x72, 0x6e, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x29, 0x28, 0x29, 0x3b, 0x0a, 0x20,
|
||||||
0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x43, 0x61, 0x6c, 0x6c,
|
0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x65, 0x76, 0x65, 0x6e,
|
||||||
0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2c, 0x20, 0x72, 0x65, 0x74, 0x75,
|
0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a,
|
||||||
0x72, 0x6e, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20,
|
0x2f, 0x2f, 0x20, 0x43, 0x61, 0x6c, 0x6c, 0x20, 0x6c, 0x6c, 0x61, 0x6d,
|
||||||
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20,
|
0x61, 0x2c, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x20,
|
||||||
0x79, 0x6f, 0x75, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x73, 0x75, 0x62, 0x63,
|
0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74,
|
||||||
0x72, 0x69, 0x62, 0x65, 0x20, 0x74, 0x6f, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f,
|
0x20, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x73, 0x20, 0x74, 0x6f,
|
||||||
0x2f, 0x20, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x3a, 0x0a, 0x2f,
|
0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74,
|
||||||
0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x70, 0x6f,
|
0x65, 0x64, 0x20, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x20, 0x54, 0x68, 0x69,
|
||||||
0x72, 0x74, 0x20, 0x7b, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x45, 0x76,
|
0x73, 0x20, 0x64, 0x6f, 0x65, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x73,
|
||||||
0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x7d, 0x20,
|
0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61,
|
||||||
0x66, 0x72, 0x6f, 0x6d, 0x20, 0x27, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6c,
|
0x6d, 0x69, 0x6e, 0x67, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x45,
|
||||||
0x65, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6a, 0x73, 0x27, 0x0a, 0x2f, 0x2f,
|
0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x3a, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f,
|
||||||
0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74,
|
0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50,
|
||||||
0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x20, 0x3d, 0x20, 0x6c, 0x6c, 0x61, 0x6d,
|
0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70,
|
||||||
0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
|
0x74, 0x29, 0x2e, 0x74, 0x68, 0x65, 0x6e, 0x28, 0x28, 0x63, 0x6f, 0x6e,
|
||||||
0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x20,
|
0x74, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x2f,
|
||||||
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x61, 0x64, 0x64, 0x45,
|
0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75,
|
||||||
0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72,
|
0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x63,
|
||||||
0x28, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2c, 0x20,
|
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x20, 0x20,
|
||||||
0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b,
|
0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20,
|
||||||
0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63,
|
0x20, 0x20, 0x20, 0x20, 0x6f, 0x72, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f,
|
||||||
0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63,
|
||||||
0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
|
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x61, 0x77, 0x61,
|
||||||
0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x0a, 0x2f, 0x2f,
|
0x69, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x6d,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x2f, 0x2f, 0x0a, 0x65, 0x78,
|
0x69, 0x73, 0x65, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x29, 0x0a,
|
||||||
|
0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d,
|
||||||
|
0x65, 0x6e, 0x74, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x63, 0x6f,
|
||||||
|
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x0a, 0x65, 0x78,
|
||||||
0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c,
|
0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c,
|
||||||
0x6c, 0x61, 0x6d, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72,
|
0x6c, 0x61, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x20,
|
||||||
0x67, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70,
|
0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70,
|
||||||
0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, 0x20,
|
0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x2c, 0x20,
|
||||||
0x7b, 0x7d, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x3d,
|
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x29,
|
||||||
0x20, 0x7b, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75,
|
||||||
0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54,
|
0x72, 0x6e, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x50, 0x72, 0x6f, 0x6d, 0x69,
|
||||||
0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20,
|
0x73, 0x65, 0x28, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x72, 0x65,
|
||||||
0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x28,
|
0x73, 0x6f, 0x6c, 0x76, 0x65, 0x2c, 0x20, 0x72, 0x65, 0x6a, 0x65, 0x63,
|
||||||
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x28, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20,
|
0x74, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20,
|
0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20,
|
||||||
0x3d, 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f,
|
0x3d, 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72,
|
||||||
|
0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f,
|
||||||
0x72, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e,
|
0x72, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e,
|
||||||
0x73, 0x74, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20,
|
0x73, 0x74, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20,
|
||||||
0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74,
|
0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74,
|
||||||
0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x6f,
|
0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x6f,
|
||||||
0x6e, 0x66, 0x69, 0x67, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
0x6e, 0x66, 0x69, 0x67, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b,
|
|
||||||
0x2e, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
||||||
0x20, 0x2b, 0x3d, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61,
|
0x20, 0x2b, 0x3d, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61,
|
||||||
0x74, 0x61, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a,
|
0x74, 0x61, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70,
|
0x20, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x28, 0x63, 0x6f,
|
||||||
0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65,
|
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x77, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e,
|
0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, 0x68, 0x20, 0x28, 0x65, 0x72, 0x72,
|
||||||
0x74, 0x28, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2c,
|
0x6f, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x7b, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x63,
|
0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x65, 0x72, 0x72, 0x6f, 0x72,
|
||||||
0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x20, 0x7d, 0x29,
|
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d,
|
||||||
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20,
|
0x29, 0x3b, 0x0a, 0x7d, 0x3b, 0x0a, 0x0a, 0x2f, 0x2a, 0x2a, 0x0a, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x68, 0x75,
|
0x2a, 0x20, 0x28, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
|
||||||
0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x67, 0x65, 0x6e, 0x65,
|
0x64, 0x29, 0x0a, 0x20, 0x2a, 0x2f, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72,
|
||||||
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69,
|
|
||||||
0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67,
|
|
||||||
0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45,
|
|
||||||
0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20, 0x43, 0x75, 0x73,
|
|
||||||
0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x22, 0x67, 0x65,
|
|
||||||
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74,
|
|
||||||
0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65,
|
|
||||||
0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e,
|
|
||||||
0x64, 0x61, 0x74, 0x61, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
|
|
||||||
0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
|
|
||||||
0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28,
|
|
||||||
0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74,
|
|
||||||
0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54,
|
|
||||||
0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74,
|
|
||||||
0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20,
|
|
||||||
0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28,
|
|
||||||
0x22, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x2c, 0x20, 0x7b,
|
|
||||||
0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x63, 0x68, 0x75,
|
|
||||||
0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69,
|
|
||||||
0x6e, 0x67, 0x73, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67,
|
|
||||||
0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45,
|
|
||||||
0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20, 0x43, 0x75, 0x73,
|
|
||||||
0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x22, 0x64, 0x6f,
|
|
||||||
0x6e, 0x65, 0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69,
|
|
||||||
0x6c, 0x3a, 0x20, 0x7b, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
|
||||||
0x20, 0x7d, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x29,
|
|
||||||
0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e,
|
|
||||||
0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
|
|
||||||
0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x43, 0x61, 0x6c, 0x6c,
|
|
||||||
0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2c, 0x20, 0x72, 0x65, 0x74, 0x75,
|
|
||||||
0x72, 0x6e, 0x20, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65,
|
|
||||||
0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76,
|
|
||||||
0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f,
|
|
||||||
0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x20, 0x74, 0x65, 0x78, 0x74,
|
|
||||||
0x2e, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64, 0x6f, 0x65, 0x73, 0x20,
|
|
||||||
0x6e, 0x6f, 0x74, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x20,
|
|
||||||
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x0a, 0x2f, 0x2f,
|
|
||||||
0x0a, 0x2f, 0x2f, 0x20, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x3a,
|
|
||||||
0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c,
|
|
||||||
0x6c, 0x61, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x28,
|
|
||||||
0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x29, 0x2e, 0x74, 0x68, 0x65, 0x6e,
|
|
||||||
0x28, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d,
|
|
||||||
0x3e, 0x20, 0x7b, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x72,
|
|
||||||
0x69, 0x74, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29,
|
|
||||||
0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x2f,
|
|
||||||
0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x72, 0x0a,
|
|
||||||
0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f,
|
|
||||||
0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20,
|
|
||||||
0x3d, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d,
|
|
||||||
0x61, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x28, 0x70, 0x72, 0x6f,
|
|
||||||
0x6d, 0x70, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x72, 0x69,
|
|
||||||
0x74, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x0a,
|
|
||||||
0x2f, 0x2f, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f,
|
|
||||||
0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x72, 0x6f,
|
|
||||||
0x6d, 0x69, 0x73, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x6d,
|
|
||||||
0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d,
|
|
||||||
0x20, 0x7b, 0x7d, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20,
|
|
||||||
0x3d, 0x20, 0x7b, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20,
|
|
||||||
0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x65, 0x77, 0x20,
|
|
||||||
0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x28, 0x61, 0x73, 0x79, 0x6e,
|
|
||||||
0x63, 0x20, 0x28, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x2c, 0x20,
|
|
||||||
0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b,
|
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f, 0x6e,
|
|
||||||
0x74, 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74,
|
|
||||||
0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75, 0x6e,
|
|
||||||
0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70,
|
|
||||||
0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d,
|
|
||||||
0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29, 0x29, 0x20,
|
|
||||||
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f,
|
|
||||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x2b, 0x3d, 0x20, 0x63, 0x68, 0x75,
|
|
||||||
0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
|
|
||||||
0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d,
|
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x6c,
|
|
||||||
0x76, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x3b,
|
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, 0x68,
|
|
||||||
0x20, 0x28, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x28,
|
|
||||||
0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x7d, 0x3b, 0x0a, 0x0a,
|
|
||||||
0x2f, 0x2a, 0x2a, 0x0a, 0x20, 0x2a, 0x20, 0x28, 0x64, 0x65, 0x70, 0x72,
|
|
||||||
0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x29, 0x0a, 0x20, 0x2a, 0x2f, 0x0a,
|
|
||||||
0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74,
|
|
||||||
0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
|
|
||||||
0x74, 0x65, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28,
|
|
||||||
0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74,
|
|
||||||
0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2c, 0x20, 0x63, 0x61, 0x6c, 0x6c,
|
|
||||||
0x62, 0x61, 0x63, 0x6b, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20,
|
|
||||||
0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x28,
|
|
||||||
0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20,
|
|
||||||
0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x61, 0x72,
|
|
||||||
0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20,
|
|
||||||
0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x7b, 0x20, 0x63, 0x6f,
|
|
||||||
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x7d, 0x29, 0x29,
|
|
||||||
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x62,
|
|
||||||
0x61, 0x63, 0x6b, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x29, 0x3b, 0x0a,
|
|
||||||
0x20, 0x20, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x47, 0x65,
|
|
||||||
0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x20,
|
|
||||||
0x69, 0x6e, 0x66, 0x6f, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, 0x68,
|
|
||||||
0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x20, 0x54, 0x68,
|
|
||||||
0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x75, 0x73, 0x65, 0x66, 0x75, 0x6c,
|
|
||||||
0x20, 0x66, 0x6f, 0x72, 0x20, 0x67, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
|
|
||||||
0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74,
|
|
||||||
0x20, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x20, 0x61, 0x6e, 0x64, 0x20,
|
|
||||||
0x73, 0x6f, 0x20, 0x6f, 0x6e, 0x2e, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72,
|
|
||||||
0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d,
|
0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d,
|
||||||
0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d,
|
0x61, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x3d, 0x20,
|
||||||
0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e,
|
0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d,
|
||||||
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x67, 0x65,
|
0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
|
||||||
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74,
|
0x72, 0x2c, 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x29,
|
||||||
0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20,
|
||||||
0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
|
0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74,
|
||||||
0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x61,
|
0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c,
|
||||||
0x77, 0x61, 0x69, 0x74, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x28, 0x22,
|
0x61, 0x6d, 0x61, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70,
|
||||||
0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x6a, 0x73, 0x6f, 0x6e, 0x22,
|
0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d,
|
||||||
0x29, 0x2e, 0x74, 0x68, 0x65, 0x6e, 0x28, 0x72, 0x20, 0x3d, 0x3e, 0x20,
|
0x73, 0x2c, 0x20, 0x7b, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
|
||||||
0x72, 0x2e, 0x6a, 0x73, 0x6f, 0x6e, 0x28, 0x29, 0x29, 0x3b, 0x0a, 0x20,
|
0x6c, 0x65, 0x72, 0x20, 0x7d, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
||||||
0x20, 0x7d, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20,
|
0x20, 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x28, 0x63,
|
||||||
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
|
0x68, 0x75, 0x6e, 0x6b, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x7d,
|
||||||
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x7d, 0x0a
|
0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x47, 0x65, 0x74, 0x20, 0x74, 0x68, 0x65,
|
||||||
|
0x20, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x20,
|
||||||
|
0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72,
|
||||||
|
0x76, 0x65, 0x72, 0x2e, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73,
|
||||||
|
0x20, 0x75, 0x73, 0x65, 0x66, 0x75, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x20,
|
||||||
|
0x67, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20,
|
||||||
|
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x20, 0x77, 0x69, 0x6e, 0x64,
|
||||||
|
0x6f, 0x77, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x6f, 0x20, 0x6f, 0x6e,
|
||||||
|
0x2e, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e,
|
||||||
|
0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x4d, 0x6f, 0x64, 0x65,
|
||||||
|
0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e,
|
||||||
|
0x63, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
||||||
|
0x69, 0x66, 0x20, 0x28, 0x21, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
|
||||||
|
0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
|
||||||
|
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x67, 0x65, 0x6e, 0x65,
|
||||||
|
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69,
|
||||||
|
0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20,
|
||||||
|
0x66, 0x65, 0x74, 0x63, 0x68, 0x28, 0x22, 0x2f, 0x6d, 0x6f, 0x64, 0x65,
|
||||||
|
0x6c, 0x2e, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0x29, 0x2e, 0x74, 0x68, 0x65,
|
||||||
|
0x6e, 0x28, 0x72, 0x20, 0x3d, 0x3e, 0x20, 0x72, 0x2e, 0x6a, 0x73, 0x6f,
|
||||||
|
0x6e, 0x28, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20,
|
||||||
|
0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72,
|
||||||
|
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e,
|
||||||
|
0x67, 0x73, 0x3b, 0x0a, 0x7d, 0x0a
|
||||||
};
|
};
|
||||||
unsigned int completion_js_len = 5099;
|
unsigned int completion_js_len = 5346;
|
||||||
|
File diff suppressed because it is too large
@ -95,6 +95,15 @@ export async function* llama(prompt, params = {}, config = {}) {
             break;
           }
         }
+        if (result.error) {
+          result.error = JSON.parse(result.error);
+          if (result.error.content.includes('slot unavailable')) {
+            // Throw an error to be caught by upstream callers
+            throw new Error('slot unavailable');
+          } else {
+            console.error(`llama.cpp error: ${result.error.content}`);
+          }
+        }
         if (result.error) {
           result.error = JSON.parse(result.error);
           console.error(`llama.cpp error: ${result.error.content}`);
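
The new `throw new Error('slot unavailable')` path lets a caller tell a temporarily busy server apart from a real failure. A minimal illustrative sketch of a caller-side retry follows (not part of this diff; the retry count and back-off delay are arbitrary assumptions, while `llama()` and the `chunk.data.content` shape come from the script above):

    // Sketch only: retry a streamed completion when every server slot is busy.
    import { llama } from '/completion.js';

    async function completeWithRetry(prompt, params = {}, retries = 3) {
      for (let attempt = 0; attempt <= retries; attempt++) {
        try {
          let content = '';
          for await (const chunk of llama(prompt, params)) {
            if (chunk.data) {
              content += chunk.data.content;
            }
          }
          return content;
        } catch (e) {
          // Only the 'slot unavailable' error is worth retrying; rethrow the rest.
          if (e.message !== 'slot unavailable' || attempt === retries) {
            throw e;
          }
          await new Promise((resolve) => setTimeout(resolve, 1000 * (attempt + 1)));
        }
      }
    }
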
@ -427,7 +427,7 @@
       }

       if (data.timings) {
-        llamaStats.value = data.timings;
+        llamaStats.value = data;
       }
     }

@ -880,7 +880,7 @@
     }
     return html`
       <span>
-        ${llamaStats.value.predicted_per_token_ms.toFixed()}ms per token, ${llamaStats.value.predicted_per_second.toFixed(2)} tokens per second
+        ${llamaStats.value.tokens_predicted} predicted, ${llamaStats.value.tokens_cached} cached, ${llamaStats.value.timings.predicted_per_token_ms.toFixed()}ms per token, ${llamaStats.value.timings.predicted_per_second.toFixed(2)} tokens per second
       </span>
       `
     }
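
With this change `llamaStats` holds the whole final message instead of only its `timings` object, so token counts and timing fields now live at different depths. A small hedged helper showing the assumed shape (field names are taken from the template above; the helper itself is not part of this diff):

    // Sketch only: format the fields the updated stats template reads.
    const formatStats = (stats) => {
      if (!stats || !stats.timings) {
        return '';
      }
      return `${stats.tokens_predicted} predicted, ${stats.tokens_cached} cached, `
        + `${stats.timings.predicted_per_token_ms.toFixed()}ms per token, `
        + `${stats.timings.predicted_per_second.toFixed(2)} tokens per second`;
    };
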
@ -25,6 +25,7 @@
 #include <thread>
 #include <mutex>
 #include <chrono>
+#include <condition_variable>

 #ifndef SERVER_VERBOSE
 #define SERVER_VERBOSE 1
@ -81,7 +82,7 @@ static inline bool is_base64(uint8_t c)
     return (isalnum(c) || (c == '+') || (c == '/'));
 }

-static std::vector<uint8_t> base64_decode(std::string const &encoded_string)
+static std::vector<uint8_t> base64_decode(const std::string & encoded_string)
 {
     int i = 0;
     int j = 0;
@ -211,7 +212,7 @@ struct slot_image
     float * image_embedding = nullptr;
     int32_t image_tokens = 0;

-    clip_image_u8 img_data;
+    clip_image_u8 * img_data;

     std::string prefix_prompt; // before of this image
 };
@ -436,16 +437,23 @@ struct llama_client_slot
         for (slot_image & img : images)
         {
             free(img.image_embedding);
-            delete[] img.img_data.data;
+            if (img.img_data) {
+                clip_image_u8_free(img.img_data);
+            }
             img.prefix_prompt = "";
         }

         images.clear();
-        // llama_set_rng_seed(ctx, params.seed); in batched the seed matter???????
     }

     bool has_budget(gpt_params &global_params) {
+        if (params.n_predict == -1 && global_params.n_predict == -1)
+        {
+            return true; // limitless
+        }
+
         n_remaining = -1;
+
         if (params.n_predict != -1)
         {
             n_remaining = params.n_predict - n_decoded;
@ -454,7 +462,8 @@
         {
             n_remaining = global_params.n_predict - n_decoded;
         }
-        return n_remaining > 0 || n_remaining == -1; // no budget || limitless
+
+        return n_remaining > 0; // no budget
     }

     bool available() const {
@ -542,7 +551,9 @@ struct llama_server_context
     std::vector<task_result> queue_results;
     std::vector<task_multi> queue_multitasks;
     std::mutex mutex_tasks; // also guards id_gen, and queue_multitasks
+    std::condition_variable condition_tasks;
     std::mutex mutex_results;
+    std::condition_variable condition_results;

     ~llama_server_context()
     {
@ -761,6 +772,42 @@ struct llama_server_context
             slot->prompt = "";
         }

+        slot->sparams.penalty_prompt_tokens.clear();
+        slot->sparams.use_penalty_prompt_tokens = false;
+        const auto &penalty_prompt = data.find("penalty_prompt");
+        if (penalty_prompt != data.end())
+        {
+            if (penalty_prompt->is_string())
+            {
+                const auto penalty_prompt_string = penalty_prompt->get<std::string>();
+                auto penalty_tokens = llama_tokenize(model, penalty_prompt_string, false);
+                slot->sparams.penalty_prompt_tokens.swap(penalty_tokens);
+                if (slot->params.n_predict > 0)
+                {
+                    slot->sparams.penalty_prompt_tokens.reserve(slot->sparams.penalty_prompt_tokens.size() + slot->params.n_predict);
+                }
+                slot->sparams.use_penalty_prompt_tokens = true;
+            }
+            else if (penalty_prompt->is_array())
+            {
+                const auto n_tokens = penalty_prompt->size();
+                slot->sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot->params.n_predict));
+                const int n_vocab = llama_n_vocab(model);
+                for (const auto &penalty_token : *penalty_prompt)
+                {
+                    if (penalty_token.is_number_integer())
+                    {
+                        const auto tok = penalty_token.get<llama_token>();
+                        if (tok >= 0 && tok < n_vocab)
+                        {
+                            slot->sparams.penalty_prompt_tokens.push_back(tok);
+                        }
+                    }
+                }
+                slot->sparams.use_penalty_prompt_tokens = true;
+            }
+        }
+
         slot->sparams.logit_bias.clear();

         if (json_value(data, "ignore_eos", false))
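
The block above accepts `penalty_prompt` either as a plain string (tokenized server-side) or as an array of in-vocabulary token ids. A hedged client-side sketch of such a request (the `/completion` endpoint and `content` response field follow the server's existing API; the prompt and parameter values are purely illustrative):

    // Sketch only: extend the repetition penalty to also cover a fixed prefix.
    const response = await fetch('/completion', {
      method: 'POST',
      body: JSON.stringify({
        prompt: 'Once upon a time',
        n_predict: 64,
        repeat_penalty: 1.2,
        // Either form is accepted; an array must contain in-vocabulary token ids.
        penalty_prompt: 'Once upon a time',
      }),
    });
    const result = await response.json();
    console.log(result.content);
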
@ -813,24 +860,17 @@ struct llama_server_context
             {
                 for (const auto &img : *images_data)
                 {
-                    std::string data_b64 = img["data"].get<std::string>();
+                    const std::vector<uint8_t> image_buffer = base64_decode(img["data"].get<std::string>());

                     slot_image img_sl;
                     img_sl.id = img.count("id") != 0 ? img["id"].get<int>() : slot->images.size();
-                    int width, height, channels;
-                    std::vector<uint8_t> image_buffer = base64_decode(data_b64);
-                    data_b64.clear();
-                    auto data = stbi_load_from_memory(image_buffer.data(), image_buffer.size(), &width, &height, &channels, 3);
-                    if (!data) {
+                    img_sl.img_data = clip_image_u8_init();
+                    if (!clip_image_load_from_bytes(image_buffer.data(), image_buffer.size(), img_sl.img_data))
+                    {
                         LOG_TEE("slot %i - failed to load image [id: %i]\n", slot->id, img_sl.id);
                         return false;
                     }
-                    LOG_TEE("slot %i - image loaded [id: %i] resolution (%i x %i)\n", slot->id, img_sl.id, width, height);
-                    img_sl.img_data.nx = width;
-                    img_sl.img_data.ny = height;
-                    img_sl.img_data.size = width * height * 3;
-                    img_sl.img_data.data = new uint8_t[width * height * 3]();
-                    memcpy(img_sl.img_data.data, data, width * height * 3);
-                    stbi_image_free(data);
+                    LOG_TEE("slot %i - loaded image\n", slot->id);
                     img_sl.request_encode_image = true;
                     slot->images.push_back(img_sl);
                 }
@ -885,6 +925,7 @@ struct llama_server_context
             llama_sampling_free(slot->ctx_sampling);
         }
         slot->ctx_sampling = llama_sampling_init(slot->sparams);
+        llama_set_rng_seed(ctx, slot->params.seed);
         slot->command = LOAD_PROMPT;

         all_slots_are_idle = false;
@ -992,6 +1033,12 @@ struct llama_server_context
         slot.generated_text += token_str;
         slot.has_next_token = true;

+        if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1)
+        {
+            // we can change penalty_prompt_tokens because it is always created from scratch each request
+            slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok);
+        }
+
         // check if there is incomplete UTF-8 character at the end
         bool incomplete = false;
         for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i)
@ -1062,7 +1109,7 @@ struct llama_server_context
         }

         // check the limits
-        if (slot.n_decoded > 2 && slot.has_next_token && !slot.has_budget(params))
+        if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params))
         {
             slot.stopped_limit = true;
             slot.has_next_token = false;
@ -1098,8 +1145,8 @@ struct llama_server_context
             {
                 continue;
             }
-            clip_image_f32 img_res;
-            if (!clip_image_preprocess(clp_ctx, &img.img_data, &img_res, /*pad2square =*/ true))
+            clip_image_f32 * img_res = clip_image_f32_init();
+            if (!clip_image_preprocess(clp_ctx, img.img_data, img_res, /*pad2square =*/ true))
             {
                 LOG_TEE("Error processing the given image");
                 clip_free(clp_ctx);
@ -1114,11 +1161,12 @@ struct llama_server_context
                 return false;
             }
             LOG_TEE("slot %i - encoding image [id: %i]\n", slot.id, img.id);
-            if (!clip_image_encode(clp_ctx, params.n_threads, &img_res, img.image_embedding))
+            if (!clip_image_encode(clp_ctx, params.n_threads, img_res, img.image_embedding))
             {
                 LOG_TEE("Unable to encode image\n");
                 return false;
             }
+            clip_image_f32_free(img_res);
             img.request_encode_image = false;
         }

@ -1127,7 +1175,7 @@ struct llama_server_context

     void send_error(task_server& task, std::string error)
     {
-        std::lock_guard<std::mutex> lock(mutex_results);
+        std::unique_lock<std::mutex> lock(mutex_results);
         task_result res;
         res.id = task.id;
         res.multitask_id = task.multitask_id;
@ -1135,6 +1183,7 @@ struct llama_server_context
         res.error = true;
         res.result_json = { { "content", error } };
         queue_results.push_back(res);
+        condition_results.notify_all();
     }

     void add_multi_task(int id, std::vector<int>& sub_ids)
@ -1144,6 +1193,7 @@ struct llama_server_context
         multi.id = id;
         std::copy(sub_ids.begin(), sub_ids.end(), std::inserter(multi.subtasks_remaining, multi.subtasks_remaining.end()));
         queue_multitasks.push_back(multi);
+        condition_tasks.notify_one();
     }

     void update_multi_task(int multitask_id, int subtask_id, task_result& result)
@ -1155,6 +1205,7 @@ struct llama_server_context
|
|||||||
{
|
{
|
||||||
multitask.subtasks_remaining.erase(subtask_id);
|
multitask.subtasks_remaining.erase(subtask_id);
|
||||||
multitask.results.push_back(result);
|
multitask.results.push_back(result);
|
||||||
|
condition_tasks.notify_one();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1173,7 +1224,7 @@ struct llama_server_context
|
|||||||
{"n_ctx", slot.n_ctx},
|
{"n_ctx", slot.n_ctx},
|
||||||
{"model", params.model_alias},
|
{"model", params.model_alias},
|
||||||
{"seed", slot.params.seed},
|
{"seed", slot.params.seed},
|
||||||
{"temp", slot.sparams.temp},
|
{"temperature", slot.sparams.temp},
|
||||||
{"top_k", slot.sparams.top_k},
|
{"top_k", slot.sparams.top_k},
|
||||||
{"top_p", slot.sparams.top_p},
|
{"top_p", slot.sparams.top_p},
|
||||||
{"min_p", slot.sparams.min_p},
|
{"min_p", slot.sparams.min_p},
|
||||||
@ -1183,6 +1234,8 @@ struct llama_server_context
|
|||||||
{"repeat_penalty", slot.sparams.penalty_repeat},
|
{"repeat_penalty", slot.sparams.penalty_repeat},
|
||||||
{"presence_penalty", slot.sparams.penalty_present},
|
{"presence_penalty", slot.sparams.penalty_present},
|
||||||
{"frequency_penalty", slot.sparams.penalty_freq},
|
{"frequency_penalty", slot.sparams.penalty_freq},
|
||||||
|
{"penalty_prompt_tokens", slot.sparams.penalty_prompt_tokens},
|
||||||
|
{"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens},
|
||||||
{"mirostat", slot.sparams.mirostat},
|
{"mirostat", slot.sparams.mirostat},
|
||||||
{"mirostat_tau", slot.sparams.mirostat_tau},
|
{"mirostat_tau", slot.sparams.mirostat_tau},
|
||||||
{"mirostat_eta", slot.sparams.mirostat_eta},
|
{"mirostat_eta", slot.sparams.mirostat_eta},
|
||||||
@ -1200,7 +1253,7 @@ struct llama_server_context
|
|||||||
|
|
||||||
void send_partial_response(llama_client_slot &slot, completion_token_output tkn)
|
void send_partial_response(llama_client_slot &slot, completion_token_output tkn)
|
||||||
{
|
{
|
||||||
std::lock_guard<std::mutex> lock(mutex_results);
|
std::unique_lock<std::mutex> lock(mutex_results);
|
||||||
task_result res;
|
task_result res;
|
||||||
res.id = slot.task_id;
|
res.id = slot.task_id;
|
||||||
res.multitask_id = slot.multitask_id;
|
res.multitask_id = slot.multitask_id;
|
||||||
@ -1236,11 +1289,12 @@ struct llama_server_context
|
|||||||
}
|
}
|
||||||
|
|
||||||
queue_results.push_back(res);
|
queue_results.push_back(res);
|
||||||
|
condition_results.notify_all();
|
||||||
}
|
}
|
||||||
|
|
||||||
void send_final_response(llama_client_slot &slot)
|
void send_final_response(llama_client_slot &slot)
|
||||||
{
|
{
|
||||||
std::lock_guard<std::mutex> lock(mutex_results);
|
std::unique_lock<std::mutex> lock(mutex_results);
|
||||||
task_result res;
|
task_result res;
|
||||||
res.id = slot.task_id;
|
res.id = slot.task_id;
|
||||||
res.multitask_id = slot.multitask_id;
|
res.multitask_id = slot.multitask_id;
|
||||||
@ -1278,7 +1332,7 @@ struct llama_server_context
|
|||||||
{
|
{
|
||||||
probs = std::vector<completion_token_output>(
|
probs = std::vector<completion_token_output>(
|
||||||
slot.generated_token_probs.begin(),
|
slot.generated_token_probs.begin(),
|
||||||
slot.generated_token_probs.begin() + slot.sent_token_probs_index);
|
slot.generated_token_probs.end());
|
||||||
}
|
}
|
||||||
res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs);
|
res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs);
|
||||||
}
|
}
|
||||||
@ -1296,11 +1350,12 @@ struct llama_server_context
|
|||||||
}
|
}
|
||||||
|
|
||||||
queue_results.push_back(res);
|
queue_results.push_back(res);
|
||||||
|
condition_results.notify_all();
|
||||||
}
|
}
|
||||||
|
|
||||||
void send_embedding(llama_client_slot &slot)
|
void send_embedding(llama_client_slot &slot)
|
||||||
{
|
{
|
||||||
std::lock_guard<std::mutex> lock(mutex_results);
|
std::unique_lock<std::mutex> lock(mutex_results);
|
||||||
task_result res;
|
task_result res;
|
||||||
res.id = slot.task_id;
|
res.id = slot.task_id;
|
||||||
res.multitask_id = slot.multitask_id;
|
res.multitask_id = slot.multitask_id;
|
||||||
@ -1328,6 +1383,7 @@ struct llama_server_context
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
queue_results.push_back(res);
|
queue_results.push_back(res);
|
||||||
|
condition_results.notify_all();
|
||||||
}
|
}
|
||||||
|
|
||||||
int request_completion(json data, bool infill, bool embedding, int multitask_id)
|
int request_completion(json data, bool infill, bool embedding, int multitask_id)
|
||||||
@ -1351,6 +1407,7 @@ struct llama_server_context
|
|||||||
|
|
||||||
// otherwise, it's a single-prompt task, we actually queue it
|
// otherwise, it's a single-prompt task, we actually queue it
|
||||||
queue_tasks.push_back(task);
|
queue_tasks.push_back(task);
|
||||||
|
condition_tasks.notify_one();
|
||||||
return task.id;
|
return task.id;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1358,13 +1415,10 @@ struct llama_server_context
|
|||||||
{
|
{
|
||||||
while (true)
|
while (true)
|
||||||
{
|
{
|
||||||
std::this_thread::sleep_for(std::chrono::microseconds(5));
|
std::unique_lock<std::mutex> lock(mutex_results);
|
||||||
std::lock_guard<std::mutex> lock(mutex_results);
|
condition_results.wait(lock, [&]{
|
||||||
|
return !queue_results.empty();
|
||||||
if (queue_results.empty())
|
});
|
||||||
{
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < (int) queue_results.size(); i++)
|
for (int i = 0; i < (int) queue_results.size(); i++)
|
||||||
{
|
{
|
||||||
@ -1460,12 +1514,13 @@ struct llama_server_context
|
|||||||
|
|
||||||
void request_cancel(int task_id)
|
void request_cancel(int task_id)
|
||||||
{
|
{
|
||||||
std::lock_guard<std::mutex> lock(mutex_tasks);
|
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||||
task_server task;
|
task_server task;
|
||||||
task.id = id_gen++;
|
task.id = id_gen++;
|
||||||
task.type = CANCEL_TASK;
|
task.type = CANCEL_TASK;
|
||||||
task.target_id = task_id;
|
task.target_id = task_id;
|
||||||
queue_tasks.push_back(task);
|
queue_tasks.push_back(task);
|
||||||
|
condition_tasks.notify_one();
|
||||||
}
|
}
|
||||||
|
|
||||||
int split_multiprompt_task(task_server& multiprompt_task)
|
int split_multiprompt_task(task_server& multiprompt_task)
|
||||||
@ -1491,7 +1546,7 @@ struct llama_server_context
|
|||||||
|
|
||||||
void process_tasks()
|
void process_tasks()
|
||||||
{
|
{
|
||||||
std::lock_guard<std::mutex> lock(mutex_tasks);
|
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||||
while (!queue_tasks.empty())
|
while (!queue_tasks.empty())
|
||||||
{
|
{
|
||||||
task_server task = queue_tasks.front();
|
task_server task = queue_tasks.front();
|
||||||
@ -1563,6 +1618,7 @@ struct llama_server_context
|
|||||||
|
|
||||||
std::lock_guard<std::mutex> lock(mutex_results);
|
std::lock_guard<std::mutex> lock(mutex_results);
|
||||||
queue_results.push_back(aggregate_result);
|
queue_results.push_back(aggregate_result);
|
||||||
|
condition_results.notify_all();
|
||||||
|
|
||||||
queue_iterator = queue_multitasks.erase(queue_iterator);
|
queue_iterator = queue_multitasks.erase(queue_iterator);
|
||||||
}
|
}
|
||||||
@ -1593,8 +1649,10 @@ struct llama_server_context
|
|||||||
LOG_TEE("all slots are idle and system prompt is empty, clear the KV cache\n");
|
LOG_TEE("all slots are idle and system prompt is empty, clear the KV cache\n");
|
||||||
kv_cache_clear();
|
kv_cache_clear();
|
||||||
}
|
}
|
||||||
// avoid 100% usage of cpu all time
|
std::unique_lock<std::mutex> lock(mutex_tasks);
|
||||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
condition_tasks.wait(lock, [&]{
|
||||||
|
return !queue_tasks.empty();
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
for (llama_client_slot &slot : slots)
|
for (llama_client_slot &slot : slots)
|
||||||
@ -1652,7 +1710,6 @@ struct llama_server_context
|
|||||||
|
|
||||||
llama_batch_add(batch, slot.sampled, system_tokens.size() + slot.n_past, { slot.id }, true);
|
llama_batch_add(batch, slot.sampled, system_tokens.size() + slot.n_past, { slot.id }, true);
|
||||||
|
|
||||||
slot.n_decoded += 1;
|
|
||||||
slot.n_past += 1;
|
slot.n_past += 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1870,6 +1927,7 @@ struct llama_server_context
|
|||||||
|
|
||||||
llama_sampling_accept(slot.ctx_sampling, ctx, id, true);
|
llama_sampling_accept(slot.ctx_sampling, ctx, id, true);
|
||||||
|
|
||||||
|
slot.n_decoded += 1;
|
||||||
if (slot.n_decoded == 1)
|
if (slot.n_decoded == 1)
|
||||||
{
|
{
|
||||||
slot.t_start_genereration = ggml_time_us();
|
slot.t_start_genereration = ggml_time_us();
|
||||||
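Note: the server hunks above replace the old sleep-and-poll loops on the task and result queues with condition-variable waits (condition_results, condition_tasks); std::condition_variable::wait requires a std::unique_lock, hence the lock type changes. A minimal, self-contained sketch of that wait/notify pattern, using illustrative names rather than the server's actual types:

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct result_queue {
        std::mutex              mtx;
        std::condition_variable cond;
        std::deque<int>         results;

        void push(int r) {
            std::unique_lock<std::mutex> lock(mtx);
            results.push_back(r);
            cond.notify_all();  // wake any consumer blocked in pop()
        }

        int pop() {
            std::unique_lock<std::mutex> lock(mtx);
            cond.wait(lock, [&] { return !results.empty(); });  // sleeps instead of spinning
            int r = results.front();
            results.pop_front();
            return r;
        }
    };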
@@ -1965,6 +2023,10 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
printf(" --mmproj MMPROJ_FILE path to a multimodal projector file for LLaVA.\n");
printf(" --log-disable disables logging to a file.\n");
printf("\n");
+ printf(" --override-kv KEY=TYPE:VALUE\n");
+ printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
+ printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
+ printf("\n");
}

static void server_params_parse(int argc, char **argv, server_params &sparams,

@@ -2328,6 +2390,49 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
log_set_target(stdout);
LOG_INFO("logging to file is disabled.", {});
}
+ else if (arg == "--override-kv")
+ {
+ if (++i >= argc) {
+ invalid_param = true;
+ break;
+ }
+ char * sep = strchr(argv[i], '=');
+ if (sep == nullptr || sep - argv[i] >= 128) {
+ fprintf(stderr, "error: Malformed KV override: %s\n", argv[i]);
+ invalid_param = true;
+ break;
+ }
+ struct llama_model_kv_override kvo;
+ std::strncpy(kvo.key, argv[i], sep - argv[i]);
+ kvo.key[sep - argv[i]] = 0;
+ sep++;
+ if (strncmp(sep, "int:", 4) == 0) {
+ sep += 4;
+ kvo.tag = LLAMA_KV_OVERRIDE_INT;
+ kvo.int_value = std::atol(sep);
+ } else if (strncmp(sep, "float:", 6) == 0) {
+ sep += 6;
+ kvo.tag = LLAMA_KV_OVERRIDE_FLOAT;
+ kvo.float_value = std::atof(sep);
+ } else if (strncmp(sep, "bool:", 5) == 0) {
+ sep += 5;
+ kvo.tag = LLAMA_KV_OVERRIDE_BOOL;
+ if (std::strcmp(sep, "true") == 0) {
+ kvo.bool_value = true;
+ } else if (std::strcmp(sep, "false") == 0) {
+ kvo.bool_value = false;
+ } else {
+ fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
+ invalid_param = true;
+ break;
+ }
+ } else {
+ fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
+ invalid_param = true;
+ break;
+ }
+ params.kv_overrides.push_back(kvo);
+ }
else
{
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());

@@ -2335,6 +2440,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
exit(1);
}
}
+ if (!params.kv_overrides.empty()) {
+ params.kv_overrides.emplace_back(llama_model_kv_override());
+ params.kv_overrides.back().key[0] = 0;
+ }

if (invalid_param)
{

@@ -2393,26 +2502,33 @@ json oaicompat_completion_params_parse(
llama_params["__oaicompat"] = true;

// Map OpenAI parameters to llama.cpp parameters
+ //
+ // For parameters that are defined by the OpenAI documentation (e.g.
+ // temperature), we explicitly specify OpenAI's intended default; we
+ // need to do that because sometimes OpenAI disagrees with llama.cpp
+ //
+ // https://platform.openai.com/docs/api-reference/chat/create
+ llama_sampling_params default_sparams;
llama_params["model"] = json_value(body, "model", std::string("uknown"));
llama_params["prompt"] = format_chatml(body["messages"]); // OpenAI 'messages' to llama.cpp 'prompt'
llama_params["cache_prompt"] = json_value(body, "cache_prompt", false);
- llama_params["temperature"] = json_value(body, "temperature", 0.8);
+ llama_params["temperature"] = json_value(body, "temperature", 0.0);
- llama_params["top_k"] = json_value(body, "top_k", 40);
+ llama_params["top_k"] = json_value(body, "top_k", default_sparams.top_k);
- llama_params["top_p"] = json_value(body, "top_p", 0.95);
+ llama_params["top_p"] = json_value(body, "top_p", 1.0);
llama_params["n_predict"] = json_value(body, "max_tokens", -1);
llama_params["logit_bias"] = json_value(body, "logit_bias",json::object());
llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0);
- llama_params["seed"] = json_value(body, "seed", 0);
+ llama_params["seed"] = json_value(body, "seed", LLAMA_DEFAULT_SEED);
llama_params["stream"] = json_value(body, "stream", false);
- llama_params["mirostat"] = json_value(body, "mirostat", false);
+ llama_params["mirostat"] = json_value(body, "mirostat", default_sparams.mirostat);
- llama_params["mirostat_tau"] = json_value(body, "mirostat_tau", 0.0);
+ llama_params["mirostat_tau"] = json_value(body, "mirostat_tau", default_sparams.mirostat_tau);
- llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", 0.0);
+ llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", default_sparams.mirostat_eta);
- llama_params["penalize_nl"] = json_value(body, "penalize_nl", false);
+ llama_params["penalize_nl"] = json_value(body, "penalize_nl", default_sparams.penalize_nl);
- llama_params["typical_p"] = json_value(body, "typical_p", 0.0);
+ llama_params["typical_p"] = json_value(body, "typical_p", default_sparams.typical_p);
- llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", 0);
+ llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", default_sparams.penalty_last_n);
llama_params["ignore_eos"] = json_value(body, "ignore_eos", false);
- llama_params["tfs_z"] = json_value(body, "tfs_z", 0.0);
+ llama_params["tfs_z"] = json_value(body, "tfs_z", default_sparams.tfs_z);

if (body.count("grammar") != 0) {
llama_params["grammar"] = json_value(body, "grammar", json::object());

@@ -3026,7 +3142,17 @@ int main(int argc, char **argv)
{
prompt = "";
}
- const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+ json image_data;
+ if (body.count("image_data") != 0) {
+ image_data = body["image_data"];
+ }
+ else
+ {
+ image_data = "";
+ }
+
+ const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1);
task_result result = llama.next_result(task_id);
return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
});
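Note: the new --override-kv handling above terminates params.kv_overrides with an entry whose key is an empty string; the loader reads overrides until it reaches that empty key. A hedged sketch of building the same list directly in code (field and enum names as they appear in the hunk; the override value is only an example):

    #include <cstring>
    #include <vector>
    #include "llama.h"   // assumed include path for llama_model_kv_override

    static std::vector<llama_model_kv_override> make_overrides() {
        std::vector<llama_model_kv_override> kv_overrides;

        llama_model_kv_override kvo;
        std::strncpy(kvo.key, "tokenizer.ggml.add_bos_token", sizeof(kvo.key) - 1);
        kvo.key[sizeof(kvo.key) - 1] = 0;        // keep the fixed-size key NUL-terminated
        kvo.tag        = LLAMA_KV_OVERRIDE_BOOL;
        kvo.bool_value = false;
        kv_overrides.push_back(kvo);

        // empty-key sentinel, mirroring the server_params_parse change above
        kv_overrides.emplace_back(llama_model_kv_override());
        kv_overrides.back().key[0] = 0;
        return kv_overrides;
    }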
@@ -369,10 +369,7 @@ static struct ggml_tensor * llama_build_train_graphs(
checkpoints.push_back(t00);
checkpoints.push_back(t01);

- struct ggml_tensor * kv_scale = NULL;
- if (!enable_flash_attn) {
- kv_scale = ggml_new_f32(ctx, 1.0f/sqrtf(float(n_embd)/n_head));
- }
+ const float kv_scale = 1.0f/sqrtf(float(n_embd)/n_head);

for (int il = 0; il < n_layer; ++il) {
struct my_llama_layer & layer = model->layers[il];

@@ -444,14 +441,13 @@ static struct ggml_tensor * llama_build_train_graphs(
// make sure some tensors are not reallocated by inserting new temporary nodes depending on them
int n_leafs_before = gb->n_leafs;
int n_nodes_before = gb->n_nodes;
- struct ggml_tensor * one = ggml_new_f32(ctx, 1.0f);
// output tensors
- ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, one));
+ ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, 1.0f));
- ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, one));
+ ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, 1.0f));
// input gradient
- ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one));
+ ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, 1.0f));
// KQ_pos
- ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, one));
+ ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, 1.0f));
GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL);

ggml_allocr_alloc(alloc, t36->grad);
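Note: the hunks above track ggml's change of ggml_scale / ggml_scale_inplace to accept the scale factor as a plain float, so the one-element tensors previously created with ggml_new_f32 (kv_scale, one) are no longer needed. A hedged sketch of the new call shape, assuming a valid ggml_context and input tensor created elsewhere:

    #include <math.h>
    #include "ggml.h"   // assumed include path

    // scale attention scores by 1/sqrt(head_dim), passing the factor directly as a float
    static struct ggml_tensor * scale_kq(struct ggml_context * ctx, struct ggml_tensor * kq,
                                         int n_embd, int n_head) {
        const float kv_scale = 1.0f / sqrtf((float) n_embd / (float) n_head);
        return ggml_scale_inplace(ctx, kq, kv_scale);
    }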
55  flake.lock

@@ -1,30 +1,30 @@
{
"nodes": {
- "flake-utils": {
+ "flake-parts": {
"inputs": {
- "systems": "systems"
+ "nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
- "lastModified": 1694529238,
+ "lastModified": 1701473968,
- "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
+ "narHash": "sha256-YcVE5emp1qQ8ieHUnxt1wCZCC3ZfAS+SRRWZ2TMda7E=",
- "owner": "numtide",
+ "owner": "hercules-ci",
- "repo": "flake-utils",
+ "repo": "flake-parts",
- "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
+ "rev": "34fed993f1674c8d06d58b37ce1e0fe5eebcb9f5",
"type": "github"
},
"original": {
- "owner": "numtide",
+ "owner": "hercules-ci",
- "repo": "flake-utils",
+ "repo": "flake-parts",
"type": "github"
}
},
"nixpkgs": {
"locked": {
- "lastModified": 1698318101,
+ "lastModified": 1703637592,
- "narHash": "sha256-gUihHt3yPD7bVqg+k/UVHgngyaJ3DMEBchbymBMvK1E=",
+ "narHash": "sha256-8MXjxU0RfFfzl57Zy3OfXCITS0qWDNLzlBAdwxGZwfY=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "63678e9f3d3afecfeafa0acead6239cdb447574c",
+ "rev": "cfc3698c31b1fb9cdcf10f36c9643460264d0ca8",
"type": "github"
},
"original": {

@@ -34,26 +34,29 @@
"type": "github"
}
},
- "root": {
- "inputs": {
- "flake-utils": "flake-utils",
- "nixpkgs": "nixpkgs"
- }
- },
- "systems": {
+ "nixpkgs-lib": {
"locked": {
- "lastModified": 1681028828,
- "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
- "owner": "nix-systems",
- "repo": "default",
- "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "dir": "lib",
+ "lastModified": 1701253981,
+ "narHash": "sha256-ztaDIyZ7HrTAfEEUt9AtTDNoCYxUdSd6NrRHaYOIxtk=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "e92039b55bcd58469325ded85d4f58dd5a4eaf58",
"type": "github"
},
"original": {
- "owner": "nix-systems",
- "repo": "default",
+ "dir": "lib",
+ "owner": "NixOS",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
"type": "github"
}
+ },
+ "root": {
+ "inputs": {
+ "flake-parts": "flake-parts",
+ "nixpkgs": "nixpkgs"
+ }
}
},
"root": "root",
263  flake.nix

@@ -1,139 +1,144 @@
{
+ description = "Port of Facebook's LLaMA model in C/C++";
+
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
- flake-utils.url = "github:numtide/flake-utils";
+ flake-parts.url = "github:hercules-ci/flake-parts";
};
- outputs = { self, nixpkgs, flake-utils }:
- flake-utils.lib.eachDefaultSystem (system:
+ # Optional binary cache
+ nixConfig = {
+ extra-substituters = [
+ # Populated by the CI in ggerganov/llama.cpp
+ "https://llama-cpp.cachix.org"
+
+ # A development cache for nixpkgs imported with `config.cudaSupport = true`.
+ # Populated by https://hercules-ci.com/github/SomeoneSerge/nixpkgs-cuda-ci.
+ # This lets one skip building e.g. the CUDA-enabled openmpi.
+ # TODO: Replace once nix-community obtains an official one.
+ "https://cuda-maintainers.cachix.org"
+ ];
+
+ # Verify these are the same keys as published on
+ # - https://app.cachix.org/cache/llama-cpp
+ # - https://app.cachix.org/cache/cuda-maintainers
+ extra-trusted-public-keys = [
+ "llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc="
+ "cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E="
+ ];
+ };
+
+ # For inspection, use `nix flake show github:ggerganov/llama.cpp` or the nix repl:
+ #
+ # ```bash
+ # ❯ nix repl
+ # nix-repl> :lf github:ggerganov/llama.cpp
+ # Added 13 variables.
+ # nix-repl> outputs.apps.x86_64-linux.quantize
+ # { program = "/nix/store/00000000000000000000000000000000-llama.cpp/bin/quantize"; type = "app"; }
+ # ```
+ outputs =
+ { self, flake-parts, ... }@inputs:
let
- name = "llama.cpp";
- src = ./.;
- meta.mainProgram = "llama";
- inherit (pkgs.stdenv) isAarch32 isAarch64 isDarwin;
- buildInputs = with pkgs; [ openmpi ];
- osSpecific = with pkgs; buildInputs ++ (
- if isAarch64 && isDarwin then
- with pkgs.darwin.apple_sdk_11_0.frameworks; [
- Accelerate
- MetalKit
- ]
- else if isAarch32 && isDarwin then
- with pkgs.darwin.apple_sdk.frameworks; [
- Accelerate
- CoreGraphics
- CoreVideo
- ]
- else if isDarwin then
- with pkgs.darwin.apple_sdk.frameworks; [
- Accelerate
- CoreGraphics
- CoreVideo
- ]
- else
- with pkgs; [ openblas ]
- );
- pkgs = import nixpkgs { inherit system; };
- nativeBuildInputs = with pkgs; [ cmake ninja pkg-config ];
- cudatoolkit_joined = with pkgs; symlinkJoin {
- # HACK(Green-Sky): nix currently has issues with cmake findcudatoolkit
- # see https://github.com/NixOS/nixpkgs/issues/224291
- # copied from jaxlib
- name = "${cudaPackages.cudatoolkit.name}-merged";
- paths = [
- cudaPackages.cudatoolkit.lib
- cudaPackages.cudatoolkit.out
- ] ++ lib.optionals (lib.versionOlder cudaPackages.cudatoolkit.version "11") [
- # for some reason some of the required libs are in the targets/x86_64-linux
- # directory; not sure why but this works around it
- "${cudaPackages.cudatoolkit}/targets/${system}"
- ];
- };
- llama-python =
- pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]);
- # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime
- llama-python-extra =
- pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece torchWithoutCuda transformers ]);
- postPatch = ''
- substituteInPlace ./ggml-metal.m \
- --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
- substituteInPlace ./*.py --replace '/usr/bin/env python' '${llama-python}/bin/python'
- '';
- postInstall = ''
- mv $out/bin/main $out/bin/llama
- mv $out/bin/server $out/bin/llama-server
- mkdir -p $out/include
- cp ${src}/llama.h $out/include/
- '';
- cmakeFlags = [ "-DLLAMA_NATIVE=OFF" "-DLLAMA_BUILD_SERVER=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ];
+ # We could include the git revisions in the package names but those would
+ # needlessly trigger rebuilds:
+ # llamaVersion = self.dirtyShortRev or self.shortRev;
+
+ # Nix already uses cryptographic hashes for versioning, so we'll just fix
+ # the fake semver for now:
+ llamaVersion = "0.0.0";
in
+ flake-parts.lib.mkFlake { inherit inputs; }

{
- packages.default = pkgs.stdenv.mkDerivation {
- inherit name src meta postPatch nativeBuildInputs postInstall;
- buildInputs = osSpecific;
- cmakeFlags = cmakeFlags
- ++ (if isAarch64 && isDarwin then [
- "-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1"
- "-DLLAMA_METAL=ON"
- ] else [
- "-DLLAMA_BLAS=ON"
- "-DLLAMA_BLAS_VENDOR=OpenBLAS"
- ]);
- };
- packages.opencl = pkgs.stdenv.mkDerivation {
- inherit name src meta postPatch nativeBuildInputs postInstall;
- buildInputs = with pkgs; buildInputs ++ [ clblast ];
- cmakeFlags = cmakeFlags ++ [
- "-DLLAMA_CLBLAST=ON"
+ imports = [
+ .devops/nix/nixpkgs-instances.nix
+ .devops/nix/apps.nix
+ .devops/nix/devshells.nix
+ .devops/nix/jetson-support.nix
];
- };
- packages.cuda = pkgs.stdenv.mkDerivation {
- inherit name src meta postPatch nativeBuildInputs postInstall;
- buildInputs = with pkgs; buildInputs ++ [ cudatoolkit_joined ];
- cmakeFlags = cmakeFlags ++ [
- "-DLLAMA_CUBLAS=ON"
- ];
- };
- packages.rocm = pkgs.stdenv.mkDerivation {
- inherit name src meta postPatch nativeBuildInputs postInstall;
- buildInputs = with pkgs.rocmPackages; buildInputs ++ [ clr hipblas rocblas ];
- cmakeFlags = cmakeFlags ++ [
- "-DLLAMA_HIPBLAS=1"
- "-DCMAKE_C_COMPILER=hipcc"
- "-DCMAKE_CXX_COMPILER=hipcc"
- # Build all targets supported by rocBLAS. When updating search for TARGET_LIST_ROCM
- # in github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/CMakeLists.txt
- # and select the line that matches the current nixpkgs version of rocBLAS.
- "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
- ];
- };
- apps.llama-server = {
- type = "app";
- program = "${self.packages.${system}.default}/bin/llama-server";
- };
- apps.llama-embedding = {
- type = "app";
- program = "${self.packages.${system}.default}/bin/embedding";
- };
- apps.llama = {
- type = "app";
- program = "${self.packages.${system}.default}/bin/llama";
- };
- apps.quantize = {
- type = "app";
- program = "${self.packages.${system}.default}/bin/quantize";
- };
- apps.train-text-from-scratch = {
- type = "app";
- program = "${self.packages.${system}.default}/bin/train-text-from-scratch";
- };
- apps.default = self.apps.${system}.llama;
- devShells.default = pkgs.mkShell {
- buildInputs = [ llama-python ];
- packages = nativeBuildInputs ++ osSpecific;
- };
- devShells.extra = pkgs.mkShell {
- buildInputs = [ llama-python-extra ];
- packages = nativeBuildInputs ++ osSpecific;
- };
+ # An overlay can be used to have a more granular control over llama-cpp's
+ # dependencies and configuration, than that offered by the `.override`
+ # mechanism. Cf. https://nixos.org/manual/nixpkgs/stable/#chap-overlays.
+ #
+ # E.g. in a flake:
+ # ```
+ # { nixpkgs, llama-cpp, ... }:
+ # let pkgs = import nixpkgs {
+ #     overlays = [ (llama-cpp.overlays.default) ];
+ #     system = "aarch64-linux";
+ #     config.allowUnfree = true;
+ #     config.cudaSupport = true;
+ #     config.cudaCapabilities = [ "7.2" ];
+ #     config.cudaEnableForwardCompat = false;
+ # }; in {
+ #     packages.aarch64-linux.llamaJetsonXavier = pkgs.llamaPackages.llama-cpp;
+ # }
+ # ```
+ #
+ # Cf. https://nixos.org/manual/nix/unstable/command-ref/new-cli/nix3-flake.html?highlight=flake#flake-format
+ flake.overlays.default =
+ (final: prev: {
+ llamaPackages = final.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
+ inherit (final.llamaPackages) llama-cpp;
});

+ systems = [
+ "aarch64-darwin"
+ "aarch64-linux"
+ "x86_64-darwin" # x86_64-darwin isn't tested (and likely isn't relevant)
+ "x86_64-linux"
+ ];
+
+ perSystem =
+ {
+ config,
+ lib,
+ system,
+ pkgs,
+ pkgsCuda,
+ pkgsRocm,
+ ...
+ }:
+ {
+ # Unlike `.#packages`, legacyPackages may contain values of
+ # arbitrary types (including nested attrsets) and may even throw
+ # exceptions. This attribute isn't recursed into by `nix flake
+ # show` either.
+ #
+ # You can add arbitrary scripts to `.devops/nix/scope.nix` and
+ # access them as `nix build .#llamaPackages.${scriptName}` using
+ # the same path you would with an overlay.
+ legacyPackages = {
+ llamaPackages = pkgs.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
+ llamaPackagesCuda = pkgsCuda.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
+ llamaPackagesRocm = pkgsRocm.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
+ };
+
+ # We don't use the overlay here so as to avoid making too many instances of nixpkgs,
+ # cf. https://zimbatm.com/notes/1000-instances-of-nixpkgs
+ packages =
+ {
+ default = config.legacyPackages.llamaPackages.llama-cpp;
+ }
+ // lib.optionalAttrs pkgs.stdenv.isLinux {
+ opencl = config.packages.default.override { useOpenCL = true; };
+ cuda = config.legacyPackages.llamaPackagesCuda.llama-cpp;
+
+ mpi-cpu = config.packages.default.override { useMpi = true; };
+ mpi-cuda = config.packages.default.override { useMpi = true; };
+ }
+ // lib.optionalAttrs (system == "x86_64-linux") {
+ rocm = config.legacyPackages.llamaPackagesRocm.llama-cpp;
+ };
+
+ # Packages exposed in `.#checks` will be built by the CI and by
+ # `nix flake check`. Currently we expose all packages, but we could
+ # make more granular choices
+ checks = config.packages;
+ };
+ };
}
@@ -72,7 +72,7 @@ static void remove_allocated_tensor(ggml_tallocr_t alloc, struct ggml_tensor * t

// check if a tensor is allocated by this buffer
static bool ggml_tallocr_is_own(ggml_tallocr_t alloc, const struct ggml_tensor * tensor) {
- return tensor->buffer == alloc->buffer;
+ return tensor->buffer == alloc->buffer && (!tensor->view_src || tensor->view_src->buffer == alloc->buffer);
}

static bool ggml_is_view(struct ggml_tensor * t) {

@@ -90,7 +90,7 @@ extern "C" {
void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);

// compute graph without a plan
- void (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+ bool (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);

// check if the backend supports an operation
bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);

@@ -195,11 +195,14 @@ void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_
ggml_backend_synchronize(backend);
}

- void ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
- backend->iface.graph_compute(backend, cgraph);
+ bool ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ if (!backend->iface.graph_compute(backend, cgraph)) {
+ return false;
+ }

// TODO: optional sync
ggml_backend_synchronize(backend);
+ return true;
}

bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {

@@ -297,7 +300,7 @@ static void ggml_backend_registry_init(void) {
void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data) {
GGML_ASSERT(ggml_backend_registry_count < GGML_MAX_BACKENDS_REG);

- int id = ggml_backend_registry_count;
+ size_t id = ggml_backend_registry_count;

ggml_backend_registry[id] = (struct ggml_backend_reg) {
/* .name = */ {0},

@@ -330,6 +333,8 @@ size_t ggml_backend_reg_find_by_name(const char * name) {
return i;
}
}

+ // not found
return SIZE_MAX;
}

@@ -340,15 +345,15 @@ ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str)
const char * params = strchr(backend_str, ':');
char backend_name[128];
if (params == NULL) {
- strcpy(backend_name, backend_str);
+ snprintf(backend_name, sizeof(backend_name), "%s", backend_str);
params = "";
} else {
- strncpy(backend_name, backend_str, params - backend_str);
- backend_name[params - backend_str] = '\0';
+ snprintf(backend_name, sizeof(backend_name), "%.*s", (int)(params - backend_str), backend_str);
params++;
}

size_t backend_i = ggml_backend_reg_find_by_name(backend_name);

if (backend_i == SIZE_MAX) {
fprintf(stderr, "%s: backend %s not found\n", __func__, backend_name);
return NULL;

@@ -396,18 +401,12 @@ static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
}

static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
- GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
- GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
-
memcpy((char *)tensor->data + offset, data, size);

GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
- GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
- GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
-
memcpy(data, (const char *)tensor->data + offset, size);

GGML_UNUSED(buffer);

@@ -601,7 +600,7 @@ static void ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_bac
GGML_UNUSED(backend);
}

- static void ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ static bool ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;

struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);

@@ -615,13 +614,18 @@ static void ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_c
cplan.work_data = cpu_ctx->work_data;

ggml_graph_compute(cgraph, &cplan);
+ return true;
}

static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+ switch (op->op) {
+ case GGML_OP_MUL_MAT:
+ return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_internal_get_type_traits(op->src[0]->type).vec_dot_type;
+ default:
return true;
+ }

GGML_UNUSED(backend);
- GGML_UNUSED(op);
}

static struct ggml_backend_i cpu_backend_i = {

@@ -58,7 +58,7 @@ extern "C" {

GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
GGML_API void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
- GGML_API void ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+ GGML_API bool ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
GGML_API bool ggml_backend_supports_op (ggml_backend_t backend, const struct ggml_tensor * op);

// tensor copy between different backends
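Note: ggml_backend_graph_compute and the backend graph_compute callback now report success as a bool instead of returning void. A hedged caller-side sketch, assuming the headers above are on the include path and that the backend and graph were created elsewhere:

    #include <cstdio>
    #include "ggml.h"
    #include "ggml-backend.h"

    // Propagates a failed graph computation instead of silently ignoring it,
    // which the old void-returning API forced callers to do.
    static bool compute_graph_checked(ggml_backend_t backend, struct ggml_cgraph * gf) {
        if (!ggml_backend_graph_compute(backend, gf)) {
            fprintf(stderr, "ggml_backend_graph_compute failed\n");
            return false;
        }
        return true;
    }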
1543  ggml-cuda.cu
File diff suppressed because it is too large
@@ -5,6 +5,7 @@
// GGML internal header

#include <assert.h>
+ #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
#include <stddef.h>
#include <stdbool.h>
#include <string.h> // memcpy

@@ -87,7 +87,7 @@ int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx);

// same as ggml_graph_compute but uses Metal
// creates gf->n_threads command buffers in parallel
- void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
+ bool ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);

//
// backend API

110  ggml-metal.m

@@ -87,6 +87,8 @@ struct ggml_metal_context {
GGML_METAL_DECL_KERNEL(get_rows_q4_K);
GGML_METAL_DECL_KERNEL(get_rows_q5_K);
GGML_METAL_DECL_KERNEL(get_rows_q6_K);
+ GGML_METAL_DECL_KERNEL(get_rows_i32);
+ GGML_METAL_DECL_KERNEL(get_rows_iq2_xxs);
GGML_METAL_DECL_KERNEL(rms_norm);
GGML_METAL_DECL_KERNEL(group_norm);
GGML_METAL_DECL_KERNEL(norm);

@@ -105,6 +107,7 @@ struct ggml_metal_context {
GGML_METAL_DECL_KERNEL(mul_mv_q4_K_f32);
GGML_METAL_DECL_KERNEL(mul_mv_q5_K_f32);
GGML_METAL_DECL_KERNEL(mul_mv_q6_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_iq2_xxs_f32);
GGML_METAL_DECL_KERNEL(mul_mv_id_f32_f32);
//GGML_METAL_DECL_KERNEL(mul_mv_id_f16_f16);
GGML_METAL_DECL_KERNEL(mul_mv_id_f16_f32);

@@ -120,6 +123,7 @@ struct ggml_metal_context {
GGML_METAL_DECL_KERNEL(mul_mv_id_q4_K_f32);
GGML_METAL_DECL_KERNEL(mul_mv_id_q5_K_f32);
GGML_METAL_DECL_KERNEL(mul_mv_id_q6_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_id_iq2_xxs_f32);
GGML_METAL_DECL_KERNEL(mul_mm_f32_f32);
GGML_METAL_DECL_KERNEL(mul_mm_f16_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32);

@@ -132,6 +136,7 @@ struct ggml_metal_context {
GGML_METAL_DECL_KERNEL(mul_mm_q4_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q5_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q6_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mm_iq2_xxs_f32);
GGML_METAL_DECL_KERNEL(mul_mm_id_f32_f32);
GGML_METAL_DECL_KERNEL(mul_mm_id_f16_f32);
GGML_METAL_DECL_KERNEL(mul_mm_id_q4_0_f32);

@@ -144,6 +149,7 @@ struct ggml_metal_context {
GGML_METAL_DECL_KERNEL(mul_mm_id_q4_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_id_q5_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_id_q6_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mm_id_iq2_xxs_f32);
GGML_METAL_DECL_KERNEL(rope_f32);
GGML_METAL_DECL_KERNEL(rope_f16);
GGML_METAL_DECL_KERNEL(alibi_f32);

@@ -259,6 +265,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
NSError * error = nil;
NSString * libPath = [bundle pathForResource:@"default" ofType:@"metallib"];
if (libPath != nil) {
+ // pre-compiled library found
NSURL * libURL = [NSURL fileURLWithPath:libPath];
GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [libPath UTF8String]);
ctx->library = [ctx->device newLibraryWithURL:libURL error:&error];

@@ -291,6 +298,13 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
options = [MTLCompileOptions new];
options.preprocessorMacros = @{ @"QK_K" : @(64) };
#endif
+ // try to disable fast-math
+ // NOTE: this seems to have no effect whatsoever
+ // instead, in order to disable fast-math, we have to build default.metallib from the command line
+ // using xcrun -sdk macosx metal -fno-fast-math -c ggml-metal.metal -o ggml-metal.air
+ // and go through the "pre-compiled library found" path above
+ //[options setFastMathEnabled:false];
+
ctx->library = [ctx->device newLibraryWithSource:src options:options error:&error];
}

@@ -369,6 +383,8 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(get_rows_q4_K);
GGML_METAL_ADD_KERNEL(get_rows_q5_K);
GGML_METAL_ADD_KERNEL(get_rows_q6_K);
+ GGML_METAL_ADD_KERNEL(get_rows_i32);
+ GGML_METAL_ADD_KERNEL(get_rows_iq2_xxs);
GGML_METAL_ADD_KERNEL(rms_norm);
GGML_METAL_ADD_KERNEL(group_norm);
GGML_METAL_ADD_KERNEL(norm);

@@ -387,6 +403,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(mul_mv_q4_K_f32);
GGML_METAL_ADD_KERNEL(mul_mv_q5_K_f32);
GGML_METAL_ADD_KERNEL(mul_mv_q6_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_iq2_xxs_f32);
GGML_METAL_ADD_KERNEL(mul_mv_id_f32_f32);
//GGML_METAL_ADD_KERNEL(mul_mv_id_f16_f16);
GGML_METAL_ADD_KERNEL(mul_mv_id_f16_f32);

@@ -402,6 +419,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(mul_mv_id_q4_K_f32);
GGML_METAL_ADD_KERNEL(mul_mv_id_q5_K_f32);
GGML_METAL_ADD_KERNEL(mul_mv_id_q6_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_id_iq2_xxs_f32);
if ([ctx->device supportsFamily:MTLGPUFamilyApple7]) {
GGML_METAL_ADD_KERNEL(mul_mm_f32_f32);
GGML_METAL_ADD_KERNEL(mul_mm_f16_f32);

@@ -415,6 +433,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32);
GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32);
GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_iq2_xxs_f32);
GGML_METAL_ADD_KERNEL(mul_mm_id_f32_f32);
GGML_METAL_ADD_KERNEL(mul_mm_id_f16_f32);
GGML_METAL_ADD_KERNEL(mul_mm_id_q4_0_f32);

@@ -427,6 +446,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(mul_mm_id_q4_K_f32);
GGML_METAL_ADD_KERNEL(mul_mm_id_q5_K_f32);
GGML_METAL_ADD_KERNEL(mul_mm_id_q6_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_id_iq2_xxs_f32);
}
GGML_METAL_ADD_KERNEL(rope_f32);
GGML_METAL_ADD_KERNEL(rope_f16);

@@ -491,6 +511,8 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
GGML_METAL_DEL_KERNEL(get_rows_q4_K);
GGML_METAL_DEL_KERNEL(get_rows_q5_K);
GGML_METAL_DEL_KERNEL(get_rows_q6_K);
+ GGML_METAL_DEL_KERNEL(get_rows_i32);
+ GGML_METAL_DEL_KERNEL(get_rows_iq2_xxs);
GGML_METAL_DEL_KERNEL(rms_norm);
GGML_METAL_DEL_KERNEL(group_norm);
GGML_METAL_DEL_KERNEL(norm);

@@ -509,6 +531,7 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
GGML_METAL_DEL_KERNEL(mul_mv_q4_K_f32);
GGML_METAL_DEL_KERNEL(mul_mv_q5_K_f32);
GGML_METAL_DEL_KERNEL(mul_mv_q6_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_iq2_xxs_f32);
GGML_METAL_DEL_KERNEL(mul_mv_id_f32_f32);
//GGML_METAL_DEL_KERNEL(mul_mv_id_f16_f16);
GGML_METAL_DEL_KERNEL(mul_mv_id_f16_f32);

@@ -524,6 +547,7 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
GGML_METAL_DEL_KERNEL(mul_mv_id_q4_K_f32);
GGML_METAL_DEL_KERNEL(mul_mv_id_q5_K_f32);
GGML_METAL_DEL_KERNEL(mul_mv_id_q6_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_id_iq2_xxs_f32);
if ([ctx->device supportsFamily:MTLGPUFamilyApple7]) {
GGML_METAL_DEL_KERNEL(mul_mm_f32_f32);
GGML_METAL_DEL_KERNEL(mul_mm_f16_f32);

@@ -537,6 +561,7 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
GGML_METAL_DEL_KERNEL(mul_mm_q4_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q5_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_q6_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_iq2_xxs_f32);
GGML_METAL_DEL_KERNEL(mul_mm_id_f32_f32);
GGML_METAL_DEL_KERNEL(mul_mm_id_f16_f32);
GGML_METAL_DEL_KERNEL(mul_mm_id_q4_0_f32);

@@ -549,6 +574,7 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
GGML_METAL_DEL_KERNEL(mul_mm_id_q4_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_id_q5_K_f32);
GGML_METAL_DEL_KERNEL(mul_mm_id_q6_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_id_iq2_xxs_f32);
}
GGML_METAL_DEL_KERNEL(rope_f32);
GGML_METAL_DEL_KERNEL(rope_f16);

@@ -966,7 +992,7 @@ static bool ggml_metal_supports_op(const struct ggml_tensor * op) {
return false;
}
}
- void ggml_metal_graph_compute(
+ bool ggml_metal_graph_compute(
struct ggml_metal_context * ctx,
struct ggml_cgraph * gf) {
@autoreleasepool {

@@ -1230,7 +1256,7 @@ void ggml_metal_graph_compute(
// not sure how to avoid this
// TODO: make a simpler cpy_bytes kernel

- const int nth = MIN(1024, ne00);
+ const int nth = MIN((int) ctx->pipeline_cpy_f32_f32.maxTotalThreadsPerThreadgroup, ne00);

[encoder setComputePipelineState:ctx->pipeline_cpy_f32_f32];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];

@@ -1285,7 +1311,7 @@ void ggml_metal_graph_compute(
[encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:26];
[encoder setBytes:&offs length:sizeof(offs) atIndex:27];

- const int nth = MIN(1024, ne0);
+ const int nth = MIN((int) ctx->pipeline_add.maxTotalThreadsPerThreadgroup, ne00);

[encoder dispatchThreadgroups:MTLSizeMake(ne11, ne12, ne13) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;

@@ -1293,7 +1319,7 @@ void ggml_metal_graph_compute(
{
GGML_ASSERT(ggml_is_contiguous(src0));

- const float scale = *(const float *) src1->data;
+ const float scale = *(const float *) dst->op_params;

int64_t n = ggml_nelements(dst);

@@ -1530,6 +1556,7 @@ void ggml_metal_graph_compute(
case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_K_f32]; break;
case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_K_f32]; break;
case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q6_K_f32]; break;
+ case GGML_TYPE_IQ2_XXS: [encoder setComputePipelineState:ctx->pipeline_mul_mm_iq2_xxs_f32]; break;
default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
}
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];

@@ -1642,6 +1669,12 @@ void ggml_metal_graph_compute(
nth1 = 32;
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q6_K_f32];
} break;
+ case GGML_TYPE_IQ2_XXS:
+ {
+ nth0 = 4;
+ nth1 = 16;
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_iq2_xxs_f32];
+ } break;
default:
{
GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t);

@@ -1649,6 +1682,10 @@ void ggml_metal_graph_compute(
}
};

+ if (ggml_is_quantized(src0t)) {
+ GGML_ASSERT(ne00 >= nth0*nth1);
+ }
+
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
[encoder setBuffer:id_dst offset:offs_dst atIndex:2];

@@ -1671,9 +1708,14 @@ void ggml_metal_graph_compute(
if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
|
if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
|
||||||
src0t == GGML_TYPE_Q5_0 || src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 ||
|
src0t == GGML_TYPE_Q5_0 || src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 ||
|
||||||
|
//src0t == GGML_TYPE_IQ2_XXS ||
|
||||||
src0t == GGML_TYPE_Q2_K) { // || src0t == GGML_TYPE_Q4_K) {
|
src0t == GGML_TYPE_Q2_K) { // || src0t == GGML_TYPE_Q4_K) {
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
||||||
}
|
}
|
||||||
|
else if (src0t == GGML_TYPE_IQ2_XXS) {
|
||||||
|
[encoder setThreadgroupMemoryLength:(256*8+128) atIndex:0];
|
||||||
|
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
||||||
|
}
|
||||||
else if (src0t == GGML_TYPE_Q4_K) {
|
else if (src0t == GGML_TYPE_Q4_K) {
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
||||||
}
|
}
|
||||||
@ -1707,6 +1749,9 @@ void ggml_metal_graph_compute(
|
|||||||
// TODO: make this more general
|
// TODO: make this more general
|
||||||
GGML_ASSERT(n_as <= 8);
|
GGML_ASSERT(n_as <= 8);
|
||||||
|
|
||||||
|
// max size of the src1ids array in the kernel stack
|
||||||
|
GGML_ASSERT(ne11 <= 512);
|
||||||
|
|
||||||
struct ggml_tensor * src2 = gf->nodes[i]->src[2];
|
struct ggml_tensor * src2 = gf->nodes[i]->src[2];
|
||||||
|
|
||||||
const int64_t ne20 = src2 ? src2->ne[0] : 0;
|
const int64_t ne20 = src2 ? src2->ne[0] : 0;
|
||||||
@ -1724,9 +1769,6 @@ void ggml_metal_graph_compute(
|
|||||||
GGML_ASSERT(!ggml_is_transposed(src2));
|
GGML_ASSERT(!ggml_is_transposed(src2));
|
||||||
GGML_ASSERT(!ggml_is_transposed(src1));
|
GGML_ASSERT(!ggml_is_transposed(src1));
|
||||||
|
|
||||||
GGML_ASSERT(ne20 % 32 == 0);
|
|
||||||
// !!!!!!!!! TODO: this assert is probably required but not sure!
|
|
||||||
//GGML_ASSERT(ne20 >= 64);
|
|
||||||
GGML_ASSERT(src1t == GGML_TYPE_F32);
|
GGML_ASSERT(src1t == GGML_TYPE_F32);
|
||||||
|
|
||||||
const uint r2 = ne12/ne22;
|
const uint r2 = ne12/ne22;
|
||||||
@ -1734,22 +1776,22 @@ void ggml_metal_graph_compute(
|
|||||||
|
|
||||||
// find the break-even point where the matrix-matrix kernel becomes more efficient compared
|
// find the break-even point where the matrix-matrix kernel becomes more efficient compared
|
||||||
// to the matrix-vector kernel
|
// to the matrix-vector kernel
|
||||||
int ne11_mm_min = 1;
|
int ne11_mm_min = n_as;
|
||||||
|
|
||||||
const int idx = ((int32_t *) dst->op_params)[0];
|
const int idx = ((int32_t *) dst->op_params)[0];
|
||||||
|
|
||||||
// batch size
|
// batch size
|
||||||
GGML_ASSERT(ne01 == ne11);
|
GGML_ASSERT(ne01 == ne11);
|
||||||
|
|
||||||
const int64_t _ne1 = 1; // kernel_mul_mm_impl needs a reference in constant memory
|
|
||||||
|
|
||||||
// for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
|
// for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
|
||||||
// AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
|
// AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
|
||||||
// !!!
|
// !!!
|
||||||
// TODO: for now, always use mat-vec kernels until we figure out how to improve the
|
// TODO: for now, always use mat-vec kernels until we figure out how to improve the
|
||||||
// indirect matrix multiplication
|
// indirect matrix multiplication
|
||||||
// !!!
|
// !!!
|
||||||
if ([ctx->device supportsFamily:MTLGPUFamilyApple7] && _ne1 > ne11_mm_min) {
|
if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
|
||||||
|
ne20 % 32 == 0 && ne20 >= 64 &&
|
||||||
|
ne11 > ne11_mm_min) {
|
||||||
switch (src2->type) {
|
switch (src2->type) {
|
||||||
case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_f32_f32]; break;
|
case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_f32_f32]; break;
|
||||||
case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_f16_f32]; break;
|
case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_f16_f32]; break;
|
||||||
@ -1763,6 +1805,7 @@ void ggml_metal_graph_compute(
|
|||||||
case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_q4_K_f32]; break;
|
case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_q4_K_f32]; break;
|
||||||
case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_q5_K_f32]; break;
|
case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_q5_K_f32]; break;
|
||||||
case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_q6_K_f32]; break;
|
case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_q6_K_f32]; break;
|
||||||
|
case GGML_TYPE_IQ2_XXS: [encoder setComputePipelineState:ctx->pipeline_mul_mm_id_iq2_xxs_f32]; break;
|
||||||
default: GGML_ASSERT(false && "MUL_MAT_ID not implemented");
|
default: GGML_ASSERT(false && "MUL_MAT_ID not implemented");
|
||||||
}
|
}
|
||||||
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
|
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
|
||||||
@ -1779,14 +1822,15 @@ void ggml_metal_graph_compute(
|
|||||||
[encoder setBytes:&nb11 length:sizeof(nb11) atIndex:11];
|
[encoder setBytes:&nb11 length:sizeof(nb11) atIndex:11];
|
||||||
[encoder setBytes:&nb12 length:sizeof(nb12) atIndex:12];
|
[encoder setBytes:&nb12 length:sizeof(nb12) atIndex:12];
|
||||||
[encoder setBytes:&ne0 length:sizeof(ne0) atIndex:13];
|
[encoder setBytes:&ne0 length:sizeof(ne0) atIndex:13];
|
||||||
[encoder setBytes:&_ne1 length:sizeof(_ne1) atIndex:14];
|
[encoder setBytes:&ne1 length:sizeof(ne1) atIndex:14];
|
||||||
[encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
|
[encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
|
||||||
[encoder setBytes:&r2 length:sizeof(r2) atIndex:16];
|
[encoder setBytes:&r2 length:sizeof(r2) atIndex:16];
|
||||||
[encoder setBytes:&r3 length:sizeof(r3) atIndex:17];
|
[encoder setBytes:&r3 length:sizeof(r3) atIndex:17];
|
||||||
[encoder setBytes:&idx length:sizeof(idx) atIndex:18];
|
[encoder setBytes:&idx length:sizeof(idx) atIndex:18];
|
||||||
// TODO: how to make this an array? read Metal docs
|
// TODO: how to make this an array? read Metal docs
|
||||||
for (int j = 0; j < n_as; ++j) {
|
for (int j = 0; j < 8; ++j) {
|
||||||
struct ggml_tensor * src_cur = dst->src[2 + j];
|
// NOTE: this is done like this to avoid uninitialized kernel arguments when n_as < 8
|
||||||
|
struct ggml_tensor * src_cur = dst->src[2 + (j % n_as)];
|
||||||
|
|
||||||
size_t offs_src_cur = 0;
|
size_t offs_src_cur = 0;
|
||||||
id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(ctx, src_cur, &offs_src_cur);
|
id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(ctx, src_cur, &offs_src_cur);
|
||||||
@ -1796,8 +1840,7 @@ void ggml_metal_graph_compute(
|
|||||||
|
|
||||||
[encoder setThreadgroupMemoryLength:8192 atIndex:0];
|
[encoder setThreadgroupMemoryLength:8192 atIndex:0];
|
||||||
|
|
||||||
// TODO: processing one row at a time (ne11 -> 1) is not efficient
|
[encoder dispatchThreadgroups:MTLSizeMake((ne11 + 31)/32, (ne21 + 63)/64, n_as*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake( (_ne1 + 31)/32, (ne21 + 63)/64, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
|
|
||||||
} else {
|
} else {
|
||||||
int nth0 = 32;
|
int nth0 = 32;
|
||||||
int nth1 = 1;
|
int nth1 = 1;
|
||||||
@ -1878,13 +1921,25 @@ void ggml_metal_graph_compute(
|
|||||||
nth1 = 32;
|
nth1 = 32;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mv_id_q6_K_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_id_q6_K_f32];
|
||||||
} break;
|
} break;
|
||||||
|
case GGML_TYPE_IQ2_XXS:
|
||||||
|
{
|
||||||
|
nth0 = 4;
|
||||||
|
nth1 = 16;
|
||||||
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_id_iq2_xxs_f32];
|
||||||
|
} break;
|
||||||
default:
|
default:
|
||||||
{
|
{
|
||||||
GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t);
|
GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src2t);
|
||||||
GGML_ASSERT(false && "not implemented");
|
GGML_ASSERT(false && "not implemented");
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if (ggml_is_quantized(src2t)) {
|
||||||
|
GGML_ASSERT(ne20 >= nth0*nth1);
|
||||||
|
}
|
||||||
|
|
||||||
|
const int64_t _ne1 = 1; // kernels needs a reference in constant memory
|
||||||
|
|
||||||
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
|
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
|
||||||
[encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
|
[encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
|
||||||
[encoder setBuffer:id_dst offset:offs_dst atIndex:2];
|
[encoder setBuffer:id_dst offset:offs_dst atIndex:2];
|
||||||
@ -1909,8 +1964,9 @@ void ggml_metal_graph_compute(
|
|||||||
[encoder setBytes:&r3 length:sizeof(r3) atIndex:21];
|
[encoder setBytes:&r3 length:sizeof(r3) atIndex:21];
|
||||||
[encoder setBytes:&idx length:sizeof(idx) atIndex:22];
|
[encoder setBytes:&idx length:sizeof(idx) atIndex:22];
|
||||||
// TODO: how to make this an array? read Metal docs
|
// TODO: how to make this an array? read Metal docs
|
||||||
for (int j = 0; j < n_as; ++j) {
|
for (int j = 0; j < 8; ++j) {
|
||||||
struct ggml_tensor * src_cur = dst->src[2 + j];
|
// NOTE: this is done like this to avoid uninitialized kernel arguments when n_as < 8
|
||||||
|
struct ggml_tensor * src_cur = dst->src[2 + (j % n_as)];
|
||||||
|
|
||||||
size_t offs_src_cur = 0;
|
size_t offs_src_cur = 0;
|
||||||
id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(ctx, src_cur, &offs_src_cur);
|
id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(ctx, src_cur, &offs_src_cur);
|
||||||
@ -1920,9 +1976,14 @@ void ggml_metal_graph_compute(
|
|||||||
|
|
||||||
if (src2t == GGML_TYPE_Q4_0 || src2t == GGML_TYPE_Q4_1 ||
|
if (src2t == GGML_TYPE_Q4_0 || src2t == GGML_TYPE_Q4_1 ||
|
||||||
src2t == GGML_TYPE_Q5_0 || src2t == GGML_TYPE_Q5_1 || src2t == GGML_TYPE_Q8_0 ||
|
src2t == GGML_TYPE_Q5_0 || src2t == GGML_TYPE_Q5_1 || src2t == GGML_TYPE_Q8_0 ||
|
||||||
|
//src2t == GGML_TYPE_IQ2_XXS ||
|
||||||
src2t == GGML_TYPE_Q2_K) { // || src2t == GGML_TYPE_Q4_K) {
|
src2t == GGML_TYPE_Q2_K) { // || src2t == GGML_TYPE_Q4_K) {
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
[encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
||||||
}
|
}
|
||||||
|
else if (src2t == GGML_TYPE_IQ2_XXS) {
|
||||||
|
[encoder setThreadgroupMemoryLength:(256*8+128) atIndex:0];
|
||||||
|
[encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
||||||
|
}
|
||||||
else if (src2t == GGML_TYPE_Q4_K) {
|
else if (src2t == GGML_TYPE_Q4_K) {
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
[encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
|
||||||
}
|
}
|
||||||
@ -1959,6 +2020,8 @@ void ggml_metal_graph_compute(
|
|||||||
case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_K]; break;
|
case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_K]; break;
|
||||||
case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_K]; break;
|
case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_K]; break;
|
||||||
case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_K]; break;
|
case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_K]; break;
|
||||||
|
case GGML_TYPE_I32: [encoder setComputePipelineState:ctx->pipeline_get_rows_i32]; break;
|
||||||
|
case GGML_TYPE_IQ2_XXS: [encoder setComputePipelineState:ctx->pipeline_get_rows_iq2_xxs]; break;
|
||||||
default: GGML_ASSERT(false && "not implemented");
|
default: GGML_ASSERT(false && "not implemented");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2229,7 +2292,7 @@ void ggml_metal_graph_compute(
|
|||||||
[encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17];
|
[encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17];
|
||||||
[encoder setBytes:&sf length:sizeof(sf) atIndex:18];
|
[encoder setBytes:&sf length:sizeof(sf) atIndex:18];
|
||||||
|
|
||||||
const int nth = MIN(1024, ne0);
|
const int nth = MIN((int) ctx->pipeline_upscale_f32.maxTotalThreadsPerThreadgroup, ne0);
|
||||||
|
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
|
[encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
|
||||||
} break;
|
} break;
|
||||||
@ -2382,10 +2445,11 @@ void ggml_metal_graph_compute(
|
|||||||
MTLCommandBufferStatus status = (MTLCommandBufferStatus) [ctx->command_buffers[i] status];
|
MTLCommandBufferStatus status = (MTLCommandBufferStatus) [ctx->command_buffers[i] status];
|
||||||
if (status != MTLCommandBufferStatusCompleted) {
|
if (status != MTLCommandBufferStatusCompleted) {
|
||||||
GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
|
GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
|
||||||
GGML_ASSERT(false);
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2665,10 +2729,10 @@ static ggml_backend_buffer_type_t ggml_backend_metal_get_default_buffer_type(ggm
|
|||||||
UNUSED(backend);
|
UNUSED(backend);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
|
static bool ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
|
||||||
struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;
|
struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;
|
||||||
|
|
||||||
ggml_metal_graph_compute(metal_ctx, cgraph);
|
return ggml_metal_graph_compute(metal_ctx, cgraph);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
|
static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
|
||||||
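Note: with ggml_metal_graph_compute() now returning bool instead of asserting on a failed command buffer, callers can react to the failure. A minimal, hypothetical caller sketch (not part of the diff; the error-handling policy shown is an assumption):

    // hypothetical caller: propagate Metal failures instead of aborting the process
    if (!ggml_metal_graph_compute(ctx, gf)) {
        fprintf(stderr, "metal graph compute failed\n");
        // caller decides: retry, fall back to CPU, or return an error to its own caller
    }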
1029
ggml-metal.metal
File diff suppressed because it is too large
@ -6,19 +6,19 @@
extern "C" {
#endif

-void ggml_cl_init(void);
+GGML_API void ggml_cl_init(void);

-void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
+GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);

-void * ggml_cl_host_malloc(size_t size);
-void ggml_cl_host_free(void * ptr);
+GGML_API void * ggml_cl_host_malloc(size_t size);
+GGML_API void ggml_cl_host_free(void * ptr);

-void ggml_cl_free_data(const struct ggml_tensor* tensor);
+GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor);

-void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);
+GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);

#ifdef __cplusplus
}
769
ggml-quants.c
File diff suppressed because it is too large
@ -70,7 +70,7 @@ static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block s
// 2-bit quantization
// weight is represented as x = a * q + b
// 16 blocks of 16 elements each
-// Effectively 2.5625 bits per weight
+// Effectively 2.625 bits per weight
typedef struct {
uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
uint8_t qs[QK_K/4]; // quants
@ -165,6 +165,14 @@ typedef struct {
} block_q8_K;
static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");

+// (Almost) "true" 2-bit quantization.
+// Due to the need to use blocks as per ggml design, it ends up using
+// 2.0625 bpw because of the 16-bit scale for each block of 256.
+typedef struct {
+ggml_fp16_t d;
+uint16_t qs[QK_K/8];
+} block_iq2_xxs;
+static_assert(sizeof(block_iq2_xxs) == sizeof(ggml_fp16_t) + QK_K/8*sizeof(uint16_t), "wrong iq2_xxs block size/padding");

// Quantization
void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k);
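Note: the 2.0625 bpw figure follows directly from the new block layout. A quick standalone check in C (QK_K = 256 is assumed, as in the K-quant blocks above; ggml_fp16_t is a 16-bit type):

    #include <stdio.h>
    int main(void) {
        const int QK_K = 256;                      // super-block size assumed from the K-quant layout
        const int bytes = 2 + (QK_K/8)*2;          // ggml_fp16_t d (2 bytes) + uint16_t qs[QK_K/8]
        printf("%.4f bpw\n", 8.0*bytes/QK_K);      // 66 bytes / 256 weights = 2.0625 bits per weight
        return 0;
    }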
@ -180,6 +188,7 @@ void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict
void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);
void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);
void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);
+void quantize_row_iq2_xxs_reference(const float * restrict x, block_iq2_xxs * restrict y, int k);

void quantize_row_q4_0(const float * restrict x, void * restrict y, int k);
void quantize_row_q4_1(const float * restrict x, void * restrict y, int k);
@ -194,6 +203,7 @@ void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_iq2_xxs(const float * restrict x, void * restrict y, int k);

// Dequantization
void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k);
@ -209,6 +219,7 @@ void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int
void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);
void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);
void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);
+void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int k);

// Dot product
void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
@ -222,3 +233,4 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx,
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
296
ggml.c
@ -573,6 +573,17 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.vec_dot = ggml_vec_dot_q6_K_q8_K,
.vec_dot_type = GGML_TYPE_Q8_K,
},
+[GGML_TYPE_IQ2_XXS] = {
+.type_name = "iq2_xxs",
+.blck_size = QK_K,
+.type_size = sizeof(block_iq2_xxs),
+.is_quantized = true,
+.to_float = (ggml_to_float_t) dequantize_row_iq2_xxs,
+.from_float = quantize_row_iq2_xxs,
+.from_float_reference = (ggml_from_float_t) quantize_row_iq2_xxs_reference,
+.vec_dot = ggml_vec_dot_iq2_xxs_q8_K,
+.vec_dot_type = GGML_TYPE_Q8_K,
+},
[GGML_TYPE_Q8_K] = {
.type_name = "q8_K",
.blck_size = QK_K,
@ -2111,6 +2122,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break;
case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break;
case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
+case GGML_FTYPE_MOSTLY_IQ2_XXS: wtype = GGML_TYPE_IQ2_XXS; break;
case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
}
@ -4041,7 +4053,6 @@ static struct ggml_tensor * ggml_group_norm_impl(
result->op = GGML_OP_GROUP_NORM;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
-result->src[1] = NULL; // TODO: maybe store epsilon here?

return result;
}
@ -4171,23 +4182,23 @@ struct ggml_tensor * ggml_out_prod(
static struct ggml_tensor * ggml_scale_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
-struct ggml_tensor * b,
+float s,
bool inplace) {
-GGML_ASSERT(ggml_is_scalar(b));
GGML_ASSERT(ggml_is_padded_1d(a));

bool is_node = false;

-if (a->grad || b->grad) {
+if (a->grad) {
is_node = true;
}

struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

+ggml_set_op_params(result, &s, sizeof(s));
+
result->op = GGML_OP_SCALE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
-result->src[1] = b;

return result;
}
@ -4195,15 +4206,15 @@ static struct ggml_tensor * ggml_scale_impl(
struct ggml_tensor * ggml_scale(
struct ggml_context * ctx,
struct ggml_tensor * a,
-struct ggml_tensor * b) {
-return ggml_scale_impl(ctx, a, b, false);
+float s) {
+return ggml_scale_impl(ctx, a, s, false);
}

struct ggml_tensor * ggml_scale_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
-struct ggml_tensor * b) {
-return ggml_scale_impl(ctx, a, b, true);
+float s) {
+return ggml_scale_impl(ctx, a, s, true);
}

// ggml_set
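Note: ggml_scale() and ggml_scale_inplace() now take the factor as a plain float stored in op_params instead of a scalar tensor in src[1]. A minimal migration sketch for callers (hypothetical variable names; it mirrors the ggml_acc_or_set change further down in this diff):

    // before: the scale factor had to be materialized as a 1-element tensor
    // cur = ggml_scale(ctx, cur, ggml_new_f32(ctx, 1.0f/sqrtf((float) n_embd_head)));

    // after: pass the float directly, no extra tensor allocated in the context
    cur = ggml_scale(ctx, cur, 1.0f/sqrtf((float) n_embd_head));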
@ -4767,8 +4778,11 @@ struct ggml_tensor * ggml_get_rows(
}

// TODO: implement non F32 return
-//struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
-struct ggml_tensor * result = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0], b->ne[1], b->ne[2]);
+enum ggml_type type = GGML_TYPE_F32;
+if (a->type == GGML_TYPE_I32) {
+type = a->type;
+}
+struct ggml_tensor * result = ggml_new_tensor_4d(ctx, type, a->ne[0], b->ne[0], b->ne[1], b->ne[2]);

result->op = GGML_OP_GET_ROWS;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@ -5541,7 +5555,6 @@ static struct ggml_tensor * ggml_upscale_impl(
result->op_params[0] = scale_factor;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
-result->src[1] = NULL;

return result;
}
@ -5846,7 +5859,6 @@ struct ggml_tensor * ggml_get_rel_pos(
result->op = GGML_OP_GET_REL_POS;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
-result->src[1] = NULL;

return result;
}
@ -6941,14 +6953,165 @@ static void ggml_compute_forward_dup_f32(
}
}

+// A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy.
+static void ggml_compute_forward_dup_bytes(
+const struct ggml_compute_params * params,
+const struct ggml_tensor * src0,
+struct ggml_tensor * dst) {
+GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
+GGML_ASSERT(src0->type == dst->type);
+
+if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+return;
+}
+
+if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) {
+ggml_compute_forward_dup_same_cont(params, src0, dst);
+return;
+}
+
+GGML_TENSOR_UNARY_OP_LOCALS;
+
+const size_t type_size = ggml_type_size(src0->type);
+const int ith = params->ith; // thread index
+const int nth = params->nth; // number of threads
+
+
+// parallelize by rows
+const int nr = ne01;
+// number of rows per thread
+const int dr = (nr + nth - 1) / nth;
+// row range for this thread
+const int ir0 = dr * ith;
+const int ir1 = MIN(ir0 + dr, nr);
+
+if (src0->type == dst->type &&
+ne00 == ne0 &&
+nb00 == type_size && nb0 == type_size) {
+// copy by rows
+const size_t rs = ne00 * type_size;
+for (int64_t i03 = 0; i03 < ne03; i03++) {
+for (int64_t i02 = 0; i02 < ne02; i02++) {
+for (int64_t i01 = ir0; i01 < ir1; i01++) {
+memcpy(
+((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
+((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
+rs);
+}
+}
+}
+return;
+}
+
+if (ggml_is_contiguous(dst)) {
+size_t id = 0;
+char * dst_ptr = (char *) dst->data;
+const size_t rs = ne00 * type_size;
+
+if (nb00 == type_size) {
+// src0 is contigous on first dimension, copy by rows
+for (int64_t i03 = 0; i03 < ne03; i03++) {
+for (int64_t i02 = 0; i02 < ne02; i02++) {
+id += rs * ir0;
+for (int64_t i01 = ir0; i01 < ir1; i01++) {
+const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
+memcpy(dst_ptr + id, src0_ptr, rs);
+id += rs;
+}
+id += rs * (ne01 - ir1);
+}
+}
+} else {
+//printf("%s: this is not optimal - fix me\n", __func__);
+
+for (int64_t i03 = 0; i03 < ne03; i03++) {
+for (int64_t i02 = 0; i02 < ne02; i02++) {
+id += rs * ir0;
+for (int64_t i01 = ir0; i01 < ir1; i01++) {
+for (int64_t i00 = 0; i00 < ne00; i00++) {
+const char * src0_ptr = (char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03;
+memcpy(dst_ptr + id, src0_ptr, type_size);
+
+id += type_size;
+}
+}
+id += rs * (ne01 - ir1);
+}
+}
+}
+
+return;
+}
+
+// dst counters
+
+int64_t i10 = 0;
+int64_t i11 = 0;
+int64_t i12 = 0;
+int64_t i13 = 0;
+
+for (int64_t i03 = 0; i03 < ne03; i03++) {
+for (int64_t i02 = 0; i02 < ne02; i02++) {
+i10 += ne00 * ir0;
+while (i10 >= ne0) {
+i10 -= ne0;
+if (++i11 == ne1) {
+i11 = 0;
+if (++i12 == ne2) {
+i12 = 0;
+if (++i13 == ne3) {
+i13 = 0;
+}
+}
+}
+}
+for (int64_t i01 = ir0; i01 < ir1; i01++) {
+for (int64_t i00 = 0; i00 < ne00; i00++) {
+const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
+char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
+
+memcpy(dst_ptr, src0_ptr, type_size);
+
+if (++i10 == ne0) {
+i10 = 0;
+if (++i11 == ne1) {
+i11 = 0;
+if (++i12 == ne2) {
+i12 = 0;
+if (++i13 == ne3) {
+i13 = 0;
+}
+}
+}
+}
+}
+}
+i10 += ne00 * (ne01 - ir1);
+while (i10 >= ne0) {
+i10 -= ne0;
+if (++i11 == ne1) {
+i11 = 0;
+if (++i12 == ne2) {
+i12 = 0;
+if (++i13 == ne3) {
+i13 = 0;
+}
+}
+}
+}
+}
+}
+}
+
static void ggml_compute_forward_dup(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
-if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
-ggml_compute_forward_dup_same_cont(params, src0, dst);
+if (src0->type == dst->type) {
+ggml_compute_forward_dup_bytes(params, src0, dst);
return;
}

switch (src0->type) {
case GGML_TYPE_F16:
{
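Note: ggml_compute_forward_dup_bytes() uses the same row-partitioning scheme as the other ggml compute kernels: each of nth threads gets a contiguous chunk of ceil(nr/nth) rows. A standalone sketch of just that arithmetic (illustrative example values, not library code):

    #include <stdio.h>
    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    int main(void) {
        const int nr = 10, nth = 4;                // 10 rows split over 4 threads (example values)
        for (int ith = 0; ith < nth; ith++) {
            const int dr  = (nr + nth - 1)/nth;    // rows per thread, rounded up
            const int ir0 = dr*ith;                // first row for this thread
            const int ir1 = MIN(ir0 + dr, nr);     // one past the last row (clamped)
            printf("thread %d: rows [%d, %d)\n", ith, ir0, ir1);
        }
        return 0;
    }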
@ -7285,6 +7448,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K:
+case GGML_TYPE_IQ2_XXS:
{
ggml_compute_forward_add_q_f32(params, src0, src1, dst);
} break;
@ -7549,6 +7713,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K:
+case GGML_TYPE_IQ2_XXS:
{
ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
} break;
@ -7663,6 +7828,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K:
+case GGML_TYPE_IQ2_XXS:
default:
{
GGML_ASSERT(false);
@ -8407,10 +8573,12 @@ static void ggml_compute_forward_repeat(
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F16:
+case GGML_TYPE_I16:
{
ggml_compute_forward_repeat_f16(params, src0, dst);
} break;
case GGML_TYPE_F32:
+case GGML_TYPE_I32:
{
ggml_compute_forward_repeat_f32(params, src0, dst);
} break;
@ -8553,6 +8721,7 @@ static void ggml_compute_forward_concat(
struct ggml_tensor* dst) {
switch (src0->type) {
case GGML_TYPE_F32:
+case GGML_TYPE_I32:
{
ggml_compute_forward_concat_f32(params, src0, src1, dst);
} break;
@ -9550,10 +9719,10 @@ static void ggml_compute_forward_group_norm(
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
-static bool ggml_compute_forward_mul_mat_use_blas(
-const struct ggml_tensor * src0,
-const struct ggml_tensor * src1,
-struct ggml_tensor * dst) {
+static bool ggml_compute_forward_mul_mat_use_blas(struct ggml_tensor * dst) {
+const struct ggml_tensor * src0 = dst->src[0];
+const struct ggml_tensor * src1 = dst->src[1];
//const int64_t ne00 = src0->ne[0];
//const int64_t ne01 = src0->ne[1];

@ -9633,7 +9802,7 @@ static void ggml_compute_forward_mul_mat(
#endif

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
-if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
+if (ggml_compute_forward_mul_mat_use_blas(dst)) {
if (params->ith != 0) {
return;
}
@ -9690,7 +9859,7 @@ static void ggml_compute_forward_mul_mat(
const size_t row_size = ggml_row_size(vec_dot_type, ne10);

assert(params->wsize >= ne11*ne12*ne13*row_size);
-assert(src1->type == GGML_TYPE_F32);
+GGML_ASSERT(src1->type == GGML_TYPE_F32);

for (int64_t i13 = 0; i13 < ne13; ++i13) {
for (int64_t i12 = 0; i12 < ne12; ++i12) {
@ -10301,6 +10470,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K:
+case GGML_TYPE_IQ2_XXS:
{
ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
} break;
@ -10325,19 +10495,18 @@ static void ggml_compute_forward_out_prod(
static void ggml_compute_forward_scale_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
-const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
GGML_ASSERT(ggml_is_contiguous(src0));
GGML_ASSERT(ggml_is_contiguous(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
-GGML_ASSERT(ggml_is_scalar(src1));

if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
}

// scale factor
-const float v = *(float *) src1->data;
+float v;
+memcpy(&v, dst->op_params, sizeof(float));

const int ith = params->ith;
const int nth = params->nth;
@ -10368,12 +10537,11 @@ static void ggml_compute_forward_scale_f32(
static void ggml_compute_forward_scale(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
-const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F32:
{
-ggml_compute_forward_scale_f32(params, src0, src1, dst);
+ggml_compute_forward_scale_f32(params, src0, dst);
} break;
default:
{
@ -10477,6 +10645,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K:
+case GGML_TYPE_IQ2_XXS:
default:
{
GGML_ASSERT(false);
@ -10671,6 +10840,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K:
+case GGML_TYPE_IQ2_XXS:
{
ggml_compute_forward_get_rows_q(params, src0, src1, dst);
} break;
@ -10679,6 +10849,7 @@ static void ggml_compute_forward_get_rows(
ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
+case GGML_TYPE_I32:
{
ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
} break;
@ -11306,6 +11477,7 @@ static void ggml_compute_forward_alibi(
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K:
+case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_Q8_K:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
@ -11380,6 +11552,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K:
+case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_Q8_K:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
@ -14383,7 +14556,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
} break;
case GGML_OP_SCALE:
{
-ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
+ggml_compute_forward_scale(params, tensor->src[0], tensor);
} break;
case GGML_OP_SET:
{
@ -14839,7 +15012,7 @@ static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct gg

static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) {
if (ggml_hash_contains(zero_table, a)) {
-struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0));
+struct ggml_tensor * a_zero = ggml_scale(ctx, a, 0.0f);
return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
} else {
return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
@ -14975,7 +15148,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
src0->grad,
ggml_scale(ctx,
ggml_mul(ctx, src0, tensor->grad),
-ggml_new_f32(ctx, 2.0f)),
+2.0f),
zero_table);
}
} break;
@ -14989,7 +15162,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
ggml_div(ctx,
tensor->grad,
tensor),
-ggml_new_f32(ctx, 0.5f)),
+0.5f),
zero_table);
}
} break;
@ -15155,17 +15328,13 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
{
// necessary for llama
if (src0->grad) {
+float s;
+memcpy(&s, tensor->op_params, sizeof(float));
+
src0->grad =
ggml_add_or_set(ctx,
src0->grad,
-ggml_scale_impl(ctx, tensor->grad, src1, false),
-zero_table);
-}
-if (src1->grad) {
-src1->grad =
-ggml_add_or_set(ctx,
-src1->grad,
-ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
+ggml_scale_impl(ctx, tensor->grad, s, false),
zero_table);
}
} break;
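Note: because the scale factor is now a float in op_params rather than a src[1] tensor, the backward pass above reads it back with memcpy and no longer accumulates a gradient for the factor itself. The math it implements is the chain rule for y = s*a; a short illustrative sketch (not library code):

    // illustrative helper: accumulate the gradient of y = s*a into grad_a
    static void scale_backward(int n, float s, const float * grad_y, float * grad_a) {
        for (int i = 0; i < n; i++) {
            grad_a[i] += s * grad_y[i];   // dL/da = s * dL/dy; s is a constant and receives no gradient
        }
    }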
@ -15343,6 +15512,8 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
|
|||||||
const int n_past = ((int32_t *) tensor->op_params)[0];
|
const int n_past = ((int32_t *) tensor->op_params)[0];
|
||||||
src0->grad =
|
src0->grad =
|
||||||
ggml_add_or_set(ctx, src0->grad,
|
ggml_add_or_set(ctx, src0->grad,
|
||||||
|
/* ggml_diag_mask_inf_impl() shouldn't be here */
|
||||||
|
/* ref: https://github.com/ggerganov/llama.cpp/pull/4203#discussion_r1412377992 */
|
||||||
ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
|
ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
|
||||||
zero_table);
|
zero_table);
|
||||||
}
|
}
|
||||||
@ -16150,24 +16321,6 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
|
|||||||
|
|
||||||
//n_tasks = MIN(n_threads, MAX(1, nr0/128));
|
//n_tasks = MIN(n_threads, MAX(1, nr0/128));
|
||||||
//printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
|
//printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
|
||||||
|
|
||||||
#if defined(GGML_USE_CUBLAS)
|
|
||||||
if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
|
|
||||||
n_tasks = 1; // TODO: this actually is doing nothing
|
|
||||||
// the threads are still spinning
|
|
||||||
}
|
|
||||||
#elif defined(GGML_USE_CLBLAST)
|
|
||||||
if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
|
|
||||||
n_tasks = 1; // TODO: this actually is doing nothing
|
|
||||||
// the threads are still spinning
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
|
|
||||||
if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
|
|
||||||
n_tasks = 1; // TODO: this actually is doing nothing
|
|
||||||
// the threads are still spinning
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
} break;
|
} break;
|
||||||
case GGML_OP_MUL_MAT_ID:
|
case GGML_OP_MUL_MAT_ID:
|
||||||
{
|
{
|
||||||
@ -16340,6 +16493,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
|
|||||||
state->shared->node_n += 1;
|
state->shared->node_n += 1;
|
||||||
return (thread_ret_t) GGML_EXIT_ABORTED;
|
return (thread_ret_t) GGML_EXIT_ABORTED;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
|
if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
|
||||||
// all other threads are finished and spinning
|
// all other threads are finished and spinning
|
||||||
// do finalize and init here so we don't have synchronize again
|
// do finalize and init here so we don't have synchronize again
|
||||||
@ -16405,14 +16559,18 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
|
|||||||
} else {
|
} else {
|
||||||
// wait for other threads to finish
|
// wait for other threads to finish
|
||||||
const int last = node_n;
|
const int last = node_n;
|
||||||
|
|
||||||
|
const bool do_yield = last < 0 || cgraph->nodes[last]->op == GGML_OP_MUL_MAT;
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
// TODO: this sched_yield can have significant impact on the performance - either positive or negative
|
// TODO: this sched_yield can have significant impact on the performance - either positive or negative
|
||||||
// depending on the workload and the operating system.
|
// depending on the workload and the operating system.
|
||||||
// since it is not clear what is the best approach, it should potentially become user-configurable
|
// since it is not clear what is the best approach, it should potentially become user-configurable
|
||||||
// ref: https://github.com/ggerganov/ggml/issues/291
|
// ref: https://github.com/ggerganov/ggml/issues/291
|
||||||
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
|
// UPD: adding the do_yield flag seems to resolve the issue universally
|
||||||
|
if (do_yield) {
|
||||||
sched_yield();
|
sched_yield();
|
||||||
#endif
|
}
|
||||||
|
|
||||||
node_n = atomic_load(&state->shared->node_n);
|
node_n = atomic_load(&state->shared->node_n);
|
||||||
if (node_n != last) break;
|
if (node_n != last) break;
|
||||||
@ -16491,7 +16649,7 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
|
|||||||
} else
|
} else
|
||||||
#endif
|
#endif
|
||||||
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
|
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
|
||||||
if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
|
if (ggml_compute_forward_mul_mat_use_blas(node)) {
|
||||||
if (node->src[0]->type != GGML_TYPE_F32) {
|
if (node->src[0]->type != GGML_TYPE_F32) {
|
||||||
// here we need memory just for single 2D matrix from src0
|
// here we need memory just for single 2D matrix from src0
|
||||||
cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]);
|
cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]);
|
||||||
@ -17460,9 +17618,9 @@ static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g
|
|||||||
}
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
// ADAM
|
// Using AdamW - ref: https://arxiv.org/pdf/1711.05101v3.pdf
|
||||||
//
|
//
|
||||||
// ref: https://arxiv.org/pdf/1412.6980.pdf
|
// (Original Adam - ref: https://arxiv.org/pdf/1412.6980.pdf)
|
||||||
//
|
//
|
||||||
|
|
||||||
static enum ggml_opt_result ggml_opt_adam(
|
static enum ggml_opt_result ggml_opt_adam(
|
||||||
@ -18510,6 +18668,12 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i
|
|||||||
block_q6_K * block = (block_q6_K*)dst + start / QK_K;
|
block_q6_K * block = (block_q6_K*)dst + start / QK_K;
|
||||||
result = ggml_quantize_q6_K(src + start, block, n, n, hist);
|
result = ggml_quantize_q6_K(src + start, block, n, n, hist);
|
||||||
} break;
|
} break;
|
||||||
|
case GGML_TYPE_IQ2_XXS:
|
||||||
|
{
|
||||||
|
GGML_ASSERT(start % QK_K == 0);
|
||||||
|
block_iq2_xxs * block = (block_iq2_xxs*)dst + start / QK_K;
|
||||||
|
result = ggml_quantize_iq2_xxs(src + start, block, n, n, hist);
|
||||||
|
} break;
|
||||||
case GGML_TYPE_F16:
|
case GGML_TYPE_F16:
|
||||||
{
|
{
|
||||||
int elemsize = sizeof(ggml_fp16_t);
|
int elemsize = sizeof(ggml_fp16_t);
|
||||||
@ -19355,7 +19519,7 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
                 data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
             }
             gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
-            free(data);
+            free((void *)data);
         } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
             GGML_ASSERT(false && "nested arrays not supported");
         } else {
@ -19645,6 +19809,14 @@ int ggml_cpu_has_avx(void) {
 #endif
 }

+int ggml_cpu_has_avx_vnni(void) {
+#if defined(__AVXVNNI__)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 int ggml_cpu_has_avx2(void) {
 #if defined(__AVX2__)
     return 1;
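The new probe is used like the existing feature checks, e.g. when printing system information; an illustrative one-liner:

printf("AVX_VNNI = %d | AVX2 = %d\n", ggml_cpu_has_avx_vnni(), ggml_cpu_has_avx2());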
ggml.h
@ -255,6 +255,8 @@
 #define GGML_UNREACHABLE() GGML_ASSERT(!"statement should not be reached")
 #elif defined(__GNUC__)
 #define GGML_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+#define GGML_UNREACHABLE() __assume(0)
 #else
 #define GGML_UNREACHABLE() ((void) 0)
 #endif
@ -337,6 +339,7 @@ extern "C" {
         GGML_TYPE_Q5_K = 13,
         GGML_TYPE_Q6_K = 14,
         GGML_TYPE_Q8_K = 15,
+        GGML_TYPE_IQ2_XXS = 16,
         GGML_TYPE_I8,
         GGML_TYPE_I16,
         GGML_TYPE_I32,
@ -371,6 +374,7 @@ extern "C" {
         GGML_FTYPE_MOSTLY_Q4_K = 12, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q5_K = 13, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q6_K = 14, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ2_XXS = 15, // except 1d tensors
     };

     // available tensor operations:
@ -484,7 +488,8 @@ extern "C" {
     enum ggml_log_level {
         GGML_LOG_LEVEL_ERROR = 2,
         GGML_LOG_LEVEL_WARN = 3,
-        GGML_LOG_LEVEL_INFO = 4
+        GGML_LOG_LEVEL_INFO = 4,
+        GGML_LOG_LEVEL_DEBUG = 5
     };

     // ggml object
@ -1094,13 +1099,13 @@ extern "C" {
     GGML_API struct ggml_tensor * ggml_scale(
             struct ggml_context * ctx,
             struct ggml_tensor * a,
-            struct ggml_tensor * b);
+            float s);

     // in-place, returns view(a)
     GGML_API struct ggml_tensor * ggml_scale_inplace(
             struct ggml_context * ctx,
             struct ggml_tensor * a,
-            struct ggml_tensor * b);
+            float s);

     // b -> view(a,offset,nb1,nb2,3), return modified a
     GGML_API struct ggml_tensor * ggml_set(
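Call sites migrate from wrapping the factor in a one-element tensor to passing the float directly; a minimal sketch with made-up tensor names:

// before: cur = ggml_scale(ctx, cur, ggml_new_f32(ctx, 0.125f));
// after:
cur = ggml_scale(ctx, cur, 0.125f);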
@ -2064,6 +2069,7 @@ extern "C" {
     GGML_API size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist);
     GGML_API size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist);
     GGML_API size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist);
+    GGML_API size_t ggml_quantize_iq2_xxs(const float * src, void * dst, int n, int k, int64_t * hist);

     GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);

@ -2195,6 +2201,7 @@ extern "C" {
     //

     GGML_API int ggml_cpu_has_avx        (void);
+    GGML_API int ggml_cpu_has_avx_vnni   (void);
     GGML_API int ggml_cpu_has_avx2       (void);
     GGML_API int ggml_cpu_has_avx512     (void);
     GGML_API int ggml_cpu_has_avx512_vbmi(void);
@ -3,7 +3,7 @@
 This is a Python package for writing binary files in the [GGUF](https://github.com/ggerganov/ggml/pull/302)
 (GGML Universal File) format.

-See [convert-llama-hf-to-gguf.py](https://github.com/ggerganov/llama.cpp/blob/master/convert-llama-hf-to-gguf.py)
+See [convert-llama-hf-to-gguf.py](https://github.com/ggerganov/llama.cpp/blob/master/convert-hf-to-gguf.py)
 as an example for its usage.

 ## Installation
@ -46,6 +46,8 @@ class Keys:
         HEAD_COUNT_KV     = "{arch}.attention.head_count_kv"
         MAX_ALIBI_BIAS    = "{arch}.attention.max_alibi_bias"
         CLAMP_KQV         = "{arch}.attention.clamp_kqv"
+        KEY_LENGTH        = "{arch}.attention.key_length"
+        VALUE_LENGTH      = "{arch}.attention.value_length"
         LAYERNORM_EPS     = "{arch}.attention.layer_norm_epsilon"
         LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon"
@ -96,6 +98,7 @@ class MODEL_ARCH(IntEnum):
     STABLELM = auto()
     QWEN     = auto()
     PHI2     = auto()
+    PLAMO    = auto()


 class MODEL_TENSOR(IntEnum):
@ -119,6 +122,7 @@ class MODEL_TENSOR(IntEnum):
     FFN_GATE     = auto()
     FFN_DOWN     = auto()
     FFN_UP       = auto()
+    FFN_ACT      = auto()
     FFN_GATE_EXP = auto()
     FFN_DOWN_EXP = auto()
     FFN_UP_EXP   = auto()
@ -142,6 +146,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
     MODEL_ARCH.STABLELM: "stablelm",
     MODEL_ARCH.QWEN:     "qwen",
     MODEL_ARCH.PHI2:     "phi2",
+    MODEL_ARCH.PLAMO:    "plamo",
 }

 TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
@ -167,6 +172,7 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
     MODEL_TENSOR.FFN_GATE:     "blk.{bid}.ffn_gate",
     MODEL_TENSOR.FFN_DOWN:     "blk.{bid}.ffn_down",
     MODEL_TENSOR.FFN_UP:       "blk.{bid}.ffn_up",
+    MODEL_TENSOR.FFN_ACT:      "blk.{bid}.ffn",
     MODEL_TENSOR.FFN_GATE_EXP: "blk.{bid}.ffn_gate.{xid}",
     MODEL_TENSOR.FFN_DOWN_EXP: "blk.{bid}.ffn_down.{xid}",
     MODEL_TENSOR.FFN_UP_EXP:   "blk.{bid}.ffn_up.{xid}",
@ -267,6 +273,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_NORM,
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.FFN_ACT,
     ],
     MODEL_ARCH.GPTJ: [
         MODEL_TENSOR.TOKEN_EMBD,
@ -349,8 +356,32 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
     ],
+    MODEL_ARCH.PLAMO: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ROPE_FREQS,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+    ],
     MODEL_ARCH.GPT2: [
-        # TODO
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.POS_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_QKV,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
     ],
     MODEL_ARCH.PHI2: [
         MODEL_TENSOR.TOKEN_EMBD,
@ -333,6 +333,12 @@ class GGUFWriter:
     def add_head_count_kv(self, count: int) -> None:
         self.add_uint32(Keys.Attention.HEAD_COUNT_KV.format(arch=self.arch), count)

+    def add_key_length(self, length: int) -> None:
+        self.add_uint32(Keys.Attention.KEY_LENGTH.format(arch=self.arch), length)
+
+    def add_value_length(self, length: int) -> None:
+        self.add_uint32(Keys.Attention.VALUE_LENGTH.format(arch=self.arch), length)
+
     def add_max_alibi_bias(self, bias: float) -> None:
         self.add_float32(Keys.Attention.MAX_ALIBI_BIAS.format(arch=self.arch), bias)

@ -17,6 +17,7 @@ class TensorNameMap:
             "tok_embeddings",                            # llama-pth
             "embeddings.word_embeddings",                # bert
             "language_model.embedding.word_embeddings",  # persimmon
+            "wte",                                       # gpt2
             "transformer.embd.wte",                      # phi2
         ),

@ -34,6 +35,7 @@ class TensorNameMap:
         MODEL_TENSOR.POS_EMBD: (
             "transformer.wpe",                 # gpt2
             "embeddings.position_embeddings",  # bert
+            "wpe",                             # gpt2
         ),

         # Output
@ -53,7 +55,7 @@ class TensorNameMap:
             "norm",                                   # llama-pth
             "embeddings.LayerNorm",                   # bert
             "transformer.norm_f",                     # mpt
-            "ln_f",                                   # refact bloom qwen
+            "ln_f",                                   # refact bloom qwen gpt2
             "language_model.encoder.final_layernorm", # persimmon
             "lm_head.ln",                             # phi2
         ),
@ -78,7 +80,9 @@ class TensorNameMap:
             "encoder.layer.{bid}.attention.output.LayerNorm",       # bert
             "language_model.encoder.layers.{bid}.input_layernorm",  # persimmon
             "model.layers.{bid}.ln1",                               # yi
+            "h.{bid}.ln_1",                                         # gpt2
             "transformer.h.{bid}.ln",                               # phi2
+            "model.layers.layers.{bid}.norm",                       # plamo
         ),

         # Attention norm 2
@ -94,6 +98,7 @@ class TensorNameMap:
             "transformer.h.{bid}.self_attention.query_key_value",                 # falcon
             "h.{bid}.self_attention.query_key_value",                             # bloom
             "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon
+            "h.{bid}.attn.c_attn",                                                # gpt2
             "transformer.h.{bid}.mixer.Wqkv",                                     # phi2
         ),

@ -103,6 +108,7 @@ class TensorNameMap:
             "layers.{bid}.attention.wq",                  # llama-pth
             "encoder.layer.{bid}.attention.self.query",   # bert
             "transformer.h.{bid}.attn.q_proj",            # gpt-j
+            "model.layers.layers.{bid}.self_attn.q_proj", # plamo
         ),

         # Attention key
@ -111,6 +117,7 @@ class TensorNameMap:
             "layers.{bid}.attention.wk",                  # llama-pth
             "encoder.layer.{bid}.attention.self.key",     # bert
             "transformer.h.{bid}.attn.k_proj",            # gpt-j
+            "model.layers.layers.{bid}.self_attn.k_proj", # plamo
         ),

         # Attention value
@ -119,6 +126,7 @@ class TensorNameMap:
             "layers.{bid}.attention.wv",                  # llama-pth
             "encoder.layer.{bid}.attention.self.value",   # bert
             "transformer.h.{bid}.attn.v_proj",            # gpt-j
+            "model.layers.layers.{bid}.self_attn.v_proj", # plamo
         ),

         # Attention output
@ -133,13 +141,16 @@ class TensorNameMap:
             "encoder.layer.{bid}.attention.output.dense",               # bert
             "transformer.h.{bid}.attn.out_proj",                        # gpt-j
             "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon
+            "h.{bid}.attn.c_proj",                                      # gpt2
             "transformer.h.{bid}.mixer.out_proj",                       # phi2
+            "model.layers.layers.{bid}.self_attn.o_proj",               # plamo
         ),

         # Rotary embeddings
         MODEL_TENSOR.ATTN_ROT_EMBD: (
             "model.layers.{bid}.self_attn.rotary_emb.inv_freq",        # llama-hf
             "layers.{bid}.attention.inner_attention.rope.freqs",       # llama-pth
+            "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo
         ),

         # Feed-forward norm
@ -153,6 +164,7 @@ class TensorNameMap:
             "encoder.layer.{bid}.output.LayerNorm",                         # bert
             "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
             "model.layers.{bid}.ln2",                                       # yi
+            "h.{bid}.ln_2",                                                 # gpt2
         ),

         MODEL_TENSOR.FFN_GATE_INP: (
@ -173,7 +185,9 @@ class TensorNameMap:
             "transformer.h.{bid}.mlp.fc_in",                         # gpt-j
             "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
             "transformer.h.{bid}.mlp.w1",                            # qwen
+            "h.{bid}.mlp.c_fc",                                      # gpt2
             "transformer.h.{bid}.mlp.fc1",                           # phi2
+            "model.layers.layers.{bid}.mlp.up_proj",                 # plamo
         ),

         MODEL_TENSOR.FFN_UP_EXP: (
@ -181,11 +195,17 @@ class TensorNameMap:
             "model.layers.{bid}.block_sparse_moe.experts.{xid}.w3", # mixtral
         ),

+        # AWQ-activation gate
+        MODEL_TENSOR.FFN_ACT: (
+            "transformer.blocks.{bid}.ffn.act", # mpt
+        ),
+
         # Feed-forward gate
         MODEL_TENSOR.FFN_GATE: (
             "model.layers.{bid}.mlp.gate_proj",        # llama-hf refact
             "layers.{bid}.feed_forward.w1",            # llama-pth
             "transformer.h.{bid}.mlp.w2",              # qwen
+            "model.layers.layers.{bid}.mlp.gate_proj", # plamo
         ),

         MODEL_TENSOR.FFN_GATE_EXP: (
@ -205,7 +225,9 @@ class TensorNameMap:
             "encoder.layer.{bid}.output.dense",                      # bert
             "transformer.h.{bid}.mlp.fc_out",                        # gpt-j
             "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon
+            "h.{bid}.mlp.c_proj",                                    # gpt2
             "transformer.h.{bid}.mlp.fc2",                           # phi2
+            "model.layers.layers.{bid}.mlp.down_proj",               # plamo
         ),

         MODEL_TENSOR.FFN_DOWN_EXP: (
llama.h
@ -103,6 +103,7 @@ extern "C" {
         LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q6_K   = 18, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors

         LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
     };
@ -127,7 +128,7 @@ extern "C" {
         bool sorted;
     } llama_token_data_array;

-    typedef void (*llama_progress_callback)(float progress, void *ctx);
+    typedef bool (*llama_progress_callback)(float progress, void *ctx);

     // Input data for llama_decode
     // A llama_batch object can contain input about one or many sequences
@ -180,7 +181,9 @@ extern "C" {
         int32_t main_gpu; // the GPU that is used for scratch and small tensors
         const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)

-        // called with a progress value between 0 and 1, pass NULL to disable
+        // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
+        // If the provided progress_callback returns true, model loading continues.
+        // If it returns false, model loading is immediately aborted.
         llama_progress_callback progress_callback;

         // context pointer passed to the progress callback
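A sketch of how a caller can use the new cancellation semantics; everything beyond the callback signature (the params struct and loader call shown in the trailing comments) is assumed from the surrounding llama.cpp API rather than taken from this diff:

#include <stdbool.h>
#include <stdio.h>

// abort loading once more than half of the model has been read
static bool my_progress(float progress, void * ctx) {
    (void) ctx;
    fprintf(stderr, "loading: %3.0f%%\r", progress * 100.0f);
    return progress <= 0.5f; // false -> loading is aborted and the loader returns NULL
}

// struct llama_model_params mparams = llama_model_default_params();
// mparams.progress_callback = my_progress;
// struct llama_model * model = llama_load_model_from_file(path, mparams);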
@ -224,7 +227,7 @@ extern "C" {

     // model quantization parameters
     typedef struct llama_model_quantize_params {
-        int nthread;     // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
+        int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
         enum llama_ftype ftype; // quantize to this llama_ftype
         bool allow_requantize;  // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor; // quantize output.weight
@ -308,21 +311,20 @@ extern "C" {

     LLAMA_API int64_t llama_time_us(void);

-    LLAMA_API int  llama_max_devices    (void);
+    LLAMA_API int32_t llama_max_devices(void);
     LLAMA_API bool llama_mmap_supported (void);
     LLAMA_API bool llama_mlock_supported(void);

     LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);

-    // TODO: become more consistent with returned int types across the API
     LLAMA_API uint32_t llama_n_ctx      (const struct llama_context * ctx);
     LLAMA_API uint32_t llama_n_batch    (const struct llama_context * ctx);

     LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_model * model);

-    LLAMA_API int llama_n_vocab    (const struct llama_model * model);
-    LLAMA_API int llama_n_ctx_train(const struct llama_model * model);
-    LLAMA_API int llama_n_embd     (const struct llama_model * model);
+    LLAMA_API int32_t llama_n_vocab    (const struct llama_model * model);
+    LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
+    LLAMA_API int32_t llama_n_embd     (const struct llama_model * model);

     // Get the model's RoPE frequency scaling factor
     LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
@ -333,19 +335,19 @@ extern "C" {
     // - GGUF array values are not supported by these functions

     // Get metadata value as a string by key name
-    LLAMA_API int llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
+    LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);

     // Get the number of metadata key/value pairs
-    LLAMA_API int llama_model_meta_count(const struct llama_model * model);
+    LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);

     // Get metadata key name by index
-    LLAMA_API int llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size);
+    LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);

     // Get metadata value as a string by index
-    LLAMA_API int llama_model_meta_val_str_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size);
+    LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);

     // Get a string describing the model type
-    LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
+    LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);

     // Returns the total size of all the tensors in the model in bytes
     LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
@ -357,7 +359,7 @@ extern "C" {
     LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);

     // Returns 0 on success
-    LLAMA_API int llama_model_quantize(
+    LLAMA_API uint32_t llama_model_quantize(
             const char * fname_inp,
             const char * fname_out,
             const llama_model_quantize_params * params);
@ -368,20 +370,20 @@ extern "C" {
     // The model needs to be reloaded before applying a new adapter, otherwise the adapter
     // will be applied on top of the previous one
     // Returns 0 on success
-    LLAMA_API DEPRECATED(int llama_apply_lora_from_file(
+    LLAMA_API DEPRECATED(int32_t llama_apply_lora_from_file(
             struct llama_context * ctx,
             const char * path_lora,
             float scale,
             const char * path_base_model,
-            int n_threads),
+            int32_t n_threads),
             "use llama_model_apply_lora_from_file instead");

-    LLAMA_API int llama_model_apply_lora_from_file(
+    LLAMA_API int32_t llama_model_apply_lora_from_file(
             const struct llama_model * model,
             const char * path_lora,
             float scale,
             const char * path_base_model,
-            int n_threads);
+            int32_t n_threads);

     //
     // KV cache
@ -437,10 +439,10 @@ extern "C" {

     // Returns the number of tokens in the KV cache (slow, use only for debug)
     // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
-    LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);
+    LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx);

     // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
-    LLAMA_API int llama_get_kv_cache_used_cells(const struct llama_context * ctx);
+    LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx);

     // Clear the KV cache
     LLAMA_API void llama_kv_cache_clear(
@ -483,6 +485,17 @@ extern "C" {
             llama_pos p1,
             llama_pos delta);

+    // Integer division of the positions by factor of `d > 1`
+    // If the KV cache is RoPEd, the KV data is updated accordingly
+    // p0 < 0 : [0,  p1]
+    // p1 < 0 : [p0, inf)
+    LLAMA_API void llama_kv_cache_seq_div(
+            struct llama_context * ctx,
+            llama_seq_id seq_id,
+            llama_pos p0,
+            llama_pos p1,
+            int d);
+
     //
     // State / sessions
     //
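For example, compressing all cached positions of sequence 0 by a factor of 2, using the p0/p1 conventions documented above; the sequence id and factor here are illustrative:

// p0 = -1 and p1 = -1 together select the whole range [0, inf)
llama_kv_cache_seq_div(ctx, /*seq_id=*/0, /*p0=*/-1, /*p1=*/-1, /*d=*/2);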
@ -531,7 +544,7 @@ extern "C" {
             struct llama_context * ctx,
             llama_token * tokens,
             int32_t n_tokens,
-            int n_past),
+            int32_t n_past),
             "use llama_decode() instead");

     // Same as llama_eval, but use float matrix input directly.
@ -540,7 +553,7 @@ extern "C" {
             struct llama_context * ctx,
             float * embd,
             int32_t n_tokens,
-            int n_past),
+            int32_t n_past),
             "use llama_decode() instead");

     // Return batch for single sequence of tokens starting at pos_0
@ -572,7 +585,7 @@ extern "C" {
     //   0 - success
     //   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
     // < 0 - error
-    LLAMA_API int llama_decode(
+    LLAMA_API int32_t llama_decode(
             struct llama_context * ctx,
             struct llama_batch batch);

@ -612,10 +625,10 @@ extern "C" {
     LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line

     // Returns -1 if unknown, 1 for true or 0 for false.
-    LLAMA_API int llama_add_bos_token(const struct llama_model * model);
+    LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);

     // Returns -1 if unknown, 1 for true or 0 for false.
-    LLAMA_API int llama_add_eos_token(const struct llama_model * model);
+    LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);

     // codellama infill tokens
     LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
@ -633,12 +646,12 @@ extern "C" {
     /// @return Returns a negative number on failure - the number of tokens that would have been returned
     /// @param special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext.
     ///                Does not insert a leading space.
-    LLAMA_API int llama_tokenize(
+    LLAMA_API int32_t llama_tokenize(
         const struct llama_model * model,
         const char * text,
-        int text_len,
+        int32_t text_len,
         llama_token * tokens,
-        int n_max_tokens,
+        int32_t n_max_tokens,
         bool add_bos,
         bool special);

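Because a failure returns the negative of the required token count, callers can size the buffer in two passes; a sketch where `model` and `prompt` are assumed to already exist:

#include <string.h>

llama_token tokens[64];
int32_t n = llama_tokenize(model, prompt, (int32_t) strlen(prompt),
                           tokens, 64, /*add_bos=*/true, /*special=*/false);
if (n < 0) {
    // -n tokens are needed: allocate a buffer of that size and call llama_tokenize again
}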
@ -646,11 +659,11 @@ extern "C" {
     // Uses the vocabulary in the provided context.
     // Does not write null terminator to the buffer.
     // User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens.
-    LLAMA_API int llama_token_to_piece(
+    LLAMA_API int32_t llama_token_to_piece(
         const struct llama_model * model,
         llama_token token,
         char * buf,
-        int length);
+        int32_t length);

     //
     // Grammar
@ -702,7 +715,7 @@ extern "C" {
     LLAMA_API void llama_sample_top_k(
             struct llama_context * ctx,
             llama_token_data_array * candidates,
-            int k,
+            int32_t k,
             size_t min_keep);

     /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
@ -761,7 +774,7 @@ extern "C" {
             llama_token_data_array * candidates,
             float tau,
             float eta,
-            int m,
+            int32_t m,
             float * mu);

     /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
@ -834,8 +847,8 @@ extern "C" {
             llama_beam_search_callback_fn_t callback,
             void * callback_data,
             size_t n_beams,
-            int n_past,
-            int n_predict);
+            int32_t n_past,
+            int32_t n_predict);

     // Performance information
     LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
Some files were not shown because too many files have changed in this diff.