Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-11-11 21:39:52 +00:00

Commit 6381d4e110
* gguf : first API pass
* gguf : read header + meta data
* gguf : read tensor info
* gguf : initial model loading - not tested
* gguf : add gguf_get_tensor_name()
* gguf : do not support passing existing ggml_context to gguf_init
* gguf : simplify gguf_get_val
* gguf : gguf.c is now part of ggml.c
* gguf : read / write sample models
* gguf : add comments
* refactor : reduce code duplication and better API (#2415)
* gguf : expose the gguf_type enum through the API for now
* gguf : add array support
* gguf.py : some code style changes
* convert.py : start a new simplified implementation by removing old stuff
* convert.py : remove GGML vocab + other obsolete stuff
* GGUF : write tensor (#2426)
* WIP: Write tensor
* GGUF : Support writing tensors in Python
* refactor : rm unused import and upd todos
* fix : fix errors upd writing example
* rm example.gguf
* gitignore *.gguf
* undo formatting
* gguf : add gguf_find_key (#2438)
* gguf.cpp : find key example
* ggml.h : add gguf_find_key
* ggml.c : add gguf_find_key
* gguf : fix writing tensors
* gguf : do not hardcode tensor names to read
* gguf : write sample tensors to read
* gguf : add tokenization constants
* quick and dirty conversion example
* gguf : fix writing gguf arrays
* gguf : write tensors one by one and code reuse
* gguf : fix writing gguf arrays
* gguf : write tensors one by one
* gguf : write tensors one by one
* gguf : write tokenizer data
* gguf : upd gguf conversion script
* Update convert-llama-h5-to-gguf.py
* gguf : handle already encoded string
* ggml.h : get array str and f32
* ggml.c : get arr str and f32
* gguf.py : support any type
* Update convert-llama-h5-to-gguf.py
* gguf : fix set is not subscriptable
* gguf : update convert-llama-h5-to-gguf.py
* constants.py : add layer norm eps
* gguf.py : add layer norm eps and merges
* ggml.h : increase GGML_MAX_NAME to 64
* ggml.c : add gguf_get_arr_n
* Update convert-llama-h5-to-gguf.py
* add gptneox gguf example
* Makefile : add gptneox gguf example
* Update convert-llama-h5-to-gguf.py
* add gptneox gguf example
* Update convert-llama-h5-to-gguf.py
* Update convert-gptneox-h5-to-gguf.py
* Update convert-gptneox-h5-to-gguf.py
* Update convert-llama-h5-to-gguf.py
* gguf : support custom alignment value
* gguf : fix typo in function call
* gguf : mmap tensor data example
* fix : update convert-llama-h5-to-gguf.py
* Update convert-llama-h5-to-gguf.py
* convert-gptneox-h5-to-gguf.py : Special tokens
* gptneox-main.cpp : special tokens
* Update gptneox-main.cpp
* constants.py : special tokens
* gguf.py : accumulate kv and tensor info data + special tokens
* convert-gptneox-h5-to-gguf.py : accumulate kv and ti + special tokens
* gguf : gguf counterpart of llama-util.h
* gguf-util.h : update note
* convert-llama-h5-to-gguf.py : accumulate kv / ti + special tokens
* convert-llama-h5-to-gguf.py : special tokens
* Delete gptneox-common.cpp
* Delete gptneox-common.h
* convert-gptneox-h5-to-gguf.py : gpt2bpe tokenizer
* gptneox-main.cpp : gpt2 bpe tokenizer
* gpt2 bpe tokenizer (handles merges and unicode)
* Makefile : remove gptneox-common
* gguf.py : bytesarray for gpt2bpe tokenizer
* cmpnct_gpt2bpe.hpp : comments
* gguf.py : use custom alignment if present
* gguf : minor stuff
* Update gptneox-main.cpp
* map tensor names
* convert-gptneox-h5-to-gguf.py : map tensor names
* convert-llama-h5-to-gguf.py : map tensor names
* gptneox-main.cpp : map tensor names
* gguf : start implementing libllama in GGUF (WIP)
* gguf : start implementing libllama in GGUF (WIP)
* rm binary committed by mistake
* upd .gitignore
* gguf : calculate n_mult
* gguf : inference with 7B model working (WIP)
* gguf : rm deprecated function
* gguf : start implementing gguf_file_saver (WIP)
* gguf : start implementing gguf_file_saver (WIP)
* gguf : start implementing gguf_file_saver (WIP)
* gguf : add gguf_get_kv_type
* gguf : add gguf_get_kv_type
* gguf : write metadata in gguf_file_saver (WIP)
* gguf : write metadata in gguf_file_saver (WIP)
* gguf : write metadata in gguf_file_saver
* gguf : rm references to old file formats
* gguf : shorter name for member variable
* gguf : rm redundant method
* gguf : get rid of n_mult, read n_ff from file
* Update gguf_tensor_map.py
* Update gptneox-main.cpp
* gguf : rm references to old file magics
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : quantization is working
* gguf : proper closing of file
* gguf.py : no need to convert tensors twice
* convert-gptneox-h5-to-gguf.py : no need to convert tensors twice
* convert-llama-h5-to-gguf.py : no need to convert tensors twice
* convert-gptneox-h5-to-gguf.py : simplify nbytes
* convert-llama-h5-to-gguf.py : simplify nbytes
* gptneox-main.cpp : n_layer --> n_block
* constants.py : n_layer --> n_block
* gguf.py : n_layer --> n_block
* convert-gptneox-h5-to-gguf.py : n_layer --> n_block
* convert-llama-h5-to-gguf.py : n_layer --> n_block
* gptneox-main.cpp : n_layer --> n_block
* Update gguf_tensor_map.py
* convert-gptneox-h5-to-gguf.py : load model in parts to save memory
* convert-llama-h5-to-gguf.py : load model in parts to save memory
* convert : write more metadata for LLaMA
* convert : rm quantization version
* convert-gptneox-h5-to-gguf.py : add file_type key
* gptneox-main.cpp : add file_type key
* fix conflicts
* gguf : add todos and comments
* convert-gptneox-h5-to-gguf.py : tensor name map changes
* Create gguf_namemap.py : tensor name map changes
* Delete gguf_tensor_map.py
* gptneox-main.cpp : tensor name map changes
* convert-llama-h5-to-gguf.py : fixes
* gguf.py : don't add empty strings
* simple : minor style changes
* gguf : use UNIX line ending
* Create convert-llama-7b-pth-to-gguf.py
* llama : sync gguf-llama.cpp with latest llama.cpp (#2608)
* llama : sync gguf-llama.cpp with latest llama.cpp
* minor : indentation + assert
* llama : refactor gguf_buffer and gguf_ctx_buffer
* llama : minor
* gitignore : add gptneox-main
* llama : tokenizer fixes (#2549)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* convert : update convert-new.py with tokenizer fixes (#2614)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* llama : sync gguf-llama with llama (#2613)
* llama : sync gguf-llama with llama
* tests : fix build + warnings (test-tokenizer-1 still fails)
* tests : fix wstring_convert
* convert : fix layer names
* llama : sync gguf-llama.cpp
* convert : update HF converter to new tokenizer voodoo magics
* llama : update tokenizer style
* convert-llama-h5-to-gguf.py : add token types
* constants.py : add token types
* gguf.py : add token types
* convert-llama-7b-pth-to-gguf.py : add token types
* gguf-llama.cpp : fix n_head_kv
* convert-llama-h5-to-gguf.py : add 70b gqa support
* gguf.py : add tensor data layout
* convert-llama-h5-to-gguf.py : add tensor data layout
* convert-llama-7b-pth-to-gguf.py : add tensor data layout
* gptneox-main.cpp : add tensor data layout
* convert-llama-h5-to-gguf.py : clarify the reverse permute
* llama : refactor model loading code (#2620)
* llama : style formatting + remove helper methods
* llama : fix quantization using gguf tool
* llama : simplify gguf_file_saver
* llama : fix method names
* llama : simplify write_header()
* llama : no need to pass full file loader to the file saver
just gguf_ctx
* llama : gguf_file_saver write I32
* llama : refactor tensor names (#2622)
* gguf: update tensor names searched in quantization
* gguf : define tensor names as constants
* gguf : initial write API (not tested yet)
* gguf : write to file API (not tested)
* gguf : initial write API ready + example
* gguf : fix header write
* gguf : fixes + simplify example + add ggml_nbytes_pad()
* gguf : minor
* llama : replace gguf_file_saver with new gguf write API
* gguf : streaming support when writing files
* gguf : remove obsolete write methods
* gguf : remove obsolete gguf_get_arr_xxx API
* llama : simplify gguf_file_loader
* llama : move hparams and vocab from gguf_file_loader to llama_model_loader
* llama : merge gguf-util.h in llama.cpp
* llama : reorder definitions in .cpp to match .h
* llama : minor simplifications
* llama : refactor llama_model_loader (WIP)
wip : remove ggml_ctx from llama_model_loader
wip : merge gguf_file_loader in llama_model_loader
* llama : fix shape prints
* llama : fix Windows build + fix norm_rms_eps key
* llama : throw error on missing KV pairs in model meta data
* llama : improve printing + log meta data
* llama : switch print order of meta data
---------
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
* gguf : deduplicate (#2629)
* gguf : better type names
* dedup : CPU + Metal is working
* ggml : fix warnings about unused results
* llama.cpp : fix line feed and compiler warning
* llama : fix strncpy warning + note token_to_str does not write null
* llama : restore the original load/save session implementation
Will migrate this to GGUF in the future
* convert-llama-h5-to-gguf.py : support alt ctx param name
* ggml : assert when using ggml_mul with non-F32 src1
* examples : dedup simple
---------
Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
* gguf.py : merge all files in gguf.py
* convert-new.py : pick #2427 for HF 70B support
* examples/gguf : no need to keep q option for quantization any more
* llama.cpp : print actual model size
* llama.cpp : use ggml_elements()
* convert-new.py : output gguf (#2635)
* convert-new.py : output gguf (WIP)
* convert-new.py : add gguf key-value pairs
* llama : add hparams.ctx_train + no longer print ftype
* convert-new.py : minor fixes
* convert-new.py : vocab-only option should work now
* llama : fix tokenizer to use llama_char_to_byte
* tests : add new ggml-vocab-llama.gguf
* convert-new.py : tensor name mapping
* convert-new.py : add map for skipping tensor serialization
* convert-new.py : convert script now works
* gguf.py : pick some of the refactoring from #2644
* convert-new.py : minor fixes
* convert.py : update to support GGUF output
* Revert "ci : disable CI temporary to not waste energy"
This reverts commit 7e82d25f40.
* convert.py : n_head_kv optional and .gguf file extension
* convert.py : better always have n_head_kv and default it to n_head
* llama : sync with recent PRs on master
* editorconfig : ignore models folder
ggml-ci
* ci : update ".bin" to ".gguf" extension
ggml-ci
* llama : fix llama_model_loader memory leak
* gptneox : move as a WIP example
* llama : fix lambda capture
ggml-ci
* ggml : fix bug in gguf_set_kv
ggml-ci
* common.h : .bin --> .gguf
* quantize-stats.cpp : .bin --> .gguf
* convert.py : fix HF tensor permuting / unpacking
ggml-ci
* llama.cpp : typo
* llama : throw error if gguf fails to init from file
ggml-ci
* llama : fix tensor name grepping during quantization
ggml-ci
* gguf.py : write tensors in a single pass (#2644)
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : style fixes in simple conversion script
* gguf : refactor gptneox conversion script
* gguf : rename h5 to hf (for HuggingFace)
* gguf : refactor pth to gguf conversion script
* gguf : rm file_type key and method
* gguf.py : fix vertical alignment
* gguf.py : indentation
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* convert-gptneox-hf-to-gguf.py : fixes
* gguf.py : gptneox mapping
* convert-llama-hf-to-gguf.py : fixes
* convert-llama-7b-pth-to-gguf.py : fixes
* ggml.h : reverse GGUF_MAGIC
* gguf.py : reverse GGUF_MAGIC
* test-tokenizer-0.cpp : fix warning
* llama.cpp : print kv general.name
* llama.cpp : get special token kv and linefeed token id
* llama : print number of tensors per type + print arch + style
* tests : update vocab file with new magic
* editorconfig : fix whitespaces
* llama : re-order functions
* llama : remove C++ API + reorganize common source in /common dir
* llama : minor API updates
* llama : avoid hardcoded special tokens
* llama : fix MPI build
ggml-ci
* llama : introduce enum llama_vocab_type + remove hardcoded string constants
* convert-falcon-hf-to-gguf.py : falcon HF --> gguf conversion, not tested
* falcon-main.cpp : falcon inference example
* convert-falcon-hf-to-gguf.py : remove extra kv
* convert-gptneox-hf-to-gguf.py : remove extra kv
* convert-llama-7b-pth-to-gguf.py : remove extra kv
* convert-llama-hf-to-gguf.py : remove extra kv
* gguf.py : fix for falcon 40b
* falcon-main.cpp : fix for falcon 40b
* convert-falcon-hf-to-gguf.py : update ref
* convert-falcon-hf-to-gguf.py : add tensor data layout
* cmpnct_gpt2bpe.hpp : fixes
* falcon-main.cpp : fixes
* gptneox-main.cpp : fixes
* cmpnct_gpt2bpe.hpp : remove non-general stuff
* Update examples/server/README.md
Co-authored-by: slaren <slarengh@gmail.com>
* cmpnct_gpt2bpe.hpp : cleanup
* convert-llama-hf-to-gguf.py : special tokens
* convert-llama-7b-pth-to-gguf.py : special tokens
* convert-permute-debug.py : permute debug print
* convert-permute-debug-master.py : permute debug for master
* convert-permute-debug.py : change permute type of attn_q
* convert.py : 70b model working (change attn_q permute)
* Delete convert-permute-debug-master.py
* Delete convert-permute-debug.py
* convert-llama-hf-to-gguf.py : fix attn_q permute
* gguf.py : fix rope scale kv
* convert-llama-hf-to-gguf.py : rope scale and added tokens
* convert-llama-7b-pth-to-gguf.py : rope scale and added tokens
* llama.cpp : use rope scale kv
* convert-llama-7b-pth-to-gguf.py : rope scale fix
* convert-llama-hf-to-gguf.py : rope scale fix
* py : fix whitespace
* gguf : add Python script to convert GGMLv3 LLaMA models to GGUF (#2682)
* First pass at converting GGMLv3 LLaMA models to GGUF
* Cleanups, better output during conversion
* Fix vocab space conversion logic
* More vocab conversion fixes
* Add description to converted GGUF files
* Improve help text, expand warning
* Allow specifying name and description for output GGUF
* Allow overriding vocab and hyperparams from original model metadata
* Use correct params override var name
* Fix wrong type size for Q8_K
Better handling of original style metadata
* Set default value for gguf add_tensor raw_shape KW arg
* llama : improve token type support (#2668)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* Improved tokenizer test
But does it work on MacOS?
* Improve token type support
- Added @klosax code to convert.py
- Improved token type support in vocabulary
* Exclude platform dependent tests
* More sentencepiece compatibility by eliminating magic numbers
* Restored accidentally removed comment
* llama : add API for token type
ggml-ci
* tests : use new tokenizer type API (#2692)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* Improved tokenizer test
But does it work on MacOS?
* Improve token type support
- Added @klosax code to convert.py
- Improved token type support in vocabulary
* Exclude platform dependent tests
* More sentencepiece compatibility by eliminating magic numbers
* Restored accidentally removed comment
* Improve commentary
* Use token type API in test-tokenizer-1.cpp
* py : cosmetics
* readme : add notice about new file format
ggml-ci
---------
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
Co-authored-by: goerch <jhr.walter@t-online.de>
Co-authored-by: slaren <slarengh@gmail.com>
Co-authored-by: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>
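
The read side of the GGUF C API introduced by these commits can be exercised with a small standalone program. The sketch below is illustrative only: gguf_init_from_file, gguf_find_key, gguf_get_kv_type and gguf_get_tensor_name are named in the commit messages above; the remaining gguf_* calls and the exact layout of struct gguf_init_params are assumed from the API added to ggml.h by this commit and may differ slightly.

// Minimal sketch: dump the metadata and tensor names of a GGUF file.
#include "ggml.h"

#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s model.gguf\n", argv[0]);
        return 1;
    }

    struct gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ NULL };
    struct gguf_context * ctx = gguf_init_from_file(argv[1], params);
    if (!ctx) {
        fprintf(stderr, "failed to read %s\n", argv[1]);
        return 1;
    }

    // key-value metadata
    for (int i = 0; i < gguf_get_n_kv(ctx); ++i) {
        printf("kv[%d]: %s (type %d)\n", i, gguf_get_key(ctx, i), (int) gguf_get_kv_type(ctx, i));
    }

    // look up a single key, e.g. the general.name that llama.cpp prints at load time
    const int idx = gguf_find_key(ctx, "general.name");
    if (idx >= 0) {
        printf("general.name = %s\n", gguf_get_val_str(ctx, idx));
    }

    // tensor info
    for (int i = 0; i < gguf_get_n_tensors(ctx); ++i) {
        printf("tensor[%d]: %s\n", i, gguf_get_tensor_name(ctx, i));
    }

    gguf_free(ctx);
    return 0;
}

The C++ source below is the common example helper code (gpt_params_parse and friends) as shipped at this commit.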
768 lines, 32 KiB, C++
#include "common.h"

#include <cassert>
#include <iostream>
#include <cstring>
#include <fstream>
#include <string>
#include <iterator>
#include <algorithm>
#include <sstream>
#include <unordered_set>
#include <regex>

#if defined(__APPLE__) && defined(__MACH__)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#include <fcntl.h>
#include <io.h>
#else
#include <sys/ioctl.h>
#include <unistd.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

int32_t get_num_physical_cores() {
#ifdef __linux__
    // enumerate the set of thread siblings, num entries is num cores
    std::unordered_set<std::string> siblings;
    for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
        std::ifstream thread_siblings("/sys/devices/system/cpu"
            + std::to_string(cpu) + "/topology/thread_siblings");
        if (!thread_siblings.is_open()) {
            break; // no more cpus
        }
        std::string line;
        if (std::getline(thread_siblings, line)) {
            siblings.insert(line);
        }
    }
    if (siblings.size() > 0) {
        return static_cast<int32_t>(siblings.size());
    }
#elif defined(__APPLE__) && defined(__MACH__)
    int32_t num_physical_cores;
    size_t len = sizeof(num_physical_cores);
    int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, NULL, 0);
    if (result == 0) {
        return num_physical_cores;
    }
    result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, NULL, 0);
    if (result == 0) {
        return num_physical_cores;
    }
#elif defined(_WIN32)
    //TODO: Implement
#endif
    unsigned int n_threads = std::thread::hardware_concurrency();
    return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
}

void process_escapes(std::string& input) {
    std::size_t input_len = input.length();
    std::size_t output_idx = 0;

    for (std::size_t input_idx = 0; input_idx < input_len; ++input_idx) {
        if (input[input_idx] == '\\' && input_idx + 1 < input_len) {
            switch (input[++input_idx]) {
                case 'n':  input[output_idx++] = '\n'; break;
                case 'r':  input[output_idx++] = '\r'; break;
                case 't':  input[output_idx++] = '\t'; break;
                case '\'': input[output_idx++] = '\''; break;
                case '\"': input[output_idx++] = '\"'; break;
                case '\\': input[output_idx++] = '\\'; break;
                default:   input[output_idx++] = '\\';
                           input[output_idx++] = input[input_idx]; break;
            }
        } else {
            input[output_idx++] = input[input_idx];
        }
    }

    input.resize(output_idx);
}

bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
    bool invalid_param = false;
    bool escape_prompt = false;
    std::string arg;
    gpt_params default_params;
    const std::string arg_prefix = "--";

    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }

        if (arg == "-s" || arg == "--seed") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.seed = std::stoul(argv[i]);
        } else if (arg == "-t" || arg == "--threads") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
            if (params.n_threads <= 0) {
                params.n_threads = std::thread::hardware_concurrency();
            }
        } else if (arg == "-p" || arg == "--prompt") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.prompt = argv[i];
        } else if (arg == "-e") {
            escape_prompt = true;
        } else if (arg == "--prompt-cache") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.path_prompt_cache = argv[i];
        } else if (arg == "--prompt-cache-all") {
            params.prompt_cache_all = true;
        } else if (arg == "--prompt-cache-ro") {
            params.prompt_cache_ro = true;
        } else if (arg == "-f" || arg == "--file") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::ifstream file(argv[i]);
            if (!file) {
                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
                invalid_param = true;
                break;
            }
            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
            if (params.prompt.back() == '\n') {
                params.prompt.pop_back();
            }
        } else if (arg == "-n" || arg == "--n-predict") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_predict = std::stoi(argv[i]);
        } else if (arg == "--top-k") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.top_k = std::stoi(argv[i]);
        } else if (arg == "-c" || arg == "--ctx-size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        } else if (arg == "--rope-freq-base") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.rope_freq_base = std::stof(argv[i]);
        } else if (arg == "--rope-freq-scale") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.rope_freq_scale = std::stof(argv[i]);
        } else if (arg == "--rope-scale") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.rope_freq_scale = 1.0f/std::stof(argv[i]);
        } else if (arg == "--memory-f32") {
            params.memory_f16 = false;
        } else if (arg == "--top-p") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.top_p = std::stof(argv[i]);
        } else if (arg == "--temp") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.temp = std::stof(argv[i]);
        } else if (arg == "--tfs") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.tfs_z = std::stof(argv[i]);
        } else if (arg == "--typical") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.typical_p = std::stof(argv[i]);
        } else if (arg == "--repeat-last-n") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.repeat_last_n = std::stoi(argv[i]);
        } else if (arg == "--repeat-penalty") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.repeat_penalty = std::stof(argv[i]);
        } else if (arg == "--frequency-penalty") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.frequency_penalty = std::stof(argv[i]);
        } else if (arg == "--presence-penalty") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.presence_penalty = std::stof(argv[i]);
        } else if (arg == "--mirostat") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.mirostat = std::stoi(argv[i]);
        } else if (arg == "--mirostat-lr") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.mirostat_eta = std::stof(argv[i]);
        } else if (arg == "--mirostat-ent") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.mirostat_tau = std::stof(argv[i]);
        } else if (arg == "--cfg-negative-prompt") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.cfg_negative_prompt = argv[i];
        } else if (arg == "--cfg-negative-prompt-file") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::ifstream file(argv[i]);
            if (!file) {
                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
                invalid_param = true;
                break;
            }
            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.cfg_negative_prompt));
            if (params.cfg_negative_prompt.back() == '\n') {
                params.cfg_negative_prompt.pop_back();
            }
        } else if (arg == "--cfg-scale") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.cfg_scale = std::stof(argv[i]);
        } else if (arg == "-b" || arg == "--batch-size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        } else if (arg == "--keep") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_keep = std::stoi(argv[i]);
        } else if (arg == "--chunks") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_chunks = std::stoi(argv[i]);
        } else if (arg == "-m" || arg == "--model") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        } else if (arg == "-a" || arg == "--alias") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        } else if (arg == "--lora") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.lora_adapter = argv[i];
            params.use_mmap = false;
        } else if (arg == "--lora-base") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        } else if (arg == "-i" || arg == "--interactive") {
            params.interactive = true;
        } else if (arg == "--embedding") {
            params.embedding = true;
        } else if (arg == "--interactive-first") {
            params.interactive_first = true;
        } else if (arg == "-ins" || arg == "--instruct") {
            params.instruct = true;
        } else if (arg == "--multiline-input") {
            params.multiline_input = true;
        } else if (arg == "--simple-io") {
            params.simple_io = true;
        } else if (arg == "--color") {
            params.use_color = true;
        } else if (arg == "--mlock") {
            params.use_mlock = true;
        } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
            params.n_gpu_layers = std::stoi(argv[i]);
#else
            fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
            fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
#endif
        } else if (arg == "--main-gpu" || arg == "-mg") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            params.main_gpu = std::stoi(argv[i]);
#else
            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n");
#endif
        } else if (arg == "--tensor-split" || arg == "-ts") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

            for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
                if (i < split_arg.size()) {
                    params.tensor_split[i] = std::stof(split_arg[i]);
                } else {
                    params.tensor_split[i] = 0.0f;
                }
            }
#else
            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
#endif // GGML_USE_CUBLAS
        } else if (arg == "--mul-mat-q" || arg == "-mmq") {
#ifdef GGML_USE_CUBLAS
            params.mul_mat_q = true;
#else
            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n");
#endif // GGML_USE_CUBLAS
        } else if (arg == "--low-vram" || arg == "-lv") {
#ifdef GGML_USE_CUBLAS
            params.low_vram = true;
#else
            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
#endif // GGML_USE_CUBLAS
        } else if (arg == "--no-mmap") {
            params.use_mmap = false;
        } else if (arg == "--mtest") {
            params.mem_test = true;
        } else if (arg == "--numa") {
            params.numa = true;
        } else if (arg == "--export") {
            params.export_cgraph = true;
        } else if (arg == "--verbose-prompt") {
            params.verbose_prompt = true;
        } else if (arg == "-r" || arg == "--reverse-prompt") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.antiprompt.push_back(argv[i]);
        } else if (arg == "--perplexity") {
            params.perplexity = true;
        } else if (arg == "--hellaswag") {
            params.hellaswag = true;
        } else if (arg == "--hellaswag-tasks") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.hellaswag_tasks = std::stoi(argv[i]);
        } else if (arg == "--ignore-eos") {
            params.ignore_eos = true;
        } else if (arg == "--no-penalize-nl") {
            params.penalize_nl = false;
        } else if (arg == "-l" || arg == "--logit-bias") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::stringstream ss(argv[i]);
            llama_token key;
            char sign;
            std::string value_str;
            try {
                if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
                    params.logit_bias[key] = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
                } else {
                    throw std::exception();
                }
            } catch (const std::exception&) {
                invalid_param = true;
                break;
            }
        } else if (arg == "-h" || arg == "--help") {
            gpt_print_usage(argc, argv, default_params);
            exit(0);
        } else if (arg == "--random-prompt") {
            params.random_prompt = true;
        } else if (arg == "--in-prefix-bos") {
            params.input_prefix_bos = true;
        } else if (arg == "--in-prefix") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.input_prefix = argv[i];
        } else if (arg == "--in-suffix") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.input_suffix = argv[i];
        } else if (arg == "--grammar") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.grammar = argv[i];
        } else if (arg == "--grammar-file") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::ifstream file(argv[i]);
            if (!file) {
                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
                invalid_param = true;
                break;
            }
            std::copy(
                std::istreambuf_iterator<char>(file),
                std::istreambuf_iterator<char>(),
                std::back_inserter(params.grammar)
            );
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            gpt_print_usage(argc, argv, default_params);
            exit(1);
        }
    }
    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        gpt_print_usage(argc, argv, default_params);
        exit(1);
    }
    if (params.prompt_cache_all &&
            (params.interactive || params.interactive_first ||
             params.instruct)) {
        fprintf(stderr, "error: --prompt-cache-all not supported in interactive mode yet\n");
        gpt_print_usage(argc, argv, default_params);
        exit(1);
    }

    if (escape_prompt) {
        process_escapes(params.prompt);
        process_escapes(params.input_prefix);
        process_escapes(params.input_suffix);
    }

    return true;
}

void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
    fprintf(stdout, "usage: %s [options]\n", argv[0]);
    fprintf(stdout, "\n");
    fprintf(stdout, "options:\n");
    fprintf(stdout, "  -h, --help            show this help message and exit\n");
    fprintf(stdout, "  -i, --interactive     run in interactive mode\n");
    fprintf(stdout, "  --interactive-first   run in interactive mode and wait for input right away\n");
    fprintf(stdout, "  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
    fprintf(stdout, "  --multiline-input     allows you to write or paste multiple lines without ending each in '\\'\n");
    fprintf(stdout, "  -r PROMPT, --reverse-prompt PROMPT\n");
    fprintf(stdout, "                        halt generation at PROMPT, return control in interactive mode\n");
    fprintf(stdout, "                        (can be specified more than once for multiple prompts).\n");
    fprintf(stdout, "  --color               colorise output to distinguish prompt and user input from generations\n");
    fprintf(stdout, "  -s SEED, --seed SEED  RNG seed (default: -1, use random seed for < 0)\n");
    fprintf(stdout, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
    fprintf(stdout, "  -p PROMPT, --prompt PROMPT\n");
    fprintf(stdout, "                        prompt to start generation with (default: empty)\n");
    fprintf(stdout, "  -e                    process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
    fprintf(stdout, "  --prompt-cache FNAME  file to cache prompt state for faster startup (default: none)\n");
    fprintf(stdout, "  --prompt-cache-all    if specified, saves user input and generations to cache as well.\n");
    fprintf(stdout, "                        not supported with --interactive or other interactive options\n");
    fprintf(stdout, "  --prompt-cache-ro     if specified, uses the prompt cache but does not update it.\n");
    fprintf(stdout, "  --random-prompt       start with a randomized prompt.\n");
    fprintf(stdout, "  --in-prefix-bos       prefix BOS to user inputs, preceding the `--in-prefix` string\n");
    fprintf(stdout, "  --in-prefix STRING    string to prefix user inputs with (default: empty)\n");
    fprintf(stdout, "  --in-suffix STRING    string to suffix after user inputs with (default: empty)\n");
    fprintf(stdout, "  -f FNAME, --file FNAME\n");
    fprintf(stdout, "                        prompt file to start generation.\n");
    fprintf(stdout, "  -n N, --n-predict N   number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)\n", params.n_predict);
    fprintf(stdout, "  -c N, --ctx-size N    size of the prompt context (default: %d)\n", params.n_ctx);
    fprintf(stdout, "  -b N, --batch-size N  batch size for prompt processing (default: %d)\n", params.n_batch);
    fprintf(stdout, "  --top-k N             top-k sampling (default: %d, 0 = disabled)\n", params.top_k);
    fprintf(stdout, "  --top-p N             top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p);
    fprintf(stdout, "  --tfs N               tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z);
    fprintf(stdout, "  --typical N           locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)params.typical_p);
    fprintf(stdout, "  --repeat-last-n N     last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", params.repeat_last_n);
    fprintf(stdout, "  --repeat-penalty N    penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)params.repeat_penalty);
    fprintf(stdout, "  --presence-penalty N  repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)params.presence_penalty);
    fprintf(stdout, "  --frequency-penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)params.frequency_penalty);
    fprintf(stdout, "  --mirostat N          use Mirostat sampling.\n");
    fprintf(stdout, "                        Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n");
    fprintf(stdout, "                        (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", params.mirostat);
    fprintf(stdout, "  --mirostat-lr N       Mirostat learning rate, parameter eta (default: %.1f)\n", (double)params.mirostat_eta);
    fprintf(stdout, "  --mirostat-ent N      Mirostat target entropy, parameter tau (default: %.1f)\n", (double)params.mirostat_tau);
    fprintf(stdout, "  -l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS\n");
    fprintf(stdout, "                        modifies the likelihood of token appearing in the completion,\n");
    fprintf(stdout, "                        i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n");
    fprintf(stdout, "                        or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n");
    fprintf(stdout, "  --grammar GRAMMAR     BNF-like grammar to constrain generations (see samples in grammars/ dir)\n");
    fprintf(stdout, "  --grammar-file FNAME  file to read grammar from\n");
    fprintf(stdout, "  --cfg-negative-prompt PROMPT\n");
    fprintf(stdout, "                        negative prompt to use for guidance. (default: empty)\n");
    fprintf(stdout, "  --cfg-negative-prompt-file FNAME\n");
    fprintf(stdout, "                        negative prompt file to use for guidance. (default: empty)\n");
    fprintf(stdout, "  --cfg-scale N         strength of guidance (default: %f, 1.0 = disable)\n", params.cfg_scale);
    fprintf(stdout, "  --rope-scale N        RoPE context linear scaling factor, inverse of --rope-freq-scale (default: %g)\n", 1.0f/params.rope_freq_scale);
    fprintf(stdout, "  --rope-freq-base N    RoPE base frequency, used by NTK-aware scaling (default: %.1f)\n", params.rope_freq_base);
    fprintf(stdout, "  --rope-freq-scale N   RoPE frequency linear scaling factor, inverse of --rope-scale (default: %g)\n", params.rope_freq_scale);
    fprintf(stdout, "  --ignore-eos          ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n");
    fprintf(stdout, "  --no-penalize-nl      do not penalize newline token\n");
    fprintf(stdout, "  --memory-f32          use f32 instead of f16 for memory key+value (default: disabled)\n");
    fprintf(stdout, "                        not recommended: doubles context memory required and no measurable increase in quality\n");
    fprintf(stdout, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
    fprintf(stdout, "  --perplexity          compute perplexity over each ctx window of the prompt\n");
    fprintf(stdout, "  --hellaswag           compute HellaSwag score over random tasks from datafile supplied with -f\n");
    fprintf(stdout, "  --hellaswag-tasks N   number of tasks to use when computing the HellaSwag score (default: %zu)\n", params.hellaswag_tasks);
    fprintf(stdout, "  --keep N              number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
    fprintf(stdout, "  --chunks N            max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
    if (llama_mlock_supported()) {
        fprintf(stdout, "  --mlock               force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_mmap_supported()) {
        fprintf(stdout, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
    fprintf(stdout, "  --numa                attempt optimizations that help on some NUMA systems\n");
    fprintf(stdout, "                        if run without this previously, it is recommended to drop the system page cache before using this\n");
    fprintf(stdout, "                        see https://github.com/ggerganov/llama.cpp/issues/1437\n");
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
    fprintf(stdout, "  -ngl N, --n-gpu-layers N\n");
    fprintf(stdout, "                        number of layers to store in VRAM\n");
    fprintf(stdout, "  -ts SPLIT --tensor-split SPLIT\n");
    fprintf(stdout, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
    fprintf(stdout, "  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n");
    fprintf(stdout, "  -lv, --low-vram       don't allocate VRAM scratch buffer\n");
    fprintf(stdout, "  -mmq, --mul-mat-q     use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n");
    fprintf(stdout, "                        Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n");
    fprintf(stdout, "                        is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n");
#endif
    fprintf(stdout, "  --mtest               compute maximum memory usage\n");
    fprintf(stdout, "  --export              export the computation graph to 'llama.ggml'\n");
    fprintf(stdout, "  --verbose-prompt      print prompt before generation\n");
    fprintf(stderr, "  --simple-io           use basic IO for better compatibility in subprocesses and limited consoles\n");
    fprintf(stdout, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
    fprintf(stdout, "  --lora-base FNAME     optional model to use as a base for the layers modified by the LoRA adapter\n");
    fprintf(stdout, "  -m FNAME, --model FNAME\n");
    fprintf(stdout, "                        model path (default: %s)\n", params.model.c_str());
    fprintf(stdout, "\n");
}

std::string gpt_random_prompt(std::mt19937 & rng) {
    const int r = rng() % 10;
    switch (r) {
        case 0: return "So";
        case 1: return "Once upon a time";
        case 2: return "When";
        case 3: return "The";
        case 4: return "After";
        case 5: return "If";
        case 6: return "import";
        case 7: return "He";
        case 8: return "She";
        case 9: return "They";
        default: return "To";
    }

    return "The";
}

//
// Model utils
//

struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) {
    auto lparams = llama_context_default_params();

    lparams.n_ctx           = params.n_ctx;
    lparams.n_batch         = params.n_batch;
    lparams.n_gpu_layers    = params.n_gpu_layers;
    lparams.main_gpu        = params.main_gpu;
    lparams.tensor_split    = params.tensor_split;
    lparams.low_vram        = params.low_vram;
    lparams.mul_mat_q       = params.mul_mat_q;
    lparams.seed            = params.seed;
    lparams.f16_kv          = params.memory_f16;
    lparams.use_mmap        = params.use_mmap;
    lparams.use_mlock       = params.use_mlock;
    lparams.logits_all      = params.perplexity;
    lparams.embedding       = params.embedding;
    lparams.rope_freq_base  = params.rope_freq_base;
    lparams.rope_freq_scale = params.rope_freq_scale;

    return lparams;
}

std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params) {
    auto lparams = llama_context_params_from_gpt_params(params);

    llama_model * model = llama_load_model_from_file(params.model.c_str(), lparams);
    if (model == NULL) {
        fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
        return std::make_tuple(nullptr, nullptr);
    }

    llama_context * lctx = llama_new_context_with_model(model, lparams);
    if (lctx == NULL) {
        fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str());
        llama_free_model(model);
        return std::make_tuple(nullptr, nullptr);
    }

    if (!params.lora_adapter.empty()) {
        int err = llama_model_apply_lora_from_file(model,
                                                   params.lora_adapter.c_str(),
                                                   params.lora_base.empty() ? NULL : params.lora_base.c_str(),
                                                   params.n_threads);
        if (err != 0) {
            fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
            llama_free(lctx);
            llama_free_model(model);
            return std::make_tuple(nullptr, nullptr);
        }
    }

    if (params.ignore_eos) {
        params.logit_bias[llama_token_eos(lctx)] = -INFINITY;
    }

    return std::make_tuple(model, lctx);
}

//
// Vocab utils
//

std::vector<llama_token> llama_tokenize(
        struct llama_context * ctx,
        const std::string & text,
        bool add_bos) {
    // upper limit for the number of tokens
    int n_tokens = text.length() + add_bos;
    std::vector<llama_token> result(n_tokens);
    n_tokens = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
    if (n_tokens < 0) {
        // a negative return value is the negated number of tokens actually needed - resize and tokenize again
        result.resize(-n_tokens);
        int check = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }
    return result;
}

std::string llama_token_to_str(const struct llama_context * ctx, llama_token token) {
    std::vector<char> result(8, 0);
    const int n_tokens = llama_token_to_str(ctx, token, result.data(), result.size());
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        int check = llama_token_to_str(ctx, token, result.data(), result.size());
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }

    return std::string(result.data(), result.size());
}

std::vector<llama_token> llama_tokenize_bpe(
        struct llama_context * ctx,
        const std::string & text,
        bool add_bos) {
    int n_tokens = text.length() + add_bos;
    std::vector<llama_token> result(n_tokens);
    n_tokens = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos);
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        int check = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos);
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }
    return result;
}

std::string llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token) {
    std::vector<char> result(8, 0);
    const int n_tokens = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        const int check = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }

    return std::string(result.data(), result.size());
}
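
For context, here is a minimal sketch of how the helpers defined above are typically combined in an example program: parse the command line, load the model and context, then tokenize and echo the prompt. It relies only on functions visible in this file, plus llama_backend_init/llama_backend_free, which are assumed to be available from llama.h at this commit.

#include "common.h"
#include "llama.h"

#include <cstdio>
#include <tuple>
#include <vector>

int main(int argc, char ** argv) {
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

    llama_backend_init(params.numa);

    llama_model   * model;
    llama_context * ctx;
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == NULL || ctx == NULL) {
        return 1;
    }

    // tokenize the prompt with a leading BOS token and echo the pieces
    const std::vector<llama_token> tokens = llama_tokenize(ctx, params.prompt, true);
    for (size_t i = 0; i < tokens.size(); ++i) {
        printf("%6d -> '%s'\n", tokens[i], llama_token_to_str(ctx, tokens[i]).c_str());
    }

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();

    return 0;
}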