6381d4e110
* gguf : first API pass
* gguf : read header + meta data
* gguf : read tensor info
* gguf : initial model loading - not tested
* gguf : add gguf_get_tensor_name()
* gguf : do not support passing existing ggml_context to gguf_init
* gguf : simplify gguf_get_val
* gguf : gguf.c is now part of ggml.c
* gguf : read / write sample models
* gguf : add comments
* refactor : reduce code duplication and better API (#2415)
* gguf : expose the gguf_type enum through the API for now
* gguf : add array support
* gguf.py : some code style changes
* convert.py : start a new simplified implementation by removing old stuff
* convert.py : remove GGML vocab + other obsolete stuff
* GGUF : write tensor (#2426)
* WIP: Write tensor
* GGUF : Support writing tensors in Python
* refactor : rm unused import and upd todos
* fix : fix errors upd writing example
* rm example.gguf
* gitignore *.gguf
* undo formatting
* gguf : add gguf_find_key (#2438)
* gguf.cpp : find key example
* ggml.h : add gguf_find_key
* ggml.c : add gguf_find_key
* gguf : fix writing tensors
* gguf : do not hardcode tensor names to read
* gguf : write sample tensors to read
* gguf : add tokenization constants
* quick and dirty conversion example
* gguf : fix writing gguf arrays
* gguf : write tensors one by one and code reuse
* gguf : fix writing gguf arrays
* gguf : write tensors one by one
* gguf : write tensors one by one
* gguf : write tokenizer data
* gguf : upd gguf conversion script
* Update convert-llama-h5-to-gguf.py
* gguf : handle already encoded string
* ggml.h : get array str and f32
* ggml.c : get arr str and f32
* gguf.py : support any type
* Update convert-llama-h5-to-gguf.py
* gguf : fix set is not subscriptable
* gguf : update convert-llama-h5-to-gguf.py
* constants.py : add layer norm eps
* gguf.py : add layer norm eps and merges
* ggml.h : increase GGML_MAX_NAME to 64
* ggml.c : add gguf_get_arr_n
* Update convert-llama-h5-to-gguf.py
* add gptneox gguf example
* Makefile : add gptneox gguf example
* Update convert-llama-h5-to-gguf.py
* add gptneox gguf example
* Update convert-llama-h5-to-gguf.py
* Update convert-gptneox-h5-to-gguf.py
* Update convert-gptneox-h5-to-gguf.py
* Update convert-llama-h5-to-gguf.py
* gguf : support custom alignment value
* gguf : fix typo in function call
* gguf : mmap tensor data example
* fix : update convert-llama-h5-to-gguf.py
* Update convert-llama-h5-to-gguf.py
* convert-gptneox-h5-to-gguf.py : Special tokens
* gptneox-main.cpp : special tokens
* Update gptneox-main.cpp
* constants.py : special tokens
* gguf.py : accumulate kv and tensor info data + special tokens
* convert-gptneox-h5-to-gguf.py : accumulate kv and ti + special tokens
* gguf : gguf counterpart of llama-util.h
* gguf-util.h : update note
* convert-llama-h5-to-gguf.py : accumulate kv / ti + special tokens
* convert-llama-h5-to-gguf.py : special tokens
* Delete gptneox-common.cpp
* Delete gptneox-common.h
* convert-gptneox-h5-to-gguf.py : gpt2bpe tokenizer
* gptneox-main.cpp : gpt2 bpe tokenizer
* gpt2 bpe tokenizer (handles merges and unicode)
* Makefile : remove gptneox-common
* gguf.py : bytesarray for gpt2bpe tokenizer
* cmpnct_gpt2bpe.hpp : comments
* gguf.py : use custom alignment if present
* gguf : minor stuff
* Update gptneox-main.cpp
* map tensor names
* convert-gptneox-h5-to-gguf.py : map tensor names
* convert-llama-h5-to-gguf.py : map tensor names
* gptneox-main.cpp : map tensor names
* gguf : start implementing libllama in GGUF (WIP)
* gguf : start implementing libllama in GGUF (WIP)
* rm binary committed by mistake
* upd .gitignore
* gguf : calculate n_mult
* gguf : inference with 7B model working (WIP)
* gguf : rm deprecated function
* gguf : start implementing gguf_file_saver (WIP)
* gguf : start implementing gguf_file_saver (WIP)
* gguf : start implementing gguf_file_saver (WIP)
* gguf : add gguf_get_kv_type
* gguf : add gguf_get_kv_type
* gguf : write metadata in gguf_file_saver (WIP)
* gguf : write metadata in gguf_file_saver (WIP)
* gguf : write metadata in gguf_file_saver
* gguf : rm references to old file formats
* gguf : shorter name for member variable
* gguf : rm redundant method
* gguf : get rid of n_mult, read n_ff from file
* Update gguf_tensor_map.py
* Update gptneox-main.cpp
* gguf : rm references to old file magics
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : quantization is working
* gguf : proper closing of file
* gguf.py : no need to convert tensors twice
* convert-gptneox-h5-to-gguf.py : no need to convert tensors twice
* convert-llama-h5-to-gguf.py : no need to convert tensors twice
* convert-gptneox-h5-to-gguf.py : simplify nbytes
* convert-llama-h5-to-gguf.py : simplify nbytes
* gptneox-main.cpp : n_layer --> n_block
* constants.py : n_layer --> n_block
* gguf.py : n_layer --> n_block
* convert-gptneox-h5-to-gguf.py : n_layer --> n_block
* convert-llama-h5-to-gguf.py : n_layer --> n_block
* gptneox-main.cpp : n_layer --> n_block
* Update gguf_tensor_map.py
* convert-gptneox-h5-to-gguf.py : load model in parts to save memory
* convert-llama-h5-to-gguf.py : load model in parts to save memory
* convert : write more metadata for LLaMA
* convert : rm quantization version
* convert-gptneox-h5-to-gguf.py : add file_type key
* gptneox-main.cpp : add file_type key
* fix conflicts
* gguf : add todos and comments
* convert-gptneox-h5-to-gguf.py : tensor name map changes
* Create gguf_namemap.py : tensor name map changes
* Delete gguf_tensor_map.py
* gptneox-main.cpp : tensor name map changes
* convert-llama-h5-to-gguf.py : fixes
* gguf.py : don't add empty strings
* simple : minor style changes
* gguf : use UNIX line ending
* Create convert-llama-7b-pth-to-gguf.py
* llama : sync gguf-llama.cpp with latest llama.cpp (#2608)
* llama : sync gguf-llama.cpp with latest llama.cpp
* minor : indentation + assert
* llama : refactor gguf_buffer and gguf_ctx_buffer
* llama : minor
* gitignore : add gptneox-main
* llama : tokenizer fixes (#2549)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* convert : update convert-new.py with tokenizer fixes (#2614)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* llama : sync gguf-llama with llama (#2613)
* llama : sync gguf-llama with llama
* tests : fix build + warnings (test-tokenizer-1 still fails)
* tests : fix wstring_convert
* convert : fix layer names
* llama : sync gguf-llama.cpp
* convert : update HF converter to new tokenizer voodoo magics
* llama : update tokenizer style
* convert-llama-h5-to-gguf.py : add token types
* constants.py : add token types
* gguf.py : add token types
* convert-llama-7b-pth-to-gguf.py : add token types
* gguf-llama.cpp : fix n_head_kv
* convert-llama-h5-to-gguf.py : add 70b gqa support
* gguf.py : add tensor data layout
* convert-llama-h5-to-gguf.py : add tensor data layout
* convert-llama-7b-pth-to-gguf.py : add tensor data layout
* gptneox-main.cpp : add tensor data layout
* convert-llama-h5-to-gguf.py : clarify the reverse permute
* llama : refactor model loading code (#2620)
* llama : style formatting + remove helper methods
* llama : fix quantization using gguf tool
* llama : simplify gguf_file_saver
* llama : fix method names
* llama : simplify write_header()
* llama : no need to pass full file loader to the file saver
just gguf_ctx
* llama : gguf_file_saver write I32
* llama : refactor tensor names (#2622)
* gguf: update tensor names searched in quantization
* gguf : define tensor names as constants
* gguf : initial write API (not tested yet)
* gguf : write to file API (not tested)
* gguf : initial write API ready + example
* gguf : fix header write
* gguf : fixes + simplify example + add ggml_nbytes_pad()
* gguf : minor
* llama : replace gguf_file_saver with new gguf write API
* gguf : streaming support when writing files
* gguf : remove obsolete write methods
* gguf : remove obsolete gguf_get_arr_xxx API
* llama : simplify gguf_file_loader
* llama : move hparams and vocab from gguf_file_loader to llama_model_loader
* llama : merge gguf-util.h in llama.cpp
* llama : reorder definitions in .cpp to match .h
* llama : minor simplifications
* llama : refactor llama_model_loader (WIP)
wip : remove ggml_ctx from llama_model_loader
wip : merge gguf_file_loader in llama_model_loader
* llama : fix shape prints
* llama : fix Windows build + fix norm_rms_eps key
* llama : throw error on missing KV pairs in model meta data
* llama : improve printing + log meta data
* llama : switch print order of meta data
---------
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
* gguf : deduplicate (#2629)
* gguf : better type names
* dedup : CPU + Metal is working
* ggml : fix warnings about unused results
* llama.cpp : fix line feed and compiler warning
* llama : fix strncpy warning + note token_to_str does not write null
* llama : restore the original load/save session implementation
Will migrate this to GGUF in the future
* convert-llama-h5-to-gguf.py : support alt ctx param name
* ggml : assert when using ggml_mul with non-F32 src1
* examples : dedup simple
---------
Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
* gguf.py : merge all files in gguf.py
* convert-new.py : pick #2427 for HF 70B support
* examples/gguf : no need to keep q option for quantization any more
* llama.cpp : print actual model size
* llama.cpp : use ggml_elements()
* convert-new.py : output gguf (#2635)
* convert-new.py : output gguf (WIP)
* convert-new.py : add gguf key-value pairs
* llama : add hparams.ctx_train + no longer print ftype
* convert-new.py : minor fixes
* convert-new.py : vocab-only option should work now
* llama : fix tokenizer to use llama_char_to_byte
* tests : add new ggml-vocab-llama.gguf
* convert-new.py : tensor name mapping
* convert-new.py : add map for skipping tensor serialization
* convert-new.py : convert script now works
* gguf.py : pick some of the refactoring from #2644
* convert-new.py : minor fixes
* convert.py : update to support GGUF output
* Revert "ci : disable CI temporary to not waste energy"
This reverts commit 7e82d25f40.
* convert.py : n_head_kv optional and .gguf file extension
* convert.py : better always have n_head_kv and default it to n_head
* llama : sync with recent PRs on master
* editorconfig : ignore models folder
ggml-ci
* ci : update ".bin" to ".gguf" extension
ggml-ci
* llama : fix llama_model_loader memory leak
* gptneox : move as a WIP example
* llama : fix lambda capture
ggml-ci
* ggml : fix bug in gguf_set_kv
ggml-ci
* common.h : .bin --> .gguf
* quantize-stats.cpp : .bin --> .gguf
* convert.py : fix HF tensor permuting / unpacking
ggml-ci
* llama.cpp : typo
* llama : throw error if gguf fails to init from file
ggml-ci
* llama : fix tensor name grepping during quantization
ggml-ci
* gguf.py : write tensors in a single pass (#2644)
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : style fixes in simple conversion script
* gguf : refactor gptneox conversion script
* gguf : rename h5 to hf (for HuggingFace)
* gguf : refactor pth to gguf conversion script
* gguf : rm file_type key and method
* gguf.py : fix vertical alignment
* gguf.py : indentation
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* convert-gptneox-hf-to-gguf.py : fixes
* gguf.py : gptneox mapping
* convert-llama-hf-to-gguf.py : fixes
* convert-llama-7b-pth-to-gguf.py : fixes
* ggml.h : reverse GGUF_MAGIC
* gguf.py : reverse GGUF_MAGIC
* test-tokenizer-0.cpp : fix warning
* llama.cpp : print kv general.name
* llama.cpp : get special token kv and linefeed token id
* llama : print number of tensors per type + print arch + style
* tests : update vocab file with new magic
* editorconfig : fix whitespaces
* llama : re-order functions
* llama : remove C++ API + reorganize common source in /common dir
* llama : minor API updates
* llama : avoid hardcoded special tokens
* llama : fix MPI build
ggml-ci
* llama : introduce enum llama_vocab_type + remove hardcoded string constants
* convert-falcon-hf-to-gguf.py : falcon HF --> gguf conversion, not tested
* falcon-main.cpp : falcon inference example
* convert-falcon-hf-to-gguf.py : remove extra kv
* convert-gptneox-hf-to-gguf.py : remove extra kv
* convert-llama-7b-pth-to-gguf.py : remove extra kv
* convert-llama-hf-to-gguf.py : remove extra kv
* gguf.py : fix for falcon 40b
* falcon-main.cpp : fix for falcon 40b
* convert-falcon-hf-to-gguf.py : update ref
* convert-falcon-hf-to-gguf.py : add tensor data layout
* cmpnct_gpt2bpe.hpp : fixes
* falcon-main.cpp : fixes
* gptneox-main.cpp : fixes
* cmpnct_gpt2bpe.hpp : remove non-general stuff
* Update examples/server/README.md
Co-authored-by: slaren <slarengh@gmail.com>
* cmpnct_gpt2bpe.hpp : cleanup
* convert-llama-hf-to-gguf.py : special tokens
* convert-llama-7b-pth-to-gguf.py : special tokens
* convert-permute-debug.py : permute debug print
* convert-permute-debug-master.py : permute debug for master
* convert-permute-debug.py : change permute type of attn_q
* convert.py : 70b model working (change attn_q permute)
* Delete convert-permute-debug-master.py
* Delete convert-permute-debug.py
* convert-llama-hf-to-gguf.py : fix attn_q permute
* gguf.py : fix rope scale kv
* convert-llama-hf-to-gguf.py : rope scale and added tokens
* convert-llama-7b-pth-to-gguf.py : rope scale and added tokens
* llama.cpp : use rope scale kv
* convert-llama-7b-pth-to-gguf.py : rope scale fix
* convert-llama-hf-to-gguf.py : rope scale fix
* py : fix whitespace
* gguf : add Python script to convert GGMLv3 LLaMA models to GGUF (#2682)
* First pass at converting GGMLv3 LLaMA models to GGUF
* Cleanups, better output during conversion
* Fix vocab space conversion logic
* More vocab conversion fixes
* Add description to converted GGUF files
* Improve help text, expand warning
* Allow specifying name and description for output GGUF
* Allow overriding vocab and hyperparams from original model metadata
* Use correct params override var name
* Fix wrong type size for Q8_K
Better handling of original style metadata
* Set default value for gguf add_tensor raw_shape KW arg
* llama : improve token type support (#2668)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* Improved tokenizer test
But does it work on MacOS?
* Improve token type support
- Added @klosax code to convert.py
- Improved token type support in vocabulary
* Exclude platform dependent tests
* More sentencepiece compatibility by eliminating magic numbers
* Restored accidentally removed comment
* llama : add API for token type
ggml-ci
* tests : use new tokenizer type API (#2692)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* Improved tokenizer test
But does it work on MacOS?
* Improve token type support
- Added @klosax code to convert.py
- Improved token type support in vocabulary
* Exclude platform dependent tests
* More sentencepiece compatibility by eliminating magic numbers
* Restored accidentally removed comment
* Improve commentary
* Use token type API in test-tokenizer-1.cpp
* py : cosmetics
* readme : add notice about new file format
ggml-ci
---------
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
Co-authored-by: goerch <jhr.walter@t-online.de>
Co-authored-by: slaren <slarengh@gmail.com>
Co-authored-by: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>
424 lines
17 KiB
C++
#include "grammar-parser.h"
|
|
#include <cstdint>
|
|
#include <cwchar>
|
|
#include <string>
|
|
#include <utility>
|
|
#include <stdexcept>
|
|
#include <exception>
|
|
|
|
namespace grammar_parser {
|
|
// NOTE: assumes valid utf8 (but checks for overrun)
|
|
// copied from llama.cpp
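    // The high nibble of the lead byte selects the sequence length:
    // 0x0-0x7 -> 1 byte (ASCII), 0x8-0xB -> 1 (stray continuation byte, consumed as-is),
    // 0xC-0xD -> 2 bytes, 0xE -> 3 bytes, 0xF -> 4 bytes.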
    std::pair<uint32_t, const char *> decode_utf8(const char * src) {
        static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
        uint8_t  first_byte = static_cast<uint8_t>(*src);
        uint8_t  highbits   = first_byte >> 4;
        int      len        = lookup[highbits];
        uint8_t  mask       = (1 << (8 - len)) - 1;
        uint32_t value      = first_byte & mask;
        const char * end    = src + len; // may overrun!
        const char * pos    = src + 1;
        for ( ; pos < end && *pos; pos++) {
            value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
        }
        return std::make_pair(value, pos);
    }
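
    // Returns the id for a rule name, interning it on first use; the insert()
    // below is a no-op when the name is already known. generate_symbol_id()
    // mints a fresh "<base_name>_<id>" entry for synthesized sub-rules.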
    uint32_t get_symbol_id(parse_state & state, const char * src, size_t len) {
        uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
        auto result = state.symbol_ids.insert(std::make_pair(std::string(src, len), next_id));
        return result.first->second;
    }

    uint32_t generate_symbol_id(parse_state & state, const std::string & base_name) {
        uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
        state.symbol_ids[base_name + '_' + std::to_string(next_id)] = next_id;
        return next_id;
    }
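
    // Stores `rule` at index `rule_id`, growing the rule table as needed.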
    void add_rule(
            parse_state & state,
            uint32_t      rule_id,
            const std::vector<llama_grammar_element> & rule) {
        if (state.rules.size() <= rule_id) {
            state.rules.resize(rule_id + 1);
        }
        state.rules[rule_id] = rule;
    }

    bool is_word_char(char c) {
        return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '-' || ('0' <= c && c <= '9');
    }
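
    // Parses exactly `size` hex digits (2 for \xNN, 4 for \uNNNN, 8 for \UNNNNNNNN)
    // and throws if the input ends early or contains a non-hex character.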
    std::pair<uint32_t, const char *> parse_hex(const char * src, int size) {
        const char * pos   = src;
        const char * end   = src + size;
        uint32_t     value = 0;
        for ( ; pos < end && *pos; pos++) {
            value <<= 4;
            char c = *pos;
            if ('a' <= c && c <= 'f') {
                value += c - 'a' + 10;
            } else if ('A' <= c && c <= 'F') {
                value += c - 'A' + 10;
            } else if ('0' <= c && c <= '9') {
                value += c - '0';
            } else {
                break;
            }
        }
        if (pos != end) {
            throw std::runtime_error("expecting " + std::to_string(size) + " hex chars at " + src);
        }
        return std::make_pair(value, pos);
    }

    const char * parse_space(const char * src, bool newline_ok) {
        const char * pos = src;
        while (*pos == ' ' || *pos == '\t' || *pos == '#' ||
                (newline_ok && (*pos == '\r' || *pos == '\n'))) {
            if (*pos == '#') {
                while (*pos && *pos != '\r' && *pos != '\n') {
                    pos++;
                }
            } else {
                pos++;
            }
        }
        return pos;
    }

    const char * parse_name(const char * src) {
        const char * pos = src;
        while (is_word_char(*pos)) {
            pos++;
        }
        if (pos == src) {
            throw std::runtime_error(std::string("expecting name at ") + src);
        }
        return pos;
    }
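
    // Reads one character: either an escape sequence (\x.., \u...., \U........,
    // \t, \r, \n, or a literal \\, \", \[, \]) or a raw UTF-8 sequence.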
    std::pair<uint32_t, const char *> parse_char(const char * src) {
        if (*src == '\\') {
            switch (src[1]) {
                case 'x': return parse_hex(src + 2, 2);
                case 'u': return parse_hex(src + 2, 4);
                case 'U': return parse_hex(src + 2, 8);
                case 't': return std::make_pair('\t', src + 2);
                case 'r': return std::make_pair('\r', src + 2);
                case 'n': return std::make_pair('\n', src + 2);
                case '\\':
                case '"':
                case '[':
                case ']':
                    return std::make_pair(src[1], src + 2);
                default:
                    throw std::runtime_error(std::string("unknown escape at ") + src);
            }
        } else if (*src) {
            return decode_utf8(src);
        }
        throw std::runtime_error("unexpected end of input");
    }

    const char * parse_alternates(
        parse_state       & state,
        const char        * src,
        const std::string & rule_name,
        uint32_t            rule_id,
        bool                is_nested);

    const char * parse_sequence(
        parse_state                        & state,
        const char                         * src,
        const std::string                  & rule_name,
        std::vector<llama_grammar_element> & out_elements,
        bool                                 is_nested) {
        size_t last_sym_start = out_elements.size();
        const char * pos = src;
        while (*pos) {
            if (*pos == '"') { // literal string
                pos++;
                last_sym_start = out_elements.size();
                while (*pos != '"') {
                    auto char_pair = parse_char(pos);
                    pos = char_pair.second;
                    out_elements.push_back({LLAMA_GRETYPE_CHAR, char_pair.first});
                }
                pos = parse_space(pos + 1, is_nested);
            } else if (*pos == '[') { // char range(s)
                pos++;
                enum llama_gretype start_type = LLAMA_GRETYPE_CHAR;
                if (*pos == '^') {
                    pos++;
                    start_type = LLAMA_GRETYPE_CHAR_NOT;
                }
                last_sym_start = out_elements.size();
                while (*pos != ']') {
                    auto char_pair = parse_char(pos);
                    pos = char_pair.second;
                    enum llama_gretype type = last_sym_start < out_elements.size()
                        ? LLAMA_GRETYPE_CHAR_ALT
                        : start_type;

                    out_elements.push_back({type, char_pair.first});
                    if (pos[0] == '-' && pos[1] != ']') {
                        auto endchar_pair = parse_char(pos + 1);
                        pos = endchar_pair.second;
                        out_elements.push_back({LLAMA_GRETYPE_CHAR_RNG_UPPER, endchar_pair.first});
                    }
                }
                pos = parse_space(pos + 1, is_nested);
            } else if (is_word_char(*pos)) { // rule reference
                const char * name_end    = parse_name(pos);
                uint32_t     ref_rule_id = get_symbol_id(state, pos, name_end - pos);
                pos = parse_space(name_end, is_nested);
                last_sym_start = out_elements.size();
                out_elements.push_back({LLAMA_GRETYPE_RULE_REF, ref_rule_id});
            } else if (*pos == '(') { // grouping
                // parse nested alternates into synthesized rule
                pos = parse_space(pos + 1, true);
                uint32_t sub_rule_id = generate_symbol_id(state, rule_name);
                pos = parse_alternates(state, pos, rule_name, sub_rule_id, true);
                last_sym_start = out_elements.size();
                // output reference to synthesized rule
                out_elements.push_back({LLAMA_GRETYPE_RULE_REF, sub_rule_id});
                if (*pos != ')') {
                    throw std::runtime_error(std::string("expecting ')' at ") + pos);
                }
                pos = parse_space(pos + 1, is_nested);
            } else if (*pos == '*' || *pos == '+' || *pos == '?') { // repetition operator
                if (last_sym_start == out_elements.size()) {
                    throw std::runtime_error(std::string("expecting preceding item to */+/? at ") + pos);
                }

                // apply transformation to previous symbol (last_sym_start to end) according to
                // rewrite rules:
                // S* --> S' ::= S S' |
                // S+ --> S' ::= S S' | S
                // S? --> S' ::= S |
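                // e.g. `a+` is replaced by a reference to a synthesized rule
                // (named "<rule_name>_<id>") whose body is `a <sub> | a`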
                uint32_t sub_rule_id = generate_symbol_id(state, rule_name);

                std::vector<llama_grammar_element> sub_rule;
                // add preceding symbol to generated rule
                sub_rule.insert(
                    sub_rule.end(), out_elements.begin() + last_sym_start, out_elements.end());
                if (*pos == '*' || *pos == '+') {
                    // cause generated rule to recurse
                    sub_rule.push_back({LLAMA_GRETYPE_RULE_REF, sub_rule_id});
                }
                // mark start of alternate def
                sub_rule.push_back({LLAMA_GRETYPE_ALT, 0});
                if (*pos == '+') {
                    // add preceding symbol as alternate only for '+' (otherwise empty)
                    sub_rule.insert(
                        sub_rule.end(), out_elements.begin() + last_sym_start, out_elements.end());
                }
                sub_rule.push_back({LLAMA_GRETYPE_END, 0});
                add_rule(state, sub_rule_id, sub_rule);

                // in original rule, replace previous symbol with reference to generated rule
                out_elements.resize(last_sym_start);
                out_elements.push_back({LLAMA_GRETYPE_RULE_REF, sub_rule_id});

                pos = parse_space(pos + 1, is_nested);
            } else {
                break;
            }
        }
        return pos;
    }
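
    // A rule body is stored as one flat element list: the elements of each
    // alternate in order, an ALT marker between alternates, and a single END
    // terminator (the same layout print_rule_binary() dumps).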
    const char * parse_alternates(
        parse_state       & state,
        const char        * src,
        const std::string & rule_name,
        uint32_t            rule_id,
        bool                is_nested) {
        std::vector<llama_grammar_element> rule;
        const char * pos = parse_sequence(state, src, rule_name, rule, is_nested);
        while (*pos == '|') {
            rule.push_back({LLAMA_GRETYPE_ALT, 0});
            pos = parse_space(pos + 1, true);
            pos = parse_sequence(state, pos, rule_name, rule, is_nested);
        }
        rule.push_back({LLAMA_GRETYPE_END, 0});
        add_rule(state, rule_id, rule);
        return pos;
    }

    const char * parse_rule(parse_state & state, const char * src) {
        const char * name_end = parse_name(src);
        const char * pos      = parse_space(name_end, false);
        size_t       name_len = name_end - src;
        uint32_t     rule_id  = get_symbol_id(state, src, name_len);
        const std::string name(src, name_len);

        if (!(pos[0] == ':' && pos[1] == ':' && pos[2] == '=')) {
            throw std::runtime_error(std::string("expecting ::= at ") + pos);
        }
        pos = parse_space(pos + 3, true);

        pos = parse_alternates(state, pos, name, rule_id, false);

        if (*pos == '\r') {
            pos += pos[1] == '\n' ? 2 : 1;
        } else if (*pos == '\n') {
            pos++;
        } else if (*pos) {
            throw std::runtime_error(std::string("expecting newline or end at ") + pos);
        }
        return parse_space(pos, true);
    }

    parse_state parse(const char * src) {
        try {
            parse_state state;
            const char * pos = parse_space(src, true);
            while (*pos) {
                pos = parse_rule(state, pos);
            }
            return state;
        } catch (const std::exception & err) {
            fprintf(stderr, "%s: error parsing grammar: %s\n", __func__, err.what());
            return parse_state();
        }
    }
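
    // Usage sketch (illustrative grammar string): on failure, parse() logs the
    // error and returns an empty parse_state, so callers can check rules.empty():
    //
    //   parse_state state = parse("root ::= \"yes\" | \"no\"");
    //   if (!state.rules.empty()) {
    //       print_grammar(stderr, state);
    //   }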

    void print_grammar_char(FILE * file, uint32_t c) {
        if (0x20 <= c && c <= 0x7f) {
            fprintf(file, "%c", static_cast<char>(c));
        } else {
            // cop out of encoding UTF-8
            fprintf(file, "<U+%04X>", c);
        }
    }

    bool is_char_element(llama_grammar_element elem) {
        switch (elem.type) {
            case LLAMA_GRETYPE_CHAR:           return true;
            case LLAMA_GRETYPE_CHAR_NOT:       return true;
            case LLAMA_GRETYPE_CHAR_ALT:       return true;
            case LLAMA_GRETYPE_CHAR_RNG_UPPER: return true;
            default:                           return false;
        }
    }

    void print_rule_binary(FILE * file, const std::vector<llama_grammar_element> & rule) {
        for (auto elem : rule) {
            switch (elem.type) {
                case LLAMA_GRETYPE_END:            fprintf(file, "END");            break;
                case LLAMA_GRETYPE_ALT:            fprintf(file, "ALT");            break;
                case LLAMA_GRETYPE_RULE_REF:       fprintf(file, "RULE_REF");       break;
                case LLAMA_GRETYPE_CHAR:           fprintf(file, "CHAR");           break;
                case LLAMA_GRETYPE_CHAR_NOT:       fprintf(file, "CHAR_NOT");       break;
                case LLAMA_GRETYPE_CHAR_RNG_UPPER: fprintf(file, "CHAR_RNG_UPPER"); break;
                case LLAMA_GRETYPE_CHAR_ALT:       fprintf(file, "CHAR_ALT");       break;
            }
            switch (elem.type) {
                case LLAMA_GRETYPE_END:
                case LLAMA_GRETYPE_ALT:
                case LLAMA_GRETYPE_RULE_REF:
                    fprintf(file, "(%u) ", elem.value);
                    break;
                case LLAMA_GRETYPE_CHAR:
                case LLAMA_GRETYPE_CHAR_NOT:
                case LLAMA_GRETYPE_CHAR_RNG_UPPER:
                case LLAMA_GRETYPE_CHAR_ALT:
                    fprintf(file, "(\"");
                    print_grammar_char(file, elem.value);
                    fprintf(file, "\") ");
                    break;
            }
        }
        fprintf(file, "\n");
    }
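
    // Pretty-prints one rule in GBNF-like syntax; every literal is rendered as
    // a [...] character class, with CHAR_ALT / CHAR_RNG_UPPER elements kept
    // inside the same brackets.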
    void print_rule(
            FILE     * file,
            uint32_t   rule_id,
            const std::vector<llama_grammar_element> & rule,
            const std::map<uint32_t, std::string>    & symbol_id_names) {
        if (rule.empty() || rule.back().type != LLAMA_GRETYPE_END) {
            throw std::runtime_error(
                "malformed rule, does not end with LLAMA_GRETYPE_END: " + std::to_string(rule_id));
        }
        fprintf(file, "%s ::= ", symbol_id_names.at(rule_id).c_str());
        for (size_t i = 0, end = rule.size() - 1; i < end; i++) {
            llama_grammar_element elem = rule[i];
            switch (elem.type) {
                case LLAMA_GRETYPE_END:
                    throw std::runtime_error(
                        "unexpected end of rule: " + std::to_string(rule_id) + "," +
                        std::to_string(i));
                case LLAMA_GRETYPE_ALT:
                    fprintf(file, "| ");
                    break;
                case LLAMA_GRETYPE_RULE_REF:
                    fprintf(file, "%s ", symbol_id_names.at(elem.value).c_str());
                    break;
                case LLAMA_GRETYPE_CHAR:
                    fprintf(file, "[");
                    print_grammar_char(file, elem.value);
                    break;
                case LLAMA_GRETYPE_CHAR_NOT:
                    fprintf(file, "[^");
                    print_grammar_char(file, elem.value);
                    break;
                case LLAMA_GRETYPE_CHAR_RNG_UPPER:
                    if (i == 0 || !is_char_element(rule[i - 1])) {
                        throw std::runtime_error(
                            "LLAMA_GRETYPE_CHAR_RNG_UPPER without preceding char: " +
                            std::to_string(rule_id) + "," + std::to_string(i));
                    }
                    fprintf(file, "-");
                    print_grammar_char(file, elem.value);
                    break;
                case LLAMA_GRETYPE_CHAR_ALT:
                    if (i == 0 || !is_char_element(rule[i - 1])) {
                        throw std::runtime_error(
                            "LLAMA_GRETYPE_CHAR_ALT without preceding char: " +
                            std::to_string(rule_id) + "," + std::to_string(i));
                    }
                    print_grammar_char(file, elem.value);
                    break;
            }
            if (is_char_element(elem)) {
                switch (rule[i + 1].type) {
                    case LLAMA_GRETYPE_CHAR_ALT:
                    case LLAMA_GRETYPE_CHAR_RNG_UPPER:
                        break;
                    default:
                        fprintf(file, "] ");
                }
            }
        }
        fprintf(file, "\n");
    }

    void print_grammar(FILE * file, const parse_state & state) {
        try {
            std::map<uint32_t, std::string> symbol_id_names;
            for (auto kv : state.symbol_ids) {
                symbol_id_names[kv.second] = kv.first;
            }
            for (size_t i = 0, end = state.rules.size(); i < end; i++) {
                // fprintf(file, "%zu: ", i);
                // print_rule_binary(file, state.rules[i]);
                print_rule(file, uint32_t(i), state.rules[i], symbol_id_names);
                // fprintf(file, "\n");
            }
        } catch (const std::exception & err) {
            fprintf(stderr, "\n%s: error printing grammar: %s\n", __func__, err.what());
        }
    }
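
    // Flattens the rule table into an array of element pointers for the C API.
    // The pointers alias this parse_state's vectors and must not outlive it;
    // callers typically pass the result to llama_grammar_init() together with
    // symbol_ids.at("root") as the start rule index.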
    std::vector<const llama_grammar_element *> parse_state::c_rules() {
        std::vector<const llama_grammar_element *> ret;
        for (const auto & rule : rules) {
            ret.push_back(rule.data());
        }
        return ret;
    }
}