From 9b84ae1806cded4d6683c7b810925da5ead40607 Mon Sep 17 00:00:00 2001
From: Clint Herron
Date: Thu, 4 Apr 2024 03:44:28 -0400
Subject: [PATCH] examples : add GBNF validator program (#5948)

* Revising GBNF validator program to be much simpler.

* Changing from streams to using cstdio

* Adding final newline character.
---
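This adds a small CLI tool that checks a plain-text file against a GBNF
grammar and points at the first offending character when validation fails.
A rough usage sketch from the repository root (the .gbnf and .txt file
names are placeholders, not files shipped by this patch):

    make gbnf-validator
    ./gbnf-validator my-grammar.gbnf my-input.txt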
 Makefile                                   |   4 +
 examples/gbnf-validator/CMakeLists.txt     |   5 +
 examples/gbnf-validator/gbnf-validator.cpp | 132 +++++++++++++++++++++
 llama.cpp                                  |  22 +---
 llama.h                                    |  28 +++++
 5 files changed, 171 insertions(+), 20 deletions(-)
 create mode 100644 examples/gbnf-validator/CMakeLists.txt
 create mode 100644 examples/gbnf-validator/gbnf-validator.cpp

diff --git a/Makefile b/Makefile
index ebbbcd354..bdd5ef335 100644
--- a/Makefile
+++ b/Makefile
@@ -867,6 +867,10 @@ passkey: examples/passkey/passkey.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
+gbnf-validator: examples/gbnf-validator/gbnf-validator.cpp ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS)
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
 ifeq ($(UNAME_S),Darwin)
 swift: examples/batched.swift
 	(cd examples/batched.swift; make build)
diff --git a/examples/gbnf-validator/CMakeLists.txt b/examples/gbnf-validator/CMakeLists.txt
new file mode 100644
index 000000000..166e3ad2a
--- /dev/null
+++ b/examples/gbnf-validator/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(TARGET gbnf-validator)
+add_executable(${TARGET} gbnf-validator.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common grammar-parser llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/gbnf-validator/gbnf-validator.cpp b/examples/gbnf-validator/gbnf-validator.cpp
new file mode 100644
index 000000000..e4c0c1689
--- /dev/null
+++ b/examples/gbnf-validator/gbnf-validator.cpp
@@ -0,0 +1,132 @@
+#define LLAMA_API_INTERNAL
+
+#include "grammar-parser.h"
+#include "ggml.h"
+#include "llama.h"
+#include "unicode.h"
+
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+#include <vector>
+
+static bool llama_sample_grammar_string(struct llama_grammar * grammar, const std::string & input_str, size_t & error_pos, std::string & error_msg) {
+    auto decoded = decode_utf8(input_str, {});
+    const auto & code_points = decoded.first;
+
+    size_t pos = 0;
+    for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
+        auto prev_stacks = grammar->stacks;
+        grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
+        if (grammar->stacks.empty()) {
+            error_pos = pos;
+            error_msg = "Unexpected character '" + unicode_cpt_to_utf8(*it) + "'";
+            grammar->stacks = prev_stacks;
+            return false;
+        }
+        ++pos;
+    }
+
+    for (const auto & stack : grammar->stacks) {
+        if (stack.empty()) {
+            return true;
+        }
+    }
+
+    error_pos = pos;
+    error_msg = "Unexpected end of input";
+    return false;
+}
+
+static void print_error_message(const std::string & input_str, size_t error_pos, const std::string & error_msg) {
+    fprintf(stdout, "Input string is invalid according to the grammar.\n");
+    fprintf(stdout, "Error: %s at position %zu\n", error_msg.c_str(), error_pos);
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Input string:\n");
+    fprintf(stdout, "%s", input_str.substr(0, error_pos).c_str());
+    if (error_pos < input_str.size()) {
+        fprintf(stdout, "\033[1;31m%c", input_str[error_pos]);
+        if (error_pos+1 < input_str.size()) {
+            fprintf(stdout, "\033[0;31m%s", input_str.substr(error_pos+1).c_str());
+        }
+        fprintf(stdout, "\033[0m\n");
+    }
+}
+
+int main(int argc, char** argv) {
+    if (argc != 3) {
+        fprintf(stdout, "Usage: %s <grammar_filename> <input_filename>\n", argv[0]);
+        return 1;
+    }
+
+    const std::string grammar_filename = argv[1];
+    const std::string input_filename = argv[2];
+
+    // Read the GBNF grammar file
+    FILE* grammar_file = fopen(grammar_filename.c_str(), "r");
+    if (!grammar_file) {
+        fprintf(stdout, "Failed to open grammar file: %s\n", grammar_filename.c_str());
+        return 1;
+    }
+
+    fseek(grammar_file, 0, SEEK_END);
+    size_t grammar_size = ftell(grammar_file);
+    fseek(grammar_file, 0, SEEK_SET);
+
+    std::string grammar_str(grammar_size, ' ');
+    fread(&grammar_str[0], 1, grammar_size, grammar_file);
+    fclose(grammar_file);
+
+    // Parse the GBNF grammar
+    auto parsed_grammar = grammar_parser::parse(grammar_str.c_str());
+
+    // will be empty (default) if there are parse errors
+    if (parsed_grammar.rules.empty()) {
+        fprintf(stdout, "%s: failed to parse grammar\n", __func__);
+        return 1;
+    }
+
+    // Ensure that there is a "root" node.
+    if (parsed_grammar.symbol_ids.find("root") == parsed_grammar.symbol_ids.end()) {
+        fprintf(stdout, "%s: grammar does not contain a 'root' symbol\n", __func__);
+        return 1;
+    }
+
+    std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
+
+    // Create the LLAMA grammar
+    auto grammar = llama_grammar_init(
+        grammar_rules.data(),
+        grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
+
+    // Read the input file
+    FILE* input_file = fopen(input_filename.c_str(), "r");
+    if (!input_file) {
+        fprintf(stdout, "Failed to open input file: %s\n", input_filename.c_str());
+        return 1;
+    }
+
+    fseek(input_file, 0, SEEK_END);
+    size_t input_size = ftell(input_file);
+    fseek(input_file, 0, SEEK_SET);
+
+    std::string input_str(input_size, ' ');
+    fread(&input_str[0], 1, input_size, input_file);
+    fclose(input_file);
+
+    // Validate the input string against the grammar
+    size_t error_pos;
+    std::string error_msg;
+    bool is_valid = llama_sample_grammar_string(grammar, input_str, error_pos, error_msg);
+
+    if (is_valid) {
+        fprintf(stdout, "Input string is valid according to the grammar.\n");
+    } else {
+        print_error_message(input_str, error_pos, error_msg);
+    }
+
+    // Clean up
+    llama_grammar_free(grammar);
+
+    return 0;
+}
diff --git a/llama.cpp b/llama.cpp
index 267ac4cc0..9a1c11043 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11621,28 +11621,10 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
 // grammar - internal
 //
 
-struct llama_partial_utf8 {
-    uint32_t value;    // bit value so far (unshifted)
-    int      n_remain; // num bytes remaining; -1 indicates invalid sequence
-};
-
-struct llama_grammar {
-    const std::vector<std::vector<llama_grammar_element>>   rules;
-    std::vector<std::vector<const llama_grammar_element *>> stacks;
-
-    // buffer for partially generated UTF-8 sequence from accepted tokens
-    llama_partial_utf8                                      partial_utf8;
-};
-
-struct llama_grammar_candidate {
-    size_t               index;
-    const uint32_t     * code_points;
-    llama_partial_utf8   partial_utf8;
-};
-
 // Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 for use as
 // pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
-static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
+std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
         const std::string & src,
         llama_partial_utf8   partial_start) {
     static const int      lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
@@ -11844,7 +11826,7 @@ static void llama_grammar_advance_stack(
 // be positioned at a character range (see `llama_grammar_advance_stack`), and
 // produces the N possible stacks if the given char is accepted at those
 // positions
-static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
+std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
         const std::vector<std::vector<llama_grammar_element>>         & rules,
         const std::vector<std::vector<const llama_grammar_element *>> & stacks,
         const uint32_t                                                   chr) {
diff --git a/llama.h b/llama.h
index f061d014c..036b32685 100644
--- a/llama.h
+++ b/llama.h
@@ -1007,10 +1007,38 @@ extern "C" {
 
 struct ggml_tensor;
 
+struct llama_partial_utf8 {
+    uint32_t value;    // bit value so far (unshifted)
+    int      n_remain; // num bytes remaining; -1 indicates invalid sequence
+};
+
+struct llama_grammar {
+    const std::vector<std::vector<llama_grammar_element>>   rules;
+    std::vector<std::vector<const llama_grammar_element *>> stacks;
+
+    // buffer for partially generated UTF-8 sequence from accepted tokens
+    llama_partial_utf8                                      partial_utf8;
+};
+
+struct llama_grammar_candidate {
+    size_t               index;
+    const uint32_t     * code_points;
+    llama_partial_utf8   partial_utf8;
+};
+
 const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
     struct llama_context * ctx
 );
 
+std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
+        const std::vector<std::vector<llama_grammar_element>>         & rules,
+        const std::vector<std::vector<const llama_grammar_element *>> & stacks,
+        const uint32_t                                                   chr);
+
+std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
+        const std::string & src,
+        llama_partial_utf8   partial_start);
+
 #endif // LLAMA_API_INTERNAL
 
 #endif // LLAMA_H