mirror of https://github.com/ggerganov/llama.cpp.git
synced 2024-11-11 21:39:52 +00:00
6381d4e110
* gguf : first API pass
* gguf : read header + meta data
* gguf : read tensor info
* gguf : initial model loading - not tested
* gguf : add gguf_get_tensor_name()
* gguf : do not support passing existing ggml_context to gguf_init
* gguf : simplify gguf_get_val
* gguf : gguf.c is now part of ggml.c
* gguf : read / write sample models
* gguf : add comments
* refactor : reduce code duplication and better API (#2415)
* gguf : expose the gguf_type enum through the API for now
* gguf : add array support
* gguf.py : some code style changes
* convert.py : start a new simplified implementation by removing old stuff
* convert.py : remove GGML vocab + other obsolete stuff
* GGUF : write tensor (#2426)
* WIP: Write tensor
* GGUF : Support writing tensors in Python
* refactor : rm unused import and upd todos
* fix : fix errors upd writing example
* rm example.gguf
* gitignore *.gguf
* undo formatting
* gguf : add gguf_find_key (#2438)
* gguf.cpp : find key example
* ggml.h : add gguf_find_key
* ggml.c : add gguf_find_key
* gguf : fix writing tensors
* gguf : do not hardcode tensor names to read
* gguf : write sample tensors to read
* gguf : add tokenization constants
* quick and dirty conversion example
* gguf : fix writing gguf arrays
* gguf : write tensors one by one and code reuse
* gguf : fix writing gguf arrays
* gguf : write tensors one by one
* gguf : write tensors one by one
* gguf : write tokenizer data
* gguf : upd gguf conversion script
* Update convert-llama-h5-to-gguf.py
* gguf : handle already encoded string
* ggml.h : get array str and f32
* ggml.c : get arr str and f32
* gguf.py : support any type
* Update convert-llama-h5-to-gguf.py
* gguf : fix set is not subscriptable
* gguf : update convert-llama-h5-to-gguf.py
* constants.py : add layer norm eps
* gguf.py : add layer norm eps and merges
* ggml.h : increase GGML_MAX_NAME to 64
* ggml.c : add gguf_get_arr_n
* Update convert-llama-h5-to-gguf.py
* add gptneox gguf example
* Makefile : add gptneox gguf example
* Update convert-llama-h5-to-gguf.py
* add gptneox gguf example
* Update convert-llama-h5-to-gguf.py
* Update convert-gptneox-h5-to-gguf.py
* Update convert-gptneox-h5-to-gguf.py
* Update convert-llama-h5-to-gguf.py
* gguf : support custom alignment value
* gguf : fix typo in function call
* gguf : mmap tensor data example
* fix : update convert-llama-h5-to-gguf.py
* Update convert-llama-h5-to-gguf.py
* convert-gptneox-h5-to-gguf.py : Special tokens
* gptneox-main.cpp : special tokens
* Update gptneox-main.cpp
* constants.py : special tokens
* gguf.py : accumulate kv and tensor info data + special tokens
* convert-gptneox-h5-to-gguf.py : accumulate kv and ti + special tokens
* gguf : gguf counterpart of llama-util.h
* gguf-util.h : update note
* convert-llama-h5-to-gguf.py : accumulate kv / ti + special tokens
* convert-llama-h5-to-gguf.py : special tokens
* Delete gptneox-common.cpp
* Delete gptneox-common.h
* convert-gptneox-h5-to-gguf.py : gpt2bpe tokenizer
* gptneox-main.cpp : gpt2 bpe tokenizer
* gpt2 bpe tokenizer (handles merges and unicode)
* Makefile : remove gptneox-common
* gguf.py : bytesarray for gpt2bpe tokenizer
* cmpnct_gpt2bpe.hpp : comments
* gguf.py : use custom alignment if present
* gguf : minor stuff
* Update gptneox-main.cpp
* map tensor names
* convert-gptneox-h5-to-gguf.py : map tensor names
* convert-llama-h5-to-gguf.py : map tensor names
* gptneox-main.cpp : map tensor names
* gguf : start implementing libllama in GGUF (WIP)
* gguf : start implementing libllama in GGUF (WIP)
* rm binary committed by mistake
* upd .gitignore
* gguf : calculate n_mult
* gguf : inference with 7B model working (WIP)
* gguf : rm deprecated function
* gguf : start implementing gguf_file_saver (WIP)
* gguf : start implementing gguf_file_saver (WIP)
* gguf : start implementing gguf_file_saver (WIP)
* gguf : add gguf_get_kv_type
* gguf : add gguf_get_kv_type
* gguf : write metadata in gguf_file_saver (WIP)
* gguf : write metadata in gguf_file_saver (WIP)
* gguf : write metadata in gguf_file_saver
* gguf : rm references to old file formats
* gguf : shorter name for member variable
* gguf : rm redundant method
* gguf : get rid of n_mult, read n_ff from file
* Update gguf_tensor_map.py
* Update gptneox-main.cpp
* gguf : rm references to old file magics
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : quantization is working
* gguf : proper closing of file
* gguf.py : no need to convert tensors twice
* convert-gptneox-h5-to-gguf.py : no need to convert tensors twice
* convert-llama-h5-to-gguf.py : no need to convert tensors twice
* convert-gptneox-h5-to-gguf.py : simplify nbytes
* convert-llama-h5-to-gguf.py : simplify nbytes
* gptneox-main.cpp : n_layer --> n_block
* constants.py : n_layer --> n_block
* gguf.py : n_layer --> n_block
* convert-gptneox-h5-to-gguf.py : n_layer --> n_block
* convert-llama-h5-to-gguf.py : n_layer --> n_block
* gptneox-main.cpp : n_layer --> n_block
* Update gguf_tensor_map.py
* convert-gptneox-h5-to-gguf.py : load model in parts to save memory
* convert-llama-h5-to-gguf.py : load model in parts to save memory
* convert : write more metadata for LLaMA
* convert : rm quantization version
* convert-gptneox-h5-to-gguf.py : add file_type key
* gptneox-main.cpp : add file_type key
* fix conflicts
* gguf : add todos and comments
* convert-gptneox-h5-to-gguf.py : tensor name map changes
* Create gguf_namemap.py : tensor name map changes
* Delete gguf_tensor_map.py
* gptneox-main.cpp : tensor name map changes
* convert-llama-h5-to-gguf.py : fixes
* gguf.py : don't add empty strings
* simple : minor style changes
* gguf : use UNIX line ending
* Create convert-llama-7b-pth-to-gguf.py
* llama : sync gguf-llama.cpp with latest llama.cpp (#2608)
* llama : sync gguf-llama.cpp with latest llama.cpp
* minor : indentation + assert
* llama : refactor gguf_buffer and gguf_ctx_buffer
* llama : minor
* gitignore : add gptneox-main
* llama : tokenizer fixes (#2549)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* convert : update convert-new.py with tokenizer fixes (#2614)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* llama : sync gguf-llama with llama (#2613)
* llama : sync gguf-llama with llama
* tests : fix build + warnings (test-tokenizer-1 still fails)
* tests : fix wstring_convert
* convert : fix layer names
* llama : sync gguf-llama.cpp
* convert : update HF converter to new tokenizer voodoo magics
* llama : update tokenizer style
* convert-llama-h5-to-gguf.py : add token types
* constants.py : add token types
* gguf.py : add token types
* convert-llama-7b-pth-to-gguf.py : add token types
* gguf-llama.cpp : fix n_head_kv
* convert-llama-h5-to-gguf.py : add 70b gqa support
* gguf.py : add tensor data layout
* convert-llama-h5-to-gguf.py : add tensor data layout
* convert-llama-7b-pth-to-gguf.py : add tensor data layout
* gptneox-main.cpp : add tensor data layout
* convert-llama-h5-to-gguf.py : clarify the reverse permute
* llama : refactor model loading code (#2620)
* llama : style formatting + remove helper methods
* llama : fix quantization using gguf tool
* llama : simplify gguf_file_saver
* llama : fix method names
* llama : simplify write_header()
* llama : no need to pass full file loader to the file saver
just gguf_ctx
* llama : gguf_file_saver write I32
* llama : refactor tensor names (#2622)
* gguf: update tensor names searched in quantization
* gguf : define tensor names as constants
* gguf : initial write API (not tested yet)
* gguf : write to file API (not tested)
* gguf : initial write API ready + example
* gguf : fix header write
* gguf : fixes + simplify example + add ggml_nbytes_pad()
* gguf : minor
* llama : replace gguf_file_saver with new gguf write API
* gguf : streaming support when writing files
* gguf : remove obsolete write methods
* gguf : remove obsolete gguf_get_arr_xxx API
* llama : simplify gguf_file_loader
* llama : move hparams and vocab from gguf_file_loader to llama_model_loader
* llama : merge gguf-util.h in llama.cpp
* llama : reorder definitions in .cpp to match .h
* llama : minor simplifications
* llama : refactor llama_model_loader (WIP)
wip : remove ggml_ctx from llama_model_loader
wip : merge gguf_file_loader in llama_model_loader
* llama : fix shape prints
* llama : fix Windows build + fix norm_rms_eps key
* llama : throw error on missing KV pairs in model meta data
* llama : improve printing + log meta data
* llama : switch print order of meta data
---------
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
* gguf : deduplicate (#2629)
* gguf : better type names
* dedup : CPU + Metal is working
* ggml : fix warnings about unused results
* llama.cpp : fix line feed and compiler warning
* llama : fix strncpy warning + note token_to_str does not write null
* llama : restore the original load/save session implementation
Will migrate this to GGUF in the future
* convert-llama-h5-to-gguf.py : support alt ctx param name
* ggml : assert when using ggml_mul with non-F32 src1
* examples : dedup simple
---------
Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
* gguf.py : merge all files in gguf.py
* convert-new.py : pick #2427 for HF 70B support
* examples/gguf : no need to keep q option for quantization any more
* llama.cpp : print actual model size
* llama.cpp : use ggml_elements()
* convert-new.py : output gguf (#2635)
* convert-new.py : output gguf (WIP)
* convert-new.py : add gguf key-value pairs
* llama : add hparams.ctx_train + no longer print ftype
* convert-new.py : minor fixes
* convert-new.py : vocab-only option should work now
* llama : fix tokenizer to use llama_char_to_byte
* tests : add new ggml-vocab-llama.gguf
* convert-new.py : tensor name mapping
* convert-new.py : add map for skipping tensor serialization
* convert-new.py : convert script now works
* gguf.py : pick some of the refactoring from #2644
* convert-new.py : minor fixes
* convert.py : update to support GGUF output
* Revert "ci : disable CI temporary to not waste energy"
This reverts commit 7e82d25f40.
* convert.py : n_head_kv optional and .gguf file extension
* convert.py : better always have n_head_kv and default it to n_head
* llama : sync with recent PRs on master
* editorconfig : ignore models folder
ggml-ci
* ci : update ".bin" to ".gguf" extension
ggml-ci
* llama : fix llama_model_loader memory leak
* gptneox : move as a WIP example
* llama : fix lambda capture
ggml-ci
* ggml : fix bug in gguf_set_kv
ggml-ci
* common.h : .bin --> .gguf
* quantize-stats.cpp : .bin --> .gguf
* convert.py : fix HF tensor permuting / unpacking
ggml-ci
* llama.cpp : typo
* llama : throw error if gguf fails to init from file
ggml-ci
* llama : fix tensor name grepping during quantization
ggml-ci
* gguf.py : write tensors in a single pass (#2644)
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : style fixes in simple conversion script
* gguf : refactor gptneox conversion script
* gguf : rename h5 to hf (for HuggingFace)
* gguf : refactor pth to gguf conversion script
* gguf : rm file_type key and method
* gguf.py : fix vertical alignment
* gguf.py : indentation
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* convert-gptneox-hf-to-gguf.py : fixes
* gguf.py : gptneox mapping
* convert-llama-hf-to-gguf.py : fixes
* convert-llama-7b-pth-to-gguf.py : fixes
* ggml.h : reverse GGUF_MAGIC
* gguf.py : reverse GGUF_MAGIC
* test-tokenizer-0.cpp : fix warning
* llama.cpp : print kv general.name
* llama.cpp : get special token kv and linefeed token id
* llama : print number of tensors per type + print arch + style
* tests : update vocab file with new magic
* editorconfig : fix whitespaces
* llama : re-order functions
* llama : remove C++ API + reorganize common source in /common dir
* llama : minor API updates
* llama : avoid hardcoded special tokens
* llama : fix MPI build
ggml-ci
* llama : introduce enum llama_vocab_type + remove hardcoded string constants
* convert-falcon-hf-to-gguf.py : falcon HF --> gguf conversion, not tested
* falcon-main.cpp : falcon inference example
* convert-falcon-hf-to-gguf.py : remove extra kv
* convert-gptneox-hf-to-gguf.py : remove extra kv
* convert-llama-7b-pth-to-gguf.py : remove extra kv
* convert-llama-hf-to-gguf.py : remove extra kv
* gguf.py : fix for falcon 40b
* falcon-main.cpp : fix for falcon 40b
* convert-falcon-hf-to-gguf.py : update ref
* convert-falcon-hf-to-gguf.py : add tensor data layout
* cmpnct_gpt2bpe.hpp : fixes
* falcon-main.cpp : fixes
* gptneox-main.cpp : fixes
* cmpnct_gpt2bpe.hpp : remove non-general stuff
* Update examples/server/README.md
Co-authored-by: slaren <slarengh@gmail.com>
* cmpnct_gpt2bpe.hpp : cleanup
* convert-llama-hf-to-gguf.py : special tokens
* convert-llama-7b-pth-to-gguf.py : special tokens
* convert-permute-debug.py : permute debug print
* convert-permute-debug-master.py : permute debug for master
* convert-permute-debug.py : change permute type of attn_q
* convert.py : 70b model working (change attn_q permute)
* Delete convert-permute-debug-master.py
* Delete convert-permute-debug.py
* convert-llama-hf-to-gguf.py : fix attn_q permute
* gguf.py : fix rope scale kv
* convert-llama-hf-to-gguf.py : rope scale and added tokens
* convert-llama-7b-pth-to-gguf.py : rope scale and added tokens
* llama.cpp : use rope scale kv
* convert-llama-7b-pth-to-gguf.py : rope scale fix
* convert-llama-hf-to-gguf.py : rope scale fix
* py : fix whitespace
* gguf : add Python script to convert GGMLv3 LLaMA models to GGUF (#2682)
* First pass at converting GGMLv3 LLaMA models to GGUF
* Cleanups, better output during conversion
* Fix vocab space conversion logic
* More vocab conversion fixes
* Add description to converted GGUF files
* Improve help text, expand warning
* Allow specifying name and description for output GGUF
* Allow overriding vocab and hyperparams from original model metadata
* Use correct params override var name
* Fix wrong type size for Q8_K
Better handling of original style metadata
* Set default value for gguf add_tensor raw_shape KW arg
* llama : improve token type support (#2668)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* Improved tokenizer test
But does it work on MacOS?
* Improve token type support
- Added @klosax code to convert.py
- Improved token type support in vocabulary
* Exclude platform dependent tests
* More sentencepiece compatibility by eliminating magic numbers
* Restored accidentally removed comment
* llama : add API for token type
ggml-ci
* tests : use new tokenizer type API (#2692)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* Improved tokenizer test
But does it work on MacOS?
* Improve token type support
- Added @klosax code to convert.py
- Improved token type support in vocabulary
* Exclude platform dependent tests
* More sentencepiece compatibility by eliminating magic numbers
* Restored accidentally removed comment
* Improve commentary
* Use token type API in test-tokenizer-1.cpp
* py : cosmetics
* readme : add notice about new file format
ggml-ci
---------
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
Co-authored-by: goerch <jhr.walter@t-online.de>
Co-authored-by: slaren <slarengh@gmail.com>
Co-authored-by: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>
501 lines · 16 KiB · C++
#include "console.h"
|
|
#include <vector>
|
|
#include <iostream>
|
|
|
|
#if defined(_WIN32)
|
|
#define WIN32_LEAN_AND_MEAN
|
|
#ifndef NOMINMAX
|
|
#define NOMINMAX
|
|
#endif
|
|
#include <windows.h>
|
|
#include <fcntl.h>
|
|
#include <io.h>
|
|
#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
|
#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
|
|
#endif
|
|
#else
|
|
#include <climits>
|
|
#include <sys/ioctl.h>
|
|
#include <unistd.h>
|
|
#include <wchar.h>
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <signal.h>
|
|
#include <termios.h>
|
|
#endif
|
|
|
|
#define ANSI_COLOR_RED "\x1b[31m"
|
|
#define ANSI_COLOR_GREEN "\x1b[32m"
|
|
#define ANSI_COLOR_YELLOW "\x1b[33m"
|
|
#define ANSI_COLOR_BLUE "\x1b[34m"
|
|
#define ANSI_COLOR_MAGENTA "\x1b[35m"
|
|
#define ANSI_COLOR_CYAN "\x1b[36m"
|
|
#define ANSI_COLOR_RESET "\x1b[0m"
|
|
#define ANSI_BOLD "\x1b[1m"
|
|
|
|
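
// These are the standard ANSI SGR escape sequences: printing, for example,
// ANSI_BOLD ANSI_COLOR_GREEN ("\x1b[1m\x1b[32m") switches the terminal to bold
// green text, and ANSI_COLOR_RESET ("\x1b[0m") restores the default attributes.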

namespace console {

    //
    // Console state
    //

    static bool      advanced_display = false;
    static bool      simple_io        = true;
    static display_t current_display  = reset;

    static FILE*     out              = stdout;

#if defined (_WIN32)
    static void*     hConsole;
#else
    static FILE*     tty              = nullptr;
    static termios   initial_state;
#endif

    //
    // Init and cleanup
    //

    void init(bool use_simple_io, bool use_advanced_display) {
        advanced_display = use_advanced_display;
        simple_io = use_simple_io;
#if defined(_WIN32)
        // Windows-specific console initialization
        DWORD dwMode = 0;
        hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
        if (hConsole == INVALID_HANDLE_VALUE || !GetConsoleMode(hConsole, &dwMode)) {
            hConsole = GetStdHandle(STD_ERROR_HANDLE);
            if (hConsole != INVALID_HANDLE_VALUE && (!GetConsoleMode(hConsole, &dwMode))) {
                hConsole = nullptr;
                simple_io = true;
            }
        }
        if (hConsole) {
            // Check conditions combined to reduce nesting
            if (advanced_display && !(dwMode & ENABLE_VIRTUAL_TERMINAL_PROCESSING) &&
                !SetConsoleMode(hConsole, dwMode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)) {
                advanced_display = false;
            }
            // Set console output codepage to UTF8
            SetConsoleOutputCP(CP_UTF8);
        }
        HANDLE hConIn = GetStdHandle(STD_INPUT_HANDLE);
        if (hConIn != INVALID_HANDLE_VALUE && GetConsoleMode(hConIn, &dwMode)) {
            // Set console input codepage to UTF16
            _setmode(_fileno(stdin), _O_WTEXT);

            // Set ICANON (ENABLE_LINE_INPUT) and ECHO (ENABLE_ECHO_INPUT)
            if (simple_io) {
                dwMode |= ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT;
            } else {
                dwMode &= ~(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT);
            }
            if (!SetConsoleMode(hConIn, dwMode)) {
                simple_io = true;
            }
        }
#else
        // POSIX-specific console initialization
        if (!simple_io) {
            struct termios new_termios;
            tcgetattr(STDIN_FILENO, &initial_state);
            new_termios = initial_state;
            new_termios.c_lflag &= ~(ICANON | ECHO);
            new_termios.c_cc[VMIN] = 1;
            new_termios.c_cc[VTIME] = 0;
            tcsetattr(STDIN_FILENO, TCSANOW, &new_termios);

            tty = fopen("/dev/tty", "w+");
            if (tty != nullptr) {
                out = tty;
            }
        }

        setlocale(LC_ALL, "");
#endif
    }
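
    // With ICANON/ENABLE_LINE_INPUT and ECHO disabled above, input is delivered
    // one keypress at a time and nothing is echoed automatically, which is what
    // lets readline_advanced() below draw and erase the input line itself.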

    void cleanup() {
        // Reset console display
        set_display(reset);

#if !defined(_WIN32)
        // Restore settings on POSIX systems
        if (!simple_io) {
            if (tty != nullptr) {
                out = stdout;
                fclose(tty);
                tty = nullptr;
            }
            tcsetattr(STDIN_FILENO, TCSANOW, &initial_state);
        }
#endif
    }

    //
    // Display and IO
    //

    // Keep track of current display and only emit ANSI code if it changes
    void set_display(display_t display) {
        if (advanced_display && current_display != display) {
            fflush(stdout);
            switch(display) {
                case reset:
                    fprintf(out, ANSI_COLOR_RESET);
                    break;
                case prompt:
                    fprintf(out, ANSI_COLOR_YELLOW);
                    break;
                case user_input:
                    fprintf(out, ANSI_BOLD ANSI_COLOR_GREEN);
                    break;
                case error:
                    fprintf(out, ANSI_BOLD ANSI_COLOR_RED);
            }
            current_display = display;
            fflush(out);
        }
    }

    char32_t getchar32() {
#if defined(_WIN32)
        HANDLE hConsole = GetStdHandle(STD_INPUT_HANDLE);
        wchar_t high_surrogate = 0;

        while (true) {
            INPUT_RECORD record;
            DWORD count;
            if (!ReadConsoleInputW(hConsole, &record, 1, &count) || count == 0) {
                return WEOF;
            }

            if (record.EventType == KEY_EVENT && record.Event.KeyEvent.bKeyDown) {
                wchar_t wc = record.Event.KeyEvent.uChar.UnicodeChar;
                if (wc == 0) {
                    continue;
                }

                if ((wc >= 0xD800) && (wc <= 0xDBFF)) { // Check if wc is a high surrogate
                    high_surrogate = wc;
                    continue;
                }
                if ((wc >= 0xDC00) && (wc <= 0xDFFF)) { // Check if wc is a low surrogate
                    if (high_surrogate != 0) { // Check if we have a high surrogate
                        return ((high_surrogate - 0xD800) << 10) + (wc - 0xDC00) + 0x10000;
                    }
                }

                high_surrogate = 0; // Reset the high surrogate
                return static_cast<char32_t>(wc);
            }
        }
#else
        wchar_t wc = getwchar();
        if (static_cast<wint_t>(wc) == WEOF) {
            return WEOF;
        }

#if WCHAR_MAX == 0xFFFF
        if ((wc >= 0xD800) && (wc <= 0xDBFF)) { // Check if wc is a high surrogate
            wchar_t low_surrogate = getwchar();
            if ((low_surrogate >= 0xDC00) && (low_surrogate <= 0xDFFF)) { // Check if the next wchar is a low surrogate
                return (static_cast<char32_t>(wc & 0x03FF) << 10) + (low_surrogate & 0x03FF) + 0x10000;
            }
        }
        if ((wc >= 0xD800) && (wc <= 0xDFFF)) { // Invalid surrogate pair
            return 0xFFFD; // Return the replacement character U+FFFD
        }
#endif

        return static_cast<char32_t>(wc);
#endif
    }
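
    // Worked example of the surrogate-pair arithmetic above: the console delivers
    // U+1F600 as the UTF-16 pair 0xD83D 0xDE00, and
    // ((0xD83D - 0xD800) << 10) + (0xDE00 - 0xDC00) + 0x10000 == 0x1F600.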

    void pop_cursor() {
#if defined(_WIN32)
        if (hConsole != NULL) {
            CONSOLE_SCREEN_BUFFER_INFO bufferInfo;
            GetConsoleScreenBufferInfo(hConsole, &bufferInfo);

            COORD newCursorPosition = bufferInfo.dwCursorPosition;
            if (newCursorPosition.X == 0) {
                newCursorPosition.X = bufferInfo.dwSize.X - 1;
                newCursorPosition.Y -= 1;
            } else {
                newCursorPosition.X -= 1;
            }

            SetConsoleCursorPosition(hConsole, newCursorPosition);
            return;
        }
#endif
        putc('\b', out);
    }

    int estimateWidth(char32_t codepoint) {
#if defined(_WIN32)
        return 1;
#else
        return wcwidth(codepoint);
#endif
    }

    int put_codepoint(const char* utf8_codepoint, size_t length, int expectedWidth) {
#if defined(_WIN32)
        CONSOLE_SCREEN_BUFFER_INFO bufferInfo;
        if (!GetConsoleScreenBufferInfo(hConsole, &bufferInfo)) {
            // go with the default
            return expectedWidth;
        }
        COORD initialPosition = bufferInfo.dwCursorPosition;
        DWORD nNumberOfChars = length;
        WriteConsole(hConsole, utf8_codepoint, nNumberOfChars, &nNumberOfChars, NULL);

        CONSOLE_SCREEN_BUFFER_INFO newBufferInfo;
        GetConsoleScreenBufferInfo(hConsole, &newBufferInfo);

        // Figure out our real position if we're in the last column
        if (utf8_codepoint[0] != 0x09 && initialPosition.X == newBufferInfo.dwSize.X - 1) {
            DWORD nNumberOfChars;
            WriteConsole(hConsole, &" \b", 2, &nNumberOfChars, NULL);
            GetConsoleScreenBufferInfo(hConsole, &newBufferInfo);
        }

        int width = newBufferInfo.dwCursorPosition.X - initialPosition.X;
        if (width < 0) {
            width += newBufferInfo.dwSize.X;
        }
        return width;
#else
        // We can trust expectedWidth if we've got one
        if (expectedWidth >= 0 || tty == nullptr) {
            fwrite(utf8_codepoint, length, 1, out);
            return expectedWidth;
        }

        fputs("\033[6n", tty); // Query cursor position
        int x1;
        int y1;
        int x2;
        int y2;
        int results = 0;
        results = fscanf(tty, "\033[%d;%dR", &y1, &x1);

        fwrite(utf8_codepoint, length, 1, tty);

        fputs("\033[6n", tty); // Query cursor position
        results += fscanf(tty, "\033[%d;%dR", &y2, &x2);

        if (results != 4) {
            return expectedWidth;
        }

        int width = x2 - x1;
        if (width < 0) {
            // Calculate the width considering text wrapping
            struct winsize w;
            ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
            width += w.ws_col;
        }
        return width;
#endif
    }
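
    // The "\033[6n" writes above are the ANSI Device Status Report query: the
    // terminal replies with "\033[{row};{col}R", so the rendered width of the
    // codepoint is the column delta between the two replies, plus the terminal
    // width when the cursor wrapped to the next line.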

    void replace_last(char ch) {
#if defined(_WIN32)
        pop_cursor();
        put_codepoint(&ch, 1, 1);
#else
        fprintf(out, "\b%c", ch);
#endif
    }

    void append_utf8(char32_t ch, std::string & out) {
        if (ch <= 0x7F) {
            out.push_back(static_cast<unsigned char>(ch));
        } else if (ch <= 0x7FF) {
            out.push_back(static_cast<unsigned char>(0xC0 | ((ch >> 6) & 0x1F)));
            out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
        } else if (ch <= 0xFFFF) {
            out.push_back(static_cast<unsigned char>(0xE0 | ((ch >> 12) & 0x0F)));
            out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 6) & 0x3F)));
            out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
        } else if (ch <= 0x10FFFF) {
            out.push_back(static_cast<unsigned char>(0xF0 | ((ch >> 18) & 0x07)));
            out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 12) & 0x3F)));
            out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 6) & 0x3F)));
            out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
        } else {
            // Invalid Unicode code point
        }
    }
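
    // For example, append_utf8(0x20AC, s) takes the 0x0800..0xFFFF branch and
    // appends the three bytes 0xE2 0x82 0xAC, the UTF-8 encoding of U+20AC.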

    // Helper function to remove the last UTF-8 character from a string
    void pop_back_utf8_char(std::string & line) {
        if (line.empty()) {
            return;
        }

        size_t pos = line.length() - 1;

        // Find the start of the last UTF-8 character (checking up to 4 bytes back)
        for (size_t i = 0; i < 3 && pos > 0; ++i, --pos) {
            if ((line[pos] & 0xC0) != 0x80) {
                break; // Found the start of the character
            }
        }
        line.erase(pos);
    }
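
    // UTF-8 continuation bytes all match the bit pattern 10xxxxxx, so the scan
    // above stops at the first byte outside 0x80..0xBF: for "caf\xC3\xA9" it
    // stops at 0xC3 and erases the two-byte final character in one call.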

    bool readline_advanced(std::string & line, bool multiline_input) {
        if (out != stdout) {
            fflush(stdout);
        }

        line.clear();
        std::vector<int> widths;
        bool is_special_char = false;
        bool end_of_stream = false;

        char32_t input_char;
        while (true) {
            fflush(out); // Ensure all output is displayed before waiting for input
            input_char = getchar32();

            if (input_char == '\r' || input_char == '\n') {
                break;
            }

            if (input_char == (char32_t) WEOF || input_char == 0x04 /* Ctrl+D */) {
                end_of_stream = true;
                break;
            }

            if (is_special_char) {
                set_display(user_input);
                replace_last(line.back());
                is_special_char = false;
            }

            if (input_char == '\033') { // Escape sequence
                char32_t code = getchar32();
                if (code == '[' || code == 0x1B) {
                    // Discard the rest of the escape sequence
                    while ((code = getchar32()) != (char32_t) WEOF) {
                        if ((code >= 'A' && code <= 'Z') || (code >= 'a' && code <= 'z') || code == '~') {
                            break;
                        }
                    }
                }
            } else if (input_char == 0x08 || input_char == 0x7F) { // Backspace
                if (!widths.empty()) {
                    int count;
                    do {
                        count = widths.back();
                        widths.pop_back();
                        // Move cursor back, print space, and move cursor back again
                        for (int i = 0; i < count; i++) {
                            replace_last(' ');
                            pop_cursor();
                        }
                        pop_back_utf8_char(line);
                    } while (count == 0 && !widths.empty());
                }
            } else {
                int offset = line.length();
                append_utf8(input_char, line);
                int width = put_codepoint(line.c_str() + offset, line.length() - offset, estimateWidth(input_char));
                if (width < 0) {
                    width = 0;
                }
                widths.push_back(width);
            }

            if (!line.empty() && (line.back() == '\\' || line.back() == '/')) {
                set_display(prompt);
                replace_last(line.back());
                is_special_char = true;
            }
        }

        bool has_more = multiline_input;
        if (is_special_char) {
            replace_last(' ');
            pop_cursor();

            char last = line.back();
            line.pop_back();
            if (last == '\\') {
                line += '\n';
                fputc('\n', out);
                has_more = !has_more;
            } else {
                // llama will just eat the single space, it won't act as a space
                if (line.length() == 1 && line.back() == ' ') {
                    line.clear();
                    pop_cursor();
                }
                has_more = false;
            }
        } else {
            if (end_of_stream) {
                has_more = false;
            } else {
                line += '\n';
                fputc('\n', out);
            }
        }

        fflush(out);
        return has_more;
    }
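
    // Both readline variants treat a trailing '\\' or '/' as a control character:
    // a final '\\' is stripped and toggles whether more input lines follow, while
    // a final '/' always returns control to the caller.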

    bool readline_simple(std::string & line, bool multiline_input) {
#if defined(_WIN32)
        std::wstring wline;
        if (!std::getline(std::wcin, wline)) {
            // Input stream is bad or EOF received
            line.clear();
            GenerateConsoleCtrlEvent(CTRL_C_EVENT, 0);
            return false;
        }

        int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wline[0], (int)wline.size(), NULL, 0, NULL, NULL);
        line.resize(size_needed);
        WideCharToMultiByte(CP_UTF8, 0, &wline[0], (int)wline.size(), &line[0], size_needed, NULL, NULL);
#else
        if (!std::getline(std::cin, line)) {
            // Input stream is bad or EOF received
            line.clear();
            return false;
        }
#endif
        if (!line.empty()) {
            char last = line.back();
            if (last == '/') { // Always return control on '/' symbol
                line.pop_back();
                return false;
            }
            if (last == '\\') { // '\\' changes the default action
                line.pop_back();
                multiline_input = !multiline_input;
            }
        }
        line += '\n';

        // By default, continue input if multiline_input is set
        return multiline_input;
    }

    bool readline(std::string & line, bool multiline_input) {
        set_display(user_input);

        if (simple_io) {
            return readline_simple(line, multiline_input);
        }
        return readline_advanced(line, multiline_input);
    }

}