Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-13 04:00:16 +00:00)
examples : fix build after sampling refactoring
ggml-ci
This commit is contained in:
parent: 4a7f43f28c
commit: 7e48e21b1f
Makefile (2 changed lines)

@@ -545,7 +545,7 @@ llama.o: llama.cpp ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h l
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
 COMMON_H_DEPS = common/common.h common/sampling.h build-info.h common/log.h
-COMMON_DEPS = $(COMMON_H_DEPS) common.o sampling.o
+COMMON_DEPS = $(COMMON_H_DEPS) common.o sampling.o grammar-parser.o
 
 common.o: common/common.cpp $(COMMON_H_DEPS)
 	$(CXX) $(CXXFLAGS) -c $< -o $@
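Note on the Makefile change: common/sampling.h now carries the grammar state itself (see the grammar_parser::parse_state member in the sampling.h hunk below), so grammar-parser.o plausibly becomes a link-time dependency of everything that links the common objects; adding it to COMMON_DEPS is what fixes the examples' build.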
common/log.h (136 changed lines)

@@ -579,75 +579,75 @@ inline std::string log_var_to_string_impl(const std::vector<int> & var)
     return buf.str();
 }
 
-#define LOG_TOKENS_TOSTR_PRETTY(ctx, tokens) \
-    [&tokens, &ctx]() \
-    { \
-        std::stringstream buf; \
-        buf << "[ "; \
-        \
-        bool first = true; \
-        for (const auto &token : tokens) \
-        { \
-            if (!first) \
-                buf << ", "; \
-            else \
-                first = false; \
-            \
-            auto detokenized = llama_token_to_piece(ctx, token); \
-            \
-            detokenized.erase( \
-                std::remove_if( \
-                    detokenized.begin(), \
-                    detokenized.end(), \
-                    [](const unsigned char c) { return !std::isprint(c); }), \
-                detokenized.end()); \
-            \
-            buf \
-                << "'" << detokenized << "'" \
-                << ":" << std::to_string(token); \
-        } \
-        buf << " ]"; \
-        \
-        return buf.str(); \
-    }() \
-    .c_str()
-
-#define LOG_BATCH_TOSTR_PRETTY(ctx, batch) \
-    [&batch, &ctx]() \
-    { \
-        std::stringstream buf; \
-        buf << "[ "; \
-        \
-        bool first = true; \
-        for (int i = 0; i < batch.n_tokens; ++i) \
-        { \
-            if (!first) \
-                buf << ", "; \
-            else \
-                first = false; \
-            \
-            auto detokenized = llama_token_to_piece(ctx, batch.token[i]); \
-            \
-            detokenized.erase( \
-                std::remove_if( \
-                    detokenized.begin(), \
-                    detokenized.end(), \
-                    [](const unsigned char c) { return !std::isprint(c); }), \
-                detokenized.end()); \
-            \
-            buf \
-                << "\n" << std::to_string(i) \
-                << ":token '" << detokenized << "'" \
-                << ":pos " << std::to_string(batch.pos[i]) \
-                << ":n_seq_id " << std::to_string(batch.n_seq_id[i]) \
-                << ":seq_id " << std::to_string(batch.seq_id[i][0]) \
-                << ":logits " << std::to_string(batch.logits[i]); \
-        } \
-        buf << " ]"; \
-        \
-        return buf.str(); \
-    }() \
-    .c_str()
+template <typename C, typename T>
+inline std::string LOG_TOKENS_TOSTR_PRETTY(const C & ctx, const T & tokens)
+{
+    std::stringstream buf;
+    buf << "[ ";
+
+    bool first = true;
+    for (const auto &token : tokens)
+    {
+        if (!first) {
+            buf << ", ";
+        } else {
+            first = false;
+        }
+
+        auto detokenized = llama_token_to_piece(ctx, token);
+
+        detokenized.erase(
+            std::remove_if(
+                detokenized.begin(),
+                detokenized.end(),
+                [](const unsigned char c) { return !std::isprint(c); }),
+            detokenized.end());
+
+        buf
+            << "'" << detokenized << "'"
+            << ":" << std::to_string(token);
+    }
+    buf << " ]";
+
+    return buf.str();
+}
+
+template <typename C, typename B>
+inline std::string LOG_BATCH_TOSTR_PRETTY(const C & ctx, const B & batch)
+{
+    std::stringstream buf;
+    buf << "[ ";
+
+    bool first = true;
+    for (int i = 0; i < batch.n_tokens; ++i)
+    {
+        if (!first) {
+            buf << ", ";
+        } else {
+            first = false;
+        }
+
+        auto detokenized = llama_token_to_piece(ctx, batch.token[i]);
+
+        detokenized.erase(
+            std::remove_if(
+                detokenized.begin(),
+                detokenized.end(),
+                [](const unsigned char c) { return !std::isprint(c); }),
+            detokenized.end());
+
+        buf
+            << "\n" << std::to_string(i)
+            << ":token '" << detokenized << "'"
+            << ":pos " << std::to_string(batch.pos[i])
+            << ":n_seq_id " << std::to_string(batch.n_seq_id[i])
+            << ":seq_id " << std::to_string(batch.seq_id[i][0])
+            << ":logits " << std::to_string(batch.logits[i]);
+    }
+    buf << " ]";
+
+    return buf.str();
+}
 
 #ifdef LOG_DISABLE_LOGS
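The shape of this change: LOG_TOKENS_TOSTR_PRETTY and LOG_BATCH_TOSTR_PRETTY were macros expanding to an immediately-invoked lambda with a trailing .c_str(); they are now inline template functions returning std::string. Every call site therefore gains an explicit .c_str(), which accounts for the bulk of the per-example hunks below. A minimal before/after of the call pattern, taken from the diffs in this commit:

    // before: the macro expansion itself ended in .c_str()
    LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp));

    // after: the template function returns std::string; the caller converts
    LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());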
common/sampling.h

@@ -50,6 +50,7 @@ struct llama_sampling_context {
     // internal
     grammar_parser::parse_state parsed_grammar;
 
+    // TODO: replace with ring-buffer
     std::vector<llama_token> prev;
     std::vector<llama_token_data> cur;
 };
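This struct is the pivot of the refactor: per-example state (the last_tokens history, the candidates buffer, the llama_grammar) is folded into a single heap-allocated llama_sampling_context. A minimal sketch of the lifecycle the examples below migrate to, using only calls that appear in this commit (model, context, and prompt setup elided):

    // once, after the model and context are set up
    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params);

    // per generated token: sample, then record the token so repetition
    // penalties and grammar state advance; this replaces the manual
    // last_tokens.erase()/push_back() bookkeeping
    const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);
    llama_sampling_accept(ctx_sampling, ctx, id);

    // the token history formerly in last_tokens now lives in ctx_sampling->prev
    LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());

    // once, at shutdown (or llama_sampling_reset to restart a generation)
    llama_sampling_free(ctx_sampling);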
examples/infill/infill.cpp

@@ -257,12 +257,12 @@ int main(int argc, char ** argv) {
 
     LOG("prefix: \"%s\"\n", log_tostr(params.input_prefix));
     LOG("suffix: \"%s\"\n", log_tostr(params.input_suffix));
-    LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp));
+    LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
 
     // Should not run without any tokens
     if (embd_inp.empty()) {
         embd_inp.push_back(llama_token_bos(ctx));
-        LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp));
+        LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
     }
 
     // Tokenize negative prompt
@@ -273,10 +273,10 @@ int main(int argc, char ** argv) {
         LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));
 
         guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, add_bos);
-        LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp));
+        LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());
 
         std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, add_bos);
-        LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp));
+        LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());
 
         original_prompt_len = original_inp.size();
         guidance_offset = (int)guidance_inp.size() - original_prompt_len;
@@ -294,8 +294,8 @@ int main(int argc, char ** argv) {
         params.n_keep = (int)embd_inp.size();
     }
 
-    LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx));
-    LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx));
+    LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx).c_str());
+    LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx).c_str());
 
 
     // enable interactive mode if interactive start is specified
@@ -388,9 +388,6 @@ int main(int argc, char ** argv) {
             grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
     }
 
-    // TODO: replace with ring-buffer
-    std::vector<llama_token> last_tokens(n_ctx);
-    std::fill(last_tokens.begin(), last_tokens.end(), 0);
     LOG_TEE("\n##### Infill mode #####\n\n");
     if (params.infill) {
         printf("\n************\n");
@@ -433,11 +430,7 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> embd;
     std::vector<llama_token> embd_guidance;
 
-    const int n_vocab = llama_n_vocab(model);
-
-    llama_sampling_context ctx_sampling = llama_sampling_context_init(params, grammar);
-    std::vector<llama_token_data> candidates;
-    candidates.reserve(n_vocab);
+    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params);
 
     while (n_remain != 0 || params.interactive) {
         // predict
@@ -484,7 +477,7 @@ int main(int argc, char ** argv) {
 
             LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance);
 
-            LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd));
+            LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
 
         }
 
@@ -512,7 +505,7 @@ int main(int argc, char ** argv) {
                     input_buf = embd_guidance.data();
                     input_size = embd_guidance.size();
 
-                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance));
+                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str());
                 } else {
                     input_buf = embd.data();
                     input_size = embd.size();
@@ -535,7 +528,7 @@ int main(int argc, char ** argv) {
                 n_eval = params.n_batch;
             }
 
-            LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd));
+            LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
 
            if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
                 LOG_TEE("%s : failed to eval\n", __func__);
@@ -554,12 +547,11 @@ int main(int argc, char ** argv) {
 
         if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
 
-            const llama_token id = llama_sampling_sample(ctx, ctx_guidance, ctx_sampling, last_tokens, candidates);
+            const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);
 
-            last_tokens.erase(last_tokens.begin());
-            last_tokens.push_back(id);
+            llama_sampling_accept(ctx_sampling, ctx, id);
 
-            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, last_tokens));
+            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());
 
             embd.push_back(id);
 
@@ -575,8 +567,8 @@ int main(int argc, char ** argv) {
             LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
             while ((int) embd_inp.size() > n_consumed) {
                 embd.push_back(embd_inp[n_consumed]);
-                last_tokens.erase(last_tokens.begin());
-                last_tokens.push_back(embd_inp[n_consumed]);
+                ctx_sampling->prev.erase(ctx_sampling->prev.begin());
+                ctx_sampling->prev.push_back(embd_inp[n_consumed]);
                 ++n_consumed;
                 if ((int) embd.size() >= params.n_batch) {
                     break;
@@ -608,7 +600,7 @@ int main(int argc, char ** argv) {
         if ((int) embd_inp.size() <= n_consumed) {
 
             // deal with eot token in infill mode
-            if ((last_tokens.back() == llama_token_eot(ctx) || is_interacting) && params.interactive){
+            if ((ctx_sampling->prev.back() == llama_token_eot(ctx) || is_interacting) && params.interactive){
                 if(is_interacting && !params.interactive_first) {
                     // print an eot token
                     printf("%s", llama_token_to_piece(ctx, llama_token_eot(ctx)).c_str());
@@ -675,7 +667,7 @@ int main(int argc, char ** argv) {
                 is_interacting = false;
             }
             // deal with end of text token in interactive mode
-            else if (last_tokens.back() == llama_token_eos(ctx)) {
+            else if (ctx_sampling->prev.back() == llama_token_eos(ctx)) {
                 LOG("found EOS token\n");
 
                 if (params.interactive) {
@@ -727,7 +719,7 @@ int main(int argc, char ** argv) {
                     const size_t original_size = embd_inp.size();
 
                     const auto line_inp = ::llama_tokenize(ctx, buffer, false);
-                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp));
+                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
 
                     embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
 
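One subtlety in the infill hunks above: generated tokens go through llama_sampling_accept, but the prompt-forwarding path keeps maintaining the history by hand; it just targets the context's buffer now:

    // same ring-buffer-style bookkeeping as before, only on
    // ctx_sampling->prev instead of the local last_tokens vector
    ctx_sampling->prev.erase(ctx_sampling->prev.begin());
    ctx_sampling->prev.push_back(embd_inp[n_consumed]);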
examples/main/main.cpp

@@ -3,7 +3,6 @@
 #include "console.h"
 #include "llama.h"
 #include "build-info.h"
-#include "grammar-parser.h"
 
 #include <cassert>
 #include <cinttypes>
@@ -245,12 +244,12 @@ int main(int argc, char ** argv) {
     }
 
     LOG("prompt: \"%s\"\n", log_tostr(params.prompt));
-    LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp));
+    LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
 
     // Should not run without any tokens
     if (embd_inp.empty()) {
         embd_inp.push_back(llama_token_bos(ctx));
-        LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp));
+        LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
     }
 
     // Tokenize negative prompt
@@ -261,10 +260,10 @@ int main(int argc, char ** argv) {
         LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));
 
         guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, add_bos);
-        LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp));
+        LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());
 
         std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, add_bos);
-        LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp));
+        LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());
 
         original_prompt_len = original_inp.size();
         guidance_offset = (int)guidance_inp.size() - original_prompt_len;
@@ -323,8 +322,8 @@ int main(int argc, char ** argv) {
     const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", add_bos);
     const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false);
 
-    LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx));
-    LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx));
+    LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx).c_str());
+    LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx).c_str());
 
     // in instruct mode, we inject a prefix and a suffix to each input by the user
     if (params.instruct) {
@@ -403,35 +402,6 @@ int main(int argc, char ** argv) {
     LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
     LOG_TEE("\n\n");
 
-    struct llama_grammar * grammar = NULL;
-    grammar_parser::parse_state parsed_grammar;
-
-    if (!params.grammar.empty()) {
-        parsed_grammar = grammar_parser::parse(params.grammar.c_str());
-        // will be empty (default) if there are parse errors
-        if (parsed_grammar.rules.empty()) {
-            return 1;
-        }
-        LOG_TEE("%s: grammar:\n", __func__);
-        grammar_parser::print_grammar(stderr, parsed_grammar);
-        LOG_TEE("\n");
-
-        {
-            auto it = sparams.logit_bias.find(llama_token_eos(ctx));
-            if (it != sparams.logit_bias.end() && it->second == -INFINITY) {
-                LOG_TEE("%s: warning: EOS token is disabled, which will cause most grammars to fail\n", __func__);
-            }
-        }
-
-        std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
-        grammar = llama_grammar_init(
-            grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
-    }
-
-    // TODO: replace with ring-buffer
-    std::vector<llama_token> last_tokens(n_ctx);
-    std::fill(last_tokens.begin(), last_tokens.end(), 0);
-
     if (params.interactive) {
         const char *control_message;
         if (params.multiline_input) {
@@ -471,11 +441,7 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> embd;
     std::vector<llama_token> embd_guidance;
 
-    const int n_vocab = llama_n_vocab(model);
-
-    llama_sampling_context ctx_sampling = llama_sampling_context_init(params, grammar);
-    std::vector<llama_token_data> candidates;
-    candidates.reserve(n_vocab);
+    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params);
 
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
@@ -522,7 +488,7 @@ int main(int argc, char ** argv) {
 
             LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance);
 
-            LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd));
+            LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
 
             LOG("clear session path\n");
             path_session.clear();
@@ -574,7 +540,7 @@ int main(int argc, char ** argv) {
                     input_buf = embd_guidance.data();
                     input_size = embd_guidance.size();
 
-                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance));
+                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str());
                 } else {
                     input_buf = embd.data();
                     input_size = embd.size();
@@ -597,7 +563,7 @@ int main(int argc, char ** argv) {
                 n_eval = params.n_batch;
             }
 
-            LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd));
+            LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
 
             if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
                 LOG_TEE("%s : failed to eval\n", __func__);
@@ -627,12 +593,11 @@ int main(int argc, char ** argv) {
                 LOG("saved session to %s\n", path_session.c_str());
             }
 
-            const llama_token id = llama_sampling_sample(ctx, ctx_guidance, ctx_sampling, last_tokens, candidates);
+            const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);
 
-            last_tokens.erase(last_tokens.begin());
-            last_tokens.push_back(id);
+            llama_sampling_accept(ctx_sampling, ctx, id);
 
-            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, last_tokens));
+            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());
 
             embd.push_back(id);
 
@@ -648,8 +613,8 @@ int main(int argc, char ** argv) {
             LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
             while ((int) embd_inp.size() > n_consumed) {
                 embd.push_back(embd_inp[n_consumed]);
-                last_tokens.erase(last_tokens.begin());
-                last_tokens.push_back(embd_inp[n_consumed]);
+                ctx_sampling->prev.erase(ctx_sampling->prev.begin());
+                ctx_sampling->prev.push_back(embd_inp[n_consumed]);
                 ++n_consumed;
                 if ((int) embd.size() >= params.n_batch) {
                     break;
@@ -682,7 +647,7 @@ int main(int argc, char ** argv) {
             // check for reverse prompt
             if (!params.antiprompt.empty()) {
                 std::string last_output;
-                for (auto id : last_tokens) {
+                for (auto id : ctx_sampling->prev) {
                     last_output += llama_token_to_piece(ctx, id);
                 }
 
@@ -711,7 +676,7 @@ int main(int argc, char ** argv) {
             }
 
             // deal with end of text token in interactive mode
-            if (last_tokens.back() == llama_token_eos(ctx)) {
+            if (ctx_sampling->prev.back() == llama_token_eos(ctx)) {
                 LOG("found EOS token\n");
 
                 if (params.interactive) {
@@ -783,7 +748,7 @@ int main(int argc, char ** argv) {
                 }
 
                 const auto line_inp = ::llama_tokenize(ctx, buffer, false);
-                LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp));
+                LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
 
                 embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
 
@@ -810,15 +775,7 @@ int main(int argc, char ** argv) {
 
             if (n_past > 0) {
                 if (is_interacting) {
-                    // reset grammar state if we're restarting generation
-                    if (grammar != NULL) {
-                        llama_grammar_free(grammar);
-
-                        std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
-                        grammar = llama_grammar_init(
-                            grammar_rules.data(), grammar_rules.size(),
-                            parsed_grammar.symbol_ids.at("root"));
-                    }
+                    llama_sampling_reset(ctx_sampling);
                 }
                 is_interacting = false;
             }
@@ -850,9 +807,7 @@ int main(int argc, char ** argv) {
     llama_free(ctx);
     llama_free_model(model);
 
-    if (grammar != NULL) {
-        llama_grammar_free(grammar);
-    }
+    llama_sampling_free(ctx_sampling);
     llama_backend_free();
 
 #ifndef LOG_DISABLE_LOGS
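Net effect in main.cpp: the block that parsed params.grammar, warned when the EOS token was disabled, and built a llama_grammar is deleted rather than moved; that work now happens behind llama_sampling_init, and restarting an interactive generation shrinks from a free-and-rebuild of the grammar to a single call:

    // before: llama_grammar_free() followed by llama_grammar_init() from parsed_grammar
    // after:
    llama_sampling_reset(ctx_sampling);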
examples/parallel/parallel.cpp

@@ -51,6 +51,12 @@ static std::vector<std::string> k_prompts = {
 };
 
 struct client {
+    ~client() {
+        if (ctx_sampling) {
+            llama_sampling_free(ctx_sampling);
+        }
+    }
+
     int32_t id = 0;
 
     llama_seq_id seq_id = -1;
@@ -68,9 +74,7 @@ struct client {
     std::string prompt;
     std::string response;
 
-    std::vector<llama_token> tokens_prev;
-
-    llama_sampling_context ctx_sampling;
+    struct llama_sampling_context * ctx_sampling = nullptr;
 };
 
 static void print_date_time() {
@@ -148,20 +152,14 @@ int main(int argc, char ** argv) {
     fflush(stderr);
 
     const int n_ctx = llama_n_ctx(ctx);
-    const int n_vocab = llama_n_vocab(model);
 
     std::vector<client> clients(n_clients);
     for (size_t i = 0; i < clients.size(); ++i) {
         auto & client = clients[i];
         client.id = i;
-        client.tokens_prev.resize(std::max(256, params.n_predict));
-        std::fill(client.tokens_prev.begin(), client.tokens_prev.end(), 0);
-        client.ctx_sampling = llama_sampling_context_init(params, NULL);
+        client.ctx_sampling = llama_sampling_init(params);
     }
 
-    std::vector<llama_token_data> candidates;
-    candidates.reserve(n_vocab);
-
     std::vector<llama_token> tokens_system;
     tokens_system = ::llama_tokenize(ctx, k_system, true);
     const int32_t n_tokens_system = tokens_system.size();
@@ -253,7 +251,7 @@ int main(int argc, char ** argv) {
             client.prompt = client.input + "\nAssistant:";
             client.response = "";
 
-            std::fill(client.tokens_prev.begin(), client.tokens_prev.end(), 0);
+            llama_sampling_reset(client.ctx_sampling);
 
             // do not prepend BOS because we have a system prompt!
             std::vector<llama_token> tokens_prompt;
@@ -262,7 +260,7 @@ int main(int argc, char ** argv) {
            for (size_t i = 0; i < tokens_prompt.size(); ++i) {
                 batch.token [batch.n_tokens] = tokens_prompt[i];
                 batch.pos   [batch.n_tokens] = i + n_tokens_system;
-                batch.n_seq_id[batch.n_tokens] = client.id;
+                batch.n_seq_id[batch.n_tokens] = 1;
                 batch.seq_id [batch.n_tokens][0] = client.id;
                 batch.logits [batch.n_tokens] = false;
                 batch.n_tokens += 1;
@@ -346,7 +344,9 @@ int main(int argc, char ** argv) {
             //printf("client %d, seq %d, token %d, pos %d, batch %d\n",
             //        client.id, client.seq_id, client.sampled, client.n_decoded, client.i_batch);
 
-            const llama_token id = llama_sampling_sample(ctx, NULL, client.ctx_sampling, client.tokens_prev, candidates, client.i_batch - i);
+            const llama_token id = llama_sampling_sample(client.ctx_sampling, ctx, NULL, client.i_batch - i);
+
+            llama_sampling_accept(client.ctx_sampling, ctx, id);
 
             if (client.n_decoded == 1) {
                 // start measuring generation time after the first token to make sure all concurrent clients
@@ -354,11 +354,8 @@ int main(int argc, char ** argv) {
                 client.t_start_gen = ggml_time_us();
             }
 
-            // remember which tokens were sampled - used for repetition penalties during sampling
-            client.tokens_prev.erase(client.tokens_prev.begin());
-            client.tokens_prev.push_back(id);
-
             const std::string token_str = llama_token_to_piece(ctx, id);
 
             client.response += token_str;
             client.sampled = id;
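In the parallel example each client owns its sampling context, hence the new destructor that releases it; this supersedes the per-client tokens_prev vector and the shared candidates buffer. The per-client flow, as wired up above:

    client.ctx_sampling = llama_sampling_init(params);  // in the setup loop, one per client

    // per decoded token, passing the client's slot in the batch:
    const llama_token id = llama_sampling_sample(client.ctx_sampling, ctx, NULL, client.i_batch - i);
    llama_sampling_accept(client.ctx_sampling, ctx, id);

Note also the drive-by fix in the batch setup: batch.n_seq_id holds the number of sequence ids attached to the token (1 here), not the client id; the client id belongs in batch.seq_id.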
examples/server/server.cpp

@@ -1,7 +1,6 @@
 #include "common.h"
 #include "llama.h"
 #include "build-info.h"
-#include "grammar-parser.h"
 
 #ifndef NDEBUG
 // crash the server in debug mode, otherwise send an http 500 error
@@ -195,17 +194,13 @@ struct llama_server_context
 
     json prompt;
     std::vector<llama_token> embd;
-    std::vector<llama_token> last_n_tokens;
 
     llama_model *model = nullptr;
     llama_context *ctx = nullptr;
     gpt_params params;
-    llama_sampling_context ctx_sampling;
+    llama_sampling_context *ctx_sampling;
     int n_ctx;
 
-    grammar_parser::parse_state parsed_grammar;
-    llama_grammar *grammar = nullptr;
-
     bool truncated = false;
     bool stopped_eos = false;
     bool stopped_word = false;
@@ -252,11 +247,10 @@ struct llama_server_context
         n_remain = 0;
         n_past = 0;
 
-        if (grammar != nullptr) {
-            llama_grammar_free(grammar);
-            grammar = nullptr;
-            ctx_sampling = llama_sampling_context_init(params, NULL);
+        if (ctx_sampling != nullptr) {
+            llama_sampling_free(ctx_sampling);
         }
+        ctx_sampling = llama_sampling_init(params);
     }
 
     bool loadModel(const gpt_params &params_)
@@ -269,8 +263,6 @@ struct llama_server_context
             return false;
         }
         n_ctx = llama_n_ctx(ctx);
-        last_n_tokens.resize(n_ctx);
-        std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
         return true;
     }
 
@@ -321,27 +313,7 @@ struct llama_server_context
 
     bool loadGrammar()
     {
-        if (!params.grammar.empty()) {
-            parsed_grammar = grammar_parser::parse(params.grammar.c_str());
-            // will be empty (default) if there are parse errors
-            if (parsed_grammar.rules.empty()) {
-                LOG_ERROR("grammar parse error", {{"grammar", params.grammar}});
-                return false;
-            }
-            grammar_parser::print_grammar(stderr, parsed_grammar);
-
-            {
-                auto it = params.sampling_params.logit_bias.find(llama_token_eos(ctx));
-                if (it != params.sampling_params.logit_bias.end() && it->second == -INFINITY) {
-                    LOG_WARNING("EOS token is disabled, which will cause most grammars to fail", {});
-                }
-            }
-
-            std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
-            grammar = llama_grammar_init(
-                grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
-        }
-        ctx_sampling = llama_sampling_context_init(params, grammar);
+        ctx_sampling = llama_sampling_init(params);
         return true;
     }
 
@@ -383,7 +355,7 @@ struct llama_server_context
             std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
             const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
             new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
-            std::copy(prompt_tokens.end() - params.n_ctx, prompt_tokens.end(), last_n_tokens.begin());
+            std::copy(prompt_tokens.end() - params.n_ctx, prompt_tokens.end(), ctx_sampling->prev.begin());
 
             LOG_VERBOSE("input truncated", {
                 {"n_ctx", params.n_ctx},
@@ -398,8 +370,8 @@ struct llama_server_context
         else
         {
             const size_t ps = num_prompt_tokens;
-            std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
-            std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
+            std::fill(ctx_sampling->prev.begin(), ctx_sampling->prev.end() - ps, 0);
+            std::copy(prompt_tokens.begin(), prompt_tokens.end(), ctx_sampling->prev.end() - ps);
         }
 
         // compare the evaluated prompt with the new prompt
@@ -443,7 +415,7 @@ struct llama_server_context
             std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
             const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
             new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
-            std::copy(prompt_tokens.end() - n_ctx, prompt_tokens.end(), last_n_tokens.begin());
+            std::copy(prompt_tokens.end() - n_ctx, prompt_tokens.end(), ctx_sampling->prev.begin());
 
             LOG_VERBOSE("input truncated", {
                 {"n_ctx", n_ctx},
@@ -458,8 +430,8 @@ struct llama_server_context
         else
         {
             const size_t ps = num_prompt_tokens;
-            std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
-            std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
+            std::fill(ctx_sampling->prev.begin(), ctx_sampling->prev.end() - ps, 0);
+            std::copy(prompt_tokens.begin(), prompt_tokens.end(), ctx_sampling->prev.end() - ps);
         }
 
         // compare the evaluated prompt with the new prompt
@@ -554,27 +526,24 @@ struct llama_server_context
 
        {
             // out of user input, sample next token
-            std::vector<llama_token_data> candidates;
-            candidates.reserve(llama_n_vocab(model));
-
-            result.tok = llama_sampling_sample(ctx, NULL, ctx_sampling, last_n_tokens, candidates);
-
-            llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
+            result.tok = llama_sampling_sample(ctx_sampling, ctx, NULL);
+
+            llama_token_data_array cur_p = { ctx_sampling->cur.data(), ctx_sampling->cur.size(), false };
 
             const int32_t n_probs = params.sampling_params.n_probs;
             if (params.sampling_params.temp <= 0 && n_probs > 0)
             {
                 // For llama_sample_token_greedy we need to sort candidates
-                llama_sample_softmax(ctx, &candidates_p);
+                llama_sample_softmax(ctx, &cur_p);
             }
 
-            for (size_t i = 0; i < std::min(candidates_p.size, (size_t)n_probs); ++i)
+            for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i)
             {
-                result.probs.push_back({candidates_p.data[i].id, candidates_p.data[i].p});
+                result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p});
             }
 
-            last_n_tokens.erase(last_n_tokens.begin());
-            last_n_tokens.push_back(result.tok);
+            llama_sampling_accept(ctx_sampling, ctx, result.tok);
+
             if (tg) {
                 num_tokens_predicted++;
             }
@@ -1235,7 +1204,7 @@ static void parse_options_completion(const json &body, llama_server_context &lla
         }
     }
 
-    llama.ctx_sampling = llama_sampling_context_init(llama.params, llama.grammar);
+    llama.ctx_sampling = llama_sampling_init(llama.params);
 
     LOG_VERBOSE("completion parameters parsed", format_generation_settings(llama));
 }
@@ -1793,9 +1762,7 @@ int main(int argc, char **argv)
         return 1;
     }
 
-    if (llama.grammar != nullptr) {
-        llama_grammar_free(llama.grammar);
-    }
+    llama_sampling_free(llama.ctx_sampling);
     llama_backend_free();
 
     return 0;
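Server-side consequence: the candidate list the /completion endpoint reads for n_probs no longer has to be assembled by hand; after llama_sampling_sample the scored candidates sit in ctx_sampling->cur, so the probability readout just wraps that vector:

    llama_token_data_array cur_p = { ctx_sampling->cur.data(), ctx_sampling->cur.size(), false };

    for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i) {
        result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p});
    }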
examples/speculative/speculative.cpp

@@ -138,7 +138,7 @@ int main(int argc, char ** argv) {
 
             const auto & tokens = drafts[i].tokens;
 
-            LOG("draft %d: %s\n", i, LOG_TOKENS_TOSTR_PRETTY(ctx_dft, tokens));
+            LOG("draft %d: %s\n", i, LOG_TOKENS_TOSTR_PRETTY(ctx_dft, tokens).c_str());
         }
 
         int i_dft = 0;