Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-24 02:14:35 +00:00)
Server: Enable setting default sampling parameters via command-line (#8402)
* Load server sampling parameters from the server context by default.
* Wordsmithing comment
This commit is contained in:
parent fd560fe680
commit a59f8fdc85
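In practice, sampling options passed when launching the server (e.g. --temp or --top-k, assuming the standard llama.cpp common sampling flags) now seed the defaults for every request, while a request body can still override them; a hedged sketch of the pattern follows the diff below.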
@@ -884,7 +884,8 @@ struct server_context {

     bool launch_slot_with_task(server_slot & slot, const server_task & task) {
         slot_params default_params;
-        llama_sampling_params default_sparams;
+        // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them)
+        llama_sampling_params default_sparams = params.sparams;
         auto & data = task.data;

         if (data.count("__oaicompat") != 0) {
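For readers unfamiliar with the pattern this one-line change adopts, here is a minimal, self-contained C++ sketch of "server-wide defaults, per-request overrides". It is illustrative only, not llama.cpp's actual code: the sampling_params struct, its fields, and the JSON keys are assumptions, and the sketch uses nlohmann::json (which llama.cpp vendors) for the request body.

// Illustrative sketch only, not llama.cpp source code. Shows how
// command-line defaults seed each request's sampling parameters,
// which the request's JSON body may still override.
#include <nlohmann/json.hpp>

using json = nlohmann::json;

struct sampling_params {
    float   temp  = 0.80f; // server-wide default, e.g. set via --temp (assumed)
    int32_t top_k = 40;    // server-wide default, e.g. set via --top-k (assumed)
};

// Start from a copy of the server-wide defaults, then let the
// request's JSON body override any field it explicitly sets.
static sampling_params resolve_sampling(const sampling_params & server_defaults,
                                        const json & request) {
    sampling_params p = server_defaults;
    p.temp  = request.value("temperature", p.temp);
    p.top_k = request.value("top_k",       p.top_k);
    return p;
}

int main() {
    sampling_params server_defaults; // as parsed from the command line
    json request = json::parse(R"({"top_k": 20})");

    // temp keeps the server default; top_k follows the request
    sampling_params p = resolve_sampling(server_defaults, request);
    return (p.top_k == 20 && p.temp == 0.80f) ? 0 : 1;
}

Starting from a copy of the server-wide struct, rather than a freshly default-constructed one as in the removed line, is what makes the command-line values act as the baseline; the per-request JSON lookup then only changes the fields the client actually sent.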