// Mirror of https://github.com/ggerganov/llama.cpp.git
// Synced 2024-12-27 03:44:35 +00:00, commit bfe76d4a17 (53 lines, 1.5 KiB, C++)
// Commit message: common : move arg parser to arg.cpp
//   * better categorize args * add cmake * missing climits * missing cstdarg
//   * common : more explicit includes * fix build * refactor gpt_params_parse
//   * update server readme * fix test
//   Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
#include "arg.h"
#include "common.h"

#include <fstream>
#include <string>

// Export usage message (-h) to markdown format
static void export_md(std::string fname, llama_example ex) {
|
|
std::ofstream file(fname, std::ofstream::out | std::ofstream::trunc);
|
|
|
|
gpt_params params;
|
|
auto ctx_arg = gpt_params_parser_init(params, ex);
|
|
|
|
file << "| Argument | Explanation |\n";
|
|
file << "| -------- | ----------- |\n";
|
|
for (auto & opt : ctx_arg.options) {
|
|
file << "| `";
|
|
// args
|
|
for (const auto & arg : opt.args) {
|
|
if (arg == opt.args.front()) {
|
|
file << arg;
|
|
if (opt.args.size() > 1) file << ", ";
|
|
} else {
|
|
file << arg << (arg != opt.args.back() ? ", " : "");
|
|
}
|
|
}
|
|
// value hint
|
|
if (opt.value_hint) {
|
|
std::string md_value_hint(opt.value_hint);
|
|
string_replace_all(md_value_hint, "|", "\\|");
|
|
file << " " << md_value_hint;
|
|
}
|
|
if (opt.value_hint_2) {
|
|
std::string md_value_hint_2(opt.value_hint_2);
|
|
string_replace_all(md_value_hint_2, "|", "\\|");
|
|
file << " " << md_value_hint_2;
|
|
}
|
|
// help text
|
|
std::string md_help(opt.help);
|
|
string_replace_all(md_help, "\n", "<br/>");
|
|
string_replace_all(md_help, "|", "\\|");
|
|
file << "` | " << md_help << " |\n";
|
|
}
|
|
}
int main(int, char **) {
|
|
export_md("autogen-main.md", LLAMA_EXAMPLE_MAIN);
|
|
export_md("autogen-server.md", LLAMA_EXAMPLE_SERVER);
|
|
|
|
return 0;
|
|
}