mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 10:54:36 +00:00
7eee341bee
Some checks are pending
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/full-cuda.Dockerfile platforms:linux/amd64 tag:full-cuda]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/full-musa.Dockerfile platforms:linux/amd64 tag:full-musa]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/full.Dockerfile platforms:linux/amd64,linux/arm64 tag:full]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli-cuda.Dockerfile platforms:linux/amd64 tag:light-cuda]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli-intel.Dockerfile platforms:linux/amd64 tag:light-intel]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli-musa.Dockerfile platforms:linux/amd64 tag:light-musa]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli.Dockerfile platforms:linux/amd64,linux/arm64 tag:light]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server-cuda.Dockerfile platforms:linux/amd64 tag:server-cuda]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server-intel.Dockerfile platforms:linux/amd64 tag:server-intel]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server-musa.Dockerfile platforms:linux/amd64 tag:server-musa]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server.Dockerfile platforms:linux/amd64,linux/arm64 tag:server]) (push) Waiting to run
Nix CI / nix-eval (macos-latest) (push) Waiting to run
Nix CI / nix-eval (ubuntu-latest) (push) Waiting to run
Nix CI / nix-build (macos-latest) (push) Waiting to run
Nix CI / nix-build (ubuntu-latest) (push) Waiting to run
flake8 Lint / Lint (push) Waiting to run
* common : use common_ prefix for common library functions --------- Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
48 lines
1.4 KiB
C++
#include "ggml.h"
|
|
#include "llama.h"
|
|
#include "common.h"
|
|
#include "ngram-cache.h"
|
|
|
|
#include <cstdint>
|
|
#include <cstdio>
|
|
#include <fstream>
|
|
#include <iostream>
|
|
#include <string>
|
|
#include <unordered_map>
|
|
#include <vector>
|
|
|
|
// Print a short description and the usage line for this tool to stderr.
static void print_usage(char* argv0) {
    fprintf(stderr,
            "Merges multiple lookup cache files into a single one.\n"
            "Usage: %s [--help] lookup_part_1.bin lookup_part_2.bin ... lookup_merged.bin\n",
            argv0);
}
|
|
|
|
int main(int argc, char ** argv){
|
|
if (argc < 3) {
|
|
print_usage(argv[0]);
|
|
exit(1);
|
|
}
|
|
|
|
std::vector<std::string> args;
|
|
args.resize(argc-1);
|
|
for (int i = 0; i < argc-1; ++i) {
|
|
args[i] = argv[i+1];
|
|
if (args[i] == "-h" || args[i] == "--help") {
|
|
print_usage(argv[0]);
|
|
exit(0);
|
|
}
|
|
}
|
|
|
|
fprintf(stderr, "lookup-merge: loading file %s\n", args[0].c_str());
|
|
common_ngram_cache ngram_cache_merged = common_ngram_cache_load(args[0]);
|
|
|
|
for (size_t i = 1; i < args.size()-1; ++i) {
|
|
fprintf(stderr, "lookup-merge: loading file %s\n", args[i].c_str());
|
|
common_ngram_cache ngram_cache = common_ngram_cache_load(args[i]);
|
|
|
|
common_ngram_cache_merge(ngram_cache_merged, ngram_cache);
|
|
}
|
|
|
|
fprintf(stderr, "lookup-merge: saving file %s\n", args.back().c_str());
|
|
common_ngram_cache_save(ngram_cache_merged, args.back());
|
|
}
|