Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 03:44:35 +00:00)

simple : add README.md

parent 5a3369d8e8
commit 8845160058

examples/parallel/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# llama.cpp/example/parallel

Simplified simulation for serving incoming requests in parallel

examples/simple/README.md (new file, 67 lines)
@@ -0,0 +1,67 @@
# llama.cpp/example/simple

The purpose of this example is to demonstrate minimal usage of llama.cpp for generating text from a given prompt.

The example demonstrates single-batch as well as parallel generation.
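
At a high level the program loads the model, creates a context, tokenizes the prompt, and then samples and decodes one token at a time until the sequence reaches `n_len` tokens. The outline below is a rough sketch of that flow; the llama.cpp entry points named here are real, but their exact signatures have changed between revisions, so treat the argument lists as assumptions rather than a drop-in implementation.

```cpp
// Rough outline of the setup/teardown a minimal llama.cpp program performs.
// Signatures are approximate and have changed across llama.cpp revisions.
#include "llama.h"

int main(int argc, char ** argv) {
    const char * model_path = argv[1]; // e.g. ./models/llama-7b-v2/ggml-model-f16.gguf

    llama_backend_init(); // some revisions take a NUMA flag here

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file(model_path, mparams);

    llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx = 2048; // matches the n_ctx reported in the runs below
    llama_context * ctx = llama_new_context_with_model(model, cparams);

    // ... tokenize the prompt, llama_decode() it, then sample and decode one
    //     token at a time until the sequence reaches n_len tokens ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```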

## Single-batch generation

```bash
./simple ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is" 1

...

main: n_len = 32, n_ctx = 2048, n_parallel = 1, n_kv_req = 32

Hello my name is Shawn and I'm a 20 year old male from the United States. I'm a 20 year old

main: decoded 27 tokens in 2.31 s, speed: 11.68 t/s

llama_print_timings: load time = 579.15 ms
llama_print_timings: sample time = 0.72 ms / 28 runs ( 0.03 ms per token, 38888.89 tokens per second)
llama_print_timings: prompt eval time = 655.63 ms / 10 tokens ( 65.56 ms per token, 15.25 tokens per second)
llama_print_timings: eval time = 2180.97 ms / 27 runs ( 80.78 ms per token, 12.38 tokens per second)
llama_print_timings: total time = 2891.13 ms
```
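
Here `n_parallel = 1`, so the KV cache requirement is simply the sequence length (`n_kv_req = n_len = 32`), and the 27 decoded tokens are `n_len` minus the prompt, which evidently tokenizes to about 5 tokens.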

## Parallel generation

```bash
./simple ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is" 4

...

main: n_len = 32, n_ctx = 2048, n_parallel = 4, n_kv_req = 113

Hello my name is

main: generating 4 sequences ...

main: stream 0 finished
main: stream 1 finished
main: stream 2 finished
main: stream 3 finished

sequence 0:

Hello my name is Shirley. I am a 25-year-old female who has been working for over 5 years as a b

sequence 1:

Hello my name is Renee and I'm a 32 year old female from the United States. I'm looking for a man between

sequence 2:

Hello my name is Diana. I am looking for a housekeeping job. I have experience with children and have my own transportation. I am

sequence 3:

Hello my name is Cody. I am a 3 year old neutered male. I am a very friendly cat. I am very playful and

main: decoded 108 tokens in 3.57 s, speed: 30.26 t/s

llama_print_timings: load time = 587.00 ms
llama_print_timings: sample time = 2.56 ms / 112 runs ( 0.02 ms per token, 43664.72 tokens per second)
llama_print_timings: prompt eval time = 4089.11 ms / 118 tokens ( 34.65 ms per token, 28.86 tokens per second)
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: total time = 4156.04 ms
```
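
In the parallel run the prompt is shared by all four sequences, so its KV cells are stored only once while each generated continuation is stored per sequence. The snippet below reproduces the `n_kv_req = 113` and `decoded 108 tokens` figures shown above under the assumption that the prompt tokenizes to 5 tokens; the variable names are illustrative, not taken from `simple.cpp`.

```cpp
// Back-of-the-envelope check of the parallel run above (assumed prompt size: 5 tokens).
#include <cstdio>

int main() {
    const int n_len      = 32; // total tokens per sequence, prompt included
    const int n_prompt   = 5;  // assumed token count of "Hello my name is"
    const int n_parallel = 4;  // number of sequences generated in parallel

    // prompt KV cells are shared; only the generated part is per sequence
    const int n_kv_req  = n_prompt + (n_len - n_prompt) * n_parallel; // 5 + 27*4 = 113
    const int n_decoded = (n_len - n_prompt) * n_parallel;            // 27*4 = 108

    printf("n_kv_req = %d, decoded tokens = %d\n", n_kv_req, n_decoded);
    return 0;
}
```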

llama.h (36 lines changed)
@@ -90,24 +90,24 @@ extern "C" {
     // model file types
     enum llama_ftype {
         LLAMA_FTYPE_ALL_F32 = 0,
         LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
         // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
         // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
         LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors

         LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
     };
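
As a usage note, the `llama_ftype` values above are what callers pass when quantizing a model. The sketch below assumes the `llama_model_quantize()` / `llama_model_quantize_default_params()` entry points from llama.h; struct fields and return conventions vary slightly between revisions, and the file names are placeholders.

```cpp
// Minimal quantization sketch driven by a llama_ftype value (paths are placeholders).
#include <cstdio>
#include "llama.h"

int main() {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M; // target file type from the enum above
    params.nthread = 4;                         // worker threads used for quantization

    // convert an F16 GGUF model to Q4_K_M
    if (llama_model_quantize("ggml-model-f16.gguf", "ggml-model-q4_k_m.gguf", &params) != 0) {
        fprintf(stderr, "quantization failed\n");
        return 1;
    }
    return 0;
}
```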