#include "common.h"
#include "llama.h"

#include <cstdio>
#include <cstring>
#include <vector>
#include <string>
#include <unordered_map>

struct quant_option {
    std::string name;
    llama_ftype ftype;
    std::string desc;
};
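
// Supported quantization presets. Where measured, desc lists the approximate
// model size and the perplexity increase for a reference model; for the
// i-quants it lists the bits per weight (bpw) instead.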
static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ " Q4_0 " , LLAMA_FTYPE_MOSTLY_Q4_0 , " 4.34G, +0.4685 ppl @ Llama-3-8B " , } ,
{ " Q4_1 " , LLAMA_FTYPE_MOSTLY_Q4_1 , " 4.78G, +0.4511 ppl @ Llama-3-8B " , } ,
{ " Q5_0 " , LLAMA_FTYPE_MOSTLY_Q5_0 , " 5.21G, +0.1316 ppl @ Llama-3-8B " , } ,
{ " Q5_1 " , LLAMA_FTYPE_MOSTLY_Q5_1 , " 5.65G, +0.1062 ppl @ Llama-3-8B " , } ,
{ " IQ2_XXS " , LLAMA_FTYPE_MOSTLY_IQ2_XXS , " 2.06 bpw quantization " , } ,
{ " IQ2_XS " , LLAMA_FTYPE_MOSTLY_IQ2_XS , " 2.31 bpw quantization " , } ,
{ " IQ2_S " , LLAMA_FTYPE_MOSTLY_IQ2_S , " 2.5 bpw quantization " , } ,
{ " IQ2_M " , LLAMA_FTYPE_MOSTLY_IQ2_M , " 2.7 bpw quantization " , } ,
{ " IQ1_S " , LLAMA_FTYPE_MOSTLY_IQ1_S , " 1.56 bpw quantization " , } ,
{ " IQ1_M " , LLAMA_FTYPE_MOSTLY_IQ1_M , " 1.75 bpw quantization " , } ,
{ " Q2_K " , LLAMA_FTYPE_MOSTLY_Q2_K , " 2.96G, +3.5199 ppl @ Llama-3-8B " , } ,
{ " Q2_K_S " , LLAMA_FTYPE_MOSTLY_Q2_K_S , " 2.96G, +3.1836 ppl @ Llama-3-8B " , } ,
{ " IQ3_XXS " , LLAMA_FTYPE_MOSTLY_IQ3_XXS , " 3.06 bpw quantization " , } ,
{ " IQ3_S " , LLAMA_FTYPE_MOSTLY_IQ3_S , " 3.44 bpw quantization " , } ,
{ " IQ3_M " , LLAMA_FTYPE_MOSTLY_IQ3_M , " 3.66 bpw quantization mix " , } ,
{ " Q3_K " , LLAMA_FTYPE_MOSTLY_Q3_K_M , " alias for Q3_K_M " } ,
{ " IQ3_XS " , LLAMA_FTYPE_MOSTLY_IQ3_XS , " 3.3 bpw quantization " , } ,
{ " Q3_K_S " , LLAMA_FTYPE_MOSTLY_Q3_K_S , " 3.41G, +1.6321 ppl @ Llama-3-8B " , } ,
{ " Q3_K_M " , LLAMA_FTYPE_MOSTLY_Q3_K_M , " 3.74G, +0.6569 ppl @ Llama-3-8B " , } ,
{ " Q3_K_L " , LLAMA_FTYPE_MOSTLY_Q3_K_L , " 4.03G, +0.5562 ppl @ Llama-3-8B " , } ,
{ " IQ4_NL " , LLAMA_FTYPE_MOSTLY_IQ4_NL , " 4.50 bpw non-linear quantization " , } ,
{ " IQ4_XS " , LLAMA_FTYPE_MOSTLY_IQ4_XS , " 4.25 bpw non-linear quantization " , } ,
{ " Q4_K " , LLAMA_FTYPE_MOSTLY_Q4_K_M , " alias for Q4_K_M " , } ,
{ " Q4_K_S " , LLAMA_FTYPE_MOSTLY_Q4_K_S , " 4.37G, +0.2689 ppl @ Llama-3-8B " , } ,
{ " Q4_K_M " , LLAMA_FTYPE_MOSTLY_Q4_K_M , " 4.58G, +0.1754 ppl @ Llama-3-8B " , } ,
{ " Q5_K " , LLAMA_FTYPE_MOSTLY_Q5_K_M , " alias for Q5_K_M " , } ,
{ " Q5_K_S " , LLAMA_FTYPE_MOSTLY_Q5_K_S , " 5.21G, +0.1049 ppl @ Llama-3-8B " , } ,
{ " Q5_K_M " , LLAMA_FTYPE_MOSTLY_Q5_K_M , " 5.33G, +0.0569 ppl @ Llama-3-8B " , } ,
{ " Q6_K " , LLAMA_FTYPE_MOSTLY_Q6_K , " 6.14G, +0.0217 ppl @ Llama-3-8B " , } ,
{ " Q8_0 " , LLAMA_FTYPE_MOSTLY_Q8_0 , " 7.96G, +0.0026 ppl @ Llama-3-8B " , } ,
{ " Q4_0_4_4 " , LLAMA_FTYPE_MOSTLY_Q4_0_4_4 , " 4.34G, +0.4685 ppl @ Llama-3-8B " , } ,
{ " Q4_0_4_8 " , LLAMA_FTYPE_MOSTLY_Q4_0_4_8 , " 4.34G, +0.4685 ppl @ Llama-3-8B " , } ,
{ " Q4_0_8_8 " , LLAMA_FTYPE_MOSTLY_Q4_0_8_8 , " 4.34G, +0.4685 ppl @ Llama-3-8B " , } ,
{ " F16 " , LLAMA_FTYPE_MOSTLY_F16 , " 14.00G, +0.0020 ppl @ Mistral-7B " , } ,
{ " BF16 " , LLAMA_FTYPE_MOSTLY_BF16 , " 14.00G, -0.0050 ppl @ Mistral-7B " , } ,
{ " F32 " , LLAMA_FTYPE_ALL_F32 , " 26.00G @ 7B " , } ,
    // Note: keep COPY after F32, so that a numeric ftype of 0 resolves to F32 rather than COPY
    { "COPY",     LLAMA_FTYPE_ALL_F32,         " only copy tensors, no quantizing", },
};

static const char * const LLM_KV_QUANTIZE_IMATRIX_FILE      = "quantize.imatrix.file";
static const char * const LLM_KV_QUANTIZE_IMATRIX_DATASET   = "quantize.imatrix.dataset";
static const char * const LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES = "quantize.imatrix.entries_count";
static const char * const LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS  = "quantize.imatrix.chunks_count";

// TODO: share with imatrix.cpp
static const char * const LLM_KV_IMATRIX_DATASET     = "imatrix.dataset";
static const char * const LLM_KV_IMATRIX_CHUNK_COUNT = "imatrix.chunk_count";
static const char * const LLM_KV_IMATRIX_CHUNK_SIZE  = "imatrix.chunk_size";
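
// Resolve a user-supplied type string to a llama_ftype. Matching is
// case-insensitive on the preset name and also accepts the bare enum value,
// so for example both "q8_0" and "7" resolve to LLAMA_FTYPE_MOSTLY_Q8_0.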
static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std::string & ftype_str_out) {
    std::string ftype_str;

    for (auto ch : ftype_str_in) {
        ftype_str.push_back(std::toupper(ch));
    }
    for (auto & it : QUANT_OPTIONS) {
        if (it.name == ftype_str) {
            ftype = it.ftype;
            ftype_str_out = it.name;
            return true;
        }
    }
    try {
        int ftype_int = std::stoi(ftype_str);
        for (auto & it : QUANT_OPTIONS) {
            if (it.ftype == ftype_int) {
                ftype = it.ftype;
                ftype_str_out = it.name;
                return true;
            }
        }
    }
    catch (...) {
        // stoi failed
    }
    return false;
}

// usage:
//  ./llama-quantize [--allow-requantize] [--leave-output-tensor] [--pure] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads]
//
[[noreturn]]
static void usage(const char * executable) {
    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
    printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
    printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
    printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
    printf("  --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
    printf("  --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
    printf("  --exclude-weights tensor_name: do not use importance matrix for this/these tensor(s)\n");
    printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
    printf("  --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
    printf("  --keep-split: will generate quantized model in the same shards as input\n");
    printf("  --override-kv KEY=TYPE:VALUE\n");
    printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
    printf("Note: --include-weights and --exclude-weights cannot be used together\n");
    printf("\nAllowed quantization types:\n");
    for (auto & it : QUANT_OPTIONS) {
        if (it.name != "COPY") {
            printf("  %2d  or  ", it.ftype);
        } else {
            printf("          ");
        }
        printf("%-7s : %s\n", it.name.c_str(), it.desc.c_str());
    }
    exit(1);
}
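
// Load an importance matrix produced by llama-imatrix. In the GGUF-based
// format read here, each tracked tensor contributes a pair of tensors named
// "<name>.sums" (accumulated activation statistics) and "<name>.counts"
// (how many activations were accumulated per row); the per-weight importance
// used for quantization is the mean, sums/counts. Returns the number of
// calibration chunks the matrix was computed on.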
static int load_imatrix(const std::string & imatrix_file, std::string & imatrix_dataset, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
    struct ggml_context * ctx = nullptr;
    struct gguf_init_params meta_gguf_params = {
        /* .no_alloc = */ false, // the data is needed
        /* .ctx      = */ &ctx,
    };
    struct gguf_context * ctx_gguf = gguf_init_from_file(imatrix_file.c_str(), meta_gguf_params);
    if (!ctx_gguf) {
        fprintf(stderr, "%s: failed to load imatrix file '%s'\n", __func__, imatrix_file.c_str());
        exit(1);
    }

    const int32_t n_entries = gguf_get_n_tensors(ctx_gguf);
    if (n_entries < 2) {
        fprintf(stderr, "%s: no data in file %s\n", __func__, imatrix_file.c_str());
        gguf_free(ctx_gguf);
        ggml_free(ctx);
        exit(1);
    }

    const int dataset_idx     = gguf_find_key(ctx_gguf, LLM_KV_IMATRIX_DATASET);
    const int chunk_count_idx = gguf_find_key(ctx_gguf, LLM_KV_IMATRIX_CHUNK_COUNT);
    const int chunk_size_idx  = gguf_find_key(ctx_gguf, LLM_KV_IMATRIX_CHUNK_SIZE);
    if (dataset_idx < 0 || chunk_count_idx < 0 || chunk_size_idx < 0) {
        fprintf(stderr, "%s: missing imatrix metadata in file %s\n", __func__, imatrix_file.c_str());
        gguf_free(ctx_gguf);
        ggml_free(ctx);
        exit(1);
    }

    const uint32_t chunk_size = gguf_get_val_u32(ctx_gguf, chunk_size_idx);

    const std::string sums_suffix{".sums"};
    const std::string counts_suffix{".counts"};

    // TODO: allow loading from mis-ordered imatrix files
    for (int32_t i = 0; i < n_entries - 1; i += 2) {
        std::string sums_name{gguf_get_tensor_name(ctx_gguf, i + 0)};
        std::string counts_name{gguf_get_tensor_name(ctx_gguf, i + 1)};

        if (sums_name.size() < sums_suffix.size() ||
            counts_name.size() < counts_suffix.size() ||
            !std::equal(sums_name.begin(), sums_name.end() - sums_suffix.size(), counts_name.begin()) ||
            !std::equal(sums_suffix.rbegin(), sums_suffix.rend(), sums_name.rbegin()) ||
            !std::equal(counts_suffix.rbegin(), counts_suffix.rend(), counts_name.rbegin())) {
            fprintf(stderr, "%s: mismatched sums and counts for entry %d\n", __func__, i / 2);
            gguf_free(ctx_gguf);
            ggml_free(ctx);
            exit(1);
        }

        struct ggml_tensor * sums   = ggml_get_tensor(ctx, sums_name.c_str());
        struct ggml_tensor * counts = ggml_get_tensor(ctx, counts_name.c_str());
        if (!sums || !counts) {
            fprintf(stderr, "%s: failed reading data for entry %d\n", __func__, i / 2);
            gguf_free(ctx_gguf);
            ggml_free(ctx);
            exit(1);
        }

        const int64_t ne0 = sums->ne[0];
        const int64_t ne1 = sums->ne[1];

        std::string name = sums_name.substr(0, sums_name.size() - sums_suffix.size());
        auto & e = imatrix_data[name];
        e.resize(ggml_nelements(sums));
        float max_count = 0.0f;
        for (int64_t j = 0; j < ne1; ++j) {
            const float count = ((const float *) counts->data)[j];
            for (int64_t i = 0; i < ne0; ++i) {
                e[j*ne0 + i] = ((const float *) sums->data)[j*ne0 + i] / count;
            }
            if (count > max_count) {
                max_count = count;
            }
        }
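
        // max_count/chunk_size approximates the number of calibration chunks
        // that contributed to this tensor (reported as "ncall" in the trace below)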
        if (getenv("LLAMA_TRACE")) {
            printf("%s: loaded data (size = %6d, ncall = %6d) for '%s'\n", __func__, int(e.size()), int(max_count / chunk_size), name.c_str());
        }
    }

    // read everything we need from the GGUF context before freeing it
    int m_last_chunk = gguf_get_val_u32(ctx_gguf, chunk_count_idx);

    imatrix_dataset = gguf_get_val_str(ctx_gguf, dataset_idx);
    printf("%s: imatrix dataset='%s'\n", __func__, imatrix_dataset.c_str());

    printf("%s: loaded %d importance matrix entries from %s computed on %d chunks\n", __func__, int(imatrix_data.size()), imatrix_file.c_str(), m_last_chunk);

    gguf_free(ctx_gguf);
    ggml_free(ctx);

    return m_last_chunk;
}
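
// Optionally load the imatrix and apply the --include-weights/--exclude-weights
// filters. Filtering is by substring: e.g. "--exclude-weights attn" drops every
// imatrix entry whose tensor name contains "attn". Returns the chunk count from
// the imatrix file, or -1 when no imatrix was given.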
static int prepare_imatrix(const std::string & imatrix_file,
        std::string & imatrix_dataset,
        const std::vector<std::string> & included_weights,
        const std::vector<std::string> & excluded_weights,
        std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
    int m_last_call = -1;
    if (!imatrix_file.empty()) {
        m_last_call = load_imatrix(imatrix_file, imatrix_dataset, imatrix_data);
    }
    if (imatrix_data.empty()) {
        return m_last_call;
    }
    if (!excluded_weights.empty()) {
        for (auto & name : excluded_weights) {
            for (auto it = imatrix_data.begin(); it != imatrix_data.end();) {
                auto pos = it->first.find(name);
                if (pos != std::string::npos) {
                    it = imatrix_data.erase(it);
                } else {
                    ++it;
                }
            }
        }
    }
    if (!included_weights.empty()) {
        std::unordered_map<std::string, std::vector<float>> tmp;
        for (auto & name : included_weights) {
            for (auto & e : imatrix_data) {
                auto pos = e.first.find(name);
                if (pos != std::string::npos) {
                    tmp.emplace(std::move(e));
                }
            }
        }
        imatrix_data = std::move(tmp);
    }
    if (!imatrix_data.empty()) {
        printf("%s: have %d importance matrix entries\n", __func__, int(imatrix_data.size()));
    }

    return m_last_call;
}
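
// Map a ggml type name to its enum value by scanning ggml_type_name() over all
// types; returns GGML_TYPE_COUNT when nothing matches. Note the comparison is
// case-sensitive and ggml uses lowercase names, so for example
// "--output-tensor-type q8_0" matches while "Q8_0" would not.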
static ggml_type parse_ggml_type(const char * arg) {
    ggml_type result = GGML_TYPE_COUNT;
    for (int j = 0; j < GGML_TYPE_COUNT; ++j) {
        auto type = ggml_type(j);
        const auto * name = ggml_type_name(type);
        if (name && strcmp(arg, name) == 0) {
            result = type;
            break;
        }
    }
    return result;
}

int main(int argc, char ** argv) {
    if (argc < 3) {
        usage(argv[0]);
    }

    llama_model_quantize_params params = llama_model_quantize_default_params();

    int arg_idx = 1;
    std::string imatrix_file;
    std::vector<std::string> included_weights, excluded_weights;
    std::vector<llama_model_kv_override> kv_overrides;

    for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
        if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
            params.quantize_output_tensor = false;
        } else if (strcmp(argv[arg_idx], "--output-tensor-type") == 0) {
            if (arg_idx < argc - 1) {
                params.output_tensor_type = parse_ggml_type(argv[++arg_idx]);
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--token-embedding-type") == 0) {
            if (arg_idx < argc - 1) {
                params.token_embedding_type = parse_ggml_type(argv[++arg_idx]);
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
            if (arg_idx == argc - 1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {
            params.allow_requantize = true;
        } else if (strcmp(argv[arg_idx], "--pure") == 0) {
            params.pure = true;
        } else if (strcmp(argv[arg_idx], "--imatrix") == 0) {
            if (arg_idx < argc - 1) {
                imatrix_file = argv[++arg_idx];
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--include-weights") == 0) {
            if (arg_idx < argc - 1) {
                included_weights.emplace_back(argv[++arg_idx]);
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--exclude-weights") == 0) {
            if (arg_idx < argc - 1) {
                excluded_weights.emplace_back(argv[++arg_idx]);
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--keep-split") == 0) {
            params.keep_split = true;
        } else {
            usage(argv[0]);
        }
    }

    if (argc - arg_idx < 2) {
        printf("%s: bad arguments\n", argv[0]);
        usage(argv[0]);
    }
    if (!included_weights.empty() && !excluded_weights.empty()) {
        printf("%s: --include-weights and --exclude-weights cannot be used together\n", argv[0]);
        usage(argv[0]);
    }

    std::string imatrix_dataset;
    std::unordered_map<std::string, std::vector<float>> imatrix_data;
    int m_last_call = prepare_imatrix(imatrix_file, imatrix_dataset, included_weights, excluded_weights, imatrix_data);
    if (!imatrix_data.empty()) {
        params.imatrix = &imatrix_data;
        {
            llama_model_kv_override kvo;
            std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_FILE);
            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
            strncpy(kvo.val_str, imatrix_file.c_str(), 127);
            kvo.val_str[127] = '\0';
            kv_overrides.emplace_back(std::move(kvo));
        }
        if (!imatrix_dataset.empty()) {
            llama_model_kv_override kvo;
            std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_DATASET);
            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
            strncpy(kvo.val_str, imatrix_dataset.c_str(), 127);
            kvo.val_str[127] = '\0';
            kv_overrides.emplace_back(std::move(kvo));
        }
        {
            llama_model_kv_override kvo;
            std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES);
            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
            kvo.val_i64 = imatrix_data.size();
            kv_overrides.emplace_back(std::move(kvo));
        }
        if (m_last_call > 0) {
            llama_model_kv_override kvo;
            std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS);
            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
            kvo.val_i64 = m_last_call;
            kv_overrides.emplace_back(std::move(kvo));
        }
    }
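
    // the override list is ultimately scanned until an empty key, so append a
    // zero-length-key sentinel before handing the array to llama.cpp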
    if (!kv_overrides.empty()) {
        kv_overrides.emplace_back();
        kv_overrides.back().key[0] = 0;
        params.kv_overrides = &kv_overrides;
    }

    llama_backend_init();

    // parse the positional arguments: input model, optional output name, ftype, optional nthreads
    const std::string fname_inp = argv[arg_idx];
    arg_idx++;
    std::string fname_out;
    std::string ftype_str;
    std::string suffix = ".gguf";
    if (try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
        std::string fpath;
        const size_t pos = fname_inp.find_last_of("/\\");
        if (pos != std::string::npos) {
            fpath = fname_inp.substr(0, pos + 1);
        }

        // export as [inp path]/ggml-model-[ftype]; only add the extension if there is no splitting
        fname_out = fpath + "ggml-model-" + ftype_str;
        if (!params.keep_split) {
            fname_out += suffix;
        }
        arg_idx++;
        if (ftype_str == "COPY") {
            params.only_copy = true;
        }
    } else {
        fname_out = argv[arg_idx];
        if (params.keep_split && fname_out.find(suffix) != std::string::npos) {
            fname_out = fname_out.substr(0, fname_out.length() - suffix.length());
        }
        arg_idx++;

        if (argc <= arg_idx) {
            fprintf(stderr, "%s: missing ftype\n", __func__);
            return 1;
        }
        if (!try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
            fprintf(stderr, "%s: invalid ftype '%s'\n", __func__, argv[arg_idx]);
            return 1;
        }
        if (ftype_str == "COPY") {
            params.only_copy = true;
        }
        arg_idx++;
    }

    // parse nthreads
    if (argc > arg_idx) {
        try {
            params.nthread = std::stoi(argv[arg_idx]);
        }
        catch (const std::exception & e) {
            fprintf(stderr, "%s: invalid nthread '%s' (%s)\n", __func__, argv[arg_idx], e.what());
            return 1;
        }
    }

    if ((params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS ||
         params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S  ||
         params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
         params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S  ||
         params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && imatrix_data.empty()) {
        fprintf(stderr, "\n==========================================================================================================\n");
        fprintf(stderr, "Please do not use IQ1_S, IQ1_M, IQ2_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n");
        fprintf(stderr, "==========================================================================================================\n\n\n");
        return 1;
    }

    print_build_info();

    fprintf(stderr, "%s: quantizing '%s' to '%s' as %s", __func__, fname_inp.c_str(), fname_out.c_str(), ftype_str.c_str());
    if (params.nthread > 0) {
        fprintf(stderr, " using %d threads", params.nthread);
    }
    fprintf(stderr, "\n");

    const int64_t t_main_start_us = llama_time_us();

    int64_t t_quantize_us = 0;

    // load the model
    {
        const int64_t t_start_us = llama_time_us();

        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), &params)) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }

        t_quantize_us = llama_time_us() - t_start_us;
    }

    // report timing
    {
        const int64_t t_main_end_us = llama_time_us();

        printf("\n");
        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us / 1000.0);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0);
    }

    llama_backend_free();

    return 0;
}