Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 03:44:35 +00:00)
ggml: new optimization interface (ggml/988)
commit 8a43e940ab (parent 5c9a8b22b1)
@ -228,6 +228,7 @@ set(GGML_PUBLIC_HEADERS
     include/ggml-cann.h
     include/ggml-cuda.h
     include/ggml-kompute.h
+    include/ggml-opt.h
     include/ggml-metal.h
     include/ggml-rpc.h
     include/ggml-sycl.h
@ -86,7 +86,7 @@ extern "C" {
     GGML_API void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
     GGML_API void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);

-    // "offset" refers to the offset of the tensor data for setting/getting data
+    // "offset" refers to the offset in tensor->data for setting/getting data
     GGML_API void ggml_backend_tensor_set(      struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
     GGML_API void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
     GGML_API void ggml_backend_tensor_memset(   struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
@ -242,14 +242,20 @@ extern "C" {
         ggml_backend_sched_reserve(sched, reserve_graph);

         // compute
-        graph = build_graph(sched);
-        ggml_backend_sched_graph_compute(sched, graph);
+        graph = build_graph(sched); // the graph and its tensors are single-use in terms of allocation, multi-use in terms of computation
+        for (int i = 0; i < 10; ++i) {
+            ggml_backend_sched_graph_compute(sched, graph); // on the first iteration the graph is allocated automatically
+        }

         // if there are graph inputs:
-        ggml_backend_sched_reset(sched);
-        ggml_backend_sched_alloc_graph(sched, graph);
-        ggml_backend_tensor_set(input_tensor, ...);
-        ggml_backend_sched_graph_compute(sched, graph);
+        graph = build_graph(sched); // get a new graph that is not allocated (the metadata for the old graph is freed once ggml_free is called)
+        ggml_backend_sched_reset(sched); // clear the allocation of the previous graph
+        ggml_backend_sched_alloc_graph(sched, graph); // explicitly allocate the new graph but do not execute it
+        ggml_backend_tensor_set(input_tensor, ...); // copy data to the newly allocated graph tensors
+        ggml_backend_sched_graph_compute(sched, graph); // execute the graph
+
+        // as an alternative to the above it is also possible to assign the inputs to a dedicated context and
+        // allocate them statically via ggml_backend_alloc_ctx_tensors
     }
  */

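For reference, the pattern described by the updated comment looks roughly as follows in user code. This is a hedged sketch rather than part of the commit: build_graph(), input_tensor and input_data are hypothetical application-side names, and a single CPU backend is used purely for illustration.

    ggml_backend_t backends[1] = { ggml_backend_cpu_init() };
    ggml_backend_buffer_type_t bufts[1] = { ggml_backend_get_default_buffer_type(backends[0]) };
    ggml_backend_sched_t sched = ggml_backend_sched_new(backends, bufts, 1, GGML_DEFAULT_GRAPH_SIZE, false);

    struct ggml_cgraph * graph = build_graph(sched); // application-defined graph construction
    for (int i = 0; i < 10; ++i) {
        ggml_backend_sched_graph_compute(sched, graph); // allocated automatically on the first iteration
    }

    // with graph inputs: reset, allocate explicitly, copy the input data, then compute
    graph = build_graph(sched);
    ggml_backend_sched_reset(sched);
    ggml_backend_sched_alloc_graph(sched, graph);
    ggml_backend_tensor_set(input_tensor, input_data, 0, ggml_nbytes(input_tensor));
    ggml_backend_sched_graph_compute(sched, graph);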
@ -264,7 +270,7 @@ extern "C" {
     //
     typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);

-    // Initialize a backend scheduler
+    // Initialize a backend scheduler, backends with low index are given priority over backends with high index
     GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);

@ -289,7 +295,9 @@ extern "C" {
     GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
     GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched);

-    // Reset all assignments and allocators - must be called before changing the node backends
+    // Reset all assignments and allocators - must be called before changing the node backends or allocating a new graph.
+    // This in effect deallocates all tensors that were previously allocated and leaves them with dangling pointers.
+    // The correct way to use this API is to discard the deallocated tensors and create new ones.
     GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);

     // Set a callback to be called for each resulting node during graph compute
ggml/include/ggml-opt.h (new file, 216 lines)
@ -0,0 +1,216 @@
// This file contains functionality for training models using GGML.
// It is not strictly needed vs. just vanilla GGML but it provides a more high-level interface for common needs such as datasets.
// At the bottom of this file especially there are relatively high-level functions that are suitable for use or adaptation in user code.
//
// Module maintainer: Johannes Gäßler (@JohannesGaessler, johannesg@5d6.de)

#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

    struct ggml_opt_dataset;
    struct ggml_opt_context;
    struct ggml_opt_result;

    typedef struct ggml_opt_dataset * ggml_opt_dataset_t;
    typedef struct ggml_opt_context * ggml_opt_context_t;
    typedef struct ggml_opt_result  * ggml_opt_result_t;

    // ====== Loss ======

    // built-in loss types, i.e. the built-in quantities minimized by the optimizer
    // custom loss types can be defined via mean or sum which simply reduce the outputs for all datapoints to a single value
    enum ggml_opt_loss_type {
        GGML_OPT_LOSS_TYPE_MEAN,
        GGML_OPT_LOSS_TYPE_SUM,
        GGML_OPT_LOSS_TYPE_CROSS_ENTROPY,
        GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR,
    };

    // ====== Dataset ======

    GGML_API ggml_opt_dataset_t ggml_opt_dataset_init(
            int64_t ne_datapoint, // number of elements per datapoint
            int64_t ne_label,     // number of elements per label
            int64_t ndata,        // total number of datapoints/labels
            int64_t ndata_shard); // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied)
    GGML_API void ggml_opt_dataset_free(ggml_opt_dataset_t dataset);

    // get underlying tensors that store the data
    GGML_API struct ggml_tensor * ggml_opt_dataset_data  (ggml_opt_dataset_t dataset); // shape = [ne_datapoint, ndata]
    GGML_API struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset); // shape = [ne_label, ndata]

    // shuffle idata first datapoints from dataset with RNG from opt_ctx, shuffle all datapoints if idata is negative
    GGML_API void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata);

    // get batch at position ibatch from dataset and copy the data to data_batch and labels_batch
    GGML_API void ggml_opt_dataset_get_batch(
            ggml_opt_dataset_t   dataset,
            struct ggml_tensor * data_batch,   // shape = [ne_datapoint, ndata_batch]
            struct ggml_tensor * labels_batch, // shape = [ne_label, ndata_batch]
            int64_t              ibatch);

// ====== Model / Context ======
|
||||||
|
|
||||||
|
enum ggml_opt_build_type {
|
||||||
|
GGML_OPT_BUILD_TYPE_FORWARD,
|
||||||
|
GGML_OPT_BUILD_TYPE_GRAD,
|
||||||
|
GGML_OPT_BUILD_TYPE_OPT,
|
||||||
|
};
|
||||||
|
|
||||||
|
// parameters that control which optimizer is used and how said optimizer tries to find the minimal loss
|
||||||
|
struct ggml_opt_optimizer_params {
|
||||||
|
// AdamW optimizer parameters
|
||||||
|
struct {
|
||||||
|
float alpha; // learning rate
|
||||||
|
float beta1;
|
||||||
|
float beta2;
|
||||||
|
float eps; // epsilon for numerical stability
|
||||||
|
float wd; // weight decay for AdamW, use 0.0f to disable
|
||||||
|
} adamw;
|
||||||
|
};
|
||||||
|
|
||||||
|
// callback to calculate optimizer parameters prior to a backward pass
|
||||||
|
// userdata can be used to pass arbitrary data
|
||||||
|
typedef struct ggml_opt_optimizer_params (*ggml_opt_get_optimizer_params)(void * userdata);
|
||||||
|
|
||||||
|
// returns the default optimizer params (constant)
|
||||||
|
// userdata is not used
|
||||||
|
GGML_API struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata);
|
||||||
|
|
||||||
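A hedged sketch of a custom optimizer-parameter callback; my_opt_pars is a hypothetical name and only uses the declarations above.

    // Start from the defaults and override the learning rate from userdata.
    static struct ggml_opt_optimizer_params my_opt_pars(void * userdata) {
        struct ggml_opt_optimizer_params result = ggml_opt_get_default_optimizer_params(NULL);
        result.adamw.alpha = *(const float *) userdata; // e.g. a value the caller decays per epoch
        return result;
    }

Such a callback would be wired up via ggml_opt_params::get_opt_pars, with the address of the learning rate passed in get_opt_pars_ud.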
    // parameters for initializing a new optimization context
    struct ggml_opt_params {
        ggml_backend_sched_t backend_sched; // defines which backends are used to construct the compute graphs

        struct ggml_context * ctx_compute; // created in user code, holds non-static tensors

        // the forward graph is defined by inputs and outputs
        // those tensors and all tensors in between are not intended to be reusable between multiple optimization contexts
        struct ggml_tensor * inputs;
        struct ggml_tensor * outputs;

        enum ggml_opt_loss_type  loss_type;
        enum ggml_opt_build_type build_type;

        int32_t opt_period; // after how many gradient accumulation steps an optimizer step should be done

        ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters
        void * get_opt_pars_ud;                     // userdata for calculating optimizer parameters
    };

    // get parameters for an optimization context with defaults set where possible
    // parameters for which no sensible defaults exist are supplied as arguments to this function
    GGML_API ggml_opt_params ggml_opt_default_params(
            ggml_backend_sched_t      backend_sched,
            struct ggml_context     * ctx_compute,
            struct ggml_tensor      * inputs,
            struct ggml_tensor      * outputs,
            enum ggml_opt_loss_type   loss_type);

    GGML_API ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params);
    GGML_API void ggml_opt_free(ggml_opt_context_t opt_ctx);

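A hedged setup sketch for the context API above. sched, ctx_static, ctx_compute and nbatch are assumed application-side variables; ggml_set_param is the existing ggml call for marking trainable parameters (two-argument form at the time of this commit), and the single linear layer is an example model only.

    // one linear layer trained with mean squared error; ctx_static holds the statically allocated
    // parameters and inputs, ctx_compute the automatically (re)allocated intermediate results
    struct ggml_tensor * weights = ggml_new_tensor_2d(ctx_static, GGML_TYPE_F32, 784, 10);
    ggml_set_param(ctx_static, weights); // mark as trainable
    struct ggml_tensor * inputs  = ggml_new_tensor_2d(ctx_static, GGML_TYPE_F32, 784, nbatch);
    // ... allocate ctx_static (e.g. via ggml_backend_alloc_ctx_tensors) and initialize the weights ...

    struct ggml_tensor * outputs = ggml_mul_mat(ctx_compute, weights, inputs); // shape = [10, nbatch]

    struct ggml_opt_params params = ggml_opt_default_params(sched, ctx_compute, inputs, outputs, GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR);
    ggml_opt_context_t opt_ctx = ggml_opt_init(params);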
    // set gradients to zero, initialize loss, and optionally reset the optimizer
    GGML_API void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer);

    // get underlying tensors that store data
    GGML_API struct ggml_tensor * ggml_opt_inputs(  ggml_opt_context_t opt_ctx); // forward graph input tensor
    GGML_API struct ggml_tensor * ggml_opt_outputs( ggml_opt_context_t opt_ctx); // forward graph output tensor
    GGML_API struct ggml_tensor * ggml_opt_labels(  ggml_opt_context_t opt_ctx); // labels to compare outputs against
    GGML_API struct ggml_tensor * ggml_opt_loss(    ggml_opt_context_t opt_ctx); // scalar tensor that contains the loss
    GGML_API struct ggml_tensor * ggml_opt_pred(    ggml_opt_context_t opt_ctx); // predictions made by outputs
    GGML_API struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx); // number of matching predictions between outputs and labels

    GGML_API struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node);

    // ====== Optimization Result ======

    GGML_API ggml_opt_result_t ggml_opt_result_init();
    GGML_API void ggml_opt_result_free(ggml_opt_result_t result);
    GGML_API void ggml_opt_result_reset(ggml_opt_result_t result);

    // get data from result, uncertainties are optional and can be ignored by passing NULL
    GGML_API void ggml_opt_result_ndata(   ggml_opt_result_t result, int64_t * ndata);                 // writes 1 value, number of datapoints
    GGML_API void ggml_opt_result_loss(    ggml_opt_result_t result, double * loss, double * unc);     // writes 1 value
    GGML_API void ggml_opt_result_pred(    ggml_opt_result_t result, int32_t * pred);                  // writes ndata values
    GGML_API void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc); // writes 1 value

    // ====== Computation ======

    // do forward pass, increment result if not NULL
    GGML_API void ggml_opt_forward(ggml_opt_context_t opt_ctx, ggml_opt_result_t result);

    // do forward pass, increment result if not NULL, do backward pass
    GGML_API void ggml_opt_forward_backward(ggml_opt_context_t opt_ctx, ggml_opt_result_t result);

// ############################################################################
|
||||||
|
// ## The high-level functions start here. They do not depend on any private ##
|
||||||
|
// ## functions or structs and can be copied to and adapted for user code. ##
|
||||||
|
// ############################################################################
|
||||||
|
|
||||||
|
// ====== Intended Usage ======
|
||||||
|
//
|
||||||
|
// 1. Select the appropriate loss for your problem.
|
||||||
|
// 2. Create a dataset and set the data for the "data" tensor. Also set the "labels" tensor if your loss needs them.
|
||||||
|
// Setting the shard size to 1 will be fine, it's the granularity with which data is shuffled/loaded (bigger values are faster).
|
||||||
|
// 3. Create a GGML graph for your model with no_alloc == true. Use two separate contexts for the tensors.
|
||||||
|
// The first context should contain the model parameters and inputs and be allocated statically in user code.
|
||||||
|
// The second context should contain all other tensors and will be (re)allocated automatically.
|
||||||
|
// Due to this automated allocation the data of the second context is not defined when accessed in user code.
|
||||||
|
// Note that the second dimension of the inputs/outputs are interpreted as the number of datapoints in those tensors.
|
||||||
|
// 4. Call ggml_opt_fit. If you need more control you can use ggml_opt_epoch instead.
|
||||||
|
|
||||||
|
// signature for a callback while evaluating opt_ctx on dataset, called after an evaluation
|
||||||
|
typedef void (*ggml_opt_epoch_callback)(
|
||||||
|
bool train, // true after training evaluation, false after validation evaluation
|
||||||
|
ggml_opt_context_t opt_ctx,
|
||||||
|
ggml_opt_dataset_t dataset,
|
||||||
|
ggml_opt_result_t result, // result associated with the dataset subsection
|
||||||
|
int64_t ibatch, // number of batches that have been evaluated so far
|
||||||
|
int64_t ibatch_max, // total number of batches in this dataset subsection
|
||||||
|
int64_t t_start_us); // time at which the evaluation on the dataset subsection was started
|
||||||
|
|
||||||
|
// do training on front of dataset, do evaluation only on back of dataset
|
||||||
|
GGML_API void ggml_opt_epoch(
|
||||||
|
ggml_opt_context_t opt_ctx,
|
||||||
|
ggml_opt_dataset_t dataset,
|
||||||
|
ggml_opt_result_t result_train, // result to increment during training, ignored if NULL
|
||||||
|
ggml_opt_result_t result_eval, // result to increment during evaluation, ignored if NULL
|
||||||
|
int64_t idata_split, // data index at which to split training and evaluation
|
||||||
|
ggml_opt_epoch_callback callback_train,
|
||||||
|
ggml_opt_epoch_callback callback_eval);
|
||||||
|
|
||||||
|
// callback that prints a progress bar on stderr
|
||||||
|
GGML_API void ggml_opt_epoch_callback_progress_bar(
|
||||||
|
bool train,
|
||||||
|
ggml_opt_context_t opt_ctx,
|
||||||
|
ggml_opt_dataset_t dataset,
|
||||||
|
ggml_opt_result_t result,
|
||||||
|
int64_t ibatch,
|
||||||
|
int64_t ibatch_max,
|
||||||
|
int64_t t_start_us);
|
||||||
|
|
||||||
|
// fit model defined by inputs and outputs to dataset
|
||||||
|
GGML_API void ggml_opt_fit(
|
||||||
|
ggml_backend_sched_t backend_sched, // backend scheduler for constructing the compute graphs
|
||||||
|
ggml_context * ctx_compute, // context with temporarily allocated tensors to calculate the outputs
|
||||||
|
ggml_tensor * inputs, // input tensor with shape [ne_datapoint, ndata_batch]
|
||||||
|
ggml_tensor * outputs, // output tensor, must have shape [ne_label, ndata_batch] if labels are used
|
||||||
|
ggml_opt_dataset_t dataset, // dataset with data and optionally also labels
|
||||||
|
enum ggml_opt_loss_type loss_type, // loss to minimize
|
||||||
|
ggml_opt_get_optimizer_params get_opt_pars, // callback to get optimizer params, userdata is pointer to epoch (of type int64_t)
|
||||||
|
int64_t nepoch, // how many times the dataset should be iterated over
|
||||||
|
int64_t nbatch_logical, // datapoints optimizer step, must be a multiple of ndata_batch in inputs/outputs
|
||||||
|
float val_split, // fraction of the dataset to use for validation, must be in [0.0f, 1.0f)
|
||||||
|
bool silent); // whether or not info prints to stderr should be suppressed
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
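A hedged end-to-end example of the highest-level entry point above. backend_sched, ctx_compute, inputs and outputs are assumed to have been set up by the application as described in the usage notes; the sizes, epoch count and split are example values only.

    ggml_opt_dataset_t dataset = ggml_opt_dataset_init(784, 10, 60000, /*ndata_shard =*/ 1);
    // ... fill ggml_opt_dataset_data(dataset) and ggml_opt_dataset_labels(dataset) ...

    // inputs with shape [784, 500] and outputs with shape [10, 500] built by the application in ctx_compute
    ggml_opt_fit(backend_sched, ctx_compute, inputs, outputs, dataset,
                 GGML_OPT_LOSS_TYPE_CROSS_ENTROPY, ggml_opt_get_default_optimizer_params,
                 /*nepoch =*/ 30, /*nbatch_logical =*/ 500, /*val_split =*/ 0.05f, /*silent =*/ false);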
@ -602,7 +602,6 @@ extern "C" {

         int32_t flags;

-        struct ggml_tensor * grad;
         struct ggml_tensor * src[GGML_MAX_SRC];

         // source tensor and offset for views

@ -615,7 +614,7 @@ extern "C" {

         void * extra; // extra things e.g. for ggml-cuda.cu

-        // char padding[4];
+        char padding[8];
     };

     static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
@ -1985,28 +1984,20 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
             struct ggml_tensor  * grad,
-            float                 alpha,
-            float                 beta1,
-            float                 beta2,
-            float                 eps,
-            float                 wd); // weight decay
+            struct ggml_tensor  * m,
+            struct ggml_tensor  * v,
+            struct ggml_tensor  * adamw_params); // parameters such as the learning rate

     //
     // automatic differentiation
     //

     GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
-    GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool accumulate);
-
-    GGML_API void ggml_build_opt_adamw(
-            struct ggml_context * ctx,
-            struct ggml_cgraph  * gf,
-            struct ggml_cgraph  * gb,
-            float                 alpha,
-            float                 beta1,
-            float                 beta2,
-            float                 eps,
-            float                 wd); // weight decay
+    GGML_API void ggml_build_backward_expand(
+            struct ggml_context * ctx_static,  // context for static gradients (loss + gradient accumulation)
+            struct ggml_context * ctx_compute, // context for gradient computation
+            struct ggml_cgraph  * cgraph,
+            bool                  accumulate); // whether or not gradients should be accumulated, requires static allocation of tensors in ctx_static

     // graph allocation in a context
     GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false
@ -2026,7 +2017,9 @@ extern "C" {
     GGML_API size_t ggml_graph_overhead(void);
     GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads);

-    GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);
+    GGML_API struct ggml_tensor * ggml_graph_get_tensor  (const struct ggml_cgraph * cgraph, const char * name);
+    GGML_API struct ggml_tensor * ggml_graph_get_grad    (const struct ggml_cgraph * cgraph, const struct ggml_tensor * node);
+    GGML_API struct ggml_tensor * ggml_graph_get_grad_acc(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node);

     GGML_API void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
     GGML_API struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);
|
|||||||
// dump the graph into a file using the dot format
|
// dump the graph into a file using the dot format
|
||||||
GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);
|
GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);
|
||||||
|
|
||||||
// build gradient checkpointing backward graph gb for gf using provided checkpoints
|
// TODO these functions were sandwiched in the old optimization interface, is there a better place for them?
|
||||||
// gb_tmp will contain original backward graph with rewritten backward process nodes,
|
|
||||||
// but without the second forward pass nodes.
|
|
||||||
GGML_API void ggml_build_backward_gradient_checkpointing(
|
|
||||||
struct ggml_context * ctx,
|
|
||||||
struct ggml_cgraph * gf,
|
|
||||||
struct ggml_cgraph * gb,
|
|
||||||
struct ggml_cgraph * gb_tmp,
|
|
||||||
struct ggml_tensor * * checkpoints,
|
|
||||||
int n_checkpoints);
|
|
||||||
//
|
|
||||||
// optimization
|
|
||||||
//
|
|
||||||
|
|
||||||
// optimization methods
|
|
||||||
enum ggml_opt_type {
|
|
||||||
GGML_OPT_TYPE_ADAM,
|
|
||||||
GGML_OPT_TYPE_LBFGS,
|
|
||||||
};
|
|
||||||
|
|
||||||
// linesearch methods
|
|
||||||
enum ggml_linesearch {
|
|
||||||
GGML_LINESEARCH_DEFAULT = 1,
|
|
||||||
|
|
||||||
GGML_LINESEARCH_BACKTRACKING_ARMIJO = 0,
|
|
||||||
GGML_LINESEARCH_BACKTRACKING_WOLFE = 1,
|
|
||||||
GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
|
|
||||||
};
|
|
||||||
|
|
||||||
// optimization return values
|
|
||||||
enum ggml_opt_result {
|
|
||||||
GGML_OPT_RESULT_OK = 0,
|
|
||||||
GGML_OPT_RESULT_DID_NOT_CONVERGE,
|
|
||||||
GGML_OPT_RESULT_NO_CONTEXT,
|
|
||||||
GGML_OPT_RESULT_INVALID_WOLFE,
|
|
||||||
GGML_OPT_RESULT_FAIL,
|
|
||||||
GGML_OPT_RESULT_CANCEL,
|
|
||||||
|
|
||||||
GGML_LINESEARCH_FAIL = -128,
|
|
||||||
GGML_LINESEARCH_MINIMUM_STEP,
|
|
||||||
GGML_LINESEARCH_MAXIMUM_STEP,
|
|
||||||
GGML_LINESEARCH_MAXIMUM_ITERATIONS,
|
|
||||||
GGML_LINESEARCH_INVALID_PARAMETERS,
|
|
||||||
};
|
|
||||||
|
|
||||||
typedef void (*ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel);
|
|
||||||
typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
|
typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
|
||||||
|
|
||||||
// Set callback for all future logging events.
|
// Set callback for all future logging events.
|
||||||
// If this is not called, or NULL is supplied, everything is output on stderr.
|
// If this is not called, or NULL is supplied, everything is output on stderr.
|
||||||
GGML_API void ggml_log_set(ggml_log_callback log_callback, void * user_data);
|
GGML_API void ggml_log_set(ggml_log_callback log_callback, void * user_data);
|
||||||
|
|
||||||
// optimization parameters
|
|
||||||
//
|
|
||||||
// see ggml.c (ggml_opt_default_params) for default values
|
|
||||||
//
|
|
||||||
struct ggml_opt_params {
|
|
||||||
enum ggml_opt_type type;
|
|
||||||
|
|
||||||
size_t graph_size;
|
|
||||||
|
|
||||||
int n_threads;
|
|
||||||
|
|
||||||
// delta-based convergence test
|
|
||||||
//
|
|
||||||
// if past == 0 - disabled
|
|
||||||
// if past > 0:
|
|
||||||
// stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
|
|
||||||
//
|
|
||||||
int past;
|
|
||||||
float delta;
|
|
||||||
|
|
||||||
// maximum number of iterations without improvement
|
|
||||||
//
|
|
||||||
// if 0 - disabled
|
|
||||||
// if > 0:
|
|
||||||
// assume convergence if no cost improvement in this number of iterations
|
|
||||||
//
|
|
||||||
int max_no_improvement;
|
|
||||||
|
|
||||||
bool print_forward_graph;
|
|
||||||
bool print_backward_graph;
|
|
||||||
|
|
||||||
int n_gradient_accumulation;
|
|
||||||
|
|
||||||
// ADAM parameters
|
|
||||||
struct {
|
|
||||||
int n_iter;
|
|
||||||
|
|
||||||
float sched; // schedule multiplier (fixed, decay or warmup)
|
|
||||||
float decay; // weight decay for AdamW, use 0.0f to disable
|
|
||||||
int decay_min_ndim; // minimum number of tensor dimension to apply weight decay
|
|
||||||
float alpha; // learning rate
|
|
||||||
float beta1;
|
|
||||||
float beta2;
|
|
||||||
float eps; // epsilon for numerical stability
|
|
||||||
float eps_f; // epsilon for convergence test
|
|
||||||
float eps_g; // epsilon for convergence test
|
|
||||||
float gclip; // gradient clipping
|
|
||||||
} adam;
|
|
||||||
|
|
||||||
// LBFGS parameters
|
|
||||||
struct {
|
|
||||||
int m; // number of corrections to approximate the inv. Hessian
|
|
||||||
int n_iter;
|
|
||||||
int max_linesearch;
|
|
||||||
|
|
||||||
float eps; // convergence tolerance
|
|
||||||
float ftol; // line search tolerance
|
|
||||||
float wolfe;
|
|
||||||
float min_step;
|
|
||||||
float max_step;
|
|
||||||
|
|
||||||
enum ggml_linesearch linesearch;
|
|
||||||
} lbfgs;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ggml_opt_context {
|
|
||||||
struct ggml_context * ctx;
|
|
||||||
struct ggml_opt_params params;
|
|
||||||
|
|
||||||
int iter;
|
|
||||||
int64_t nx; // number of parameter elements
|
|
||||||
|
|
||||||
bool just_initialized;
|
|
||||||
|
|
||||||
float loss_before;
|
|
||||||
float loss_after;
|
|
||||||
|
|
||||||
struct {
|
|
||||||
struct ggml_tensor * g; // current gradient
|
|
||||||
struct ggml_tensor * m; // first moment
|
|
||||||
struct ggml_tensor * v; // second moment
|
|
||||||
struct ggml_tensor * pf; // past function values
|
|
||||||
float fx_best;
|
|
||||||
float fx_prev;
|
|
||||||
int n_no_improvement;
|
|
||||||
} adam;
|
|
||||||
|
|
||||||
struct {
|
|
||||||
struct ggml_tensor * x; // current parameters
|
|
||||||
struct ggml_tensor * xp; // previous parameters
|
|
||||||
struct ggml_tensor * g; // current gradient
|
|
||||||
struct ggml_tensor * gp; // previous gradient
|
|
||||||
struct ggml_tensor * d; // search direction
|
|
||||||
struct ggml_tensor * pf; // past function values
|
|
||||||
struct ggml_tensor * lmal; // the L-BFGS memory alpha
|
|
||||||
struct ggml_tensor * lmys; // the L-BFGS memory ys
|
|
||||||
struct ggml_tensor * lms; // the L-BFGS memory s
|
|
||||||
struct ggml_tensor * lmy; // the L-BFGS memory y
|
|
||||||
float fx_best;
|
|
||||||
float step;
|
|
||||||
int j;
|
|
||||||
int k;
|
|
||||||
int end;
|
|
||||||
int n_no_improvement;
|
|
||||||
} lbfgs;
|
|
||||||
};
|
|
||||||
|
|
||||||
GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
|
GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
|
||||||
|
|
||||||
GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);
|
|
||||||
|
|
||||||
// optimize the function defined by the tensor f
|
|
||||||
GGML_API enum ggml_opt_result ggml_opt(
|
|
||||||
struct ggml_context * ctx,
|
|
||||||
struct ggml_opt_params params,
|
|
||||||
struct ggml_tensor * f);
|
|
||||||
|
|
||||||
// initialize optimizer context
|
|
||||||
GGML_API void ggml_opt_init(
|
|
||||||
struct ggml_context * ctx,
|
|
||||||
struct ggml_opt_context * opt,
|
|
||||||
struct ggml_opt_params params,
|
|
||||||
int64_t nx);
|
|
||||||
|
|
||||||
// continue optimizing the function defined by the tensor f
|
|
||||||
GGML_API enum ggml_opt_result ggml_opt_resume(
|
|
||||||
struct ggml_context * ctx,
|
|
||||||
struct ggml_opt_context * opt,
|
|
||||||
struct ggml_tensor * f);
|
|
||||||
|
|
||||||
// continue optimizing the function defined by the tensor f
|
|
||||||
GGML_API enum ggml_opt_result ggml_opt_resume_g(
|
|
||||||
struct ggml_context * ctx,
|
|
||||||
struct ggml_opt_context * opt,
|
|
||||||
struct ggml_tensor * f,
|
|
||||||
struct ggml_cgraph * gf,
|
|
||||||
struct ggml_cgraph * gb,
|
|
||||||
ggml_opt_callback callback,
|
|
||||||
void * callback_data);
|
|
||||||
|
|
||||||
//
|
//
|
||||||
// quantization
|
// quantization
|
||||||
//
|
//
|
||||||
|
@ -207,9 +207,11 @@ add_library(ggml-base
             ../include/ggml-alloc.h
             ../include/ggml-backend.h
             ../include/ggml-cpp.h
+            ../include/ggml-opt.h
             ggml.c
             ggml-alloc.c
             ggml-backend.cpp
+            ggml-opt.cpp
             ggml-threading.cpp
             ggml-threading.h
             ggml-quants.c
@ -466,18 +466,12 @@ static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) {
     return ggml_gallocr_hash_get(galloc, t)->allocated;
 }

-static void ggml_gallocr_set_node_offset(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id, size_t offset) {
-    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
-    hn->buffer_id = buffer_id;
-    hn->offset = offset;
-    hn->allocated = true;
-}
-
 static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) {
     return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated;
 }

 static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) {
+    GGML_ASSERT(buffer_id >= 0);
     struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);

     if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_is_view(node)) {
@ -816,7 +810,11 @@ static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor *
 }

 static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) {
-    size_t node_size = (node->data || node->view_src) ? 0 : ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
+    size_t node_size = 0;
+    if (!node->data && !node->view_src) {
+        GGML_ASSERT(talloc->buffer_id >= 0); // prevent segfault when misusing the API
+        node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
+    }
     return talloc->size_max >= node_size;
 }

|
|||||||
buf->iface.get_tensor(buf, tensor, data, offset, size);
|
buf->iface.get_tensor(buf, tensor, data, offset, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
GGML_API void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
|
void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
|
||||||
ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
|
ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
|
||||||
|
|
||||||
if (size == 0) {
|
if (size == 0) {
|
||||||
|
@ -12220,7 +12220,12 @@ static void ggml_compute_forward_opt_step_adamw_f32(
     const struct ggml_tensor * src0_grad   = dst->src[1];
     const struct ggml_tensor * src0_grad_m = dst->src[2];
     const struct ggml_tensor * src0_grad_v = dst->src[3];
+    const struct ggml_tensor * adamw_params = dst->src[4];

     GGML_ASSERT(ggml_are_same_shape(src0, src0_grad));
+    GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m));
+    GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v));
+    GGML_ASSERT(ggml_nelements(adamw_params) == 7);
+
     const int ith = params->ith;
     const int nth = params->nth;
@ -12237,16 +12242,14 @@ static void ggml_compute_forward_opt_step_adamw_f32(
     const int ir0 = dr*ith;
     const int ir1 = MIN(ir0 + dr, nr);

-    /* const float gnorm = 1.0f; */
-    int64_t iter; memcpy(&iter, &dst->op_params[0], sizeof(int64_t));
-    const float alpha = ggml_get_op_params_f32(dst, 2);
-    const float beta1 = ggml_get_op_params_f32(dst, 3);
-    const float beta2 = ggml_get_op_params_f32(dst, 4);
-    const float eps   = ggml_get_op_params_f32(dst, 5);
-    const float wd    = ggml_get_op_params_f32(dst, 6);
-
-    const float beta1h = alpha/(1.0f - powf(beta1, iter));
-    const float beta2h =  1.0f/(1.0f - powf(beta2, iter));
+    const float * adamw_params_ptr = ggml_get_data_f32(adamw_params);
+    const float alpha  = adamw_params_ptr[0];
+    const float beta1  = adamw_params_ptr[1];
+    const float beta2  = adamw_params_ptr[2];
+    const float eps    = adamw_params_ptr[3];
+    const float wd     = adamw_params_ptr[4];
+    const float beta1h = adamw_params_ptr[5];
+    const float beta2h = adamw_params_ptr[6];

     for (int ir = ir0; ir < ir1; ++ir) {
         const int64_t i03 = ir/(ne02*ne01);
@ -12270,17 +12273,9 @@ static void ggml_compute_forward_opt_step_adamw_f32(
             // The weight decay is applied independently of the Adam momenta m and v.
             // This is NOT equivalent to l2 regularization that adds w[i00]*w[i00] to the loss.
             // See: https://arxiv.org/pdf/1711.05101v3.pdf
-            w[i00] = w[i00]*(1.0f - alpha*wd) - mh/vh;
+            w[i00] = w[i00]*(1.0f - alpha*wd) - alpha*mh/vh;
         }
     }
-
-    ggml_barrier(params->threadpool);
-    if (ith != 0) {
-        return;
-    }
-
-    iter++;
-    memcpy(&dst->op_params[0], &iter, sizeof(int64_t));
 }

 static void ggml_compute_forward_opt_step_adamw(
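For reference, the CPU kernel above (and the CUDA kernel below) implements the standard AdamW update, with the seven entries of adamw_params holding alpha, beta1, beta2, eps, wd and the two bias-correction factors beta1h and beta2h. In the old code beta1h had alpha folded in (beta1h = alpha/(1 - beta1^t)); since the update line now multiplies by alpha explicitly, the factors passed by the caller are presumably just 1/(1 - beta1^t) and 1/(1 - beta2^t). In LaTeX notation the implemented update is:

$$m_t = \beta_1 m_{t-1} + (1-\beta_1)\,g_t, \qquad v_t = \beta_2 v_{t-1} + (1-\beta_2)\,g_t^2$$
$$\hat{m}_t = \frac{m_t}{1-\beta_1^t}, \qquad \hat{v}_t = \frac{v_t}{1-\beta_2^t}, \qquad \theta_t = \theta_{t-1}\,(1-\alpha\,\mathrm{wd}) - \alpha\,\frac{\hat{m}_t}{\sqrt{\hat{v}_t}+\epsilon}$$

Moving the iteration counter and these derived factors into a host-side tensor is also what removes the op_params read/write and the ggml_barrier from the kernel above.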
@ -1,11 +1,11 @@
+#include "ggml-impl.h"
 #include "opt-step-adamw.cuh"

 #include <cstdint>

 static __global__ void opt_step_adamw_f32(
-    float * __restrict__ x, const float * __restrict__ g, float * __restrict__ g_m, float * __restrict__ g_v, const int64_t k,
-    const float alpha, const float beta1, const float beta2, const float eps, const float wd,
-    const float beta1h, const float beta2h) {
+    float * __restrict__ x, const float * __restrict__ g, float * __restrict__ g_m, float * __restrict__ g_v,
+    const float * __restrict__ pars, const int64_t k) {

     const int64_t i = (int64_t) blockIdx.x*blockDim.x + threadIdx.x;

@ -13,6 +13,14 @@ static __global__ void opt_step_adamw_f32(
         return;
     }

+    const float alpha  = pars[0];
+    const float beta1  = pars[1];
+    const float beta2  = pars[2];
+    const float eps    = pars[3];
+    const float wd     = pars[4];
+    const float beta1h = pars[5];
+    const float beta2h = pars[6];
+
     const float gi  = g[i];
     const float gmi = g_m[i]*beta1 +    gi*(1.0f - beta1);
     const float gvi = g_v[i]*beta2 + gi*gi*(1.0f - beta2);

@ -23,17 +31,15 @@ static __global__ void opt_step_adamw_f32(
     const float mh =       gmi*beta1h;
     const float vh = sqrtf(gvi*beta2h) + eps;

-    x[i] = x[i]*(1.0f - alpha*wd) - mh/vh;
+    x[i] = x[i]*(1.0f - alpha*wd) - alpha*mh/vh;
 }

 static void opt_step_adamw_f32_cuda(
-    float * x, const float * g, float * g_m, float * g_v, const int64_t k,
-    const float alpha, const float beta1, const float beta2, const float eps, const float wd,
-    const float beta1h, const float beta2h, cudaStream_t stream) {
+    float * x, const float * g, float * g_m, float * g_v, const float * pars, const int64_t k, cudaStream_t stream) {

     const dim3 block_dims(CUDA_OPT_STEP_ADAMW_BLOCK_SIZE, 1, 1);
     const dim3 block_nums((k + CUDA_OPT_STEP_ADAMW_BLOCK_SIZE - 1) / CUDA_OPT_STEP_ADAMW_BLOCK_SIZE, 1, 1);
-    opt_step_adamw_f32<<<block_nums, block_dims, 0, stream>>>(x, g, g_m, g_v, k, alpha, beta1, beta2, eps, wd, beta1h, beta2h);
+    opt_step_adamw_f32<<<block_nums, block_dims, 0, stream>>>(x, g, g_m, g_v, pars, k);
 }

 void ggml_cuda_opt_step_adamw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {

@ -41,40 +47,32 @@ void ggml_cuda_opt_step_adamw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0_grad   = dst->src[1];
     const ggml_tensor * src0_grad_m = dst->src[2];
     const ggml_tensor * src0_grad_v = dst->src[3];
+    const ggml_tensor * adamw_params = dst->src[4];

     GGML_ASSERT(src0->type        == GGML_TYPE_F32);
     GGML_ASSERT(src0_grad->type   == GGML_TYPE_F32);
     GGML_ASSERT(src0_grad_m->type == GGML_TYPE_F32);
     GGML_ASSERT(src0_grad_v->type == GGML_TYPE_F32);
+    GGML_ASSERT(adamw_params->type == GGML_TYPE_F32);
     GGML_ASSERT(ggml_is_contiguous(src0));
     GGML_ASSERT(ggml_is_contiguous(src0_grad));
     GGML_ASSERT(ggml_is_contiguous(src0_grad_m));
     GGML_ASSERT(ggml_is_contiguous(src0_grad_v));
+    GGML_ASSERT(ggml_is_contiguous(adamw_params));
     GGML_ASSERT(ggml_are_same_shape(src0, src0_grad));
     GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m));
     GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v));
+    GGML_ASSERT(ggml_nelements(adamw_params) == 7);

     float       * src0_d        = (float       *) src0->data;
     const float * src0_grad_d   = (const float *) src0_grad->data;
     float       * src0_grad_m_d = (float       *) src0_grad_m->data;
     float       * src0_grad_v_d = (float       *) src0_grad_v->data;
+    const float * adamw_params_d = (const float *) adamw_params->data;

     cudaStream_t stream = ctx.stream();

     const int64_t ne = ggml_nelements(src0);

-    int64_t iter; memcpy(&iter,  &dst->op_params[0], sizeof(int64_t));
-    float alpha;  memcpy(&alpha, &dst->op_params[2], sizeof(float));
-    float beta1;  memcpy(&beta1, &dst->op_params[3], sizeof(float));
-    float beta2;  memcpy(&beta2, &dst->op_params[4], sizeof(float));
-    float eps;    memcpy(&eps,   &dst->op_params[5], sizeof(float));
-    float wd;     memcpy(&wd,    &dst->op_params[6], sizeof(float));
-
-    const float beta1h = alpha/(1.0f - powf(beta1, iter));
-    const float beta2h =  1.0f/(1.0f - powf(beta2, iter));
-
-    opt_step_adamw_f32_cuda(src0_d, src0_grad_d, src0_grad_m_d, src0_grad_v_d, ne, alpha, beta1, beta2, eps, wd, beta1h, beta2h, stream);
-
-    iter++;
-    memcpy(&dst->op_params[0], &iter, sizeof(int64_t));
+    opt_step_adamw_f32_cuda(src0_d, src0_grad_d, src0_grad_m_d, src0_grad_v_d, adamw_params_d, ne, stream);
 }
@ -196,7 +196,7 @@ void ggml_hash_set_reset(struct ggml_hash_set * hash_set);
 static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);

 // returns GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
-static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);
+static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key);

 // returns GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
 static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);

@ -210,7 +210,7 @@ static inline size_t ggml_hash(const struct ggml_tensor * p) {
     return (size_t)(uintptr_t)p >> 4;
 }

-static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
+static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key) {
     size_t h = ggml_hash(key) % hash_set->size;

     // linear probing

@ -281,13 +281,14 @@ enum ggml_cgraph_eval_order {
 };

 struct ggml_cgraph {
-    int size;
-    int n_nodes;
-    int n_leafs;
+    int size;    // maximum number of nodes/leafs/grads/grad_accs
+    int n_nodes; // number of nodes currently in use
+    int n_leafs; // number of leafs currently in use

-    struct ggml_tensor ** nodes;
-    struct ggml_tensor ** grads;
-    struct ggml_tensor ** leafs;
+    struct ggml_tensor ** nodes;     // tensors with data that can change if the graph is evaluated
+    struct ggml_tensor ** grads;     // the outputs of these tensors are the gradients of the nodes
+    struct ggml_tensor ** grad_accs; // accumulators for node gradients
+    struct ggml_tensor ** leafs;     // tensors with constant data

     struct ggml_hash_set visited_hash_set;

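The new grads/grad_accs arrays are indexed by a node's slot in visited_hash_set; dup_graph in ggml-opt.cpp below uses exactly this lookup, and the ggml_graph_get_grad/ggml_graph_get_grad_acc accessors added to ggml.h presumably do the same internally. A hedged sketch (cgraph and node are placeholder names):

    const size_t i = ggml_hash_find(&cgraph->visited_hash_set, node);
    struct ggml_tensor * grad     = cgraph->grads     ? cgraph->grads[i]     : NULL; // gradient of node
    struct ggml_tensor * grad_acc = cgraph->grad_accs ? cgraph->grad_accs[i] : NULL; // its accumulator, if any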
@ -3639,6 +3639,12 @@ static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
     return ctx->all_data;
 }

+static void ggml_backend_metal_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+    memset((char *)tensor->data + offset, value, size);
+
+    UNUSED(buffer);
+}
+
 static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
     memcpy((char *)tensor->data + offset, data, size);

@ -3671,7 +3677,7 @@ static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = {
     /* .free_buffer   = */ ggml_backend_metal_buffer_free_buffer,
     /* .get_base      = */ ggml_backend_metal_buffer_get_base,
     /* .init_tensor   = */ NULL,
-    /* .memset_tensor = */ NULL,
+    /* .memset_tensor = */ ggml_backend_metal_buffer_memset_tensor,
     /* .set_tensor    = */ ggml_backend_metal_buffer_set_tensor,
     /* .get_tensor    = */ ggml_backend_metal_buffer_get_tensor,
     /* .cpy_tensor    = */ ggml_backend_metal_buffer_cpy_tensor,
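A Metal implementation of .memset_tensor is added here, presumably so that the generic ggml_backend_tensor_memset (used by the new optimization code, e.g. to zero gradients and optimizer state) also works on Metal buffers. A hedged usage example, where tensor is a placeholder:

    ggml_backend_tensor_memset(tensor, 0, 0, ggml_nbytes(tensor)); // zero the whole tensor via the buffer's .memset_tensor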
|
867
ggml/src/ggml-opt.cpp
Normal file
867
ggml/src/ggml-opt.cpp
Normal file
@ -0,0 +1,867 @@
|
|||||||
|
#include "ggml-opt.h"
|
||||||
|
|
||||||
|
#include "ggml.h"
|
||||||
|
#include "ggml-alloc.h"
|
||||||
|
#include "ggml-backend.h"
|
||||||
|
#include "ggml-impl.h"
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <cmath>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <inttypes.h>
|
||||||
|
#include <map>
|
||||||
|
#include <random>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
struct ggml_opt_dataset {
|
||||||
|
struct ggml_context * ctx;
|
||||||
|
ggml_backend_buffer_t buf;
|
||||||
|
struct ggml_tensor * data;
|
||||||
|
struct ggml_tensor * labels;
|
||||||
|
|
||||||
|
int64_t ndata;
|
||||||
|
int64_t ndata_shard;
|
||||||
|
size_t nbs_data;
|
||||||
|
size_t nbs_labels;
|
||||||
|
|
||||||
|
std::vector<int64_t> permutation;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct ggml_opt_context {
|
||||||
|
ggml_backend_sched_t backend_sched;
|
||||||
|
ggml_cgraph * allocated_graph;
|
||||||
|
ggml_cgraph * allocated_graph_copy;
|
||||||
|
struct ggml_context * ctx_static;
|
||||||
|
struct ggml_context * ctx_static_cpu;
|
||||||
|
struct ggml_context * ctx_compute;
|
||||||
|
struct ggml_context * ctx_copy;
|
||||||
|
ggml_backend_buffer_t buf_static;
|
||||||
|
ggml_backend_buffer_t buf_static_cpu;
|
||||||
|
std::mt19937 rng;
|
||||||
|
|
||||||
|
struct ggml_tensor * inputs;
|
||||||
|
struct ggml_tensor * outputs;
|
||||||
|
struct ggml_tensor * labels;
|
||||||
|
|
||||||
|
struct ggml_tensor * loss;
|
||||||
|
struct ggml_tensor * pred;
|
||||||
|
struct ggml_tensor * ncorrect;
|
||||||
|
|
||||||
|
struct ggml_cgraph * gf;
|
||||||
|
struct ggml_cgraph * gb_grad;
|
||||||
|
struct ggml_cgraph * gb_opt;
|
||||||
|
|
||||||
|
int64_t iter;
|
||||||
|
int32_t opt_period;
|
||||||
|
int32_t opt_i;
|
||||||
|
bool loss_per_datapoint;
|
||||||
|
|
||||||
|
ggml_opt_get_optimizer_params get_opt_pars;
|
||||||
|
void * get_opt_pars_ud;
|
||||||
|
struct ggml_tensor * adamw_params;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct ggml_opt_result {
|
||||||
|
int64_t ndata = 0;
|
||||||
|
std::vector<float> loss;
|
||||||
|
std::vector<int32_t> pred;
|
||||||
|
int64_t ncorrect = 0;
|
||||||
|
|
||||||
|
bool loss_per_datapoint = false;
|
||||||
|
int64_t opt_period = -1;
|
||||||
|
};
|
||||||
|
|
||||||
|
// ====== Dataset ======
|
||||||
|
|
||||||
|
ggml_opt_dataset_t ggml_opt_dataset_init(int64_t ne_datapoint, int64_t ne_label, int64_t ndata, int64_t ndata_shard) {
|
||||||
|
GGML_ASSERT(ne_datapoint > 0);
|
||||||
|
GGML_ASSERT(ne_label >= 0);
|
||||||
|
GGML_ASSERT(ndata > 0);
|
||||||
|
GGML_ASSERT(ndata_shard > 0);
|
||||||
|
|
||||||
|
ggml_opt_dataset_t result = new ggml_opt_dataset;
|
||||||
|
result->ndata = ndata;
|
||||||
|
result->ndata_shard = ndata_shard;
|
||||||
|
|
||||||
|
{
|
||||||
|
struct ggml_init_params params = {
|
||||||
|
/*.mem_size =*/ 2*ggml_tensor_overhead(),
|
||||||
|
/*.mem_buffer =*/ nullptr,
|
||||||
|
/*.no_alloc =*/ true,
|
||||||
|
};
|
||||||
|
result->ctx = ggml_init(params);
|
||||||
|
}
|
||||||
|
|
||||||
|
result->data = ggml_new_tensor_2d(result->ctx, GGML_TYPE_F32, ne_datapoint, ndata);
|
||||||
|
result->nbs_data = ggml_nbytes(result->data) * ndata_shard/ndata;
|
||||||
|
|
||||||
|
if (ne_label > 0) {
|
||||||
|
result->labels = ggml_new_tensor_2d(result->ctx, GGML_TYPE_F32, ne_label, ndata);
|
||||||
|
result->nbs_labels = ggml_nbytes(result->labels) * ndata_shard/ndata;
|
||||||
|
} else {
|
||||||
|
result->labels = nullptr;
|
||||||
|
result->nbs_labels = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
result->buf = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx, ggml_backend_cpu_buffer_type());
|
||||||
|
|
||||||
|
const int64_t nshards = ndata/ndata_shard;
|
||||||
|
result->permutation.resize(nshards);
|
||||||
|
for (int64_t i = 0; i < nshards; ++i) {
|
||||||
|
result->permutation[i] = i;
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_opt_dataset_free(ggml_opt_dataset_t dataset) {
|
||||||
|
ggml_backend_buffer_free(dataset->buf);
|
||||||
|
ggml_free(dataset->ctx);
|
||||||
|
delete dataset;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_tensor * ggml_opt_dataset_data(ggml_opt_dataset_t dataset) {
|
||||||
|
return dataset->data;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset) {
|
||||||
|
return dataset->labels;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata) {
|
||||||
|
GGML_ASSERT(idata <= dataset->ndata);
|
||||||
|
|
||||||
|
if (idata < 0) {
|
||||||
|
std::shuffle(dataset->permutation.begin(), dataset->permutation.end(), opt_ctx->rng);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
GGML_ASSERT(idata % dataset->ndata_shard == 0);
|
||||||
|
const int64_t ishard_max = idata / dataset->ndata_shard;
|
||||||
|
std::shuffle(dataset->permutation.begin(), dataset->permutation.begin() + ishard_max, opt_ctx->rng);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_opt_dataset_get_batch(ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, struct ggml_tensor * labels_batch, int64_t ibatch) {
|
||||||
|
GGML_ASSERT( data_batch && ggml_is_contiguous(data_batch));
|
||||||
|
GGML_ASSERT(!labels_batch || ggml_is_contiguous(labels_batch));
|
||||||
|
GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
|
||||||
|
|
||||||
|
const size_t nb_data_batch = ggml_nbytes(data_batch);
|
||||||
|
GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);
|
||||||
|
const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;
|
||||||
|
|
||||||
|
if (labels_batch) {
|
||||||
|
const size_t nb_labels_batch = ggml_nbytes(labels_batch);
|
||||||
|
GGML_ASSERT(nb_labels_batch == shards_per_batch*dataset->nbs_labels);
|
||||||
|
}
|
||||||
|
|
||||||
|
GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));
|
||||||
|
|
||||||
|
for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
|
||||||
|
const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];
|
||||||
|
|
||||||
|
const char * ptr_data = (const char *) dataset->data->data + ishard*dataset->nbs_data;
|
||||||
|
ggml_backend_tensor_set(data_batch, ptr_data, ishard_batch*dataset->nbs_data, dataset->nbs_data);
|
||||||
|
|
||||||
|
if (!labels_batch) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char * ptr_labels = (const char *) dataset->labels->data + ishard*dataset->nbs_labels;
|
||||||
|
ggml_backend_tensor_set(labels_batch, ptr_labels, ishard_batch*dataset->nbs_labels, dataset->nbs_labels);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ====== Model / Context ======
|
||||||
|
|
||||||
|
struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata) {
|
||||||
|
GGML_UNUSED(userdata);
|
||||||
|
|
||||||
|
ggml_opt_optimizer_params result;
|
||||||
|
|
||||||
|
result.adamw.alpha = 0.001f;
|
||||||
|
result.adamw.beta1 = 0.9f;
|
||||||
|
result.adamw.beta2 = 0.999f;
|
||||||
|
result.adamw.eps = 1e-8f;
|
||||||
|
result.adamw.wd = 0.0f;
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_opt_params ggml_opt_default_params(
        ggml_backend_sched_t      backend_sched,
        struct ggml_context     * ctx_compute,
        struct ggml_tensor      * inputs,
        struct ggml_tensor      * outputs,
        enum ggml_opt_loss_type   loss_type) {
    return {
        /*backend_sched   =*/ backend_sched,
        /*ctx_compute     =*/ ctx_compute,
        /*inputs          =*/ inputs,
        /*logits          =*/ outputs,
        /*loss_type       =*/ loss_type,
        /*build_type      =*/ GGML_OPT_BUILD_TYPE_OPT,
        /*opt_period      =*/ 1,
        /*get_opt_pars    =*/ ggml_opt_get_default_optimizer_params,
        /*get_opt_pars_ud =*/ nullptr,
    };
}

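// Editor's note: a minimal sketch, not part of the original file, showing how the defaults are
// typically adjusted before creating the context. All names other than the ggml_opt_* symbols
// (backend_sched, ctx_compute, inputs, outputs, my_opt_pars, epoch) are placeholders:
//
//   struct ggml_opt_params params = ggml_opt_default_params(
//       backend_sched, ctx_compute, inputs, outputs, GGML_OPT_LOSS_TYPE_CROSS_ENTROPY);
//   params.opt_period      = 4;           // accumulate gradients over 4 physical batches per optimizer step
//   params.get_opt_pars    = my_opt_pars; // e.g. the schedule sketched above
//   params.get_opt_pars_ud = &epoch;
//   ggml_opt_context_t opt_ctx = ggml_opt_init(params);
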
static ggml_tensor * map_tensor(std::map<ggml_tensor *, ggml_tensor *> & tensor_map, ggml_context * ctx, ggml_tensor * tensor) {
    if (!tensor) {
        return nullptr;
    }

    if (tensor_map.find(tensor) != tensor_map.end()) {
        return tensor_map[tensor];
    }

    ggml_tensor * new_tensor = ggml_dup_tensor(ctx, tensor);
    tensor_map[tensor] = new_tensor;

    new_tensor->op = tensor->op;
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        new_tensor->nb[i] = tensor->nb[i];
    }
    new_tensor->flags = tensor->flags;
    memcpy(new_tensor->op_params, tensor->op_params, sizeof(tensor->op_params));
    strcpy(new_tensor->name, tensor->name);
    new_tensor->data      = tensor->data;
    new_tensor->buffer    = tensor->buffer;
    new_tensor->extra     = tensor->extra;
    new_tensor->view_offs = tensor->view_offs;
    new_tensor->view_src  = map_tensor(tensor_map, ctx, tensor->view_src);
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        new_tensor->src[i] = map_tensor(tensor_map, ctx, tensor->src[i]);
    }

    return new_tensor;
}

static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * graph) {
    std::map<ggml_tensor *, ggml_tensor *> tensor_map;

    ggml_cgraph * new_graph = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true);

    for (int i = 0; i < graph->n_leafs; i++) {
        ggml_build_forward_expand(new_graph, map_tensor(tensor_map, ctx, graph->leafs[i]));
    }
    for (int i = 0; i < graph->n_nodes; i++) {
        ggml_build_forward_expand(new_graph, map_tensor(tensor_map, ctx, graph->nodes[i]));
    }
    for (int i = 0; i < graph->n_nodes; ++i) {
        const size_t igrad_src = ggml_hash_find(&graph->visited_hash_set,     graph->nodes[i]);
        const size_t igrad_dst = ggml_hash_find(&new_graph->visited_hash_set, new_graph->nodes[i]);
        // copy the gradient metadata from the source graph into the duplicate, not the other way around
        new_graph->grads[igrad_dst]     = graph->grads[igrad_src];
        new_graph->grad_accs[igrad_dst] = graph->grad_accs[igrad_src];
    }

    return new_graph;
}

static void ggml_opt_alloc_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph) {
    GGML_ASSERT(graph);
    if (opt_ctx->allocated_graph == graph) {
        return;
    }

    ggml_backend_sched_reset(opt_ctx->backend_sched); // clear allocation of previous graph

    {
        ggml_init_params params = {
            /*.mem_size   =*/ ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE,
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        ggml_free(opt_ctx->ctx_copy);
        opt_ctx->ctx_copy = ggml_init(params);
    }

    opt_ctx->allocated_graph_copy = dup_graph(opt_ctx->ctx_copy, graph);

    ggml_backend_sched_alloc_graph(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
    opt_ctx->allocated_graph = graph;
}

ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) {
    ggml_opt_context_t result = new struct ggml_opt_context;
    result->backend_sched        = params.backend_sched;
    result->allocated_graph      = nullptr;
    result->allocated_graph_copy = nullptr;
    result->ctx_compute          = params.ctx_compute;
    result->ctx_copy             = nullptr;
    result->inputs               = params.inputs;
    result->outputs              = params.outputs;
    result->iter                 = 1;
    result->opt_period           = params.opt_period;
    result->opt_i                = 0;
    result->get_opt_pars         = params.get_opt_pars;
    result->get_opt_pars_ud      = params.get_opt_pars_ud;

    GGML_ASSERT(result->inputs->data && "the inputs must be allocated statically");
    GGML_ASSERT(result->opt_period >= 1);

    const bool accumulate = params.build_type == GGML_OPT_BUILD_TYPE_GRAD ||
        (params.build_type == GGML_OPT_BUILD_TYPE_OPT && result->opt_period > 1);

    ggml_set_input(result->inputs);
    ggml_set_output(result->outputs);

    result->gf = ggml_new_graph_custom(result->ctx_compute, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); // Forward pass.
    ggml_build_forward_expand(result->gf, result->outputs);

    int n_param = 0;
    for (int i = 0; i < result->gf->n_nodes; ++i) {
        if (result->gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
            n_param++;
        }
    }

    {
        // The static context is used for:
        // - gradients (1 tensor per param if using gradient accumulation)
        // - optimizer momenta (2 tensors per param)
        // - labels
        // - loss + its gradient (up to 5 tensors)
        // - pred
        // - ncorrect (2 tensors).
        const size_t tensors_per_param = (accumulate ? 1 : 0) + (params.build_type == GGML_OPT_BUILD_TYPE_OPT ? 2 : 0);
        const size_t size_meta = (tensors_per_param*n_param + 9) * ggml_tensor_overhead();
        struct ggml_init_params params = {
            /*.mem_size   =*/ size_meta,
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        result->ctx_static = ggml_init(params);
    }
    {
        // The static cpu context is used for:
        // - optimizer parameters (1 for the entire context)
        const size_t size_meta = 1 * ggml_tensor_overhead();
        struct ggml_init_params params = {
            /*.mem_size   =*/ size_meta,
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,
        };
        result->ctx_static_cpu = ggml_init(params);
    }

    switch (params.loss_type) {
        case GGML_OPT_LOSS_TYPE_MEAN: {
            result->labels = nullptr;
            result->loss = ggml_sum(result->ctx_static, result->outputs);
            ggml_set_name(result->loss, "loss_sum");
            const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs));
            result->loss = ggml_scale(result->ctx_static, result->loss, scale);
            ggml_set_name(result->loss, "loss_mean");
            result->loss_per_datapoint = true;
            break;
        }
        case GGML_OPT_LOSS_TYPE_SUM: {
            result->labels = nullptr;
            result->loss = ggml_sum(result->ctx_static, result->outputs);
            ggml_set_name(result->loss, "loss_sum");
            result->loss_per_datapoint = false;
            break;
        }
        case GGML_OPT_LOSS_TYPE_CROSS_ENTROPY: {
            result->labels = ggml_dup_tensor(result->ctx_static, result->outputs);
            ggml_set_input(result->labels);
            ggml_set_name(result->labels, "labels");
            result->loss = ggml_cross_entropy_loss(result->ctx_static, result->outputs, result->labels);
            ggml_set_name(result->loss, "loss_cross_entropy");
            if (result->opt_period > 1) {
                result->loss = ggml_scale(result->ctx_static, result->loss, 1.0f / result->opt_period);
                ggml_set_name(result->loss, "loss_cross_entropy_scaled");
            }
            result->loss_per_datapoint = true;
            break;
        }
        case GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR: {
            result->labels = ggml_dup_tensor(result->ctx_static, result->outputs);
            ggml_set_input(result->labels);
            ggml_set_name(result->labels, "labels");
            result->loss = ggml_sub(result->ctx_static, result->outputs, result->labels);
            ggml_set_name(result->loss, "loss_error");
            result->loss = ggml_sqr(result->ctx_static, result->loss);
            ggml_set_name(result->loss, "loss_squared_error");
            result->loss = ggml_sum(result->ctx_static, result->loss);
            ggml_set_name(result->loss, "loss_sum_squared_error");
            const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs));
            result->loss = ggml_scale(result->ctx_static, result->loss, scale);
            ggml_set_name(result->loss, "loss_mean_squared_error");
            result->loss_per_datapoint = true;
            break;
        }
    }
    ggml_set_output(result->loss);
    ggml_set_loss(result->loss);
    ggml_build_forward_expand(result->gf, result->loss);

    result->pred = ggml_argmax(result->ctx_static, result->outputs);
    ggml_set_name(result->pred, "pred");
    ggml_set_output(result->pred);
    ggml_build_forward_expand(result->gf, result->pred);

    if (result->labels) {
        result->ncorrect = ggml_count_equal(result->ctx_static, result->pred, ggml_argmax(result->ctx_static, result->labels));
        ggml_set_name(result->ncorrect, "ncorrect");
        ggml_set_output(result->ncorrect);
        ggml_build_forward_expand(result->gf, result->ncorrect);
    } else {
        result->ncorrect = nullptr;
    }

    if (params.build_type == GGML_OPT_BUILD_TYPE_FORWARD) {
        result->gb_grad = nullptr;
        result->gb_opt  = nullptr;

        result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));
        result->buf_static_cpu = nullptr;

        ggml_opt_alloc_graph(result, result->gf);

        return result;
    }

    // gb_grad == graph backward gradients, forward pass, then backward pass to calculate gradients.
    result->gb_grad = ggml_graph_dup(result->ctx_compute, result->gf);
    ggml_build_backward_expand(result->ctx_static, result->ctx_compute, result->gb_grad, accumulate);

    if (params.build_type == GGML_OPT_BUILD_TYPE_GRAD) {
        result->gb_opt = nullptr;

        result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));
        result->buf_static_cpu = nullptr;

        ggml_opt_alloc_graph(result, result->gb_grad);
        ggml_graph_reset(result->gb_grad);

        return result;
    }

    GGML_ASSERT(params.build_type == GGML_OPT_BUILD_TYPE_OPT);

    // gb_opt == graph backward optimize, forward pass, then backward pass to calculate gradients, then optimizer step.
    result->gb_opt = ggml_graph_dup(result->ctx_compute, result->gb_grad);

    result->adamw_params = ggml_new_tensor_1d(result->ctx_static_cpu, GGML_TYPE_F32, 7);
    ggml_set_input(result->adamw_params);
    ggml_set_name(result->adamw_params, "adamw_params");

    for (int i = result->gf->n_nodes-1; i >= 0; --i) {
        struct ggml_tensor * node = result->gb_opt->nodes[i];
        struct ggml_tensor * grad = ggml_graph_get_grad(result->gb_opt, node);

        if (node->flags & GGML_TENSOR_FLAG_PARAM) {
            struct ggml_tensor * m        = ggml_dup_tensor(result->ctx_static, node);
            struct ggml_tensor * v        = ggml_dup_tensor(result->ctx_static, node);
            struct ggml_tensor * opt_step = ggml_opt_step_adamw(result->ctx_compute, node, grad, m, v, result->adamw_params);
            ggml_build_forward_expand(result->gb_opt, opt_step);
        }
    }

    result->buf_static = ggml_backend_alloc_ctx_tensors(
        result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));

    result->buf_static_cpu = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx_static_cpu, ggml_backend_cpu_buffer_type());

    ggml_opt_alloc_graph(result, result->gb_opt);
    ggml_graph_reset(result->gb_opt);

    return result;
}

void ggml_opt_free(ggml_opt_context_t opt_ctx) {
    if (opt_ctx == nullptr) {
        return;
    }
    ggml_backend_buffer_free(opt_ctx->buf_static);
    ggml_backend_buffer_free(opt_ctx->buf_static_cpu);
    ggml_free(opt_ctx->ctx_static);
    ggml_free(opt_ctx->ctx_static_cpu);
    delete opt_ctx;
}

void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) {
    if (optimizer) {
        ggml_graph_reset(opt_ctx->gb_opt);
        opt_ctx->iter = 1;
    } else {
        ggml_graph_reset(opt_ctx->gb_grad);
    }
}

struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) {
    return opt_ctx->inputs;
}

struct ggml_tensor * ggml_opt_outputs(ggml_opt_context_t opt_ctx) {
    return opt_ctx->outputs;
}

struct ggml_tensor * ggml_opt_labels(ggml_opt_context_t opt_ctx) {
    return opt_ctx->labels;
}

struct ggml_tensor * ggml_opt_loss(ggml_opt_context_t opt_ctx) {
    return opt_ctx->loss;
}

struct ggml_tensor * ggml_opt_pred(ggml_opt_context_t opt_ctx) {
    return opt_ctx->pred;
}

struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx) {
    return opt_ctx->ncorrect;
}

struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node) {
    return ggml_graph_get_grad_acc(opt_ctx->gb_opt, node);
}

// ====== Optimization Result ======

ggml_opt_result_t ggml_opt_result_init() {
    return new ggml_opt_result;
}

void ggml_opt_result_free(ggml_opt_result_t result) {
    delete result;
}

void ggml_opt_result_reset(ggml_opt_result_t result) {
    result->ndata = 0;
    result->loss.clear();
    result->pred.clear();
    result->ncorrect = 0;
}

void ggml_opt_result_ndata(ggml_opt_result_t result, int64_t * ndata) {
    *ndata = result->ndata;
}

void ggml_opt_result_loss(ggml_opt_result_t result, double * loss, double * unc) {
    const int64_t nbatches = result->loss.size(); // Number of physical batches.

    if (nbatches == 0) {
        *loss = 0.0;
        if (unc) { // unc is optional, see below
            *unc = NAN;
        }
        return;
    }

    double sum         = 0.0;
    double sum_squared = 0.0;

    for (const float & loss : result->loss) {
        // If the loss is per datapoint it was scaled by 1.0f/opt_period for each physical batch.
        const float loss_scaled = result->loss_per_datapoint ? loss*result->opt_period : loss;
        sum         += loss_scaled;
        sum_squared += loss_scaled*loss_scaled;
    }

    const double mean = sum/nbatches;
    *loss = result->loss_per_datapoint ? mean : sum;

    if (!unc) {
        return;
    }

    if (nbatches < 2) {
        *unc = NAN;
        return;
    }

    const double var_sum = sum_squared/nbatches - mean*mean; // variance without Bessel's correction, i.e. nbatches/(nbatches-1)
    *unc = result->loss_per_datapoint ? sqrt(var_sum / (nbatches - 1)) : sqrt(var_sum * nbatches/(nbatches - 1));
}

void ggml_opt_result_pred(ggml_opt_result_t result, int32_t * pred) {
    for (size_t i = 0; i < result->pred.size(); ++i) {
        pred[i] = result->pred[i];
    }
}

void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc) {
    *accuracy = result->ncorrect >= 0 ? double(result->ncorrect) / double(result->ndata) : NAN;

    if (!unc) {
        return;
    }

    *unc = result->ncorrect >= 0 && result->ndata >= 2 ?
        sqrt((*accuracy) * (1.0 - (*accuracy)) / double(result->ndata - 1)) : NAN;
}

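// Editor's note: a minimal sketch, not part of the original file, of reading back aggregated metrics;
// result is a ggml_opt_result_t that was filled e.g. by ggml_opt_epoch below:
//
//   int64_t ndata;
//   double  loss, loss_unc, accuracy, accuracy_unc;
//   ggml_opt_result_ndata   (result, &ndata);
//   ggml_opt_result_loss    (result, &loss,     &loss_unc);
//   ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc);
//   fprintf(stderr, "ndata=%" PRId64 " loss=%lf+-%lf accuracy=%lf\n", ndata, loss, loss_unc, accuracy);
//
// The uncertainty pointers may be nullptr if only the central values are needed.
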
// ====== Computation ======

static void ggml_opt_eval_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph, ggml_opt_result * result) {
    if (graph != opt_ctx->gf) {
        struct ggml_opt_optimizer_params opt_pars = opt_ctx->get_opt_pars(opt_ctx->get_opt_pars_ud);

        GGML_ASSERT(opt_pars.adamw.alpha >  0.0f);
        GGML_ASSERT(opt_pars.adamw.beta1 >= 0.0f);
        GGML_ASSERT(opt_pars.adamw.beta1 <= 1.0f);
        GGML_ASSERT(opt_pars.adamw.beta2 >= 0.0f);
        GGML_ASSERT(opt_pars.adamw.beta2 <= 1.0f);
        GGML_ASSERT(opt_pars.adamw.eps   >= 0.0f);
        GGML_ASSERT(opt_pars.adamw.wd    >= 0.0f);
        GGML_ASSERT(opt_pars.adamw.wd    <= 1.0f);

        // beta1, beta2 after applying warmup
        const float beta1h = 1.0f/(1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter));
        const float beta2h = 1.0f/(1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter));

        float * adamw_par_data = ggml_get_data_f32(opt_ctx->adamw_params);
        adamw_par_data[0] = opt_pars.adamw.alpha;
        adamw_par_data[1] = opt_pars.adamw.beta1;
        adamw_par_data[2] = opt_pars.adamw.beta2;
        adamw_par_data[3] = opt_pars.adamw.eps;
        adamw_par_data[4] = opt_pars.adamw.wd;
        adamw_par_data[5] = beta1h;
        adamw_par_data[6] = beta2h;
    }

    ggml_opt_alloc_graph(opt_ctx, graph);
    ggml_backend_sched_graph_compute(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
    opt_ctx->iter += opt_ctx->allocated_graph == opt_ctx->gb_opt;

    if (!result) {
        return;
    }

    if (result->ndata == 0) {
        result->loss_per_datapoint = opt_ctx->loss_per_datapoint;
        result->opt_period         = opt_ctx->opt_period;
    } else {
        GGML_ASSERT(result->loss_per_datapoint == opt_ctx->loss_per_datapoint);
        GGML_ASSERT(result->opt_period         == opt_ctx->opt_period);
    }

    const int64_t ndata = opt_ctx->outputs->ne[1];
    GGML_ASSERT(result->ndata == ndata*int64_t(result->loss.size()) && "varying batch size not supported");
    result->ndata += ndata;

    GGML_ASSERT(ggml_is_scalar(opt_ctx->loss));
    GGML_ASSERT(opt_ctx->loss->type == GGML_TYPE_F32);
    float loss;
    ggml_backend_tensor_get(opt_ctx->loss, &loss, 0, ggml_nbytes(opt_ctx->loss));
    result->loss.push_back(loss);

    GGML_ASSERT(opt_ctx->pred->type == GGML_TYPE_I32);
    std::vector<int32_t> pred(ndata);
    ggml_backend_tensor_get(opt_ctx->pred, pred.data(), 0, ggml_nbytes(opt_ctx->pred));
    result->pred.insert(result->pred.end(), pred.begin(), pred.end());

    if (!opt_ctx->labels || result->ncorrect < 0) {
        result->ncorrect = -1;
        return;
    }

    GGML_ASSERT(ggml_is_scalar(opt_ctx->ncorrect));
    GGML_ASSERT(opt_ctx->ncorrect->type == GGML_TYPE_I64);
    int64_t ncorrect;
    ggml_backend_tensor_get(opt_ctx->ncorrect, &ncorrect, 0, ggml_nbytes(opt_ctx->ncorrect));
    result->ncorrect += ncorrect;
}

void ggml_opt_forward(ggml_opt_context_t opt_ctx, ggml_opt_result * result) {
    ggml_opt_eval_graph(opt_ctx, opt_ctx->gf, result);
}

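// Editor's note: a minimal evaluation sketch, not part of the original file; ibatch_val and result_val
// are placeholder names. Forward-only passes over held-out data accumulate metrics without touching
// gradients or optimizer state.
//
//   ggml_opt_dataset_get_batch(dataset, ggml_opt_inputs(opt_ctx), ggml_opt_labels(opt_ctx), ibatch_val);
//   ggml_opt_forward(opt_ctx, result_val);
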
void ggml_opt_forward_backward(ggml_opt_context_t opt_ctx, ggml_opt_result * result) {
    if (opt_ctx->opt_period == 1) {
        ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_opt, result);
        return;
    }

    const int32_t opt_i_next = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;
    if (opt_i_next == 0) {
        ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_opt, result);
        ggml_opt_reset(opt_ctx, /*optimizer =*/ false);
    } else {
        ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_grad, result);
    }
    opt_ctx->opt_i = opt_i_next;
}

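// Editor's note: a minimal manual training loop, not part of the original file; nbatches_train and
// result_train are placeholder names. With opt_period > 1 only every opt_period-th call applies an
// optimizer step, the other calls merely accumulate gradients.
//
//   for (int64_t ibatch = 0; ibatch < nbatches_train; ++ibatch) {
//       ggml_opt_dataset_get_batch(dataset, ggml_opt_inputs(opt_ctx), ggml_opt_labels(opt_ctx), ibatch);
//       ggml_opt_forward_backward(opt_ctx, result_train);
//   }
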
// ====== High-Level Functions ======

void ggml_opt_epoch(
        ggml_opt_context_t      opt_ctx,
        ggml_opt_dataset_t      dataset,
        ggml_opt_result_t       result_train,
        ggml_opt_result_t       result_eval,
        int64_t                 idata_split,
        ggml_opt_epoch_callback callback_train,
        ggml_opt_epoch_callback callback_eval) {
    struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx);
    struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
    struct ggml_tensor * data   = ggml_opt_dataset_data(dataset);
    GGML_ASSERT(data->ne[0] == inputs->ne[0]);

    const int64_t ndata       = data->ne[1];
    const int64_t ndata_batch = inputs->ne[1];

    GGML_ASSERT(data->ne[1] % inputs->ne[1] == 0);
    const int64_t nbatches = ndata/ndata_batch;

    idata_split = idata_split < 0 ? ndata : idata_split;
    GGML_ASSERT(idata_split % ndata_batch == 0);
    const int64_t ibatch_split = idata_split / ndata_batch;

    int64_t ibatch = 0;
    int64_t t_loop_start = ggml_time_us();
    for (; ibatch < ibatch_split; ++ibatch) {
        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
        ggml_opt_forward_backward(opt_ctx, result_train);
        if (callback_train) {
            callback_train(true, opt_ctx, dataset, result_train, ibatch+1, ibatch_split, t_loop_start);
        }
    }
    t_loop_start = ggml_time_us();
    for (; ibatch < nbatches; ++ibatch) {
        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
        ggml_opt_forward(opt_ctx, result_eval);
        if (callback_eval) {
            callback_eval(false, opt_ctx, dataset, result_eval, ibatch+1-ibatch_split, nbatches-ibatch_split, t_loop_start);
        }
    }
}

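// Editor's note: a minimal sketch, not part of the original file. One call processes the first
// idata_split datapoints with forward+backward (training) and the remainder forward-only (evaluation),
// optionally reporting progress via the callback defined below:
//
//   ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split,
//                  ggml_opt_epoch_callback_progress_bar, ggml_opt_epoch_callback_progress_bar);
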
void ggml_opt_epoch_callback_progress_bar(
        bool               train,
        ggml_opt_context_t opt_ctx,
        ggml_opt_dataset_t dataset,
        ggml_opt_result_t  result,
        int64_t            ibatch,
        int64_t            ibatch_max,
        int64_t            t_start_us) {
    fprintf(stderr, "%s[", train ? "train: " : "val: ");

    constexpr int64_t bar_length = 25;
    for (int64_t j = 0; j < bar_length; ++j) {
        const int64_t ibatch_j = ibatch_max * j/bar_length;
        if (ibatch_j < ibatch) {
            fprintf(stderr, "=");
        } else if (ibatch_max * (j - 1)/bar_length < ibatch) {
            fprintf(stderr, ">");
        } else {
            fprintf(stderr, " ");
        }
    }

    const int64_t batch_size = ggml_opt_inputs(opt_ctx)->ne[1];
    const int64_t idata      = ibatch*batch_size;
    const int64_t idata_max  = ibatch_max*batch_size;

    double loss;
    double loss_unc;
    ggml_opt_result_loss(result, &loss, &loss_unc);

    double accuracy;
    double accuracy_unc;
    ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc);

    const int64_t t_ibatch_us = ggml_time_us() - t_start_us;
    int64_t t_ibatch_s = t_ibatch_us / 1000000;
    const int64_t t_ibatch_h = t_ibatch_s / 3600;
    t_ibatch_s -= t_ibatch_h * 3600;
    const int64_t t_ibatch_m = t_ibatch_s / 60;
    t_ibatch_s -= t_ibatch_m * 60;

    const int64_t t_eta_us = t_ibatch_us * (ibatch_max - ibatch)/ibatch;
    int64_t t_eta_s = t_eta_us / 1000000;
    const int64_t t_eta_h = t_eta_s / 3600;
    t_eta_s -= t_eta_h * 3600;
    const int64_t t_eta_m = t_eta_s / 60;
    t_eta_s -= t_eta_m * 60;

    fprintf(stderr, "| data=%06" PRId64 "/%06" PRId64 ", loss=%.6lf+-%.6lf, accuracy=%.2lf+-%.2lf%%, "
            "t=%02" PRId64 ":%02" PRId64 ":%02" PRId64 ", ETA=%02" PRId64 ":%02" PRId64 ":%02" PRId64 "]\r",
            idata, idata_max, loss, loss_unc, 100.0*accuracy, 100.0*accuracy_unc,
            t_ibatch_h, t_ibatch_m, t_ibatch_s, t_eta_h, t_eta_m, t_eta_s);
    if (ibatch == ibatch_max) {
        fprintf(stderr, "\n");
    }
    fflush(stderr);

    GGML_UNUSED(dataset);
}

void ggml_opt_fit(
        ggml_backend_sched_t          backend_sched,
        ggml_context                * ctx_compute,
        ggml_tensor                 * inputs,
        ggml_tensor                 * outputs,
        ggml_opt_dataset_t            dataset,
        enum ggml_opt_loss_type       loss_type,
        ggml_opt_get_optimizer_params get_opt_pars,
        int64_t                       nepoch,
        int64_t                       nbatch_logical,
        float                         val_split,
        bool                          silent) {
    ggml_time_init();
    const int64_t t_start_us = ggml_time_us();

    const int64_t ndata           = ggml_opt_dataset_data(dataset)->ne[1];
    const int64_t nbatch_physical = inputs->ne[1];
    GGML_ASSERT(ndata          % nbatch_logical  == 0);
    GGML_ASSERT(nbatch_logical % nbatch_physical == 0);

    const int64_t opt_period       = nbatch_logical / nbatch_physical;
    const int64_t nbatches_logical = ndata / nbatch_logical;

    GGML_ASSERT(val_split >= 0.0f);
    GGML_ASSERT(val_split <  1.0f);
    const int64_t ibatch_split = int64_t(((1.0f - val_split) * nbatches_logical)) * opt_period; // train <-> val split index (physical)
    const int64_t idata_split  = ibatch_split * nbatch_physical;

    int64_t epoch = 1;

    ggml_opt_params params = ggml_opt_default_params(backend_sched, ctx_compute, inputs, outputs, loss_type);
    params.opt_period      = opt_period;
    params.get_opt_pars    = get_opt_pars;
    params.get_opt_pars_ud = &epoch;
    ggml_opt_context_t opt_ctx = ggml_opt_init(params);

    // Shuffling the data is generally useful but there is only a point if not all data is used in a single batch.
    if (nbatch_logical < ndata) {
        ggml_opt_dataset_shuffle(opt_ctx, dataset, -1); // Shuffle all data (train + validation).
    }

    ggml_opt_result_t result_train = ggml_opt_result_init();
    ggml_opt_result_t result_val   = ggml_opt_result_init();

    ggml_opt_epoch_callback epoch_callback = silent ? nullptr : ggml_opt_epoch_callback_progress_bar;

    for (; epoch <= nepoch; ++epoch) {
        if (nbatch_logical < idata_split) {
            ggml_opt_dataset_shuffle(opt_ctx, dataset, idata_split);
        }

        ggml_opt_result_reset(result_train);
        ggml_opt_result_reset(result_val);

        if (!silent) {
            fprintf(stderr, "%s: epoch %04" PRId64 "/%04" PRId64 ":\n", __func__, epoch, nepoch);
        }
        ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split, epoch_callback, epoch_callback);
        if (!silent) {
            fprintf(stderr, "\n");
        }
    }

    if (!silent) {
        int64_t t_total_s = (ggml_time_us() - t_start_us) / 1000000;
        const int64_t t_total_h = t_total_s / 3600;
        t_total_s -= t_total_h * 3600;
        const int64_t t_total_m = t_total_s / 60;
        t_total_s -= t_total_m * 60;
        fprintf(stderr, "%s: training took %02" PRId64 ":%02" PRId64 ":%02" PRId64 "\n", __func__, t_total_h, t_total_m, t_total_s);
    }

    ggml_opt_free(opt_ctx);
    ggml_opt_result_free(result_train);
    ggml_opt_result_free(result_val);
}

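// Editor's note: a minimal end-to-end sketch, not part of the original file. model_ctx, model_inputs
// and model_logits are placeholders for a user-built compute graph whose parameter tensors were marked
// with ggml_set_param; the dataset holds the matching data and labels.
//
//   ggml_opt_fit(backend_sched, model_ctx, model_inputs, model_logits, dataset,
//                GGML_OPT_LOSS_TYPE_CROSS_ENTROPY, ggml_opt_get_default_optimizer_params,
//                /*nepoch         =*/ 30,
//                /*nbatch_logical =*/ 500,
//                /*val_split      =*/ 0.05f,
//                /*silent         =*/ false);
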
1376  ggml/src/ggml.c — file diff suppressed because it is too large.
@ -811,11 +811,11 @@ struct test_case {

         ggml_build_forward_expand(gf, out);
         ggml_graph_cpy(gf, gb);
-        ggml_build_backward_expand(ctx, gf, gb, false);
+        ggml_build_backward_expand(ctx, ctx, gb, false);
         if (expect.size() != 1 || expect[0] != 0.0f) {
             GGML_ASSERT(ggml_graph_n_nodes(gb) > ggml_graph_n_nodes(gf));
             for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
-                GGML_ASSERT(!(t->flags & GGML_TENSOR_FLAG_PARAM) || t->grad->op != GGML_OP_NONE);
+                GGML_ASSERT(!(t->flags & GGML_TENSOR_FLAG_PARAM) || ggml_graph_get_grad(gb, t)->op != GGML_OP_NONE);
             }
         }

@ -862,7 +862,13 @@ struct test_case {
             const char * bn = ggml_backend_name(backend);
             const int64_t ne = ggml_nelements(t);

-            std::vector<float> ga = tensor_to_float(t->grad);
+            std::vector<float> ga;
+            struct ggml_tensor * grad = ggml_graph_get_grad(gb, t);
+            if (grad) {
+                ga = tensor_to_float(grad);
+            } else {
+                ga.resize(ne); // default value is 0.0f
+            }

             for (int64_t i = 0; i < ne; ++i) { // gradient algebraic
                 // check for nans
@ -2500,6 +2506,35 @@ struct test_sum_rows : public test_case {
     }
 };

+// GGML_OP_MEAN
+struct test_mean : public test_case {
+    const ggml_type type;
+    const std::array<int64_t, 4> ne;
+
+    std::string vars() override {
+        return VARS_TO_STR2(type, ne);
+    }
+
+    test_mean(ggml_type type = GGML_TYPE_F32,
+            std::array<int64_t, 4> ne = {10, 5, 4, 3})
+        : type(type), ne(ne) {}
+
+    ggml_tensor * build_graph(ggml_context * ctx) override {
+        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
+        ggml_set_param(ctx, a);
+        ggml_set_name(a, "a");
+
+        ggml_tensor * out = ggml_mean(ctx, a);
+        ggml_set_name(out, "out");
+
+        return out;
+    }
+
+    float grad_eps() override {
+        return 0.1f * ne[0]*ne[1]*ne[2]*ne[3];
+    }
+};
+
 // GGML_OP_UPSCALE
 struct test_upscale : public test_case {
     const ggml_type type;
@ -2834,24 +2869,14 @@ struct test_cross_entropy_loss : public test_case {
 struct test_opt_step_adamw : public test_case {
     const ggml_type type;
     const std::array<int64_t, 4> ne;
-    const float alpha;
-    const float beta1;
-    const float beta2;
-    const float eps;
-    const float wd;

     std::string vars() override {
-        return VARS_TO_STR7(type, ne, alpha, beta1, beta2, eps, wd);
+        return VARS_TO_STR2(type, ne);
     }

     test_opt_step_adamw(ggml_type type = GGML_TYPE_F32,
-            std::array<int64_t, 4> ne = {10, 5, 4, 3},
-            float alpha = 1e-3f,
-            float beta1 = 0.9f,
-            float beta2 = 0.999f,
-            float eps = 1e-8f,
-            float wd = 0.0f)
-        : type(type), ne(ne), alpha(alpha), beta1(beta1), beta2(beta2), eps(eps), wd(wd) {}
+            std::array<int64_t, 4> ne = {10, 5, 4, 3})
+        : type(type), ne(ne) {}

     ggml_tensor * build_graph(ggml_context * ctx) override {
         ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
@ -2861,7 +2886,16 @@ struct test_opt_step_adamw : public test_case {
         ggml_tensor * grad = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
         ggml_set_name(grad, "grad");

-        ggml_tensor * out = ggml_opt_step_adamw(ctx, a, grad, alpha, beta1, beta2, eps, wd);
+        ggml_tensor * grad_m = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
+        ggml_set_name(grad_m, "grad_m");
+
+        ggml_tensor * grad_v = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
+        ggml_set_name(grad_v, "grad_v");
+
+        ggml_tensor * adamw_params = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 7);
+        ggml_set_name(adamw_params, "adamw_params");
+
+        ggml_tensor * out = ggml_opt_step_adamw(ctx, a, grad, grad_m, grad_v, adamw_params);
         ggml_set_name(out, "out");

         return out;
@ -2869,7 +2903,7 @@ struct test_opt_step_adamw : public test_case {

     void initialize_tensors(ggml_context * ctx) override {
         for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
-            init_tensor_uniform(t, 0.0f, 1.0f); // grad_v needs non-negative values.
+            init_tensor_uniform(t, 0.0f, 1.0f); // grad_v and adamw_params need non-negative values.
         }
     }

@ -3735,6 +3769,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {

     test_cases.emplace_back(new test_sum());
     test_cases.emplace_back(new test_sum_rows());
+    test_cases.emplace_back(new test_mean());
     test_cases.emplace_back(new test_upscale());
     test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 1 }, 2, true));
     test_cases.emplace_back(new test_upscale_ext());
@ -3766,9 +3801,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
     }

     test_cases.emplace_back(new test_cross_entropy_loss());
-    for (float wd : {0.0f, 1e-2f}) {
-        test_cases.emplace_back(new test_opt_step_adamw(GGML_TYPE_F32, {10, 5, 4, 3}, 1.0f, 1e-3f, 0.9f, 0.999f, wd));
-    }
+    test_cases.emplace_back(new test_opt_step_adamw(GGML_TYPE_F32, {10, 5, 4, 3}));

     // these tests are disabled to save execution time, but they can be handy for debugging
 #if 0
@ -3938,6 +3971,8 @@ int main(int argc, char ** argv) {
         ggml_backend_free(backend);
     }

+    ggml_quantize_free();
+
     printf("%zu/%zu backends passed\n", n_ok, ggml_backend_dev_count());

     if (n_ok != ggml_backend_dev_count()) {
@ -3945,8 +3980,6 @@ int main(int argc, char ** argv) {
         return 1;
     }

-    ggml_quantize_free();
-
     printf("\033[1;32mOK\033[0m\n");
     return 0;
 }
1031  tests/test-opt.cpp — file diff suppressed because it is too large.