import Foundation

// import llama

enum LlamaError: Error {
    case couldNotInitializeContext
}

actor LlamaContext {
    private var model: OpaquePointer
    private var context: OpaquePointer
    private var batch: llama_batch
    private var tokens_list: [llama_token]

    /// Maximum total sequence length (prompt + generated tokens).
    var n_len: Int32 = 512
    /// Position of the next token to generate.
    var n_cur: Int32 = 0
    /// Number of tokens decoded so far.
    var n_decode: Int32 = 0

    init(model: OpaquePointer, context: OpaquePointer) {
        self.model = model
        self.context = context
        self.tokens_list = []
        self.batch = llama_batch_init(512, 0, 1)
    }

    deinit {
        llama_batch_free(batch)
        llama_free(context)
        llama_free_model(model)
        llama_backend_free()
    }

    static func createContext(path: String) throws -> LlamaContext {
        llama_backend_init(false)

        let model_params = llama_model_default_params()
        let model = llama_load_model_from_file(path, model_params)
        guard let model else {
            print("Could not load model at \(path)")
            throw LlamaError.couldNotInitializeContext
        }

        var ctx_params = llama_context_default_params()
        ctx_params.seed            = 1234
        ctx_params.n_ctx           = 2048
        ctx_params.n_threads       = 8
        ctx_params.n_threads_batch = 8

        let context = llama_new_context_with_model(model, ctx_params)
        guard let context else {
            print("Could not load context!")
            throw LlamaError.couldNotInitializeContext
        }

        return LlamaContext(model: model, context: context)
    }

    func get_n_tokens() -> Int32 {
        return batch.n_tokens
    }

    /// Tokenizes the prompt, loads it into the batch, and runs the initial decode.
    func completion_init(text: String) {
        print("attempting to complete \"\(text)\"")

        tokens_list = tokenize(text: text, add_bos: true)

        let n_ctx = llama_n_ctx(context)
        // Required KV cache size: the prompt plus the tokens still to generate (== n_len here).
        let n_kv_req = tokens_list.count + (Int(n_len) - tokens_list.count)

        print("\n n_len = \(n_len), n_ctx = \(n_ctx), n_kv_req = \(n_kv_req)")

        if n_kv_req > n_ctx {
            print("error: n_kv_req > n_ctx, the required KV cache size is not big enough")
        }

        for id in tokens_list {
            print(token_to_piece(token: id))
        }

        // batch = llama_batch_init(512, 0) // done in init()
        batch.n_tokens = Int32(tokens_list.count)

        for i1 in 0..<batch.n_tokens {
            let i = Int(i1)
            batch.token[i]      = tokens_list[i]
            batch.pos[i]        = i1
            batch.n_seq_id[i]   = 1
            batch.seq_id[i]![0] = 0
            batch.logits[i]     = 0
        }
        // Only request logits for the last prompt token.
        batch.logits[Int(batch.n_tokens) - 1] = 1 // true

        if llama_decode(context, batch) != 0 {
            print("llama_decode() failed")
        }

        n_cur = batch.n_tokens
    }

    /// Samples one token greedily, feeds it back through the batch, and decodes.
    /// Returns "" when the end-of-sequence token is sampled or n_len is reached.
    func completion_loop() -> String {
        var new_token_id: llama_token = 0

        let n_vocab = llama_n_vocab(model)
        let logits = llama_get_logits_ith(context, batch.n_tokens - 1)

        var candidates = Array<llama_token_data>()
        candidates.reserveCapacity(Int(n_vocab))

        for token_id in 0..<n_vocab {
            candidates.append(llama_token_data(id: token_id, logit: logits![Int(token_id)], p: 0.0))
        }

        candidates.withUnsafeMutableBufferPointer { buffer in
            var candidates_p = llama_token_data_array(data: buffer.baseAddress, size: buffer.count, sorted: false)

            new_token_id = llama_sample_token_greedy(context, &candidates_p)
        }

        if new_token_id == llama_token_eos(context) || n_cur == n_len {
            print("\n")
            return ""
        }

        let new_token_str = token_to_piece(token: new_token_id)
        print(new_token_str)
        // tokens_list.append(new_token_id)

        // Reuse the batch for a single-token decode step.
        batch.n_tokens = 0

        batch.token[Int(batch.n_tokens)]      = new_token_id
        batch.pos[Int(batch.n_tokens)]        = n_cur
        batch.n_seq_id[Int(batch.n_tokens)]   = 1
        batch.seq_id[Int(batch.n_tokens)]![0] = 0
        batch.logits[Int(batch.n_tokens)]     = 1 // true
        batch.n_tokens += 1

        n_decode += 1
        n_cur    += 1

        if llama_decode(context, batch) != 0 {
            print("failed to evaluate llama!")
        }

        return new_token_str
    }

    func clear() {
        tokens_list.removeAll()
    }

    private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
        // Use the UTF-8 byte count: `text.count` counts Characters and
        // under-reports the length of non-ASCII prompts.
        let utf8Count = text.utf8.count
        let n_tokens = utf8Count + (add_bos ? 1 : 0)
        let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
        let tokenCount = llama_tokenize(model, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, false)

        var swiftTokens: [llama_token] = []
        for i in 0..<tokenCount {
            swiftTokens.append(tokens[Int(i)])
        }

        tokens.deallocate()

        return swiftTokens
    }

    private func token_to_piece(token: llama_token) -> String {
        let capacity = 8
        let result = UnsafeMutablePointer<Int8>.allocate(capacity: capacity)
        result.initialize(repeating: Int8(0), count: capacity)
        defer {
            result.deallocate()
        }

        let nChars = llama_token_to_piece(model, token, result, Int32(capacity))

        // Build the string from the reported byte count, clamped to the buffer,
        // so a piece that fills all 8 bytes (leaving no NUL terminator) cannot
        // be over-read.
        let count = max(0, min(Int(nChars), capacity))
        let data = Data(bytes: result, count: count)
        return String(data: data, encoding: .utf8) ?? ""
    }
}
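
// Usage sketch (illustrative, not part of the library): driving the actor
// from async Swift. `modelPath` is a hypothetical name; `completion_loop()`
// returns "" at end-of-sequence or once n_len is reached, so the caller can
// simply loop until it sees the empty string:
//
//     Task {
//         let llama = try LlamaContext.createContext(path: modelPath)
//         await llama.completion_init(text: "The capital of France is")
//         while true {
//             let piece = await llama.completion_loop()
//             if piece.isEmpty { break }
//             print(piece, terminator: "")
//         }
//         await llama.clear()
//     }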