mirror of https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 10:54:36 +00:00

Commit 83796e62bc

* llama : refactor unicode stuff (ggml-ci)
* unicode : names
* make : fix c++ compiler
* unicode : names
* unicode : straighten tables
* zig : fix build
* unicode : put nfd normalization behind API (ggml-ci)
* swift : fix build
* unicode : add BOM
* unicode : add <cstdint> (ggml-ci)
* unicode : pass cpts as const ref
62 lines
1.8 KiB
Swift
// swift-tools-version:5.5

import PackageDescription

let package = Package(
    name: "llama",
    platforms: [
        .macOS(.v12),
        .iOS(.v14),
        .watchOS(.v4),
        .tvOS(.v14)
    ],
    products: [
        .library(name: "llama", targets: ["llama"]),
    ],
    targets: [
        .target(
            name: "llama",
            path: ".",
            exclude: [
                "cmake",
                "examples",
                "scripts",
                "models",
                "tests",
                "CMakeLists.txt",
                "ggml-cuda.cu",
                "ggml-cuda.h",
                "Makefile"
            ],
            sources: [
                "ggml.c",
                "llama.cpp",
                "unicode.cpp",
                "ggml-alloc.c",
                "ggml-backend.c",
                "ggml-quants.c",
                "ggml-metal.m",
            ],
            resources: [
                .process("ggml-metal.metal")
            ],
            publicHeadersPath: "spm-headers",
            cSettings: [
                .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
                .define("GGML_USE_ACCELERATE"),
                .unsafeFlags(["-fno-objc-arc"]),
                .define("GGML_USE_METAL"),
                // NOTE: NEW_LAPACK requires iOS 16.4+
                // We should consider adding this in the future when we drop support for iOS 14
                // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
// .define("ACCELERATE_NEW_LAPACK"),
|
|
// .define("ACCELERATE_LAPACK_ILP64")
|
|
],
|
|
linkerSettings: [
|
|
.linkedFramework("Accelerate")
|
|
]
|
|
)
|
|
],
|
|
cxxLanguageStandard: .cxx11
|
|
)
|
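For reference, a dependent project consumes this manifest through SwiftPM by linking the single "llama" library product declared above. The following is a minimal sketch of a client manifest, not part of llama.cpp: the package name MyLlamaApp and the local checkout path ../llama.cpp are illustrative assumptions, and a remote .package(url:) dependency on the repository would work the same way.

// swift-tools-version:5.5
//
// Minimal sketch of a consumer manifest that depends on the "llama" product
// declared by the Package.swift shown above. Names and paths are illustrative.

import PackageDescription

let package = Package(
    name: "MyLlamaApp",                    // hypothetical consumer package
    platforms: [
        .macOS(.v12),
        .iOS(.v14)                         // match the minimums declared by the llama package
    ],
    dependencies: [
        // assumes llama.cpp is checked out next to this package;
        // a remote .package(url:) dependency can be used instead
        .package(path: "../llama.cpp")
    ],
    targets: [
        .executableTarget(
            name: "MyLlamaApp",
            dependencies: ["llama"]        // by-name reference to the "llama" library product
        )
    ]
)

A target that depends on the product can then import llama and call the C API exposed through the public headers in spm-headers (llama.h and the ggml headers), with Accelerate and Metal support enabled by the GGML_USE_ACCELERATE and GGML_USE_METAL defines in the manifest above.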