mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-11-14 14:59:52 +00:00
c83ad6d01e
Some checks are pending
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/full-cuda.Dockerfile platforms:linux/amd64 tag:full-cuda]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/full.Dockerfile platforms:linux/amd64,linux/arm64 tag:full]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli-cuda.Dockerfile platforms:linux/amd64 tag:light-cuda]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli-intel.Dockerfile platforms:linux/amd64 tag:light-intel]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli.Dockerfile platforms:linux/amd64,linux/arm64 tag:light]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server-cuda.Dockerfile platforms:linux/amd64 tag:server-cuda]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server-intel.Dockerfile platforms:linux/amd64 tag:server-intel]) (push) Waiting to run
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server.Dockerfile platforms:linux/amd64,linux/arm64 tag:server]) (push) Waiting to run
Nix CI / nix-eval (macos-latest) (push) Waiting to run
Nix CI / nix-eval (ubuntu-latest) (push) Waiting to run
Nix CI / nix-build (macos-latest) (push) Waiting to run
Nix CI / nix-build (ubuntu-latest) (push) Waiting to run
flake8 Lint / Lint (push) Waiting to run
Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
80 lines
2.0 KiB
Swift
80 lines
2.0 KiB
Swift
// swift-tools-version:5.5
|
|
|
|
import PackageDescription
|
|
|
|
// Core C/C++/Objective-C translation units compiled into the "llama" target.
// Platform-specific files (e.g. the Metal backend) are appended conditionally below.
var sources = [
    "src/llama.cpp",
    "src/llama-vocab.cpp",
    "src/llama-grammar.cpp",
    "src/llama-sampling.cpp",
    "src/unicode.cpp",
    "src/unicode-data.cpp",
    "ggml/src/ggml.c",
    "ggml/src/ggml-alloc.c",
    "ggml/src/ggml-backend.cpp",
    "ggml/src/ggml-quants.c",
    "ggml/src/ggml-aarch64.c",
]
// Bundle resources and linker flags; both start empty and are populated
// per-platform in the conditional blocks below.
var resources: [Resource] = []
var linkerSettings: [LinkerSetting] = []
// C/C++ compiler flags applied on every platform.
// NOTE(review): .unsafeFlags prevents this package from being consumed as a
// versioned SwiftPM dependency (local/branch-based references only) — confirm
// this is acceptable for downstream users.
var cSettings: [CSetting] = [
    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
    .unsafeFlags(["-fno-objc-arc"]),
    // NOTE: ACCELERATE_NEW_LAPACK requires iOS 16.4+.
    // We should consider adding this in the future when we drop support for iOS 14.
    // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
    // .define("ACCELERATE_NEW_LAPACK"),
    // .define("ACCELERATE_LAPACK_ILP64")
]
#if canImport(Darwin)
// Apple platforms: compile the Metal backend, ship its shader source as a
// processed bundle resource, link the Accelerate framework, and enable the
// corresponding ggml feature defines.
sources.append("ggml/src/ggml-metal.m")
resources.append(.process("ggml/src/ggml-metal.metal"))
linkerSettings.append(.linkedFramework("Accelerate"))
cSettings.append(contentsOf: [
    .define("GGML_USE_ACCELERATE"),
    .define("GGML_USE_METAL")
])
#endif
#if os(Linux)
// Expose GNU/glibc-specific APIs to the C sources (presumably required by
// ggml on Linux — confirm against the C sources).
cSettings.append(.define("_GNU_SOURCE"))
#endif
// Package manifest: a single "llama" library target rooted at the repository
// top level, with non-source directories excluded and public headers served
// from spm-headers.
let package = Package(
    name: "llama",
    platforms: [
        .macOS(.v12),
        .iOS(.v14),
        .watchOS(.v4),
        .tvOS(.v14)
    ],
    products: [
        .library(name: "llama", targets: ["llama"]),
    ],
    targets: [
        .target(
            name: "llama",
            path: ".",
            // Directories/files that live in the repo but must not be
            // compiled or bundled into the Swift package.
            exclude: [
                "cmake",
                "examples",
                "scripts",
                "models",
                "tests",
                "CMakeLists.txt",
                "Makefile"
            ],
            sources: sources,
            resources: resources,
            publicHeadersPath: "spm-headers",
            cSettings: cSettings,
            linkerSettings: linkerSettings
        )
    ],
    // NOTE(review): upstream llama.cpp C++ sources may require a newer
    // standard than C++11 — confirm .cxx11 still builds before bumping.
    cxxLanguageStandard: .cxx11
)