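# Nix expression for the gguf Python package, built from the in-tree
# ../../gguf-py sources and versioned in lockstep with llamaVersion.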
{
  lib,
  llamaVersion,
  numpy,
  tqdm,
  sentencepiece,
  pyyaml,
  poetry-core,
  buildPythonPackage,
  pytestCheckHook,
}:

buildPythonPackage {
  pname = "gguf";
  version = llamaVersion;
  pyproject = true;
  nativeBuildInputs = [ poetry-core ];
  propagatedBuildInputs = [
    numpy
    tqdm
    sentencepiece
    pyyaml
  ];
  src = lib.cleanSource ../../gguf-py;
  pythonImportsCheck = [
    "numpy"
    "gguf"
  ];
  nativeCheckInputs = [ pytestCheckHook ];
  doCheck = true;
  meta = with lib; {
    description = "Python package for writing binary files in the GGUF format";
    license = licenses.mit;
    maintainers = [ maintainers.ditsuke ];
  };
}
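# A minimal sketch of how this expression might be consumed from a flake or
# overlay. The attribute name and file path below are assumptions for
# illustration only, not taken from this file:
#
#   gguf-py = python3Packages.callPackage ./package-gguf-py.nix {
#     inherit llamaVersion;
#   };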