[tool.poetry]
name = "llama-cpp-scripts"
version = "0.0.0"
description = "Scripts that ship with llama.cpp"
authors = ["GGML <ggml@ggml.ai>"]
readme = "README.md"
homepage = "https://ggml.ai"
repository = "https://github.com/ggml-org/llama.cpp"
keywords = ["ggml", "gguf", "llama.cpp"]
packages = [{ include = "*.py", from = "." }]
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]

[tool.poetry.dependencies]
python = ">=3.9"
numpy = "^1.25.0"
sentencepiece = ">=0.1.98,<=0.2.0"
transformers = ">=4.35.2,<5.0.0"
protobuf = ">=4.21.0,<5.0.0"
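# gguf is resolved from the in-tree gguf-py package rather than from PyPI.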
gguf = { path = "./gguf-py" }
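# torch is pulled from the CPU-only wheel index declared under [[tool.poetry.source]] below.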
torch = { version = "^2.2.0", source = "pytorch" }

[tool.poetry.dev-dependencies]
pytest = "^5.2"

# Force wheel + cpu
# For discussion and context see https://github.com/python-poetry/poetry/issues/6409
[[tool.poetry.source]]
name = "pytorch"
url = "https://download.pytorch.org/whl/cpu"
priority = "explicit"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

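# Console entry points created on install; each command name maps to a module's main() function.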
[tool.poetry.scripts]
llama-convert-hf-to-gguf = "convert_hf_to_gguf:main"
llama-convert-lora-to-gguf = "convert_lora_to_gguf:main"
llama-convert-llama-ggml-to-gguf = "convert_llama_ggml_to_gguf:main"
llama-ggml-vk-generate-shaders = "ggml_vk_generate_shaders:main"