# pyproject.toml
[tool.poetry]
name = "llama-cpp-scripts"
version = "0.0.0"
description = "Scripts that ship with llama.cpp"
authors = ["GGML <ggml@ggml.ai>"]
readme = "README.md"
homepage = "https://ggml.ai"
repository = "https://github.com/ggerganov/llama.cpp"
keywords = ["ggml", "gguf", "llama.cpp"]
packages = [{ include = "*.py", from = "." }]
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]

[tool.poetry.dependencies]
python = ">=3.9"
numpy = "^1.25.0"
sentencepiece = ">=0.1.98,<0.2.0"
transformers = ">=4.35.2,<5.0.0"
protobuf = ">=4.21.0,<5.0.0"
gguf = { path = "./gguf-py" }
torch = { version = "^2.2.0", source = "pytorch" }

[tool.poetry.dev-dependencies]
pytest = "^5.2"

[[tool.poetry.source]]
name = "pytorch"
url = "https://download.pytorch.org/whl/cpu"
priority = "explicit"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

[tool.poetry.scripts]
llama-convert-hf-to-gguf = "convert_hf_to_gguf:main"
llama-convert-llama-ggml-to-gguf = "convert_llama_ggml_to_gguf:main"
llama-convert-lora-to-ggml = "convert_lora_to_ggml:main"
llama-convert-persimmon-to-gguf = "convert_persimmon_to_gguf:main"
llama-convert = "convert:main"
llama-ggml-vk-generate-shaders = "ggml_vk_generate_shaders:main"