# llama-cpp-cuda.srpm.spec

# SRPM for building from source and packaging an RPM for RPM-based distros.
# https://docs.fedoraproject.org/en-US/quick-docs/creating-rpm-packages
# Built and maintained by John Boero - boeroboy@gmail.com
# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal

# Notes for llama.cpp:
# 1. Tags are currently based on hash - which will not sort asciibetically.
#    We need to declare standard versioning if people want to sort latest releases.
# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
# 3. NVidia's developer repo must be enabled, with nvcc, cublas, clblas, etc. installed.
#    Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
#    A sketch of the full workflow is shown below.
# 4. OpenCL/CLBLAST support simply requires the ICD loader and basic OpenCL libraries.
#    It is up to the user to install the correct vendor-specific support.
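
# A sketch of one way to enable the repo and build this package (assumes
# Fedora 37, dnf-plugins-core, and rpmdevtools; the repo URL and package
# names may differ on other releases):
#   sudo dnf config-manager --add-repo \
#       https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
#   sudo dnf install cuda-toolkit rpm-build rpmdevtools
#   rpmdev-setuptree                          # create ~/rpmbuild/{SPECS,SOURCES,...}
#   spectool -g -R llama.cpp-cuda.srpm.spec   # download Source0 into SOURCES
#   rpmbuild -ba llama.cpp-cuda.srpm.spec     # build the binary and source RPMs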
Name:           llama.cpp-cuda
# Hash-based upstream tags don't sort, so version by build date (note 1 above).
Version:        %( date "+%%Y%%m%%d" )
Release:        1%{?dist}
Summary:        CUDA inference of LLaMA models in pure C/C++
License:        MIT
Source0:        https://github.com/ggml-org/llama.cpp/archive/refs/heads/master.tar.gz
BuildRequires:  coreutils make gcc-c++ git cuda-toolkit
Requires:       cuda-toolkit
URL:            https://github.com/ggml-org/llama.cpp

# Skip the debuginfo subpackage, and don't derive the build timestamp from
# the (empty) %%changelog.
%define debug_package %{nil}
%define source_date_epoch_from_changelog 0

%description
CUDA-accelerated inference of Meta's LLaMA 2 models using default options.

%prep
%setup -n llama.cpp-master

%build
# Build with the CUDA backend enabled.
make -j GGML_CUDA=1

%install
mkdir -p %{buildroot}%{_bindir}/
cp -p llama-cli %{buildroot}%{_bindir}/llama-cuda-cli
cp -p llama-completion %{buildroot}%{_bindir}/llama-cuda-completion
cp -p llama-server %{buildroot}%{_bindir}/llama-cuda-server
cp -p llama-simple %{buildroot}%{_bindir}/llama-cuda-simple

mkdir -p %{buildroot}/usr/lib/systemd/system
# Quote the heredoc delimiter so $LLAMA_ARGS and $MAINPID reach the unit file
# literally instead of being expanded (to empty) by the %%install shell.
%{__cat} <<'EOF' > %{buildroot}/usr/lib/systemd/system/llamacuda.service
[Unit]
Description=Llama.cpp server (CUDA build)
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target

[Service]
Type=simple
EnvironmentFile=/etc/sysconfig/llama
ExecStart=/usr/bin/llama-cuda-server $LLAMA_ARGS
ExecReload=/bin/kill -s HUP $MAINPID
Restart=no

[Install]
WantedBy=default.target
EOF
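
# The unit is installed but not enabled; typical post-install usage (standard
# systemctl commands, not run by this spec):
#   sudo systemctl daemon-reload
#   sudo systemctl enable --now llamacuda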

mkdir -p %{buildroot}/etc/sysconfig
%{__cat} <<'EOF' > %{buildroot}/etc/sysconfig/llama
LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
EOF
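
# The model path above is only a placeholder; users point LLAMA_ARGS at a
# real model file, e.g. (hypothetical path and options):
#   LLAMA_ARGS="-m /opt/models/llama-2-7b.Q4_K_M.gguf --port 8080"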

%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*

%files
%{_bindir}/llama-cuda-cli
%{_bindir}/llama-cuda-completion
%{_bindir}/llama-cuda-server
%{_bindir}/llama-cuda-simple
/usr/lib/systemd/system/llamacuda.service
%config /etc/sysconfig/llama

%pre

%post

%preun

%postun

%changelog