@@ -6,6 +6,7 @@
# Notes for llama.cpp:
# 1. Tags are currently based on commit hash, which will not sort asciibetically.
# We need to declare standard versioning if people want to sort by latest release.
+# In the meantime, YYYYMMDD format will be used.
# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
# 3. NVIDIA's developer repo must be enabled, with nvcc, cublas, clblas, etc. installed.
# Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
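+# For example, on Fedora 37 (a sketch; the repo file and package names may
+# differ on other releases):
+#   dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
+#   dnf install cuda-toolkit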
@@ -13,12 +14,13 @@
# It is up to the user to install the correct vendor-specific support.

Name: llama.cpp
-Version: master
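+# rpm evaluates the %%(...) shell expansion at build time; the doubled
+# percent signs pass literal % characters through to the date command.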
+Version: %( date "+%%Y%%m%%d" )
Release: 1%{?dist}
Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
License: MIT
Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
-BuildRequires: coreutils make gcc-c++ git
+BuildRequires: coreutils make gcc-c++ git libstdc++-devel
+Requires: libstdc++
URL: https://github.com/ggerganov/llama.cpp

%define debug_package %{nil}
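+# (The define above disables generation of a -debuginfo subpackage.)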
@@ -26,27 +28,52 @@ URL: https://github.com/ggerganov/llama.cpp

%description
CPU inference for Meta's Llama 2 models using default options.
+Models are not included in this package and must be downloaded separately.

%prep
-%autosetup
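+# The master tarball from GitHub unpacks to llama.cpp-master rather than
+# the default %%{name}-%%{version} directory, so pass -n to %%setup.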
+%setup -n llama.cpp-master

%build
make -j

%install
mkdir -p %{buildroot}%{_bindir}/
-cp -p main %{buildroot}%{_bindir}/llamacpp
-cp -p server %{buildroot}%{_bindir}/llamacppserver
-cp -p simple %{buildroot}%{_bindir}/llamacppsimple
+cp -p main %{buildroot}%{_bindir}/llama
+cp -p server %{buildroot}%{_bindir}/llamaserver
+cp -p simple %{buildroot}%{_bindir}/llamasimple
+
+mkdir -p %{buildroot}/usr/lib/systemd/system
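+# Quote the heredoc delimiter so $LLAMA_ARGS and $MAINPID below are written
+# into the unit file literally rather than expanded during rpmbuild.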
+%{__cat} <<'EOF' > %{buildroot}/usr/lib/systemd/system/llama.service
+[Unit]
+Description=Llama.cpp server, CPU only (no GPU support in this build).
+After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/sysconfig/llama
+ExecStart=/usr/bin/llamaserver $LLAMA_ARGS
+ExecReload=/bin/kill -s HUP $MAINPID
+Restart=no
+
+[Install]
+WantedBy=default.target
+EOF
+
+mkdir -p %{buildroot}/etc/sysconfig
+%{__cat} <<'EOF' > %{buildroot}/etc/sysconfig/llama
+LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
+EOF
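+
+# After installation, point LLAMA_ARGS at a real model file in
+# /etc/sysconfig/llama, then (for example): systemctl enable --now llama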

%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*

%files
-%{_bindir}/llamacpp
-%{_bindir}/llamacppserver
-%{_bindir}/llamacppsimple
+%{_bindir}/llama
+%{_bindir}/llamaserver
+%{_bindir}/llamasimple
+/usr/lib/systemd/system/llama.service
+%config(noreplace) /etc/sysconfig/llama

%pre