package.nix
{
  lib,
  glibc,
  config,
  stdenv,
  mkShell,
  runCommand,
  cmake,
  ninja,
  pkg-config,
  git,
  python3,
  mpi,
  blas,
  cudaPackages,
  darwin,
  rocmPackages,
  vulkan-headers,
  vulkan-loader,
  curl,
  useBlas ?
    builtins.all (x: !x) [
      useCuda
      useMetalKit
      useRocm
      useVulkan
    ]
    && blas.meta.available,
  useCuda ? config.cudaSupport,
  useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin,
  useMpi ? false, # Increases the runtime closure size by ~700M
  useRocm ? config.rocmSupport,
  enableCurl ? true,
  useVulkan ? false,
  llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake

  # It's necessary to consistently use backendStdenv when building with CUDA support,
  # otherwise we get libstdc++ errors downstream.
  effectiveStdenv ? if useCuda then cudaPackages.backendStdenv else stdenv,
  enableStatic ? effectiveStdenv.hostPlatform.isStatic,
  precompileMetalShaders ? false,
}@inputs:
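
# A hypothetical usage sketch (the path and attribute names are illustrative;
# the flake decides how this derivation is actually exposed): assuming this
# file is instantiated with callPackage, backends can be toggled via the
# arguments above, e.g.
#
#   (pkgs.callPackage ./package.nix { }).override { useVulkan = true; }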
let
  inherit (lib)
    cmakeBool
    cmakeFeature
    optionals
    strings
    versionOlder
    ;
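
  # For reference, cmakeBool and cmakeFeature (from lib.strings) render typed
  # CMake cache entries, e.g. (illustrative values):
  #   cmakeBool "GGML_CUDA" true                 -> "-DGGML_CUDA:BOOL=TRUE"
  #   cmakeFeature "CMAKE_HIP_COMPILER" "clang"  -> "-DCMAKE_HIP_COMPILER:STRING=clang"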
  stdenv = throw "Use effectiveStdenv instead";

  suffices =
    lib.optionals useBlas [ "BLAS" ]
    ++ lib.optionals useCuda [ "CUDA" ]
    ++ lib.optionals useMetalKit [ "MetalKit" ]
    ++ lib.optionals useMpi [ "MPI" ]
    ++ lib.optionals useRocm [ "ROCm" ]
    ++ lib.optionals useVulkan [ "Vulkan" ];

  pnameSuffix =
    strings.optionalString (suffices != [ ])
      "-${strings.concatMapStringsSep "-" strings.toLower suffices}";
  descriptionSuffix =
    strings.optionalString (suffices != [ ])
      ", accelerated with ${strings.concatStringsSep ", " suffices}";

  executableSuffix = effectiveStdenv.hostPlatform.extensions.executable;

  # TODO: package the Python in this repository in a Nix-like way.
  # It'd be nice to migrate to buildPythonPackage, as well as ensure this repo
  # is PEP 517-compatible, and ensure the correct .dist-info is generated.
  # https://peps.python.org/pep-0517/
  #
  # TODO: Package up each Python script or service appropriately, by making
  # them into "entrypoints"
  llama-python = python3.withPackages (
    ps: [
      ps.numpy
      ps.sentencepiece
    ]
  );
  # TODO(Green-Sky): find a better way to opt into the heavy ML Python runtime
  llama-python-extra = python3.withPackages (
    ps: [
      ps.numpy
      ps.sentencepiece
      ps.tiktoken
      ps.torchWithoutCuda
      ps.transformers

      # server bench
      ps.matplotlib

      # server tests
      ps.openai
      ps.behave
      ps.prometheus-client

      # for examples/pydantic-models-to-grammar-examples.py
      ps.docstring-parser
      ps.pydantic

      # for scripts/compare-llama-bench.py
      ps.gitpython
      ps.tabulate
    ]
  );

  xcrunHost = runCommand "xcrunHost" { } ''
    mkdir -p $out/bin
    ln -s /usr/bin/xcrun $out/bin
  '';
  # apple_sdk is supposed to choose sane defaults, no need to handle isAarch64
  # separately
  darwinBuildInputs =
    with darwin.apple_sdk.frameworks;
    [
      Accelerate
      CoreVideo
      CoreGraphics
    ]
    ++ optionals useMetalKit [ MetalKit ];

  cudaBuildInputs = with cudaPackages; [
    cuda_cccl.dev # <nv/target>

    # A temporary hack for reducing the closure size, remove once cudaPackages
    # have stopped using lndir: https://github.com/NixOS/nixpkgs/issues/271792
    cuda_cudart.dev
    cuda_cudart.lib
    cuda_cudart.static
    libcublas.dev
    libcublas.lib
    libcublas.static
  ];

  rocmBuildInputs = with rocmPackages; [
    clr
    hipblas
    rocblas
  ];

  vulkanBuildInputs = [
    vulkan-headers
    vulkan-loader
  ];
in

effectiveStdenv.mkDerivation (
  finalAttrs: {
    pname = "llama-cpp${pnameSuffix}";
    version = llamaVersion;

    # Note: none of the files discarded here are visible in the sandbox or
    # affect the output hash. This also means they can be modified without
    # triggering a rebuild.
    src = lib.cleanSourceWith {
      filter =
        name: type:
        let
          noneOf = builtins.all (x: !x);
          baseName = baseNameOf name;
        in
        noneOf [
          (lib.hasSuffix ".nix" name) # Ignore *.nix files when computing outPaths
          (lib.hasSuffix ".md" name) # Ignore *.md changes when computing outPaths
          (lib.hasPrefix "." baseName) # Skip hidden files and directories
          (baseName == "flake.lock")
        ];
      src = lib.cleanSource ../../.;
    };
    postPatch = ''
      substituteInPlace ./ggml/src/ggml-metal.m \
        --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
      substituteInPlace ./ggml/src/ggml-metal.m \
        --replace '[bundle pathForResource:@"default" ofType:@"metallib"];' "@\"$out/bin/default.metallib\";"
    '';
    # With PR#6015 https://github.com/ggerganov/llama.cpp/pull/6015,
    # `default.metallib` may be compiled with the Metal compiler from Xcode,
    # and we need to escape the sandbox on macOS to access the Metal compiler.
    # `xcrun` is used to find the path of the Metal compiler, which varies
    # and is not on $PATH.
    # See https://github.com/ggerganov/llama.cpp/pull/6118 for discussion.
    __noChroot = effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders;
    nativeBuildInputs =
      [
        cmake
        ninja
        pkg-config
        git
      ]
      ++ optionals useCuda [
        cudaPackages.cuda_nvcc

        # TODO: Replace with autoAddDriverRunpath
        # once https://github.com/NixOS/nixpkgs/pull/275241 has been merged
        cudaPackages.autoAddOpenGLRunpathHook
      ]
      ++ optionals (effectiveStdenv.hostPlatform.isGnu && enableStatic) [ glibc.static ]
      ++ optionals (effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders) [ xcrunHost ];
    buildInputs =
      optionals effectiveStdenv.isDarwin darwinBuildInputs
      ++ optionals useCuda cudaBuildInputs
      ++ optionals useMpi [ mpi ]
      ++ optionals useRocm rocmBuildInputs
      ++ optionals useBlas [ blas ]
      ++ optionals useVulkan vulkanBuildInputs
      ++ optionals enableCurl [ curl ];
    cmakeFlags =
      [
        (cmakeBool "LLAMA_BUILD_SERVER" true)
        (cmakeBool "BUILD_SHARED_LIBS" (!enableStatic))
        (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
        (cmakeBool "LLAMA_CURL" enableCurl)
        (cmakeBool "GGML_NATIVE" false)
        (cmakeBool "GGML_BLAS" useBlas)
        (cmakeBool "GGML_CUDA" useCuda)
        (cmakeBool "GGML_HIPBLAS" useRocm)
        (cmakeBool "GGML_METAL" useMetalKit)
        (cmakeBool "GGML_VULKAN" useVulkan)
        (cmakeBool "GGML_STATIC" enableStatic)
      ]
      ++ optionals useCuda [
        (
          with cudaPackages.flags;
          cmakeFeature "CMAKE_CUDA_ARCHITECTURES" (builtins.concatStringsSep ";" (map dropDot cudaCapabilities))
        )
      ]
      ++ optionals useRocm [
        (cmakeFeature "CMAKE_HIP_COMPILER" "${rocmPackages.llvm.clang}/bin/clang")
        (cmakeFeature "CMAKE_HIP_ARCHITECTURES" (builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets))
      ]
      ++ optionals useMetalKit [
        (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
        (cmakeBool "GGML_METAL_EMBED_LIBRARY" (!precompileMetalShaders))
      ];
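
    # For example, with cudaCapabilities = [ "7.5" "8.6" ] (illustrative values),
    # map dropDot yields [ "75" "86" ], so the flag above renders as
    # -DCMAKE_CUDA_ARCHITECTURES:STRING=75;86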
    # Environment variables needed for ROCm
    env = lib.optionalAttrs useRocm {
      ROCM_PATH = "${rocmPackages.clr}";
      HIP_DEVICE_LIB_PATH = "${rocmPackages.rocm-device-libs}/amdgcn/bitcode";
    };
    # TODO(SomeoneSerge): It's better to add proper install targets at the CMake level,
    # if they haven't been added yet.
    postInstall = ''
      mkdir -p $out/include
      cp $src/include/llama.h $out/include/
    '';
    # Define the shells here, but don't add in the inputsFrom to avoid recursion.
    passthru = {
      inherit
        useBlas
        useCuda
        useMetalKit
        useMpi
        useRocm
        useVulkan
        ;

      shell = mkShell {
        name = "shell-${finalAttrs.finalPackage.name}";
        description = "contains numpy and sentencepiece";
        buildInputs = [ llama-python ];
        inputsFrom = [ finalAttrs.finalPackage ];
        shellHook = ''
          addToSearchPath "LD_LIBRARY_PATH" "${lib.getLib effectiveStdenv.cc.cc}/lib"
        '';
      };

      shell-extra = mkShell {
        name = "shell-extra-${finalAttrs.finalPackage.name}";
        description = "contains numpy, sentencepiece, torchWithoutCuda, and transformers";
        buildInputs = [ llama-python-extra ];
        inputsFrom = [ finalAttrs.finalPackage ];
      };
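
      # Hypothetical invocation sketch; the real attribute path depends on how
      # the flake exposes this passthru:
      #   nix develop .#llama-cpp.passthru.shell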
    };
    meta = {
      # Configurations we don't want even the CI to evaluate. Results in the
      # "unsupported platform" messages. This is mostly a no-op, because
      # cudaPackages would've refused to evaluate anyway.
      badPlatforms = optionals useCuda lib.platforms.darwin;

      # Configurations that are known to result in build failures. Can be
      # overridden by importing Nixpkgs with `allowBroken = true`.
      broken = (useMetalKit && !effectiveStdenv.isDarwin);

      description = "Inference of LLaMA model in pure C/C++${descriptionSuffix}";
      homepage = "https://github.com/ggerganov/llama.cpp/";
      license = lib.licenses.mit;

      # Accommodates `nix run` and `lib.getExe`
      mainProgram = "llama-cli";
      # These people might respond, on a best-effort basis, if you ping them
      # in case of Nix-specific regressions or for reviewing Nix-specific PRs.
      # Consider adding yourself to this list if you want to ensure this flake
      # stays maintained and you're willing to invest your time. Do not add
      # other people without their consent. Consider removing people after
      # they've been unreachable for long periods of time.
      #
      # Note that lib.maintainers is defined in Nixpkgs, but you may just add
      # an attrset following the same format as in
      # https://github.com/NixOS/nixpkgs/blob/f36a80e54da29775c78d7eff0e628c2b4e34d1d7/maintainers/maintainer-list.nix
      maintainers = with lib.maintainers; [
        philiptaron
        SomeoneSerge
      ];

      # Extend `badPlatforms` instead
      platforms = lib.platforms.all;
    };
  }
)
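
# Build sketch (attribute names are illustrative; the flake defines the real
# outputs):
#   nix build .#llama-cpp         # default backend selection
#   nix build .#llama-cpp-vulkan  # a Vulkan variant, if the flake exposes one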