# flake.nix
  1. {
  2. description = "Port of Facebook's LLaMA model in C/C++";
  3. inputs = {
  4. nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  5. flake-parts.url = "github:hercules-ci/flake-parts";
  6. };
  7. # For inspection, use `nix flake show github:ggerganov/llama.cpp` or the nix repl:
  8. #
  9. # ```bash
  10. # ❯ nix repl
  11. # nix-repl> :lf github:ggerganov/llama.cpp
  12. # Added 13 variables.
  13. # nix-repl> outputs.apps.x86_64-linux.quantize
  14. # { program = "/nix/store/00000000000000000000000000000000-llama.cpp/bin/quantize"; type = "app"; }
  15. # ```
  16. outputs =
  17. { self, flake-parts, ... }@inputs:
  18. let
  19. # We could include the git revisions in the package names but those would
  20. # needlessly trigger rebuilds:
  21. # llamaVersion = self.dirtyShortRev or self.shortRev;
  22. # Nix already uses cryptographic hashes for versioning, so we'll just fix
  23. # the fake semver for now:
  24. llamaVersion = "0.0.0";
  25. in
  26. flake-parts.lib.mkFlake { inherit inputs; }
  27. {
  28. imports = [
  29. .devops/nix/nixpkgs-instances.nix
  30. .devops/nix/apps.nix
  31. .devops/nix/devshells.nix
  32. .devops/nix/jetson-support.nix
  33. ];
  34. # An overlay can be used to have a more granular control over llama-cpp's
  35. # dependencies and configuration, than that offered by the `.override`
  36. # mechanism. Cf. https://nixos.org/manual/nixpkgs/stable/#chap-overlays.
  37. #
  38. # E.g. in a flake:
  39. # ```
  40. # { nixpkgs, llama-cpp, ... }:
  41. # let pkgs = import nixpkgs {
  42. # overlays = [ (llama-cpp.overlays.default) ];
  43. # system = "aarch64-linux";
  44. # config.allowUnfree = true;
  45. # config.cudaSupport = true;
  46. # config.cudaCapabilities = [ "7.2" ];
  47. # config.cudaEnableForwardCompat = false;
  48. # }; in {
  49. # packages.aarch64-linux.llamaJetsonXavier = pkgs.llamaPackages.llama-cpp;
  50. # }
  51. # ```
  52. #
  53. # Cf. https://nixos.org/manual/nix/unstable/command-ref/new-cli/nix3-flake.html?highlight=flake#flake-format
  54. flake.overlays.default =
  55. (final: prev: {
  56. llamaPackages = final.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
  57. inherit (final.llamaPackages) llama-cpp;
  58. });
  59. systems = [
  60. "aarch64-darwin"
  61. "aarch64-linux"
  62. "x86_64-darwin" # x86_64-darwin isn't tested (and likely isn't relevant)
  63. "x86_64-linux"
  64. ];
  65. perSystem =
  66. {
  67. config,
  68. lib,
  69. pkgs,
  70. pkgsCuda,
  71. pkgsRocm,
  72. ...
  73. }:
  74. {
  75. # We don't use the overlay here so as to avoid making too many instances of nixpkgs,
  76. # cf. https://zimbatm.com/notes/1000-instances-of-nixpkgs
  77. packages =
  78. {
  79. default = (pkgs.callPackage .devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp;
  80. }
  81. // lib.optionalAttrs pkgs.stdenv.isLinux {
  82. opencl = config.packages.default.override { useOpenCL = true; };
  83. cuda = (pkgsCuda.callPackage .devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp;
  84. rocm = (pkgsRocm.callPackage .devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp;
  85. mpi-cpu = config.packages.default.override { useMpi = true; };
  86. mpi-cuda = config.packages.default.override { useMpi = true; };
  87. };
  88. };
  89. };
  90. }