# Define the default target now so that it is always the first target
BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch simple

ifdef LLAMA_BUILD_SERVER
BUILD_TARGETS += server
LLAMA_SERVER_VERBOSE ?= 1
server: private CXXFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
endif
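
# Example usage: the server example is only built when LLAMA_BUILD_SERVER is set, e.g.
#   make LLAMA_BUILD_SERVER=1 server
# LLAMA_SERVER_VERBOSE=0 can be passed as well; it is forwarded to the compiler as
# -DSERVER_VERBOSE=0 and presumably disables the server's verbose logging.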

default: $(BUILD_TARGETS)

ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif

ifndef UNAME_P
UNAME_P := $(shell uname -p)
endif

ifndef UNAME_M
UNAME_M := $(shell uname -m)
endif

CCV := $(shell $(CC) --version | head -n 1)
CXXV := $(shell $(CXX) --version | head -n 1)

# Mac OS + Arm can report x86_64
# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
ifeq ($(UNAME_S),Darwin)
ifneq ($(UNAME_P),arm)
SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
ifeq ($(SYSCTL_M),1)
# UNAME_P := arm
# UNAME_M := arm64
warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
endif
endif
endif

#
# Compile flags
#

# keep standard at C11 and C++11
# -Ofast tends to produce faster code, but may not be available for some compilers.
#OPT = -Ofast
OPT = -O3
CFLAGS = -I. $(OPT) -std=c11 -fPIC
CXXFLAGS = -I. -I./examples $(OPT) -std=c++11 -fPIC
LDFLAGS =

ifdef LLAMA_DEBUG
CFLAGS += -O0 -g
CXXFLAGS += -O0 -g
LDFLAGS += -g
else
CFLAGS += -DNDEBUG
CXXFLAGS += -DNDEBUG
endif
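
# Example usage: build without optimizations and with debug symbols, e.g.
#   make LLAMA_DEBUG=1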

# warnings
CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar

# OS specific
# TODO: support Windows
ifeq ($(UNAME_S),Linux)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Darwin)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),FreeBSD)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),NetBSD)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),OpenBSD)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Haiku)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif

ifdef LLAMA_GPROF
CFLAGS += -pg
CXXFLAGS += -pg
endif
ifdef LLAMA_PERF
CFLAGS += -DGGML_PERF
CXXFLAGS += -DGGML_PERF
endif

# Architecture specific
# TODO: probably these flags need to be tweaked on some architectures
# feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
# Use all CPU extensions that are available:
CFLAGS += -march=native -mtune=native
CXXFLAGS += -march=native -mtune=native
# To build AVX-only, use these instead:
#CFLAGS += -mfma -mf16c -mavx
#CXXFLAGS += -mfma -mf16c -mavx
# To build SSSE3-only, use these instead (note: SSSE3 is not the same as SSE3):
#CFLAGS += -mssse3
#CXXFLAGS += -mssse3
endif

ifneq ($(filter ppc64%,$(UNAME_M)),)
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
ifneq (,$(findstring POWER9,$(POWER9_M)))
CFLAGS += -mcpu=power9
CXXFLAGS += -mcpu=power9
endif
# Requires C++23's std::byteswap for big-endian support.
ifeq ($(UNAME_M),ppc64)
CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
endif
endif

ifndef LLAMA_NO_K_QUANTS
CFLAGS += -DGGML_USE_K_QUANTS
CXXFLAGS += -DGGML_USE_K_QUANTS
OBJS += k_quants.o
endif
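
# Example usage: k-quants are enabled by default; opt out with, e.g.
#   make LLAMA_NO_K_QUANTS=1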

ifndef LLAMA_NO_ACCELERATE
# Mac M1 - include Accelerate framework.
# `-framework Accelerate` works on Mac Intel as well, with a negligible performance boost (for prediction time).
ifeq ($(UNAME_S),Darwin)
CFLAGS += -DGGML_USE_ACCELERATE
LDFLAGS += -framework Accelerate
endif
endif # LLAMA_NO_ACCELERATE

ifdef LLAMA_OPENBLAS
CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas -I/usr/include/openblas
ifneq ($(shell grep -e "Arch Linux" -e "ID_LIKE=arch" /etc/os-release 2>/dev/null),)
LDFLAGS += -lopenblas -lcblas
else
LDFLAGS += -lopenblas
endif
endif # LLAMA_OPENBLAS
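
# Example usage (assumes the OpenBLAS headers and libraries are installed in one of
# the include paths above):
#   make LLAMA_OPENBLAS=1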

ifdef LLAMA_BLIS
CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
LDFLAGS += -lblis -L/usr/local/lib
endif # LLAMA_BLIS

ifdef LLAMA_CUBLAS
CFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
CXXFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
OBJS += ggml-cuda.o
NVCC = nvcc
NVCCFLAGS = --forward-unknown-to-host-compiler -arch=native
ifdef LLAMA_CUDA_DMMV_X
NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
else
NVCCFLAGS += -DGGML_CUDA_DMMV_X=32
endif # LLAMA_CUDA_DMMV_X
ifdef LLAMA_CUDA_DMMV_Y
NVCCFLAGS += -DGGML_CUDA_DMMV_Y=$(LLAMA_CUDA_DMMV_Y)
else
NVCCFLAGS += -DGGML_CUDA_DMMV_Y=1
endif # LLAMA_CUDA_DMMV_Y
ifdef LLAMA_CUDA_KQUANTS_ITER
NVCCFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
else
NVCCFLAGS += -DK_QUANTS_PER_ITERATION=2
endif

ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
endif # LLAMA_CUBLAS
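
# Example usage (assumes a CUDA toolkit is installed; CUDA_PATH may need to point at
# the local installation). The LLAMA_CUDA_* variables simply override the kernel
# tuning defaults set above (DMMV_X=32, DMMV_Y=1, K_QUANTS_PER_ITERATION=2):
#   make LLAMA_CUBLAS=1
#   make LLAMA_CUBLAS=1 LLAMA_CUDA_DMMV_X=64 LLAMA_CUDA_DMMV_Y=2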

ifdef LLAMA_CLBLAST
CFLAGS += -DGGML_USE_CLBLAST
CXXFLAGS += -DGGML_USE_CLBLAST
# Mac provides OpenCL as a framework
ifeq ($(UNAME_S),Darwin)
LDFLAGS += -lclblast -framework OpenCL
else
LDFLAGS += -lclblast -lOpenCL
endif
OBJS += ggml-opencl.o

ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
	$(CXX) $(CXXFLAGS) -c $< -o $@
endif # LLAMA_CLBLAST
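
# Example usage (assumes CLBlast and an OpenCL runtime are installed):
#   make LLAMA_CLBLAST=1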

ifdef LLAMA_METAL
CFLAGS += -DGGML_USE_METAL -DGGML_METAL_NDEBUG
CXXFLAGS += -DGGML_USE_METAL
LDFLAGS += -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
OBJS += ggml-metal.o

ggml-metal.o: ggml-metal.m ggml-metal.h
	$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_METAL
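
# Example usage (macOS only; links against the Metal frameworks listed above):
#   make LLAMA_METAL=1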

ifneq ($(filter aarch64%,$(UNAME_M)),)
# Apple M1, M2, etc.
# Raspberry Pi 3, 4, Zero 2 (64-bit)
CFLAGS += -mcpu=native
CXXFLAGS += -mcpu=native
endif
ifneq ($(filter armv6%,$(UNAME_M)),)
# Raspberry Pi 1, Zero
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
endif
ifneq ($(filter armv7%,$(UNAME_M)),)
# Raspberry Pi 2
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
endif
ifneq ($(filter armv8%,$(UNAME_M)),)
# Raspberry Pi 3, 4, Zero 2 (32-bit)
CFLAGS += -mfp16-format=ieee -mno-unaligned-access
endif

ifndef LLAMA_NO_K_QUANTS
k_quants.o: k_quants.c k_quants.h
	$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_NO_K_QUANTS

#
# Print build information
#

$(info I llama.cpp build info: )
$(info I UNAME_S: $(UNAME_S))
$(info I UNAME_P: $(UNAME_P))
$(info I UNAME_M: $(UNAME_M))
$(info I CFLAGS: $(CFLAGS))
$(info I CXXFLAGS: $(CXXFLAGS))
$(info I LDFLAGS: $(LDFLAGS))
$(info I CC: $(CCV))
$(info I CXX: $(CXXV))
$(info )

#
# Build library
#

ggml.o: ggml.c ggml.h ggml-cuda.h
	$(CC) $(CFLAGS) -c $< -o $@

llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama-util.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

common.o: examples/common.cpp examples/common.h
	$(CXX) $(CXXFLAGS) -c $< -o $@
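
# The shared library is not in BUILD_TARGETS; build it explicitly with, e.g.
#   make libllama.so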
libllama.so: llama.o ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)

clean:
	rm -vf *.o *.so main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server vdot train-text-from-scratch build-info.h

#
# Examples
#

main: examples/main/main.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	@echo
	@echo '==== Run ./main -h for help. ===='
	@echo

simple: examples/simple/simple.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	@echo
	@echo '==== Run ./simple -h for help. ===='
	@echo

quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS)

train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
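
# build-info.h is regenerated from the git state below, but only replaced when its
# contents actually change, so targets that depend on it are not rebuilt needlessly.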
build-info.h: $(wildcard .git/index) scripts/build-info.sh
	@sh scripts/build-info.sh > $@.tmp
	@if ! cmp -s $@.tmp $@; then \
		mv $@.tmp $@; \
	else \
		rm $@.tmp; \
	fi

#
# Tests
#

benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	./$@

vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)

.PHONY: tests clean
tests:
	bash ./tests/run-tests.sh