# Makefile for llama.cpp
  1. # Define the default target now so that it is always the first target
  2. BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch simple
  3. ifdef LLAMA_BUILD_SERVER
  4. BUILD_TARGETS += server
  5. LLAMA_SERVER_VERBOSE ?= 1
  6. server: private CXXFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
  7. endif
  8. default: $(BUILD_TARGETS)
  9. ifndef UNAME_S
  10. UNAME_S := $(shell uname -s)
  11. endif
  12. ifndef UNAME_P
  13. UNAME_P := $(shell uname -p)
  14. endif
  15. ifndef UNAME_M
  16. UNAME_M := $(shell uname -m)
  17. endif
  18. CCV := $(shell $(CC) --version | head -n 1)
  19. CXXV := $(shell $(CXX) --version | head -n 1)
  20. # Mac OS + Arm can report x86_64
  21. # ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
  22. ifeq ($(UNAME_S),Darwin)
  23. ifneq ($(UNAME_P),arm)
  24. SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
  25. ifeq ($(SYSCTL_M),1)
  26. # UNAME_P := arm
  27. # UNAME_M := arm64
  28. warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
  29. endif
  30. endif
  31. endif
  32. #
  33. # Compile flags
  34. #
  35. # keep standard at C11 and C++11
  36. # -Ofast tends to produce faster code, but may not be available for some compilers.
  37. #OPT = -Ofast
  38. OPT = -O3
  39. CFLAGS = -I. $(OPT) -std=c11 -fPIC
  40. CXXFLAGS = -I. -I./examples $(OPT) -std=c++11 -fPIC
  41. LDFLAGS =
  42. ifdef LLAMA_DEBUG
  43. CFLAGS += -O0 -g
  44. CXXFLAGS += -O0 -g
  45. LDFLAGS += -g
  46. else
  47. CFLAGS += -DNDEBUG
  48. CXXFLAGS += -DNDEBUG
  49. endif
  50. # warnings
  51. CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith
  52. CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
  53. # OS specific
  54. # TODO: support Windows
  55. ifeq ($(UNAME_S),Linux)
  56. CFLAGS += -pthread
  57. CXXFLAGS += -pthread
  58. endif
  59. ifeq ($(UNAME_S),Darwin)
  60. CFLAGS += -pthread
  61. CXXFLAGS += -pthread
  62. endif
  63. ifeq ($(UNAME_S),FreeBSD)
  64. CFLAGS += -pthread
  65. CXXFLAGS += -pthread
  66. endif
  67. ifeq ($(UNAME_S),NetBSD)
  68. CFLAGS += -pthread
  69. CXXFLAGS += -pthread
  70. endif
  71. ifeq ($(UNAME_S),OpenBSD)
  72. CFLAGS += -pthread
  73. CXXFLAGS += -pthread
  74. endif
  75. ifeq ($(UNAME_S),Haiku)
  76. CFLAGS += -pthread
  77. CXXFLAGS += -pthread
  78. endif
  79. ifdef LLAMA_GPROF
  80. CFLAGS += -pg
  81. CXXFLAGS += -pg
  82. endif
  83. ifdef LLAMA_PERF
  84. CFLAGS += -DGGML_PERF
  85. CXXFLAGS += -DGGML_PERF
  86. endif
  87. # Architecture specific
  88. # TODO: probably these flags need to be tweaked on some architectures
  89. # feel free to update the Makefile for your architecture and send a pull request or issue
  90. ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
  91. # Use all CPU extensions that are available:
  92. CFLAGS += -march=native -mtune=native
  93. CXXFLAGS += -march=native -mtune=native
  94. # Usage AVX-only
  95. #CFLAGS += -mfma -mf16c -mavx
  96. #CXXFLAGS += -mfma -mf16c -mavx
  97. # Usage SSSE3-only (Not is SSE3!)
  98. #CFLAGS += -mssse3
  99. #CXXFLAGS += -mssse3
  100. endif
  101. ifneq ($(filter ppc64%,$(UNAME_M)),)
  102. POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
  103. ifneq (,$(findstring POWER9,$(POWER9_M)))
  104. CFLAGS += -mcpu=power9
  105. CXXFLAGS += -mcpu=power9
  106. endif
  107. # Require c++23's std::byteswap for big-endian support.
  108. ifeq ($(UNAME_M),ppc64)
  109. CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
  110. endif
  111. endif
  112. ifndef LLAMA_NO_K_QUANTS
  113. CFLAGS += -DGGML_USE_K_QUANTS
  114. CXXFLAGS += -DGGML_USE_K_QUANTS
  115. OBJS += k_quants.o
  116. endif
  117. ifndef LLAMA_NO_ACCELERATE
  118. # Mac M1 - include Accelerate framework.
  119. # `-framework Accelerate` works on Mac Intel as well, with negliable performance boost (as of the predict time).
  120. ifeq ($(UNAME_S),Darwin)
  121. CFLAGS += -DGGML_USE_ACCELERATE
  122. LDFLAGS += -framework Accelerate
  123. endif
  124. endif # LLAMA_NO_ACCELERATE
  125. ifdef LLAMA_OPENBLAS
  126. CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas -I/usr/include/openblas
  127. LDFLAGS += -lopenblas
  128. endif # LLAMA_OPENBLAS
  129. ifdef LLAMA_BLIS
  130. CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
  131. LDFLAGS += -lblis -L/usr/local/lib
  132. endif # LLAMA_BLIS
  133. ifdef LLAMA_CUBLAS
  134. CFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
  135. CXXFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
  136. LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
  137. OBJS += ggml-cuda.o
  138. NVCC = nvcc
  139. NVCCFLAGS = --forward-unknown-to-host-compiler -arch=native
  140. ifdef LLAMA_CUDA_DMMV_X
  141. NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
  142. else
  143. NVCCFLAGS += -DGGML_CUDA_DMMV_X=32
  144. endif # LLAMA_CUDA_DMMV_X
  145. ifdef LLAMA_CUDA_DMMV_Y
  146. NVCCFLAGS += -DGGML_CUDA_DMMV_Y=$(LLAMA_CUDA_DMMV_Y)
  147. else
  148. NVCCFLAGS += -DGGML_CUDA_DMMV_Y=1
  149. endif # LLAMA_CUDA_DMMV_Y
  150. ifdef LLAMA_CUDA_KQUANTS_ITER
  151. NVCCFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
  152. else
  153. NVCCFLAGS += -DK_QUANTS_PER_ITERATION=2
  154. endif
  155. ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
  156. $(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
  157. endif # LLAMA_CUBLAS
  158. ifdef LLAMA_CLBLAST
  159. CFLAGS += -DGGML_USE_CLBLAST
  160. CXXFLAGS += -DGGML_USE_CLBLAST
  161. # Mac provides OpenCL as a framework
  162. ifeq ($(UNAME_S),Darwin)
  163. LDFLAGS += -lclblast -framework OpenCL
  164. else
  165. LDFLAGS += -lclblast -lOpenCL
  166. endif
  167. OBJS += ggml-opencl.o
  168. ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
  169. $(CXX) $(CXXFLAGS) -c $< -o $@
  170. endif # LLAMA_CLBLAST
  171. ifdef LLAMA_METAL
  172. CFLAGS += -DGGML_USE_METAL -DGGML_METAL_NDEBUG
  173. CXXFLAGS += -DGGML_USE_METAL
  174. LDFLAGS += -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
  175. OBJS += ggml-metal.o
  176. ggml-metal.o: ggml-metal.m ggml-metal.h
  177. $(CC) $(CFLAGS) -c $< -o $@
  178. endif # LLAMA_METAL
  179. ifneq ($(filter aarch64%,$(UNAME_M)),)
  180. # Apple M1, M2, etc.
  181. # Raspberry Pi 3, 4, Zero 2 (64-bit)
  182. CFLAGS += -mcpu=native
  183. CXXFLAGS += -mcpu=native
  184. endif
  185. ifneq ($(filter armv6%,$(UNAME_M)),)
  186. # Raspberry Pi 1, Zero
  187. CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
  188. endif
  189. ifneq ($(filter armv7%,$(UNAME_M)),)
  190. # Raspberry Pi 2
  191. CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
  192. endif
  193. ifneq ($(filter armv8%,$(UNAME_M)),)
  194. # Raspberry Pi 3, 4, Zero 2 (32-bit)
  195. CFLAGS += -mfp16-format=ieee -mno-unaligned-access
  196. endif
  197. ifdef LLAMA_NO_K_QUANTS
  198. k_quants.o: k_quants.c k_quants.h
  199. $(CC) $(CFLAGS) -c $< -o $@
  200. endif # LLAMA_NO_K_QUANTS
  201. #
  202. # Print build information
  203. #
  204. $(info I llama.cpp build info: )
  205. $(info I UNAME_S: $(UNAME_S))
  206. $(info I UNAME_P: $(UNAME_P))
  207. $(info I UNAME_M: $(UNAME_M))
  208. $(info I CFLAGS: $(CFLAGS))
  209. $(info I CXXFLAGS: $(CXXFLAGS))
  210. $(info I LDFLAGS: $(LDFLAGS))
  211. $(info I CC: $(CCV))
  212. $(info I CXX: $(CXXV))
  213. $(info )
  214. #
  215. # Build library
  216. #
  217. ggml.o: ggml.c ggml.h ggml-cuda.h
  218. $(CC) $(CFLAGS) -c $< -o $@
  219. llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama-util.h
  220. $(CXX) $(CXXFLAGS) -c $< -o $@
  221. common.o: examples/common.cpp examples/common.h
  222. $(CXX) $(CXXFLAGS) -c $< -o $@
  223. libllama.so: llama.o ggml.o $(OBJS)
  224. $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
  225. clean:
  226. rm -vf *.o *.so main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server vdot train-text-from-scratch build-info.h
  227. #
  228. # Examples
  229. #
  230. main: examples/main/main.cpp build-info.h ggml.o llama.o common.o $(OBJS)
  231. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  232. @echo
  233. @echo '==== Run ./main -h for help. ===='
  234. @echo
  235. simple: examples/simple/simple.cpp build-info.h ggml.o llama.o common.o $(OBJS)
  236. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  237. @echo
  238. @echo '==== Run ./simple -h for help. ===='
  239. @echo
  240. quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS)
  241. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  242. quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS)
  243. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  244. perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o common.o $(OBJS)
  245. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  246. embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o common.o $(OBJS)
  247. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  248. save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o common.o $(OBJS)
  249. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  250. server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp build-info.h ggml.o llama.o common.o $(OBJS)
  251. $(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS)
  252. train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o $(OBJS)
  253. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  254. build-info.h: $(wildcard .git/index) scripts/build-info.sh
  255. @sh scripts/build-info.sh > $@.tmp
  256. @if ! cmp -s $@.tmp $@; then \
  257. mv $@.tmp $@; \
  258. else \
  259. rm $@.tmp; \
  260. fi
  261. #
  262. # Tests
  263. #
  264. benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS)
  265. $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
  266. ./$@
  267. vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
  268. $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
  269. .PHONY: tests clean
  270. tests:
  271. bash ./tests/run-tests.sh