# Define the default target now so that it is always the first target
BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot

ifdef LLAMA_BUILD_SERVER
BUILD_TARGETS += server
endif

default: $(BUILD_TARGETS)
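
# Optional targets are opt-in at invocation time; for example, defining
# LLAMA_BUILD_SERVER on the make command line adds the server example:
#
#   make LLAMA_BUILD_SERVER=1
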
ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif

ifndef UNAME_P
UNAME_P := $(shell uname -p)
endif

ifndef UNAME_M
UNAME_M := $(shell uname -m)
endif

CCV  := $(shell $(CC) --version | head -n 1)
CXXV := $(shell $(CXX) --version | head -n 1)

# Mac OS + Arm can report x86_64
# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
ifeq ($(UNAME_S),Darwin)
ifneq ($(UNAME_P),arm)
SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
ifeq ($(SYSCTL_M),1)
# UNAME_P := arm
# UNAME_M := arm64
warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
endif
endif
endif

#
# Compile flags
#

# keep standard at C11 and C++11
CFLAGS   = -I.              -O3 -std=c11   -fPIC
CXXFLAGS = -I. -I./examples -O3 -std=c++11 -fPIC
LDFLAGS  =

ifdef LLAMA_DEBUG
CFLAGS   += -O0 -g
CXXFLAGS += -O0 -g
LDFLAGS  += -g
else
CFLAGS   += -DNDEBUG
CXXFLAGS += -DNDEBUG
endif
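
# LLAMA_DEBUG builds keep debug symbols and disable optimization; with GCC and
# Clang the later -O0 overrides the default -O3. Illustrative invocation:
#
#   make clean && make LLAMA_DEBUG=1
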
# warnings
CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar

# OS specific
# TODO: support Windows
ifeq ($(UNAME_S),Linux)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif

ifeq ($(UNAME_S),Darwin)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif

ifeq ($(UNAME_S),FreeBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif

ifeq ($(UNAME_S),NetBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif

ifeq ($(UNAME_S),OpenBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif

ifeq ($(UNAME_S),Haiku)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif

ifdef LLAMA_GPROF
CFLAGS   += -pg
CXXFLAGS += -pg
endif

ifdef LLAMA_PERF
CFLAGS   += -DGGML_PERF
CXXFLAGS += -DGGML_PERF
endif
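
# Profiling is likewise opt-in. A gprof run might look like the following
# (the model path and prompt are only placeholders):
#
#   make clean && make LLAMA_GPROF=1
#   ./main -m models/7B/ggml-model-q4_0.bin -p "hello"   # writes gmon.out
#   gprof ./main gmon.out
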
# Architecture specific
# TODO: probably these flags need to be tweaked on some architectures
#       feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
# Use all CPU extensions that are available:
CFLAGS   += -march=native -mtune=native
CXXFLAGS += -march=native -mtune=native
# AVX-only usage:
#CFLAGS   += -mfma -mf16c -mavx
#CXXFLAGS += -mfma -mf16c -mavx
endif

ifneq ($(filter ppc64%,$(UNAME_M)),)
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
ifneq (,$(findstring POWER9,$(POWER9_M)))
CFLAGS   += -mcpu=power9
CXXFLAGS += -mcpu=power9
endif
# Requires C++23's std::byteswap for big-endian support.
ifeq ($(UNAME_M),ppc64)
CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
endif
endif

ifndef LLAMA_NO_ACCELERATE
# Mac M1 - include Accelerate framework.
# `-framework Accelerate` works on Mac Intel as well, with a negligible performance boost (at prediction time).
ifeq ($(UNAME_S),Darwin)
CFLAGS  += -DGGML_USE_ACCELERATE
LDFLAGS += -framework Accelerate
endif
endif

ifdef LLAMA_OPENBLAS
CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas -I/usr/include/openblas
ifneq ($(shell grep -e "Arch Linux" -e "ID_LIKE=arch" /etc/os-release 2>/dev/null),)
LDFLAGS += -lopenblas -lcblas
else
LDFLAGS += -lopenblas
endif
endif
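
# BLAS acceleration is opt-in as well; an OpenBLAS build is typically invoked as
# (illustrative, assuming the OpenBLAS headers and library are installed):
#
#   make clean && make LLAMA_OPENBLAS=1
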
ifdef LLAMA_BLIS
CFLAGS  += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
LDFLAGS += -lblis -L/usr/local/lib
endif

ifdef LLAMA_CUBLAS
CFLAGS    += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
CXXFLAGS  += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
LDFLAGS   += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
OBJS      += ggml-cuda.o
NVCC      = nvcc
NVCCFLAGS = --forward-unknown-to-host-compiler -arch=native

ifdef LLAMA_CUDA_DMMV_X
NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
else
NVCCFLAGS += -DGGML_CUDA_DMMV_X=32
endif # LLAMA_CUDA_DMMV_X

ifdef LLAMA_CUDA_DMMV_Y
NVCCFLAGS += -DGGML_CUDA_DMMV_Y=$(LLAMA_CUDA_DMMV_Y)
else
NVCCFLAGS += -DGGML_CUDA_DMMV_Y=1
endif # LLAMA_CUDA_DMMV_Y

ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
endif # LLAMA_CUBLAS
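
# cuBLAS example (illustrative; the DMMV values shown are just the defaults set above):
#
#   make clean && make LLAMA_CUBLAS=1 LLAMA_CUDA_DMMV_X=32 LLAMA_CUDA_DMMV_Y=1
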
ifdef LLAMA_CLBLAST
CFLAGS   += -DGGML_USE_CLBLAST
CXXFLAGS += -DGGML_USE_CLBLAST
# Mac provides OpenCL as a framework
ifeq ($(UNAME_S),Darwin)
LDFLAGS += -lclblast -framework OpenCL
else
LDFLAGS += -lclblast -lOpenCL
endif
OBJS += ggml-opencl.o

ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
	$(CXX) $(CXXFLAGS) -c $< -o $@
endif
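
# CLBlast example (illustrative, assuming the CLBlast and OpenCL development
# packages are installed where the linker can find them):
#
#   make clean && make LLAMA_CLBLAST=1
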
ifneq ($(filter aarch64%,$(UNAME_M)),)
# Apple M1, M2, etc.
# Raspberry Pi 3, 4, Zero 2 (64-bit)
CFLAGS   += -mcpu=native
CXXFLAGS += -mcpu=native
endif

ifneq ($(filter armv6%,$(UNAME_M)),)
# Raspberry Pi 1, Zero
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
endif

ifneq ($(filter armv7%,$(UNAME_M)),)
# Raspberry Pi 2
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
endif

ifneq ($(filter armv8%,$(UNAME_M)),)
# Raspberry Pi 3, 4, Zero 2 (32-bit)
CFLAGS += -mfp16-format=ieee -mno-unaligned-access
endif

#
# Print build information
#
$(info I llama.cpp build info: )
$(info I UNAME_S:  $(UNAME_S))
$(info I UNAME_P:  $(UNAME_P))
$(info I UNAME_M:  $(UNAME_M))
$(info I CFLAGS:   $(CFLAGS))
$(info I CXXFLAGS: $(CXXFLAGS))
$(info I LDFLAGS:  $(LDFLAGS))
$(info I CC:       $(CCV))
$(info I CXX:      $(CXXV))
$(info )

#
# Build library
#
ggml.o: ggml.c ggml.h ggml-cuda.h
	$(CC)  $(CFLAGS)   -c $< -o $@

llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama-util.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

common.o: examples/common.cpp examples/common.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

libllama.so: llama.o ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
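
# Illustrative only (not a rule of this Makefile): a standalone program that
# includes llama.h could be linked against the shared library roughly as
#
#   g++ -I. my_app.cpp -L. -lllama -o my_app
#
# where my_app.cpp is a hypothetical placeholder for your own source file.
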
clean:
	rm -vf *.o main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server vdot build-info.h

#
# Examples
#

main: examples/main/main.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	@echo
	@echo '==== Run ./main -h for help. ===='
	@echo

quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS)

build-info.h: $(wildcard .git/index) scripts/build-info.sh
	@sh scripts/build-info.sh > $@.tmp
	@if ! cmp -s $@.tmp $@; then \
		mv $@.tmp $@; \
	else \
		rm $@.tmp; \
	fi

#
# Tests
#
benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	./$@

vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)

.PHONY: tests clean
tests:
	bash ./tests/run-tests.sh