# Define the default target now so that it is always the first target
BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch simple server libembdinput.so embd-input-test
default: $(BUILD_TARGETS)
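
# Note: a plain `make` builds everything in BUILD_TARGETS, because `default` is the
# first rule in the file. You can also build just a subset of targets, e.g.:
#   make main quantize server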

ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif

ifndef UNAME_P
UNAME_P := $(shell uname -p)
endif

ifndef UNAME_M
UNAME_M := $(shell uname -m)
endif

CCV := $(shell $(CC) --version | head -n 1)
CXXV := $(shell $(CXX) --version | head -n 1)

# Mac OS + Arm can report x86_64
# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
ifeq ($(UNAME_S),Darwin)
ifneq ($(UNAME_P),arm)
SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
ifeq ($(SYSCTL_M),1)
# UNAME_P := arm
# UNAME_M := arm64
warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
endif
endif
endif
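
# Note: UNAME_S / UNAME_P / UNAME_M are guarded by ifndef above, so if the
# auto-detection is wrong you can override them on the make command line, e.g.:
#   make UNAME_P=arm UNAME_M=arm64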

#
# Compile flags
#

# keep standard at C11 and C++11
# -Ofast tends to produce faster code, but may not be available for some compilers.
ifdef LLAMA_FAST
OPT = -Ofast
else
OPT = -O3
endif

CFLAGS   = -I.              $(OPT) -std=c11   -fPIC
CXXFLAGS = -I. -I./examples $(OPT) -std=c++11 -fPIC
LDFLAGS  =

ifdef LLAMA_DEBUG
CFLAGS   += -O0 -g
CXXFLAGS += -O0 -g
LDFLAGS  += -g
else
CFLAGS   += -DNDEBUG
CXXFLAGS += -DNDEBUG
endif

ifdef LLAMA_SERVER_VERBOSE
CXXFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
endif
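
# Examples of the optional switches above, set on the make command line:
#   make LLAMA_FAST=1             # optimize with -Ofast instead of -O3
#   make LLAMA_DEBUG=1            # debug build: appends -O0 -g (the later -O0 overrides $(OPT)), NDEBUG not defined
#   make LLAMA_SERVER_VERBOSE=1   # passes -DSERVER_VERBOSE=1 to the server build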

# warnings
CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar

# OS specific
# TODO: support Windows
ifeq ($(UNAME_S),Linux)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Darwin)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),FreeBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),NetBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),OpenBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Haiku)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif

ifdef LLAMA_GPROF
CFLAGS   += -pg
CXXFLAGS += -pg
endif
ifdef LLAMA_PERF
CFLAGS   += -DGGML_PERF
CXXFLAGS += -DGGML_PERF
endif
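
# Example: a profiling build (assumes gprof is available; -pg makes the binaries
# write gmon.out when they exit, which gprof can then read):
#   make clean && make LLAMA_GPROF=1 main
#   ./main ...                 # run as usual, then: gprof ./main gmon.out
#   make LLAMA_PERF=1          # alternatively, build with -DGGML_PERF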

# Architecture specific
# TODO: probably these flags need to be tweaked on some architectures
#       feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
# Use all CPU extensions that are available:
CFLAGS   += -march=native -mtune=native
CXXFLAGS += -march=native -mtune=native
# For an AVX-only build, use these instead:
#CFLAGS   += -mfma -mf16c -mavx
#CXXFLAGS += -mfma -mf16c -mavx
# For an SSSE3-only build (note: SSSE3, not SSE3!), use these instead:
#CFLAGS   += -mssse3
#CXXFLAGS += -mssse3
endif

ifneq ($(filter ppc64%,$(UNAME_M)),)
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
ifneq (,$(findstring POWER9,$(POWER9_M)))
CFLAGS   += -mcpu=power9
CXXFLAGS += -mcpu=power9
endif
# Requires C++23's std::byteswap for big-endian support.
ifeq ($(UNAME_M),ppc64)
CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
endif
endif

ifndef LLAMA_NO_K_QUANTS
CFLAGS   += -DGGML_USE_K_QUANTS
CXXFLAGS += -DGGML_USE_K_QUANTS
OBJS     += k_quants.o
ifdef LLAMA_QKK_64
CFLAGS   += -DGGML_QKK_64
CXXFLAGS += -DGGML_QKK_64
endif
endif
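
# Examples for the k-quants switches above:
#   make LLAMA_NO_K_QUANTS=1   # build without k-quant support (GGML_USE_K_QUANTS not defined)
#   make LLAMA_QKK_64=1        # build k-quants with the smaller QK_K = 64 super-block size (defines GGML_QKK_64)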

ifndef LLAMA_NO_ACCELERATE
# Mac M1 - include Accelerate framework.
# `-framework Accelerate` works on Mac Intel as well, with a negligible performance boost in prediction time.
ifeq ($(UNAME_S),Darwin)
CFLAGS  += -DGGML_USE_ACCELERATE
LDFLAGS += -framework Accelerate
endif
endif # LLAMA_NO_ACCELERATE
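
# Example: opt out of Accelerate on macOS:
#   make LLAMA_NO_ACCELERATE=1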

ifdef LLAMA_MPI
CFLAGS   += -DGGML_USE_MPI -Wno-cast-qual
CXXFLAGS += -DGGML_USE_MPI -Wno-cast-qual
OBJS     += ggml-mpi.o
endif # LLAMA_MPI
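
# Example MPI build (assumption: the MPI compiler wrappers are the usual way to pick
# up the MPI headers/libs, since this Makefile does not add them itself):
#   make CC=mpicc CXX=mpicxx LLAMA_MPI=1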

ifdef LLAMA_OPENBLAS
CFLAGS  += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags openblas)
LDFLAGS += $(shell pkg-config --libs openblas)
endif # LLAMA_OPENBLAS

ifdef LLAMA_BLIS
CFLAGS  += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
LDFLAGS += -lblis -L/usr/local/lib
endif # LLAMA_BLIS
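
# Example BLAS-accelerated builds (pick one backend):
#   make LLAMA_OPENBLAS=1   # needs OpenBLAS discoverable via pkg-config
#   make LLAMA_BLIS=1       # needs BLIS installed under /usr/local or /usr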

ifdef LLAMA_CUBLAS
CFLAGS    += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
CXXFLAGS  += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
LDFLAGS   += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
OBJS      += ggml-cuda.o
NVCC      = nvcc
NVCCFLAGS = --forward-unknown-to-host-compiler
ifdef CUDA_DOCKER_ARCH
NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
else
NVCCFLAGS += -arch=native
endif # CUDA_DOCKER_ARCH
ifdef LLAMA_CUDA_FORCE_DMMV
NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
endif # LLAMA_CUDA_FORCE_DMMV
ifdef LLAMA_CUDA_DMMV_X
NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
else
NVCCFLAGS += -DGGML_CUDA_DMMV_X=32
endif # LLAMA_CUDA_DMMV_X
ifdef LLAMA_CUDA_MMV_Y
NVCCFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y)
else ifdef LLAMA_CUDA_DMMV_Y
NVCCFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_DMMV_Y) # for backwards compatibility
else
NVCCFLAGS += -DGGML_CUDA_MMV_Y=1
endif # LLAMA_CUDA_MMV_Y
ifdef LLAMA_CUDA_DMMV_F16
NVCCFLAGS += -DGGML_CUDA_DMMV_F16
endif # LLAMA_CUDA_DMMV_F16
ifdef LLAMA_CUDA_KQUANTS_ITER
NVCCFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
else
NVCCFLAGS += -DK_QUANTS_PER_ITERATION=2
endif

ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
endif # LLAMA_CUBLAS
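
# Example cuBLAS builds, using the optional tuning knobs handled above:
#   make LLAMA_CUBLAS=1
#   make LLAMA_CUBLAS=1 LLAMA_CUDA_DMMV_X=64 LLAMA_CUDA_MMV_Y=2
# In container builds where -arch=native cannot probe a GPU, pass an explicit
# architecture instead (hypothetical value shown):
#   make LLAMA_CUBLAS=1 CUDA_DOCKER_ARCH=sm_80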

ifdef LLAMA_CLBLAST
CFLAGS   += -DGGML_USE_CLBLAST
CXXFLAGS += -DGGML_USE_CLBLAST
# Mac provides OpenCL as a framework
ifeq ($(UNAME_S),Darwin)
LDFLAGS += -lclblast -framework OpenCL
else
LDFLAGS += -lclblast -lOpenCL
endif
OBJS += ggml-opencl.o

ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
	$(CXX) $(CXXFLAGS) -c $< -o $@
endif # LLAMA_CLBLAST
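
# Example CLBlast build (needs the CLBlast and OpenCL development libraries installed):
#   make LLAMA_CLBLAST=1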

ifdef LLAMA_METAL
CFLAGS   += -DGGML_USE_METAL -DGGML_METAL_NDEBUG
CXXFLAGS += -DGGML_USE_METAL
LDFLAGS  += -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
OBJS     += ggml-metal.o
endif # LLAMA_METAL
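
# Example Metal build (macOS only, since it links Apple frameworks):
#   make LLAMA_METAL=1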

ifneq ($(filter aarch64%,$(UNAME_M)),)
# Apple M1, M2, etc.
# Raspberry Pi 3, 4, Zero 2 (64-bit)
CFLAGS   += -mcpu=native
CXXFLAGS += -mcpu=native
endif

ifneq ($(filter armv6%,$(UNAME_M)),)
# Raspberry Pi 1, Zero
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
endif

ifneq ($(filter armv7%,$(UNAME_M)),)
# Raspberry Pi 2
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
endif

ifneq ($(filter armv8%,$(UNAME_M)),)
# Raspberry Pi 3, 4, Zero 2 (32-bit)
CFLAGS += -mfp16-format=ieee -mno-unaligned-access
endif

ifdef LLAMA_METAL
ggml-metal.o: ggml-metal.m ggml-metal.h
	$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_METAL

ifdef LLAMA_MPI
ggml-mpi.o: ggml-mpi.c ggml-mpi.h
	$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_MPI

# Provide a rule for k_quants.o only when it is actually added to OBJS above,
# i.e. unless LLAMA_NO_K_QUANTS is set (the original ifdef here was inverted).
ifndef LLAMA_NO_K_QUANTS
k_quants.o: k_quants.c k_quants.h
	$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_NO_K_QUANTS

#
# Print build information
#

$(info I llama.cpp build info: )
$(info I UNAME_S:  $(UNAME_S))
$(info I UNAME_P:  $(UNAME_P))
$(info I UNAME_M:  $(UNAME_M))
$(info I CFLAGS:   $(CFLAGS))
$(info I CXXFLAGS: $(CXXFLAGS))
$(info I LDFLAGS:  $(LDFLAGS))
$(info I CC:       $(CCV))
$(info I CXX:      $(CXXV))
$(info )

#
# Build library
#

ggml.o: ggml.c ggml.h ggml-cuda.h
	$(CC) $(CFLAGS) -c $< -o $@

llama.o: llama.cpp ggml.h ggml-cuda.h ggml-metal.h llama.h llama-util.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

common.o: examples/common.cpp examples/common.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

libllama.so: llama.o ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
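
# A hedged usage sketch (not part of the build): after `make libllama.so` you can
# link your own program against the shared library, roughly like this:
#   c++ my_app.cpp -I. -L. -lllama -o my_app
# my_app.cpp is a hypothetical file; it would include llama.h from this directory.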

clean:
	rm -vf *.o *.so main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch embd-input-test build-info.h

#
# Examples
#

main: examples/main/main.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	@echo
	@echo '==== Run ./main -h for help. ===='
	@echo

simple: examples/simple/simple.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS)

libembdinput.so: examples/embd-input/embd-input.h examples/embd-input/embd-input-lib.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) --shared $(CXXFLAGS) $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS)

embd-input-test: libembdinput.so examples/embd-input/embd-input-test.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.so,$(filter-out %.h,$(filter-out %.hpp,$^))) -o $@ $(LDFLAGS) -L. -lembdinput

train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

build-info.h: $(wildcard .git/index) scripts/build-info.sh
	@sh scripts/build-info.sh > $@.tmp
	@if ! cmp -s $@.tmp $@; then \
		mv $@.tmp $@; \
	else \
		rm $@.tmp; \
	fi
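
# Note: the cmp/mv dance above only replaces build-info.h when its contents actually
# change, so a no-op regeneration does not recompile everything that depends on it.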

#
# Tests
#

benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	./$@

vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)

.PHONY: tests clean
tests:
	bash ./tests/run-tests.sh