# Define the default target now so that it is always the first target
BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test gguf llama-bench

# Binaries only useful for tests
TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0

default: $(BUILD_TARGETS)
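# A plain `make` builds everything in BUILD_TARGETS; individual binaries can be
# built by name, for example:
#   make -j8 main server
# Test binaries are built via the `tests` target further below.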
ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif

ifndef UNAME_P
UNAME_P := $(shell uname -p)
endif

ifndef UNAME_M
UNAME_M := $(shell uname -m)
endif

CCV := $(shell $(CC) --version | head -n 1)
CXXV := $(shell $(CXX) --version | head -n 1)

# Mac OS + Arm can report x86_64
# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
ifeq ($(UNAME_S),Darwin)
ifneq ($(UNAME_P),arm)
SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
ifeq ($(SYSCTL_M),1)
# UNAME_P := arm
# UNAME_M := arm64
warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
endif
endif
endif

#
# Compile flags
#

# keep standard at C11 and C++11
# -Ofast tends to produce faster code, but may not be available for some compilers.
ifdef LLAMA_FAST
OPT = -Ofast
else
OPT = -O3
endif
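# Example: opt into -Ofast (when the compiler supports it) by defining LLAMA_FAST
# on the make command line:
#   make LLAMA_FAST=1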
CFLAGS   = -I.            $(OPT) -std=c11   -fPIC
CXXFLAGS = -I. -I./common $(OPT) -std=c++11 -fPIC
LDFLAGS  =

ifdef LLAMA_DEBUG
CFLAGS   += -O0 -g
CXXFLAGS += -O0 -g
LDFLAGS  += -g
else
CFLAGS   += -DNDEBUG
CXXFLAGS += -DNDEBUG
endif
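# Example: a debug build (keeps asserts, adds -O0 -g):
#   make clean && make LLAMA_DEBUG=1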
ifdef LLAMA_SERVER_VERBOSE
CXXFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
endif

# warnings
CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith \
            -Wmissing-prototypes
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar

# OS specific
# TODO: support Windows
ifeq ($(UNAME_S),Linux)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Darwin)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),FreeBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),NetBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),OpenBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Haiku)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif

# detect Windows
ifneq ($(findstring _NT,$(UNAME_S)),)
_WIN32 := 1
endif

# library name prefix
ifneq ($(_WIN32),1)
LIB_PRE := lib
endif

# Dynamic Shared Object extension
ifneq ($(_WIN32),1)
DSO_EXT := .so
else
DSO_EXT := .dll
endif

# Windows Sockets 2 (Winsock) for network-capable apps
ifeq ($(_WIN32),1)
LWINSOCK2 := -lws2_32
endif

ifdef LLAMA_GPROF
CFLAGS   += -pg
CXXFLAGS += -pg
endif
ifdef LLAMA_PERF
CFLAGS   += -DGGML_PERF
CXXFLAGS += -DGGML_PERF
endif
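# Profiling examples: `make LLAMA_GPROF=1` adds gprof instrumentation, and
# `make LLAMA_PERF=1` enables ggml's internal performance counters.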
# Architecture specific
# TODO: probably these flags need to be tweaked on some architectures
#       feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64))
# Use all CPU extensions that are available:
CFLAGS   += -march=native -mtune=native
CXXFLAGS += -march=native -mtune=native
# To build with AVX only:
#CFLAGS   += -mfma -mf16c -mavx
#CXXFLAGS += -mfma -mf16c -mavx
# To build with SSSE3 only (not the same as SSE3!):
#CFLAGS   += -mssse3
#CXXFLAGS += -mssse3
endif

ifneq ($(filter aarch64%,$(UNAME_M)),)
# Apple M1, M2, etc.
# Raspberry Pi 3, 4, Zero 2 (64-bit)
CFLAGS   += -mcpu=native
CXXFLAGS += -mcpu=native
endif

ifneq ($(filter armv6%,$(UNAME_M)),)
# Raspberry Pi 1, Zero
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
endif

ifneq ($(filter armv7%,$(UNAME_M)),)
# Raspberry Pi 2
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
endif

ifneq ($(filter armv8%,$(UNAME_M)),)
# Raspberry Pi 3, 4, Zero 2 (32-bit)
CFLAGS += -mfp16-format=ieee -mno-unaligned-access
endif

ifneq ($(filter ppc64%,$(UNAME_M)),)
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
ifneq (,$(findstring POWER9,$(POWER9_M)))
CFLAGS   += -mcpu=power9
CXXFLAGS += -mcpu=power9
endif
# Require c++23's std::byteswap for big-endian support.
ifeq ($(UNAME_M),ppc64)
CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
endif
endif

ifndef LLAMA_NO_K_QUANTS
CFLAGS   += -DGGML_USE_K_QUANTS
CXXFLAGS += -DGGML_USE_K_QUANTS
OBJS     += k_quants.o
ifdef LLAMA_QKK_64
CFLAGS   += -DGGML_QKK_64
CXXFLAGS += -DGGML_QKK_64
endif
endif
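# k-quants are compiled in by default; they can be left out, e.g.:
#   make LLAMA_NO_K_QUANTS=1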
ifndef LLAMA_NO_ACCELERATE
# Mac M1 - include Accelerate framework.
# `-framework Accelerate` works on Intel Macs as well, with a negligible boost in prediction time.
ifeq ($(UNAME_S),Darwin)
CFLAGS  += -DGGML_USE_ACCELERATE
LDFLAGS += -framework Accelerate
endif
endif # LLAMA_NO_ACCELERATE
ifdef LLAMA_MPI
CFLAGS   += -DGGML_USE_MPI -Wno-cast-qual
CXXFLAGS += -DGGML_USE_MPI -Wno-cast-qual
OBJS     += ggml-mpi.o
endif # LLAMA_MPI
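# Example MPI build (assumes MPI compiler wrappers such as mpicc/mpicxx are installed):
#   make CC=mpicc CXX=mpicxx LLAMA_MPI=1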
ifdef LLAMA_OPENBLAS
CFLAGS  += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags openblas)
LDFLAGS += $(shell pkg-config --libs openblas)
endif # LLAMA_OPENBLAS
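# Example (assumes an OpenBLAS install that pkg-config can find):
#   make LLAMA_OPENBLAS=1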
ifdef LLAMA_BLIS
CFLAGS  += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
LDFLAGS += -lblis -L/usr/local/lib
endif # LLAMA_BLIS

ifdef LLAMA_CUBLAS
CFLAGS    += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
CXXFLAGS  += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
LDFLAGS   += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
OBJS      += ggml-cuda.o
NVCCFLAGS  = --forward-unknown-to-host-compiler -use_fast_math
ifdef LLAMA_CUDA_NVCC
NVCC = $(LLAMA_CUDA_NVCC)
else
NVCC = nvcc
endif # LLAMA_CUDA_NVCC
ifdef CUDA_DOCKER_ARCH
NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
else
NVCCFLAGS += -arch=native
endif # CUDA_DOCKER_ARCH
ifdef LLAMA_CUDA_FORCE_DMMV
NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
endif # LLAMA_CUDA_FORCE_DMMV
ifdef LLAMA_CUDA_DMMV_X
NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
else
NVCCFLAGS += -DGGML_CUDA_DMMV_X=32
endif # LLAMA_CUDA_DMMV_X
ifdef LLAMA_CUDA_MMV_Y
NVCCFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y)
else ifdef LLAMA_CUDA_DMMV_Y
NVCCFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_DMMV_Y) # for backwards compatibility
else
NVCCFLAGS += -DGGML_CUDA_MMV_Y=1
endif # LLAMA_CUDA_MMV_Y
ifdef LLAMA_CUDA_F16
NVCCFLAGS += -DGGML_CUDA_F16
endif # LLAMA_CUDA_F16
ifdef LLAMA_CUDA_DMMV_F16
NVCCFLAGS += -DGGML_CUDA_F16
endif # LLAMA_CUDA_DMMV_F16
ifdef LLAMA_CUDA_KQUANTS_ITER
NVCCFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
else
NVCCFLAGS += -DK_QUANTS_PER_ITERATION=2
endif
#ifdef LLAMA_CUDA_CUBLAS
#NVCCFLAGS += -DGGML_CUDA_CUBLAS
#endif # LLAMA_CUDA_CUBLAS
ifdef LLAMA_CUDA_CCBIN
NVCCFLAGS += -ccbin $(LLAMA_CUDA_CCBIN)
endif

ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
	$(NVCC) $(NVCCFLAGS) $(subst -Ofast,-O3,$(CXXFLAGS)) -Wno-pedantic -c $< -o $@
endif # LLAMA_CUBLAS
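# Example CUDA builds (assumes a CUDA toolkit under one of the include/lib paths
# above); the extra variables shown are optional tunables defined in this section:
#   make LLAMA_CUBLAS=1
#   make LLAMA_CUBLAS=1 LLAMA_CUDA_MMV_Y=2 LLAMA_CUDA_F16=1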
ifdef LLAMA_CLBLAST
CFLAGS   += -DGGML_USE_CLBLAST $(shell pkg-config --cflags clblast OpenCL)
CXXFLAGS += -DGGML_USE_CLBLAST $(shell pkg-config --cflags clblast OpenCL)
# Mac provides OpenCL as a framework
ifeq ($(UNAME_S),Darwin)
LDFLAGS += -lclblast -framework OpenCL
else
LDFLAGS += $(shell pkg-config --libs clblast OpenCL)
endif
OBJS += ggml-opencl.o

ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
	$(CXX) $(CXXFLAGS) -c $< -o $@
endif # LLAMA_CLBLAST
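# Example (assumes CLBlast and an OpenCL loader are installed and visible to pkg-config):
#   make LLAMA_CLBLAST=1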
ifdef LLAMA_HIPBLAS
ROCM_PATH               ?= /opt/rocm
HIPCC                   ?= $(ROCM_PATH)/bin/hipcc
GPU_TARGETS             ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch)
LLAMA_CUDA_DMMV_X       ?= 32
LLAMA_CUDA_MMV_Y        ?= 1
LLAMA_CUDA_KQUANTS_ITER ?= 2
CFLAGS   += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
CXXFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
LDFLAGS  += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
LDFLAGS  += -lhipblas -lamdhip64 -lrocblas
HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS))
HIPFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
HIPFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y)
HIPFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
HIPFLAGS += -DCC_TURING=1000000000
ifdef LLAMA_CUDA_FORCE_DMMV
HIPFLAGS += -DGGML_CUDA_FORCE_DMMV
endif # LLAMA_CUDA_FORCE_DMMV
OBJS += ggml-cuda.o

ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
	$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
endif # LLAMA_HIPBLAS
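# Example ROCm build; GPU_TARGETS is normally auto-detected via amdgpu-arch but can
# be overridden (gfx1030 here is only illustrative):
#   make LLAMA_HIPBLAS=1 GPU_TARGETS=gfx1030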
ifdef LLAMA_METAL
CFLAGS   += -DGGML_USE_METAL -DGGML_METAL_NDEBUG
CXXFLAGS += -DGGML_USE_METAL
LDFLAGS  += -framework Foundation -framework Metal -framework MetalKit
OBJS     += ggml-metal.o
endif # LLAMA_METAL

ifdef LLAMA_METAL
ggml-metal.o: ggml-metal.m ggml-metal.h
	$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_METAL
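# Example: on macOS, Metal offloading can be enabled with:
#   make LLAMA_METAL=1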
ifdef LLAMA_MPI
ggml-mpi.o: ggml-mpi.c ggml-mpi.h
	$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_MPI
ifndef LLAMA_NO_K_QUANTS
k_quants.o: k_quants.c k_quants.h
	$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_NO_K_QUANTS
#
# Print build information
#
$(info I llama.cpp build info: )
$(info I UNAME_S:  $(UNAME_S))
$(info I UNAME_P:  $(UNAME_P))
$(info I UNAME_M:  $(UNAME_M))
$(info I CFLAGS:   $(CFLAGS))
$(info I CXXFLAGS: $(CXXFLAGS))
$(info I LDFLAGS:  $(LDFLAGS))
$(info I CC:       $(CCV))
$(info I CXX:      $(CXXV))
$(info )

#
# Build library
#

ggml.o: ggml.c ggml.h ggml-cuda.h
	$(CC) $(CFLAGS) -c $< -o $@

ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h
	$(CC) $(CFLAGS) -c $< -o $@

OBJS += ggml-alloc.o

llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

common.o: common/common.cpp common/common.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

console.o: common/console.cpp common/console.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

grammar-parser.o: common/grammar-parser.cpp common/grammar-parser.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

libllama.so: llama.o ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)

clean:
	rm -vf *.o *.so *.dll main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch convert-llama2c-to-ggml embd-input-test gguf llama-bench build-info.h $(TEST_TARGETS)
#
# Examples
#

main: examples/main/main.cpp build-info.h ggml.o llama.o common.o console.o grammar-parser.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	@echo
	@echo '==== Run ./main -h for help. ===='
	@echo

simple: examples/simple/simple.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp build-info.h ggml.o llama.o common.o grammar-parser.o $(OBJS)
	$(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS) $(LWINSOCK2)

$(LIB_PRE)embdinput$(DSO_EXT): examples/embd-input/embd-input.h examples/embd-input/embd-input-lib.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) --shared $(CXXFLAGS) $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS)

embd-input-test: $(LIB_PRE)embdinput$(DSO_EXT) examples/embd-input/embd-input-test.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %$(DSO_EXT),$(filter-out %.h,$(filter-out %.hpp,$^))) -o $@ $(LDFLAGS) -L. -lembdinput

gguf: examples/gguf/gguf.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

llama-bench: examples/llama-bench/llama-bench.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

build-info.h: $(wildcard .git/index) scripts/build-info.sh
	@sh scripts/build-info.sh > $@.tmp
	@if ! cmp -s $@.tmp $@; then \
		mv $@.tmp $@; \
	else \
		rm $@.tmp; \
	fi
#
# Tests
#

tests: $(TEST_TARGETS)
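# Build every test binary with `make tests`; most can then be run directly,
# e.g. (illustrative): ./tests/test-sampling
# (some tests, such as the tokenizer test, expect a model file argument)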
benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	./$@

vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)

tests/test-llama-grammar: tests/test-llama-grammar.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

tests/test-grammar-parser: tests/test-grammar-parser.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

tests/test-double-float: tests/test-double-float.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

tests/test-grad0: tests/test-grad0.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

tests/test-opt: tests/test-opt.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

tests/test-quantize-fns: tests/test-quantize-fns.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

tests/test-quantize-perf: tests/test-quantize-perf.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

tests/test-sampling: tests/test-sampling.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

tests/test-tokenizer-0: tests/test-tokenizer-0.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)