MAKEFLAGS += --no-print-directory

define validate_model_path
	@if [ -z "$(MODEL_PATH)" ]; then \
		echo "Error: MODEL_PATH must be provided either as:"; \
		echo " 1. Environment variable: export MODEL_PATH=/path/to/model"; \
		echo " 2. Command line argument: make $(1) MODEL_PATH=/path/to/model"; \
		exit 1; \
	fi
endef

define validate_embedding_model_path
	@if [ -z "$(EMBEDDING_MODEL_PATH)" ]; then \
		echo "Error: EMBEDDING_MODEL_PATH must be provided either as:"; \
		echo " 1. Environment variable: export EMBEDDING_MODEL_PATH=/path/to/model"; \
		echo " 2. Command line argument: make $(1) EMBEDDING_MODEL_PATH=/path/to/model"; \
		exit 1; \
	fi
endef

define quantize_model
	@CONVERTED_MODEL="$(1)" QUANTIZED_TYPE="$(QUANTIZED_TYPE)" \
	TOKEN_EMBD_TYPE="$(TOKEN_EMBD_TYPE)" OUTPUT_TYPE="$(OUTPUT_TYPE)" \
	./scripts/utils/quantize.sh "$(1)" "$(QUANTIZED_TYPE)" "$(TOKEN_EMBD_TYPE)" "$(OUTPUT_TYPE)"
	@echo "Export the quantized model path to $(2) variable in your environment"
endef
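
# Minimal sketch of how the canned recipes above are used inside a target
# (the target name and paths below are placeholders, not part of this file):
#
#   my-target:
#   	$(call validate_model_path,my-target)
#   	$(call quantize_model,$(CONVERTED_MODEL),QUANTIZED_MODEL)
#
# validate_model_path aborts the recipe with exit 1 when MODEL_PATH is unset,
# and quantize_model runs scripts/utils/quantize.sh and then reminds you to
# export the resulting path into the named variable (QUANTIZED_MODEL here).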

###
### Causal Model targets/recipes
###

causal-convert-model-bf16: OUTTYPE=bf16
causal-convert-model-bf16: causal-convert-model

causal-convert-model:
	$(call validate_model_path,causal-convert-model)
	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(OUTTYPE)" MODEL_PATH="$(MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/causal/convert-model.sh
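
# Example invocation (hypothetical path, following the usage printed by
# validate_model_path):
#
#   export MODEL_PATH=/path/to/model
#   make causal-convert-model
#
# or equivalently:
#
#   make causal-convert-model-bf16 MODEL_PATH=/path/to/model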

causal-convert-mm-model-bf16: OUTTYPE=bf16
causal-convert-mm-model-bf16: MM_OUTTYPE=f16
causal-convert-mm-model-bf16: causal-convert-mm-model

causal-convert-mm-model:
	$(call validate_model_path,causal-convert-mm-model)
	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(OUTTYPE)" MODEL_PATH="$(MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/causal/convert-model.sh
	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(MM_OUTTYPE)" MODEL_PATH="$(MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/causal/convert-model.sh --mmproj
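
# Example (hypothetical path): convert a multimodal model, running the
# conversion script twice to produce both the main GGUF and the --mmproj
# projector file:
#
#   make causal-convert-mm-model-bf16 MODEL_PATH=/path/to/mm-model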

causal-run-original-model:
	$(call validate_model_path,causal-run-original-model)
	@MODEL_PATH="$(MODEL_PATH)" ./scripts/causal/run-org-model.py

causal-run-converted-model:
	@CONVERTED_MODEL="$(CONVERTED_MODEL)" ./scripts/causal/run-converted-model.sh

causal-verify-logits: causal-run-original-model causal-run-converted-model
	@./scripts/causal/compare-logits.py
	@MODEL_PATH="$(MODEL_PATH)" ./scripts/utils/check-nmse.py -m ${MODEL_PATH}

causal-run-original-embeddings:
	@./scripts/causal/run-casual-gen-embeddings-org.py

causal-run-converted-embeddings:
	@./scripts/causal/run-converted-model-embeddings-logits.sh

causal-verify-embeddings: causal-run-original-embeddings causal-run-converted-embeddings
	@./scripts/causal/compare-embeddings-logits.sh

causal-inspect-original-model:
	@./scripts/utils/inspect-org-model.py

causal-inspect-converted-model:
	@./scripts/utils/inspect-converted-model.sh

causal-start-embedding-server:
	@./scripts/utils/run-embedding-server.sh ${CONVERTED_MODEL}

causal-curl-embedding-endpoint: causal-run-original-embeddings
	@./scripts/utils/curl-embedding-server.sh | ./scripts/causal/compare-embeddings-logits.sh

causal-quantize-Q8_0: QUANTIZED_TYPE = Q8_0
causal-quantize-Q8_0: causal-quantize-model
causal-quantize-Q4_0: QUANTIZED_TYPE = Q4_0
causal-quantize-Q4_0: causal-quantize-model

# For Quantization Aware Trained (QAT) models in Q4_0 we explicitly set the
# token embedding and output types to Q8_0 instead of the default Q6_K.
causal-quantize-qat-Q4_0: QUANTIZED_TYPE = Q4_0
causal-quantize-qat-Q4_0: TOKEN_EMBD_TYPE = Q8_0
causal-quantize-qat-Q4_0: OUTPUT_TYPE = Q8_0
causal-quantize-qat-Q4_0: causal-quantize-model

causal-quantize-model:
	$(call quantize_model,$(CONVERTED_MODEL),QUANTIZED_MODEL)
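
# Example (hypothetical path): quantize a previously converted model. The
# plain Q4_0 target keeps the default Q6_K token-embedding/output types,
# while the qat variant overrides them to Q8_0 as described above:
#
#   make causal-quantize-Q8_0     CONVERTED_MODEL=/path/to/model-bf16.gguf
#   make causal-quantize-qat-Q4_0 CONVERTED_MODEL=/path/to/model-bf16.gguf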

causal-run-quantized-model:
	@QUANTIZED_MODEL="$(QUANTIZED_MODEL)" ./scripts/causal/run-converted-model.sh ${QUANTIZED_MODEL}

###
### Embedding Model targets/recipes
###

embedding-convert-model-bf16: OUTTYPE=bf16
embedding-convert-model-bf16: embedding-convert-model

embedding-convert-model:
	$(call validate_embedding_model_path,embedding-convert-model)
	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(OUTTYPE)" MODEL_PATH="$(EMBEDDING_MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/embedding/convert-model.sh
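
# Example invocation (hypothetical path), mirroring the causal conversion
# targets but driven by EMBEDDING_MODEL_PATH:
#
#   make embedding-convert-model-bf16 EMBEDDING_MODEL_PATH=/path/to/embedding-model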

embedding-run-original-model:
	$(call validate_embedding_model_path,embedding-run-original-model)
	@EMBEDDING_MODEL_PATH="$(EMBEDDING_MODEL_PATH)" \
	./scripts/embedding/run-original-model.py \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)")

embedding-run-converted-model:
	@./scripts/embedding/run-converted-model.sh $(CONVERTED_EMBEDDING_MODEL) \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)")

embedding-verify-logits: embedding-run-original-model embedding-run-converted-model
	@./scripts/embedding/compare-embeddings-logits.sh \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)")
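
# Example (hypothetical paths): run the original and converted embedding
# models and compare their outputs. PROMPTS_FILE is optional; when set it is
# forwarded as --prompts-file to all three scripts:
#
#   make embedding-verify-logits \
#       EMBEDDING_MODEL_PATH=/path/to/embedding-model \
#       CONVERTED_EMBEDDING_MODEL=/path/to/embedding-model.gguf \
#       PROMPTS_FILE=prompts.txt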

embedding-inspect-original-model:
	$(call validate_embedding_model_path,embedding-inspect-original-model)
	@EMBEDDING_MODEL_PATH="$(EMBEDDING_MODEL_PATH)" ./scripts/utils/inspect-org-model.py -m ${EMBEDDING_MODEL_PATH}

embedding-inspect-converted-model:
	@CONVERTED_EMBEDDING_MODEL="$(CONVERTED_EMBEDDING_MODEL)" ./scripts/utils/inspect-converted-model.sh ${CONVERTED_EMBEDDING_MODEL}

embedding-start-embedding-server:
	@./scripts/utils/run-embedding-server.sh ${CONVERTED_EMBEDDING_MODEL}

embedding-curl-embedding-endpoint:
	@./scripts/utils/curl-embedding-server.sh | ./scripts/embedding/compare-embeddings-logits.sh

embedding-quantize-Q8_0: QUANTIZED_TYPE = Q8_0
embedding-quantize-Q8_0: embedding-quantize-model
embedding-quantize-Q4_0: QUANTIZED_TYPE = Q4_0
embedding-quantize-Q4_0: embedding-quantize-model

# For Quantization Aware Trained (QAT) models in Q4_0 we explicitly set the
# token embedding and output types to Q8_0 instead of the default Q6_K.
embedding-quantize-qat-Q4_0: QUANTIZED_TYPE = Q4_0
embedding-quantize-qat-Q4_0: TOKEN_EMBD_TYPE = Q8_0
embedding-quantize-qat-Q4_0: OUTPUT_TYPE = Q8_0
embedding-quantize-qat-Q4_0: embedding-quantize-model

embedding-quantize-model:
	$(call quantize_model,$(CONVERTED_EMBEDDING_MODEL),QUANTIZED_EMBEDDING_MODEL)
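
# Example (hypothetical path): same pattern as the causal quantize targets,
# but the macro reports the result through QUANTIZED_EMBEDDING_MODEL:
#
#   make embedding-quantize-Q8_0 CONVERTED_EMBEDDING_MODEL=/path/to/embedding-model.gguf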

embedding-run-quantized-model:
	@./scripts/embedding/run-converted-model.sh $(QUANTIZED_EMBEDDING_MODEL) \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)")

###
### Perplexity targets/recipes
###

perplexity-data-gen:
	CONVERTED_MODEL="$(CONVERTED_MODEL)" ./scripts/utils/perplexity-gen.sh

perplexity-run-full:
	QUANTIZED_MODEL="$(QUANTIZED_MODEL)" LOGITS_FILE="$(LOGITS_FILE)" \
	./scripts/utils/perplexity-run.sh

perplexity-run:
	QUANTIZED_MODEL="$(QUANTIZED_MODEL)" ./scripts/utils/perplexity-run-simple.sh
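
# Example (hypothetical paths): generate perplexity data from the converted
# model, then run the full comparison against the quantized model. LOGITS_FILE
# is assumed here to point at the output of perplexity-data-gen:
#
#   make perplexity-data-gen CONVERTED_MODEL=/path/to/model-bf16.gguf
#   make perplexity-run-full QUANTIZED_MODEL=/path/to/model-q4_0.gguf LOGITS_FILE=/path/to/logits.bin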

###
### HuggingFace targets/recipes
###

hf-create-model:
	@./scripts/utils/hf-create-model.py -m "${MODEL_NAME}" -ns "${NAMESPACE}" -b "${ORIGINAL_BASE_MODEL}"

hf-create-model-dry-run:
	@./scripts/utils/hf-create-model.py -m "${MODEL_NAME}" -ns "${NAMESPACE}" -b "${ORIGINAL_BASE_MODEL}" -d

hf-create-model-embedding:
	@./scripts/utils/hf-create-model.py -m "${MODEL_NAME}" -ns "${NAMESPACE}" -b "${ORIGINAL_BASE_MODEL}" -e

hf-create-model-embedding-dry-run:
	@./scripts/utils/hf-create-model.py -m "${MODEL_NAME}" -ns "${NAMESPACE}" -b "${ORIGINAL_BASE_MODEL}" -e -d

hf-create-model-private:
	@./scripts/utils/hf-create-model.py -m "${MODEL_NAME}" -ns "${NAMESPACE}" -b "${ORIGINAL_BASE_MODEL}" -p

hf-upload-gguf-to-model:
	@./scripts/utils/hf-upload-gguf-model.py -m "${MODEL_PATH}" -r "${REPO_ID}" -o "${NAME_IN_REPO}"

hf-create-collection:
	@./scripts/utils/hf-create-collection.py -n "${NAME}" -d "${DESCRIPTION}" -ns "${NAMESPACE}"

hf-add-model-to-collection:
	@./scripts/utils/hf-add-model-to-collection.py -c "${COLLECTION}" -m "${MODEL}"
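
# Example workflow (hypothetical names; the exact repo/collection identifier
# formats depend on the scripts above): create the model repository, upload a
# GGUF file to it, then group it into a collection:
#
#   make hf-create-model MODEL_NAME=my-model NAMESPACE=my-namespace ORIGINAL_BASE_MODEL=org/base-model
#   make hf-upload-gguf-to-model MODEL_PATH=/path/to/model-q4_0.gguf REPO_ID=my-namespace/my-model NAME_IN_REPO=model-q4_0.gguf
#   make hf-create-collection NAME=my-collection DESCRIPTION="Converted models" NAMESPACE=my-namespace
#   make hf-add-model-to-collection COLLECTION=my-namespace/my-collection MODEL=my-namespace/my-model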

.PHONY: clean
clean:
	@${RM} -rf data .converted_embedding_model.txt .converted_model.txt .embedding_model_name.txt .model_name.txt