|
|
@@ -6,6 +6,7 @@ CONVERTED_MODEL="${1:-"$CONVERTED_MODEL"}"
|
|
|
QUANTIZED_TYPE="${2:-"$QUANTIZED_TYPE"}"
|
|
|
TOKEN_EMBD_TYPE="${3:-"${TOKEN_EMBD_TYPE}"}"
|
|
|
OUTPUT_TYPE="${4:-"${OUTPUT_TYPE}"}"
|
|
|
+BUILD_DIR="${5:-"$BUILD_DIR"}"
|
|
|
QUANTIZED_MODEL=$CONVERTED_MODEL
|
|
|
|
|
|
# Final check if we have a model path
|
|
|
@@ -33,12 +34,16 @@ else
|
|
|
exit 1
|
|
|
fi
|
|
|
|
|
|
-cmake --build ../../build --target llama-quantize -j8
|
|
|
+if [ -z "$BUILD_DIR" ]; then
|
|
|
+ BUILD_DIR="../../build"
|
|
|
+fi
|
|
|
+
|
|
|
+cmake --build "$BUILD_DIR" --target llama-quantize -j8
|
|
|
|
|
|
echo $TOKEN_EMBD_TYPE
|
|
|
echo $OUTPUT_TYPE
|
|
|
|
|
|
-CMD_ARGS=("../../build/bin/llama-quantize")
|
|
|
+CMD_ARGS=("${BUILD_DIR}/bin/llama-quantize")
|
|
|
[[ -n "$TOKEN_EMBD_TYPE" ]] && CMD_ARGS+=("--token-embedding-type" "$TOKEN_EMBD_TYPE")
|
|
|
[[ -n "$OUTPUT_TYPE" ]] && CMD_ARGS+=("--output-tensor-type" "$OUTPUT_TYPE")
|
|
|
CMD_ARGS+=("$CONVERTED_MODEL" "$QUANTIZED_MODEL" "$QUANTIZED_TYPE")
|