tests : minor bash stuff (#6902)

* tests : minor bash stuff

ggml-ci

* llama : fix build

ggml-ci

* tests : fix CUR_DIR -> ROOT_DIR

ggml-ci

* tests : fix fname

ggml-ci
Georgi Gerganov 1 year ago
Parent
Commit aa750c1ede
5 changed files with 23 additions and 22 deletions
  1. ci/run.sh (+2, -0)
  2. examples/gguf-split/tests.sh (+8, -8)
  3. examples/quantize/tests.sh (+8, -8)
  4. examples/server/tests/tests.sh (+3, -4)
  5. llama.cpp (+2, -2)

ci/run.sh (+2, -0)

@@ -161,6 +161,7 @@ function gg_run_test_scripts_debug {
     set -e
 
     (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+    (cd ./examples/quantize   && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
 
     set +e
 }
@@ -184,6 +185,7 @@ function gg_run_test_scripts_release {
     set -e
 
     (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+    (cd ./examples/quantize   && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
 
     set +e
 }
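
The added lines run the quantize test script the same way the gguf-split script is already exercised. Outside CI, roughly the same step can be reproduced by hand; a minimal sketch, assuming the binaries were built into ./build/bin and that /tmp is acceptable as scratch space (both paths are illustrative and mirror the usage text of the test scripts, not the $SRC/$MNT layout that ci/run.sh sets up):

    # from the repository root (illustrative paths)
    (cd ./examples/quantize && bash tests.sh ../../build/bin /tmp)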

examples/gguf-split/tests.sh (+8, -8)

@@ -4,16 +4,16 @@ set -eu
 
 if [ $# -lt 1 ]
 then
-  echo "usage:   $0 path_to_build_binary [path_to_temp_folder]"
-  echo "example: $0 ../../build/bin ../../tmp"
-  exit 1
+    echo "usage:   $0 path_to_build_binary [path_to_temp_folder]"
+    echo "example: $0 ../../build/bin ../../tmp"
+    exit 1
 fi
 
 if [ $# -gt 1 ]
 then
-  TMP_DIR=$2
+    TMP_DIR=$2
 else
-  TMP_DIR=/tmp
+    TMP_DIR=/tmp
 fi
 
 set -x
@@ -21,7 +21,7 @@ set -x
 SPLIT=$1/gguf-split
 MAIN=$1/main
 WORK_PATH=$TMP_DIR/gguf-split
-CUR_DIR=$(pwd)
+ROOT_DIR=$(realpath $(dirname $0)/../../)
 
 mkdir -p "$WORK_PATH"
 
@@ -30,8 +30,8 @@ rm -f $WORK_PATH/ggml-model-split*.gguf $WORK_PATH/ggml-model-merge*.gguf
 
 # 1. Get a model
 (
-  cd $WORK_PATH
-  "$CUR_DIR"/../../scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
+cd $WORK_PATH
+"$ROOT_DIR"/scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
 )
 echo PASS
 
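The CUR_DIR -> ROOT_DIR change resolves the repository root from the script's own location instead of the caller's working directory, so the hf.sh helper is found even when the script is not started from inside examples/gguf-split. A small sketch of the difference, with illustrative invocations:

    # before: "$CUR_DIR"/../../scripts/hf.sh only resolved when the caller's
    # working directory was examples/gguf-split, as in:
    (cd examples/gguf-split && bash tests.sh ../../build/bin /tmp)

    # after: ROOT_DIR=$(realpath $(dirname $0)/../../) derives the repository root
    # from the script path, so running it from the repository root also works:
    bash examples/gguf-split/tests.sh ./build/bin /tmp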

examples/quantize/test.sh → examples/quantize/tests.sh (+8, -8)

@@ -4,16 +4,16 @@ set -eu
 
 if [ $# -lt 1 ]
 then
-  echo "usage:   $0 path_to_build_binary [path_to_temp_folder]"
-  echo "example: $0 ../../build/bin ../../tmp"
-  exit 1
+    echo "usage:   $0 path_to_build_binary [path_to_temp_folder]"
+    echo "example: $0 ../../build/bin ../../tmp"
+    exit 1
 fi
 
 if [ $# -gt 1 ]
 then
-  TMP_DIR=$2
+    TMP_DIR=$2
 else
-  TMP_DIR=/tmp
+    TMP_DIR=/tmp
 fi
 
 set -x
@@ -22,7 +22,7 @@ SPLIT=$1/gguf-split
 QUANTIZE=$1/quantize
 MAIN=$1/main
 WORK_PATH=$TMP_DIR/quantize
-CUR_DIR=$(pwd)
+ROOT_DIR=$(realpath $(dirname $0)/../../)
 
 mkdir -p "$WORK_PATH"
 
@@ -31,8 +31,8 @@ rm -f $WORK_PATH/ggml-model-split*.gguf $WORK_PATH/ggml-model-requant*.gguf
 
 # 1. Get a model
 (
-  cd $WORK_PATH
-  "$CUR_DIR"/../../scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
+cd $WORK_PATH
+"$ROOT_DIR"/scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
 )
 echo PASS
 

examples/server/tests/tests.sh (+3, -4)

@@ -4,9 +4,8 @@ set -eu
 
 if [ $# -lt 1 ]
 then
-  # Start @llama.cpp scenario
-  behave --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags llama.cpp
+    # Start @llama.cpp scenario
+    behave --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags llama.cpp
 else
-  behave "$@"
+    behave "$@"
 fi
-
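
Since the script forwards its arguments to behave unchanged, the usage stays simple; a short sketch (the flags shown are the ones already used in the script, the invocation itself is illustrative):

    cd examples/server/tests
    bash tests.sh                            # no arguments: default @llama.cpp scenario run
    bash tests.sh --stop --tags llama.cpp    # any arguments are passed straight to behave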

llama.cpp (+2, -2)

@@ -14574,7 +14574,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             fout.close();
         }
     };
-    auto new_ofstream = [&](int index = 0) {
+    auto new_ofstream = [&](int index) {
         cur_split = index;
         GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context");
         std::string fname = fname_out;
@@ -14592,7 +14592,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     };
 
     const auto tn = LLM_TN(model.arch);
-    new_ofstream();
+    new_ofstream(0);
     for (int i = 0; i < ml.n_tensors; ++i) {
         auto weight = ml.get_weight(i);
         struct ggml_tensor * tensor = weight->tensor;