run-llama2.sh

#!/bin/bash
# MIT license
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: MIT

# Initialize the oneAPI environment (compiler, SYCL runtime, etc.).
source /opt/intel/oneapi/setvars.sh
#export GGML_SYCL_DEBUG=1

# ZES_ENABLE_SYSMAN=1 lets the SYCL backend query a GPU's free memory via
# sycl::aspect::ext_intel_free_memory; recommended when --split-mode is "layer".
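# Usage (a sketch; the examples/sycl/ path and device id 0 below are
# assumptions, not part of the original script):
#   ./examples/sycl/run-llama2.sh      # split the model across all matching GPUs
#   ./examples/sycl/run-llama2.sh 0    # single GPU, device id 0 (-mg 0 -sm none)
# Device ids can be listed with oneAPI's sycl-ls.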
INPUT_PROMPT="Building a website can be done in 10 simple steps:\nStep 1:"
MODEL_FILE=models/llama-2-7b.Q4_0.gguf
NGL=33          # number of model layers to offload to the GPU
CONTEXT=8192    # context size in tokens

if [ $# -gt 0 ]; then
    GGML_SYCL_DEVICE=$1
    echo "use $GGML_SYCL_DEVICE as main GPU"
    # use a single GPU only
    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT} -mg $GGML_SYCL_DEVICE -sm none
else
    # use multiple GPUs with the same max compute units
    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT}
fi