run-llama3.sh

#!/usr/bin/env bash
# MIT license
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: MIT

# If you want more control, DPC++ allows selecting a specific device through the
# following environment variable.
export ONEAPI_DEVICE_SELECTOR="level_zero:0"
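# Run `sycl-ls` (installed with the oneAPI Base Toolkit) to list the available
# backends and devices, and pick the level_zero index to use in the selector above.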
source /opt/intel/oneapi/setvars.sh
#export GGML_SYCL_DEBUG=1

# Setting ZES_ENABLE_SYSMAN=1 lets SYCL report the free GPU memory via
# sycl::aspect::ext_intel_free_memory; recommended when --split-mode is layer.
INPUT_PROMPT="Building a website can be done in 10 simple steps:\nStep 1:"
MODEL_FILE=models/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf
NGL=99       # Layers offloaded to the GPU. If the device runs out of memory, reduce this value according to the model you are using.
CONTEXT=4096

# Allow device memory allocations larger than 4 GB.
export UR_L0_ENABLE_RELAXED_ALLOCATION_LIMITS=1
if [ $# -gt 0 ]; then
    GGML_SYCL_DEVICE=$1
    echo "Using $GGML_SYCL_DEVICE as the main GPU"
    # Run entirely on the selected GPU (-sm none disables model splitting).
    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT} -mg $GGML_SYCL_DEVICE -sm none
else
    # Use multiple GPUs with the same max compute units.
    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT}
fi
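A minimal usage sketch, assuming the script is saved as run-llama3.sh at the root of a llama.cpp checkout built with the SYCL backend (so that ./build/bin/llama-cli exists) and that the quantized model listed above has been downloaded to models/:

chmod +x run-llama3.sh
./run-llama3.sh        # no argument: split the model across all GPUs with the same max compute units
./run-llama3.sh 0      # argument: use device 0 as the main GPU and disable splitting (-sm none)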