server-llama2-13B.sh

#!/bin/bash

set -e

cd "$(dirname "$0")/.." || exit

# Specify the model you want to use here:
MODEL="${MODEL:-./models/llama-2-13b-chat.ggmlv3.q5_K_M.bin}"
PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat-system.txt}"

# Adjust to the number of CPU cores you want to use.
N_THREAD="${N_THREAD:-12}"
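# For example, to use every available core (a sketch; `nproc` is
# Linux-specific, on macOS use `sysctl -n hw.ncpu` instead):
#   N_THREAD="$(nproc)" ./server-llama2-13B.sh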

# Note: you can also override the generation options by specifying them on the command line:
GEN_OPTIONS="${GEN_OPTIONS:---ctx-size 4096 --batch-size 1024}"
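# For example (the flag values here are purely illustrative):
#   GEN_OPTIONS="--ctx-size 2048 --batch-size 512" ./server-llama2-13B.sh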

# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
./llama-server $GEN_OPTIONS \
  --model "$MODEL" \
  --threads "$N_THREAD" \
  --rope-freq-scale 1.0 \
  "$@"

# I used this to test the model with mps (Apple Metal), but omitted it from the
# general-purpose defaults. If you want GPU offload, just pass it on the command line:
# -ngl 1 \
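
# Example invocations, assuming this script lives one directory below the repo
# root (per the `cd` at the top) and that you run it from the repo root; the
# model path and host/port values below are illustrative, not defaults:
#   ./examples/server-llama2-13B.sh
#   MODEL=./models/some-other-model.bin ./examples/server-llama2-13B.sh
#   ./examples/server-llama2-13B.sh -ngl 1 --host 0.0.0.0 --port 8080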