chat-vicuna.sh

#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.." || exit
MODEL="${MODEL:-./models/ggml-vic13b-uncensored-q5_0.bin}"
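# Example (path and filename are illustrative, not shipped with the repo):
# point MODEL at a different quantized model file when launching the script:
#   MODEL=./models/ggml-vic7b-q5_0.bin ./chat-vicuna.sh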
PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat.txt}"
USER_NAME="### Human"
AI_NAME="### Assistant"
# Adjust to the number of CPU cores you want to use.
N_THREAD="${N_THREAD:-8}"
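# Example (assumes GNU coreutils `nproc` is available): use every core on the machine:
#   N_THREAD="$(nproc)" ./chat-vicuna.sh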
# Number of tokens to predict (made larger than the default because we want a long interaction).
N_PREDICTS="${N_PREDICTS:-2048}"
# Note: you can also override the generation options by specifying them on the command line:
# For example, override the context size by doing: ./chat-vicuna.sh --ctx_size 1024
GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}"
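# Example (values are illustrative): replace the whole option string from the environment:
#   GEN_OPTIONS="--ctx_size 1024 --temp 0.2 --top_k 20" ./chat-vicuna.sh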
DATE_TIME=$(date +%H:%M)
DATE_YEAR=$(date +%Y)
PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt)
# Fill the placeholders in the prompt template and write the result to the temp file.
sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \
    -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \
    -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \
    -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \
    "$PROMPT_TEMPLATE" > "$PROMPT_FILE"
# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
./bin/llama-cli $GEN_OPTIONS \
    --model "$MODEL" \
    --threads "$N_THREAD" \
    --n_predict "$N_PREDICTS" \
    --color --interactive \
    --file "$PROMPT_FILE" \
    --reverse-prompt "### Human:" \
    --in-prefix ' ' \
    "$@"