|
|
@@ -44,6 +44,7 @@ int32_t get_num_physical_cores();
|
|
|
|
|
|
struct gpt_params {
|
|
|
uint32_t seed = -1; // RNG seed
|
|
|
+
|
|
|
int32_t n_threads = get_num_physical_cores();
|
|
|
int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
|
|
|
int32_t n_predict = -1; // new tokens to predict
|
|
|
@@ -54,6 +55,8 @@ struct gpt_params {
|
|
|
int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
|
|
|
int32_t n_parallel = 1; // number of parallel sequences to decode
|
|
|
int32_t n_sequences = 1; // number of sequences to decode
|
|
|
+ float p_accept = 0.5f; // speculative decoding accept probability
|
|
|
+ float p_split = 0.1f; // speculative decoding split probability
|
|
|
int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
|
|
|
int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
|
|
|
int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
|
|
|
@@ -66,7 +69,8 @@ struct gpt_params {
|
|
|
float yarn_beta_fast = 32.0f; // YaRN low correction dim
|
|
|
float yarn_beta_slow = 1.0f; // YaRN high correction dim
|
|
|
int32_t yarn_orig_ctx = 0; // YaRN original context length
|
|
|
- int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED;
|
|
|
+ int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; // TODO: better to be int32_t for alignment
|
|
|
+ // (see TODO above — revisit this field's type in a follow-up change)
|
|
|
|
|
|
// // sampling parameters
|
|
|
struct llama_sampling_params sparams;
|
|
|
@@ -90,7 +94,7 @@ struct gpt_params {
|
|
|
int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
|
|
|
// (which is more convenient to use for plotting)
|
|
|
//
|
|
|
- bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
|
|
|
+ bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
|
|
|
size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score
|
|
|
|
|
|
bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS
|