@@ -267,9 +267,9 @@ extern "C" {
         enum llama_split_mode split_mode; // how to split the model across multiple GPUs

         // main_gpu interpretation depends on split_mode:
-        // LLAMA_SPLIT_NONE: the GPU that is used for the entire model
-        // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results
-        // LLAMA_SPLIT_LAYER: ignored
+        // LLAMA_SPLIT_MODE_NONE: the GPU that is used for the entire model
+        // LLAMA_SPLIT_MODE_ROW: the GPU that is used for small tensors and intermediate results
+        // LLAMA_SPLIT_MODE_LAYER: ignored
         int32_t main_gpu;

         // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
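
For callers, the rename means code using the old LLAMA_SPLIT_* names must switch to the LLAMA_SPLIT_MODE_* spellings. Below is a minimal sketch of setting these fields with the renamed enum, assuming the standard llama.cpp loading entry points (llama_model_default_params, llama_load_model_from_file, llama_free_model), the zero-argument form of llama_backend_init, and a placeholder model path:

#include "llama.h"

int main(void) {
    llama_backend_init(); // zero-argument form assumed here

    struct llama_model_params params = llama_model_default_params();
    params.n_gpu_layers = 99;                    // offload all layers
    params.split_mode   = LLAMA_SPLIT_MODE_NONE; // do not split the model across GPUs
    params.main_gpu     = 0;                     // with LLAMA_SPLIT_MODE_NONE: the GPU used for the entire model

    // "model.gguf" is a placeholder path, not taken from the diff above
    struct llama_model * model = llama_load_model_from_file("model.gguf", params);
    if (model == NULL) {
        llama_backend_free();
        return 1;
    }

    llama_free_model(model);
    llama_backend_free();
    return 0;
}

With LLAMA_SPLIT_MODE_LAYER, main_gpu is ignored and the tensor_split array controls what proportion of the model goes to each device, as the struct comments above describe.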