@@ -2853,9 +2853,9 @@ static void ggml_backend_cuda_synchronize(ggml_backend_t backend) {
 }
 
 #ifdef USE_CUDA_GRAPH
 
-static bool check_node_graph_compatibility(ggml_cgraph * cgraph,
-                                           bool use_cuda_graph) {
+static bool ggml_cuda_graph_check_compatibility(ggml_cgraph * cgraph) {
+    bool use_cuda_graph = true;
 
     // Loop over nodes in GGML graph to obtain info needed for CUDA graph
 
     const std::string gemma3n_per_layer_proj_src0_name = "inp_per_layer_selected";
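Note on the signature change above: `use_cuda_graph` is no longer threaded through as an in/out parameter; the check now starts from `true` and clears the flag itself when it finds an incompatible node. At the call site (see the `ggml_backend_cuda_graph_compute` hunk below) the change reduces to:

```cpp
// before: the caller's flag was passed in and handed back
use_cuda_graph = check_node_graph_compatibility(cgraph, use_cuda_graph);

// after: the compatibility check owns its own flag
use_cuda_graph = ggml_cuda_graph_check_compatibility(cgraph);
```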
@@ -2915,41 +2915,41 @@ static bool check_node_graph_compatibility(ggml_cgraph * cgraph,
     return use_cuda_graph;
 }
 
-static void set_ggml_graph_node_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) {
-    graph_node_properties->node_address = node->data;
-    graph_node_properties->node_op = node->op;
+static void ggml_cuda_graph_node_set_properties(ggml_cuda_graph_node_properties * props, ggml_tensor * node) {
+    props->node_address = node->data;
+    props->node_op = node->op;
     for (int i = 0; i < GGML_MAX_DIMS; i++) {
-        graph_node_properties->ne[i] = node->ne[i];
-        graph_node_properties->nb[i] = node->nb[i];
+        props->ne[i] = node->ne[i];
+        props->nb[i] = node->nb[i];
     }
     for (int i = 0; i < GGML_MAX_SRC; i++) {
-        graph_node_properties->src_address[i] = node->src[i] ? node->src[i]->data : nullptr;
+        props->src_address[i] = node->src[i] ? node->src[i]->data : nullptr;
     }
-    memcpy(graph_node_properties->op_params, node->op_params, GGML_MAX_OP_PARAMS);
+    memcpy(props->op_params, node->op_params, GGML_MAX_OP_PARAMS);
 }
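For context, `ggml_cuda_graph_node_properties` (renamed from `ggml_graph_node_properties`) is the per-node snapshot this setter fills in; its definition is outside this diff. A minimal sketch of the layout implied by the accesses above; the field names come from this diff, while the types are assumptions mirroring `ggml_tensor`:

```cpp
// Sketch only: the real definition lives in the CUDA backend headers.
struct ggml_cuda_graph_node_properties {
    void *  node_address;                  // node->data at snapshot time
    ggml_op node_op;                       // operation type of the node
    int64_t ne[GGML_MAX_DIMS];             // tensor extents
    size_t  nb[GGML_MAX_DIMS];             // tensor strides in bytes
    void *  src_address[GGML_MAX_SRC];     // data pointers of the source tensors
    char    op_params[GGML_MAX_OP_PARAMS]; // raw op parameter bytes (memcpy'd above)
};
```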
-static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) {
-    if (node->data != graph_node_properties->node_address &&
+static bool ggml_cuda_graph_node_properties_match(ggml_tensor * node, ggml_cuda_graph_node_properties * props) {
+    if (node->data != props->node_address &&
         node->op != GGML_OP_VIEW) {
         return false;
     }
 
-    if (node->op != graph_node_properties->node_op) {
+    if (node->op != props->node_op) {
         return false;
     }
 
     for (int i = 0; i < GGML_MAX_DIMS; i++) {
-        if (node->ne[i] != graph_node_properties->ne[i]) {
+        if (node->ne[i] != props->ne[i]) {
             return false;
         }
-        if (node->nb[i] != graph_node_properties->nb[i]) {
+        if (node->nb[i] != props->nb[i]) {
             return false;
         }
     }
 
     for (int i = 0; i < GGML_MAX_SRC; i++) {
         if (node->src[i] &&
-            node->src[i]->data != graph_node_properties->src_address[i] &&
+            node->src[i]->data != props->src_address[i] &&
             node->op != GGML_OP_VIEW
         ) {
             return false;
@@ -2957,56 +2957,55 @@ static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, ggml_gra
     }
 
     if ((node->op == GGML_OP_SCALE || node->op == GGML_OP_GLU) &&
-        memcmp(graph_node_properties->op_params, node->op_params, GGML_MAX_OP_PARAMS) != 0) {
+        memcmp(props->op_params, node->op_params, GGML_MAX_OP_PARAMS) != 0) {
         return false;
     }
 
     return true;
 }
-static bool is_cuda_graph_update_required(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph) {
+static bool ggml_cuda_graph_update_required(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph) {
 
-    bool cuda_graph_update_required = false;
+    bool res = false;
 
     if (cuda_ctx->cuda_graph->instance == nullptr) {
-        cuda_graph_update_required = true;
+        res = true;
     }
 
     // Check if the graph size has changed
-    if (cuda_ctx->cuda_graph->ggml_graph_properties.size() != (size_t)cgraph->n_nodes + cgraph->n_leafs) {
-        cuda_graph_update_required = true;
-        cuda_ctx->cuda_graph->ggml_graph_properties.resize(cgraph->n_nodes + cgraph->n_leafs);
+    if (cuda_ctx->cuda_graph->props.size() != (size_t)cgraph->n_nodes + cgraph->n_leafs) {
+        res = true;
+        cuda_ctx->cuda_graph->props.resize(cgraph->n_nodes + cgraph->n_leafs);
     }
 
     // Loop over nodes in GGML graph to determine if CUDA graph update is required
     // and store properties to allow this comparison for the next token
     for (int i = 0; i < cgraph->n_nodes; i++) {
-        bool has_matching_properties = true;
-
-        if (!cuda_graph_update_required) {
-            has_matching_properties = ggml_graph_node_has_matching_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]);
+        bool props_match = true;
+        if (!res) {
+            props_match = ggml_cuda_graph_node_properties_match(cgraph->nodes[i], &cuda_ctx->cuda_graph->props[i]);
         }
-        if (!has_matching_properties) {
-            cuda_graph_update_required = true;
+        if (!props_match) {
+            res = true;
         }
-        set_ggml_graph_node_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]);
+        ggml_cuda_graph_node_set_properties(&cuda_ctx->cuda_graph->props[i], cgraph->nodes[i]);
     }
 
     for (int i = 0; i < cgraph->n_leafs; i++) {
-        bool has_matching_properties = true;
-        if (!cuda_graph_update_required) {
-            has_matching_properties = ggml_graph_node_has_matching_properties(cgraph->leafs[i], &cuda_ctx->cuda_graph->ggml_graph_properties[cgraph->n_nodes + i]);
+        bool props_match = true;
+        if (!res) {
+            props_match = ggml_cuda_graph_node_properties_match(cgraph->leafs[i], &cuda_ctx->cuda_graph->props[cgraph->n_nodes + i]);
         }
-        if (!has_matching_properties) {
-            cuda_graph_update_required = true;
+        if (!props_match) {
+            res = true;
         }
-        set_ggml_graph_node_properties(cgraph->leafs[i], &cuda_ctx->cuda_graph->ggml_graph_properties[cgraph->n_nodes + i]);
+        ggml_cuda_graph_node_set_properties(&cuda_ctx->cuda_graph->props[cgraph->n_nodes + i], cgraph->leafs[i]);
     }
 
-    return cuda_graph_update_required;
+    return res;
 }
 
-static void update_cuda_graph_executable(ggml_backend_cuda_context * cuda_ctx) {
+static void ggml_cuda_graph_update_executable(ggml_backend_cuda_context * cuda_ctx) {
 
 #if CUDART_VERSION >= 12000
     cudaGraphExecUpdateResultInfo result_info;
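The hunk cuts off inside `ggml_cuda_graph_update_executable`. For orientation, the CUDA 12 update-or-reinstantiate pattern it presumably follows is sketched below; this is an assumption based on the visible `result_info` declaration and the standard `cudaGraphExecUpdate` API, not the verbatim body:

```cpp
#if CUDART_VERSION >= 12000
    cudaGraphExecUpdateResultInfo result_info;
    // Try to patch the existing executable in place; far cheaper than
    // re-instantiating when only kernel parameters (e.g. pointers) changed.
    cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance,
                                           cuda_ctx->cuda_graph->graph, &result_info);
    if (stat == cudaErrorGraphExecUpdateFailure) {
        // Topology changed: drop the old executable and instantiate a new one.
        cudaGetLastError(); // clear the sticky error state
        CUDA_CHECK(cudaGraphExecDestroy(cuda_ctx->cuda_graph->instance));
        cuda_ctx->cuda_graph->instance = nullptr;
        CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance,
                                        cuda_ctx->cuda_graph->graph, NULL, NULL, 0));
    } else {
        CUDA_CHECK(stat);
    }
#endif
```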
@@ -3237,10 +3236,11 @@ static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx,
     return false;
 }
 
-static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph,
-                                            bool & graph_evaluated_or_captured, bool & use_cuda_graph, bool & cuda_graph_update_required) {
+static void ggml_cuda_graph_evaluate_and_capture(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph, const bool use_cuda_graph, const bool cuda_graph_update_required) {
+    bool graph_evaluated_or_captured = false;
+
     // flag used to determine whether it is an integrated_gpu
-    const bool integrated = ggml_cuda_info().devices[cuda_ctx->device].integrated;
+    const bool integrated = ggml_cuda_info().devices[cuda_ctx->device].integrated;
 
     ggml_cuda_stream_context & stream_ctx = cuda_ctx->stream_context();
     bool is_concurrent_event_active = false;
@@ -3710,7 +3710,7 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx
             CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0));
         }
         if (cuda_graph_update_required) { // Update graph executable
-            update_cuda_graph_executable(cuda_ctx);
+            ggml_cuda_graph_update_executable(cuda_ctx);
         }
         // Launch graph
        CUDA_CHECK(cudaGraphLaunch(cuda_ctx->cuda_graph->instance, cuda_ctx->stream()));
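With `graph_evaluated_or_captured` now a local rather than an out-parameter (see the `-3237,10` hunk above), the function's control flow is self-contained. Roughly, and as a sketch only since the per-node dispatch between these hunks is elided:

```cpp
// Sketch of the capture-or-evaluate loop; error handling and node dispatch elided.
while (!graph_evaluated_or_captured) {
    // ... launch each node of cgraph on cuda_ctx->stream(); if the caller put
    // the stream into capture mode, this records work instead of running it ...

    if (use_cuda_graph && cuda_graph_update_required) {
        // Collect the capture started by ggml_backend_cuda_graph_compute().
        if (cuda_ctx->cuda_graph->graph != nullptr) {
            CUDA_CHECK(cudaGraphDestroy(cuda_ctx->cuda_graph->graph));
            cuda_ctx->cuda_graph->graph = nullptr;
        }
        CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_ctx->cuda_graph->graph));
    }
    graph_evaluated_or_captured = true; // instantiate/update/launch follows (hunk above)
}
```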
@@ -3720,43 +3720,25 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx
     }
 }
 
-static bool ggml_cuda_set_cuda_graph_enabled(ggml_backend_cuda_context * cuda_ctx) {
+static bool ggml_cuda_graph_set_enabled(ggml_backend_cuda_context * cuda_ctx) {
 
 #ifdef USE_CUDA_GRAPH
-    static const bool disable_cuda_graphs_due_to_env = (getenv("GGML_CUDA_DISABLE_GRAPHS") != nullptr);
 
-    // Objects required for CUDA Graph
     if (cuda_ctx->cuda_graph == nullptr) {
         cuda_ctx->cuda_graph.reset(new ggml_cuda_graph());
     }
 
-    bool use_cuda_graph = true;
-
     if (cuda_ctx->cuda_graph->graph == nullptr) {
         if (ggml_cuda_info().devices[cuda_ctx->device].cc < GGML_CUDA_CC_AMPERE) {
             cuda_ctx->cuda_graph->disable_due_to_gpu_arch = true;
-#ifndef NDEBUG
             GGML_LOG_DEBUG("%s: disabling CUDA graphs due to GPU architecture\n", __func__);
-#endif
         }
     }
 
-    // Disable CUDA graphs in presence of env var, old GPU, use-case which is changing too rapidly,
-    // or previous graph capture failure.
-    // Also disable for multi-gpu for now. TO DO investigate
-    if (disable_cuda_graphs_due_to_env
-        || cuda_ctx->cuda_graph->disable_due_to_gpu_arch
-        || cuda_ctx->cuda_graph->disable_due_to_too_many_updates
-        || cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture) {
-        use_cuda_graph = false;
-    }
-
-    cuda_ctx->cuda_graph->cuda_graphs_enabled = use_cuda_graph;
+    return cuda_ctx->cuda_graph->is_enabled();
 #else
-    bool use_cuda_graph = false;
+    return false;
 #endif // USE_CUDA_GRAPH
-
-    return use_cuda_graph;
 }
 
 static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
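The env-var, GPU-architecture, too-many-updates, and failed-capture checks that were inlined here move behind `ggml_cuda_graph::is_enabled()`, and the consecutive-update bookkeeping (removed from `ggml_backend_cuda_graph_compute` in the next hunk) moves behind `record_update()`. Both definitions are outside this diff; reconstructed from the removed lines, they plausibly look like:

```cpp
// Sketch reconstructed from the logic this diff removes; the real methods
// live on struct ggml_cuda_graph in the backend headers.
bool ggml_cuda_graph::is_enabled() const {
    static const bool disable_due_to_env = (getenv("GGML_CUDA_DISABLE_GRAPHS") != nullptr);
    return !(disable_due_to_env
          || disable_due_to_gpu_arch
          || disable_due_to_too_many_updates
          || disable_due_to_failed_graph_capture);
}

void ggml_cuda_graph::record_update(bool use_cuda_graph, bool update_required) {
    // Disable CUDA graphs (from the next token) if the use-case keeps
    // demanding consecutive graph updates.
    if (use_cuda_graph && update_required) {
        number_consecutive_updates++;
    } else {
        number_consecutive_updates = 0;
    }
    if (number_consecutive_updates >= 4) {
        disable_due_to_too_many_updates = true;
        GGML_LOG_DEBUG("%s: disabling CUDA graphs due to too many consecutive updates\n", __func__);
    }
}
```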
@@ -3767,30 +3749,14 @@ static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend,
     bool use_cuda_graph = false;
     bool cuda_graph_update_required = false;
 
-    // graph_optimize calls set_cuda_graph_enabled, in-case it not called (i.e. graph_compute is directly called)
-    // we call it here instead.
 #ifdef USE_CUDA_GRAPH
-    use_cuda_graph = ggml_cuda_set_cuda_graph_enabled(cuda_ctx);
-
-    if (use_cuda_graph) {
-        cuda_graph_update_required = is_cuda_graph_update_required(cuda_ctx, cgraph);
-
-        use_cuda_graph = check_node_graph_compatibility(cgraph, use_cuda_graph);
+    use_cuda_graph = ggml_cuda_graph_set_enabled(cuda_ctx);
 
-        // Disable CUDA graphs (from the next token) if the use-case is demanding too many consecutive graph updates.
-        if (use_cuda_graph && cuda_graph_update_required) {
-            cuda_ctx->cuda_graph->number_consecutive_updates++;
-        } else {
-            cuda_ctx->cuda_graph->number_consecutive_updates = 0;
-        }
+    if (cuda_ctx->cuda_graph->is_enabled()) {
+        cuda_graph_update_required = ggml_cuda_graph_update_required(cuda_ctx, cgraph);
+        use_cuda_graph = ggml_cuda_graph_check_compatibility(cgraph);
 
-        if (cuda_ctx->cuda_graph->number_consecutive_updates >= 4) {
-            cuda_ctx->cuda_graph->disable_due_to_too_many_updates = true;
-            cuda_ctx->cuda_graph->cuda_graphs_enabled = false;
-#ifndef NDEBUG
-            GGML_LOG_DEBUG("%s: disabling CUDA graphs due to too many consecutive updates\n", __func__);
-#endif
-        }
+        cuda_ctx->cuda_graph->record_update(use_cuda_graph, cuda_graph_update_required);
     }
 #endif // USE_CUDA_GRAPH
 
@@ -3804,9 +3770,7 @@ static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend,
         CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeRelaxed));
     }
 
-    bool graph_evaluated_or_captured = false;
-
-    evaluate_and_capture_cuda_graph(cuda_ctx, cgraph, graph_evaluated_or_captured, use_cuda_graph, cuda_graph_update_required);
+    ggml_cuda_graph_evaluate_and_capture(cuda_ctx, cgraph, use_cuda_graph, cuda_graph_update_required);
 
     return GGML_STATUS_SUCCESS;
 }
@@ -3839,7 +3803,7 @@ static void ggml_backend_cuda_event_wait(ggml_backend_t backend, ggml_backend_ev
 static void ggml_backend_cuda_graph_optimize(ggml_backend_t backend, ggml_cgraph * cgraph) {
     ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *) backend->context;
 
-    const bool use_cuda_graph = ggml_cuda_set_cuda_graph_enabled(cuda_ctx);
+    const bool use_cuda_graph = ggml_cuda_graph_set_enabled(cuda_ctx);
 
     static bool enable_graph_optimization = [] {
         const char * env = getenv("GGML_CUDA_GRAPH_OPT");