@@ -868,7 +868,7 @@ do { \
     const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
                                  _mm256_extractf128_ps(x[0], 1)); \
     const __m128 t1 = _mm_hadd_ps(t0, t0); \
-    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
+    res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
 } while (0)
 // TODO: is this optimal ?

@@ -1149,7 +1149,7 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
         x[i] = _mm_add_ps(x[i], x[offset+i]); \
     } \
     const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
-    res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
+    res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
 }
 // TODO: is this optimal ?

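Note: in both reduce macros above, _mm_cvtss_f32() returns a float while the destination res is a ggml_float (a double in ggml), so the assignment already performed an implicit widening; the added cast presumably just makes that promotion explicit so implicit-conversion warnings (e.g. -Wdouble-promotion) stay quiet. A minimal sketch of the same pattern, with the ggml_float typedef assumed to match ggml.c:

    #include <xmmintrin.h>

    typedef double ggml_float;   /* assumed to match the typedef in ggml.c */

    /* The cast documents the intentional float -> double widening of the
     * horizontal-sum result, keeping implicit-conversion warnings quiet. */
    static ggml_float reduce_scalar(__m128 v) {
        return (ggml_float) _mm_cvtss_f32(v);
    }
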
@@ -2086,6 +2086,7 @@ void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
         }
     }
 #else
+    GGML_UNUSED(numa_flag);
     // TODO
 #endif
 }
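Note: on builds where the NUMA code is compiled out, the #else branch never reads numa_flag, so the parameter would otherwise trip -Wunused-parameter. A minimal sketch of the usual cast-to-void idiom behind such a macro (only the GGML_UNUSED name comes from the diff; the expansion shown here is an assumption, ggml ships its own definition):

    /* assumed expansion of the suppression macro */
    #define GGML_UNUSED(x) (void)(x)

    static void init_stub(int flag) {
        GGML_UNUSED(flag);   /* evaluates the argument and discards it, so an
                                otherwise unused parameter raises no warning */
    }
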
@@ -3219,7 +3220,7 @@ const char * ggml_get_name(const struct ggml_tensor * tensor) {
 }

 struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
-    strncpy(tensor->name, name, sizeof(tensor->name));
+    strncpy(tensor->name, name, sizeof(tensor->name) - 1);
     tensor->name[sizeof(tensor->name) - 1] = '\0';
     return tensor;
 }
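Note: bounding strncpy() by sizeof(tensor->name) - 1 leaves room for the terminator written on the following line, and it is the usual way to keep GCC's -Wstringop-truncation diagnostic quiet while still guaranteeing a NUL-terminated name. A generic sketch of the pattern (the helper name and signature are illustrative, not part of ggml):

    #include <stddef.h>
    #include <string.h>

    /* Copy at most dst_size - 1 bytes and always NUL-terminate the result. */
    static void copy_bounded(char *dst, size_t dst_size, const char *src) {
        strncpy(dst, src, dst_size - 1);
        dst[dst_size - 1] = '\0';
    }
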
@@ -18575,7 +18576,9 @@ static enum ggml_opt_result linesearch_backtracking(
         (*step) *= width;
     }

-    GGML_UNREACHABLE();
+    GGML_ASSERT(false && "line search failed");
+
+    return GGML_LINESEARCH_FAIL;
 }

 static enum ggml_opt_result ggml_opt_lbfgs(
@@ -18843,7 +18846,9 @@ static enum ggml_opt_result ggml_opt_lbfgs(
         step[0] = 1.0;
     }

-    GGML_UNREACHABLE();
+    GGML_ASSERT(false && "lbfgs failed");
+
+    return GGML_OPT_DID_NOT_CONVERGE;
 }

 struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
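Note: in both optimizer routines above, the code after the loop had been marked unreachable; asserting and then returning an explicit failure code means the function still returns a well-defined value on that path even in builds where the assertion is compiled out. A standalone sketch of the assert-then-return pattern, using plain assert() and an illustrative result enum rather than ggml's own macros and constants:

    #include <assert.h>
    #include <stdbool.h>

    enum opt_result { OPT_OK = 0, OPT_DID_NOT_CONVERGE = 1 };   /* illustrative */

    static bool converged(int i) { return i > 3; }   /* stand-in for the real test */

    static enum opt_result optimize(int max_iters) {
        for (int i = 0; i < max_iters; ++i) {
            if (converged(i)) {
                return OPT_OK;
            }
        }
        /* failure path: keep a loud debug-time check, but also return an
         * explicit error code so the function yields a defined value even when
         * asserts are compiled out (NDEBUG), mirroring the pattern above */
        assert(!"optimizer failed to converge");
        return OPT_DID_NOT_CONVERGE;
    }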