Pārlūkot izejas kodu

llama : allow building all tests on windows when not using shared libs (#13980)

* llama : allow building all tests on windows when not using shared libraries

* add static windows build to ci

* tests : enable debug logs for test-chat

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diego Devesa 7 mēneši atpakaļ
vecāks
revīzija
7f4fbe5183
3 mainīti faili ar 8 papildinājumiem un 4 dzēšanām
  1. 2 2
      .github/workflows/build.yml
  2. 2 2
      tests/CMakeLists.txt
  3. 4 0
      tests/test-chat.cpp

+ 2 - 2
.github/workflows/build.yml

@@ -687,8 +687,8 @@ jobs:
     strategy:
       matrix:
         include:
-          - build: 'cpu-x64'
-            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF'
+          - build: 'cpu-x64 (static)'
+            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF'
           - build: 'openblas-x64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
           - build: 'vulkan-x64'

+ 2 - 2
tests/CMakeLists.txt

@@ -104,8 +104,8 @@ if (LLAMA_LLGUIDANCE)
     llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
 endif ()
 
-if (NOT WIN32)
-    # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API
+if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
+    # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API (when building with shared libraries)
     llama_build_and_test(test-sampling.cpp)
     llama_build_and_test(test-grammar-parser.cpp)
     llama_build_and_test(test-grammar-integration.cpp)

+ 4 - 0
tests/test-chat.cpp

@@ -7,6 +7,8 @@
 //
 #include "chat.h"
 
+#include "log.h"
+
 #include "../src/unicode.h"
 #include "../src/llama-grammar.h"
 
@@ -1428,6 +1430,8 @@ static void test_msg_diffs_compute() {
 }
 
 int main(int argc, char ** argv) {
+    common_log_set_verbosity_thold(999);
+
     // try {
 #ifndef _WIN32
         if (argc > 1) {