
Removes multiple newlines at the end of files that are breaking the editorconfig step of CI. (#8258)

Clint Herron 1 year ago
Commit
07a3fc0608
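
For context: the editorconfig CI step flags files that end with more than one newline, assuming the repo's `.editorconfig` enables `insert_final_newline = true` as is typical. Below is a minimal sketch of the kind of cleanup this commit performs, trimming each file to exactly one trailing newline; the `trim_trailing_newlines` helper is hypothetical, not the tool actually used for this commit:

```python
#!/usr/bin/env python3
# Trim every file passed on the command line so it ends with
# exactly one newline, matching what the editorconfig check expects.
import sys

def trim_trailing_newlines(path: str) -> None:
    with open(path, "rb") as f:
        data = f.read()
    if not data:
        return  # leave empty files untouched
    # Strip all trailing CR/LF bytes, then restore a single LF.
    fixed = data.rstrip(b"\r\n") + b"\n"
    if fixed != data:
        with open(path, "wb") as f:
            f.write(fixed)
        print(f"fixed: {path}")

if __name__ == "__main__":
    for path in sys.argv[1:]:
        trim_trailing_newlines(path)
```

Run it over the affected files (e.g. `python3 trim.py common/common.h examples/infill/infill.cpp ...`) and only files that actually change are rewritten, so the diff stays limited to the offending trailing blank lines, as in the hunks below.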

+ 0 - 2
.github/ISSUE_TEMPLATE/config.yml

@@ -9,5 +9,3 @@ contact_links:
   - name: Want to contribute?
     url: https://github.com/ggerganov/llama.cpp/wiki/contribute
     about: Head to the contribution guide page of the wiki for areas you can help with
-
-

+ 0 - 1
common/common.h

@@ -459,4 +459,3 @@ void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const cha
 void yaml_dump_non_result_info(
     FILE * stream, const gpt_params & params, const llama_context * lctx,
     const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
-

+ 0 - 1
examples/embedding/README.md

@@ -58,4 +58,3 @@ The above command will output space-separated float values.
 ```powershell
 embedding.exe -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --embd-separator '<#sep#>' --embd-normalize 2  --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null
 ```
-

+ 0 - 1
examples/infill/infill.cpp

@@ -659,4 +659,3 @@ int main(int argc, char ** argv) {
 
     return 0;
 }
-

+ 0 - 1
examples/lookup/README.md

@@ -10,4 +10,3 @@ More info:
 
 https://github.com/ggerganov/llama.cpp/pull/4484
 https://github.com/ggerganov/llama.cpp/issues/4226
-

+ 0 - 1
examples/main-cmake-pkg/.gitignore

@@ -48,4 +48,3 @@
 build*/
 out/
 tmp/
-

+ 0 - 1
examples/main-cmake-pkg/CMakeLists.txt

@@ -30,4 +30,3 @@ target_include_directories(${TARGET} PRIVATE ${_common_path})
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
-

+ 0 - 1
examples/server-embd.py

@@ -31,4 +31,3 @@ for i in range(n-1):
         embedding2 = np.array(result[j])
         similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
         print(f"Similarity between {i} and {j}: {similarity:.2f}")
-

+ 0 - 1
examples/server/tests/features/passkey.feature

@@ -52,4 +52,3 @@ Feature: Passkey / Self-extend with context shift
       #| TheBloke/Llama-2-7B-GGUF        | llama-2-7b.Q2_K.gguf        | 4096        | 3   | 16384 | 512     | 4    | 512    | 500    | 300   | 1234    | 5           | 1234           |
       #| TheBloke/Mixtral-8x7B-v0.1-GGUF | mixtral-8x7b-v0.1.Q2_K.gguf | 32768       | 2   | 16384 | 512     | 4    | 512    | 500    | 100   | 0987    | 5           | 0
       # 987           |
-

+ 0 - 1
examples/server/themes/buttons-top/index.html

@@ -1054,4 +1054,3 @@
 </body>
 
 </html>
-

+ 0 - 1
examples/server/themes/wild/index.html

@@ -1058,4 +1058,3 @@
 </body>
 
 </html>
-

+ 0 - 1
examples/sycl/run-llama2.sh

@@ -34,4 +34,3 @@ fi
 
 #use multiple GPUs with same max compute units
 #ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0
-

+ 0 - 1
examples/sycl/win-build-sycl.bat

@@ -31,4 +31,3 @@ exit /B 0
 :ERROR
 echo comomand error: %errorlevel%
 exit /B %errorlevel%
-

+ 0 - 2
examples/sycl/win-run-llama2.bat

@@ -7,5 +7,3 @@ set INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"
 
 
 .\build\bin\main.exe -m models\llama-2-7b.Q4_0.gguf -p %INPUT2% -n 400 -e -ngl 33 -s 0
-
-

+ 0 - 1
ggml/include/ggml-metal.h

@@ -63,4 +63,3 @@ GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
 #ifdef __cplusplus
 }
 #endif
-

+ 0 - 1
ggml/src/ggml-cuda/cpy.cu

@@ -487,4 +487,3 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
         GGML_ASSERT(false);
     }
 }
-

+ 0 - 1
ggml/src/ggml-metal.metal

@@ -6537,4 +6537,3 @@ template [[host_name("kernel_mul_mv_id_iq3_s_f32")]]   kernel kernel_mul_mv_id_t
 template [[host_name("kernel_mul_mv_id_iq2_s_f32")]]   kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_s_f32_impl>>;
 template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]]  kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl>>;
 template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]]  kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl>>;
-

+ 0 - 1
ggml/src/ggml-quants.h

@@ -130,4 +130,3 @@ void iq3xs_free_impl(int grid_size);
 #ifdef __cplusplus
 }
 #endif
-

+ 0 - 1
ggml/src/ggml-vulkan-shaders.hpp

@@ -144954,4 +144954,3 @@ unsigned char sum_rows_f32_data[] = {
 
 };
 const uint64_t sum_rows_f32_len = 2112;
-

+ 0 - 1
scripts/pod-llama.sh

@@ -210,4 +210,3 @@ fi
 # more benches
 #GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-7b/ggml-model-q4_k.gguf  4096 1 99 1 512,3200 128,128,800 1
 #GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-13b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1
-

+ 0 - 1
src/unicode-data.cpp

@@ -7030,4 +7030,3 @@ const std::vector<range_nfd> unicode_ranges_nfd = {  // start, last, nfd
 {0x02FA1C, 0x02FA1C, 0x009F3B},
 {0x02FA1D, 0x02FA1D, 0x02A600},
 };
-

+ 0 - 1
tests/test-rope.cpp

@@ -218,4 +218,3 @@ int main(int /*argc*/, const char ** /*argv*/) {
 
     return 0;
 }
-