Skip to content

Commit c9a3dc8

Browse files
committed
ci : update LLAMA_ -> GGML_ prefix
ggml-ci
1 parent 5b1490a commit c9a3dc8

File tree

4 files changed

+40
-40
lines changed

4 files changed

+40
-40
lines changed

.github/workflows/bench.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ jobs:
109109
run: |
110110
set -eux
111111
cmake -B build \
112-
-DLLAMA_NATIVE=OFF \
112+
-DGGML_NATIVE=OFF \
113113
-DLLAMA_BUILD_SERVER=ON \
114114
-DLLAMA_CURL=ON \
115115
-DLLAMA_CUBLAS=ON \

.github/workflows/build.yml

Lines changed: 31 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ jobs:
4747
sysctl -a
4848
mkdir build
4949
cd build
50-
cmake -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL_EMBED_LIBRARY=ON -DLLAMA_CURL=ON ..
50+
cmake -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL_EMBED_LIBRARY=ON -DLLAMA_CURL=ON ..
5151
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
5252
5353
- name: Test
@@ -105,7 +105,7 @@ jobs:
105105
sysctl -a
106106
# Metal is disabled due to intermittent failures with Github runners not having a GPU:
107107
# https://github.com/ggerganov/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
108-
cmake -B build -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL=OFF -DLLAMA_CURL=ON
108+
cmake -B build -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL=OFF -DLLAMA_CURL=ON
109109
cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)
110110
111111
- name: Test
@@ -305,7 +305,7 @@ jobs:
305305
run: |
306306
mkdir build
307307
cd build
308-
cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DLLAMA_OPENMP=OFF
308+
cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DGGML_OPENMP=OFF
309309
cmake --build . --config ${{ matrix.build_type }} -j $(nproc)
310310
311311
- name: Test
@@ -335,7 +335,7 @@ jobs:
335335
run: |
336336
mkdir build
337337
cd build
338-
cmake -DLLAMA_RPC=ON ..
338+
cmake -DGGML_RPC=ON ..
339339
cmake --build . --config Release -j $(nproc)
340340
341341
- name: Test
@@ -363,7 +363,7 @@ jobs:
363363
run: |
364364
mkdir build
365365
cd build
366-
cmake -DLLAMA_VULKAN=ON ..
366+
cmake -DGGML_VULKAN=ON ..
367367
cmake --build . --config Release -j $(nproc)
368368
369369
ubuntu-22-cmake-hip:
@@ -384,13 +384,13 @@ jobs:
384384
- name: Build with native CMake HIP support
385385
id: cmake_build
386386
run: |
387-
cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DLLAMA_HIPBLAS=ON
387+
cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DGGML_HIPBLAS=ON
388388
cmake --build build --config Release -j $(nproc)
389389
390390
- name: Build with legacy HIP support
391391
id: cmake_build_legacy_hip
392392
run: |
393-
cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DLLAMA_HIPBLAS=ON
393+
cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DGGML_HIPBLAS=ON
394394
cmake --build build2 --config Release -j $(nproc)
395395
396396
ubuntu-22-cmake-sycl:
@@ -431,7 +431,7 @@ jobs:
431431
source /opt/intel/oneapi/setvars.sh
432432
mkdir build
433433
cd build
434-
cmake -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
434+
cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
435435
cmake --build . --config Release -j $(nproc)
436436
437437
ubuntu-22-cmake-sycl-fp16:
@@ -472,10 +472,10 @@ jobs:
472472
source /opt/intel/oneapi/setvars.sh
473473
mkdir build
474474
cd build
475-
cmake -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON ..
475+
cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON ..
476476
cmake --build . --config Release -j $(nproc)
477477
478-
# TODO: build with LLAMA_NO_METAL because test-backend-ops fail on "Apple Paravirtual device" and I don't know
478+
# TODO: build with GGML_NO_METAL because test-backend-ops fail on "Apple Paravirtual device" and I don't know
479479
# how to debug it.
480480
# ref: https://github.com/ggerganov/llama.cpp/actions/runs/7131777249/job/19420981052#step:5:1124
481481
macOS-latest-make:
@@ -497,15 +497,15 @@ jobs:
497497
env:
498498
LLAMA_FATAL_WARNINGS: 1
499499
run: |
500-
LLAMA_NO_METAL=1 make -j $(sysctl -n hw.logicalcpu)
500+
GGML_NO_METAL=1 make -j $(sysctl -n hw.logicalcpu)
501501
502502
- name: Test
503503
id: make_test
504504
run: |
505-
LLAMA_NO_METAL=1 make tests -j $(sysctl -n hw.logicalcpu)
506-
LLAMA_NO_METAL=1 make test -j $(sysctl -n hw.logicalcpu)
505+
GGML_NO_METAL=1 make tests -j $(sysctl -n hw.logicalcpu)
506+
GGML_NO_METAL=1 make test -j $(sysctl -n hw.logicalcpu)
507507
508-
# TODO: build with LLAMA_METAL=OFF because test-backend-ops fail on "Apple Paravirtual device" and I don't know
508+
# TODO: build with GGML_METAL=OFF because test-backend-ops fail on "Apple Paravirtual device" and I don't know
509509
# how to debug it.
510510
# ref: https://github.com/ggerganov/llama.cpp/actions/runs/7132125951/job/19422043567?pr=4359#step:5:6584
511511
# would be great if we fix these
@@ -529,7 +529,7 @@ jobs:
529529
sysctl -a
530530
mkdir build
531531
cd build
532-
cmake -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL=OFF ..
532+
cmake -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL=OFF ..
533533
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
534534
535535
- name: Test
@@ -559,7 +559,7 @@ jobs:
559559
mkdir build
560560
cd build
561561
cmake -G Xcode .. \
562-
-DLLAMA_METAL_EMBED_LIBRARY=ON \
562+
-DGGML_METAL_EMBED_LIBRARY=ON \
563563
-DLLAMA_BUILD_EXAMPLES=OFF \
564564
-DLLAMA_BUILD_TESTS=OFF \
565565
-DLLAMA_BUILD_SERVER=OFF \
@@ -588,7 +588,7 @@ jobs:
588588
mkdir build
589589
cd build
590590
cmake -G Xcode .. \
591-
-DLLAMA_METAL_EMBED_LIBRARY=ON \
591+
-DGGML_METAL_EMBED_LIBRARY=ON \
592592
-DLLAMA_BUILD_EXAMPLES=OFF \
593593
-DLLAMA_BUILD_TESTS=OFF \
594594
-DLLAMA_BUILD_SERVER=OFF \
@@ -662,7 +662,7 @@ jobs:
662662
- name: Build using make w/ OpenBLAS
663663
shell: msys2 {0}
664664
run: |
665-
make LLAMA_OPENBLAS=1 -j $(nproc)
665+
make GGML_OPENBLAS=1 -j $(nproc)
666666
667667
- name: Build using CMake
668668
shell: msys2 {0}
@@ -678,7 +678,7 @@ jobs:
678678
- name: Build using CMake w/ OpenBLAS
679679
shell: msys2 {0}
680680
run: |
681-
cmake -B build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS
681+
cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
682682
cmake --build build --config ${{ matrix.build }} -j $(nproc)
683683
684684
windows-latest-cmake:
@@ -693,25 +693,25 @@ jobs:
693693
matrix:
694694
include:
695695
- build: 'rpc-x64'
696-
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_RPC=ON -DBUILD_SHARED_LIBS=ON'
696+
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=ON'
697697
- build: 'noavx-x64'
698-
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DBUILD_SHARED_LIBS=ON'
698+
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF -DBUILD_SHARED_LIBS=ON'
699699
- build: 'avx2-x64'
700-
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
700+
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
701701
- build: 'avx-x64'
702-
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX2=OFF -DBUILD_SHARED_LIBS=ON'
702+
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_AVX2=OFF -DBUILD_SHARED_LIBS=ON'
703703
- build: 'avx512-x64'
704-
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'
704+
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_AVX512=ON -DBUILD_SHARED_LIBS=ON'
705705
- build: 'openblas-x64'
706-
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
706+
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_BLAS=ON -DBUILD_SHARED_LIBS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
707707
- build: 'kompute-x64'
708-
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON'
708+
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON'
709709
- build: 'vulkan-x64'
710-
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_VULKAN=ON -DBUILD_SHARED_LIBS=ON'
710+
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_VULKAN=ON -DBUILD_SHARED_LIBS=ON'
711711
- build: 'llvm-arm64'
712-
defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
712+
defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
713713
- build: 'msvc-arm64'
714-
defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
714+
defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
715715

716716
steps:
717717
- name: Clone
@@ -854,7 +854,7 @@ jobs:
854854
run: |
855855
mkdir build
856856
cd build
857-
cmake .. -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=ON
857+
cmake .. -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DBUILD_SHARED_LIBS=ON
858858
cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS}
859859
860860
- name: Determine tag name
@@ -987,7 +987,7 @@ jobs:
987987
run: |
988988
$env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
989989
$env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
990-
cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DLLAMA_HIPBLAS=ON
990+
cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DGGML_HIPBLAS=ON
991991
cmake --build build --config Release
992992
993993
ios-xcode-build:

.github/workflows/server.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,20 +92,20 @@ jobs:
9292
if: ${{ matrix.sanitizer == 'THREAD' }}
9393
run: |
9494
cmake -B build \
95-
-DLLAMA_NATIVE=OFF \
95+
-DGGML_NATIVE=OFF \
9696
-DLLAMA_BUILD_SERVER=ON \
9797
-DLLAMA_CURL=ON \
9898
-DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
9999
-DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \
100-
-DLLAMA_OPENMP=OFF ;
100+
-DGGML_OPENMP=OFF ;
101101
cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server
102102
103103
- name: Build
104104
id: cmake_build
105105
if: ${{ matrix.sanitizer != 'THREAD' }}
106106
run: |
107107
cmake -B build \
108-
-DLLAMA_NATIVE=OFF \
108+
-DGGML_NATIVE=OFF \
109109
-DLLAMA_BUILD_SERVER=ON \
110110
-DLLAMA_CURL=ON \
111111
-DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \

ci/run.sh

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -36,11 +36,11 @@ SRC=`pwd`
3636
CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON"
3737

3838
if [ ! -z ${GG_BUILD_METAL} ]; then
39-
CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_METAL_SHADER_DEBUG=ON"
39+
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON -DGGML_METAL_EMBED_LIBRARY=ON"
4040
fi
4141

4242
if [ ! -z ${GG_BUILD_CUDA} ]; then
43-
CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_CUDA=1"
43+
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_CUDA=1"
4444
fi
4545

4646
if [ ! -z ${GG_BUILD_SYCL} ]; then
@@ -50,7 +50,7 @@ if [ ! -z ${GG_BUILD_SYCL} ]; then
5050
exit 1
5151
fi
5252

53-
CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_SYCL=1 -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON"
53+
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_SYCL=1 -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON"
5454
fi
5555
## helpers
5656

@@ -284,7 +284,7 @@ function gg_run_open_llama_7b_v2 {
284284

285285
set -e
286286

287-
(time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
287+
(time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DGGML_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
288288
(time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
289289

290290
python3 ../examples/convert-legacy-llama.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
@@ -550,7 +550,7 @@ function gg_run_pythia_2_8b {
550550

551551
set -e
552552

553-
(time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
553+
(time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DGGML_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
554554
(time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
555555

556556
python3 ../convert-hf-to-gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

0 commit comments

Comments
 (0)