Skip to content

Commit 17b02d5

Browse files
bandoti authored and ggerganov committed
cmake: add ggml find package (ggml-org#11369)
* Add initial ggml cmake package * Add build numbers to ggml find-package * Expand variables with GGML_ prefix * Guard against adding to cache variable twice * Add git to msys2 workflow * Handle ggml-cpu-* variants * Link ggml/ggml-base libraries to their targets * Replace main-cmake-pkg with simple-cmake-pkg * Interface features require c_std_90 * Fix typo * Removed unnecessary bracket from status message * Update examples/simple-cmake-pkg/README.md Co-authored-by: Georgi Gerganov <[email protected]> * Update examples/simple-cmake-pkg/README.md Co-authored-by: Georgi Gerganov <[email protected]> --------- Co-authored-by: Georgi Gerganov <[email protected]>
1 parent a73981f commit 17b02d5

File tree

11 files changed

+284
-233
lines changed

11 files changed

+284
-233
lines changed

.github/workflows/build.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -613,6 +613,7 @@ jobs:
613613
msystem: ${{matrix.sys}}
614614
install: >-
615615
base-devel
616+
git
616617
mingw-w64-${{matrix.env}}-toolchain
617618
mingw-w64-${{matrix.env}}-cmake
618619
mingw-w64-${{matrix.env}}-openblas

CMakeLists.txt

Lines changed: 5 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -188,27 +188,14 @@ set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location o
188188
set(LLAMA_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files")
189189
set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files")
190190

191-
# At the moment some compile definitions are placed within the ggml/src
192-
# directory but not exported on the `ggml` target. This could be improved by
193-
# determining _precisely_ which defines are necessary for the llama-config
194-
# package.
195-
#
196-
set(GGML_TRANSIENT_DEFINES)
197-
get_target_property(GGML_DIRECTORY ggml SOURCE_DIR)
198-
get_directory_property(GGML_DIR_DEFINES DIRECTORY ${GGML_DIRECTORY} COMPILE_DEFINITIONS)
199-
if (GGML_DIR_DEFINES)
200-
list(APPEND GGML_TRANSIENT_DEFINES ${GGML_DIR_DEFINES})
201-
endif()
202-
get_target_property(GGML_TARGET_DEFINES ggml COMPILE_DEFINITIONS)
203-
if (GGML_TARGET_DEFINES)
204-
list(APPEND GGML_TRANSIENT_DEFINES ${GGML_TARGET_DEFINES})
205-
endif()
206-
get_target_property(GGML_LINK_LIBRARIES ggml LINK_LIBRARIES)
207-
# all public headers
208191
set(LLAMA_PUBLIC_HEADERS
209192
${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h
210193
${CMAKE_CURRENT_SOURCE_DIR}/include/llama-cpp.h)
211-
set_target_properties(llama PROPERTIES PUBLIC_HEADER "${LLAMA_PUBLIC_HEADERS}")
194+
195+
set_target_properties(llama
196+
PROPERTIES
197+
PUBLIC_HEADER "${LLAMA_PUBLIC_HEADERS}")
198+
212199
install(TARGETS llama LIBRARY PUBLIC_HEADER)
213200

214201
configure_package_config_file(

cmake/llama-config.cmake.in

Lines changed: 4 additions & 152 deletions
Original file line numberDiff line numberDiff line change
@@ -3,159 +3,13 @@ set(LLAMA_BUILD_COMMIT @LLAMA_BUILD_COMMIT@)
33
set(LLAMA_BUILD_NUMBER @LLAMA_BUILD_NUMBER@)
44
set(LLAMA_SHARED_LIB @BUILD_SHARED_LIBS@)
55

6-
set(GGML_STATIC @GGML_STATIC@)
7-
set(GGML_NATIVE @GGML_NATIVE@)
8-
set(GGML_LTO @GGML_LTO@)
9-
set(GGML_CCACHE @GGML_CCACHE@)
10-
set(GGML_AVX @GGML_AVX@)
11-
set(GGML_AVX2 @GGML_AVX2@)
12-
set(GGML_AVX512 @GGML_AVX512@)
13-
set(GGML_AVX512_VBMI @GGML_AVX512_VBMI@)
14-
set(GGML_AVX512_VNNI @GGML_AVX512_VNNI@)
15-
set(GGML_AVX512_BF16 @GGML_AVX512_BF16@)
16-
set(GGML_AMX_TILE @GGML_AMX_TILE@)
17-
set(GGML_AMX_INT8 @GGML_AMX_INT8@)
18-
set(GGML_AMX_BF16 @GGML_AMX_BF16@)
19-
set(GGML_FMA @GGML_FMA@)
20-
set(GGML_LASX @GGML_LASX@)
21-
set(GGML_LSX @GGML_LSX@)
22-
set(GGML_RVV @GGML_RVV@)
23-
set(GGML_SVE @GGML_SVE@)
24-
25-
set(GGML_ACCELERATE @GGML_ACCELERATE@)
26-
set(GGML_OPENMP @GGML_OPENMP@)
27-
set(GGML_CPU_HBM @GGML_CPU_HBM@)
28-
set(GGML_BLAS_VENDOR @GGML_BLAS_VENDOR@)
29-
30-
set(GGML_CUDA_FORCE_MMQ @GGML_CUDA_FORCE_MMQ@)
31-
set(GGML_CUDA_FORCE_CUBLAS @GGML_CUDA_FORCE_CUBLAS@)
32-
set(GGML_CUDA_F16 @GGML_CUDA_F16@)
33-
set(GGML_CUDA_PEER_MAX_BATCH_SIZE @GGML_CUDA_PEER_MAX_BATCH_SIZE@)
34-
set(GGML_CUDA_NO_PEER_COPY @GGML_CUDA_NO_PEER_COPY@)
35-
set(GGML_CUDA_NO_VMM @GGML_CUDA_NO_VMM@)
36-
set(GGML_CUDA_FA_ALL_QUANTS @GGML_CUDA_FA_ALL_QUANTS@)
37-
set(GGML_CUDA_GRAPHS @GGML_CUDA_GRAPHS@)
38-
39-
set(GGML_HIP_UMA @GGML_HIP_UMA@)
40-
41-
set(GGML_VULKAN_CHECK_RESULTS @GGML_VULKAN_CHECK_RESULTS@)
42-
set(GGML_VULKAN_DEBUG @GGML_VULKAN_DEBUG@)
43-
set(GGML_VULKAN_MEMORY_DEBUG @GGML_VULKAN_MEMORY_DEBUG@)
44-
set(GGML_VULKAN_SHADER_DEBUG_INFO @GGML_VULKAN_SHADER_DEBUG_INFO@)
45-
set(GGML_VULKAN_PERF @GGML_VULKAN_PERF@)
46-
set(GGML_VULKAN_VALIDATE @GGML_VULKAN_VALIDATE@)
47-
set(GGML_VULKAN_RUN_TESTS @GGML_VULKAN_RUN_TESTS@)
48-
49-
set(GGML_METAL_USE_BF16 @GGML_METAL_USE_BF16@)
50-
set(GGML_METAL_NDEBUG @GGML_METAL_NDEBUG@)
51-
set(GGML_METAL_SHADER_DEBUG @GGML_METAL_SHADER_DEBUG@)
52-
set(GGML_METAL_EMBED_LIBRARY @GGML_METAL_EMBED_LIBRARY@)
53-
set(GGML_METAL_MACOSX_VERSION_MIN @GGML_METAL_MACOSX_VERSION_MIN@)
54-
set(GGML_METAL_STD @GGML_METAL_STD@)
55-
56-
set(GGML_SYCL_F16 @GGML_SYCL_F16@)
57-
set(GGML_SYCL_TARGET @GGML_SYCL_TARGET@)
58-
set(GGML_SYCL_DEVICE_ARCH @GGML_SYCL_DEVICE_ARCH@)
59-
60-
616
@PACKAGE_INIT@
627

638
set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@")
649
set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@")
6510
set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@")
6611

67-
find_package(Threads REQUIRED)
68-
69-
set(_llama_transient_defines "@GGML_TRANSIENT_DEFINES@")
70-
set(_llama_link_deps "")
71-
set(_llama_link_opts "")
72-
foreach(_ggml_lib ggml ggml-base)
73-
string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY")
74-
find_library(${_ggml_lib_var} ${_ggml_lib}
75-
REQUIRED
76-
HINTS ${LLAMA_LIB_DIR}
77-
NO_CMAKE_FIND_ROOT_PATH
78-
)
79-
list(APPEND _llama_link_deps "${${_ggml_lib_var}}")
80-
message(STATUS "Found ${${_ggml_lib_var}}")
81-
endforeach()
82-
83-
foreach(backend amx blas cann cpu cuda hip kompute metal musa rpc sycl vulkan)
84-
string(TOUPPER "GGML_${backend}" backend_id)
85-
set(_ggml_lib "ggml-${backend}")
86-
string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY")
87-
88-
find_library(${_ggml_lib_var} ${_ggml_lib}
89-
HINTS ${LLAMA_LIB_DIR}
90-
NO_CMAKE_FIND_ROOT_PATH
91-
)
92-
if(${_ggml_lib_var})
93-
list(APPEND _llama_link_deps "${${_ggml_lib_var}}")
94-
set(${backend_id} ON)
95-
message(STATUS "Found backend ${${_ggml_lib_var}}")
96-
else()
97-
set(${backend_id} OFF)
98-
endif()
99-
endforeach()
100-
101-
if (NOT LLAMA_SHARED_LIB)
102-
if (APPLE AND GGML_ACCELERATE)
103-
find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED)
104-
list(APPEND _llama_link_deps ${ACCELERATE_FRAMEWORK})
105-
endif()
106-
107-
if (GGML_OPENMP)
108-
find_package(OpenMP REQUIRED)
109-
list(APPEND _llama_link_deps OpenMP::OpenMP_C OpenMP::OpenMP_CXX)
110-
endif()
111-
112-
if (GGML_CPU_HBM)
113-
find_library(memkind memkind REQUIRED)
114-
list(APPEND _llama_link_deps memkind)
115-
endif()
116-
117-
if (GGML_BLAS)
118-
find_package(BLAS REQUIRED)
119-
list(APPEND _llama_link_deps ${BLAS_LIBRARIES})
120-
list(APPEND _llama_link_opts ${BLAS_LINKER_FLAGS})
121-
endif()
122-
123-
if (GGML_CUDA)
124-
find_package(CUDAToolkit REQUIRED)
125-
endif()
126-
127-
if (GGML_METAL)
128-
find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
129-
find_library(METAL_FRAMEWORK Metal REQUIRED)
130-
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
131-
list(APPEND _llama_link_deps ${FOUNDATION_LIBRARY}
132-
${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK})
133-
endif()
134-
135-
if (GGML_VULKAN)
136-
find_package(Vulkan REQUIRED)
137-
list(APPEND _llama_link_deps Vulkan::Vulkan)
138-
endif()
139-
140-
if (GGML_HIP)
141-
find_package(hip REQUIRED)
142-
find_package(hipblas REQUIRED)
143-
find_package(rocblas REQUIRED)
144-
list(APPEND _llama_link_deps hip::host roc::rocblas roc::hipblas)
145-
endif()
146-
147-
if (GGML_SYCL)
148-
find_package(DNNL)
149-
if (${DNNL_FOUND} AND GGML_SYCL_TARGET STREQUAL "INTEL")
150-
list(APPEND _llama_link_deps DNNL::dnnl)
151-
endif()
152-
if (WIN32)
153-
find_package(IntelSYCL REQUIRED)
154-
find_package(MKL REQUIRED)
155-
list(APPEND _llama_link_deps IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL)
156-
endif()
157-
endif()
158-
endif()
12+
find_package(ggml REQUIRED)
15913

16014
find_library(llama_LIBRARY llama
16115
REQUIRED
@@ -167,12 +21,10 @@ add_library(llama UNKNOWN IMPORTED)
16721
set_target_properties(llama
16822
PROPERTIES
16923
INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}"
170-
INTERFACE_LINK_LIBRARIES "${_llama_link_deps}"
171-
INTERFACE_LINK_OPTIONS "${_llama_link_opts}"
172-
INTERFACE_COMPILE_DEFINITIONS "${_llama_transient_defines}"
24+
INTERFACE_LINK_LIBRARIES "ggml::ggml;ggml::ggml-base;"
17325
IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
17426
IMPORTED_LOCATION "${llama_LIBRARY}"
175-
INTERFACE_COMPILE_FEATURES cxx_std_11
176-
POSITION_INDEPENDENT_CODE ON )
27+
INTERFACE_COMPILE_FEATURES c_std_90
28+
POSITION_INDEPENDENT_CODE ON)
17729

17830
check_required_components(Llama)

examples/main-cmake-pkg/CMakeLists.txt

Lines changed: 0 additions & 32 deletions
This file was deleted.

examples/main-cmake-pkg/README.md

Lines changed: 0 additions & 31 deletions
This file was deleted.
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
cmake_minimum_required(VERSION 3.12)
project(llama-simple-cmake-pkg)

set(TARGET llama-simple-cmake-pkg)

# Locate the installed llama.cpp package; it provides the imported `llama`
# target plus the ggml targets (ggml::all) via its bundled ggml find-package.
find_package(Llama REQUIRED)

# The original linked ${CMAKE_THREAD_LIBS_INIT} without ever calling
# find_package(Threads), leaving that variable undefined in this project.
# Find Threads explicitly and link the imported target instead.
find_package(Threads REQUIRED)

# Reuse the simple example's source from its sibling directory.
add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../simple/simple.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama ggml::all Threads::Threads)
target_compile_features(${TARGET} PRIVATE cxx_std_17)

examples/simple-cmake-pkg/README.md

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# llama.cpp/example/simple-cmake-pkg
2+
3+
This program builds [simple](../simple) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
4+
5+
## Building
6+
7+
Because this example is "outside of the source tree", it is important to first build/install llama.cpp using CMake. An example is provided here, but please see the [llama.cpp build instructions](../..) for more detailed build instructions.
8+
9+
### Considerations
10+
11+
When hardware acceleration libraries are used (e.g. CUDA, Metal, Vulkan, etc.), the appropriate dependencies will be searched for automatically. So, for example, when a package built with CUDA support is found, its CUDA dependencies will also be located without any additional configuration.
12+
13+
### Build llama.cpp and install to llama.cpp/inst
14+
15+
```sh
16+
git clone https://github.com/ggerganov/llama.cpp
17+
cd llama.cpp
18+
cmake -S . -B build
19+
cmake --build build
20+
cmake --install build --prefix inst
```
21+
22+
### Build simple-cmake-pkg
23+
24+
```sh
25+
cd examples/simple-cmake-pkg
26+
cmake -S . -B build -DCMAKE_PREFIX_PATH=../../inst/lib/cmake
27+
cmake --build build
28+
```
29+
30+
### Run simple-cmake-pkg
31+
32+
```sh
33+
./build/llama-simple-cmake-pkg -m ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is"
34+
```

0 commit comments

Comments
 (0)