We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 8a3d501 commit b8ffaa6 — Copy full SHA for b8ffaa6
.devops/llama-cli-intel.Dockerfile
@@ -14,7 +14,7 @@ RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
14
echo "LLAMA_SYCL_F16 is set" && \
15
export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
16
fi && \
17
- cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+ cmake -B build -DLLAMA_SYCL=ON ${OPT_SYCL_F16} && \
18
cmake --build build --config Release --target llama-cli
19
20
FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
0 commit comments