diff --git a/.github/workflows/llvm-build.yml b/.github/workflows/llvm-build.yml
index 9f452ac9c5e1..b5114f2dfbe6 100644
--- a/.github/workflows/llvm-build.yml
+++ b/.github/workflows/llvm-build.yml
@@ -107,6 +107,7 @@ jobs:
           -DLLVM_INSTALL_UTILS=ON
           -DLLVM_TARGETS_TO_BUILD="host;NVPTX;AMDGPU"
           -DLLVM_ENABLE_TERMINFO=OFF
+          -DLLVM_ENABLE_ABI_BREAKING_CHECKS=0
           llvm-project/llvm

           ninja -C llvm-project/build check-mlir install
@@ -130,6 +131,7 @@ jobs:
           -DLLVM_INSTALL_UTILS=ON
           -DLLVM_TARGETS_TO_BUILD="host;NVPTX;AMDGPU"
           -DLLVM_ENABLE_TERMINFO=OFF
+          -DLLVM_ENABLE_ABI_BREAKING_CHECKS=0
           llvm-project/llvm

           ninja -C llvm-project/build check-mlir install
@@ -175,6 +177,7 @@ jobs:
           -DCMAKE_LINKER=$LINKER \
           -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
           -DLLVM_ENABLE_ZSTD=OFF \
+          -DLLVM_ENABLE_ABI_BREAKING_CHECKS=0 \
           -DLLVM_INSTALL_UTILS=ON \
           -DCMAKE_INSTALL_PREFIX="${{ env.llvm_install_dir }}" \
           -DLLVM_TARGETS_TO_BUILD="AArch64;NVPTX;AMDGPU" \
@@ -225,6 +228,7 @@ jobs:
           -DLLVM_TARGETS_TO_BUILD="AArch64;NVPTX;AMDGPU"
           -DLLVM_USE_HOST_TOOLS=ON
           -DLLVM_ENABLE_TERMINFO=OFF
+          -DLLVM_ENABLE_ABI_BREAKING_CHECKS=0
           llvm-project/llvm

           ninja -C llvm-project/build install
diff --git a/.github/workflows/llvm-build/almalinux.Dockerfile b/.github/workflows/llvm-build/almalinux.Dockerfile
index adf8b5cc6bd7..24c36db10f0a 100644
--- a/.github/workflows/llvm-build/almalinux.Dockerfile
+++ b/.github/workflows/llvm-build/almalinux.Dockerfile
@@ -33,6 +33,7 @@ RUN cmake -GNinja -Bbuild \
     -DLLVM_ENABLE_PROJECTS=mlir \
     -DLLVM_ENABLE_TERMINFO=OFF \
     -DLLVM_INSTALL_UTILS=ON \
+    -DLLVM_ENABLE_ABI_BREAKING_CHECKS=0 \
     -DLLVM_TARGETS_TO_BUILD="host;NVPTX;AMDGPU" \
     /source/llvm-project/llvm

diff --git a/cmake/llvm-hash.txt b/cmake/llvm-hash.txt
index 921aca278c74..1522498c600e 100644
--- a/cmake/llvm-hash.txt
+++ b/cmake/llvm-hash.txt
@@ -1 +1 @@
-1a9acd786d493b00c08d1611f51420d421b74cf1
+ce80c80dca45c7b4636a3e143973e2c6cbdb2884
diff --git a/python/setup.py b/python/setup.py
index b99544f1f3ea..736ac621d4d7 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -392,7 +392,7 @@ def build_extension(self, ext):
             "-G", "Ninja",  # Ninja is much faster than make
             "-DCMAKE_MAKE_PROGRAM=" +
             ninja_dir,  # Pass explicit path to ninja otherwise cmake may cache a temporary path
-            "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON", "-DLLVM_ENABLE_WERROR=ON",
+            "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON", "-DLLVM_ENABLE_WERROR=ON", "-DLLVM_ENABLE_ABI_BREAKING_CHECKS=0",
             "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir, "-DTRITON_BUILD_TUTORIALS=OFF",
             "-DTRITON_BUILD_PYTHON_MODULE=ON", "-DPython3_EXECUTABLE:FILEPATH=" + sys.executable,
             "-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON", "-DPYTHON_INCLUDE_DIRS=" + python_include_dir,
diff --git a/third_party/amd/lib/TritonAMDGPUToLLVM/BuiltinFuncToLLVM.cpp b/third_party/amd/lib/TritonAMDGPUToLLVM/BuiltinFuncToLLVM.cpp
index 3c429a3d724a..99dad006dba3 100644
--- a/third_party/amd/lib/TritonAMDGPUToLLVM/BuiltinFuncToLLVM.cpp
+++ b/third_party/amd/lib/TritonAMDGPUToLLVM/BuiltinFuncToLLVM.cpp
@@ -128,7 +128,7 @@ class CallOpConversion : public mlir::RewritePattern {
     auto operands = callOp.getOperands();
     auto result = callOp.getResult();

-    LLVM::LLVMFunctionType calleeType = callOp.getCalleeFunctionType();
+    LLVM::LLVMFunctionType calleeType = callOp.getCalleeType().value();
     Type returnType = calleeType.getReturnType();

     auto loc = callOp.getLoc();
diff --git a/third_party/amd/python/triton_amd.cc b/third_party/amd/python/triton_amd.cc
index 598df0fba7db..a6ef2fec7c67 100644
--- a/third_party/amd/python/triton_amd.cc
+++ b/third_party/amd/python/triton_amd.cc
@@ -195,7 +195,9 @@ void init_triton_amd(py::module &&m) {
         target->createMCAsmBackend(*sti, *mri, mcOptions));
     mcStreamer.reset(target->createMCObjectStreamer(
         triple, ctx, std::move(mab), mab->createObjectWriter(svos),
-        std::move(ce), *sti));
+        std::move(ce), *sti, mcOptions.MCRelaxAll,
+        mcOptions.MCIncrementalLinkerCompatible,
+        /*DWARFMustBeAtTheEnd=*/false));

     std::unique_ptr<MCAsmParser> parser(
         createMCAsmParser(srcMgr, ctx, *mcStreamer, *mai));