Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CI]Split XPU distributed UT into another job #1371

Merged
merged 1 commit into from
Feb 24, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
140 changes: 131 additions & 9 deletions .github/workflows/_linux_ut.yml
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ permissions: read-all
jobs:
ut_test:
runs-on: ${{ inputs.runner }}
if: ${{ inputs.ut != 'xpu_distributed' }}
timeout-minutes: 900
env:
NEOReadDebugKeys: ${{ inputs.driver == 'rolling' && '1' || '0' }}
Expand Down Expand Up @@ -234,8 +235,134 @@ jobs:
test_cmd="${test_cmd} test_xpu.py"
fi
eval $test_cmd 2>${{ github.workspace }}/ut_log/torch_xpu/torch_xpu_test_error.log | tee ${{ github.workspace }}/ut_log/torch_xpu/torch_xpu_test.log
- name: UT Test Results Check
shell: bash
run: |
function contains() {
contains_status="echo 'Start $2 ...'"
{
[[ $1 =~ (^|,)$2($|,) ]]
} || {
        echo "[Warning] $2 is not supported type! Skipped!"
contains_status="continue"
}
}
set -xe
echo "UT_NAME=$(echo ${{ inputs.ut }} |sed 's/,/-/g')" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
for ut_suite in $(echo ${{ inputs.ut }} |sed 's/,/ /g')
do
contains "op_regression,op_regression_dev1,op_extended,op_ut,torch_xpu" $ut_suite
$contains_status
cd ${{ github.workspace }}/ut_log/${ut_suite}
cp ${{ github.workspace }}/.github/scripts/ut_result_check.sh ./
bash ut_result_check.sh ${ut_suite}
done
- name: Upload Inductor XPU UT Log
if: always()
uses: actions/upload-artifact@v4
with:
name: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-${{ inputs.abi }}-${{ env.UT_NAME }}
path: ${{ github.workspace }}/ut_log

distributed_ut_test:
runs-on: pvc_e2e
if: contains(inputs.ut, 'xpu_distributed')
timeout-minutes: 900
env:
NEOReadDebugKeys: ${{ inputs.driver == 'rolling' && '1' || '0' }}
DisableScratchPages: ${{ inputs.driver == 'rolling' && '1' || '0' }}
steps:
- name: Checkout torch-xpu-ops
uses: actions/checkout@v4
- name: Prepare Stock Pytorch
run: |
pwd
which conda && conda clean -ay
conda remove --all -y -n xpu_op_${ZE_AFFINITY_MASK} || \
rm -rf $(dirname ${CONDA_EXE})/../envs/xpu_op_${ZE_AFFINITY_MASK}
conda create -n xpu_op_${ZE_AFFINITY_MASK} python=${{ inputs.python }} cmake ninja -y
source activate xpu_op_${ZE_AFFINITY_MASK}
cd ../ && rm -rf pytorch
pip install requests
git clone https://github.com/pytorch/pytorch pytorch
if [ "${{ inputs.pytorch }}" != "nightly_wheel" ]; then
cd pytorch && git checkout $(echo ${{ inputs.pytorch }})
# apply PRs for stock pytorch
python ../torch-xpu-ops/.github/scripts/apply_torch_pr.py
git status && git show -s
git submodule sync && git submodule update --init --recursive
if [[ ${{ inputs.keep_torch_xpu_ops }} == 'true' ]]; then
echo "Don't replace torch-xpu-ops!"
else
rm -rf third_party/torch-xpu-ops && cp -r ../torch-xpu-ops third_party/
# Workaround for torch-xpu-ops ci test
sed -i "s/checkout --quiet \${TORCH_XPU_OPS_COMMIT}/log -n 1/g" caffe2/CMakeLists.txt
fi
fi
- name: Triton Installation
run: |
source activate xpu_op_${ZE_AFFINITY_MASK}
cd ../pytorch
TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton"
if [ -z ${{ inputs.triton }} ]; then
TRITON_COMMIT_ID="$(<.ci/docker/ci_commit_pins/triton-xpu.txt)"
else
TRITON_COMMIT_ID="${{ inputs.triton }}"
fi
echo ${TRITON_REPO}@${TRITON_COMMIT_ID}
if [ "${{ inputs.pytorch }}" != "nightly_wheel" ]; then
pip install --force-reinstall "git+${TRITON_REPO}@${TRITON_COMMIT_ID}#subdirectory=python"
fi
- name: Download Pytorch wheel
if: ${{ inputs.pytorch != 'nightly_wheel' }}
uses: actions/download-artifact@v4
with:
name: Torch-XPU-Wheel-${{ github.event.pull_request.number || github.sha }}-${{ inputs.abi }}
path: ${{ github.workspace }}
- name: Install Pytorch XPU
run: |
source activate xpu_op_${ZE_AFFINITY_MASK}
source .github/scripts/env.sh ${{ inputs.pytorch }}
pip install mkl-static==2025.0.1 mkl-include==2025.0.1
if [[ ${{ inputs.abi }} == '0' ]]; then
export _GLIBCXX_USE_CXX11_ABI=0
else
export _GLIBCXX_USE_CXX11_ABI=1
fi
if [ "${{ inputs.pytorch }}" != "nightly_wheel" ]; then
cd ../pytorch
export CMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH}:${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
pip install -r requirements.txt
pip install --force-reinstall ${{ github.workspace }}/torch*.whl
git clone https://github.com/pytorch/vision && cd vision && python setup.py install && cd ..
else
pip install torch torchvision torchaudio --pre --index-url https://download.pytorch.org/whl/nightly/xpu
TORCH_COMMIT_ID=$(python -c 'import torch; print(torch.version.git_version)')
cd ../pytorch
git reset --hard && git checkout ${TORCH_COMMIT_ID}
TORCH_XPU_OPS_COMMIT=$(<third_party/xpu.txt)
rm -rf third_party/torch-xpu-ops
git clone https://github.com/intel/torch-xpu-ops.git third_party/torch-xpu-ops
cd third_party/torch-xpu-ops
git checkout ${TORCH_XPU_OPS_COMMIT}
cd ../..
python third_party/torch-xpu-ops/.github/scripts/apply_torch_pr.py
fi
pip install -r .ci/docker/requirements-ci.txt
- name: Torch Config
run: |
source activate xpu_op_${ZE_AFFINITY_MASK}
source .github/scripts/env.sh ${{ inputs.pytorch }}
python -c "import torch; print(torch.__config__.show())"
python -c "import torch; print(torch.__config__.parallel_info())"
        python -c "import torch; print(torch.xpu.device_count())"
python -c "import triton; print(triton.__version__)"

cd ..
python pytorch/torch/utils/collect_env.py
rm -rf /tmp/torchinductor_*
rm -rf ~/.triton/cache
- name: Run Torch XPU Distributed UT
if: contains(inputs.ut, 'xpu_distributed')
run: |
source .github/scripts/env.sh ${{ inputs.pytorch }}
source activate xpu_op_${ZE_AFFINITY_MASK}
Expand Down Expand Up @@ -263,14 +390,9 @@ jobs:
}
set -xe
echo "UT_NAME=$(echo ${{ inputs.ut }} |sed 's/,/-/g')" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
for ut_suite in $(echo ${{ inputs.ut }} |sed 's/,/ /g')
do
contains "op_regression,op_regression_dev1,op_extended,op_ut,torch_xpu,xpu_distributed" $ut_suite
$contains_status
cd ${{ github.workspace }}/ut_log/${ut_suite}
cp ${{ github.workspace }}/.github/scripts/ut_result_check.sh ./
bash ut_result_check.sh ${ut_suite}
done
cd ${{ github.workspace }}/ut_log/xpu_distributed
cp ${{ github.workspace }}/.github/scripts/ut_result_check.sh ./
bash ut_result_check.sh 'xpu_distributed'
- name: Upload Inductor XPU UT Log
if: always()
uses: actions/upload-artifact@v4
Expand Down
14 changes: 1 addition & 13 deletions .github/workflows/pull.yml
Original file line number Diff line number Diff line change
Expand Up @@ -64,21 +64,9 @@ jobs:
uses: ./.github/workflows/_linux_ut.yml
with:
pytorch: ${{ needs.preci-linux-build.outputs.torch_commit_id }}
ut: op_regression,op_regression_dev1,op_extended,op_ut
ut: op_regression,op_regression_dev1,op_extended,op_ut,xpu_distributed
runner: linux.idc.xpu

preci-ut-distributed:
# Don't run on forked repos and draft PRs
secrets: inherit
if: ${{ (github.repository_owner == 'intel') && (github.event.pull_request.draft == false) }}
name: preci-linux
needs: preci-linux-build
uses: ./.github/workflows/_linux_ut.yml
with:
pytorch: ${{ needs.preci-linux-build.outputs.torch_commit_id }}
ut: xpu_distributed
runner: pvc_e2e

Inductor-XPU-E2E-CI-Tests:
name: preci-linux / e2e_test
needs: preci-linux-build
Expand Down
Loading