Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Test] Pre-submit tests for #68972 #69040

Merged
merged 1 commit into from
Oct 14, 2023
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
124 changes: 124 additions & 0 deletions llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64 %s

; Checks that several GEPs indexing off sign-extended (a + C) values share a
; single shifted base: (a << 2) is computed once, and the constant element
; offsets are folded into the store displacements (24 = 6*4, 140 = 35*4).
; The CHECK lines below are autogenerated by update_llc_test_checks.py; do
; not hand-edit them.
define void @test(ptr nocapture noundef writeonly %array1, i32 noundef signext %a, i32 noundef signext %b) {
; RV64-LABEL: test:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addiw a3, a1, 5
; RV64-NEXT: slli a4, a3, 2
; RV64-NEXT: add a4, a0, a4
; RV64-NEXT: sw a2, 0(a4)
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: sw a2, 24(a0)
; RV64-NEXT: sw a3, 140(a0)
; RV64-NEXT: ret
entry:
; array1[a + 5] = b
%add = add nsw i32 %a, 5
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
store i32 %b, ptr %arrayidx, align 4
; array1[a + 6] = b
%add3 = add nsw i32 %a, 6
%idxprom4 = sext i32 %add3 to i64
%arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
store i32 %b, ptr %arrayidx5, align 4
; array1[a + 35] = a + 5  (the add result itself is also stored as data)
%add6 = add nsw i32 %a, 35
%idxprom7 = sext i32 %add6 to i64
%arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
store i32 %add, ptr %arrayidx8, align 4
ret void
}

; Test with a select in between: the add's result has an extra (non-address) use, but the addressing can still be simplified.
; Same pattern as @test, but %add also feeds a select (lowered to the
; bgtz/mv branch sequence below). The shared (a << 2) base and folded
; displacements (24 = 6*4, 140 = 35*4) must still be formed even though the
; add has this extra use. CHECK lines are autogenerated; do not hand-edit.
define void @test1(ptr nocapture noundef %array1, i32 noundef signext %a, i32 noundef signext %b, i32 noundef signext %x) {
; RV64-LABEL: test1:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addiw a4, a1, 5
; RV64-NEXT: slli a5, a4, 2
; RV64-NEXT: add a5, a0, a5
; RV64-NEXT: mv a6, a4
; RV64-NEXT: bgtz a3, .LBB1_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a6, a2
; RV64-NEXT: .LBB1_2: # %entry
; RV64-NEXT: sw a6, 0(a5)
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: sw a6, 24(a0)
; RV64-NEXT: sw a4, 140(a0)
; RV64-NEXT: ret
entry:
; val = (x > 0) ? (a + 5) : b
%add = add nsw i32 %a, 5
%cmp = icmp sgt i32 %x, 0
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
%add.b = select i1 %cmp, i32 %add, i32 %b
; array1[a + 5] = val
store i32 %add.b, ptr %arrayidx, align 4
; array1[a + 6] = val
%add5 = add nsw i32 %a, 6
%idxprom6 = sext i32 %add5 to i64
%arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
store i32 %add.b, ptr %arrayidx7, align 4
; array1[a + 35] = a + 5
%add8 = add nsw i32 %a, 35
%idxprom9 = sext i32 %add8 to i64
%arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
store i32 %add, ptr %arrayidx10, align 4
ret void
}

; i64 variant of @test: no sext is needed, indices feed the GEPs directly.
; Expects one shared (a << 3) base with folded displacements
; (48 = 6*8, 280 = 35*8). CHECK lines are autogenerated; do not hand-edit.
define void @test2(ptr nocapture noundef writeonly %array1, i64 noundef %a, i64 noundef %b) local_unnamed_addr #0 {
; RV64-LABEL: test2:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a3, a1, 5
; RV64-NEXT: slli a4, a3, 3
; RV64-NEXT: add a4, a0, a4
; RV64-NEXT: sd a2, 0(a4)
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: sd a2, 48(a0)
; RV64-NEXT: sd a3, 280(a0)
; RV64-NEXT: ret
entry:
; array1[a + 5] = b
%add = add nsw i64 %a, 5
%arrayidx = getelementptr inbounds i64, ptr %array1, i64 %add
store i64 %b, ptr %arrayidx, align 8
; array1[a + 6] = b
%add2 = add nsw i64 %a, 6
%arrayidx3 = getelementptr inbounds i64, ptr %array1, i64 %add2
store i64 %b, ptr %arrayidx3, align 8
; array1[a + 35] = a + 5
%add4 = add nsw i64 %a, 35
%arrayidx5 = getelementptr inbounds i64, ptr %array1, i64 %add4
store i64 %add, ptr %arrayidx5, align 8
ret void
}

; i64 variant of @test1: %add feeds a select as well as the GEPs. The shared
; (a << 3) base with folded displacements (48 = 6*8, 280 = 35*8) must still
; be formed. CHECK lines are autogenerated; do not hand-edit.
define void @test3(ptr nocapture noundef %array1, i64 noundef %a, i64 noundef %b, i64 noundef %x) {
; RV64-LABEL: test3:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a4, a1, 5
; RV64-NEXT: mv a5, a4
; RV64-NEXT: bgtz a3, .LBB3_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a5, a2
; RV64-NEXT: .LBB3_2: # %entry
; RV64-NEXT: slli a2, a4, 3
; RV64-NEXT: add a2, a0, a2
; RV64-NEXT: sd a5, 0(a2)
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: sd a5, 48(a0)
; RV64-NEXT: sd a4, 280(a0)
; RV64-NEXT: ret
entry:
; val = (x > 0) ? (a + 5) : b
%add = add nsw i64 %a, 5
%cmp = icmp sgt i64 %x, 0
%spec.select = select i1 %cmp, i64 %add, i64 %b
; array1[a + 5] = val
%0 = getelementptr inbounds i64, ptr %array1, i64 %add
store i64 %spec.select, ptr %0, align 8
; array1[a + 6] = val
%add3 = add nsw i64 %a, 6
%arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
store i64 %spec.select, ptr %arrayidx4, align 8
; array1[a + 35] = a + 5
%add5 = add nsw i64 %a, 35
%arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
store i64 %add, ptr %arrayidx6, align 8
ret void
}