[AArch64] Add custom lowering for load <3 x i8>. #78632

Merged · 17 commits · Jan 30, 2024
Changes from 11 commits
104 changes: 102 additions & 2 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11012,6 +11012,50 @@ SDValue ReconstructShuffleWithRuntimeMask(SDValue Op, SelectionDAG &DAG) {
MaskSourceVec);
}

// Check if Op is a BUILD_VECTOR with 2 extracts and a load that is cheaper to
// insert into a vector and use a shuffle. This improves lowering for loads of
// <3 x i8>.
static SDValue shuffleWithSingleLoad(SDValue Op, SelectionDAG &DAG) {
if (Op.getNumOperands() != 4 || Op.getValueType() != MVT::v4i16)
return SDValue();

SDValue V0 = Op.getOperand(0);
SDValue V1 = Op.getOperand(1);
SDValue V2 = Op.getOperand(2);
SDValue V3 = Op.getOperand(3);
if (V0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Contributor:

This is a hyper-specific pattern. I assume it's because we are specifically looking for and only care about a single <3 x i8> instruction (a load?) and this is what it's been mangled to by the time we get to see it. If so we might have to tolerate the horror, but should at least call it out in comments.

Contributor Author:

Unfortunately yes! I couldn't find any alternative to prevent the folds that create the sub-optimal nodes. I slightly extended the comment at the top of the function. Do you think that's sufficient or should I also add one here?
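
To make the "what it's been mangled to" shape concrete, here is a minimal sketch of the node this function is willing to accept — a hypothetical documentation-only helper, not part of the patch, assuming the usual SelectionDAG definitions are in scope:

// Hypothetical predicate mirroring the checks in shuffleWithSingleLoad.
// The BUILD_VECTOR a <3 x i8> load has typically been mangled into looks like:
//   v4i16 BUILD_VECTOR (extract_vector_elt V, 0), (extract_vector_elt V, 1),
//                      (scalar load of the third i8),
//                      undef or (extract_vector_elt V, 3)
static bool looksLikeMangledV3I8Load(SDValue Op) {
  return Op.getOpcode() == ISD::BUILD_VECTOR && Op.getNumOperands() == 4 &&
         Op.getValueType() == MVT::v4i16 &&
         Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
         Op.getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
         Op.getOperand(2).getOpcode() == ISD::LOAD &&
         (Op.getOperand(3).isUndef() ||
          Op.getOperand(3).getOpcode() == ISD::EXTRACT_VECTOR_ELT);
}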

V1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
V2.getOpcode() != ISD::LOAD ||
!(V3.isUndef() || V3.getOpcode() == ISD::EXTRACT_VECTOR_ELT))
return SDValue();

if (V0.getOperand(0) != V1.getOperand(0) ||
V0.getConstantOperandVal(1) != 0 || V1.getConstantOperandVal(1) != 1 ||
!(V3.isUndef() || V3.getConstantOperandVal(1) == 3))
Contributor:

We're not checking V3.getOperand(0) anywhere.

Contributor Author:

Yep, added a check, thanks!
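
For reference, a sketch of what the added guard might look like (the actual follow-up commit may differ; V0 and V3 are the values named in the function above):

  // Make sure the lane-3 element, if present, comes from the same source
  // vector as lanes 0 and 1.
  if (!V3.isUndef() && V3.getOperand(0) != V0.getOperand(0))
    return SDValue();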

return SDValue();

SDLoc dl(Op);
auto *L = cast<LoadSDNode>(Op.getOperand(2));
auto Vec = V0.getOperand(0);

Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Vec.getValueType(), Vec,
SDValue(L, 0), DAG.getConstant(2, dl, MVT::i64));
Vec = DAG.getNode(ISD::BITCAST, dl, MVT::v4i16, Vec);
Contributor:

I think Vec could have quite a variety of unexpected types here (though running at a specific phase of DAG might limit that). There's no reason to expect it to have either 4 elements or for each element to be i16 just from what you've checked so far.
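
A sketch of one possible guard, which would sit with the earlier pattern checks — the assumption here is that the extract source must be a 64-bit fixed-length vector with at least three lanes so the lane-2 insert and the v4i16 bitcast stay legal; the follow-up commit may restrict the type differently:

  EVT SrcVT = V0.getOperand(0).getValueType();
  if (!SrcVT.isFixedLengthVector() || SrcVT.getFixedSizeInBits() != 64 ||
      SrcVT.getVectorNumElements() < 3)
    return SDValue();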


SDValue ShuffleOps[] = {DAG.getUNDEF(MVT::v4i16), DAG.getUNDEF(MVT::v4i16)};
ShuffleOps[0] = Vec;

SmallVector<int, 8> Mask(4, -1);
Mask[0] = 0;
Mask[1] = 1;
Mask[2] = 2;
if (!V3.isUndef())
Mask[3] = 3;
SDValue Shuffle =
DAG.getVectorShuffle(MVT::v4i16, dl, ShuffleOps[0], ShuffleOps[1], Mask);
return Shuffle;
}

// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
@@ -11022,6 +11066,10 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
EVT VT = Op.getValueType();
assert(!VT.isScalableVector() &&
"Scalable vectors cannot be used with ISD::BUILD_VECTOR");

if (SDValue S = shuffleWithSingleLoad(Op, DAG))
return S;

unsigned NumElts = VT.getVectorNumElements();

struct ShuffleSourceInfo {
@@ -11048,6 +11096,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,

// First gather all vectors used as an immediate source for this BUILD_VECTOR
// node.
//
SmallVector<ShuffleSourceInfo, 2> Sources;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
@@ -21248,6 +21297,51 @@ static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) {
return SDValue();
}

// A custom combine to lower load <3 x i8> as the more efficient sequence
// below:
// ldrb wX, [x0, #2]
// ldrh wY, [x0]
// orr wX, wY, wX, lsl #16
// fmov s0, wX
//
static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {
EVT MemVT = LD->getMemoryVT();
if (MemVT != EVT::getVectorVT(*DAG.getContext(), MVT::i8, 3) ||
LD->getOriginalAlign() >= 4)
return SDValue();

SDLoc DL(LD);
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
MachineMemOperand *MMO = LD->getMemOperand();
assert(LD->getOffset().isUndef() && "undef offset expected");
inclyc marked this conversation as resolved.

// Load 2 x i8, then 1 x i8.
SDValue L16 = DAG.getLoad(MVT::i16, DL, Chain, BasePtr,
MF.getMachineMemOperand(MMO, 0, 2));
TypeSize Offset2 = TypeSize::getFixed(2);
SDValue L8 = DAG.getLoad(MVT::i8, DL, Chain,
DAG.getMemBasePlusOffset(BasePtr, Offset2, DL),
MF.getMachineMemOperand(MMO, 2, 1));

SDValue Ins16 = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::v4i16, L16);

SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Ins16);

SDValue Ext8 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, L8);
SDValue Trunc8 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Ext8);
Contributor:

What are these two doing? They ought to amount to a nop.
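
If the pair really is a no-op (L8 is loaded directly as an i8), it could presumably be dropped and the loaded value inserted as-is — a sketch of that simplification, not what the patch does at this revision:

  SDValue Ins8 = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i8, Cast,
                             L8, DAG.getConstant(2, DL, MVT::i64));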


SDValue Ins8 = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i8, Cast,
Trunc8, DAG.getConstant(2, DL, MVT::i64));
SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MemVT, Ins8,
DAG.getConstant(0, DL, MVT::i64));
SDValue TokenFactor = DAG.getNode(
ISD::TokenFactor, DL, MVT::Other,
{SDValue(cast<SDNode>(L16), 1), SDValue(cast<SDNode>(L8), 1)});
return DAG.getMergeValues({Extract, TokenFactor}, DL);
}

// Perform TBI simplification if supported by the target and try to break up
// nontemporal loads larger than 256-bits loads for odd types so LDNPQ 256-bit
// load instructions can be selected.
@@ -21259,10 +21353,16 @@ static SDValue performLOADCombine(SDNode *N,
performTBISimplification(N->getOperand(1), DCI, DAG);

LoadSDNode *LD = cast<LoadSDNode>(N);
EVT MemVT = LD->getMemoryVT();
if (LD->isVolatile() || !LD->isNonTemporal() || !Subtarget->isLittleEndian())
if (LD->isVolatile() || !Subtarget->isLittleEndian())
return SDValue(N, 0);

if (SDValue Res = combineV3I8LoadExt(LD, DAG))
return Res;

if (!LD->isNonTemporal())
return SDValue(N, 0);

EVT MemVT = LD->getMemoryVT();
if (MemVT.isScalableVector() || MemVT.getSizeInBits() <= 256 ||
MemVT.getSizeInBits() % 256 == 0 ||
256 % MemVT.getScalarSizeInBits() != 0)
95 changes: 18 additions & 77 deletions llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
@@ -5,19 +5,8 @@
define <16 x i8> @load_v3i8(ptr %src) {
; CHECK-LABEL: load_v3i8:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldrh w8, [x0]
; CHECK-NEXT: strh w8, [sp, #12]
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: umov.h w8, v0[0]
; CHECK-NEXT: umov.h w9, v0[1]
; CHECK-NEXT: fmov s0, w8
; CHECK-NEXT: add x8, x0, #2
; CHECK-NEXT: mov.b v0[1], w9
; CHECK-NEXT: ld1.b { v0 }[2], [x8]
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ld1r.4h { v0 }, [x0], #2
; CHECK-NEXT: ld1.b { v0 }[2], [x0]
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8:
@@ -47,19 +36,11 @@ define <16 x i8> @load_v3i8(ptr %src) {
define <4 x i32> @load_v3i8_to_4xi32(ptr %src) {
; CHECK-LABEL: load_v3i8_to_4xi32:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldrh w8, [x0]
; CHECK-NEXT: ld1r.4h { v0 }, [x0], #2
; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
; CHECK-NEXT: strh w8, [sp, #12]
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ldrsb w8, [x0, #2]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: mov.h v0[1], v0[1]
; CHECK-NEXT: mov.h v0[2], w8
; CHECK-NEXT: ld1.b { v0 }[2], [x0]
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8_to_4xi32:
@@ -73,7 +54,6 @@ define <4 x i32> @load_v3i8_to_4xi32(ptr %src) {
; BE-NEXT: ldrsb w8, [x0, #2]
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: mov v0.h[1], v0.h[1]
; BE-NEXT: mov v0.h[2], w8
; BE-NEXT: ushll v0.4s, v0.4h, #0
; BE-NEXT: and v0.16b, v0.16b, v1.16b
@@ -90,19 +70,11 @@ define <4 x i32> @load_v3i8_to_4xi32(ptr %src) {
define <4 x i32> @load_v3i8_to_4xi32_align_2(ptr %src) {
; CHECK-LABEL: load_v3i8_to_4xi32_align_2:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldrh w8, [x0]
; CHECK-NEXT: ld1r.4h { v0 }, [x0], #2
; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
; CHECK-NEXT: strh w8, [sp, #12]
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ldrsb w8, [x0, #2]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: mov.h v0[1], v0[1]
; CHECK-NEXT: mov.h v0[2], w8
; CHECK-NEXT: ld1.b { v0 }[2], [x0]
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8_to_4xi32_align_2:
@@ -116,7 +88,6 @@ define <4 x i32> @load_v3i8_to_4xi32_align_2(ptr %src) {
; BE-NEXT: ldrsb w8, [x0, #2]
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: mov v0.h[1], v0.h[1]
; BE-NEXT: mov v0.h[2], w8
; BE-NEXT: ushll v0.4s, v0.4h, #0
; BE-NEXT: and v0.16b, v0.16b, v1.16b
@@ -160,19 +131,13 @@ define <4 x i32> @load_v3i8_to_4xi32_align_4(ptr %src) {
define <4 x i32> @load_v3i8_to_4xi32_const_offset_1(ptr %src) {
; CHECK-LABEL: load_v3i8_to_4xi32_const_offset_1:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldurh w8, [x0, #1]
; CHECK-NEXT: add x8, x0, #1
; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
; CHECK-NEXT: strh w8, [sp, #12]
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ldrsb w8, [x0, #3]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: mov.h v0[1], v0[1]
; CHECK-NEXT: mov.h v0[2], w8
; CHECK-NEXT: ld1r.4h { v0 }, [x8]
; CHECK-NEXT: add x8, x0, #3
; CHECK-NEXT: ld1.b { v0 }[2], [x8]
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8_to_4xi32_const_offset_1:
@@ -186,7 +151,6 @@ define <4 x i32> @load_v3i8_to_4xi32_const_offset_1(ptr %src) {
; BE-NEXT: ldrsb w8, [x0, #3]
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: mov v0.h[1], v0.h[1]
; BE-NEXT: mov v0.h[2], w8
; BE-NEXT: ushll v0.4s, v0.4h, #0
; BE-NEXT: and v0.16b, v0.16b, v1.16b
@@ -204,19 +168,13 @@ define <4 x i32> @load_v3i8_to_4xi32_const_offset_1(ptr %src) {
define <4 x i32> @load_v3i8_to_4xi32_const_offset_3(ptr %src) {
; CHECK-LABEL: load_v3i8_to_4xi32_const_offset_3:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldurh w8, [x0, #3]
; CHECK-NEXT: add x8, x0, #3
; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
; CHECK-NEXT: strh w8, [sp, #12]
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ldrsb w8, [x0, #5]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: mov.h v0[1], v0[1]
; CHECK-NEXT: mov.h v0[2], w8
; CHECK-NEXT: ld1r.4h { v0 }, [x8]
; CHECK-NEXT: add x8, x0, #5
; CHECK-NEXT: ld1.b { v0 }[2], [x8]
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8_to_4xi32_const_offset_3:
@@ -230,7 +188,6 @@ define <4 x i32> @load_v3i8_to_4xi32_const_offset_3(ptr %src) {
; BE-NEXT: ldrsb w8, [x0, #5]
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: mov v0.h[1], v0.h[1]
; BE-NEXT: mov v0.h[2], w8
; BE-NEXT: ushll v0.4s, v0.4h, #0
; BE-NEXT: and v0.16b, v0.16b, v1.16b
@@ -292,7 +249,6 @@ define <4 x i32> @volatile_load_v3i8_to_4xi32(ptr %src) {
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ldrsb w8, [x0, #2]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: mov.h v0[1], v0[1]
; CHECK-NEXT: mov.h v0[2], w8
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
@@ -310,7 +266,6 @@ define <4 x i32> @volatile_load_v3i8_to_4xi32(ptr %src) {
; BE-NEXT: ldrsb w8, [x0, #2]
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: mov v0.h[1], v0.h[1]
; BE-NEXT: mov v0.h[2], w8
; BE-NEXT: ushll v0.4s, v0.4h, #0
; BE-NEXT: and v0.16b, v0.16b, v1.16b
@@ -439,19 +394,12 @@ entry:
define void @load_ext_to_64bits(ptr %src, ptr %dst) {
; CHECK-LABEL: load_ext_to_64bits:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldrh w8, [x0]
; CHECK-NEXT: strh w8, [sp, #12]
; CHECK-NEXT: add x8, x0, #2
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: ld1.b { v0 }[4], [x8]
; CHECK-NEXT: ld1r.4h { v0 }, [x0], #2
; CHECK-NEXT: add x8, x1, #4
; CHECK-NEXT: ld1.b { v0 }[2], [x0]
; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: st1.h { v0 }[2], [x8]
; CHECK-NEXT: str s0, [x1]
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_ext_to_64bits:
@@ -540,24 +488,17 @@ entry:
define void @load_ext_add_to_64bits(ptr %src, ptr %dst) {
; CHECK-LABEL: load_ext_add_to_64bits:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldrh w9, [x0]
; CHECK-NEXT: ld1r.4h { v0 }, [x0], #2
; CHECK-NEXT: Lloh2:
; CHECK-NEXT: adrp x8, lCPI13_0@PAGE
; CHECK-NEXT: Lloh3:
; CHECK-NEXT: ldr d1, [x8, lCPI13_0@PAGEOFF]
; CHECK-NEXT: add x8, x1, #4
; CHECK-NEXT: strh w9, [sp, #12]
; CHECK-NEXT: add x9, x0, #2
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: ld1.b { v0 }[4], [x9]
; CHECK-NEXT: ld1.b { v0 }[2], [x0]
; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: add.4h v0, v0, v1
; CHECK-NEXT: st1.h { v0 }[2], [x8]
; CHECK-NEXT: str s0, [x1]
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
; CHECK-NEXT: .loh AdrpLdr Lloh2, Lloh3
;