diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 145f28503635890..6f6d3342fcd7f2b 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -67,7 +67,7 @@ class XtensaDAGToDAGISel : public SelectionDAGISel { Valid = isValidAddrOffset(Scale, OffsetVal); if (Valid) { - // If the first operand is a FI, get the TargetFI Node + // If the first operand is a FI, get the TargetFI Node. if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); @@ -125,6 +125,7 @@ FunctionPass *llvm::createXtensaISelDag(XtensaTargetMachine &TM, void XtensaDAGToDAGISel::Select(SDNode *Node) { SDLoc DL(Node); + EVT VT = Node->getValueType(0); // If we have a custom node, we already have selected! if (Node->isMachineOpcode()) { @@ -132,5 +133,82 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { return; } + switch (Node->getOpcode()) { + case ISD::SHL: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + auto *C = dyn_cast<ConstantSDNode>(N1); + // If C is a constant in the range [1..31], SLLI is generated via + // pattern matching; otherwise generate SLL. + if (!C || C->isZero()) { + SDNode *SSL = CurDAG->getMachineNode(Xtensa::SSL, DL, MVT::Glue, N1); + SDNode *SLL = + CurDAG->getMachineNode(Xtensa::SLL, DL, VT, N0, SDValue(SSL, 0)); + ReplaceNode(Node, SLL); + return; + } + break; + } + case ISD::SRL: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + auto *C = dyn_cast<ConstantSDNode>(N1); + + // If C is a constant that fits in 4 bits, SRLI is generated via + // pattern matching; larger constants are selected to EXTUI, and a + // non-constant amount generates SRL. + if (C) { + if (isUInt<4>(C->getZExtValue())) + break; + unsigned ShAmt = C->getZExtValue(); + SDNode *EXTUI = CurDAG->getMachineNode( + Xtensa::EXTUI, DL, VT, N0, CurDAG->getTargetConstant(ShAmt, DL, VT), + CurDAG->getTargetConstant(32 - ShAmt, DL, VT)); + ReplaceNode(Node, EXTUI); + return; + } + + SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N1); + SDNode *SRL = + CurDAG->getMachineNode(Xtensa::SRL, DL, VT, N0, SDValue(SSR, 0)); + ReplaceNode(Node, SRL); + return; + } + case ISD::SRA: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + auto *C = dyn_cast<ConstantSDNode>(N1); + // If C is a constant, SRAI is generated via pattern matching; + // otherwise generate SRA.
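+    // A sketch of the non-constant case in assembly terms (register names +    // are illustrative only): "ssr a3" loads the shift amount into the SAR +    // register and "sra a2, a2" then shifts by SAR; the MVT::Glue result of +    // SSR keeps the SAR write paired with the dependent shift.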
+ if (!C) { + SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N1); + SDNode *SRA = + CurDAG->getMachineNode(Xtensa::SRA, DL, VT, N0, SDValue(SSR, 0)); + ReplaceNode(Node, SRA); + return; + } + break; + } + case XtensaISD::SRCL: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + SDValue N2 = Node->getOperand(2); + SDNode *SSL = CurDAG->getMachineNode(Xtensa::SSL, DL, MVT::Glue, N2); + SDNode *SRC = + CurDAG->getMachineNode(Xtensa::SRC, DL, VT, N0, N1, SDValue(SSL, 0)); + ReplaceNode(Node, SRC); + return; + } + case XtensaISD::SRCR: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + SDValue N2 = Node->getOperand(2); + SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N2); + SDNode *SRC = + CurDAG->getMachineNode(Xtensa::SRC, DL, VT, N0, N1, SDValue(SSR, 0)); + ReplaceNode(Node, SRC); + return; + } + } + SelectCode(Node); } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 80d01d662a2217b..c7675c2f501761c 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -21,6 +21,7 @@ #include "llvm/CodeGen/MachineJumpTableInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/IR/GlobalVariable.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" @@ -98,6 +99,32 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setCondCodeAction(ISD::SETUGT, MVT::i32, Expand); setCondCodeAction(ISD::SETULE, MVT::i32, Expand); + setOperationAction(ISD::MUL, MVT::i32, Expand); + setOperationAction(ISD::MULHU, MVT::i32, Expand); + setOperationAction(ISD::MULHS, MVT::i32, Expand); + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); + + setOperationAction(ISD::SDIV, MVT::i32, Expand); + setOperationAction(ISD::UDIV, MVT::i32, Expand); + setOperationAction(ISD::SREM, MVT::i32, Expand); + setOperationAction(ISD::UREM, MVT::i32, Expand); + setOperationAction(ISD::SDIVREM, MVT::i32, Expand); + setOperationAction(ISD::UDIVREM, MVT::i32, Expand); + + setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); + setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); + setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); + + setOperationAction(ISD::BSWAP, MVT::i32, Expand); + setOperationAction(ISD::ROTL, MVT::i32, Expand); + setOperationAction(ISD::ROTR, MVT::i32, Expand); + setOperationAction(ISD::CTPOP, MVT::i32, Custom); + setOperationAction(ISD::CTTZ, MVT::i32, Expand); + setOperationAction(ISD::CTLZ, MVT::i32, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + // Implement custom stack allocations setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); // Implement custom stack save and restore @@ -629,8 +656,12 @@ SDValue XtensaTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { EVT PtrVT = Table.getValueType(); unsigned EntrySize = MJTI->getEntrySize(TD); - Index = DAG.getNode(ISD::MUL, DL, Index.getValueType(), Index, - DAG.getConstant(EntrySize, DL, Index.getValueType())); + assert((MJTI->getEntrySize(TD) == 4) && "Unsupported jump-table entry size"); + + Index = DAG.getNode( + ISD::SHL, DL, Index.getValueType(), Index, + DAG.getConstant(Log2_32(EntrySize), DL, Index.getValueType())); + SDValue Addr = DAG.getNode(ISD::ADD, 
DL, Index.getValueType(), Index, Table); SDValue LD = DAG.getLoad(PtrVT, DL, Chain, Addr, @@ -662,10 +693,12 @@ SDValue XtensaTargetLowering::getAddrPCRel(SDValue Op, return DAG.getNode(XtensaISD::PCREL_WRAPPER, DL, Ty, Op); } -SDValue XtensaTargetLowering::LowerConstantPool(ConstantPoolSDNode *CP, +SDValue XtensaTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { - EVT PtrVT = getPointerTy(DAG.getDataLayout()); + EVT PtrVT = Op.getValueType(); + ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); SDValue Result; + if (!CP->isMachineConstantPoolEntry()) { Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset()); @@ -713,6 +746,119 @@ SDValue XtensaTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, return DAG.getMergeValues(Ops, DL); } +SDValue XtensaTargetLowering::LowerShiftLeftParts(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + MVT VT = MVT::i32; + SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + + // if Shamt - register size < 0: // Shamt < register size + // Lo = Lo << Shamt + // Hi = (Hi << Shamt) | (Lo >>u (register size - Shamt)) + // else: + // Lo = 0 + // Hi = Lo << (Shamt - register size) + + SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT); + SDValue ShamtMinusRegisterSize = + DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize); + + SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt); + SDValue HiTrue = DAG.getNode(XtensaISD::SRCL, DL, VT, Hi, Lo, Shamt); + SDValue Zero = DAG.getConstant(0, DL, VT); + SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize); + + SDValue Cond = DAG.getSetCC(DL, VT, ShamtMinusRegisterSize, Zero, ISD::SETLT); + Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, LoTrue, Zero); + Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, HiTrue, HiFalse); + + return DAG.getMergeValues({Lo, Hi}, DL); +} + +SDValue XtensaTargetLowering::LowerShiftRightParts(SDValue Op, + SelectionDAG &DAG, + bool IsSRA) const { + SDLoc DL(Op); + SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + MVT VT = MVT::i32; + + // SRA expansion: + // if Shamt - register size < 0: // Shamt < register size + // Lo = (Lo >>u Shamt) | (Hi << (register size - Shamt)) + // Hi = Hi >>s Shamt + // else: + // Lo = Hi >>s (Shamt - register size); + // Hi = Hi >>s (register size - 1) + // + // SRL expansion: + // if Shamt - register size < 0: // Shamt < register size + // Lo = (Lo >>u Shamt) | (Hi << (register size - Shamt)) + // Hi = Hi >>u Shamt + // else: + // Lo = Hi >>u (Shamt - register size); + // Hi = 0; + + unsigned ShiftRightOp = IsSRA ? 
ISD::SRA : ISD::SRL; + SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT); + SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT); + SDValue ShamtMinusRegisterSize = + DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize); + + SDValue LoTrue = DAG.getNode(XtensaISD::SRCR, DL, VT, Hi, Lo, Shamt); + SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt); + SDValue Zero = DAG.getConstant(0, DL, VT); + SDValue LoFalse = + DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize); + SDValue HiFalse; + + if (IsSRA) { + HiFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, RegisterSizeMinus1); + } else { + HiFalse = Zero; + } + + SDValue Cond = DAG.getSetCC(DL, VT, ShamtMinusRegisterSize, Zero, ISD::SETLT); + Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, LoTrue, LoFalse); + Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, HiTrue, HiFalse); + + return DAG.getMergeValues({Lo, Hi}, DL); +} + +SDValue XtensaTargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const { + auto &TLI = DAG.getTargetLoweringInfo(); + return TLI.expandCTPOP(Op.getNode(), DAG); +} + +bool XtensaTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, + SDValue C) const { + APInt Imm; + unsigned EltSizeInBits; + + if (ISD::isConstantSplatVector(C.getNode(), Imm)) { + EltSizeInBits = VT.getScalarSizeInBits(); + } else if (VT.isScalarInteger()) { + EltSizeInBits = VT.getSizeInBits(); + if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) + Imm = ConstNode->getAPIntValue(); + else + return false; + } else { + return false; + } + + // Omit if the data size exceeds 32 bits. + if (EltSizeInBits > 32) + return false; + + // Allow converting a multiply by a small power of two into SLLI. + if (Imm.isPowerOf2() && Imm.isIntN(5)) + return true; + + return false; +} + SDValue XtensaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { @@ -726,8 +872,10 @@ SDValue XtensaTargetLowering::LowerOperation, return LowerBlockAddress(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG); + case ISD::CTPOP: + return LowerCTPOP(Op, DAG); case ISD::ConstantPool: - return LowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); + return LowerConstantPool(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::STACKSAVE: @@ -736,6 +884,12 @@ SDValue XtensaTargetLowering::LowerOperation, return LowerSTACKRESTORE(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); + case ISD::SHL_PARTS: + return LowerShiftLeftParts(Op, DAG); + case ISD::SRA_PARTS: + return LowerShiftRightParts(Op, DAG, true); + case ISD::SRL_PARTS: + return LowerShiftRightParts(Op, DAG, false); default: report_fatal_error("Unexpected node to lower"); } @@ -747,12 +901,18 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::BR_JT"; case XtensaISD::CALL: return "XtensaISD::CALL"; + case XtensaISD::EXTUI: + return "XtensaISD::EXTUI"; case XtensaISD::PCREL_WRAPPER: return "XtensaISD::PCREL_WRAPPER"; case XtensaISD::RET: return "XtensaISD::RET"; case XtensaISD::SELECT_CC: return "XtensaISD::SELECT_CC"; + case XtensaISD::SRCL: + return "XtensaISD::SRCL"; + case XtensaISD::SRCR: + return "XtensaISD::SRCR"; } return nullptr; } @@ -827,6 +987,8 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *MBB) const { + DebugLoc DL = MI.getDebugLoc(); + switch (MI.getOpcode()) { case Xtensa::SELECT: return emitSelectCC(MI, MBB); diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h 
b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index dd811ae9f3a7747..8e7346b40dfe59b 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -30,6 +30,11 @@ enum { // There is an optional glue operand at the end. CALL, + // Extract unsigned immediate. Operand 0 is the value, operand 1 + // is the bit position of the field [0..31], operand 2 is the bit + // size of the field [1..16]. + EXTUI, + // Wraps a TargetGlobalAddress that should be loaded using PC-relative // accesses. Operand 0 is the address. PCREL_WRAPPER, @@ -40,6 +45,12 @@ enum { // the lhs and rhs (ops #0 and #1) of a conditional expression with the // condition code in op #4 SELECT_CC, + + // SRCL(R) performs a shift left (right) of the concatenation of two + // registers and returns the high (low) 32-bit part of the 64-bit result. + SRCL, + // Shift Right Combined. + SRCR, }; } @@ -50,6 +61,10 @@ class XtensaTargetLowering : public TargetLowering { explicit XtensaTargetLowering(const TargetMachine &TM, const XtensaSubtarget &STI); + MVT getScalarShiftAmountTy(const DataLayout &, EVT LHSTy) const override { + return LHSTy.getSizeInBits() <= 32 ? MVT::i32 : MVT::i64; + } + EVT getSetCCResultType(const DataLayout &, LLVMContext &, EVT VT) const override { if (!VT.isVector()) @@ -82,6 +97,9 @@ class XtensaTargetLowering : public TargetLowering { const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override; + bool decomposeMulByConstant(LLVMContext &Context, EVT VT, + SDValue C) const override; + const XtensaSubtarget &getSubtarget() const { return Subtarget; } MachineBasicBlock * @@ -101,7 +119,9 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const; + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; @@ -111,6 +131,10 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const; + SDValue getAddrPCRel(SDValue Op, SelectionDAG &DAG) const; CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index fc134e794153b6c..0d01864b54bc385 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -138,7 +138,8 @@ let Defs = [SAR] in { } def EXTUI : RRR_Inst<0x00, 0x04, 0x00, (outs AR:$r), (ins AR:$t, uimm5:$imm1, imm1_16:$imm2), - "extui\t$r, $t, $imm1, $imm2", []> { + "extui\t$r, $t, $imm1, $imm2", + [(set AR:$r, (Xtensa_extui AR:$t, uimm5:$imm1, imm1_16:$imm2))]> { bits<5> imm1; bits<4> imm2; diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 93cd1c933dbde6e..3dd73b44f336a57 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -24,6 +24,13 @@ def SDT_XtensaSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<2, 3>, SDTCisVT<5, i32>]>; + +def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, + SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; + +def SDT_XtensaEXTUI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, 
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; + //===----------------------------------------------------------------------===// // Node definitions //===----------------------------------------------------------------------===// @@ -46,3 +53,9 @@ def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, [SDNPHasChain]>; def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC, [SDNPInGlue]>; + +def Xtensa_srcl: SDNode<"XtensaISD::SRCL", SDT_XtensaSRC>; + +def Xtensa_srcr: SDNode<"XtensaISD::SRCR", SDT_XtensaSRC>; + +def Xtensa_extui: SDNode<"XtensaISD::EXTUI", SDT_XtensaEXTUI>; diff --git a/llvm/test/CodeGen/Xtensa/bswap.ll b/llvm/test/CodeGen/Xtensa/bswap.ll new file mode 100644 index 000000000000000..6a87aa84351cf26 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/bswap.ll @@ -0,0 +1,404 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +declare i16 @llvm.bswap.i16(i16) +declare i32 @llvm.bswap.i32(i32) +declare i64 @llvm.bswap.i64(i64) +declare i8 @llvm.bitreverse.i8(i8) +declare i16 @llvm.bitreverse.i16(i16) +declare i32 @llvm.bitreverse.i32(i32) +declare i64 @llvm.bitreverse.i64(i64) + +define i16 @test_bswap_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_bswap_i16: +; XTENSA: l32r a8, .LCPI0_0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: slli a9, a2, 8 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.bswap.i16(i16 %a) + ret i16 %tmp +} + +define i32 @test_bswap_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_bswap_i32: +; XTENSA: srli a8, a2, 8 +; XTENSA-NEXT: l32r a9, .LCPI1_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: extui a10, a2, 24, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 8 +; XTENSA-NEXT: slli a10, a2, 24 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.bswap.i32(i32 %a) + ret i32 %tmp +} + +define i64 @test_bswap_i64(i64 %a) nounwind { +; XTENSA-LABEL: test_bswap_i64: +; XTENSA: srli a8, a3, 8 +; XTENSA-NEXT: l32r a9, .LCPI2_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: extui a10, a3, 24, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: and a10, a3, a9 +; XTENSA-NEXT: slli a10, a10, 8 +; XTENSA-NEXT: slli a11, a3, 24 +; XTENSA-NEXT: or a10, a11, a10 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a2, 8 +; XTENSA-NEXT: and a10, a10, a9 +; XTENSA-NEXT: extui a11, a2, 24, 8 +; XTENSA-NEXT: or a10, a10, a11 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 8 +; XTENSA-NEXT: slli a11, a2, 24 +; XTENSA-NEXT: or a9, a11, a9 +; XTENSA-NEXT: or a3, a9, a10 +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i64 @llvm.bswap.i64(i64 %a) + ret i64 %tmp +} + +define i8 @test_bitreverse_i8(i8 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_i8: +; XTENSA: movi a8, 15 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: slli a8, a8, 4 +; XTENSA-NEXT: movi a9, 240 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: srli a9, a9, 4 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: movi a10, 51 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + 
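+; Note: BSWAP, ROTL/ROTR and the bit-counting operations are marked Expand in +; XtensaISelLowering.cpp, so the checks in this file are the generic DAG +; expansions (the bitreverse steps use the mask constants 51 = 0x33 and +; 85 = 0x55 visible above).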
%tmp = call i8 @llvm.bitreverse.i8(i8 %a) + ret i8 %tmp +} + +define i16 @test_bitreverse_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_i16: +; XTENSA: l32r a8, .LCPI4_0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: slli a9, a2, 8 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: l32r a10, .LCPI4_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 4 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI4_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI4_3 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.bitreverse.i16(i16 %a) + ret i16 %tmp +} + +define i32 @test_bitreverse_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_i32: +; XTENSA: srli a8, a2, 8 +; XTENSA-NEXT: l32r a9, .LCPI5_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: extui a10, a2, 24, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 8 +; XTENSA-NEXT: slli a10, a2, 24 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: l32r a10, .LCPI5_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 4 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI5_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI5_3 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.bitreverse.i32(i32 %a) + ret i32 %tmp +} + +define i64 @test_bitreverse_i64(i64 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_i64: +; XTENSA: srli a8, a3, 8 +; XTENSA-NEXT: l32r a9, .LCPI6_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: extui a10, a3, 24, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: and a10, a3, a9 +; XTENSA-NEXT: slli a10, a10, 8 +; XTENSA-NEXT: slli a11, a3, 24 +; XTENSA-NEXT: or a10, a11, a10 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 4 +; XTENSA-NEXT: l32r a11, .LCPI6_1 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 4 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 2 +; XTENSA-NEXT: l32r a7, .LCPI6_2 +; XTENSA-NEXT: and a10, a10, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 1 +; XTENSA-NEXT: l32r a6, .LCPI6_3 +; XTENSA-NEXT: and a10, a10, a6 +; XTENSA-NEXT: and a8, a8, a6 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a2, 8 +; XTENSA-NEXT: and a10, a10, a9 +; XTENSA-NEXT: extui a5, a2, 24, 8 +; XTENSA-NEXT: or a10, a10, a5 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 8 +; XTENSA-NEXT: slli a5, a2, 24 +; XTENSA-NEXT: or a9, a5, a9 +; XTENSA-NEXT: or a9, a9, a10 +; XTENSA-NEXT: srli a10, a9, 4 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: and a9, a9, a11 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: srli a10, a9, 2 +; XTENSA-NEXT: and a10, a10, a7 +; XTENSA-NEXT: and a9, 
a9, a7 +; XTENSA-NEXT: slli a9, a9, 2 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: srli a10, a9, 1 +; XTENSA-NEXT: and a10, a10, a6 +; XTENSA-NEXT: and a9, a9, a6 +; XTENSA-NEXT: slli a9, a9, 1 +; XTENSA-NEXT: or a3, a10, a9 +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i64 @llvm.bitreverse.i64(i64 %a) + ret i64 %tmp +} + +define i16 @test_bswap_bitreverse_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_bswap_bitreverse_i16: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI7_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI7_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI7_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.bswap.i16(i16 %a) + %tmp2 = call i16 @llvm.bitreverse.i16(i16 %tmp) + ret i16 %tmp2 +} + +define i32 @test_bswap_bitreverse_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_bswap_bitreverse_i32: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI8_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI8_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI8_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.bswap.i32(i32 %a) + %tmp2 = call i32 @llvm.bitreverse.i32(i32 %tmp) + ret i32 %tmp2 +} + +define i64 @test_bswap_bitreverse_i64(i64 %a) nounwind { +; XTENSA-LABEL: test_bswap_bitreverse_i64: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI9_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a10, a2, a9 +; XTENSA-NEXT: slli a10, a10, 4 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: srli a10, a8, 2 +; XTENSA-NEXT: l32r a11, .LCPI9_1 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 1 +; XTENSA-NEXT: l32r a7, .LCPI9_2 +; XTENSA-NEXT: and a10, a10, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a10, a8 +; XTENSA-NEXT: srli a8, a3, 4 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a3, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: and a9, a9, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: and a9, a9, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a3, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i64 @llvm.bswap.i64(i64 %a) + %tmp2 = call i64 @llvm.bitreverse.i64(i64 %tmp) + ret i64 %tmp2 +} + +define i16 @test_bitreverse_bswap_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_bswap_i16: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI10_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r 
a10, .LCPI10_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI10_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.bitreverse.i16(i16 %a) + %tmp2 = call i16 @llvm.bswap.i16(i16 %tmp) + ret i16 %tmp2 +} + +define i32 @test_bitreverse_bswap_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_bswap_i32: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI11_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI11_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI11_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.bitreverse.i32(i32 %a) + %tmp2 = call i32 @llvm.bswap.i32(i32 %tmp) + ret i32 %tmp2 +} + +define i64 @test_bitreverse_bswap_i64(i64 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_bswap_i64: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI12_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a10, a2, a9 +; XTENSA-NEXT: slli a10, a10, 4 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: srli a10, a8, 2 +; XTENSA-NEXT: l32r a11, .LCPI12_1 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 1 +; XTENSA-NEXT: l32r a7, .LCPI12_2 +; XTENSA-NEXT: and a10, a10, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a10, a8 +; XTENSA-NEXT: srli a8, a3, 4 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a3, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: and a9, a9, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: and a9, a9, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a3, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i64 @llvm.bitreverse.i64(i64 %a) + %tmp2 = call i64 @llvm.bswap.i64(i64 %tmp) + ret i64 %tmp2 +} diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll new file mode 100644 index 000000000000000..f58bed19d4ee77e --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -0,0 +1,176 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +declare i32 @llvm.cttz.i32(i32, i1) +declare i32 @llvm.ctlz.i32(i32, i1) +declare i32 @llvm.ctpop.i32(i32) + +define i32 @test_cttz_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_cttz_i32: +; XTENSA: movi a8, 32 +; XTENSA-NEXT: beqz a2, .LBB0_2 +; XTENSA-NEXT: j .LBB0_1 +; XTENSA-NEXT: .LBB0_1: # %cond.false +; XTENSA-NEXT: movi a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI0_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; 
XTENSA-NEXT: l32r a9, .LCPI0_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI0_2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a8, a8, 24, 8 +; XTENSA-NEXT: .LBB0_2: # %cond.end +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) + ret i32 %tmp +} + +define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { +; XTENSA-LABEL: test_cttz_i32_zero_undef: +; XTENSA: movi a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI1_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI1_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI1_2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 24, 8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true) + ret i32 %tmp +} + +define i32 @test_ctlz_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_ctlz_i32: +; XTENSA: or a8, a2, a2 +; XTENSA-NEXT: movi a2, 32 +; XTENSA-NEXT: beqz a8, .LBB2_2 +; XTENSA-NEXT: j .LBB2_1 +; XTENSA-NEXT: .LBB2_1: # %cond.false +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 16 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, -1 +; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI2_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI2_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI2_2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 24, 8 +; XTENSA-NEXT: .LBB2_2: # %cond.end +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) + ret i32 %tmp +} + +define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { +; XTENSA-LABEL: test_ctlz_i32_zero_undef: +; XTENSA: srli a8, a2, 1 +; XTENSA-NEXT: or a8, a2, a8 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 16 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, -1 +; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI3_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI3_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; 
XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI3_2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 24, 8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 true) + ret i32 %tmp +} + +define i32 @test_ctpop_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_ctpop_i32: +; XTENSA: srli a8, a2, 1 +; XTENSA-NEXT: l32r a9, .LCPI4_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: sub a8, a2, a8 +; XTENSA-NEXT: l32r a9, .LCPI4_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI4_2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 24, 8 +; XTENSA-NEXT: ret + %1 = call i32 @llvm.ctpop.i32(i32 %a) + ret i32 %1 +} diff --git a/llvm/test/CodeGen/Xtensa/div.ll b/llvm/test/CodeGen/Xtensa/div.ll new file mode 100644 index 000000000000000..e10e976fb1b386a --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/div.ll @@ -0,0 +1,486 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +define i32 @udiv(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: udiv: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i32 %a, %b + ret i32 %1 +} + +define i32 @udiv_constant(i32 %a) nounwind { +; XTENSA-LABEL: udiv_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i32 %a, 5 + ret i32 %1 +} + +define i32 @udiv_pow2(i32 %a) nounwind { +; XTENSA-LABEL: udiv_pow2: +; XTENSA: srli a2, a2, 3 +; XTENSA-NEXT: ret + %1 = udiv i32 %a, 8 + ret i32 %1 +} + +define i32 @udiv_constant_lhs(i32 %a) nounwind { +; XTENSA-LABEL: udiv_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a3, a2, a2 +; XTENSA-NEXT: movi a2, 10 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i32 10, %a + ret i32 %1 +} + +define i64 @udiv64(i64 %a, i64 %b) nounwind { +; XTENSA-LABEL: udiv64: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i64 %a, %b + ret i64 %1 +} + +define i64 @udiv64_constant(i64 %a) nounwind { +; XTENSA-LABEL: udiv64_constant: +; XTENSA: addi a8, 
a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 5 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i64 %a, 5 + ret i64 %1 +} + +define i64 @udiv64_constant_lhs(i64 %a) nounwind { +; XTENSA-LABEL: udiv64_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: or a4, a2, a2 +; XTENSA-NEXT: movi a2, 10 +; XTENSA-NEXT: movi a3, 0 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i64 10, %a + ret i64 %1 +} + +define i8 @udiv8(i8 %a, i8 %b) nounwind { +; XTENSA-LABEL: udiv8: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a8, 255 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: and a3, a3, a8 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i8 %a, %b + ret i8 %1 +} + +define i8 @udiv8_constant(i8 %a) nounwind { +; XTENSA-LABEL: udiv8_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a8, 255 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i8 %a, 5 + ret i8 %1 +} + +define i8 @udiv8_pow2(i8 %a) nounwind { +; XTENSA-LABEL: udiv8_pow2: +; XTENSA: movi a8, 248 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a2, a8, 3 +; XTENSA-NEXT: ret + %1 = udiv i8 %a, 8 + ret i8 %1 +} + +define i8 @udiv8_constant_lhs(i8 %a) nounwind { +; XTENSA-LABEL: udiv8_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a8, 255 +; XTENSA-NEXT: and a3, a2, a8 +; XTENSA-NEXT: movi a2, 10 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i8 10, %a + ret i8 %1 +} + +define i16 @udiv16(i16 %a, i16 %b) nounwind { +; XTENSA-LABEL: udiv16: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: and a3, a3, a8 +; XTENSA-NEXT: l32r a8, .LCPI11_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i16 %a, %b + ret i16 %1 +} + +define i16 @udiv16_constant(i16 %a) nounwind { +; XTENSA-LABEL: udiv16_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI12_1 +; XTENSA-NEXT: callx0 a8 +; 
XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i16 %a, 5 + ret i16 %1 +} + +define i16 @udiv16_pow2(i16 %a) nounwind { +; XTENSA-LABEL: udiv16_pow2: +; XTENSA: l32r a8, .LCPI13_0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a2, a8, 3 +; XTENSA-NEXT: ret + %1 = udiv i16 %a, 8 + ret i16 %1 +} + +define i32 @sdiv(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: sdiv: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i32 %a, %b + ret i32 %1 +} + +define i32 @sdiv_constant_lhs(i32 %a) nounwind { +; XTENSA-LABEL: sdiv_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a3, a2, a2 +; XTENSA-NEXT: movi a2, -10 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i32 -10, %a + ret i32 %1 +} + +define i64 @sdiv64(i64 %a, i64 %b) nounwind { +; XTENSA-LABEL: sdiv64: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i64 %a, %b + ret i64 %1 +} + +define i64 @sdiv64_constant(i64 %a) nounwind { +; XTENSA-LABEL: sdiv64_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 5 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i64 %a, 5 + ret i64 %1 +} + +define i64 @sdiv64_constant_lhs(i64 %a) nounwind { +; XTENSA-LABEL: sdiv64_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: or a4, a2, a2 +; XTENSA-NEXT: movi a2, 10 +; XTENSA-NEXT: movi a3, 0 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i64 10, %a + ret i64 %1 +} + + +define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: sdiv64_sext_operands: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a4, a3, a3 +; XTENSA-NEXT: srai a3, a2, 31 +; XTENSA-NEXT: srai a5, a4, 31 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sext i32 %a to i64 + %2 = sext i32 %b to i64 + %3 = sdiv i64 %1, %2 + ret i64 %3 +} + +define i8 @sdiv8(i8 %a, i8 %b) nounwind { +; XTENSA-LABEL: sdiv8: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; 
XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a2, a8, 24 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a3, a8, 24 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i8 %a, %b + ret i8 %1 +} + +define i8 @sdiv8_constant(i8 %a) nounwind { +; XTENSA-LABEL: sdiv8_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a2, a8, 24 +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i8 %a, 5 + ret i8 %1 +} + +define i8 @sdiv8_pow2(i8 %a) nounwind { +; XTENSA-LABEL: sdiv8_pow2: +; XTENSA: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: srli a8, a8, 12 +; XTENSA-NEXT: movi a9, 7 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a2, a8 +; XTENSA-NEXT: slli a8, a8, 24 +; XTENSA-NEXT: srai a2, a8, 27 +; XTENSA-NEXT: ret + %1 = sdiv i8 %a, 8 + ret i8 %1 +} + +define i8 @sdiv8_constant_lhs(i8 %a) nounwind { +; XTENSA-LABEL: sdiv8_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a3, a8, 24 +; XTENSA-NEXT: movi a2, -10 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i8 -10, %a + ret i8 %1 +} + +define i16 @sdiv16(i16 %a, i16 %b) nounwind { +; XTENSA-LABEL: sdiv16: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a2, a8, 16 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a3, a8, 16 +; XTENSA-NEXT: l32r a8, .LCPI24_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i16 %a, %b + ret i16 %1 +} + +define i16 @sdiv16_constant(i16 %a) nounwind { +; XTENSA-LABEL: sdiv16_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a2, a8, 16 +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI25_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i16 %a, 5 + ret i16 %1 +} + +define i16 @sdiv16_constant_lhs(i16 %a) nounwind { +; XTENSA-LABEL: sdiv16_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a3, a8, 16 +; XTENSA-NEXT: movi a2, -10 +; XTENSA-NEXT: l32r a8, .LCPI26_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i16 -10, %a + ret i16 %1 +} + +define i32 @sdiv_pow2(i32 %a) nounwind { +; XTENSA-LABEL: sdiv_pow2: +; XTENSA: srai a8, a2, 31 +; XTENSA-NEXT: extui a8, a8, 29, 3 +; XTENSA-NEXT: add a8, a2, a8 +; 
XTENSA-NEXT: srai a2, a8, 3 +; XTENSA-NEXT: ret + %1 = sdiv i32 %a, 8 + ret i32 %1 +} + +define i32 @sdiv_pow2_2(i32 %a) nounwind { +; XTENSA-LABEL: sdiv_pow2_2: +; XTENSA: srai a8, a2, 31 +; XTENSA-NEXT: extui a8, a8, 16, 16 +; XTENSA-NEXT: add a8, a2, a8 +; XTENSA-NEXT: srai a2, a8, 16 +; XTENSA-NEXT: ret + %1 = sdiv i32 %a, 65536 + ret i32 %1 +} + +define i16 @sdiv16_pow2(i16 %a) nounwind { +; XTENSA-LABEL: sdiv16_pow2: +; XTENSA: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: extui a8, a8, 28, 4 +; XTENSA-NEXT: movi a9, 7 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a2, a8 +; XTENSA-NEXT: slli a8, a8, 16 +; XTENSA-NEXT: srai a2, a8, 19 +; XTENSA-NEXT: ret + %1 = sdiv i16 %a, 8 + ret i16 %1 +} diff --git a/llvm/test/CodeGen/Xtensa/mul.ll b/llvm/test/CodeGen/Xtensa/mul.ll new file mode 100644 index 000000000000000..9b13897293dc1b5 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/mul.ll @@ -0,0 +1,672 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +define signext i32 @square(i32 %a) nounwind { +; XTENSA-LABEL: square: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: or a3, a2, a2 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, %a + ret i32 %1 +} + +define signext i32 @mul(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: mul: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, %b + ret i32 %1 +} + +define signext i32 @mul_constant(i32 %a) nounwind { +; XTENSA-LABEL: mul_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 5 + ret i32 %1 +} + +define i32 @mul_pow2(i32 %a) nounwind { +; XTENSA-LABEL: mul_pow2: +; XTENSA: slli a2, a2, 3 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 8 + ret i32 %1 +} + +define i64 @mul64(i64 %a, i64 %b) nounwind { +; XTENSA-LABEL: mul64: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, %b + ret i64 %1 +} + +define i64 @mul64_constant(i64 %a) nounwind { +; XTENSA-LABEL: mul64_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 5 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, 5 + ret i64 %1 +} + +define i32 @mulhs(i32 %a, i32 
%b) nounwind {
+; XTENSA-LABEL: mulhs:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: or a4, a3, a3
+; XTENSA-NEXT: srai a3, a2, 31
+; XTENSA-NEXT: srai a5, a4, 31
+; XTENSA-NEXT: l32r a8, .LCPI6_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: or a2, a3, a3
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = sext i32 %a to i64
+  %2 = sext i32 %b to i64
+  %3 = mul i64 %1, %2
+  %4 = lshr i64 %3, 32
+  %5 = trunc i64 %4 to i32
+  ret i32 %5
+}
+
+define i32 @mulhs_positive_constant(i32 %a) nounwind {
+; XTENSA-LABEL: mulhs_positive_constant:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: srai a3, a2, 31
+; XTENSA-NEXT: movi a4, 5
+; XTENSA-NEXT: movi a5, 0
+; XTENSA-NEXT: l32r a8, .LCPI7_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: or a2, a3, a3
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = sext i32 %a to i64
+  %2 = mul i64 %1, 5
+  %3 = lshr i64 %2, 32
+  %4 = trunc i64 %3 to i32
+  ret i32 %4
+}
+
+define i32 @mulhs_negative_constant(i32 %a) nounwind {
+; XTENSA-LABEL: mulhs_negative_constant:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: srai a3, a2, 31
+; XTENSA-NEXT: movi a4, -5
+; XTENSA-NEXT: movi a5, -1
+; XTENSA-NEXT: l32r a8, .LCPI8_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: or a2, a3, a3
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = sext i32 %a to i64
+  %2 = mul i64 %1, -5
+  %3 = lshr i64 %2, 32
+  %4 = trunc i64 %3 to i32
+  ret i32 %4
+}
+
+define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
+; XTENSA-LABEL: mulhu:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: or a4, a3, a3
+; XTENSA-NEXT: movi a3, 0
+; XTENSA-NEXT: l32r a8, .LCPI9_0
+; XTENSA-NEXT: or a5, a3, a3
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: or a2, a3, a3
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = zext i32 %a to i64
+  %2 = zext i32 %b to i64
+  %3 = mul i64 %1, %2
+  %4 = lshr i64 %3, 32
+  %5 = trunc i64 %4 to i32
+  ret i32 %5
+}
+
+define i32 @mulhsu(i32 %a, i32 %b) nounwind {
+; XTENSA-LABEL: mulhsu:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: or a4, a3, a3
+; XTENSA-NEXT: srai a5, a4, 31
+; XTENSA-NEXT: movi a3, 0
+; XTENSA-NEXT: l32r a8, .LCPI10_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: or a2, a3, a3
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = zext i32 %a to i64
+  %2 = sext i32 %b to i64
+  %3 = mul i64 %1, %2
+  %4 = lshr i64 %3, 32
+  %5 = trunc i64 %4 to i32
+  ret i32 %5
+}
+
+define i32 @mulhu_constant(i32 %a) nounwind {
+; XTENSA-LABEL: mulhu_constant:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a4, 5
+; XTENSA-NEXT: movi a3, 0
+; XTENSA-NEXT: l32r a8, .LCPI11_0
+; XTENSA-NEXT: or a5, a3, a3
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: or a2, a3, a3
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = zext i32 %a to i64
+  %2 = mul i64 %1, 5
+  %3 = lshr i64 %2, 32
+  %4 = trunc i64 %3 to i32
+  ret i32 %4
+}
+
+define i32 @muli32_p65(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_p65:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a3, 65
+; XTENSA-NEXT: l32r a8, .LCPI12_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 65
+  ret i32 %1
+}
+
+define i32 @muli32_p63(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_p63:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a3, 63
+; XTENSA-NEXT: l32r a8, .LCPI13_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 63
+  ret i32 %1
+}
+
+define i64 @muli64_p65(i64 %a) nounwind {
+; XTENSA-LABEL: muli64_p65:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a4, 65
+; XTENSA-NEXT: movi a5, 0
+; XTENSA-NEXT: l32r a8, .LCPI14_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i64 %a, 65
+  ret i64 %1
+}
+
+define i64 @muli64_p63(i64 %a) nounwind {
+; XTENSA-LABEL: muli64_p63:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a4, 63
+; XTENSA-NEXT: movi a5, 0
+; XTENSA-NEXT: l32r a8, .LCPI15_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i64 %a, 63
+  ret i64 %1
+}
+
+define i32 @muli32_m63(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_m63:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a3, -63
+; XTENSA-NEXT: l32r a8, .LCPI16_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, -63
+  ret i32 %1
+}
+
+define i32 @muli32_m65(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_m65:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a3, -65
+; XTENSA-NEXT: l32r a8, .LCPI17_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, -65
+  ret i32 %1
+}
+
+define i64 @muli64_m63(i64 %a) nounwind {
+; XTENSA-LABEL: muli64_m63:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a4, -63
+; XTENSA-NEXT: movi a5, -1
+; XTENSA-NEXT: l32r a8, .LCPI18_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i64 %a, -63
+  ret i64 %1
+}
+
+define i64 @muli64_m65(i64 %a) nounwind {
+; XTENSA-LABEL: muli64_m65:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a4, -65
+; XTENSA-NEXT: movi a5, -1
+; XTENSA-NEXT: l32r a8, .LCPI19_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i64 %a, -65
+  ret i64 %1
+}
+
+define i32 @muli32_p384(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_p384:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a3, 384
+; XTENSA-NEXT: l32r a8, .LCPI20_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 384
+  ret i32 %1
+}
+
+define i32 @muli32_p12288(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_p12288:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a3, .LCPI21_0
+; XTENSA-NEXT: l32r a8, .LCPI21_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 12288
+  ret i32 %1
+}
+
+define i32 @muli32_p4352(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_p4352:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a3, .LCPI22_0
+; XTENSA-NEXT: l32r a8, .LCPI22_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 4352
+  ret i32 %1
+}
+
+define i32 @muli32_p3840(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_p3840:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a3, .LCPI23_0
+; XTENSA-NEXT: l32r a8, .LCPI23_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 3840
+  ret i32 %1
+}
+
+define i32 @muli32_m3840(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_m3840:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a3, .LCPI24_0
+; XTENSA-NEXT: l32r a8, .LCPI24_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, -3840
+  ret i32 %1
+}
+
+define i32 @muli32_m4352(i32 %a) nounwind {
+; XTENSA-LABEL: muli32_m4352:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a3, .LCPI25_0
+; XTENSA-NEXT: l32r a8, .LCPI25_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, -4352
+  ret i32 %1
+}
+
+define i64 @muli64_p4352(i64 %a) nounwind {
+; XTENSA-LABEL: muli64_p4352:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a4, .LCPI26_0
+; XTENSA-NEXT: movi a5, 0
+; XTENSA-NEXT: l32r a8, .LCPI26_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i64 %a, 4352
+  ret i64 %1
+}
+
+define i64 @muli64_p3840(i64 %a) nounwind {
+; XTENSA-LABEL: muli64_p3840:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a4, .LCPI27_0
+; XTENSA-NEXT: movi a5, 0
+; XTENSA-NEXT: l32r a8, .LCPI27_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i64 %a, 3840
+  ret i64 %1
+}
+
+define i64 @muli64_m4352(i64 %a) nounwind {
+; XTENSA-LABEL: muli64_m4352:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a4, .LCPI28_0
+; XTENSA-NEXT: movi a5, -1
+; XTENSA-NEXT: l32r a8, .LCPI28_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i64 %a, -4352
+  ret i64 %1
+}
+
+define i64 @muli64_m3840(i64 %a) nounwind {
+; XTENSA-LABEL: muli64_m3840:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l32r a4, .LCPI29_0
+; XTENSA-NEXT: movi a5, -1
+; XTENSA-NEXT: l32r a8, .LCPI29_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i64 %a, -3840
+  ret i64 %1
+}
+
+define i128 @muli128_m3840(i128 %a) nounwind {
+; XTENSA-LABEL: muli128_m3840:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a7, -1
+; XTENSA-NEXT: s32i a7, a1, 4
+; XTENSA-NEXT: s32i a7, a1, 0
+; XTENSA-NEXT: l32r a6, .LCPI30_0
+; XTENSA-NEXT: l32r a8, .LCPI30_1
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i128 %a, -3840
+  ret i128 %1
+}
+
+define i128 @muli128_m63(i128 %a) nounwind {
+; XTENSA-LABEL: muli128_m63:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a7, -1
+; XTENSA-NEXT: s32i a7, a1, 4
+; XTENSA-NEXT: s32i a7, a1, 0
+; XTENSA-NEXT: movi a6, -63
+; XTENSA-NEXT: l32r a8, .LCPI31_0
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = mul i128 %a, -63
+  ret i128 %1
+}
+
+define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
+; XTENSA-LABEL: mulhsu_i64:
+; XTENSA: addi a8, a1, -16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded Spill
+; XTENSA-NEXT: or a7, a5, a5
+; XTENSA-NEXT: or a6, a4, a4
+; XTENSA-NEXT: srai a8, a7, 31
+; XTENSA-NEXT: s32i a8, a1, 4
+; XTENSA-NEXT: s32i a8, a1, 0
+; XTENSA-NEXT: movi a4, 0
+; XTENSA-NEXT: l32r a8, .LCPI32_0
+; XTENSA-NEXT: or a5, a4, a4
+; XTENSA-NEXT: callx0 a8
+; XTENSA-NEXT: or a2, a4, a4
+; XTENSA-NEXT: or a3, a5, a5
+; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload
+; XTENSA-NEXT: addi a8, a1, 16
+; XTENSA-NEXT: or a1, a8, a8
+; XTENSA-NEXT: ret
+  %1 = zext i64 %a to i128
+  %2 = sext i64 %b to i128
+  %3 = mul i128 %1, %2
+  %4 = lshr i128 %3, 64
+  %5 = trunc i128 %4 to i64
+  ret i64 %5
+}
+
+define i8 @muladd_demand(i8 %x, i8 %y) nounwind {
+; XTENSA-LABEL: muladd_demand:
+; XTENSA: slli a8, a2, 1
+; XTENSA-NEXT: sub a8, a3, a8
+; XTENSA-NEXT: movi a9, 15
+; XTENSA-NEXT: and a2, a8, a9
+; XTENSA-NEXT: ret
+  %m = mul i8 %x, 14
+  %a = add i8 %y, %m
+  %r = and i8 %a, 15
+  ret i8 %r
+}
+
+define i8 @mulsub_demand(i8 %x, i8 %y) nounwind {
+; XTENSA-LABEL: mulsub_demand:
+; XTENSA: addx2 a8, a2, a3
+; XTENSA-NEXT: movi a9, 15
+; XTENSA-NEXT: and a2, a8, a9
+; XTENSA-NEXT: ret
+  %m = mul i8 %x, 14
+  %a = sub i8 %y, %m
+  %r = and i8 %a, 15
+  ret i8 %r
+}
+
+define i8 @muladd_demand_2(i8 %x, i8 %y) nounwind {
+; XTENSA-LABEL: muladd_demand_2:
+; XTENSA: slli a8, a2, 1
+; XTENSA-NEXT: sub a8, a3, a8
+; XTENSA-NEXT: movi a9, -16
+; XTENSA-NEXT: or a2, a8, a9
+; XTENSA-NEXT: ret
+  %m = mul i8 %x, 14
+  %a = add i8 %y, %m
+  %r = or i8 %a, 240
+  ret i8 %r
+}
+
+define i8 @mulsub_demand_2(i8 %x, i8 %y) nounwind {
+; XTENSA-LABEL: mulsub_demand_2:
+; XTENSA: addx2 a8, a2, a3
+; XTENSA-NEXT: movi a9, -16
+; XTENSA-NEXT: or a2, a8, a9
+; XTENSA-NEXT: ret
+  %m = mul i8 %x, 14
+  %a = sub i8 %y, %m
+  %r = or i8 %a, 240
+  ret i8 %r
+}
+
+define signext i32 @mul_imm_2(i32 %a) nounwind {
+; XTENSA-LABEL: mul_imm_2:
+; XTENSA: slli a2, a2, 1
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 2
+  ret i32 %1
+}
+
+define signext i32 @mul_imm_1024(i32 %a) nounwind {
+; XTENSA-LABEL: mul_imm_1024:
+; XTENSA: slli a2, a2, 10
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 1024
+  ret i32 %1
+}
+
+define signext i32 @mul_imm_16384(i32 %a) nounwind {
+; XTENSA-LABEL: mul_imm_16384:
+; XTENSA: slli a2, a2, 14
+; XTENSA-NEXT: ret
+  %1 = mul i32 %a, 16384
+  ret i32 %1
+}
+
+define <4 x i32> @mul_vec_splat_constant(<4 x i32> %a) {
+; XTENSA-LABEL: mul_vec_splat_constant:
+; XTENSA: slli a2, a2, 2
+; XTENSA-NEXT: slli a3, a3, 2
+; XTENSA-NEXT: slli a4, a4, 2
+; XTENSA-NEXT: slli a5, a5, 2
+; XTENSA-NEXT: ret
+  %mul = mul <4 x i32> %a, <i32 4, i32 4, i32 4, i32 4>
+  ret <4 x i32> %mul
+}
diff --git a/llvm/test/CodeGen/Xtensa/rotl-rotr.ll b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll
new file mode 100644
index 000000000000000..350315e9aefdae3
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll
@@ -0,0 +1,501 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=XTENSA %s
+
+define i32 @rotl_32(i32 %x, i32 %y) nounwind {
+; XTENSA-LABEL: rotl_32:
+; XTENSA: ssl a3
+; XTENSA-NEXT: sll a8, a2
+; XTENSA-NEXT: movi a9, 32
+; XTENSA-NEXT: sub a9, a9, a3
+; XTENSA-NEXT: ssr a9
+; XTENSA-NEXT: srl a9, a2
+; XTENSA-NEXT: or a2, a8, a9
+; XTENSA-NEXT: ret
+  %z = sub i32 32, %y
+  %b = shl i32 %x, %y
+  %c = lshr i32 %x, %z
+  %d = or i32 %b, %c
+  ret i32 %d
+}
+
+define i32 @rotr_32(i32 %x, i32 %y) nounwind {
+; XTENSA-LABEL: rotr_32:
+; XTENSA: ssr a3
+; XTENSA-NEXT: srl a8, a2
+; XTENSA-NEXT: movi a9, 32
+; XTENSA-NEXT: sub a9, a9, a3
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a9, a2
+; XTENSA-NEXT: or a2, a8, a9
+; XTENSA-NEXT: ret
+  %z = sub i32 32, %y
+  %b = lshr i32 %x, %y
+  %c = shl i32 %x, %z
+  %d = or i32 %b, %c
+  ret i32 %d
+}
+
+define i64 @rotl_64(i64 %x, i64 %y) nounwind {
+; XTENSA-LABEL: rotl_64:
+; XTENSA: movi a8, 64
+; XTENSA-NEXT: sub a8, a8, a4
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: src a11, a3, a2
+; XTENSA-NEXT: movi a9, 32
+; XTENSA-NEXT: sub a9, a9, a4
+; XTENSA-NEXT: ssr a9
+; XTENSA-NEXT: srl a7, a3
+; XTENSA-NEXT: movi a10, 0
+; XTENSA-NEXT: blt a9, a10, .LBB2_2
+; XTENSA-NEXT: # %bb.1:
+; XTENSA-NEXT: or a11, a7, a7
+; XTENSA-NEXT: .LBB2_2:
+; XTENSA-NEXT: ssl a4
+; XTENSA-NEXT: sll a7, a2
+; XTENSA-NEXT: addi a5, a4, -32
+; XTENSA-NEXT: blt a5, a10, .LBB2_4
+; XTENSA-NEXT: # %bb.3:
+; XTENSA-NEXT: or a7, a10, a10
+; XTENSA-NEXT: .LBB2_4:
+; XTENSA-NEXT: ssl a4
+; XTENSA-NEXT: src a6, a3, a2
+; XTENSA-NEXT: ssl a5
+; XTENSA-NEXT: sll a4, a2
+; XTENSA-NEXT: blt a5, a10, .LBB2_6
+; XTENSA-NEXT: # %bb.5:
+; XTENSA-NEXT: or a6, a4, a4
+; XTENSA-NEXT: .LBB2_6:
+; XTENSA-NEXT: or a2, a7, a11
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: srl a8, a3
+; XTENSA-NEXT: blt a9, a10, .LBB2_8
+; XTENSA-NEXT: # %bb.7:
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: .LBB2_8:
+; XTENSA-NEXT: or a3, a6, a8
+; XTENSA-NEXT: ret
+  %z = sub i64 64, %y
+  %b = shl i64 %x, %y
+  %c = lshr i64 %x, %z
+  %d = or i64 %b, %c
+  ret i64 %d
+}
+
+define i64 @rotr_64(i64 %x, i64 %y) nounwind {
+; XTENSA-LABEL: rotr_64:
+; XTENSA: ssr a4
+; XTENSA-NEXT: src a10, a3, a2
+; XTENSA-NEXT: addi a8, a4, -32
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: srl a11, a3
+; XTENSA-NEXT: movi a9, 0
+; XTENSA-NEXT: blt a8, a9, .LBB3_2
+; XTENSA-NEXT: # %bb.1:
+; XTENSA-NEXT: or a10, a11, a11
+; XTENSA-NEXT: .LBB3_2:
+; XTENSA-NEXT: movi a11, 32
+; XTENSA-NEXT: sub a7, a11, a4
+; XTENSA-NEXT: movi a11, 64
+; XTENSA-NEXT: sub a11, a11, a4
+; XTENSA-NEXT: ssl a11
+; XTENSA-NEXT: sll a6, a2
+; XTENSA-NEXT: blt a7, a9, .LBB3_4
+; XTENSA-NEXT: # %bb.3:
+; XTENSA-NEXT: or a6, a9, a9
+; XTENSA-NEXT: .LBB3_4:
+; XTENSA-NEXT: ssl a11
+; XTENSA-NEXT: src a11, a3, a2
+; XTENSA-NEXT: ssl a7
+; XTENSA-NEXT: sll a5, a2
+; XTENSA-NEXT: blt a7, a9, .LBB3_6
+; XTENSA-NEXT: # %bb.5:
+; XTENSA-NEXT: or a11, a5, a5
+; XTENSA-NEXT: .LBB3_6:
+; XTENSA-NEXT: or a2, a10, a6
+; XTENSA-NEXT: ssr a4
+; XTENSA-NEXT: srl a10, a3
+; XTENSA-NEXT: blt a8, a9, .LBB3_8
+; XTENSA-NEXT: # %bb.7:
+; XTENSA-NEXT: or a10, a9, a9
+; XTENSA-NEXT: .LBB3_8:
+; XTENSA-NEXT: or a3, a10, a11
+; XTENSA-NEXT: ret
+  %z = sub i64 64, %y
+  %b = lshr i64 %x, %y
+  %c = shl i64 %x, %z
+  %d = or i64 %b, %c
+  ret i64 %d
+}
+
+define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind {
+; XTENSA-LABEL: rotl_32_mask:
+; XTENSA: ssl a3
+; XTENSA-NEXT: sll a8, a2
+; XTENSA-NEXT: neg a9, a3
+; XTENSA-NEXT: movi a10, 31
+; XTENSA-NEXT: and a9, a9, a10
+; XTENSA-NEXT: ssr a9
+; XTENSA-NEXT: srl a9, a2
+; XTENSA-NEXT: or a2, a8, a9
+; XTENSA-NEXT: ret
+  %z = sub i32 0, %y
+  %and = and i32 %z, 31
+  %b = shl i32 %x, %y
+  %c = lshr i32 %x, %and
+  %d = or i32 %b, %c
+  ret i32 %d
+}
+
+define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
+; XTENSA-LABEL: rotl_32_mask_and_63_and_31:
+; XTENSA: movi a8, 63
+; XTENSA-NEXT: and a8, a3, a8
+; XTENSA-NEXT: ssl a8
+; XTENSA-NEXT: sll a8, a2
+; XTENSA-NEXT: neg a9, a3
+; XTENSA-NEXT: movi a10, 31
+; XTENSA-NEXT: and a9, a9, a10
+; XTENSA-NEXT: ssr a9
+; XTENSA-NEXT: srl a9, a2
+; XTENSA-NEXT: or a2, a8, a9
+; XTENSA-NEXT: ret
+  %a = and i32 %y, 63
+  %b = shl i32 %x, %a
+  %c = sub i32 0, %y
+  %d = and i32 %c, 31
+  %e = lshr i32 %x, %d
+  %f = or i32 %b, %e
+  ret i32 %f
+}
+
+define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind {
+; XTENSA-LABEL: rotr_32_mask:
+; XTENSA: ssr a3
+; XTENSA-NEXT: srl a8, a2
+; XTENSA-NEXT: neg a9, a3
+; XTENSA-NEXT: movi a10, 31
+; XTENSA-NEXT: and a9, a9, a10
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a9, a2
+; XTENSA-NEXT: or a2, a8, a9
+; XTENSA-NEXT: ret
+  %z = sub i32 0, %y
+  %and = and i32 %z, 31
+  %b = lshr i32 %x, %y
+  %c = shl i32 %x, %and
+  %d = or i32 %b, %c
+  ret i32 %d
+}
+
+define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
+; XTENSA-LABEL: rotr_32_mask_and_63_and_31:
+; XTENSA: movi a8, 63
+; XTENSA-NEXT: and a8, a3, a8
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: srl a8, a2
+; XTENSA-NEXT: neg a9, a3
+; XTENSA-NEXT: movi a10, 31
+; XTENSA-NEXT: and a9, a9, a10
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a9, a2
+; XTENSA-NEXT: or a2, a8, a9
+; XTENSA-NEXT: ret
+  %a = and i32 %y, 63
+  %b = lshr i32 %x, %a
+  %c = sub i32 0, %y
+  %d = and i32 %c, 31
+  %e = shl i32 %x, %d
+  %f = or i32 %b, %e
+  ret i32 %f
+}
+
+define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind {
+; XTENSA-LABEL: rotl_64_mask:
+; XTENSA: ssl a4
+; XTENSA-NEXT: src a10, a3, a2
+; XTENSA-NEXT: addi a8, a4, -32
+; XTENSA-NEXT: ssl a8
+; XTENSA-NEXT: sll a11, a2
+; XTENSA-NEXT: movi a9, 0
+; XTENSA-NEXT: blt a8, a9, .LBB8_2
+; XTENSA-NEXT: # %bb.1:
+; XTENSA-NEXT: or a10, a11, a11
+; XTENSA-NEXT: .LBB8_2:
+; XTENSA-NEXT: neg a11, a4
+; XTENSA-NEXT: movi a7, 63
+; XTENSA-NEXT: and a7, a11, a7
+; XTENSA-NEXT: ssr a7
+; XTENSA-NEXT: srl a11, a3
+; XTENSA-NEXT: addi a6, a7, -32
+; XTENSA-NEXT: blt a6, a9, .LBB8_4
+; XTENSA-NEXT: # %bb.3:
+; XTENSA-NEXT: or a11, a9, a9
+; XTENSA-NEXT: .LBB8_4:
+; XTENSA-NEXT: ssr a7
+; XTENSA-NEXT: src a7, a3, a2
+; XTENSA-NEXT: ssr a6
+; XTENSA-NEXT: srl a5, a3
+; XTENSA-NEXT: blt a6, a9, .LBB8_6
+; XTENSA-NEXT: # %bb.5:
+; XTENSA-NEXT: or a7, a5, a5
+; XTENSA-NEXT: .LBB8_6:
+; XTENSA-NEXT: or a3, a10, a11
+; XTENSA-NEXT: ssl a4
+; XTENSA-NEXT: sll a10, a2
+; XTENSA-NEXT: blt a8, a9, .LBB8_8
+; XTENSA-NEXT: # %bb.7:
+; XTENSA-NEXT: or a10, a9, a9
+; XTENSA-NEXT: .LBB8_8:
+; XTENSA-NEXT: or a2, a10, a7
+; XTENSA-NEXT: ret
+  %z = sub i64 0, %y
+  %and = and i64 %z, 63
+  %b = shl i64 %x, %y
+  %c = lshr i64 %x, %and
+  %d = or i64 %b, %c
+  ret i64 %d
+}
+
+define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
+; XTENSA-LABEL: rotl_64_mask_and_127_and_63:
+; XTENSA: movi a8, 127
+; XTENSA-NEXT: and a8, a4, a8
+; XTENSA-NEXT: ssl a8
+; XTENSA-NEXT: src a11, a3, a2
+; XTENSA-NEXT: addi a9, a8, -32
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a7, a2
+; XTENSA-NEXT: movi a10, 0
+; XTENSA-NEXT: blt a9, a10, .LBB9_2
+; XTENSA-NEXT: # %bb.1:
+; XTENSA-NEXT: or a11, a7, a7
+; XTENSA-NEXT: .LBB9_2:
+; XTENSA-NEXT: neg a7, a4
+; XTENSA-NEXT: movi a6, 63
+; XTENSA-NEXT: and a6, a7, a6
+; XTENSA-NEXT: ssr a6
+; XTENSA-NEXT: srl a7, a3
+; XTENSA-NEXT: addi a5, a6, -32
+; XTENSA-NEXT: blt a5, a10, .LBB9_4
+; XTENSA-NEXT: # %bb.3:
+; XTENSA-NEXT: or a7, a10, a10
+; XTENSA-NEXT: .LBB9_4:
+; XTENSA-NEXT: ssr a6
+; XTENSA-NEXT: src a6, a3, a2
+; XTENSA-NEXT: ssr a5
+; XTENSA-NEXT: srl a4, a3
+; XTENSA-NEXT: blt a5, a10, .LBB9_6
+; XTENSA-NEXT: # %bb.5:
+; XTENSA-NEXT: or a6, a4, a4
+; XTENSA-NEXT: .LBB9_6:
+; XTENSA-NEXT: or a3, a11, a7
+; XTENSA-NEXT: ssl a8
+; XTENSA-NEXT: sll a8, a2
+; XTENSA-NEXT: blt a9, a10, .LBB9_8
+; XTENSA-NEXT: # %bb.7:
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: .LBB9_8:
+; XTENSA-NEXT: or a2, a8, a6
+; XTENSA-NEXT: ret
+  %a = and i64 %y, 127
+  %b = shl i64 %x, %a
+  %c = sub i64 0, %y
+  %d = and i64 %c, 63
+  %e = lshr i64 %x, %d
+  %f = or i64 %b, %e
+  ret i64 %f
+}
+
+define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind {
+; XTENSA-LABEL: rotr_64_mask:
+; XTENSA: ssr a4
+; XTENSA-NEXT: src a10, a3, a2
+; XTENSA-NEXT: addi a8, a4, -32
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: srl a11, a3
+; XTENSA-NEXT: movi a9, 0
+; XTENSA-NEXT: blt a8, a9, .LBB10_2
+; XTENSA-NEXT: # %bb.1:
+; XTENSA-NEXT: or a10, a11, a11
+; XTENSA-NEXT: .LBB10_2:
+; XTENSA-NEXT: neg a11, a4
+; XTENSA-NEXT: movi a7, 63
+; XTENSA-NEXT: and a7, a11, a7
+; XTENSA-NEXT: ssl a7
+; XTENSA-NEXT: sll a11, a2
+; XTENSA-NEXT: addi a6, a7, -32
+; XTENSA-NEXT: blt a6, a9, .LBB10_4
+; XTENSA-NEXT: # %bb.3:
+; XTENSA-NEXT: or a11, a9, a9
+; XTENSA-NEXT: .LBB10_4:
+; XTENSA-NEXT: ssl a7
+; XTENSA-NEXT: src a7, a3, a2
+; XTENSA-NEXT: ssl a6
+; XTENSA-NEXT: sll a5, a2
+; XTENSA-NEXT: blt a6, a9, .LBB10_6
+; XTENSA-NEXT: # %bb.5:
+; XTENSA-NEXT: or a7, a5, a5
+; XTENSA-NEXT: .LBB10_6:
+; XTENSA-NEXT: or a2, a10, a11
+; XTENSA-NEXT: ssr a4
+; XTENSA-NEXT: srl a10, a3
+; XTENSA-NEXT: blt a8, a9, .LBB10_8
+; XTENSA-NEXT: # %bb.7:
+; XTENSA-NEXT: or a10, a9, a9
+; XTENSA-NEXT: .LBB10_8:
+; XTENSA-NEXT: or a3, a10, a7
+; XTENSA-NEXT: ret
+  %z = sub i64 0, %y
+  %and = and i64 %z, 63
+  %b = lshr i64 %x, %y
+  %c = shl i64 %x, %and
+  %d = or i64 %b, %c
+  ret i64 %d
+}
+
+define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
+; XTENSA-LABEL: rotr_64_mask_and_127_and_63:
+; XTENSA: movi a8, 127
+; XTENSA-NEXT: and a8, a4, a8
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: src a11, a3, a2
+; XTENSA-NEXT: addi a9, a8, -32
+; XTENSA-NEXT: ssr a9
+; XTENSA-NEXT: srl a7, a3
+; XTENSA-NEXT: movi a10, 0
+; XTENSA-NEXT: blt a9, a10, .LBB11_2
+; XTENSA-NEXT: # %bb.1:
+; XTENSA-NEXT: or a11, a7, a7
+; XTENSA-NEXT: .LBB11_2:
+; XTENSA-NEXT: neg a7, a4
+; XTENSA-NEXT: movi a6, 63
+; XTENSA-NEXT: and a6, a7, a6
+; XTENSA-NEXT: ssl a6
+; XTENSA-NEXT: sll a7, a2
+; XTENSA-NEXT: addi a5, a6, -32
+; XTENSA-NEXT: blt a5, a10, .LBB11_4
+; XTENSA-NEXT: # %bb.3:
+; XTENSA-NEXT: or a7, a10, a10
+; XTENSA-NEXT: .LBB11_4:
+; XTENSA-NEXT: ssl a6
+; XTENSA-NEXT: src a6, a3, a2
+; XTENSA-NEXT: ssl a5
+; XTENSA-NEXT: sll a4, a2
+; XTENSA-NEXT: blt a5, a10, .LBB11_6
+; XTENSA-NEXT: # %bb.5:
+; XTENSA-NEXT: or a6, a4, a4
+; XTENSA-NEXT: .LBB11_6:
+; XTENSA-NEXT: or a2, a11, a7
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: srl a8, a3
+; XTENSA-NEXT: blt a9, a10, .LBB11_8
+; XTENSA-NEXT: # %bb.7:
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: .LBB11_8:
+; XTENSA-NEXT: or a3, a8, a6
+; XTENSA-NEXT: ret
+  %a = and i64 %y, 127
+  %b = lshr i64 %x, %a
+  %c = sub i64 0, %y
+  %d = and i64 %c, 63
+  %e = shl i64 %x, %d
+  %f = or i64 %b, %e
+  ret i64 %f
+}
+
+define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
+; XTENSA-LABEL: rotl_32_mask_shared:
+; XTENSA: movi a8, 31
+; XTENSA-NEXT: and a9, a4, a8
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a10, a2
+; XTENSA-NEXT: neg a11, a4
+; XTENSA-NEXT: and a8, a11, a8
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: srl a8, a2
+; XTENSA-NEXT: or a8, a10, a8
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a9, a3
+; XTENSA-NEXT: add a2, a8, a9
+; XTENSA-NEXT: ret
+  %maskedamt = and i32 %amt, 31
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
+  %2 = shl i32 %b, %maskedamt
+  %3 = add i32 %1, %2
+  ret i32 %3
+}
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
+; XTENSA-LABEL: rotr_32_mask_shared:
+; XTENSA: movi a8, 31
+; XTENSA-NEXT: and a9, a4, a8
+; XTENSA-NEXT: ssr a9
+; XTENSA-NEXT: srl a10, a2
+; XTENSA-NEXT: neg a11, a4
+; XTENSA-NEXT: and a8, a11, a8
+; XTENSA-NEXT: ssl a8
+; XTENSA-NEXT: sll a8, a2
+; XTENSA-NEXT: or a8, a10, a8
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a9, a3
+; XTENSA-NEXT: add a2, a8, a9
+; XTENSA-NEXT: ret
+  %maskedamt = and i32 %amt, 31
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
+  %2 = shl i32 %b, %maskedamt
+  %3 = add i32 %1, %2
+  ret i32 %3
+}
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
+; XTENSA-LABEL: rotl_32_mask_multiple:
+; XTENSA: movi a8, 31
+; XTENSA-NEXT: and a9, a4, a8
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a10, a3
+; XTENSA-NEXT: neg a11, a4
+; XTENSA-NEXT: and a8, a11, a8
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: srl a11, a3
+; XTENSA-NEXT: or a10, a10, a11
+; XTENSA-NEXT: ssl a9
+; XTENSA-NEXT: sll a9, a2
+; XTENSA-NEXT: ssr a8
+; XTENSA-NEXT: srl a8, a2
+; XTENSA-NEXT: or a8, a9, a8
+; XTENSA-NEXT: add a2, a8, a10
+; XTENSA-NEXT: ret
+  %maskedamt = and i32 %amt, 31
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
+  %2 = tail call i32 @llvm.fshl.i32(i32 %b, i32 %b, i32 %maskedamt)
+  %3 = add i32 %1, %2
+  ret i32 %3
+}
+
+define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
+; XTENSA-LABEL: rotr_32_mask_multiple:
+; XTENSA: movi a8, 31
+; XTENSA-NEXT: and a9, a4, a8
+; XTENSA-NEXT: ssr a9
+; XTENSA-NEXT: srl a10, a3
+; XTENSA-NEXT: neg a11, a4
+; XTENSA-NEXT: and a8, a11, a8
+; XTENSA-NEXT: ssl a8
+; XTENSA-NEXT: sll a11, a3
+; XTENSA-NEXT: or a10, a10, a11
+; XTENSA-NEXT: ssr a9
+; XTENSA-NEXT: srl a9, a2
+; XTENSA-NEXT: ssl a8
+; XTENSA-NEXT: sll a8, a2
+; XTENSA-NEXT: or a8, a9, a8
+; XTENSA-NEXT: add a2, a8, a10
+; XTENSA-NEXT: ret
+  %maskedamt = and i32 %amt, 31
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
+  %2 = tail call i32 @llvm.fshr.i32(i32 %b, i32 %b, i32 %maskedamt)
+  %3 = add i32 %1, %2
+  ret i32 %3
+}
diff --git a/llvm/test/CodeGen/Xtensa/shift.ll b/llvm/test/CodeGen/Xtensa/shift.ll
new file mode 100644
index 000000000000000..87e847f65d83708
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/shift.ll
@@ -0,0 +1,172 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \
+; RUN: | FileCheck %s
+
+define i32 @lshl(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: lshl:
+; CHECK: ssl a3
+; CHECK-NEXT: sll a2, a2
+; CHECK-NEXT: ret
+  %c = shl i32 %x, %y
+  ret i32 %c
+}
+
+define i32 @lshl_imm_1(i32 %x) nounwind {
+; CHECK-LABEL: lshl_imm_1:
+; CHECK: slli a2, a2, 1
+; CHECK-NEXT: ret
+  %c = shl i32 %x, 1
+  ret i32 %c
+}
+
+define i32 @lshl_imm_10(i32 %x) nounwind {
+; CHECK-LABEL: lshl_imm_10:
+; CHECK: slli a2, a2, 10
+; CHECK-NEXT: ret
+  %c = shl i32 %x, 10
+  ret i32 %c
+}
+
+define i32 @lshl_imm_31(i32 %x) nounwind {
+; CHECK-LABEL: lshl_imm_31:
+; CHECK: slli a2, a2, 31
+; CHECK-NEXT: ret
+  %c = shl i32 %x, 31
+  ret i32 %c
+}
+
+define i32 @lshr(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: lshr:
+; CHECK: ssr a3
+; CHECK-NEXT: srl a2, a2
+; CHECK-NEXT: ret
+  %c = lshr i32 %x, %y
+  ret i32 %c
+}
+
+define i32 @lshr_imm_1(i32 %x) nounwind {
+; CHECK-LABEL: lshr_imm_1:
+; CHECK: srli a2, a2, 1
+; CHECK-NEXT: ret
+  %c = lshr i32 %x, 1
+  ret i32 %c
+}
+
+define i32 @lshr_imm_15(i32 %x) nounwind {
+; CHECK-LABEL: lshr_imm_15:
+; CHECK: srli a2, a2, 15
+; CHECK-NEXT: ret
+  %c = lshr i32 %x, 15
+  ret i32 %c
+}
+
+define i32 @lshr_imm_20(i32 %x) nounwind {
+; CHECK-LABEL: lshr_imm_20:
+; CHECK: extui a2, a2, 20, 12
+; CHECK-NEXT: ret
+  %c = lshr i32 %x, 20
+  ret i32 %c
+}
+
+define i32 @ashr(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ashr:
+; CHECK: ssr a3
+; CHECK-NEXT: sra a2, a2
+; CHECK-NEXT: ret
+  %c = ashr i32 %x, %y
+  ret i32 %c
+}
+
+define i32 @ashr_imm_1(i32 %x) nounwind {
+; CHECK-LABEL: ashr_imm_1:
+; CHECK: srai a2, a2, 1
+; CHECK-NEXT: ret
+  %c = ashr i32 %x, 1
+  ret i32 %c
+}
+
+define i32 @ashr_imm_10(i32 %x) nounwind {
+; CHECK-LABEL: ashr_imm_10:
+; CHECK: srai a2, a2, 10
+; CHECK-NEXT: ret
+  %c = ashr i32 %x, 10
+  ret i32 %c
+}
+
+define i32 @ashr_imm_31(i32 %x) nounwind {
+; CHECK-LABEL: ashr_imm_31:
+; CHECK: srai a2, a2, 31
+; CHECK-NEXT: ret
+  %c = ashr i32 %x, 31
+  ret i32 %c
+}
+
+define i64 @lshl_64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: lshl_64:
+; CHECK: ssl a4
+; CHECK-NEXT: src a3, a3, a2
+; CHECK-NEXT: addi a8, a4, -32
+; CHECK-NEXT: ssl a8
+; CHECK-NEXT: sll a10, a2
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: blt a8, a9, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a3, a10, a10
+; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: ssl a4
+; CHECK-NEXT: sll a2, a2
+; CHECK-NEXT: blt a8, a9, .LBB12_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a2, a9, a9
+; CHECK-NEXT: .LBB12_4:
+; CHECK-NEXT: ret
+  %c = shl i64 %x, %y
+  ret i64 %c
+}
+
+define i64 @lshr_64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: lshr_64:
+; CHECK: ssr a4
+; CHECK-NEXT: src a2, a3, a2
+; CHECK-NEXT: addi a8, a4, -32
+; CHECK-NEXT: ssr a8
+; CHECK-NEXT: srl a10, a3
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: blt a8, a9, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a10, a10
+; CHECK-NEXT: .LBB13_2:
+; CHECK-NEXT: ssr a4
+; CHECK-NEXT: srl a3, a3
+; CHECK-NEXT: blt a8, a9, .LBB13_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a3, a9, a9
+; CHECK-NEXT: .LBB13_4:
+; CHECK-NEXT: ret
+  %c = lshr i64 %x, %y
+  ret i64 %c
+}
+
+define i64 @ashr_64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: ashr_64:
+; CHECK: ssr a4
+; CHECK-NEXT: src a2, a3, a2
+; CHECK-NEXT: addi a9, a4, -32
+; CHECK-NEXT: ssr a9
+; CHECK-NEXT: sra a8, a3
+; CHECK-NEXT: movi a10, 0
+; CHECK-NEXT: blt a9, a10, .LBB14_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB14_2:
+; CHECK-NEXT: ssr a4
+; CHECK-NEXT: sra a8, a3
+; CHECK-NEXT: blt a9, a10, .LBB14_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: srai a8, a3, 31
+; CHECK-NEXT: .LBB14_4:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: ret
+  %c = ashr i64 %x, %y
+  ret i64 %c
+}