author    Alex Bradbury <asb@igalia.com>    2024-01-09 12:25:17 +0000
committer GitHub <noreply@github.com>       2024-01-09 12:25:17 +0000
commit    197214e39b7100dd0e88aa38cffdce9ee1f4464b (patch)
tree      ba9804159b0b8055b019f9d86a2e4a62a3544f96
parent    4cb2ef4fe372d32d1773f4dd358d6dff91518b5f (diff)
[RFC][SelectionDAG] Add and use SDNode::getAsZExtVal() helper (#76710)
This follows on from #76708, allowing `cast<ConstantSDNode>(N)->getZExtValue()` to be replaced with just `N->getAsZExtVal()`. The change was made mechanically via `git grep -l "cast<ConstantSDNode>\(.*\).*getZExtValue" | xargs sed -E -i 's/cast<ConstantSDNode>\((.*)\)->getZExtValue/\1->getAsZExtVal/'` and then running `git clang-format` on the result.
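As a minimal sketch (not part of the patch; the function names and the `Idx` value are hypothetical), the rewrite replaces the explicit cast-plus-`getZExtValue()` pattern with the new `SDNode::getAsZExtVal()` helper:

```cpp
// Sketch only: contrasts the old and new patterns on a hypothetical SDValue.
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Assumes the caller has already checked isa<ConstantSDNode>(Idx); both
// forms assert (via cast<>) if Idx is not actually a ConstantSDNode.
static uint64_t getIndexOldStyle(SDValue Idx) {
  // Before this patch: explicit cast, then getZExtValue().
  return cast<ConstantSDNode>(Idx)->getZExtValue();
}

static uint64_t getIndexNewStyle(SDValue Idx) {
  // After this patch: the cast is folded into the helper.
  return Idx->getAsZExtVal();
}
```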
-rw-r--r--  llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 7
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 10
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 33
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 15
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 11
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 24
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp | 19
-rw-r--r--  llvm/lib/Target/AVR/AVRISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp | 6
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp | 19
-rw-r--r--  llvm/lib/Target/M68k/M68kISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp | 4
-rw-r--r--  llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp | 3
-rw-r--r--  llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 8
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 13
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 16
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 21
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp | 8
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 27
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp | 7
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 22
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp | 2
40 files changed, 159 insertions(+), 186 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 7f957878343a..ebf410cc94de 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -929,6 +929,9 @@ public:
/// Helper method returns the integer value of a ConstantSDNode operand.
inline uint64_t getConstantOperandVal(unsigned Num) const;
+ /// Helper method returns the zero-extended integer value of a ConstantSDNode.
+ inline uint64_t getAsZExtVal() const;
+
/// Helper method returns the APInt of a ConstantSDNode operand.
inline const APInt &getConstantOperandAPInt(unsigned Num) const;
@@ -1645,6 +1648,10 @@ uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
}
+uint64_t SDNode::getAsZExtVal() const {
+ return cast<ConstantSDNode>(this)->getZExtValue();
+}
+
const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 464e1becc0b8..2327664516cc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14713,7 +14713,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
- int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ int Elt = EltNo->getAsZExtVal();
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDLoc DL(N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 34fa1f5a7ed1..032cff416cda 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -551,7 +551,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
SDValue N2 = Node->getOperand(2);
- unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
+ unsigned SubIdx = N2->getAsZExtVal();
// Figure out the register class to create for the destreg. It should be
// the largest legal register class supporting SubIdx sub-registers.
@@ -650,7 +650,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
// Skip physical registers as they don't have a vreg to get and we'll
// insert copies for them in TwoAddressInstructionPass anyway.
if (!R || !R->getReg().isPhysical()) {
- unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
+ unsigned SubIdx = Op->getAsZExtVal();
unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
const TargetRegisterClass *SRC =
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 6e0e1e23419b..589fec0e56f7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -2511,7 +2511,7 @@ SDValue DAGTypeLegalizer::PromoteFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
EVT VecVT = Vec->getValueType(0);
EVT EltVT = VecVT.getVectorElementType();
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsZExtVal();
switch (getTypeAction(VecVT)) {
default: break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 92598d885619..814f746f5a4d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -5570,7 +5570,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
getTypeAction(InVT) == TargetLowering::TypeLegal) {
EVT NInVT = InVT.getHalfNumVectorElementsVT(*DAG.getContext());
unsigned NElts = NInVT.getVectorMinNumElements();
- uint64_t IdxVal = cast<ConstantSDNode>(BaseIdx)->getZExtValue();
+ uint64_t IdxVal = BaseIdx->getAsZExtVal();
SDValue Step1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NInVT, InOp0,
DAG.getConstant(alignDown(IdxVal, NElts), dl,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 66461b26468f..ec74d2940099 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1442,7 +1442,7 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsZExtVal();
Hi = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
DAG.getVectorIdxConstant(IdxVal + LoVT.getVectorMinNumElements(), dl));
@@ -1466,7 +1466,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
// If we know the index is in the first half, and we know the subvector
// doesn't cross the boundary between the halves, we can avoid spilling the
// vector, and insert into the lower half of the split vector directly.
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsZExtVal();
if (IdxVal + SubElems <= LoElems) {
Lo = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, LoVT, Lo, SubVec, Idx);
return;
@@ -3279,7 +3279,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_INSERT_SUBVECTOR(SDNode *N,
SDValue Lo, Hi;
GetSplitVector(SubVec, Lo, Hi);
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsZExtVal();
uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
SDValue FirstInsertion =
@@ -3301,7 +3301,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
GetSplitVector(N->getOperand(0), Lo, Hi);
uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements();
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsZExtVal();
if (IdxVal < LoEltsMin) {
assert(IdxVal + SubVT.getVectorMinNumElements() <= LoEltsMin &&
@@ -5257,7 +5257,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
EVT InVT = InOp.getValueType();
// Check if we can just return the input vector after widening.
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsZExtVal();
if (IdxVal == 0 && InVT == WidenVT)
return InOp;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 4151964adc7d..b39be64c06f9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7197,8 +7197,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
assert(isa<ConstantSDNode>(N3) &&
"Insert subvector index must be constant");
assert((VT.isScalableVector() != N2VT.isScalableVector() ||
- (N2VT.getVectorMinNumElements() +
- cast<ConstantSDNode>(N3)->getZExtValue()) <=
+ (N2VT.getVectorMinNumElements() + N3->getAsZExtVal()) <=
VT.getVectorMinNumElements()) &&
"Insert subvector overflow!");
assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() ==
@@ -9986,8 +9985,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
Ops[1].getValueType().isFloatingPoint() &&
VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
isa<ConstantSDNode>(Ops[2]) &&
- (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
- cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
+ (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
"Invalid STRICT_FP_ROUND!");
break;
#if 0
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 1ae682eaf251..2c477b947430 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5644,7 +5644,7 @@ static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
// expansion/promotion) if it was possible to expand a libcall of an
// illegal type during operation legalization. But it's not, so things
// get a bit hacky.
- unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
+ unsigned ScaleInt = Scale->getAsZExtVal();
if ((ScaleInt > 0 || (Saturating && Signed)) &&
(TLI.isTypeLegal(VT) ||
(VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
@@ -7657,8 +7657,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// suitable for the target. Convert the index as required.
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
if (Index.getValueType() != VectorIdxTy)
- Index = DAG.getVectorIdxConstant(
- cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
+ Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
@@ -7674,8 +7673,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// suitable for the target. Convert the index as required.
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
if (Index.getValueType() != VectorIdxTy)
- Index = DAG.getVectorIdxConstant(
- cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
+ Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
setValue(&I,
DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
@@ -8138,7 +8136,7 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
case ISD::VP_IS_FPCLASS: {
const DataLayout DLayout = DAG.getDataLayout();
EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
- auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
+ auto Constant = OpValues[1]->getAsZExtVal();
SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
{OpValues[0], Check, OpValues[2], OpValues[3]});
@@ -9175,8 +9173,7 @@ findMatchingInlineAsmOperand(unsigned OperandNo,
unsigned CurOp = InlineAsm::Op_FirstOperand;
for (; OperandNo; --OperandNo) {
// Advance to the next operand.
- unsigned OpFlag =
- cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
+ unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
const InlineAsm::Flag F(OpFlag);
assert(
(F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
@@ -9482,8 +9479,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
// just use its register.
auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
AsmNodeOperands);
- InlineAsm::Flag Flag(
- cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue());
+ InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
if (OpInfo.isIndirect) {
// This happens on gcc/testsuite/gcc.dg/pr8788-1.c
@@ -9987,14 +9983,14 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
// constant nodes.
SDValue ID = getValue(CI.getArgOperand(0));
assert(ID.getValueType() == MVT::i64);
- SDValue IDConst = DAG.getTargetConstant(
- cast<ConstantSDNode>(ID)->getZExtValue(), DL, ID.getValueType());
+ SDValue IDConst =
+ DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
Ops.push_back(IDConst);
SDValue Shad = getValue(CI.getArgOperand(1));
assert(Shad.getValueType() == MVT::i32);
- SDValue ShadConst = DAG.getTargetConstant(
- cast<ConstantSDNode>(Shad)->getZExtValue(), DL, Shad.getValueType());
+ SDValue ShadConst =
+ DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
Ops.push_back(ShadConst);
// Add the live variables.
@@ -10043,7 +10039,7 @@ void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
// Get the real number of arguments participating in the call <numArgs>
SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
- unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
+ unsigned NumArgs = NArgVal->getAsZExtVal();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
// Intrinsics include all meta-operands up to but not including CC.
@@ -10090,12 +10086,9 @@ void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
// Add the <id> and <numBytes> constants.
SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
+ Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
- MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
// Add the callee.
Ops.push_back(Callee);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 4ae30000015e..9ebef642e423 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -149,7 +149,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
- unsigned IID = cast<ConstantSDNode>(getOperand(OpNo))->getZExtValue();
+ unsigned IID = getOperand(OpNo)->getAsZExtVal();
if (IID < Intrinsic::num_intrinsics)
return Intrinsic::getBaseName((Intrinsic::ID)IID).str();
if (!G)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 99bb3d875d4f..9acfc76d7d5e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2125,7 +2125,7 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
--e; // Don't process a glue operand if it is here.
while (i != e) {
- InlineAsm::Flag Flags(cast<ConstantSDNode>(InOps[i])->getZExtValue());
+ InlineAsm::Flag Flags(InOps[i]->getAsZExtVal());
if (!Flags.isMemKind() && !Flags.isFuncKind()) {
// Just skip over this operand, copying the operands verbatim.
Ops.insert(Ops.end(), InOps.begin() + i,
@@ -2139,12 +2139,10 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
if (Flags.isUseOperandTiedToDef(TiedToOperand)) {
// We need the constraint ID from the operand this is tied to.
unsigned CurOp = InlineAsm::Op_FirstOperand;
- Flags =
- InlineAsm::Flag(cast<ConstantSDNode>(InOps[CurOp])->getZExtValue());
+ Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
for (; TiedToOperand; --TiedToOperand) {
CurOp += Flags.getNumOperandRegisters() + 1;
- Flags = InlineAsm::Flag(
- cast<ConstantSDNode>(InOps[CurOp])->getZExtValue());
+ Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
}
}
@@ -2384,9 +2382,8 @@ void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
if (OpNode->getOpcode() == ISD::Constant) {
Ops.push_back(
CurDAG->getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
- Ops.push_back(
- CurDAG->getTargetConstant(cast<ConstantSDNode>(OpNode)->getZExtValue(),
- DL, OpVal.getValueType()));
+ Ops.push_back(CurDAG->getTargetConstant(OpNode->getAsZExtVal(), DL,
+ OpVal.getValueType()));
} else {
Ops.push_back(OpVal);
}
@@ -2456,7 +2453,7 @@ void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
Ops.push_back(*It++);
// Push the args for the call.
- for (uint64_t I = cast<ConstantSDNode>(NumArgs)->getZExtValue(); I != 0; I--)
+ for (uint64_t I = NumArgs->getAsZExtVal(); I != 0; I--)
Ops.push_back(*It++);
// Now push the live variables.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 476d99c2a7e0..edc8cc7d4d1e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -580,7 +580,7 @@ bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
if (!isa<ConstantSDNode>(N.getNode()))
return false;
- uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
+ uint64_t Immed = N.getNode()->getAsZExtVal();
unsigned ShiftAmt;
if (Immed >> 12 == 0) {
@@ -611,7 +611,7 @@ bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
return false;
// The immediate operand must be a 24-bit zero-extended immediate.
- uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
+ uint64_t Immed = N.getNode()->getAsZExtVal();
// This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
// have the opposite effect on the C flag, so this pattern mustn't match under
@@ -1326,7 +1326,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
// MOV X0, WideImmediate
// LDR X2, [BaseReg, X0]
if (isa<ConstantSDNode>(RHS)) {
- int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
+ int64_t ImmOff = (int64_t)RHS->getAsZExtVal();
// Skip the immediate can be selected by load/store addressing mode.
// Also skip the immediate can be encoded by a single ADD (SUB is also
// checked by using -ImmOff).
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3583b7d2ce4e..47e665176e8b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3588,8 +3588,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// cmp w13, w12
// can be turned into:
// cmp w12, w11, lsl #1
- if (!isa<ConstantSDNode>(RHS) ||
- !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) {
+ if (!isa<ConstantSDNode>(RHS) || !isLegalArithImmed(RHS->getAsZExtVal())) {
SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
@@ -3623,7 +3622,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
LHS.getNode()->hasNUsesOfValue(1, 0)) {
- int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
+ int16_t ValueofRHS = RHS->getAsZExtVal();
if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
SDValue SExt =
DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
@@ -5619,7 +5618,7 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
// SVE supports an index scaled by sizeof(MemVT.elt) only, everything else
// must be calculated before hand.
- uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleVal = Scale->getAsZExtVal();
if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
EVT IndexVT = Index.getValueType();
@@ -5707,7 +5706,7 @@ SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
// SVE supports an index scaled by sizeof(MemVT.elt) only, everything else
// must be calculated before hand.
- uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleVal = Scale->getAsZExtVal();
if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
EVT IndexVT = Index.getValueType();
@@ -22011,7 +22010,7 @@ static SDValue performBRCONDCombine(SDNode *N,
SDValue Cmp = N->getOperand(3);
assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
- unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
+ unsigned CC = CCVal->getAsZExtVal();
if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
return SDValue();
diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index 1a76f354589e..9e43f206efcf 100644
--- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -172,7 +172,7 @@ static SDValue EmitUnrolledSetTag(SelectionDAG &DAG, const SDLoc &dl,
SDValue AArch64SelectionDAGInfo::EmitTargetCodeForSetTag(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr,
SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const {
- uint64_t ObjSize = cast<ConstantSDNode>(Size)->getZExtValue();
+ uint64_t ObjSize = Size->getAsZExtVal();
assert(ObjSize % 16 == 0);
MachineFunction &MF = DAG.getMachineFunction();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 18f434be3cd3..719ae2e8750c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -373,7 +373,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
Subtarget->getRegisterInfo()->getRegClass(RCID);
SDValue SubRegOp = N->getOperand(OpNo + 1);
- unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
+ unsigned SubRegIdx = SubRegOp->getAsZExtVal();
return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
SubRegIdx);
}
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 9a2fb0bc37b2..674fd04f2fc1 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1651,7 +1651,7 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ unsigned Idx = Swz[i]->getAsZExtVal();
if (SwizzleRemap.contains(Idx))
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
@@ -1659,7 +1659,7 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
SwizzleRemap.clear();
BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ unsigned Idx = Swz[i]->getAsZExtVal();
if (SwizzleRemap.contains(Idx))
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
@@ -1780,7 +1780,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
// Check that we know which element is being inserted
if (!isa<ConstantSDNode>(EltNo))
return SDValue();
- unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ unsigned Elt = EltNo->getAsZExtVal();
// Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
@@ -2021,7 +2021,7 @@ bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx,
}
case R600::MOV_IMM_GLOBAL_ADDR:
// Check if the Imm slot is used. Taken from below.
- if (cast<ConstantSDNode>(Imm)->getZExtValue())
+ if (Imm->getAsZExtVal())
return false;
Imm = Src.getOperand(0);
Src = DAG.getRegister(R600::ALU_LITERAL_X, MVT::i32);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 975178b313ae..6ddc7e864fb2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6638,7 +6638,7 @@ SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
EVT InsVT = Ins.getValueType();
EVT EltVT = VecVT.getVectorElementType();
unsigned InsNumElts = InsVT.getVectorNumElements();
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsZExtVal();
SDLoc SL(Op);
if (EltVT.getScalarSizeInBits() == 16 && IdxVal % 2 == 0) {
@@ -7668,7 +7668,7 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
Ops.push_back(IsA16 ? True : False);
if (!Subtarget->hasGFX90AInsts()) {
Ops.push_back(TFE); //tfe
- } else if (cast<ConstantSDNode>(TFE)->getZExtValue()) {
+ } else if (TFE->getAsZExtVal()) {
report_fatal_error("TFE is not supported on this GPU");
}
if (!IsGFX12Plus || BaseOpcode->Sampler || BaseOpcode->MSAA)
@@ -7805,7 +7805,7 @@ SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
setBufferOffsets(Offset, DAG, &Ops[3],
NumLoads > 1 ? Align(16 * NumLoads) : Align(4));
- uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
+ uint64_t InstOffset = Ops[5]->getAsZExtVal();
for (unsigned i = 0; i < NumLoads; ++i) {
Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d4c7a457e9aa..fee900b3efb2 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -338,8 +338,8 @@ bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
return false;
- Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
- Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
+ Offset0 = Off0->getAsZExtVal();
+ Offset1 = Off1->getAsZExtVal();
return true;
}
diff --git a/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp b/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
index 28e35f8f2a54..17c2d7bb13b4 100644
--- a/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
@@ -170,7 +170,7 @@ bool ARCDAGToDAGISel::SelectFrameADDR_ri(SDValue Addr, SDValue &Base,
void ARCDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
case ISD::Constant: {
- uint64_t CVal = cast<ConstantSDNode>(N)->getZExtValue();
+ uint64_t CVal = N->getAsZExtVal();
ReplaceNode(N, CurDAG->getMachineNode(
isInt<12>(CVal) ? ARC::MOV_rs12 : ARC::MOV_rlimm,
SDLoc(N), MVT::i32,
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index adc429b61bbc..e99ee299412a 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -372,7 +372,7 @@ INITIALIZE_PASS(ARMDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsZExtVal();
return true;
}
return false;
@@ -1101,8 +1101,7 @@ bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
Offset = N.getOperand(0);
SDValue N1 = N.getOperand(1);
- Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
- SDLoc(N), MVT::i32);
+ Label = CurDAG->getTargetConstant(N1->getAsZExtVal(), SDLoc(N), MVT::i32);
return true;
}
@@ -1942,7 +1941,7 @@ SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
if (!is64BitVector && NumVecs < 3)
NumRegs *= 2;
- unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ unsigned Alignment = Align->getAsZExtVal();
if (Alignment >= 32 && NumRegs == 4)
Alignment = 32;
else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
@@ -2428,7 +2427,7 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
unsigned Alignment = 0;
if (NumVecs != 3) {
- Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ Alignment = Align->getAsZExtVal();
unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
@@ -2871,7 +2870,7 @@ void ARMDAGToDAGISel::SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
Ops.push_back(N->getOperand(OpIdx++)); // limit
SDValue ImmOp = N->getOperand(OpIdx++); // step
- int ImmValue = cast<ConstantSDNode>(ImmOp)->getZExtValue();
+ int ImmValue = ImmOp->getAsZExtVal();
Ops.push_back(getI32Imm(ImmValue, Loc));
if (Predicated)
@@ -2892,7 +2891,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
// Convert and append the immediate operand designating the coprocessor.
SDValue ImmCorpoc = N->getOperand(OpIdx++);
- uint32_t ImmCoprocVal = cast<ConstantSDNode>(ImmCorpoc)->getZExtValue();
+ uint32_t ImmCoprocVal = ImmCorpoc->getAsZExtVal();
Ops.push_back(getI32Imm(ImmCoprocVal, Loc));
// For accumulating variants copy the low and high order parts of the
@@ -2911,7 +2910,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
// Convert and append the immediate operand
SDValue Imm = N->getOperand(OpIdx);
- uint32_t ImmVal = cast<ConstantSDNode>(Imm)->getZExtValue();
+ uint32_t ImmVal = Imm->getAsZExtVal();
Ops.push_back(getI32Imm(ImmVal, Loc));
// Accumulating variants are IT-predicable, add predicate operands.
@@ -2965,7 +2964,7 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
unsigned Alignment = 0;
if (NumVecs != 3) {
- Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ Alignment = Align->getAsZExtVal();
unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
@@ -3697,7 +3696,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
// Other cases are autogenerated.
break;
case ISD::Constant: {
- unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
+ unsigned Val = N->getAsZExtVal();
// If we can't materialize the constant we need to use a literal pool
if (ConstantMaterializationCost(Val, Subtarget) > 2 &&
!Subtarget->genExecuteOnly()) {
@@ -4132,7 +4131,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
assert(N2.getOpcode() == ISD::Constant);
assert(N3.getOpcode() == ISD::Register);
- unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
+ unsigned CC = (unsigned)N2->getAsZExtVal();
if (InGlue.getOpcode() == ARMISD::CMPZ) {
if (InGlue.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
@@ -4243,8 +4242,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
if (SwitchEQNEToPLMI) {
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal();
switch (CC) {
default: llvm_unreachable("CMPZ must be either NE or EQ!");
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 9f3bcffc7a99..568085bd0ab3 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -4820,8 +4820,7 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// some tweaks to the heuristics for the previous and->shift transform.
// FIXME: Optimize cases where the LHS isn't a shift.
if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
+ isa<ConstantSDNode>(RHS) && RHS->getAsZExtVal() == 0x80000000U &&
CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
LHS.getConstantOperandVal(1) < 31) {
unsigned ShiftAmt = LHS.getConstantOperandVal(1) + 1;
@@ -5533,7 +5532,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
// Choose GE over PL, which vsel does now support
- if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
+ if (ARMcc->getAsZExtVal() == ARMCC::PL)
ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
}
@@ -7749,7 +7748,7 @@ static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
uint64_t Val;
if (!isa<ConstantSDNode>(N))
return SDValue();
- Val = cast<ConstantSDNode>(N)->getZExtValue();
+ Val = N->getAsZExtVal();
if (ST->isThumb1Only()) {
if (Val <= 255 || ~Val <= 255)
@@ -7804,7 +7803,7 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
SDValue V = Op.getOperand(i);
if (!isa<ConstantSDNode>(V) && !V.isUndef())
continue;
- bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
+ bool BitSet = V.isUndef() ? false : V->getAsZExtVal();
if (BitSet)
Bits32 |= BoolMask << (i * BitsPerBool);
}
@@ -9240,7 +9239,7 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
EVT VT = Op.getValueType();
EVT Op1VT = V1.getValueType();
unsigned NumElts = VT.getVectorNumElements();
- unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue();
+ unsigned Index = V2->getAsZExtVal();
assert(VT.getScalarSizeInBits() == 1 &&
"Unexpected custom EXTRACT_SUBVECTOR lowering");
@@ -14618,7 +14617,7 @@ static SDValue PerformORCombineToBFI(SDNode *N,
// Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
// where lsb(mask) == #shamt and masked bits of B are known zero.
SDValue ShAmt = N00.getOperand(1);
- unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
+ unsigned ShAmtC = ShAmt->getAsZExtVal();
unsigned LSB = llvm::countr_zero(Mask);
if (ShAmtC != LSB)
return SDValue();
@@ -18339,8 +18338,7 @@ ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
SDValue Chain = N->getOperand(0);
SDValue BB = N->getOperand(1);
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal();
// (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
// -> (brcond Chain BB CC CPSR Cmp)
@@ -18373,8 +18371,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
SDValue FalseVal = N->getOperand(0);
SDValue TrueVal = N->getOperand(1);
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal();
// BFI is only available on V6T2+.
if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index d36bfb188ed3..f91e77adb8f8 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -660,7 +660,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
SDValue Cmp;
if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
- uint64_t Imm = cast<ConstantSDNode>(RHS)->getZExtValue();
+ uint64_t Imm = RHS->getAsZExtVal();
// Generate a CPI/CPC pair if RHS is a 16-bit constant. Use the zero
// register for the constant RHS if its lower or higher byte is zero.
SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
@@ -680,7 +680,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
} else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
// Generate a CPI/CPC pair if LHS is a 16-bit constant. Use the zero
// register for the constant LHS if its lower or higher byte is zero.
- uint64_t Imm = cast<ConstantSDNode>(LHS)->getZExtValue();
+ uint64_t Imm = LHS->getAsZExtVal();
SDValue LHSlo = (Imm & 0xff) == 0
? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
: DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index eb5c59672224..defb1f7324f4 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -743,7 +743,7 @@ void HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
//
void HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
if (N->getValueType(0) == MVT::i1) {
- assert(!(cast<ConstantSDNode>(N)->getZExtValue() >> 1));
+ assert(!(N->getAsZExtVal() >> 1));
unsigned Opc = (cast<ConstantSDNode>(N)->getSExtValue() != 0)
? Hexagon::PS_true
: Hexagon::PS_false;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index 665e2d79c83d..81035849491b 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -1256,7 +1256,7 @@ HexagonTargetLowering::extractHvxSubvectorReg(SDValue OrigOp, SDValue VecV,
SDValue IdxV, const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const {
MVT VecTy = ty(VecV);
unsigned HwLen = Subtarget.getVectorLength();
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsZExtVal();
MVT ElemTy = VecTy.getVectorElementType();
unsigned ElemWidth = ElemTy.getSizeInBits();
@@ -1299,7 +1299,7 @@ HexagonTargetLowering::extractHvxSubvectorPred(SDValue VecV, SDValue IdxV,
MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);
// IdxV is required to be a constant.
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsZExtVal();
unsigned ResLen = ResTy.getVectorNumElements();
unsigned BitBytes = HwLen / VecTy.getVectorNumElements();
@@ -1801,7 +1801,7 @@ HexagonTargetLowering::LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG)
MVT SrcTy = ty(SrcV);
MVT DstTy = ty(Op);
SDValue IdxV = Op.getOperand(1);
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsZExtVal();
assert(Idx % DstTy.getVectorNumElements() == 0);
(void)Idx;
const SDLoc &dl(Op);
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 3e75b9fa5230..70f782b81270 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -525,8 +525,7 @@ LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
if (isa<ConstantSDNode>(Idx) &&
(EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
- EltTy == MVT::f64 ||
- cast<ConstantSDNode>(Idx)->getZExtValue() < NumElts / 2))
+ EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))
return Op;
return SDValue();
@@ -1395,28 +1394,28 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA32, DAG);
// call void @llvm.loongarch.cacop.[d/w](uimm5, rj, simm12)
- unsigned Imm1 = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm1 = Op2->getAsZExtVal();
int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
return Op;
}
case Intrinsic::loongarch_dbar: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::DBAR, DL, MVT::Other, Chain,
DAG.getConstant(Imm, DL, GRLenVT));
}
case Intrinsic::loongarch_ibar: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::IBAR, DL, MVT::Other, Chain,
DAG.getConstant(Imm, DL, GRLenVT));
}
case Intrinsic::loongarch_break: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::BREAK, DL, MVT::Other, Chain,
@@ -1425,7 +1424,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
case Intrinsic::loongarch_movgr2fcsr: {
if (!Subtarget.hasBasicF())
return emitIntrinsicErrorMessage(Op, ErrorMsgReqF, DAG);
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<2>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::MOVGR2FCSR, DL, MVT::Other, Chain,
@@ -1434,7 +1433,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
Op.getOperand(3)));
}
case Intrinsic::loongarch_syscall: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::SYSCALL, DL, MVT::Other, Chain,
@@ -1937,7 +1936,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqF);
return;
}
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsZExtVal();
if (!isUInt<2>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
@@ -1993,7 +1992,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
CSR_CASE(iocsrrd_d);
#undef CSR_CASE
case Intrinsic::loongarch_csrrd_w: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsZExtVal();
if (!isUInt<14>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index c4d7a0dec7f3..158393f02a24 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -2375,7 +2375,7 @@ SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == M68kISD::SUB) {
- unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
+ unsigned CondCode = CC->getAsZExtVal();
if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
@@ -2491,7 +2491,7 @@ SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
Cond = Cmp;
AddTest = false;
} else {
- switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
+ switch (CC->getAsZExtVal()) {
default:
break;
case M68k::COND_VS:
diff --git a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 660861a5d521..efb23b1a4e3f 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -308,12 +308,12 @@ static bool isValidIndexedLoad(const LoadSDNode *LD) {
switch (VT.getSimpleVT().SimpleTy) {
case MVT::i8:
- if (cast<ConstantSDNode>(LD->getOffset())->getZExtValue() != 1)
+ if (LD->getOffset()->getAsZExtVal() != 1)
return false;
break;
case MVT::i16:
- if (cast<ConstantSDNode>(LD->getOffset())->getZExtValue() != 2)
+ if (LD->getOffset()->getAsZExtVal() != 2)
return false;
break;
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 1ed19f9381ec..e68904863cfc 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -1169,8 +1169,8 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
bool Invert = false;
bool Shift = false;
bool Convert = true;
- switch (cast<ConstantSDNode>(TargetCC)->getZExtValue()) {
- default:
+ switch (TargetCC->getAsZExtVal()) {
+ default:
Convert = false;
break;
case MSP430CC::COND_HS:
@@ -1194,7 +1194,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
// C = ~Z for AND instruction, thus we can put Res = ~(SR & 1), however,
// Res = (SR >> 1) & 1 is 1 word shorter.
break;
- }
+ }
EVT VT = Op.getValueType();
SDValue One = DAG.getConstant(1, dl, VT);
if (Convert) {
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 483eba4e4f47..d431d3d91494 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2042,8 +2042,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
return Op;
SDValue CCNode = CondRes.getOperand(2);
- Mips::CondCode CC =
- (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
+ Mips::CondCode CC = (Mips::CondCode)CCNode->getAsZExtVal();
unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 0ed87ee0809a..c0e978018919 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -76,7 +76,7 @@ void MipsSEDAGToDAGISel::addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
}
unsigned MipsSEDAGToDAGISel::getMSACtrlReg(const SDValue RegIdx) const {
- uint64_t RegNum = cast<ConstantSDNode>(RegIdx)->getZExtValue();
+ uint64_t RegNum = RegIdx->getAsZExtVal();
return Mips::MSACtrlRegClass.getRegister(RegNum);
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 815c46edb6fa..7abe984b34e1 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -2076,7 +2076,7 @@ bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) {
VTs = CurDAG->getVTList(EVTs);
}
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsZExtVal();
SmallVector<SDValue, 2> Ops;
Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
@@ -2091,7 +2091,7 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Offset = N->getOperand(1);
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsZExtVal();
MemSDNode *Mem = cast<MemSDNode>(N);
// How many elements do we have?
@@ -2158,9 +2158,9 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Param = N->getOperand(1);
- unsigned ParamVal = cast<ConstantSDNode>(Param)->getZExtValue();
+ unsigned ParamVal = Param->getAsZExtVal();
SDValue Offset = N->getOperand(2);
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsZExtVal();
MemSDNode *Mem = cast<MemSDNode>(N);
SDValue Glue = N->getOperand(N->getNumOperands() - 1);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index de6de3214521..c65090d915ef 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5812,7 +5812,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SDLoc DL(N);
// Get the intrinsic ID
- unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
+ unsigned IntrinNo = Intrin.getNode()->getAsZExtVal();
switch (IntrinNo) {
default:
return;
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index ed96339240d9..26ed74108ec3 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -565,7 +565,7 @@ static bool hasTocDataAttr(SDValue Val, unsigned PointerSize) {
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsZExtVal();
return true;
}
return false;
@@ -575,7 +575,7 @@ static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
/// operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsZExtVal();
return true;
}
return false;
@@ -1500,7 +1500,7 @@ static SDNode *selectI64Imm(SelectionDAG *CurDAG, SDNode *N) {
SDLoc dl(N);
// Get 64 bit value.
- int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ int64_t Imm = N->getAsZExtVal();
if (unsigned MinSize = allUsesTruncate(CurDAG, N)) {
uint64_t SextImm = SignExtend64(Imm, MinSize);
SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
@@ -4923,7 +4923,7 @@ bool PPCDAGToDAGISel::trySelectLoopCountIntrinsic(SDNode *N) {
SDNode *NewDecrement = CurDAG->getMachineNode(DecrementOpcode, DecrementLoc,
MVT::i1, DecrementOps);
- unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
+ unsigned Val = RHS->getAsZExtVal();
bool IsBranchOnTrue = (CC == ISD::SETEQ && Val) || (CC == ISD::SETNE && !Val);
unsigned Opcode = IsBranchOnTrue ? PPC::BC : PPC::BCn;
@@ -5765,7 +5765,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
break;
// If the multiplier fits int16, we can handle it with mulli.
- int64_t Imm = cast<ConstantSDNode>(Op1)->getZExtValue();
+ int64_t Imm = Op1->getAsZExtVal();
unsigned Shift = llvm::countr_zero<uint64_t>(Imm);
if (isInt<16>(Imm) || !Shift)
break;
@@ -6612,8 +6612,7 @@ void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
// For us to materialize these using one instruction, we must be able to
// represent them as signed 16-bit integers.
- uint64_t True = cast<ConstantSDNode>(TrueRes)->getZExtValue(),
- False = cast<ConstantSDNode>(FalseRes)->getZExtValue();
+ uint64_t True = TrueRes->getAsZExtVal(), False = FalseRes->getAsZExtVal();
if (!isInt<16>(True) || !isInt<16>(False))
break;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 8f27e6677afa..235df1880b37 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2566,7 +2566,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
if (LeadingZero) {
if (!UniquedVals[Multiple-1].getNode())
return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
- int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
+ int Val = UniquedVals[Multiple - 1]->getAsZExtVal();
if (Val < 16) // 0,0,0,4 -> vspltisw(4)
return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
}
@@ -2635,11 +2635,11 @@ bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = (int16_t)N->getAsZExtVal();
if (N->getValueType(0) == MVT::i32)
- return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
+ return Imm == (int32_t)N->getAsZExtVal();
else
- return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
+ return Imm == (int64_t)N->getAsZExtVal();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
return isIntS16Immediate(Op.getNode(), Imm);
@@ -2684,7 +2684,7 @@ bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = (int64_t)N->getAsZExtVal();
return isInt<34>(Imm);
}
bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
@@ -15580,7 +15580,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
NarrowOp.getOpcode() != ISD::ROTL && NarrowOp.getOpcode() != ISD::ROTR)
break;
- uint64_t Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ uint64_t Imm = Op2->getAsZExtVal();
// Make sure that the constant is narrow enough to fit in the narrow type.
if (!isUInt<32>(Imm))
break;
@@ -16795,7 +16795,7 @@ void PPCTargetLowering::CollectTargetIntrinsicOperands(const CallInst &I,
return;
if (!isa<ConstantSDNode>(Ops[1].getNode()))
return;
- auto IntrinsicID = cast<ConstantSDNode>(Ops[1].getNode())->getZExtValue();
+ auto IntrinsicID = Ops[1].getNode()->getAsZExtVal();
if (IntrinsicID != Intrinsic::ppc_tdw && IntrinsicID != Intrinsic::ppc_tw &&
IntrinsicID != Intrinsic::ppc_trapd && IntrinsicID != Intrinsic::ppc_trap)
return;
@@ -18430,7 +18430,7 @@ PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
if (Flags & PPC::MOF_RPlusSImm16) {
SDValue Op0 = N.getOperand(0);
SDValue Op1 = N.getOperand(1);
- int16_t Imm = cast<ConstantSDNode>(Op1)->getZExtValue();
+ int16_t Imm = Op1->getAsZExtVal();
if (!Align || isAligned(*Align, Imm)) {
Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
Base = Op0;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a5b33e8e293a..0a1a466af591 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3497,7 +3497,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (unsigned I = 0; I < NumElts;) {
SDValue V = Op.getOperand(I);
- bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
+ bool BitValue = !V.isUndef() && V->getAsZExtVal();
Bits |= ((uint64_t)BitValue << BitPos);
++BitPos;
++I;
@@ -3628,8 +3628,8 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (const auto &OpIdx : enumerate(Op->op_values())) {
const auto &SeqV = OpIdx.value();
if (!SeqV.isUndef())
- SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
- << (OpIdx.index() * EltBitSize));
+ SplatValue |=
+ ((SeqV->getAsZExtVal() & EltMask) << (OpIdx.index() * EltBitSize));
}
// On RV64, sign-extend from 32 to 64 bits where possible in order to
@@ -3684,8 +3684,8 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
// vector type.
for (const auto &SeqV : Sequence) {
if (!SeqV.isUndef())
- SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
- << (EltIdx * EltBitSize));
+ SplatValue |=
+ ((SeqV->getAsZExtVal() & EltMask) << (EltIdx * EltBitSize));
EltIdx++;
}
@@ -3946,8 +3946,7 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
(isa<RegisterSDNode>(VL) &&
cast<RegisterSDNode>(VL)->getReg() == RISCV::X0))
NewVL = DAG.getRegister(RISCV::X0, MVT::i32);
- else if (isa<ConstantSDNode>(VL) &&
- isUInt<4>(cast<ConstantSDNode>(VL)->getZExtValue()))
+ else if (isa<ConstantSDNode>(VL) && isUInt<4>(VL->getAsZExtVal()))
NewVL = DAG.getNode(ISD::ADD, DL, VL.getValueType(), VL, VL);
if (NewVL) {
@@ -7916,8 +7915,7 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
// Use tail agnostic policy if Idx is the last index of Vec.
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) &&
- cast<ConstantSDNode>(Idx)->getZExtValue() + 1 ==
- VecVT.getVectorNumElements())
+ Idx->getAsZExtVal() + 1 == VecVT.getVectorNumElements())
Policy = RISCVII::TAIL_AGNOSTIC;
SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
Idx, Mask, InsertVL, Policy);
@@ -8177,7 +8175,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
const auto [MinVLMAX, MaxVLMAX] =
RISCVTargetLowering::computeVLMAXBounds(VT, Subtarget);
- uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
+ uint64_t AVLInt = AVL->getAsZExtVal();
if (AVLInt <= MinVLMAX) {
I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
} else if (AVLInt >= 2 * MaxVLMAX) {
@@ -8243,8 +8241,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
SDValue Mask = Operands[NumOps - 3];
SDValue MaskedOff = Operands[1];
// Assume Policy operand is the last operand.
- uint64_t Policy =
- cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
+ uint64_t Policy = Operands[NumOps - 1]->getAsZExtVal();
// We don't need to select maskedoff if it's undef.
if (MaskedOff.isUndef())
return Vec;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index c7d8591c5bdf..320f91c76057 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -1641,7 +1641,7 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
// If this is a 64-bit constant that is out of the range of LLILF,
// LLIHF and LGFI, split it into two 32-bit pieces.
if (Node->getValueType(0) == MVT::i64) {
- uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
+ uint64_t Val = Node->getAsZExtVal();
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
uint32_t(Val));
@@ -1677,10 +1677,8 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
SDValue CCValid = Node->getOperand(2);
SDValue CCMask = Node->getOperand(3);
- uint64_t ConstCCValid =
- cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
- uint64_t ConstCCMask =
- cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
+ uint64_t ConstCCValid = CCValid.getNode()->getAsZExtVal();
+ uint64_t ConstCCMask = CCMask.getNode()->getAsZExtVal();
// Invert the condition.
CCMask = CurDAG->getTargetConstant(ConstCCValid ^ ConstCCMask,
SDLoc(Node), CCMask.getValueType());
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 045c4c0aac07..2450c6801a66 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2662,10 +2662,8 @@ static void adjustForFNeg(Comparison &C) {
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
// Check for a comparison between (shl X, 32) and 0.
- if (C.Op0.getOpcode() == ISD::SHL &&
- C.Op0.getValueType() == MVT::i64 &&
- C.Op1.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
+ C.Op1.getOpcode() == ISD::Constant && C.Op1->getAsZExtVal() == 0) {
auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
if (C1 && C1->getZExtValue() == 32) {
SDValue ShlOp0 = C.Op0.getOperand(0);
@@ -2690,7 +2688,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ C.Op1->getAsZExtVal() == 0) {
auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
C.Op0.getValueSizeInBits().getFixedValue()) {
@@ -3035,12 +3033,12 @@ static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid,
- cast<ConstantSDNode>(CmpOp1)->getZExtValue(), Cond);
+ CmpOp1->getAsZExtVal(), Cond);
if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid,
- cast<ConstantSDNode>(CmpOp1)->getZExtValue(), Cond);
+ CmpOp1->getAsZExtVal(), Cond);
}
Comparison C(CmpOp0, CmpOp1, Chain);
C.CCMask = CCMaskForCondCode(Cond);
@@ -3457,12 +3455,11 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
// Check for absolute and negative-absolute selections, including those
// where the comparison value is sign-extended (for LPGFR and LNGFR).
// This check supplements the one in DAGCombiner.
- if (C.Opcode == SystemZISD::ICMP &&
- C.CCMask != SystemZ::CCMASK_CMP_EQ &&
+ if (C.Opcode == SystemZISD::ICMP && C.CCMask != SystemZ::CCMASK_CMP_EQ &&
C.CCMask != SystemZ::CCMASK_CMP_NE &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ C.Op1->getAsZExtVal() == 0) {
if (isAbsolute(C.Op0, TrueOp, FalseOp))
return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
if (isAbsolute(C.Op0, FalseOp, TrueOp))
@@ -3947,8 +3944,7 @@ SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
// If user has set the no alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal =
- (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
+ uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
@@ -4013,8 +4009,7 @@ SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
// If user has set the no alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal =
- (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
+ uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
@@ -4213,7 +4208,7 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// If the low part is a constant that is outside the range of LHI,
// then we're better off using IILF.
if (LowOp.getOpcode() == ISD::Constant) {
- int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
+ int64_t Value = int32_t(LowOp->getAsZExtVal());
if (!isInt<16>(Value))
return Op;
}
@@ -5897,7 +5892,7 @@ SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
Op1.getOpcode() != ISD::BITCAST &&
Op1.getOpcode() != ISD::ConstantFP &&
Op2.getOpcode() == ISD::Constant) {
- uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
+ uint64_t Index = Op2->getAsZExtVal();
unsigned Mask = VT.getVectorNumElements() - 1;
if (Index <= Mask)
return Op;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 4bcf89690505..7c47790d1e35 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1869,8 +1869,7 @@ SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
Ops[OpIdx++] = Op.getOperand(2);
while (OpIdx < 18) {
const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
- if (MaskIdx.isUndef() ||
- cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
+ if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) {
bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
} else {
@@ -1912,7 +1911,7 @@ WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
const SDNode *Index = Extract.getOperand(1).getNode();
if (!isa<ConstantSDNode>(Index))
return SDValue();
- unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
+ unsigned IndexVal = Index->getAsZExtVal();
unsigned Scale =
ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
assert(Scale > 1);
@@ -2335,7 +2334,7 @@ WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
if (isa<ConstantSDNode>(IdxNode)) {
// Ensure the index type is i32 to match the tablegen patterns
- uint64_t Idx = cast<ConstantSDNode>(IdxNode)->getZExtValue();
+ uint64_t Idx = IdxNode->getAsZExtVal();
SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
Ops[Op.getNumOperands() - 1] =
DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 73b10cf3067e..53ce720be2da 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2852,7 +2852,7 @@ bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
SDValue &Index, SDValue &Disp,
SDValue &Segment) {
X86ISelAddressMode AM;
- AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
+ AM.Scale = ScaleOp->getAsZExtVal();
 
// Attempt to match index patterns, as long as we're not relying on implicit
// sign-extension, which is performed BEFORE scale.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c14e03197aae..5a28240ea9e2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7371,7 +7371,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
/// index.
static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
SDValue ExtIdx) {
- int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
+ int Idx = ExtIdx->getAsZExtVal();
if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
return Idx;
 
@@ -8795,7 +8795,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
- unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
+ unsigned InsertC = InsIndex->getAsZExtVal();
unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
if (InsertC < NumEltsInLow128Bits)
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
@@ -17756,7 +17756,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Vec), Idx));
 
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsZExtVal();
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
DAG.getTargetConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
@@ -24069,7 +24069,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == X86ISD::SUB) {
- unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
+ unsigned CondCode = CC->getAsZExtVal();
 
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
@@ -25367,8 +25367,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
Src3.getValueType() != MVT::i8) {
- Src3 = DAG.getTargetConstant(
- cast<ConstantSDNode>(Src3)->getZExtValue() & 0xff, dl, MVT::i8);
+ Src3 = DAG.getTargetConstant(Src3->getAsZExtVal() & 0xff, dl, MVT::i8);
}
// We specify 2 possible opcodes for intrinsics with rounding modes.
@@ -25393,8 +25392,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
SDValue Src4 = Op.getOperand(4);
if (Src4.getValueType() != MVT::i8) {
- Src4 = DAG.getTargetConstant(
- cast<ConstantSDNode>(Src4)->getZExtValue() & 0xff, dl, MVT::i8);
+ Src4 = DAG.getTargetConstant(Src4->getAsZExtVal() & 0xff, dl, MVT::i8);
}
 
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
@@ -26796,7 +26794,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
{Chain, Op1, Op2, Size}, VT, MMO);
Chain = Res.getValue(1);
Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsZExtVal();
if (Imm)
Res = DAG.getNode(ISD::SHL, DL, VT, Res,
DAG.getShiftAmountConstant(Imm, VT, DL));
@@ -41845,7 +41843,7 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
- unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
+ unsigned ShAmt = Op1->getAsZExtVal();
if (ShAmt >= BitWidth)
break;
@@ -42630,7 +42628,7 @@ static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
APInt Imm(SrcVT.getVectorNumElements(), 0);
for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Op.getOperand(Idx);
- if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
+ if (!In.isUndef() && (In->getAsZExtVal() & 0x1))
Imm.setBit(Idx);
}
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
@@ -53307,7 +53305,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
if (Index.getOpcode() == ISD::ADD &&
Index.getValueType().getVectorElementType() == PtrVT &&
isa<ConstantSDNode>(Scale)) {
- uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleAmt = Scale->getAsZExtVal();
if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
BitVector UndefElts;
if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
diff --git a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 05003ec304ad..1535eb622da6 100644
--- a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -142,7 +142,7 @@ void XCoreDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
default: break;
case ISD::Constant: {
- uint64_t Val = cast<ConstantSDNode>(N)->getZExtValue();
+ uint64_t Val = N->getAsZExtVal();
if (immMskBitp(N)) {
// Transformation function: get the size of a mask
// Look for the first non-zero bit