diff options
author | Hugo Trachino <32955781+nujaa@users.noreply.github.com> | 2024-02-01 07:40:37 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-01-31 23:40:37 -0800 |
commit | 65066c02770cc3da3b5154fbb7ed9df78ab94b93 (patch) | |
tree | 41608fd4ecb4eee10a4137a633f13749d41f23c9 | |
parent | 54f324f377a92a64fcc5c1d401da9b07bf50a2f1 (diff) |
[mlir] Use `create` instead of `createOrFold` for ConstantOp as folding has no effect (NFC) (#80129)
This aims to clean up confusing uses of
builder.createOrFold<ConstantOp>, since folding a freshly created constant op has no effect — createOrFold only adds overhead and misleads the reader.
5 files changed, 13 insertions, 13 deletions
diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp index cc97ee74d48b..12d2462061dc 100644 --- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp +++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp @@ -38,7 +38,7 @@ static Value createI32Constant(ConversionPatternRewriter &rewriter, static Value createI1Constant(ConversionPatternRewriter &rewriter, Location loc, bool value) { Type llvmI1 = rewriter.getI1Type(); - return rewriter.createOrFold<LLVM::ConstantOp>(loc, llvmI1, value); + return rewriter.create<LLVM::ConstantOp>(loc, llvmI1, value); } namespace { @@ -163,7 +163,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> { Value ptr = memrefDescriptor.alignedPtr(rewriter, loc); // The stride value is always 0 for raw buffers. This also disables // swizling. - Value stride = rewriter.createOrFold<LLVM::ConstantOp>( + Value stride = rewriter.create<LLVM::ConstantOp>( loc, llvmI16, rewriter.getI16IntegerAttr(0)); Value numRecords; if (memrefType.hasStaticShape()) { diff --git a/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp b/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp index c625a302a397..b51a13ae362e 100644 --- a/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp +++ b/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp @@ -89,7 +89,7 @@ void ExtFOnFloat8RewritePattern::rewrite(arith::ExtFOp op, } VectorType inType = in.getType().cast<VectorType>(); int64_t numElements = inType.getNumElements(); - Value zero = rewriter.createOrFold<arith::ConstantOp>( + Value zero = rewriter.create<arith::ConstantOp>( loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0)); Value result = rewriter.createOrFold<vector::SplatOp>(loc, op.getOut().getType(), zero); @@ -209,7 +209,7 @@ void TruncFToFloat8RewritePattern::rewrite(arith::TruncFOp op, } VectorType outType = op.getOut().getType().cast<VectorType>(); int64_t numElements = outType.getNumElements(); - Value zero = 
rewriter.createOrFold<arith::ConstantOp>( + Value zero = rewriter.create<arith::ConstantOp>( loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0)); Value result = rewriter.createOrFold<vector::SplatOp>(loc, outType, zero); if (outType.getShape().empty()) { diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp index 599bb13190f1..363e6016113b 100644 --- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp +++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp @@ -67,8 +67,8 @@ static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) { Value getLaneId(ConversionPatternRewriter &rewriter, Location loc, const unsigned indexBitwidth) { auto int32Type = IntegerType::get(rewriter.getContext(), 32); - Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32); - Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32); + Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32); + Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32); Value mbcntLo = rewriter.create<ROCDL::MbcntLoOp>(loc, int32Type, ValueRange{minus1, zero}); Value laneId = rewriter.create<ROCDL::MbcntHiOp>(loc, int32Type, @@ -89,8 +89,8 @@ struct GPULaneIdOpToROCDL : ConvertOpToLLVMPattern<gpu::LaneIdOp> { // followed by: %lid = call @llvm.amdgcn.mbcnt.hi(-1, %mlo) Type intTy = IntegerType::get(context, 32); - Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32); - Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32); + Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32); + Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32); Value mbcntLo = rewriter.create<ROCDL::MbcntLoOp>(loc, intTy, ValueRange{minus1, zero}); Value laneId = rewriter.create<ROCDL::MbcntHiOp>( diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp index 06ec53d19b1e..505d85f21111 
100644 --- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp +++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp @@ -327,7 +327,7 @@ public: highValues.reserve(rank); for (int i = 0; i < rank; i++) { - Value inputIndex = rewriter.createOrFold<arith::ConstantIndexOp>(loc, i); + Value inputIndex = rewriter.create<arith::ConstantIndexOp>(loc, i); Value lowVal = rewriter.createOrFold<tensor::ExtractOp>( loc, padding, ValueRange({inputIndex, lowIndex})); Value highVal = rewriter.createOrFold<tensor::ExtractOp>( @@ -360,8 +360,8 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> { Location loc = op.getLoc(); int axis = op.getAxis(); - Value axisValue = rewriter.createOrFold<arith::ConstantOp>( - loc, rewriter.getIndexAttr(axis)); + Value axisValue = + rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(axis)); int64_t rank = resultType.getRank(); SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1)); diff --git a/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp index 2108fc591055..7c8403c9609d 100644 --- a/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp @@ -44,8 +44,8 @@ struct DecomposeTensorConcatOp : public OpRewritePattern<ConcatOp> { return failure(); int64_t dim = concatOp.getDim(); - Value dimValue = rewriter.createOrFold<arith::ConstantOp>( - loc, rewriter.getIndexAttr(dim)); + Value dimValue = + rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(dim)); int64_t rank = concatOp.getResultType().getRank(); SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1)); |