author    Vitaly Buka <vitalybuka@google.com>  2023-12-13 14:21:10 -0800
committer Vitaly Buka <vitalybuka@google.com>  2023-12-13 14:21:10 -0800
commit    a45b8ea2e5ebe274808d18c85dfe3147f10c123d (patch)
tree      909d25d1eb66414d20a47de2136ccc5fae3c0cf0
parent    f584b177ceaab4d58b8a703c17809f3bffae0ba3 (diff)
parent    8bea83b8f5adae8abc5d6a6695c756a616201aa7 (diff)
Created using spr 1.3.4 [skip ci]
-rw-r--r--  clang/tools/clang-format/git-clang-format.bat | 2
-rw-r--r--  llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h | 4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 31
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.h | 8
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp | 18
-rw-r--r--  llvm/test/CodeGen/X86/fast-isel-constpool.ll | 1
-rw-r--r--  llvm/test/CodeGen/X86/fast-isel-medium-code-model.ll | 44
-rw-r--r--  mlir/include/mlir/Transforms/FoldUtils.h | 12
-rw-r--r--  mlir/lib/Transforms/Utils/FoldUtils.cpp | 79
-rw-r--r--  mlir/test/Transforms/canonicalize-debuginfo.mlir | 41
-rw-r--r--  mlir/test/Transforms/constant-fold-debuginfo.mlir | 34
11 files changed, 91 insertions, 183 deletions
diff --git a/clang/tools/clang-format/git-clang-format.bat b/clang/tools/clang-format/git-clang-format.bat
index d4bc5172989c..9965cd4312fe 100644
--- a/clang/tools/clang-format/git-clang-format.bat
+++ b/clang/tools/clang-format/git-clang-format.bat
@@ -1 +1 @@
-py -3 git-clang-format %*
+py -3 %~pn0 %*
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
index 73308925e914..694d3d8004af 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
@@ -690,7 +690,9 @@ protected:
bool isObviouslySafeToFold(MachineInstr &MI, MachineInstr &IntoMI) const;
template <typename Ty> static Ty readBytesAs(const uint8_t *MatchTable) {
- return *reinterpret_cast<const Ty *>(MatchTable);
+ Ty Ret;
+ memcpy(&Ret, MatchTable, sizeof(Ret));
+ return Ret;
}
};
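
The readBytesAs change above replaces a reinterpret_cast load from the byte-encoded match table with a memcpy, which avoids unaligned access and strict-aliasing undefined behavior when an entry does not sit on a naturally aligned boundary. A minimal standalone sketch of the same idiom, using illustrative names rather than the LLVM code itself:

#include <cstdint>
#include <cstring>

// Read a Ty from a byte buffer at an arbitrary (possibly unaligned) offset.
// memcpy sidesteps alignment and aliasing rules; compilers typically lower a
// fixed-size memcpy like this to a single plain load.
template <typename Ty> static Ty readBytesAs(const uint8_t *Bytes) {
  Ty Ret;
  std::memcpy(&Ret, Bytes, sizeof(Ret));
  return Ret;
}

int main() {
  uint8_t Table[8] = {0x10, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00};
  // A 16-bit operand stored at odd offset 1; reads 0x1234 on a
  // little-endian host.
  uint16_t Imm = readBytesAs<uint16_t>(Table + 1);
  return Imm == 0x1234 ? 0 : 1;
}
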
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c0462dc4b0bc..a75bbb49beda 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2624,6 +2624,25 @@ SDValue RISCVTargetLowering::computeVLMax(MVT VecVT, const SDLoc &DL,
VecVT.getVectorElementCount());
}
+std::pair<unsigned, unsigned>
+RISCVTargetLowering::computeVLMAXBounds(MVT VecVT,
+ const RISCVSubtarget &Subtarget) {
+ assert(VecVT.isScalableVector() && "Expected scalable vector");
+
+ unsigned EltSize = VecVT.getScalarSizeInBits();
+ unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
+
+ unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
+ unsigned MaxVLMAX =
+ RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
+
+ unsigned VectorBitsMin = Subtarget.getRealMinVLen();
+ unsigned MinVLMAX =
+ RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
+
+ return std::make_pair(MinVLMAX, MaxVLMAX);
+}
+
// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// of either is (currently) supported. This can get us into an infinite loop
// where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
@@ -8123,16 +8142,8 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
// Optimize for constant AVL
if (isa<ConstantSDNode>(AVL)) {
- unsigned EltSize = VT.getScalarSizeInBits();
- unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
-
- unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
- unsigned MaxVLMAX =
- RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
-
- unsigned VectorBitsMin = Subtarget.getRealMinVLen();
- unsigned MinVLMAX =
- RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
+ const auto [MinVLMAX, MaxVLMAX] =
+ RISCVTargetLowering::computeVLMAXBounds(VT, Subtarget);
uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
if (AVLInt <= MinVLMAX) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index ae798cc47bf8..41a2dc5771c8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -744,7 +744,13 @@ public:
// The following equations have been reordered to prevent loss of precision
// when calculating fractional LMUL.
return ((VectorBits / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
- };
+ }
+
+ // Return inclusive (low, high) bounds on the value of VLMAX for the
+ // given scalable container type given known bounds on VLEN.
+ static std::pair<unsigned, unsigned>
+ computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget);
+
static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
static unsigned getRegClassIDForVecVT(MVT VT);
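
The computeVLMAXBounds helper declared above simply evaluates the VLMAX formula from this header, ((VectorBits / EltSize) * MinSize) / RVVBitsPerBlock, at the subtarget's known minimum and maximum VLEN. A hedged, self-contained sketch of that arithmetic (standalone names, not the LLVM classes themselves; RVVBitsPerBlock is 64 for RVV): a <vscale x 2 x i32> container has EltSize = 32 and MinSize = 64 bits, so with VLEN known to lie between 128 and 512 the bounds come out to (4, 16).

#include <cassert>
#include <utility>

// MinSize / RVVBitsPerBlock is the (possibly fractional) LMUL of the
// scalable container type; RVVBitsPerBlock is 64 in RVV.
static constexpr unsigned RVVBitsPerBlock = 64;

// VLMAX = (VLEN / SEW) * LMUL, ordered as in the header to avoid losing
// precision for fractional LMUL.
static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,
                             unsigned MinSize) {
  return ((VectorBits / EltSize) * MinSize) / RVVBitsPerBlock;
}

// Inclusive (low, high) bounds on VLMAX given bounds on VLEN.
static std::pair<unsigned, unsigned>
computeVLMAXBounds(unsigned EltSize, unsigned MinSize, unsigned VLenMin,
                   unsigned VLenMax) {
  return {computeVLMAX(VLenMin, EltSize, MinSize),
          computeVLMAX(VLenMax, EltSize, MinSize)};
}

int main() {
  auto [MinVLMAX, MaxVLMAX] =
      computeVLMAXBounds(/*EltSize=*/32, /*MinSize=*/64,
                         /*VLenMin=*/128, /*VLenMax=*/512);
  assert(MinVLMAX == 4 && MaxVLMAX == 16);
  return 0;
}
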
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index bdc9d1d42dd1..425c52dbe7b1 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -711,7 +711,8 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
// Handle constant address.
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Can't handle alternate code models yet.
- if (TM.getCodeModel() != CodeModel::Small)
+ if (TM.getCodeModel() != CodeModel::Small &&
+ TM.getCodeModel() != CodeModel::Medium)
return false;
// Can't handle large objects yet.
@@ -1050,7 +1051,8 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
// Handle constant address.
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Can't handle alternate code models yet.
- if (TM.getCodeModel() != CodeModel::Small)
+ if (TM.getCodeModel() != CodeModel::Small &&
+ TM.getCodeModel() != CodeModel::Medium)
return false;
// RIP-relative addresses can't have additional register operands.
@@ -3774,7 +3776,8 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
// Can't handle alternate code models yet.
CodeModel::Model CM = TM.getCodeModel();
- if (CM != CodeModel::Small && CM != CodeModel::Large)
+ if (CM != CodeModel::Small && CM != CodeModel::Medium &&
+ CM != CodeModel::Large)
return 0;
// Get opcode and regclass of the output for the given load instruction.
@@ -3812,7 +3815,7 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
else if (OpFlag == X86II::MO_GOTOFF)
PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- else if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
+ else if (Subtarget->is64Bit() && TM.getCodeModel() != CodeModel::Large)
PICBase = X86::RIP;
// Create the load from the constant pool.
@@ -3842,8 +3845,11 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
}
unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
- // Can't handle alternate code models yet.
- if (TM.getCodeModel() != CodeModel::Small)
+ // Can't handle large GlobalValues yet.
+ if (TM.getCodeModel() != CodeModel::Small &&
+ TM.getCodeModel() != CodeModel::Medium)
+ return 0;
+ if (!isa<GlobalObject>(GV) || TM.isLargeGlobalObject(cast<GlobalObject>(GV)))
return 0;
// Materialize addresses with LEA/MOV instructions.
diff --git a/llvm/test/CodeGen/X86/fast-isel-constpool.ll b/llvm/test/CodeGen/X86/fast-isel-constpool.ll
index 9f70fd559073..9e4cbb61308e 100644
--- a/llvm/test/CodeGen/X86/fast-isel-constpool.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-constpool.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -fast-isel -code-model=small < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-darwin -fast-isel -code-model=medium < %s | FileCheck %s
; RUN: llc -mtriple=x86_64-apple-darwin -fast-isel -code-model=large < %s | FileCheck %s --check-prefix=LARGE
; RUN: llc -mtriple=x86_64 -fast-isel -code-model=large -relocation-model=pic < %s | FileCheck %s --check-prefix=LARGE_PIC
; RUN: llc -mtriple=x86_64-apple-darwin -fast-isel -code-model=small -mattr=avx < %s | FileCheck %s --check-prefix=AVX
diff --git a/llvm/test/CodeGen/X86/fast-isel-medium-code-model.ll b/llvm/test/CodeGen/X86/fast-isel-medium-code-model.ll
new file mode 100644
index 000000000000..4aa230a209e2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/fast-isel-medium-code-model.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -fast-isel -fast-isel-abort=3 -code-model=medium -large-data-threshold=5 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-linux-gnu -fast-isel -code-model=medium -large-data-threshold=3 < %s -o /dev/null \
+; RUN: -pass-remarks-output=- -pass-remarks-filter=sdagisel | FileCheck %s --check-prefix=FALLBACK --implicit-check-not=missed
+
+declare void @foo()
+
+define void @call_foo() {
+; CHECK-LABEL: call_foo:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: callq foo@PLT
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+ call void @foo()
+ ret void
+}
+
+@g = internal global i32 42
+
+; FALLBACK: FastISel missed terminator
+; FALLBACK: in function: g_addr
+
+define ptr @g_addr() {
+; CHECK-LABEL: g_addr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movabsq $g, %rax
+; CHECK-NEXT: retq
+ ret ptr @g
+}
+
+; FALLBACK: FastISel missed
+; FALLBACK: in function: load_g
+
+define i32 @load_g() {
+; CHECK-LABEL: load_g:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl g, %eax
+; CHECK-NEXT: retq
+ %i = load i32, ptr @g
+ ret i32 %i
+}
diff --git a/mlir/include/mlir/Transforms/FoldUtils.h b/mlir/include/mlir/Transforms/FoldUtils.h
index 28fa18cf942d..2600da361496 100644
--- a/mlir/include/mlir/Transforms/FoldUtils.h
+++ b/mlir/include/mlir/Transforms/FoldUtils.h
@@ -33,8 +33,7 @@ class Value;
class OperationFolder {
public:
OperationFolder(MLIRContext *ctx, OpBuilder::Listener *listener = nullptr)
- : fusedLocationTag(StringAttr::get(ctx, "CSE")), interfaces(ctx),
- rewriter(ctx, listener) {}
+ : interfaces(ctx), rewriter(ctx, listener) {}
/// Tries to perform folding on the given `op`, including unifying
/// deduplicated constants. If successful, replaces `op`'s uses with
@@ -96,15 +95,6 @@ private:
Dialect *dialect, Attribute value,
Type type, Location loc);
- // Fuse `foldedLocation` into the Location of `retainedOp`. This will result
- // in `retainedOp` having a FusedLoc with `fusedLocationTag` to help trace the
- // source of the fusion. If `retainedOp` already had a FusedLoc with the same
- // tag, `foldedLocation` will simply be appended to it.
- void appendFoldedLocation(Operation *retainedOp, Location foldedLocation);
-
- /// Tag for annotating fused locations as a result of merging constants.
- StringAttr fusedLocationTag;
-
/// A mapping between an insertion region and the constants that have been
/// created within it.
DenseMap<Region *, ConstantMap> foldScopes;
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
index 136c4d2216b8..90ee5ba51de3 100644
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -141,7 +141,6 @@ bool OperationFolder::insertKnownConstant(Operation *op, Attribute constValue) {
// If there is an existing constant, replace `op`.
if (folderConstOp) {
notifyRemoval(op);
- appendFoldedLocation(folderConstOp, op->getLoc());
rewriter.replaceOp(op, folderConstOp->getResults());
return false;
}
@@ -295,10 +294,8 @@ OperationFolder::tryGetOrCreateConstant(ConstantMap &uniquedConstants,
// Check if an existing mapping already exists.
auto constKey = std::make_tuple(dialect, value, type);
Operation *&constOp = uniquedConstants[constKey];
- if (constOp) {
- appendFoldedLocation(constOp, loc);
+ if (constOp)
return constOp;
- }
// If one doesn't exist, try to materialize one.
if (!(constOp = materializeConstant(dialect, rewriter, value, type, loc)))
@@ -319,7 +316,6 @@ OperationFolder::tryGetOrCreateConstant(ConstantMap &uniquedConstants,
// materialized operation in favor of the existing one.
if (auto *existingOp = uniquedConstants.lookup(newKey)) {
notifyRemoval(constOp);
- appendFoldedLocation(existingOp, constOp->getLoc());
rewriter.eraseOp(constOp);
referencedDialects[existingOp].push_back(dialect);
return constOp = existingOp;
@@ -330,76 +326,3 @@ OperationFolder::tryGetOrCreateConstant(ConstantMap &uniquedConstants,
auto newIt = uniquedConstants.insert({newKey, constOp});
return newIt.first->second;
}
-
-/// Helper that flattens nested fused locations to a single fused location.
-/// Fused locations nested under non-fused locations are not flattened, and
-/// calling this on non-fused locations is a no-op as a result.
-///
-/// Fused locations are only flattened into parent fused locations if the
-/// child fused location has no metadata, or if the metadata of the parent and
-/// child fused locations are the same---this to avoid breaking cases where
-/// metadata matter.
-static Location FlattenFusedLocationRecursively(const Location loc) {
- auto fusedLoc = dyn_cast<FusedLoc>(loc);
- if (!fusedLoc)
- return loc;
-
- SetVector<Location> flattenedLocs;
- Attribute metadata = fusedLoc.getMetadata();
- ArrayRef<Location> unflattenedLocs = fusedLoc.getLocations();
- bool hasAnyNestedLocChanged = false;
-
- for (const Location &unflattenedLoc : unflattenedLocs) {
- Location flattenedLoc = FlattenFusedLocationRecursively(unflattenedLoc);
-
- auto flattenedFusedLoc = dyn_cast<FusedLoc>(flattenedLoc);
- if (flattenedFusedLoc && (!flattenedFusedLoc.getMetadata() ||
- flattenedFusedLoc.getMetadata() == metadata)) {
- hasAnyNestedLocChanged = true;
- ArrayRef<Location> nestedLocations = flattenedFusedLoc.getLocations();
- flattenedLocs.insert(nestedLocations.begin(), nestedLocations.end());
- } else {
- if (flattenedLoc != unflattenedLoc)
- hasAnyNestedLocChanged = true;
-
- flattenedLocs.insert(flattenedLoc);
- }
- }
-
- if (!hasAnyNestedLocChanged &&
- unflattenedLocs.size() == flattenedLocs.size()) {
- return loc;
- }
-
- return FusedLoc::get(loc->getContext(), flattenedLocs.takeVector(),
- fusedLoc.getMetadata());
-}
-
-void OperationFolder::appendFoldedLocation(Operation *retainedOp,
- Location foldedLocation) {
- // Append into existing fused location if it has the same tag.
- if (auto existingFusedLoc =
- dyn_cast<FusedLocWith<StringAttr>>(retainedOp->getLoc())) {
- StringAttr existingMetadata = existingFusedLoc.getMetadata();
- if (existingMetadata == fusedLocationTag) {
- ArrayRef<Location> existingLocations = existingFusedLoc.getLocations();
- SetVector<Location> locations(existingLocations.begin(),
- existingLocations.end());
- locations.insert(foldedLocation);
- Location newFusedLoc = FusedLoc::get(
- retainedOp->getContext(), locations.takeVector(), existingMetadata);
- retainedOp->setLoc(FlattenFusedLocationRecursively(newFusedLoc));
- return;
- }
- }
-
- // Create a new fusedloc with retainedOp's loc and foldedLocation.
- // If they're already equal, no need to fuse.
- if (retainedOp->getLoc() == foldedLocation)
- return;
-
- Location newFusedLoc =
- FusedLoc::get(retainedOp->getContext(),
- {retainedOp->getLoc(), foldedLocation}, fusedLocationTag);
- retainedOp->setLoc(FlattenFusedLocationRecursively(newFusedLoc));
-}
diff --git a/mlir/test/Transforms/canonicalize-debuginfo.mlir b/mlir/test/Transforms/canonicalize-debuginfo.mlir
deleted file mode 100644
index 217cc29c0095..000000000000
--- a/mlir/test/Transforms/canonicalize-debuginfo.mlir
+++ /dev/null
@@ -1,41 +0,0 @@
-// RUN: mlir-opt %s -pass-pipeline='builtin.module(func.func(canonicalize{test-convergence}))' -split-input-file -mlir-print-debuginfo | FileCheck %s
-
-// CHECK-LABEL: func @merge_constants
-func.func @merge_constants() -> (index, index, index, index, index, index, index) {
- // CHECK-NEXT: arith.constant 42 : index loc(#[[FusedLoc:.*]])
- %0 = arith.constant 42 : index loc("merge_constants":0:0)
- %1 = arith.constant 42 : index loc("merge_constants":1:0)
- %2 = arith.constant 42 : index loc("merge_constants":2:0)
- %3 = arith.constant 42 : index loc("merge_constants":2:0) // repeated loc
- %4 = arith.constant 43 : index loc(fused<"some_label">["merge_constants":3:0])
- %5 = arith.constant 43 : index loc(fused<"some_label">["merge_constants":3:0])
- %6 = arith.constant 43 : index loc(fused<"some_other_label">["merge_constants":3:0])
- return %0, %1, %2, %3, %4, %5, %6 : index, index, index, index, index, index, index
-}
-
-// CHECK-DAG: #[[LocConst0:.*]] = loc("merge_constants":0:0)
-// CHECK-DAG: #[[LocConst1:.*]] = loc("merge_constants":1:0)
-// CHECK-DAG: #[[LocConst2:.*]] = loc("merge_constants":2:0)
-// CHECK-DAG: #[[LocConst3:.*]] = loc("merge_constants":3:0)
-// CHECK-DAG: #[[FusedLoc_CSE_1:.*]] = loc(fused<"CSE">[#[[LocConst0]], #[[LocConst1]], #[[LocConst2]]])
-// CHECK-DAG: #[[FusedLoc_Some_Label:.*]] = loc(fused<"some_label">[#[[LocConst3]]])
-// CHECK-DAG: #[[FusedLoc_Some_Other_Label:.*]] = loc(fused<"some_other_label">[#[[LocConst3]]])
-// CHECK-DAG: #[[FusedLoc_CSE_2:.*]] = loc(fused<"CSE">[#[[FusedLoc_Some_Label]], #[[FusedLoc_Some_Other_Label]]])
-
-// -----
-
-// CHECK-LABEL: func @hoist_constant
-func.func @hoist_constant(%arg0: memref<8xi32>) {
- // CHECK-NEXT: arith.constant 42 : i32 loc(#[[FusedLoc:.*]])
- affine.for %arg1 = 0 to 8 {
- %0 = arith.constant 42 : i32 loc("hoist_constant":0:0)
- %1 = arith.constant 42 : i32 loc("hoist_constant":1:0)
- memref.store %0, %arg0[%arg1] : memref<8xi32>
- memref.store %1, %arg0[%arg1] : memref<8xi32>
- }
- return
-}
-
-// CHECK-DAG: #[[LocConst0:.*]] = loc("hoist_constant":0:0)
-// CHECK-DAG: #[[LocConst1:.*]] = loc("hoist_constant":1:0)
-// CHECK: #[[FusedLoc]] = loc(fused<"CSE">[#[[LocConst0]], #[[LocConst1]]])
diff --git a/mlir/test/Transforms/constant-fold-debuginfo.mlir b/mlir/test/Transforms/constant-fold-debuginfo.mlir
deleted file mode 100644
index 79a25f860a48..000000000000
--- a/mlir/test/Transforms/constant-fold-debuginfo.mlir
+++ /dev/null
@@ -1,34 +0,0 @@
-// RUN: mlir-opt %s -split-input-file -test-constant-fold -mlir-print-debuginfo | FileCheck %s
-
-// CHECK-LABEL: func @fold_and_merge
-func.func @fold_and_merge() -> (i32, i32) {
- %0 = arith.constant 1 : i32
- %1 = arith.constant 5 : i32
-
- // CHECK-NEXT: [[C:%.+]] = arith.constant 6 : i32 loc(#[[FusedLoc:.*]])
- %2 = arith.addi %0, %1 : i32 loc("fold_and_merge":0:0)
-
- %3 = arith.constant 6 : i32 loc("fold_and_merge":1:0)
-
- return %2, %3: i32, i32
-}
-
-// CHECK-DAG: #[[LocConst0:.*]] = loc("fold_and_merge":0:0)
-// CHECK-DAG: #[[LocConst1:.*]] = loc("fold_and_merge":1:0)
-// CHECK: #[[FusedLoc]] = loc(fused<"CSE">[#[[LocConst1]], #[[LocConst0]]])
-
-// -----
-
-// CHECK-LABEL: func @materialize_different_dialect
-func.func @materialize_different_dialect() -> (f32, f32) {
- // CHECK: arith.constant 1.{{0*}}e+00 : f32 loc(#[[FusedLoc:.*]])
- %0 = arith.constant -1.0 : f32
- %1 = math.absf %0 : f32 loc("materialize_different_dialect":0:0)
- %2 = arith.constant 1.0 : f32 loc("materialize_different_dialect":1:0)
-
- return %1, %2: f32, f32
-}
-
-// CHECK-DAG: #[[LocConst0:.*]] = loc("materialize_different_dialect":0:0)
-// CHECK-DAG: #[[LocConst1:.*]] = loc("materialize_different_dialect":1:0)
-// CHECK: #[[FusedLoc]] = loc(fused<"CSE">[#[[LocConst1]], #[[LocConst0]]])