author	Andrzej WarzyƄski <andrzej.warzynski@arm.com>	2023-12-21 18:45:00 +0000
committer	GitHub <noreply@github.com>	2023-12-21 18:45:00 +0000
commit	e6f57628790421b16a02b0cb4a67fab4f2f48004 (patch)
tree	30f171ccedf92bf96fa15cefdccb898ad7923a33
parent	9664ab570ae44068766cc722e8d5e62003d84361 (diff)
[mlir][vector][nfc] Add a test case for scalable vectors (#76138)
Extends fold-arith-extf-into-vector-contract.mlir by adding a test case for scalable vectors.
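
For context on what the new case exercises: in MLIR, a dimension written in square brackets, e.g. vector<[64]x64xf16>, is scalable, meaning its runtime length is 64 multiplied by the hardware vscale, whereas vector<64x64xf16> is fully static. Below is a minimal sketch (illustration only, not part of this commit; the function name @extf_fixed_and_scalable is made up) showing arith.extf applied to both forms:

func.func @extf_fixed_and_scalable(%fixed: vector<64x64xf16>,
                                   %scalable: vector<[64]x64xf16>)
    -> (vector<64x64xf32>, vector<[64]x64xf32>) {
  // Fixed-size vector: the shape is fully known at compile time.
  %0 = arith.extf %fixed : vector<64x64xf16> to vector<64x64xf32>
  // Scalable leading dimension: the runtime length is 64 x vscale.
  %1 = arith.extf %scalable : vector<[64]x64xf16> to vector<[64]x64xf32>
  return %0, %1 : vector<64x64xf32>, vector<[64]x64xf32>
}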
-rw-r--r--  mlir/test/Dialect/Vector/fold-arith-extf-into-vector-contract.mlir | 38
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/mlir/test/Dialect/Vector/fold-arith-extf-into-vector-contract.mlir b/mlir/test/Dialect/Vector/fold-arith-extf-into-vector-contract.mlir
index 79429afd8ff2..31ae126906f2 100644
--- a/mlir/test/Dialect/Vector/fold-arith-extf-into-vector-contract.mlir
+++ b/mlir/test/Dialect/Vector/fold-arith-extf-into-vector-contract.mlir
@@ -10,9 +10,41 @@
// CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>}
// CHECK-SAME: %[[ARG0]], %[[ARG1]], %[[ARG2]] : vector<64x64xf16>, vector<64x64xf16> into vector<64x64xf32>
// CHECK-NEXT: return %[[R]] : vector<64x64xf32>
-func.func @fold_arith_extf_into_contract(%arg0: vector<64x64xf16>, %arg1: vector<64x64xf16>, %arg2: vector<64x64xf32>) -> vector<64x64xf32> {
+func.func @fold_arith_extf_into_contract(
+ %arg0: vector<64x64xf16>,
+ %arg1: vector<64x64xf16>,
+ %arg2: vector<64x64xf32>) -> vector<64x64xf32> {
%lhs_f32 = arith.extf %arg0 : vector<64x64xf16> to vector<64x64xf32>
%rhs_f32 = arith.extf %arg1 : vector<64x64xf16> to vector<64x64xf32>
- %result = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %lhs_f32, %rhs_f32, %arg2 : vector<64x64xf32>, vector<64x64xf32> into vector<64x64xf32>
+ %result = vector.contract {
+ indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
+ iterator_types = ["parallel", "parallel", "reduction"],
+ kind = #vector.kind<add>}
+ %lhs_f32, %rhs_f32, %arg2 : vector<64x64xf32>, vector<64x64xf32> into vector<64x64xf32>
return %result : vector<64x64xf32>
-}
\ No newline at end of file
+}
+
+// -----
+
+// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
+// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
+// CHECK-DAG: #[[$map2:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// CHECK-LABEL: func.func @fold_arith_extf_into_contract_scalable
+// CHECK-SAME: (%[[ARG0:.*]]: vector<[64]x64xf16>, %[[ARG1:.*]]: vector<64x64xf16>, %[[ARG2:.*]]: vector<[64]x64xf32>)
+// CHECK-NEXT: %[[R:.+]] = vector.contract {indexing_maps = [#[[$map0]], #[[$map1]], #[[$map2]]],
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>}
+// CHECK-SAME: %[[ARG0]], %[[ARG1]], %[[ARG2]] : vector<[64]x64xf16>, vector<64x64xf16> into vector<[64]x64xf32>
+// CHECK-NEXT: return %[[R]] : vector<[64]x64xf32>
+func.func @fold_arith_extf_into_contract_scalable(
+ %arg0: vector<[64]x64xf16>,
+ %arg1: vector<64x64xf16>,
+ %arg2: vector<[64]x64xf32>) -> vector<[64]x64xf32> {
+ %lhs_f32 = arith.extf %arg0 : vector<[64]x64xf16> to vector<[64]x64xf32>
+ %rhs_f32 = arith.extf %arg1 : vector<64x64xf16> to vector<64x64xf32>
+ %result = vector.contract {
+ indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
+ iterator_types = ["parallel", "parallel", "reduction"],
+ kind = #vector.kind<add>}
+ %lhs_f32, %rhs_f32, %arg2 : vector<[64]x64xf32>, vector<64x64xf32> into vector<[64]x64xf32>
+ return %result : vector<[64]x64xf32>
+}
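
For readability, here is a sketch of the folded IR that the new CHECK lines expect for the scalable case, reconstructed from the CHECK directives above; the SSA name %0 and the line breaks are illustrative rather than verbatim tool output. The point being tested is that both arith.extf ops are folded away and vector.contract performs the mixed-precision contraction (f16 operands, f32 accumulator) directly, with the scalable dimension preserved:

func.func @fold_arith_extf_into_contract_scalable(
    %arg0: vector<[64]x64xf16>,
    %arg1: vector<64x64xf16>,
    %arg2: vector<[64]x64xf32>) -> vector<[64]x64xf32> {
  // Both extensions have been folded into the contraction.
  %0 = vector.contract {
         indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>,
                          affine_map<(d0, d1, d2) -> (d2, d1)>,
                          affine_map<(d0, d1, d2) -> (d0, d1)>],
         iterator_types = ["parallel", "parallel", "reduction"],
         kind = #vector.kind<add>}
       %arg0, %arg1, %arg2
       : vector<[64]x64xf16>, vector<64x64xf16> into vector<[64]x64xf32>
  return %0 : vector<[64]x64xf32>
}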