author     Aart Bik <39774503+aartbik@users.noreply.github.com>  2024-02-23 10:52:28 -0800
committer  GitHub <noreply@github.com>                           2024-02-23 10:52:28 -0800
commit     f8ce460e48ccc774354df75520d00a67ddbf84c0 (patch)
tree       8f64f8842484595a44879dc6c3093b0a2bfd3afd
parent     a24421fef713e5b3c0a885cf36a62cc3257be1f3 (diff)
[mlir][sparse] cleanup sparse runtime library (#82807)
Remove some obsolete APIs from the library that have been fully replaced by direct IR codegen.
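
The deleted entry points implemented the runtime-side "forwarding insertion" path: elements were buffered in a COO object keyed by coordinates, sorted on finalize, and only then expanded into the actual sparse-tensor storage (see the removed endForwardingInsert body in Storage.h below). Direct IR codegen now emits the equivalent buffering and sorting itself, so the library no longer needs these hooks. A minimal, self-contained sketch of that collect/sort/build pattern, for orientation only; the names Element, CooBuffer, and buildFromSortedCoo are hypothetical and not MLIR runtime APIs:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-ins for illustration; not MLIR runtime types.
    struct Element {
      std::vector<uint64_t> coords; // coordinates of the inserted value
      double value;
    };

    struct CooBuffer {
      std::vector<Element> elements;

      // forwardingInsert analogue: buffer one element by its coordinates.
      void add(std::vector<uint64_t> coords, double value) {
        elements.push_back({std::move(coords), value});
      }

      // endForwardingInsert analogue: sort lexicographically, then hand the
      // ordered elements to whatever constructs the actual storage.
      template <typename BuildFn>
      void finalize(BuildFn &&buildFromSortedCoo) {
        std::sort(elements.begin(), elements.end(),
                  [](const Element &a, const Element &b) {
                    return a.coords < b.coords; // lexicographic order
                  });
        buildFromSortedCoo(elements);
        elements.clear();
      }
    };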
-rw-r--r--  mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h          |   9
-rw-r--r--  mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h   |  43
-rw-r--r--  mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h    |  24
-rw-r--r--  mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp          |   7
-rw-r--r--  mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp           |  50
5 files changed, 12 insertions(+), 121 deletions(-)
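
Besides deleting kEmptyForward, kFromCOO, and kToCOO, the patch renumbers the surviving actions (kFromReader 4 to 1, kPack 7 to 2, kSortCOOInPlace 8 to 3). Since @newSparseTensor receives the action as a raw uint32_t, compiler-generated IR and the runtime library must agree on this encoding and be rebuilt together. A small illustrative check mirroring the Enums.h hunk below; the copied enum and the static_asserts are not part of the patch:

    #include <cstdint>

    // Illustrative copy of the trimmed Action enum from Enums.h.
    enum class Action : uint32_t {
      kEmpty = 0,
      kFromReader = 1,
      kPack = 2,
      kSortCOOInPlace = 3,
    };

    // The action crosses the compiler/runtime boundary as a raw integer, so
    // the numeric values above are ABI: IR emitted against the old numbering
    // (e.g. kFromReader == 4) would be misinterpreted by the new library.
    static_assert(static_cast<uint32_t>(Action::kFromReader) == 1, "ABI check");
    static_assert(static_cast<uint32_t>(Action::kSortCOOInPlace) == 3, "ABI check");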
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index a00c9c31256c..1c81d80ea7ec 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -145,12 +145,9 @@ constexpr bool isComplexPrimaryType(PrimaryType valTy) {
/// The actions performed by @newSparseTensor.
enum class Action : uint32_t {
kEmpty = 0,
- kEmptyForward = 1,
- kFromCOO = 2,
- kFromReader = 4,
- kToCOO = 5,
- kPack = 7,
- kSortCOOInPlace = 8,
+ kFromReader = 1,
+ kPack = 2,
+ kSortCOOInPlace = 3,
};
/// This enum defines all supported storage format without the level properties.
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index eff1aca42d3e..fe0e08b5c840 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -149,13 +149,6 @@ public:
MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETVALUES)
#undef DECL_GETVALUES
- /// Element-wise forwarding insertions. The first argument is the
- /// dimension-coordinates for the value being inserted.
-#define DECL_FORWARDINGINSERT(VNAME, V) \
- virtual void forwardingInsert(const uint64_t *, V);
- MLIR_SPARSETENSOR_FOREVERY_V(DECL_FORWARDINGINSERT)
-#undef DECL_FORWARDINGINSERT
-
/// Element-wise insertion in lexicographic coordinate order. The first
/// argument is the level-coordinates for the value being inserted.
#define DECL_LEXINSERT(VNAME, V) virtual void lexInsert(const uint64_t *, V);
@@ -171,9 +164,6 @@ public:
MLIR_SPARSETENSOR_FOREVERY_V(DECL_EXPINSERT)
#undef DECL_EXPINSERT
- /// Finalizes forwarding insertions.
- virtual void endForwardingInsert() = 0;
-
/// Finalizes lexicographic insertions.
virtual void endLexInsert() = 0;
@@ -248,7 +238,7 @@ public:
static SparseTensorStorage<P, C, V> *
newEmpty(uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
- const uint64_t *dim2lvl, const uint64_t *lvl2dim, bool forwarding);
+ const uint64_t *dim2lvl, const uint64_t *lvl2dim);
/// Allocates a new sparse tensor and initializes it from the given COO.
static SparseTensorStorage<P, C, V> *
@@ -284,13 +274,6 @@ public:
*out = &values;
}
- /// Partially specialize forwarding insertions based on template types.
- void forwardingInsert(const uint64_t *dimCoords, V val) final {
- assert(dimCoords && coo);
- map.pushforward(dimCoords, lvlCursor.data());
- coo->add(lvlCursor, val);
- }
-
/// Partially specialize lexicographical insertions based on template types.
void lexInsert(const uint64_t *lvlCoords, V val) final {
assert(lvlCoords);
@@ -345,21 +328,6 @@ public:
}
}
- /// Finalizes forwarding insertions.
- void endForwardingInsert() final {
- // Ensure COO is sorted.
- assert(coo);
- coo->sort();
- // Now actually insert the `elements`.
- const auto &elements = coo->getElements();
- const uint64_t nse = elements.size();
- assert(values.size() == 0);
- values.reserve(nse);
- fromCOO(elements, 0, nse, 0);
- delete coo;
- coo = nullptr;
- }
-
/// Finalizes lexicographic insertions.
void endLexInsert() final {
if (!allDense) {
@@ -653,13 +621,10 @@ template <typename P, typename C, typename V>
SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newEmpty(
uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
- const uint64_t *dim2lvl, const uint64_t *lvl2dim, bool forwarding) {
- SparseTensorCOO<V> *lvlCOO = nullptr;
- if (forwarding)
- lvlCOO = new SparseTensorCOO<V>(lvlRank, lvlSizes);
+ const uint64_t *dim2lvl, const uint64_t *lvl2dim) {
return new SparseTensorStorage<P, C, V>(dimRank, dimSizes, lvlRank, lvlSizes,
- lvlTypes, dim2lvl, lvl2dim, lvlCOO,
- !forwarding);
+ lvlTypes, dim2lvl, lvl2dim, nullptr,
+ true);
}
template <typename P, typename C, typename V>
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index 8b0829aab0d8..d916186c835c 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -38,15 +38,12 @@ extern "C" {
/// This is the "swiss army knife" method for materializing sparse
/// tensors into the computation. The types of the `ptr` argument and
/// the result depend on the action, as explained in the following table,
-/// where "STS" means a sparse-tensor-storage object and "COO" means
-/// a coordinate-scheme object.
+/// where "STS" means a sparse-tensor-storage object.
///
/// Action: `ptr`: Returns:
+/// ---------------------------------------------------------------------------
/// kEmpty - STS, empty
-/// kEmptyForward - STS, empty, with forwarding COO
-/// kFromCOO COO STS, copied from the COO source
/// kFromReader reader STS, input from reader
-/// kToCOO STS COO, copied from the STS source
/// kPack buffers STS, from level buffers
/// kSortCOOInPlace STS STS, sorted in place
MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_newSparseTensor( // NOLINT
@@ -80,14 +77,6 @@ MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEPOSITIONS)
MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSECOORDINATES)
#undef DECL_SPARSECOORDINATES
-/// Tensor-storage method for a dim to lvl forwarding insertion.
-#define DECL_FORWARDINGINSERT(VNAME, V) \
- MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_forwardingInsert##VNAME( \
- void *tensor, StridedMemRefType<V, 0> *vref, \
- StridedMemRefType<index_type, 1> *dimCoordsRef); \
- MLIR_SPARSETENSOR_FOREVERY_V(DECL_FORWARDINGINSERT)
-#undef DECL_FORWARDINGINSERT
-
/// Tensor-storage method to insert elements in lexicographical
/// level-coordinate order.
#define DECL_LEXINSERT(VNAME, V) \
@@ -160,21 +149,12 @@ MLIR_CRUNNERUTILS_EXPORT index_type sparseLvlSize(void *tensor, index_type l);
/// Tensor-storage method to get the size of the given dimension.
MLIR_CRUNNERUTILS_EXPORT index_type sparseDimSize(void *tensor, index_type d);
-/// Tensor-storage method to finalize forwarding insertions.
-MLIR_CRUNNERUTILS_EXPORT void endForwardingInsert(void *tensor);
-
/// Tensor-storage method to finalize lexicographic insertions.
MLIR_CRUNNERUTILS_EXPORT void endLexInsert(void *tensor);
/// Releases the memory for the tensor-storage object.
MLIR_CRUNNERUTILS_EXPORT void delSparseTensor(void *tensor);
-/// Releases the memory for the coordinate-scheme object.
-#define DECL_DELCOO(VNAME, V) \
- MLIR_CRUNNERUTILS_EXPORT void delSparseTensorCOO##VNAME(void *coo);
-MLIR_SPARSETENSOR_FOREVERY_V(DECL_DELCOO)
-#undef DECL_DELCOO
-
/// Helper function to read a sparse tensor filename from the environment,
/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
MLIR_CRUNNERUTILS_EXPORT char *getTensorFilename(index_type id);
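
(The header above keeps getTensorFilename, which resolves input files through numbered environment variables ${TENSOR0}, ${TENSOR1}, etc. A minimal usage sketch, assuming a test driver that exports the variable beforehand; the path is a placeholder:

    // Assumes the environment defines the variable, e.g.
    //   export TENSOR0=/path/to/matrix.mtx
    #include "mlir/ExecutionEngine/SparseTensorRuntime.h"

    int main() {
      // Index 0 resolves through ${TENSOR0}, index 1 through ${TENSOR1}, ...
      char *filename = getTensorFilename(/*id=*/0);
      // ... hand `filename` to whatever constructs the sparse tensor reader ...
      return 0;
    }
)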
diff --git a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
index 9e8b240899d8..bbe10b0dcdd4 100644
--- a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
@@ -74,13 +74,6 @@ MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETCOORDINATES)
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETVALUES)
#undef IMPL_GETVALUES
-#define IMPL_FORWARDINGINSERT(VNAME, V) \
- void SparseTensorStorageBase::forwardingInsert(const uint64_t *, V) { \
- FATAL_PIV("forwardingInsert" #VNAME); \
- }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_FORWARDINGINSERT)
-#undef IMPL_FORWARDINGINSERT
-
#define IMPL_LEXINSERT(VNAME, V) \
void SparseTensorStorageBase::lexInsert(const uint64_t *, V) { \
FATAL_PIV("lexInsert" #VNAME); \
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index a5e75a77b4e4..0bc90b281b7c 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -117,20 +117,7 @@ extern "C" {
switch (action) { \
case Action::kEmpty: { \
return SparseTensorStorage<P, C, V>::newEmpty( \
- dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim, \
- false); \
- } \
- case Action::kEmptyForward: { \
- return SparseTensorStorage<P, C, V>::newEmpty( \
- dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim, \
- true); \
- } \
- case Action::kFromCOO: { \
- assert(ptr && "Received nullptr for SparseTensorCOO object"); \
- auto &coo = *static_cast<SparseTensorCOO<V> *>(ptr); \
- return SparseTensorStorage<P, C, V>::newFromCOO( \
- dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim, \
- coo); \
+ dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim); \
} \
case Action::kFromReader: { \
assert(ptr && "Received nullptr for SparseTensorReader object"); \
@@ -138,11 +125,6 @@ extern "C" {
return static_cast<void *>(reader.readSparseTensor<P, C, V>( \
lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim)); \
} \
- case Action::kToCOO: { \
- assert(ptr && "Received nullptr for SparseTensorStorage object"); \
- auto &tensor = *static_cast<SparseTensorStorage<P, C, V> *>(ptr); \
- return tensor.toCOO(); \
- } \
case Action::kPack: { \
assert(ptr && "Received nullptr for SparseTensorStorage object"); \
intptr_t *buffers = static_cast<intptr_t *>(ptr); \
@@ -341,21 +323,6 @@ MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSECOORDINATES)
#undef IMPL_SPARSECOORDINATES
#undef IMPL_GETOVERHEAD
-#define IMPL_FORWARDINGINSERT(VNAME, V) \
- void _mlir_ciface_forwardingInsert##VNAME( \
- void *t, StridedMemRefType<V, 0> *vref, \
- StridedMemRefType<index_type, 1> *dimCoordsRef) { \
- assert(t &&vref); \
- ASSERT_NO_STRIDE(dimCoordsRef); \
- const index_type *dimCoords = MEMREF_GET_PAYLOAD(dimCoordsRef); \
- assert(dimCoords); \
- const V *value = MEMREF_GET_PAYLOAD(vref); \
- static_cast<SparseTensorStorageBase *>(t)->forwardingInsert(dimCoords, \
- *value); \
- }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_FORWARDINGINSERT)
-#undef IMPL_FORWARDINGINSERT
-
#define IMPL_LEXINSERT(VNAME, V) \
void _mlir_ciface_lexInsert##VNAME( \
void *t, StridedMemRefType<index_type, 1> *lvlCoordsRef, \
@@ -427,8 +394,8 @@ void _mlir_ciface_getSparseTensorReaderDimSizes(
const uint64_t cSize = MEMREF_GET_USIZE(cref); \
const uint64_t vSize = MEMREF_GET_USIZE(vref); \
ASSERT_USIZE_EQ(lvl2dimRef, dimRank); \
- assert(cSize >= lvlRank * vSize); \
- assert(vSize >= reader.getNSE() && "Not enough space in buffers"); \
+ assert(cSize >= lvlRank * reader.getNSE()); \
+ assert(vSize >= reader.getNSE()); \
(void)dimRank; \
(void)cSize; \
(void)vSize; \
@@ -488,10 +455,6 @@ index_type sparseDimSize(void *tensor, index_type d) {
return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
}
-void endForwardingInsert(void *tensor) {
- return static_cast<SparseTensorStorageBase *>(tensor)->endForwardingInsert();
-}
-
void endLexInsert(void *tensor) {
return static_cast<SparseTensorStorageBase *>(tensor)->endLexInsert();
}
@@ -500,13 +463,6 @@ void delSparseTensor(void *tensor) {
delete static_cast<SparseTensorStorageBase *>(tensor);
}
-#define IMPL_DELCOO(VNAME, V) \
- void delSparseTensorCOO##VNAME(void *coo) { \
- delete static_cast<SparseTensorCOO<V> *>(coo); \
- }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELCOO)
-#undef IMPL_DELCOO
-
char *getTensorFilename(index_type id) {
constexpr size_t bufSize = 80;
char var[bufSize];