Skip to content

Commit

Permalink
[mlir][sparse] Migrate tests to use new syntax (llvm#66543)
Browse files Browse the repository at this point in the history
**COO**
`lvlTypes = [ "compressed_nu", "singleton" ]` to `map = (d0, d1) -> (d0
: compressed(nonunique), d1 : singleton)`
`lvlTypes = [ "compressed_nu_no", "singleton_no" ]` to `map = (d0, d1)
-> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))`

**SortedCOO**
`lvlTypes = [ "compressed_nu", "singleton" ]` to `map = (d0, d1) -> (d0
: compressed(nonunique), d1 : singleton)`

**BCOO**
`lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]` to `map = (d0,
d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 :
singleton)`

**BCSR**
`lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl =
affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod
3)>` to
`map = ( i, j ) ->
      ( i floordiv 2 : compressed,
        j floordiv 3 : compressed,
        i mod 2 : dense,
        j mod 3 : dense
      )`

**Tensor and other supported formats (e.g. CCC, CDC, CCCC)**

Currently, ELL and slice are not supported yet in the new syntax and the
CHECK tests will be updated once printing is set to output the new
syntax.

Previous PRs: llvm#66146, llvm#66309, llvm#66443
  • Loading branch information
yinying-lisa-li authored and zahiraam committed Oct 24, 2023
1 parent 6486eea commit bb2373e
Show file tree
Hide file tree
Showing 58 changed files with 143 additions and 146 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",

// Sorted Coordinate Scheme.
#SortedCOO = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ]
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>
... tensor<?x?xf64, #SortedCOO> ...

Expand All @@ -214,8 +214,12 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",

// Block sparse row storage (2x3 blocks).
#BCSR = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed", "dense", "dense" ],
dimToLvl = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)>
map = ( i, j ) ->
( i floordiv 2 : compressed,
j floordiv 3 : compressed,
i mod 2 : dense,
j mod 3 : dense
)
}>
... tensor<20x30xf32, #BCSR> ...

Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
// RUN: --sparsification="enable-gpu-libgen" | FileCheck %s

#SortedCOO = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ]
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

module {
Expand Down
12 changes: 5 additions & 7 deletions mlir/test/Dialect/SparseTensor/codegen.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
}>

#UCSR = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed_no" ]
map = (d0, d1) -> (d0 : dense, d1 : compressed(nonordered))
}>

#CSC = #sparse_tensor.encoding<{
Expand All @@ -41,21 +41,19 @@
}>

#Dense3D = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "dense", "dense" ],
dimToLvl = affine_map<(i, j, k) -> (k, i, j)>
map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense)
}>

#Coo = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ]
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

#CooPNo = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton_no" ],
dimToLvl = affine_map<(i, j) -> (j, i)>
map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton(nonordered))
}>

#ccoo = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed_nu", "singleton" ]
map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed(nonunique), d2 : singleton)
}>

// CHECK-LABEL: func @sparse_nop(
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s

#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
#COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed_nu", "singleton"]}>
#COO = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

// CHECK-LABEL: func.func @sparse_alloc_copy_CSR(
// CHECK-SAME: %[[VAL_0:.*0]]: memref<?xindex>,
Expand Down
3 changes: 1 addition & 2 deletions mlir/test/Dialect/SparseTensor/conversion.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,7 @@
}>

#SparseTensor = #sparse_tensor.encoding<{
lvlTypes = ["dense", "compressed", "compressed"],
dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed)
}>

// CHECK-LABEL: func @sparse_nop(
Expand Down
3 changes: 1 addition & 2 deletions mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,7 @@
}>

#SparseTensor = #sparse_tensor.encoding<{
lvlTypes = ["dense", "compressed", "compressed"],
dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed)
}>

// CHECK-LABEL: func @sparse_convert_1d(
Expand Down
3 changes: 1 addition & 2 deletions mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,7 @@
}>

#SparseTensor = #sparse_tensor.encoding<{
lvlTypes = ["dense", "compressed", "compressed"],
dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed)
}>

// CHECK-LABEL: func @sparse_convert_1d(
Expand Down
11 changes: 5 additions & 6 deletions mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -26,17 +26,16 @@
}>

#SortedCOO2D = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ],
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton),
}>

#SortedCOO3D = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ]
map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton)

}>

#TsssPermuted = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed", "compressed" ],
dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
map = (d0, d1, d2) -> (d2 : compressed, d0 : compressed, d1 : compressed)
}>

#COOSlice = #sparse_tensor.encoding<{
Expand Down Expand Up @@ -115,13 +114,13 @@ func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32
}

#SparseSingleton64 = #sparse_tensor.encoding<{
lvlTypes = ["singleton"],
map = (d0) -> (d0 : singleton),
posWidth = 64,
crdWidth = 64
}>

#SparseSingleton32 = #sparse_tensor.encoding<{
lvlTypes = ["singleton"],
map = (d0) -> (d0 : singleton),
posWidth = 32,
crdWidth = 32
}>
Expand Down
8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/invalid.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coord

// -----

#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}>
#SparseVector = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth=32, crdWidth=32}>

func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>)
-> tensor<100x2xf64, #SparseVector> {
Expand Down Expand Up @@ -68,7 +68,7 @@ func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>, %values: ten

// -----

#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}>
#SparseVector = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth=32, crdWidth=32}>

func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) {
// expected-error@+1 {{input/output trailing COO level-ranks don't match}}
Expand Down Expand Up @@ -270,7 +270,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>)

// -----

#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}>
#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> index {
// expected-error@+1 {{requested position memory size on a singleton level}}
Expand Down Expand Up @@ -658,7 +658,7 @@ func.func @invalid_concat_dim(%arg0: tensor<2x4xf64, #DC>,

#C = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
#DCC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed", "compressed"]}>
#DCC = #sparse_tensor.encoding<{map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed)}>
func.func @invalid_concat_rank_mismatch(%arg0: tensor<2xf64, #C>,
%arg1: tensor<3x4xf64, #DC>,
%arg2: tensor<4x4x4xf64, #DCC>) -> tensor<9x4xf64, #DC> {
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
}>

#SortedCOO = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ]
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

#DCSR = #sparse_tensor.encoding<{
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
}>

#COO = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ]
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

// CHECK-LABEL: func.func @sparse_new(
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/roundtrip.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func.func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32,

// -----

#SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>
#SparseTensor = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) }>

// CHECK-LABEL: func @sparse_convert_3d_from_sparse(
// CHECK-SAME: %[[A:.*]]: tensor<8x8x8xf64, #{{.*}}>)
Expand All @@ -103,7 +103,7 @@ func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xi

// -----

#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}>
#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

// CHECK-LABEL: func @sparse_indices_buffer(
// CHECK-SAME: %[[A:.*]]: tensor<?x?xf64, #{{.*}}>)
Expand Down
8 changes: 6 additions & 2 deletions mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -85,8 +85,12 @@ func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
// -----

#BCSR = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed", "dense", "dense" ],
dimToLvl = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)>
map = ( i, j ) ->
( i floordiv 2 : compressed,
j floordiv 3 : compressed,
i mod 2 : dense,
j mod 3 : dense
)
}>

// CHECK-LABEL: func private @sparse_bcsr(
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/sorted_coo.mlir
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -sparsification --canonicalize | FileCheck %s

#SortedCOO = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ]
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

#trait_scale = {
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/sparse_2d.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -1050,7 +1050,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
}

#BatchedVector = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed_hi" ],
map = (d0, d1) -> (d0 : dense, d1 : compressed(high))
}>
// CHECK-LABEL: func.func @sub_ss_batched(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>,
Expand Down
16 changes: 8 additions & 8 deletions mlir/test/Dialect/SparseTensor/sparse_3d.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,14 @@

#Td = #sparse_tensor.encoding<{ map = (d0) -> (d0 : dense) }>

#Tddd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ] }>
#Tdds = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>
#Tdsd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>
#Tdss = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>
#Tsdd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>
#Tsds = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>
#Tssd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>
#Tsss = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>
#Tddd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : dense) }>
#Tdds = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) }>
#Tdsd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : dense) }>
#Tdss = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed) }>
#Tsdd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : dense) }>
#Tsds = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : compressed) }>
#Tssd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : dense) }>
#Tsss = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }>

#trait3 = {
indexing_maps = [
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --sparsification --canonicalize --cse | FileCheck %s

#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>
#SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>
#SparseTensor = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }>

#trait = {
indexing_maps = [
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ func.func @foreach_print_slice(%A: tensor<4x4xf64, #CSR_SLICE>) {
}

#BCOO = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ],
map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
}>

// CHECK-LABEL: func.func @foreach_bcoo(
Expand Down
6 changes: 4 additions & 2 deletions mlir/test/Dialect/SparseTensor/sparse_nd.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,10 @@
// but an acyclic iteration graph using sparse constraints only.

#SparseTensor = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "dense", "dense", "compressed",
"compressed", "dense", "dense", "dense" ]
map = (d0, d1, d2, d3,
d4, d5, d6, d7) -> (d0 : dense, d1 : dense, d2 : dense,
d3 : compressed, d4 : compressed, d5 : dense,
d6 : dense, d7 : dense)
}>

#trait_mul = {
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/sparse_out.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
}>

#SparseTensor = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed", "compressed" ]
map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)
}>

#trait_scale_inpl = {
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/sparse_pack.mlir
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --canonicalize --post-sparsification-rewrite="enable-runtime-library=false" --sparse-tensor-codegen -cse --canonicalize | FileCheck %s

#COO = #sparse_tensor.encoding<{
lvlTypes = ["compressed_nu", "singleton"],
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton),
crdWidth=32
}>

Expand Down
3 changes: 1 addition & 2 deletions mlir/test/Dialect/SparseTensor/sparse_perm.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,7 @@
// RUN: mlir-opt %s -sparsification | FileCheck %s

#X = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "dense", "dense" ],
dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense)
}>

#trait = {
Expand Down
3 changes: 1 addition & 2 deletions mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,7 @@
// RUN: FileCheck %s --check-prefix=CHECK-MIR

#X = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "dense", "dense" ],
dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense)
}>

#trait = {
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@
//
// RUN: mlir-opt %s --linalg-generalize-named-ops --sparsification --cse --canonicalize | FileCheck %s

#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
#COO_2D = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }>
#COO_3D = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton), posWidth = 32, crdWidth = 32 }>


// CHECK-LABEL: func.func @sparse_reshape_fused(
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@
}

#VEC = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 32, crdWidth = 32 }>
#COO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
#CCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], posWidth = 32, crdWidth = 32 }>
#COO = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }>
#CCC = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed), posWidth = 32, crdWidth = 32 }>

//
// This kernel can be sparsified as all unsparsifiable operations'
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,8 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %}


#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
#COO_2D = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }>
#COO_3D = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton), posWidth = 32, crdWidth = 32 }>

module {
func.func private @printMemref3dF32(%ptr : tensor<?x?x?xf32>) attributes { llvm.emit_c_interface }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,17 +43,15 @@
}>

#SortedCOO = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ]
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

#SortedCOOPerm = #sparse_tensor.encoding<{
lvlTypes = [ "compressed_nu", "singleton" ],
dimToLvl = affine_map<(i,j) -> (j,i)>
map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton)
}>

#CCCPerm = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed", "compressed"],
dimToLvl = affine_map<(d0, d1, d2) -> (d1, d2, d0)>
map = (d0, d1, d2) -> (d1 : compressed, d2 : compressed, d0 : compressed)
}>

module {
Expand Down
Loading

0 comments on commit bb2373e

Please sign in to comment.