diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
index ec74c1c2d54a..66362d5be4f1 100644
--- a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
@@ -32,7 +32,6 @@ void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
                                           const SparsifierOptions &options) {
   // Rewrite named linalg ops into generic ops.
-
   pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
 
   // Sparsification and bufferization mini-pipeline.
@@ -65,6 +64,8 @@ void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
     pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToNVVMOps());
   }
 
+  // Progressively lower to LLVM. Note that the convert-vector-to-llvm
+  // pass is repeated on purpose.
   // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
   // it to this pipeline.
   pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
@@ -80,10 +81,7 @@ void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
   pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
   pm.addPass(createConvertMathToLibmPass());
   pm.addPass(createConvertComplexToLibmPass());
-
-  // Repeat convert-vector-to-llvm.
   pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
-
   pm.addPass(createConvertComplexToLLVMPass());
   pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
   pm.addPass(createConvertFuncToLLVMPass());
@@ -101,6 +99,7 @@ void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
     pm.addPass(createGpuModuleToBinaryPass(gpuModuleToBinaryPassOptions));
   }
 
+  // Ensure all casts are realized.
   pm.addPass(createReconcileUnrealizedCastsPass());
 }
 