diff --git a/src/relay/backend/vm/compiler.cc b/src/relay/backend/vm/compiler.cc
index 8fbe31edce3d..d908153f88cb 100644
--- a/src/relay/backend/vm/compiler.cc
+++ b/src/relay/backend/vm/compiler.cc
@@ -985,8 +985,11 @@ transform::Sequential MemoryOpt(tvm::Target host_target, TargetsMap targets) {
   // Fuse the shape functions.
   pass_seqs.push_back(transform::FuseOps());

-  // Perform memory planning in order to coalesce/reduce allocations.
-  pass_seqs.push_back(transform::MemoryPlan());
+  // TODO(mbrookhart, jroesch, masahi): this pass is very slow and its memory
+  // reuse optimizations are incomplete. Disable it until we can rewrite it
+  // in C++ and complete it.
+  // // Perform memory planning in order to coalesce/reduce allocations.
+  // pass_seqs.push_back(transform::MemoryPlan());

   // Compute away constant computation introduced by coalescing allocations.
   pass_seqs.push_back(transform::FoldConstant());
diff --git a/src/relay/backend/vm/lambda_lift.cc b/src/relay/backend/vm/lambda_lift.cc
index 8e9cc625063b..fe9a544a719e 100644
--- a/src/relay/backend/vm/lambda_lift.cc
+++ b/src/relay/backend/vm/lambda_lift.cc
@@ -192,7 +192,6 @@ class LambdaLifter : public ExprMutator {
       global = module_->GetGlobalVar(name);
     } else {
       // Add the lifted function to the module.
-      std::cout << AsText(lifted_func) << std::endl;
       module_->Add(global, lifted_func);
     }
