Use IndexMap
masahi committed May 17, 2022
1 parent 07fb589 commit 178c3dc
Showing 2 changed files with 24 additions and 35 deletions.
python/tvm/tir/tensor_intrin/cuda.py (3 changes: 2 additions & 1 deletion)

@@ -39,7 +39,8 @@ def shared_32x16_to_ldmatrix_32x16_layout(i, j):
 
 
 @register_func("tir.index_map.shared_16x16_to_ldmatrix_32x8_layout")
-def index_map_shared_16x16_to_ldmatrix_32x8_layout(i, j):
+def index_map_shared_16x16_to_ldmatrix_32x8_layout(ind):
+    i, j = ind[0], ind[1]
     thread_id, local_id = shared_16x16_to_ldmatrix_32x8_layout(i, j)
     return convert([thread_id, local_id])

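The Python-side change is driven by the C++ rewrite below: the registered function is now wrapped by IndexMap::FromFunc, which invokes it with a single Array of index variables rather than separate scalar arguments, so the function has to unpack ind itself. A minimal sketch of that calling convention (the helper name and surrounding setup are illustrative, not from the commit):

// Sketch only: the one-Array calling convention that IndexMap::FromFunc
// relies on. CallLayoutFunc is a made-up helper for illustration.
#include <tvm/runtime/logging.h>
#include <tvm/runtime/registry.h>
#include <tvm/tir/var.h>

tvm::Array<tvm::PrimExpr> CallLayoutFunc() {
  const auto* f = tvm::runtime::Registry::Get(
      "tir.index_map.shared_16x16_to_ldmatrix_32x8_layout");
  ICHECK(f != nullptr) << "layout function is not registered";
  // The whole index tuple travels as one Array<Var>; this is why the Python
  // definition above now takes `ind` and unpacks ind[0] and ind[1].
  tvm::Array<tvm::tir::Var> indices = {tvm::tir::Var("i"), tvm::tir::Var("j")};
  return (*f)(indices);
}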
src/target/source/codegen_cuda.cc (56 changes: 22 additions & 34 deletions)

@@ -26,15 +26,17 @@
 #include <tvm/arith/analyzer.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/tir/stmt_functor.h>
+#include <tvm/tir/index_map.h>
+#include <tvm/arith/iter_affine_map.h>
 
 #include <algorithm>
 #include <cmath>
 #include <string>
 #include <utility>
 #include <vector>
 
 #include "literal/cuda_half_t.h"
 #include "ptx.h"
-#include "tvm/arith/iter_affine_map.h"
 
 namespace tvm {
 namespace codegen {
@@ -839,40 +841,26 @@ void CodeGenCUDA::VisitExpr_(const CallNode* op, std::ostream& os) {
     std::string dst = this->PrintExpr(op->args[2]);
     std::string src = this->PrintExpr(op->args[3]);
     std::string src_offset = this->PrintExpr(op->args[4]);
+    PrimExpr stride = op->args[5];
+
+    ICHECK(m == 16 && n == 16) << "Only m == 16 && n == 16 case supported for now";
+
+    const auto* index_map_func =
+        runtime::Registry::Get("tir.index_map.shared_16x16_to_ldmatrix_32x8_layout");
+    ICHECK(index_map_func);
+
+    auto inverse_index_map =
+        IndexMap::FromFunc(2, *index_map_func).Inverse({Range(0, 16), Range(0, 16)});
+    auto indices_16x16 = inverse_index_map->final_indices;
+
+    var_idmap_[inverse_index_map->initial_indices[0].get()] = "threadIdx.x";
+    var_idmap_[inverse_index_map->initial_indices[1].get()] = "local_id";
+
+    os << "for (int local_id = 0; local_id < 8; ++local_id) {\n";
+    os << dst << "[" + this->PrintExpr(indices_16x16[0] * stride + indices_16x16[1]) + "]"
+       << " = " << src << "[" << src_offset << " + local_id];\n";
+    os << "}\n";
-    if (m == 16 && n == 8) {
-      std::string stride = this->PrintExpr(op->args[5]);
-      os << "for (int i = 0; i < 4; ++i) {\n";
-      os << dst << "[(i / 2 * 8 + threadIdx.x / 4) * " << stride
-         << " + (threadIdx.x % 4) * 2 + i % 2]"
-         << " = " << src << "[" << src_offset << " + i];\n";
-      os << "}\n";
-    } else if (m == 16 && n == 16) {
-      const auto* index_map =
-          runtime::Registry::Get("tir.index_map.shared_16x16_to_ldmatrix_32x8_layout");
-      ICHECK(index_map);
-
-      Var var_i("i");
-      Var var_j("j");
-      Array<PrimExpr> forward_map = (*index_map)(var_i, var_j);
-
-      arith::Analyzer ana;
-      auto iter_map = arith::DetectIterMap(
-          forward_map, {{var_i, Range(0, 16)}, {var_j, Range(0, 16)}}, true, true, &ana, true);
-
-      Var thread_id("threadIdx.x");
-      Var local_id("local_id");
-      auto inverse_map = arith::InverseAffineIterMap(iter_map, {thread_id, local_id});
-      PrimExpr stride = op->args[5];
-      auto dst_idx = inverse_map[var_i] * stride + inverse_map[var_j];
-
-      var_idmap_[thread_id.get()] = "threadIdx.x";
-      var_idmap_[local_id.get()] = "local_id";
-      os << "for (int local_id = 0; local_id < 8; ++local_id) {\n";
-      os << dst << "[" + this->PrintExpr(dst_idx) + "]"
-         << " = " << src << "[" << src_offset << " + local_id];\n";
-      os << "}\n";
-    }
   } else if (op->op.same_as(builtin::mma_fill())) {
     std::string num_elem = this->PrintExpr(op->args[0]);
     std::string dst = this->PrintExpr(op->args[1]);
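Compared with the removed branches, the rewrite drops the separate m == 16 && n == 8 path (now guarded by the ICHECK) and hides the DetectIterMap / InverseAffineIterMap machinery behind IndexMap::Inverse, so codegen only touches initial_indices and final_indices. The inversion itself is easy to sanity-check without TVM. The following sketch is plain C++; its forward formulas are an assumption about shared_16x16_to_ldmatrix_32x8_layout, not something this diff states:

// Standalone sanity check of the 16x16 <-> 32x8 layout inversion.
// Plain C++, no TVM. The forward formulas below are an assumption
// based on shared_16x16_to_ldmatrix_32x8_layout; they are NOT part
// of this commit.
#include <cassert>
#include <cstdio>

int main() {
  int inv_i[32][8];
  int inv_j[32][8];
  // Forward map: every (i, j) of the 16x16 tile lands on one
  // (thread_id, local_id) slot of the 32-thread x 8-element layout.
  for (int i = 0; i < 16; ++i) {
    for (int j = 0; j < 16; ++j) {
      int thread_id = 4 * (i % 8) + (j % 8) / 2;         // assumed
      int local_id = 4 * (j / 8) + 2 * (i / 8) + j % 2;  // assumed
      inv_i[thread_id][local_id] = i;
      inv_j[thread_id][local_id] = j;
    }
  }
  // Closed-form inverse: the kind of expression that IndexMap::Inverse
  // hands back symbolically in final_indices.
  for (int t = 0; t < 32; ++t) {
    for (int l = 0; l < 8; ++l) {
      int i = 8 * ((l % 4) / 2) + t / 4;
      int j = 8 * (l / 4) + 2 * (t % 4) + l % 2;
      assert(i == inv_i[t][l]);
      assert(j == inv_j[t][l]);
    }
  }
  std::printf("inverse verified for all 256 elements\n");
  return 0;
}

Under that same assumption, the emitted loop writes dst[(8 * (local_id % 4 / 2) + threadIdx.x / 4) * stride + 8 * (local_id / 4) + threadIdx.x % 4 * 2 + local_id % 2] = src[src_offset + local_id], i.e. each of the 32 threads scatters its 8 register values back into the 16x16 tile.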
