fp8 kv cache quantization

ZiyueHuang committed Nov 3, 2023
1 parent e2fb71e commit e9dd28e
Showing 14 changed files with 920 additions and 17 deletions.
19 changes: 19 additions & 0 deletions csrc/attention.cpp
@@ -14,9 +14,28 @@ void single_query_cached_kv_attention(
int max_context_len,
const c10::optional<torch::Tensor>& alibi_slopes);


void single_query_cached_kv_quantized_attention(
torch::Tensor& out, // [num_seqs, num_heads, head_size]
torch::Tensor& query, // [num_seqs, num_heads, head_size]
torch::Tensor& key_cache, // [num_blocks, num_heads, head_size/x, block_size, x]
torch::Tensor& value_cache, // [num_blocks, num_heads, head_size, block_size]
torch::Tensor& head_mapping, // [num_heads]
float scale,
torch::Tensor& block_tables, // [num_seqs, max_num_blocks_per_seq]
torch::Tensor& context_lens, // [num_seqs]
int block_size,
int max_context_len,
const c10::optional<torch::Tensor>& alibi_slopes);


PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"single_query_cached_kv_attention",
&single_query_cached_kv_attention,
"Compute the attention between an input query and the cached key/value tensors");
m.def(
"single_query_cached_kv_quantized_attention",
&single_query_cached_kv_quantized_attention,
"Compute the attention between an input query and the cached & quantized key/value tensors");
}
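
Since the commit title says fp8 while the cache is carried in uint8 storage (note the dtype_uint8.cuh addition below), the kernel behind this new binding presumably reinterprets each cache byte as an fp8 value and widens it to float before the QK dot product and PV accumulation. A minimal CUDA sketch under that assumption, using the cuda_fp8.h conversion types; dequantize_kv is an illustrative name, and the e5m2 format choice is a guess, not taken from the commit:

#include <cuda_fp8.h>

// Hypothetical helper: reinterpret a uint8 cache byte as fp8 and widen it
// to float. The e5m2 (vs. e4m3) choice here is an assumption.
__device__ __forceinline__ float dequantize_kv(uint8_t q) {
  __nv_fp8_e5m2 v = *reinterpret_cast<const __nv_fp8_e5m2*>(&q);
  return static_cast<float>(v);
}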
2 changes: 2 additions & 0 deletions csrc/attention/attention_dtypes.h
@@ -4,3 +4,5 @@
#include "dtype_float16.cuh"
#include "dtype_float32.cuh"
#include "dtype_bfloat16.cuh"
#include "dtype_uint8.cuh"

3 changes: 3 additions & 0 deletions csrc/attention/attention_generic.cuh
@@ -29,6 +29,9 @@ struct Vec {};
template<typename T>
struct FloatVec {};

template<typename T>
struct FloatVecTemp {};

// Template vector operations.
template<typename Acc, typename A, typename B>
inline __device__ Acc mul(A a, B b);
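
FloatVecTemp mirrors the existing FloatVec trait: it maps a vector type to a floating-point counterpart, presumably used for temporaries that hold dequantized values in the quantized kernel. A hypothetical specialization for illustration only (the real mappings would live in the dtype_*.cuh headers):

// Hypothetical: a packed 4 x uint8 vector dequantizes to four floats, so
// its temporary float type would be float4.
template<> struct FloatVecTemp<uint32_t> { using Type = float4; };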