
Commit

Merge branch 'ggerganov:master' into master
mmnga authored Sep 2, 2023
2 parents 9fbe6d1 + 571083f commit 28a1c53
Showing 2 changed files with 29 additions and 2 deletions.
examples/server/server.cpp (14 changes: 12 additions & 2 deletions)
@@ -1379,7 +1379,13 @@ int main(int argc, char **argv)
                 }
             }
 
-            const json data = format_final_response(llama, llama.generated_text, llama.generated_token_probs);
+            auto probs = llama.generated_token_probs;
+            if (llama.params.n_probs > 0 && llama.stopped_word) {
+                const std::vector<llama_token> stop_word_toks = llama_tokenize(llama.ctx, llama.stopping_word, false);
+                probs = std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.end() - stop_word_toks.size());
+            }
+
+            const json data = format_final_response(llama, llama.generated_text, probs);
 
             llama_print_timings(llama.ctx);

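For context: in the non-streaming /completion path, when generation stopped on a stop word and per-token probabilities were requested (params.n_probs > 0), the hunk above re-tokenizes the stop word and drops that many trailing entries from generated_token_probs, so the stop word's own tokens are not reported back to the client. A minimal standalone sketch of the same trimming idea, with TokenProb as a hypothetical stand-in for the server's completion_token_output:

    #include <vector>

    // Hypothetical stand-in for the server's completion_token_output.
    struct TokenProb {
        int   tok;
        float prob;
    };

    // Drop the last n_stop entries (the tokens that spelled out the stop
    // word) from the reported probabilities; a sketch, not the exact code.
    static std::vector<TokenProb> trim_stop_word_probs(const std::vector<TokenProb> & probs, size_t n_stop) {
        if (n_stop >= probs.size()) {
            return {};
        }
        return std::vector<TokenProb>(probs.begin(), probs.end() - n_stop);
    }
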
@@ -1456,7 +1462,11 @@ int main(int argc, char **argv)
 
             if (!llama.has_next_token) {
                 // Generation is done, send extra information.
-                const json data = format_final_response(llama, "", llama.generated_token_probs);
+                const json data = format_final_response(
+                    llama,
+                    "",
+                    std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.begin() + sent_token_probs_index)
+                );
 
                 const std::string str =
                     "data: " +
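
The second server.cpp hunk is the streaming counterpart: probabilities are emitted incrementally while tokens stream out, and sent_token_probs_index counts how many entries have already been sent, so the final event repeats only that prefix rather than the full vector. The expression generated_token_probs.begin() + sent_token_probs_index relies on the index never exceeding the vector size, an invariant presumably maintained where the index is advanced. A bounds-checked sketch of the same prefix slice, reusing the hypothetical TokenProb type from above:

    #include <algorithm>

    // Return the prefix of probs that was already streamed; the clamp guards
    // against an index past the end, which the diff itself does not re-check.
    static std::vector<TokenProb> sent_probs_prefix(const std::vector<TokenProb> & probs, size_t sent_index) {
        sent_index = std::min(sent_index, probs.size());
        return std::vector<TokenProb>(probs.begin(), probs.begin() + sent_index);
    }
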
ggml-cuda.cu (17 changes: 17 additions & 0 deletions)
@@ -81,12 +81,29 @@
 #if defined(GGML_USE_HIPBLAS)
 #define __CUDA_ARCH__ 1300
 
+#ifndef __has_builtin
+    #define __has_builtin(x) 0
+#endif
+
 typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
 static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
     const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
     const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
+#if __has_builtin(__builtin_elementwise_sub_sat)
     const int8x4_t c = __builtin_elementwise_sub_sat(va, vb);
     return reinterpret_cast<const int&>(c);
+#else
+    int8x4_t c;
+    int16_t tmp;
+#pragma unroll
+    for (int i = 0; i < 4; i++) {
+        tmp = va[i] - vb[i];
+        if (tmp > std::numeric_limits<int8_t>::max()) tmp = std::numeric_limits<int8_t>::max();
+        if (tmp < std::numeric_limits<int8_t>::min()) tmp = std::numeric_limits<int8_t>::min();
+        c[i] = tmp;
+    }
+    return reinterpret_cast<int&>(c);
+#endif // __has_builtin(__builtin_elementwise_sub_sat)
 }
 
 static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
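
The ggml-cuda.cu hunk makes the HIP (ROCm) build tolerate compilers that lack __builtin_elementwise_sub_sat: __has_builtin is defined to 0 when the compiler does not provide it, and the new #else branch emulates CUDA's __vsubss4 (per-byte signed saturating subtraction on a packed int) with a clamped scalar loop. The semantics can be sanity-checked on the host with a plain C++ reference; a sketch assuming the same packed-four-signed-bytes interpretation as above:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Host-side reference for __vsubss4: subtract each signed byte with
    // saturation to [-128, 127], then repack the four results into an int.
    static int32_t vsubss4_ref(int32_t a, int32_t b) {
        uint32_t out = 0;
        for (int i = 0; i < 4; i++) {
            const int8_t va  = (int8_t)((uint32_t)a >> (8 * i));
            const int8_t vb  = (int8_t)((uint32_t)b >> (8 * i));
            const int   diff = std::clamp((int)va - (int)vb, -128, 127);
            out |= (uint32_t)(uint8_t)diff << (8 * i);
        }
        return (int32_t)out;
    }

    int main() {
        // 127 - (-1) would be 128, which saturates to 127 (0x7f) in byte 0;
        // the other bytes compute 0 - (-1) = 1. Prints 0x0101017f.
        printf("0x%08x\n", (unsigned) vsubss4_ref(0x0000007f, 0xffffffff));
    }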
