server : avoid antiprompt in probabilities of final response (ggergano…
jhen0409 authored Sep 2, 2023
1 parent f04d002 commit 571083f
Showing 1 changed file with 12 additions and 2 deletions.
examples/server/server.cpp (14 changes: 12 additions & 2 deletions)
@@ -1379,7 +1379,13 @@ int main(int argc, char **argv)
             }
         }
 
-        const json data = format_final_response(llama, llama.generated_text, llama.generated_token_probs);
+        auto probs = llama.generated_token_probs;
+        if (llama.params.n_probs > 0 && llama.stopped_word) {
+            const std::vector<llama_token> stop_word_toks = llama_tokenize(llama.ctx, llama.stopping_word, false);
+            probs = std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.end() - stop_word_toks.size());
+        }
+
+        const json data = format_final_response(llama, llama.generated_text, probs);
 
         llama_print_timings(llama.ctx);
 
@@ -1456,7 +1462,11 @@ int main(int argc, char **argv)
 
         if (!llama.has_next_token) {
            // Generation is done, send extra information.
-            const json data = format_final_response(llama, "", llama.generated_token_probs);
+            const json data = format_final_response(
+                llama,
+                "",
+                std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.begin() + sent_token_probs_index)
+            );
 
             const std::string str =
                 "data: " +
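Both hunks apply the same idea: before building the final JSON payload, trim the trailing entries of generated_token_probs that correspond to the matched stop word (antiprompt), so the reported per-token probabilities cover only the text actually returned to the client. In the streaming path the slice instead ends at sent_token_probs_index, the number of probability entries already streamed, which excludes the stop word the same way. Below is a minimal standalone sketch of that trimming step; TokenProb and trim_stop_word are hypothetical stand-ins for illustration, not llama.cpp API, and the real code slices std::vector<completion_token_output> with iterators exactly as shown in the diff above.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for llama.cpp's completion_token_output:
// one generated token plus its probability.
struct TokenProb {
    std::string token;
    float       prob;
};

// Drop the trailing entries that belong to the matched stop word, so the
// probabilities cover only the visible completion. n_stop_tokens is the
// stop word's length in tokens (the commit obtains it by re-tokenizing
// llama.stopping_word).
std::vector<TokenProb> trim_stop_word(const std::vector<TokenProb> &probs,
                                      std::size_t n_stop_tokens) {
    if (n_stop_tokens == 0 || n_stop_tokens > probs.size()) {
        return probs; // nothing to trim (or malformed input): return as-is
    }
    return std::vector<TokenProb>(probs.begin(), probs.end() - n_stop_tokens);
}

int main() {
    // "Hello world" followed by a two-token stop word "\nUser:".
    std::vector<TokenProb> probs = {
        {"Hello", 0.91f}, {" world", 0.87f}, {"\n", 0.40f}, {"User:", 0.95f},
    };
    for (const TokenProb &tp : trim_stop_word(probs, 2)) {
        std::cout << tp.token << " (" << tp.prob << ")\n";
    }
    // Prints only: Hello (0.91) and  world (0.87)
}

Unlike the diff, the sketch guards against a stop word longer than the recorded probabilities; the server code can rely on the stop word having just been generated, so it subtracts the token count directly.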
