Commit

add command line parser, simplify code
slaren committed Oct 9, 2024
1 parent 1c4d573 commit 06444a6
Showing 1 changed file with 98 additions and 68 deletions.
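The new flag-based interface replaces the old positional "<model.gguf> [prompt]" usage. A sketch of an invocation after this change (the binary and model names are placeholders; -n and -ngl are optional, while an omitted -m prints the usage text and exits):

    # hypothetical binary/model names; -m = model path, -n = tokens to predict
    # (default 32), -ngl = layers to offload to the GPU (default 99)
    ./llama-simple -m model.gguf -n 32 -ngl 99 "Hello my name is"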
166 changes: 98 additions & 68 deletions examples/simple/simple.cpp
@@ -1,90 +1,131 @@
 #include "llama.h"
 #include <cstdio>
 #include <cstring>
 #include <string>
 #include <vector>

 static void print_usage(int, char ** argv) {
     printf("\nexample usage:\n");
-    printf("\n    %s <model.gguf> [prompt]\n", argv[0]);
+    printf("\n    %s -m model.gguf [-n n_predict] [-ngl n_gpu_layers] [prompt]\n", argv[0]);
     printf("\n");
 }

 int main(int argc, char ** argv) {
     // path to the model gguf file
     std::string model_path;
     // prompt to generate text from
     std::string prompt = "Hello my name is";
+    // number of layers to offload to the GPU
+    int ngl = 99;
     // number of tokens to predict
     int n_predict = 32;

-    if (argc < 2) {
-        print_usage(argc, argv);
-        return 1;
-    }
-    model_path = argv[1];
-
-    if (argc > 2) {
-        prompt = argv[2];
-        for (int i = 3; i < argc; i++) {
-            prompt += " ";
-            prompt += argv[i];
-        }
-    }
+    // parse command line arguments
+
+    {
+        int i = 1;
+        for (; i < argc; i++) {
+            if (strcmp(argv[i], "-m") == 0) {
+                if (i + 1 < argc) {
+                    model_path = argv[++i];
+                } else {
+                    print_usage(argc, argv);
+                    return 1;
+                }
+            } else if (strcmp(argv[i], "-n") == 0) {
+                if (i + 1 < argc) {
+                    try {
+                        n_predict = std::stoi(argv[++i]);
+                    } catch (...) {
+                        print_usage(argc, argv);
+                        return 1;
+                    }
+                } else {
+                    print_usage(argc, argv);
+                    return 1;
+                }
+            } else if (strcmp(argv[i], "-ngl") == 0) {
+                if (i + 1 < argc) {
+                    try {
+                        ngl = std::stoi(argv[++i]);
+                    } catch (...) {
+                        print_usage(argc, argv);
+                        return 1;
+                    }
+                } else {
+                    print_usage(argc, argv);
+                    return 1;
+                }
+            } else {
+                // prompt starts here
+                break;
+            }
+        }
+        if (model_path.empty()) {
+            print_usage(argc, argv);
+            return 1;
+        }
+        if (i < argc) {
+            prompt = argv[i++];
+            for (; i < argc; i++) {
+                prompt += " ";
+                prompt += argv[i];
+            }
+        }
+    }

     // initialize the model

     llama_model_params model_params = llama_model_default_params();
-    model_params.n_gpu_layers = 99; // offload all layers to GPU
+    model_params.n_gpu_layers = ngl;

     llama_model * model = llama_load_model_from_file(model_path.c_str(), model_params);

     if (model == NULL) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
         return 1;
     }

+    // tokenize the prompt
+
+    // find the number of tokens in the prompt
+    const int n_prompt = -llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, true, true);
+
+    // allocate space for the tokens and tokenize the prompt
+    std::vector<llama_token> prompt_tokens(n_prompt);
+    if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) {
+        fprintf(stderr, "%s: error: failed to tokenize the prompt\n", __func__);
+        return 1;
+    }
+
     // initialize the context

     llama_context_params ctx_params = llama_context_default_params();
-    ctx_params.n_ctx = 512; // maximum context size
+    // n_ctx is the context size
+    ctx_params.n_ctx = n_prompt + n_predict - 1;
+    // n_batch is the maximum number of tokens that can be processed in a single call to llama_decode
+    ctx_params.n_batch = n_prompt;
     // enable performance counters
     ctx_params.no_perf = false;

     llama_context * ctx = llama_new_context_with_model(model, ctx_params);

     if (ctx == NULL) {
         fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);
         return 1;
     }

     // initialize the sampler

     auto sparams = llama_sampler_chain_default_params();
     sparams.no_perf = false;
     llama_sampler * smpl = llama_sampler_chain_init(sparams);

     llama_sampler_chain_add(smpl, llama_sampler_init_greedy());

-    // tokenize the prompt
-
-    std::vector<llama_token> tokens_list;
-    int n_tokens = llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, true, true);
-    tokens_list.resize(-n_tokens);
-    if (llama_tokenize(model, prompt.c_str(), prompt.size(), tokens_list.data(), tokens_list.size(), true, true) < 0) {
-        fprintf(stderr, "%s: error: failed to tokenize the prompt\n", __func__);
-        return 1;
-    }
-
-    const int n_ctx = llama_n_ctx(ctx);
-    const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size());
-
-    fprintf(stderr, "%s: n_predict = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, n_kv_req);
-
-    // make sure the KV cache is big enough to hold all the prompt and generated tokens
-    if (n_kv_req > n_ctx) {
-        fprintf(stderr, "%s: error: n_kv_req > n_ctx, the required KV cache size is not big enough\n", __func__);
-        fprintf(stderr, "%s: either reduce n_predict or increase n_ctx\n", __func__);
-        return 1;
-    }
-
     // print the prompt token-by-token

-    fprintf(stderr, "\n");
-
-    for (auto id : tokens_list) {
+    for (auto id : prompt_tokens) {
         char buf[128];
         int n = llama_token_to_piece(model, id, buf, sizeof(buf), 0, true);
         if (n < 0) {
@@ -95,34 +136,31 @@ int main(int argc, char ** argv) {
         printf("%s", s.c_str());
     }

-    // create a llama_batch with size 512
-    // we use this object to submit token data for decoding
-
-    llama_batch batch = llama_batch_get_one(tokens_list.data(), tokens_list.size(), 0, 0);
-
-    // evaluate the initial prompt
-    if (llama_decode(ctx, batch) != 0) {
-        fprintf(stderr, "%s: llama_decode() failed\n", __func__);
-        return 1;
-    }
+    // prepare a batch for the prompt
+
+    llama_batch batch = llama_batch_get_one(prompt_tokens.data(), prompt_tokens.size(), 0, 0);

     // main loop

-    int n_cur = batch.n_tokens;
-    const auto t_main_start = ggml_time_us();
     int n_decode = 0;
+    llama_token new_token_id;

-    while (n_cur <= n_predict) {
+    const auto t_main_start = ggml_time_us();
+    for (int n_pos = 0; n_pos + batch.n_tokens < n_prompt + n_predict; ) {
+        // evaluate the current batch with the transformer model
+        if (llama_decode(ctx, batch)) {
+            fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1);
+            return 1;
+        }
+
+        n_pos += batch.n_tokens;
+
         // sample the next token
-        llama_token new_token_id = llama_sampler_sample(smpl, ctx, -1);
-
-        // is it an end of generation?
-        if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
-            fprintf(stderr, "\n");
-
-            break;
-        }
+        {
+            new_token_id = llama_sampler_sample(smpl, ctx, -1);
+
+            // is it an end of generation?
+            if (llama_token_is_eog(model, new_token_id)) {
+                break;
+            }

@@ -136,22 +174,14 @@ int main(int argc, char ** argv) {
             printf("%s", s.c_str());
             fflush(stdout);

-        // prepare the next batch
-        batch = llama_batch_get_one(&new_token_id, 1, n_cur, 0);
+            // prepare the next batch with the sampled token
+            batch = llama_batch_get_one(&new_token_id, 1, n_pos, 0);

             n_decode += 1;
-
-        n_cur += 1;
-
-        // evaluate the current batch with the transformer model
-        if (llama_decode(ctx, batch)) {
-            fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1);
-            return 1;
-        }
+        }
     }

-    fprintf(stderr, "\n");
+    printf("\n");

     const auto t_main_end = ggml_time_us();

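A note on the new context sizing, since the "- 1" is easy to miss: the last sampled token is printed but never fed back through llama_decode, so only n_prompt + n_predict - 1 positions ever enter the KV cache. A worked check, assuming a prompt that tokenizes to 5 tokens (an illustrative figure, not taken from the commit):

    n_prompt = 5, n_predict = 32
    n_ctx    = n_prompt + n_predict - 1 = 36   // KV positions 0..35
    loop condition: n_pos + batch.n_tokens < n_prompt + n_predict = 37
    pass 1 decodes positions 0..4 (the whole prompt, hence n_batch = n_prompt),
    then one position per sampled token up to n_pos = 35; the 32nd sampled
    token exits the loop without another decode call.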

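The tokenization block added in this commit also relies on a llama_tokenize convention that is easy to miss: given a NULL output buffer, it returns the negative of the required token count, which is why the new code negates the result before sizing the vector. A minimal C++ sketch of the same size-then-fill idiom (assumes a loaded model pointer and any std::string prompt):

    // pass 1: NULL buffer -> llama_tokenize returns -(number of tokens needed)
    const int n_prompt = -llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, true, true);

    // pass 2: fill a vector of exactly that size; the two trailing flags add
    // model-defined special tokens (e.g. BOS) and parse special tokens in the text
    std::vector<llama_token> prompt_tokens(n_prompt);
    if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) {
        fprintf(stderr, "failed to tokenize the prompt\n");
        return 1;
    }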