feat(llama.cpp logging): Add some helpful logs when tensors are missing
NOTE: This also adds the LLAMA_LOG_DEBUG macro for use in llama.cpp. This
may have been omitted for a good reason! If so, this change is certainly
optional.

Branch: GraniteMoE

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
gabe-l-hart committed Sep 11, 2024
1 parent 3219f58 commit 5f37be3
Showing 2 changed files with 6 additions and 0 deletions.
1 change: 1 addition & 0 deletions src/llama-impl.h
@@ -24,6 +24,7 @@ LLAMA_ATTRIBUTE_FORMAT(2, 3)
void llama_log_internal (ggml_log_level level, const char * format, ...);
void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);

+#define LLAMA_LOG_DEBUG(...) llama_log_internal(GGML_LOG_LEVEL_DEBUG , __VA_ARGS__)
#define LLAMA_LOG_INFO(...) llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...) llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
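
The macro above mirrors the existing LLAMA_LOG_INFO/WARN/ERROR macros but emits at GGML_LOG_LEVEL_DEBUG, so the new messages only show up if the application's log callback actually prints DEBUG output. A minimal sketch of how a caller might surface them, assuming the public llama_log_set() API and ggml_log_callback signature from llama.h (illustrative, not part of this commit):

    // Route all llama.cpp log output, including GGML_LOG_LEVEL_DEBUG, to stderr
    // so the new missing-tensor messages are visible. Assumes llama_log_set().
    #include "llama.h"
    #include <cstdio>

    static void verbose_log_cb(enum ggml_log_level level, const char * text, void * /*user_data*/) {
        (void) level;                // no filtering: DEBUG lines are printed too
        fprintf(stderr, "%s", text);
    }

    int main() {
        llama_log_set(verbose_log_cb, nullptr);
        // ... load a model here; a tensor missing from the arch table now emits a DEBUG line ...
        return 0;
    }
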
5 changes: 5 additions & 0 deletions src/llama.cpp
@@ -1484,34 +1484,39 @@ struct LLM_TN

std::string operator()(llm_tensor tensor) const {
if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
LLAMA_LOG_DEBUG("%s: Missing tensor %d for arch %s\n", __func__, tensor, LLM_ARCH_NAMES.at(arch));
return "__missing__";
}
return LLM_TENSOR_NAMES.at(arch).at(tensor);
}

std::string operator()(llm_tensor tensor, const std::string & suffix) const {
if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
LLAMA_LOG_DEBUG("%s(%s): Missing tensor %d for arch %s\n", __func__, suffix.c_str(), tensor, LLM_ARCH_NAMES.at(arch));
return "__missing__";
}
return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
}

std::string operator()(llm_tensor tensor, int bid) const {
if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
LLAMA_LOG_DEBUG("%s(%d): Missing tensor %d for arch %s\n", __func__, bid, tensor, LLM_ARCH_NAMES.at(arch));
return "__missing__";
}
return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
}

std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
LLAMA_LOG_DEBUG("%s(%s, %d): Missing tensor %d for arch %s\n", __func__, suffix.c_str(), bid, tensor, LLM_ARCH_NAMES.at(arch));
return "__missing__";
}
return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
}

std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
LLAMA_LOG_DEBUG("%s(%s, %d, %d): Missing tensor %d for arch %s\n", __func__, suffix.c_str(), bid, xid, tensor, LLM_ARCH_NAMES.at(arch));
return "__missing__";
}
return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
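
Each overload above already guards the lookup with find() and returns the "__missing__" sentinel instead of letting at() throw std::out_of_range; this commit adds a DEBUG message inside that guard so the missing tensor id and architecture are reported. A self-contained sketch of that lookup-with-sentinel pattern, using illustrative stand-in names rather than the real llm_tensor/LLM_TENSOR_NAMES definitions:

    #include <cstdio>
    #include <map>
    #include <string>

    // Stand-ins for llm_tensor / LLM_TENSOR_NAMES; names are illustrative only.
    enum my_tensor { MY_TENSOR_OUTPUT = 0, MY_TENSOR_FFN_GATE = 1 };
    static const std::map<my_tensor, std::string> MY_TENSOR_NAMES = {
        { MY_TENSOR_OUTPUT, "output" },
    };

    static std::string tensor_name(my_tensor tensor) {
        // Same shape as the patched LLM_TN::operator(): check, log, return sentinel.
        if (MY_TENSOR_NAMES.find(tensor) == MY_TENSOR_NAMES.end()) {
            fprintf(stderr, "%s: Missing tensor %d\n", __func__, (int) tensor);
            return "__missing__";
        }
        return MY_TENSOR_NAMES.at(tensor);
    }

    int main() {
        printf("%s\n", tensor_name(MY_TENSOR_OUTPUT).c_str());   // "output"
        printf("%s\n", tensor_name(MY_TENSOR_FFN_GATE).c_str()); // "__missing__" (plus a debug line on stderr)
        return 0;
    }

A caller can then treat "__missing__" like any other name that is simply absent from the model file, rather than handling an exception.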
