From 34d1acb07f9796ee84dc6904bb902c8031620955 Mon Sep 17 00:00:00 2001 From: Jendrik Seipp Date: Sun, 15 Oct 2023 11:05:35 +0200 Subject: [PATCH 1/8] [trivial] Fix instruction. --- build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.py b/build.py index 7437a9707..c85f776cf 100755 --- a/build.py +++ b/build.py @@ -89,7 +89,7 @@ def try_run(cmd): except OSError as exc: if exc.errno == errno.ENOENT: print(f"Could not find '{cmd[0]}' on your PATH. For installation instructions, " - "see https://www.fast-downward.org/ObtainingAndRunningFastDownward.") + "see BUILD.md in the project root directory.") sys.exit(1) else: raise From a063111082a6e54725fc2cfbe7a817d03ed2c131 Mon Sep 17 00:00:00 2001 From: ClemensBuechner Date: Tue, 17 Oct 2023 14:37:20 +0200 Subject: [PATCH 2/8] [issue1122] Link CPLEX static libraries if dynamic not found. --- src/search/cmake/FindCplex.cmake | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/search/cmake/FindCplex.cmake b/src/search/cmake/FindCplex.cmake index 58a6139f0..27827664d 100644 --- a/src/search/cmake/FindCplex.cmake +++ b/src/search/cmake/FindCplex.cmake @@ -12,7 +12,7 @@ set(IMPORTED_CONFIGURATIONS "Debug" "Release") set(HINT_PATHS ${cplex_DIR} $ENV{cplex_DIR}) -add_library(cplex::cplex IMPORTED SHARED) +add_library(cplex::cplex IMPORTED UNKNOWN) set_target_properties(cplex::cplex PROPERTIES IMPORTED_CONFIGURATIONS "${IMPORTED_CONFIGURATIONS}" ) @@ -157,18 +157,20 @@ foreach(CONFIG_ORIG ${IMPORTED_CONFIGURATIONS}) list(APPEND REQUIRED_LIBRARIES CPLEX_SHARED_LIBRARY_${CONFIG} CPLEX_IMPLIB_${CONFIG}) else() # CPLEX stores .so files in /bin - find_library(CPLEX_SHARED_LIBRARY_${CONFIG} + find_library(CPLEX_LIBRARY_${CONFIG} NAMES cplex${CPLEX_VERSION_NO_DOTS} + cplex HINTS ${HINT_PATHS}/bin + ${HINT_PATHS}/lib PATH_SUFFIXES ${SUFFIXES_${CONFIG}} ) set_target_properties(cplex::cplex PROPERTIES - IMPORTED_LOCATION_${CONFIG} ${CPLEX_SHARED_LIBRARY_${CONFIG}} + IMPORTED_LOCATION_${CONFIG} ${CPLEX_LIBRARY_${CONFIG}} ) - list(APPEND REQUIRED_LIBRARIES CPLEX_SHARED_LIBRARY_${CONFIG}) + list(APPEND REQUIRED_LIBRARIES CPLEX_LIBRARY_${CONFIG}) endif() endforeach() @@ -186,5 +188,5 @@ mark_as_advanced( CPLEX_VERSION_SUBMINOR CPLEX_VERSION_NO_DOTS BITWIDTH_HINTS PLATFORM_HINTS LIBRARY_TYPE_HINTS_RELEASE LIBRARY_TYPE_HINTS_DEBUG SUFFIXES_RELEASE SUFFIXES_DEBUG FIND_OPTIONS COMPILER_HINTS COMPILER_HINT CPLEX_IMPLIB_DEBUG - CPLEX_IMPLIB_RELEASE CPLEX_SHARED_LIBRARY_DEBUG CPLEX_SHARED_LIBRARY_RELEASE + CPLEX_IMPLIB_RELEASE CPLEX_LIBRARY_DEBUG CPLEX_LIBRARY_RELEASE ) From 300e66dc93c4671c06f52af94e40d51c7dee5e4b Mon Sep 17 00:00:00 2001 From: ClemensBuechner Date: Thu, 19 Oct 2023 10:27:54 +0200 Subject: [PATCH 3/8] [issue1122] Specify using shared CPLEX libraries in Windows. The previous fix to find CPLEX libraries in the absence of dynamic ones caused Windows to not find CPLEX anymore. We fix this by distinguishing the type of imported library depending on the operating system. 
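For illustration, here is a minimal CMake sketch of the setup this results in. It is not the actual find module; CPLEX_ROOT, the version number and the directory names are placeholders standing in for whatever the probing code finds on a given system.

    # Sketch only: how the imported target communicates library locations.
    set(CPLEX_ROOT "/opt/ibm/ILOG/CPLEX_Studio2211/cplex")  # placeholder path
    if(WIN32)
        # A SHARED imported target carries both the runtime .dll
        # (IMPORTED_LOCATION) and the .lib import library (IMPORTED_IMPLIB)
        # that the linker needs.
        add_library(cplex::cplex IMPORTED SHARED)
        set_target_properties(cplex::cplex PROPERTIES
            IMPORTED_LOCATION "${CPLEX_ROOT}/bin/x64_win64/cplex2211.dll"
            IMPORTED_IMPLIB "${CPLEX_ROOT}/lib/x64_windows_msvc14/stat_mda/cplex2211.lib")
    else()
        # An UNKNOWN imported target leaves the library kind open, so
        # IMPORTED_LOCATION may point to libcplex.so if the installer shipped
        # dynamic libraries, or to the static libcplex.a as a fallback.
        add_library(cplex::cplex IMPORTED UNKNOWN)
        set_target_properties(cplex::cplex PROPERTIES
            IMPORTED_LOCATION "${CPLEX_ROOT}/lib/x86-64_linux/static_pic/libcplex.a")
    endif()
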
--- src/search/cmake/FindCplex.cmake | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/search/cmake/FindCplex.cmake b/src/search/cmake/FindCplex.cmake index 27827664d..606bba2a4 100644 --- a/src/search/cmake/FindCplex.cmake +++ b/src/search/cmake/FindCplex.cmake @@ -12,7 +12,16 @@ set(IMPORTED_CONFIGURATIONS "Debug" "Release") set(HINT_PATHS ${cplex_DIR} $ENV{cplex_DIR}) -add_library(cplex::cplex IMPORTED UNKNOWN) +if(WIN32) + # On Windows we have to declare the library as SHARED to correctly + # communicate the location of the dll and impllib files. + add_library(cplex::cplex IMPORTED SHARED) +else() + # On Linux, the CPLEX installer sometimes does not provide dynamic + # libraries. If they are absent, we fall back to static ones further down, + # hence we mark the type unknown here. + add_library(cplex::cplex IMPORTED UNKNOWN) +endif() set_target_properties(cplex::cplex PROPERTIES IMPORTED_CONFIGURATIONS "${IMPORTED_CONFIGURATIONS}" ) From ca1e60dd52c639e78d6defc098a780e4d380bff3 Mon Sep 17 00:00:00 2001 From: ClemensBuechner Date: Thu, 19 Oct 2023 11:32:05 +0200 Subject: [PATCH 4/8] [issue1126] Use std::unordered_set to store landmark achievers. Search time is significantly lower in most tested configurations. Improvements range up to 30%. --- src/search/landmarks/landmark.h | 6 +++--- .../landmarks/landmark_cost_partitioning_algorithms.cc | 10 +++++----- .../landmarks/landmark_cost_partitioning_algorithms.h | 4 ++-- src/search/landmarks/landmark_sum_heuristic.cc | 2 +- src/search/landmarks/landmark_sum_heuristic.h | 3 ++- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/search/landmarks/landmark.h b/src/search/landmarks/landmark.h index bca802454..7473f6167 100644 --- a/src/search/landmarks/landmark.h +++ b/src/search/landmarks/landmark.h @@ -3,7 +3,7 @@ #include "../task_proxy.h" -#include +#include namespace landmarks { class Landmark { @@ -31,8 +31,8 @@ class Landmark { bool is_true_in_goal; bool is_derived; - std::set first_achievers; - std::set possible_achievers; + std::unordered_set first_achievers; + std::unordered_set possible_achievers; bool is_true_in_state(const State &state) const; }; diff --git a/src/search/landmarks/landmark_cost_partitioning_algorithms.cc b/src/search/landmarks/landmark_cost_partitioning_algorithms.cc index 807346007..b669b7e78 100644 --- a/src/search/landmarks/landmark_cost_partitioning_algorithms.cc +++ b/src/search/landmarks/landmark_cost_partitioning_algorithms.cc @@ -22,7 +22,7 @@ CostPartitioningAlgorithm::CostPartitioningAlgorithm( : lm_graph(graph), operator_costs(operator_costs) { } -const set &CostPartitioningAlgorithm::get_achievers( +const unordered_set &CostPartitioningAlgorithm::get_achievers( const Landmark &landmark, bool past) const { // Return relevant achievers of the landmark according to its status. 
if (past) { @@ -60,7 +60,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value( for (auto &node : nodes) { int id = node->get_id(); if (future.test(id)) { - const set &achievers = + const unordered_set &achievers = get_achievers(node->get_landmark(), past.test(id)); if (achievers.empty()) return numeric_limits::max(); @@ -93,7 +93,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value( for (auto &node : nodes) { int id = node->get_id(); if (future.test(id)) { - const set &achievers = + const unordered_set &achievers = get_achievers(node->get_landmark(), past.test(id)); bool covered_by_action_lm = false; for (int op_id : achievers) { @@ -120,7 +120,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value( // TODO: Iterate over Landmarks instead of LandmarkNodes int id = node->get_id(); assert(future.test(id)); - const set &achievers = + const unordered_set &achievers = get_achievers(node->get_landmark(), past.test(id)); double min_cost = numeric_limits::max(); for (int op_id : achievers) { @@ -216,7 +216,7 @@ double OptimalCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value( for (int lm_id = 0; lm_id < num_cols; ++lm_id) { const Landmark &landmark = lm_graph.get_node(lm_id)->get_landmark(); if (future.test(lm_id)) { - const set &achievers = + const unordered_set &achievers = get_achievers(landmark, past.test(lm_id)); if (achievers.empty()) return numeric_limits::max(); diff --git a/src/search/landmarks/landmark_cost_partitioning_algorithms.h b/src/search/landmarks/landmark_cost_partitioning_algorithms.h index 833ead053..a4a6cdf80 100644 --- a/src/search/landmarks/landmark_cost_partitioning_algorithms.h +++ b/src/search/landmarks/landmark_cost_partitioning_algorithms.h @@ -5,7 +5,7 @@ #include "../lp/lp_solver.h" -#include +#include #include class OperatorsProxy; @@ -21,7 +21,7 @@ class CostPartitioningAlgorithm { const LandmarkGraph &lm_graph; const std::vector operator_costs; - const std::set &get_achievers( + const std::unordered_set &get_achievers( const Landmark &landmark, bool past) const; public: CostPartitioningAlgorithm(const std::vector &operator_costs, diff --git a/src/search/landmarks/landmark_sum_heuristic.cc b/src/search/landmarks/landmark_sum_heuristic.cc index 07cfc0068..ac687a56b 100644 --- a/src/search/landmarks/landmark_sum_heuristic.cc +++ b/src/search/landmarks/landmark_sum_heuristic.cc @@ -44,7 +44,7 @@ LandmarkSumHeuristic::LandmarkSumHeuristic(const plugins::Options &opts) } int LandmarkSumHeuristic::get_min_cost_of_achievers( - const set &achievers) const { + const unordered_set &achievers) const { int min_cost = numeric_limits::max(); for (int id : achievers) { OperatorProxy op = get_operator_or_axiom(task_proxy, id); diff --git a/src/search/landmarks/landmark_sum_heuristic.h b/src/search/landmarks/landmark_sum_heuristic.h index 6a9a21ab0..c47af5b09 100644 --- a/src/search/landmarks/landmark_sum_heuristic.h +++ b/src/search/landmarks/landmark_sum_heuristic.h @@ -10,7 +10,8 @@ class LandmarkSumHeuristic : public LandmarkHeuristic { std::vector min_first_achiever_costs; std::vector min_possible_achiever_costs; - int get_min_cost_of_achievers(const std::set &achievers) const; + int get_min_cost_of_achievers( + const std::unordered_set &achievers) const; void compute_landmark_costs(); int get_heuristic_value(const State &ancestor_state) override; From c8b0792107556351b365a4005652bcfd193fe3c5 Mon Sep 17 00:00:00 2001 From: Jendrik Seipp Date: Thu, 19 Oct 2023 16:51:48 +0200 
Subject: [PATCH 5/8] [trivial] Fix recommended M&S config. --- src/search/merge_and_shrink/merge_and_shrink_heuristic.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc b/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc index 9760bb02a..c35f251c9 100644 --- a/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc +++ b/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc @@ -210,8 +210,8 @@ class MergeAndShrinkHeuristicFeature : public plugins::TypedFeature Date: Wed, 8 Nov 2023 21:25:52 +0100 Subject: [PATCH 6/8] [trivial] fix documentation of f-preserving shrink strategy --- src/search/merge_and_shrink/shrink_fh.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/search/merge_and_shrink/shrink_fh.cc b/src/search/merge_and_shrink/shrink_fh.cc index 2cec4d4db..6a334cfd5 100644 --- a/src/search/merge_and_shrink/shrink_fh.cc +++ b/src/search/merge_and_shrink/shrink_fh.cc @@ -218,7 +218,7 @@ class ShrinkFHFeature : public plugins::TypedFeature { document_note( "Note", "The strategy first partitions all states according to their " - "combination of f- and g-values. These partitions are then sorted, " + "combination of f- and h-values. These partitions are then sorted, " "first according to their f-value, then according to their h-value " "(increasing or decreasing, depending on the chosen options). " "States sorted last are shrinked together until reaching max_states."); @@ -229,8 +229,8 @@ class ShrinkFHFeature : public plugins::TypedFeature { "is a numerical parameter for which sensible values include 1000, " "10000, 50000, 100000 and 200000) and the linear merge startegy " "cg_goal_level to obtain the variant 'f-preserving shrinking of " - "transition systems', called called HHH in the IJCAI 2011 paper, see " - "bisimulation based shrink strategy. " + "transition systems', called HHH in the IJCAI 2011 paper. Also " + "see bisimulation based shrink strategy. " "When we last ran experiments on interaction of shrink strategies " "with label reduction, this strategy performed best when used with " "label reduction before merging (and no label reduction before " From 47cc432c745f137bd678351eeb4cef29c533ed41 Mon Sep 17 00:00:00 2001 From: Florian Pommerening Date: Tue, 21 Nov 2023 18:23:44 +0100 Subject: [PATCH 7/8] [issue1106] Support string options in parser. We now support string arguments in double quotes. Strings may use escape symbols '\"', '\\', and '\n' for double quotes, backslashes and newlines. 
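For illustration, a standalone sketch of these escaping rules. The unescape helper below is hypothetical and only mirrors the behavior described above; the actual handling is in StringLiteralNode::construct.

    #include <cassert>
    #include <string>

    // Inside a double-quoted literal, \" yields a quote, \\ a backslash and
    // \n a newline; any other escaped character keeps only the character
    // (e.g., \t becomes t, not a tab).
    static std::string unescape(const std::string &quoted) {
        // 'quoted' is expected to still include the surrounding quotes.
        std::string result;
        bool escaped = false;
        for (char c : quoted.substr(1, quoted.size() - 2)) {
            if (escaped) {
                result += (c == 'n') ? '\n' : c;
                escaped = false;
            } else if (c == '\\') {
                escaped = true;
            } else {
                result += c;
            }
        }
        return result;
    }

    int main() {
        assert(unescape(R"("say \"hi\"\n")") == "say \"hi\"\n");
        assert(unescape(R"("back\\slash")") == "back\\slash");
        return 0;
    }
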
--- src/search/parser/abstract_syntax_tree.cc | 4 ++ .../parser/decorated_abstract_syntax_tree.cc | 52 +++++++++++++++++ .../parser/decorated_abstract_syntax_tree.h | 14 +++++ src/search/parser/lexical_analyzer.cc | 56 ++++++++++++------- src/search/parser/syntax_analyzer.cc | 33 ++++++----- src/search/parser/token_stream.cc | 22 ++++++-- src/search/parser/token_stream.h | 7 ++- src/search/plugins/types.cc | 1 + 8 files changed, 145 insertions(+), 44 deletions(-) diff --git a/src/search/parser/abstract_syntax_tree.cc b/src/search/parser/abstract_syntax_tree.cc index 280174b64..49c1635a9 100644 --- a/src/search/parser/abstract_syntax_tree.cc +++ b/src/search/parser/abstract_syntax_tree.cc @@ -420,6 +420,8 @@ DecoratedASTNodePtr LiteralNode::decorate(DecorateContext &context) const { switch (value.type) { case TokenType::BOOLEAN: return utils::make_unique_ptr(value.content); + case TokenType::STRING: + return utils::make_unique_ptr(value.content); case TokenType::INTEGER: return utils::make_unique_ptr(value.content); case TokenType::FLOAT: @@ -441,6 +443,8 @@ const plugins::Type &LiteralNode::get_type(DecorateContext &context) const { switch (value.type) { case TokenType::BOOLEAN: return plugins::TypeRegistry::instance()->get_type(); + case TokenType::STRING: + return plugins::TypeRegistry::instance()->get_type(); case TokenType::INTEGER: return plugins::TypeRegistry::instance()->get_type(); case TokenType::FLOAT: diff --git a/src/search/parser/decorated_abstract_syntax_tree.cc b/src/search/parser/decorated_abstract_syntax_tree.cc index 3a401d9e7..c1f629438 100644 --- a/src/search/parser/decorated_abstract_syntax_tree.cc +++ b/src/search/parser/decorated_abstract_syntax_tree.cc @@ -218,6 +218,46 @@ void BoolLiteralNode::dump(string indent) const { cout << indent << "BOOL: " << value << endl; } +StringLiteralNode::StringLiteralNode(const string &value) + : value(value) { +} + +plugins::Any StringLiteralNode::construct(ConstructContext &context) const { + utils::TraceBlock block(context, "Constructing string value from '" + value + "'"); + if (!(value.starts_with('"') && value.ends_with('"'))) { + ABORT("String literal value is not enclosed in quotation marks" + " (this should have been caught before constructing this node)."); + } + /* + We are not doing any further syntax checking. Escaped symbols other than + \n will just ignore the escaping \ (e.g., \t is treated as t, not as a + tab). Strings ending in \ will not produce an error but should be excluded + by the previous steps. 
+ */ + string result; + result.reserve(value.length() - 2); + bool escaped = false; + for (char c : value.substr(1, value.size() - 2)) { + if (escaped) { + escaped = false; + if (c == 'n') { + result += '\n'; + } else { + result += c; + } + } else if (c == '\\') { + escaped = true; + } else { + result += c; + } + } + return result; +} + +void StringLiteralNode::dump(string indent) const { + cout << indent << "STRING: " << value << endl; +} + IntLiteralNode::IntLiteralNode(const string &value) : value(value) { } @@ -473,6 +513,18 @@ shared_ptr BoolLiteralNode::clone_shared() const { return make_shared(*this); } +StringLiteralNode::StringLiteralNode(const StringLiteralNode &other) + : value(other.value) { +} + +unique_ptr StringLiteralNode::clone() const { + return utils::make_unique_ptr(*this); +} + +shared_ptr StringLiteralNode::clone_shared() const { + return make_shared(*this); +} + IntLiteralNode::IntLiteralNode(const IntLiteralNode &other) : value(other.value) { } diff --git a/src/search/parser/decorated_abstract_syntax_tree.h b/src/search/parser/decorated_abstract_syntax_tree.h index 0094f8874..105f77bf1 100644 --- a/src/search/parser/decorated_abstract_syntax_tree.h +++ b/src/search/parser/decorated_abstract_syntax_tree.h @@ -157,6 +157,20 @@ class BoolLiteralNode : public DecoratedASTNode { BoolLiteralNode(const BoolLiteralNode &other); }; +class StringLiteralNode : public DecoratedASTNode { + std::string value; +public: + StringLiteralNode(const std::string &value); + + plugins::Any construct(ConstructContext &context) const override; + void dump(std::string indent) const override; + + // TODO: once we get rid of lazy construction, this should no longer be necessary. + virtual std::unique_ptr clone() const override; + virtual std::shared_ptr clone_shared() const override; + StringLiteralNode(const StringLiteralNode &other); +}; + class IntLiteralNode : public DecoratedASTNode { std::string value; public: diff --git a/src/search/parser/lexical_analyzer.cc b/src/search/parser/lexical_analyzer.cc index 9a3736080..9812fd3e7 100644 --- a/src/search/parser/lexical_analyzer.cc +++ b/src/search/parser/lexical_analyzer.cc @@ -24,12 +24,22 @@ static vector> construct_token_type_expressions() { {TokenType::CLOSING_BRACKET, R"(\])"}, {TokenType::COMMA, R"(,)"}, {TokenType::EQUALS, R"(=)"}, + {TokenType::LET, R"(let)"}, + {TokenType::BOOLEAN, R"(true|false)"}, + {TokenType::STRING, R"("(\\\\|\\"|\\n|[^"\\])*")"}, + /* + Floats have to be parsed before integers, so tokens like '1.2' are + parsed as one float token rather than an integer token '1' followed + by a float token '.2'. + */ {TokenType::FLOAT, R"([+-]?(((\d*\.\d+|\d+\.)(e[+-]?\d+|[kmg]\b)?)|\d+e[+-]?\d+))"}, - {TokenType::INTEGER, - R"([+-]?(infinity|\d+([kmg]\b)?))"}, - {TokenType::BOOLEAN, R"(true|false)"}, - {TokenType::LET, R"(let)"}, + {TokenType::INTEGER, R"([+-]?(infinity|\d+([kmg]\b)?))"}, + /* + Identifiers have to be parsed last to prevent reserved words ( + 'infinity', 'true', 'false', and 'let') from being recognized as + identifiers. 
+ */ {TokenType::IDENTIFIER, R"([a-zA-Z_]\w*)"} }; vector> token_type_expression; @@ -42,6 +52,24 @@ static vector> construct_token_type_expressions() { static const vector> token_type_expressions = construct_token_type_expressions(); +static string highlight_position(const string &text, string::const_iterator pos) { + ostringstream message_stream; + int distance_to_highlight = pos - text.begin(); + for (const string &line : utils::split(text, "\n")) { + int line_length = line.size(); + bool highlight_in_line = + distance_to_highlight < line_length && distance_to_highlight >= 0; + message_stream << (highlight_in_line ? "> " : " ") << line << endl; + if (highlight_in_line) { + message_stream << string(distance_to_highlight + 2, ' ') << "^" + << endl; + } + distance_to_highlight -= line.size() + 1; + } + string message = message_stream.str(); + utils::rstrip(message); + return message; +} TokenStream split_tokens(const string &text) { utils::Context context; @@ -59,29 +87,15 @@ TokenStream split_tokens(const string &text) { TokenType token_type = type_and_expression.first; const regex &expression = type_and_expression.second; if (regex_search(start, end, match, expression, regex_constants::match_continuous)) { - tokens.push_back({utils::tolower(match[1]), token_type}); + tokens.push_back({match[1], token_type}); start += match[0].length(); has_match = true; break; } } if (!has_match) { - ostringstream error; - error << "Unable to recognize next token:" << endl; - int distance_to_error = start - text.begin(); - for (const string &line : utils::split(text, "\n")) { - int line_length = line.size(); - bool error_in_line = - distance_to_error < line_length && distance_to_error >= 0; - error << (error_in_line ? "> " : " ") << line << endl; - if (error_in_line) - error << string(distance_to_error + 2, ' ') << "^" << endl; - - distance_to_error -= line.size() + 1; - } - string message = error.str(); - utils::rstrip(message); - context.error(message); + context.error("Unable to recognize next token:\n" + + highlight_position(text, start)); } } return TokenStream(move(tokens)); diff --git a/src/search/parser/syntax_analyzer.cc b/src/search/parser/syntax_analyzer.cc index ffcafbfa4..789dbab4c 100644 --- a/src/search/parser/syntax_analyzer.cc +++ b/src/search/parser/syntax_analyzer.cc @@ -159,9 +159,10 @@ static ASTNodePtr parse_function(TokenStream &tokens, } static unordered_set literal_tokens { - TokenType::FLOAT, - TokenType::INTEGER, TokenType::BOOLEAN, + TokenType::STRING, + TokenType::INTEGER, + TokenType::FLOAT, TokenType::IDENTIFIER }; @@ -191,27 +192,35 @@ static ASTNodePtr parse_list(TokenStream &tokens, SyntaxAnalyzerContext &context return utils::make_unique_ptr(move(elements)); } -static vector PARSE_NODE_TOKEN_TYPES = { - TokenType::LET, TokenType::IDENTIFIER, TokenType::BOOLEAN, - TokenType::INTEGER, TokenType::FLOAT, TokenType::OPENING_BRACKET}; +static vector parse_node_token_types = { + TokenType::OPENING_BRACKET, TokenType::LET, TokenType::BOOLEAN, + TokenType::STRING, TokenType::INTEGER, TokenType::FLOAT, + TokenType::IDENTIFIER}; static ASTNodePtr parse_node(TokenStream &tokens, SyntaxAnalyzerContext &context) { utils::TraceBlock block(context, "Identify node type"); Token token = tokens.peek(context); - if (find(PARSE_NODE_TOKEN_TYPES.begin(), - PARSE_NODE_TOKEN_TYPES.end(), - token.type) == PARSE_NODE_TOKEN_TYPES.end()) { + if (find(parse_node_token_types.begin(), + parse_node_token_types.end(), + token.type) == parse_node_token_types.end()) { ostringstream message; message << 
"Unexpected token '" << token << "'. Expected any of the following token types: " - << utils::join(PARSE_NODE_TOKEN_TYPES, ", "); + << utils::join(parse_node_token_types, ", "); context.error(message.str()); } switch (token.type) { + case TokenType::OPENING_BRACKET: + return parse_list(tokens, context); case TokenType::LET: return parse_let(tokens, context); + case TokenType::BOOLEAN: + case TokenType::STRING: + case TokenType::INTEGER: + case TokenType::FLOAT: + return parse_literal(tokens, context); case TokenType::IDENTIFIER: if (tokens.has_tokens(2) && tokens.peek(context, 1).type == TokenType::OPENING_PARENTHESIS) { @@ -219,12 +228,6 @@ static ASTNodePtr parse_node(TokenStream &tokens, } else { return parse_literal(tokens, context); } - case TokenType::BOOLEAN: - case TokenType::INTEGER: - case TokenType::FLOAT: - return parse_literal(tokens, context); - case TokenType::OPENING_BRACKET: - return parse_list(tokens, context); default: ABORT("Unknown token type '" + token_type_name(token.type) + "'."); } diff --git a/src/search/parser/token_stream.cc b/src/search/parser/token_stream.cc index c6d689cf1..b2147f5a9 100644 --- a/src/search/parser/token_stream.cc +++ b/src/search/parser/token_stream.cc @@ -11,8 +11,18 @@ using namespace std; namespace parser { +static string case_insensitive_to_lower(const string &content, TokenType type) { + if (type == TokenType::BOOLEAN || + type == TokenType::INTEGER || + type == TokenType::FLOAT || + type == TokenType::IDENTIFIER) { + return utils::tolower(content); + } else { + return content; + } +} Token::Token(const string &content, TokenType type) - : content(content), type(type) { + : content(case_insensitive_to_lower(content, type)), type(type) { } TokenStream::TokenStream(vector &&tokens) @@ -90,16 +100,18 @@ string token_type_name(TokenType token_type) { return ","; case TokenType::EQUALS: return "="; + case TokenType::LET: + return "Let"; + case TokenType::BOOLEAN: + return "Boolean"; + case TokenType::STRING: + return "String"; case TokenType::INTEGER: return "Integer"; case TokenType::FLOAT: return "Float"; - case TokenType::BOOLEAN: - return "Boolean"; case TokenType::IDENTIFIER: return "Identifier"; - case TokenType::LET: - return "Let"; default: ABORT("Unknown token type."); } diff --git a/src/search/parser/token_stream.h b/src/search/parser/token_stream.h index 832cdafc7..71dd831f5 100644 --- a/src/search/parser/token_stream.h +++ b/src/search/parser/token_stream.h @@ -16,11 +16,12 @@ enum class TokenType { CLOSING_BRACKET, COMMA, EQUALS, + LET, + BOOLEAN, + STRING, INTEGER, FLOAT, - BOOLEAN, - IDENTIFIER, - LET + IDENTIFIER }; struct Token { diff --git a/src/search/plugins/types.cc b/src/search/plugins/types.cc index 117c139b5..c1ca2c372 100644 --- a/src/search/plugins/types.cc +++ b/src/search/plugins/types.cc @@ -292,6 +292,7 @@ BasicType TypeRegistry::NO_TYPE = BasicType(typeid(void), ""); TypeRegistry::TypeRegistry() { insert_basic_type(); + insert_basic_type(); insert_basic_type(); insert_basic_type(); } From f3ce70a00e2dd0b7d6640e814e15ef45efabcbf8 Mon Sep 17 00:00:00 2001 From: SimonDold <48084373+SimonDold@users.noreply.github.com> Date: Mon, 27 Nov 2023 09:33:22 +0100 Subject: [PATCH 8/8] [trivial] fix timer precision. (#198) set precision of the timer to 6 digits. 
--- src/search/utils/timer.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/search/utils/timer.cc b/src/search/utils/timer.cc index 326777038..83d0ba681 100644 --- a/src/search/utils/timer.cc +++ b/src/search/utils/timer.cc @@ -1,6 +1,7 @@ #include "timer.h" #include +#include #include #if OPERATING_SYSTEM == LINUX || OPERATING_SYSTEM == OSX @@ -110,7 +111,7 @@ Duration Timer::reset() { } ostream &operator<<(ostream &os, const Timer &timer) { - os << timer(); + os << fixed << setprecision(6) << timer(); return os; }