diff --git a/build.py b/build.py
index 7437a9707..c85f776cf 100755
--- a/build.py
+++ b/build.py
@@ -89,7 +89,7 @@ def try_run(cmd):
     except OSError as exc:
         if exc.errno == errno.ENOENT:
             print(f"Could not find '{cmd[0]}' on your PATH. For installation instructions, "
-                  "see https://www.fast-downward.org/ObtainingAndRunningFastDownward.")
+                  "see BUILD.md in the project root directory.")
             sys.exit(1)
         else:
             raise
diff --git a/misc/style/check-include-guard-convention.py b/misc/style/check-include-guard-convention.py
index 88ac5682c..046b00578 100755
--- a/misc/style/check-include-guard-convention.py
+++ b/misc/style/check-include-guard-convention.py
@@ -18,6 +18,8 @@ def check_header_files(component):
     errors = []
     for filename in header_files:
         assert filename.endswith(".h"), filename
+        if "/ext/" in filename:
+            continue
         rel_filename = os.path.relpath(filename, start=component_dir)
         guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
         expected = "#ifndef " + guard
diff --git a/src/search/cmake/FindCplex.cmake b/src/search/cmake/FindCplex.cmake
index 58a6139f0..606bba2a4 100644
--- a/src/search/cmake/FindCplex.cmake
+++ b/src/search/cmake/FindCplex.cmake
@@ -12,7 +12,16 @@ set(IMPORTED_CONFIGURATIONS "Debug" "Release")
 
 set(HINT_PATHS ${cplex_DIR} $ENV{cplex_DIR})
 
-add_library(cplex::cplex IMPORTED SHARED)
+if(WIN32)
+    # On Windows we have to declare the library as SHARED to correctly
+    # communicate the location of the dll and implib files.
+    add_library(cplex::cplex IMPORTED SHARED)
+else()
+    # On Linux, the CPLEX installer sometimes does not provide dynamic
+    # libraries. If they are absent, we fall back to static ones further
+    # down, hence we mark the library type as UNKNOWN here.
+    add_library(cplex::cplex IMPORTED UNKNOWN)
+endif()
 set_target_properties(cplex::cplex PROPERTIES
     IMPORTED_CONFIGURATIONS "${IMPORTED_CONFIGURATIONS}"
 )
@@ -157,18 +166,20 @@ foreach(CONFIG_ORIG ${IMPORTED_CONFIGURATIONS})
         list(APPEND REQUIRED_LIBRARIES CPLEX_SHARED_LIBRARY_${CONFIG} CPLEX_IMPLIB_${CONFIG})
     else()
         # CPLEX stores .so files in /bin
-        find_library(CPLEX_SHARED_LIBRARY_${CONFIG}
+        find_library(CPLEX_LIBRARY_${CONFIG}
             NAMES
                 cplex${CPLEX_VERSION_NO_DOTS}
+                cplex
             HINTS
                 ${HINT_PATHS}/bin
+                ${HINT_PATHS}/lib
             PATH_SUFFIXES
                 ${SUFFIXES_${CONFIG}}
         )
 
         set_target_properties(cplex::cplex PROPERTIES
-            IMPORTED_LOCATION_${CONFIG} ${CPLEX_SHARED_LIBRARY_${CONFIG}}
+            IMPORTED_LOCATION_${CONFIG} ${CPLEX_LIBRARY_${CONFIG}}
         )
-        list(APPEND REQUIRED_LIBRARIES CPLEX_SHARED_LIBRARY_${CONFIG})
+        list(APPEND REQUIRED_LIBRARIES CPLEX_LIBRARY_${CONFIG})
     endif()
 endforeach()
@@ -186,5 +197,5 @@ mark_as_advanced(
     CPLEX_VERSION_SUBMINOR CPLEX_VERSION_NO_DOTS BITWIDTH_HINTS PLATFORM_HINTS
     LIBRARY_TYPE_HINTS_RELEASE LIBRARY_TYPE_HINTS_DEBUG SUFFIXES_RELEASE
     SUFFIXES_DEBUG FIND_OPTIONS COMPILER_HINTS COMPILER_HINT CPLEX_IMPLIB_DEBUG
-    CPLEX_IMPLIB_RELEASE CPLEX_SHARED_LIBRARY_DEBUG CPLEX_SHARED_LIBRARY_RELEASE
+    CPLEX_IMPLIB_RELEASE CPLEX_LIBRARY_DEBUG CPLEX_LIBRARY_RELEASE
 )
diff --git a/src/search/landmarks/landmark.h b/src/search/landmarks/landmark.h
index 36eac5d78..7473f6167 100644
--- a/src/search/landmarks/landmark.h
+++ b/src/search/landmarks/landmark.h
@@ -3,11 +3,9 @@
 
 #include "../task_proxy.h"
 
-#include "../utils/hash.h"
+#include <unordered_set>
 
 namespace landmarks {
-using Achievers = utils::HashSet<int>;
-
 class Landmark {
 public:
     Landmark(std::vector<FactPair> _facts, bool disjunctive, bool conjunctive,
@@ -33,8 +31,8 @@ class Landmark {
     bool is_true_in_goal;
     bool is_derived;
 
-    Achievers first_achievers;
-    Achievers possible_achievers;
+    std::unordered_set<int> first_achievers;
+    std::unordered_set<int> possible_achievers;
 
     bool is_true_in_state(const State &state) const;
 };
diff --git a/src/search/landmarks/landmark_cost_partitioning_algorithms.cc b/src/search/landmarks/landmark_cost_partitioning_algorithms.cc
index 6909aff9e..18660cd42 100644
--- a/src/search/landmarks/landmark_cost_partitioning_algorithms.cc
+++ b/src/search/landmarks/landmark_cost_partitioning_algorithms.cc
@@ -29,7 +29,7 @@ CostPartitioningAlgorithm::CostPartitioningAlgorithm(
     : lm_graph(graph), operator_costs(operator_costs) {
 }
 
-const Achievers &CostPartitioningAlgorithm::get_achievers(
+const unordered_set<int> &CostPartitioningAlgorithm::get_achievers(
     const Landmark &landmark, bool past) const {
     // Return relevant achievers of the landmark according to its status.
     if (past) {
@@ -142,7 +142,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
     for (auto &node : nodes) {
         int id = node->get_id();
         if (future.test(id)) {
-            const Achievers &achievers =
+            const unordered_set<int> &achievers =
                 get_achievers(node->get_landmark(), past.test(id));
             if (achievers.empty())
                 return numeric_limits<double>::max();
@@ -175,7 +175,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
     for (auto &node : nodes) {
         int id = node->get_id();
         if (future.test(id)) {
-            const Achievers &achievers =
+            const unordered_set<int> &achievers =
                 get_achievers(node->get_landmark(), past.test(id));
             bool covered_by_action_lm = false;
             for (int op_id : achievers) {
@@ -207,7 +207,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
         // TODO: Iterate over Landmarks instead of LandmarkNodes
         int id = node->get_id();
         assert(future.test(id));
-        const Achievers &achievers = get_achievers(node->get_landmark(), past.test(id));
+        const unordered_set<int> &achievers = get_achievers(node->get_landmark(), past.test(id));
         achievers_by_lm.emplace_back(achievers.begin(), achievers.end());
     }
     for (int lm_id : compute_landmark_order(achievers_by_lm)) {
@@ -241,7 +241,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
     for (const LandmarkNode *node : relevant_lms) {
         int id = node->get_id();
         assert(future.test(id));
-        const Achievers &achievers = get_achievers(node->get_landmark(), past.test(id));
+        const unordered_set<int> &achievers = get_achievers(node->get_landmark(), past.test(id));
         double min_cost = numeric_limits<double>::max();
         for (int op_id : achievers) {
             assert(utils::in_bounds(op_id, achieved_lms_by_op));
@@ -265,7 +265,7 @@ LandmarkCanonicalHeuristic::LandmarkCanonicalHeuristic(
     : CostPartitioningAlgorithm(operator_costs, graph) {
 }
 
-static bool empty_intersection(const Achievers &x, const Achievers &y) {
+static bool empty_intersection(const unordered_set<int> &x, const unordered_set<int> &y) {
     for (int a : x) {
         if (y.find(a) != y.end()) {
             return false;
@@ -286,11 +286,11 @@ vector<vector<int>> LandmarkCanonicalHeuristic::compute_max_additive_subsets(
     for (int i = 0; i < num_landmarks; ++i) {
         const LandmarkNode *lm1 = relevant_landmarks[i];
         int id1 = lm1->get_id();
-        const Achievers &achievers1 = get_achievers(lm1->get_landmark(), past_landmarks.test(id1));
+        const unordered_set<int> &achievers1 = get_achievers(lm1->get_landmark(), past_landmarks.test(id1));
         for (int j = i + 1; j < num_landmarks; ++j) {
             const LandmarkNode *lm2 = relevant_landmarks[j];
             int id2 = lm2->get_id();
-            const Achievers &achievers2 = get_achievers(lm2->get_landmark(), past_landmarks.test(id2));
+            const unordered_set<int> &achievers2 = get_achievers(lm2->get_landmark(), past_landmarks.test(id2));
             if (empty_intersection(achievers1, achievers2)) {
                 /* If the two landmarks are additive, there is an edge in the
                    compatibility graph. */
@@ -307,7 +307,7 @@ vector<vector<int>> LandmarkCanonicalHeuristic::compute_max_additive_subsets(
 
 int LandmarkCanonicalHeuristic::compute_minimum_landmark_cost(
     const LandmarkNode &lm_node, bool past) const {
-    const Achievers &achievers = get_achievers(lm_node.get_landmark(), past);
+    const unordered_set<int> &achievers = get_achievers(lm_node.get_landmark(), past);
     assert(!achievers.empty());
     int min_cost = numeric_limits<int>::max();
     for (int op_id : achievers) {
@@ -406,7 +406,7 @@ double LandmarkPhO::compute_landmark_cost(const LandmarkNode &lm, bool past) const {
     /* Note that there are landmarks without achievers. Example: not-served(p)
        in miconic:s1-0.pddl. The fact is true in the initial state, and no
        operator achieves it. For such facts, the (infimum) cost is infinity. */
-    const Achievers &achievers = get_achievers(lm.get_landmark(), past);
+    const unordered_set<int> &achievers = get_achievers(lm.get_landmark(), past);
     double min_cost = lp_solver.get_infinity();
     for (int op_id : achievers) {
         assert(utils::in_bounds(op_id, operator_costs));
@@ -447,7 +447,7 @@ double LandmarkPhO::get_cost_partitioned_heuristic_value(
     for (int lm_id = 0; lm_id < num_cols; ++lm_id) {
         const LandmarkNode *lm = lm_graph.get_node(lm_id);
         if (future.test(lm_id)) {
-            const Achievers &achievers = get_achievers(lm->get_landmark(), past.test(lm_id));
+            const unordered_set<int> &achievers = get_achievers(lm->get_landmark(), past.test(lm_id));
             assert(!achievers.empty());
             for (int op_id : achievers) {
                 assert(utils::in_bounds(op_id, lp_constraints));
@@ -555,7 +555,7 @@ double OptimalCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
     for (int lm_id = 0; lm_id < num_cols; ++lm_id) {
         const Landmark &landmark = lm_graph.get_node(lm_id)->get_landmark();
         if (future.test(lm_id)) {
-            const Achievers &achievers =
+            const unordered_set<int> &achievers =
                 get_achievers(landmark, past.test(lm_id));
             if (achievers.empty())
                 return numeric_limits<double>::max();
diff --git a/src/search/landmarks/landmark_cost_partitioning_algorithms.h b/src/search/landmarks/landmark_cost_partitioning_algorithms.h
index ed77729d1..1bff57a8f 100644
--- a/src/search/landmarks/landmark_cost_partitioning_algorithms.h
+++ b/src/search/landmarks/landmark_cost_partitioning_algorithms.h
@@ -7,6 +7,7 @@
 
 #include "../lp/lp_solver.h"
 
+#include <unordered_set>
 #include <vector>
 
 class ConstBitsetView;
@@ -31,7 +32,7 @@ class CostPartitioningAlgorithm {
     const LandmarkGraph &lm_graph;
     const std::vector<int> operator_costs;
 
-    const Achievers &get_achievers(
+    const std::unordered_set<int> &get_achievers(
         const Landmark &landmark, bool past) const;
 public:
     CostPartitioningAlgorithm(const std::vector<int> &operator_costs,
diff --git a/src/search/landmarks/landmark_sum_heuristic.cc b/src/search/landmarks/landmark_sum_heuristic.cc
index 18958f64d..ac687a56b 100644
--- a/src/search/landmarks/landmark_sum_heuristic.cc
+++ b/src/search/landmarks/landmark_sum_heuristic.cc
@@ -1,5 +1,6 @@
 #include "landmark_sum_heuristic.h"
 
+#include "landmark.h"
 #include "landmark_factory.h"
 #include "landmark_status_manager.h"
 #include "util.h"
@@ -43,7 +44,7 @@ LandmarkSumHeuristic::LandmarkSumHeuristic(const plugins::Options &opts)
 }
 
 int LandmarkSumHeuristic::get_min_cost_of_achievers(
-    Achievers &achievers) const {
+    const unordered_set<int> &achievers) const {
     int min_cost = numeric_limits<int>::max();
     for (int id : achievers) {
         OperatorProxy op = get_operator_or_axiom(task_proxy, id);
diff --git a/src/search/landmarks/landmark_sum_heuristic.h b/src/search/landmarks/landmark_sum_heuristic.h
index 0c1c4ba19..c47af5b09 100644
--- a/src/search/landmarks/landmark_sum_heuristic.h
+++ b/src/search/landmarks/landmark_sum_heuristic.h
@@ -1,7 +1,6 @@
 #ifndef LANDMARKS_LANDMARK_SUM_HEURISTIC_H
 #define LANDMARKS_LANDMARK_SUM_HEURISTIC_H
 
-#include "landmark.h"
 #include "landmark_heuristic.h"
 
 namespace landmarks {
@@ -11,7 +10,8 @@ class LandmarkSumHeuristic : public LandmarkHeuristic {
     std::vector<int> min_first_achiever_costs;
     std::vector<int> min_possible_achiever_costs;
 
-    int get_min_cost_of_achievers(Achievers &achievers) const;
+    int get_min_cost_of_achievers(
+        const std::unordered_set<int> &achievers) const;
     void compute_landmark_costs();
 
     int get_heuristic_value(const State &ancestor_state) override;
diff --git a/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc b/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc
index 9760bb02a..c35f251c9 100644
--- a/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc
+++ b/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc
@@ -210,8 +210,8 @@ class MergeAndShrinkHeuristicFeature : public plugins::TypedFeature<Evaluator, MergeAndShrinkHeuristic> {
 
         document_note(
             "Note",
             "The strategy first partitions all states according to their "
-            "combination of f- and g-values. These partitions are then sorted, "
+            "combination of f- and h-values. These partitions are then sorted, "
             "first according to their f-value, then according to their h-value "
            "(increasing or decreasing, depending on the chosen options). "
            "States sorted last are shrinked together until reaching max_states.");
@@ -229,8 +229,8 @@ class ShrinkFHFeature : public plugins::TypedFeature<ShrinkStrategy, ShrinkFH> {
            "is a numerical parameter for which sensible values include 1000, "
            "10000, 50000, 100000 and 200000) and the linear merge startegy "
            "cg_goal_level to obtain the variant 'f-preserving shrinking of "
-           "transition systems', called called HHH in the IJCAI 2011 paper, see "
-           "bisimulation based shrink strategy. "
+           "transition systems', called HHH in the IJCAI 2011 paper. Also "
+           "see bisimulation based shrink strategy. "
            "When we last ran experiments on interaction of shrink strategies "
            "with label reduction, this strategy performed best when used with "
            "label reduction before merging (and no label reduction before "
diff --git a/src/search/parser/abstract_syntax_tree.cc b/src/search/parser/abstract_syntax_tree.cc
index 280174b64..49c1635a9 100644
--- a/src/search/parser/abstract_syntax_tree.cc
+++ b/src/search/parser/abstract_syntax_tree.cc
@@ -420,6 +420,8 @@ DecoratedASTNodePtr LiteralNode::decorate(DecorateContext &context) const {
     switch (value.type) {
     case TokenType::BOOLEAN:
         return utils::make_unique_ptr<BoolLiteralNode>(value.content);
+    case TokenType::STRING:
+        return utils::make_unique_ptr<StringLiteralNode>(value.content);
     case TokenType::INTEGER:
         return utils::make_unique_ptr<IntLiteralNode>(value.content);
     case TokenType::FLOAT:
@@ -441,6 +443,8 @@ const plugins::Type &LiteralNode::get_type(DecorateContext &context) const {
     switch (value.type) {
     case TokenType::BOOLEAN:
         return plugins::TypeRegistry::instance()->get_type<bool>();
+    case TokenType::STRING:
+        return plugins::TypeRegistry::instance()->get_type<string>();
     case TokenType::INTEGER:
         return plugins::TypeRegistry::instance()->get_type<int>();
     case TokenType::FLOAT:
diff --git a/src/search/parser/decorated_abstract_syntax_tree.cc b/src/search/parser/decorated_abstract_syntax_tree.cc
index 3a401d9e7..c1f629438 100644
--- a/src/search/parser/decorated_abstract_syntax_tree.cc
+++ b/src/search/parser/decorated_abstract_syntax_tree.cc
@@ -218,6 +218,46 @@ void BoolLiteralNode::dump(string indent) const {
     cout << indent << "BOOL: " << value << endl;
 }
 
+StringLiteralNode::StringLiteralNode(const string &value)
+    : value(value) {
+}
+
+plugins::Any StringLiteralNode::construct(ConstructContext &context) const {
+    utils::TraceBlock block(context, "Constructing string value from '" + value + "'");
+    if (!(value.starts_with('"') && value.ends_with('"'))) {
+        ABORT("String literal value is not enclosed in quotation marks"
+              " (this should have been caught before constructing this node).");
+    }
+    /*
+      We are not doing any further syntax checking. Escaped symbols other
+      than \n just drop the escaping backslash (e.g., \t is treated as t,
+      not as a tab). Strings ending in \ should have been excluded by the
+      previous steps and thus do not produce an error here.
+    */
+    string result;
+    result.reserve(value.length() - 2);
+    bool escaped = false;
+    for (char c : value.substr(1, value.size() - 2)) {
+        if (escaped) {
+            escaped = false;
+            if (c == 'n') {
+                result += '\n';
+            } else {
+                result += c;
+            }
+        } else if (c == '\\') {
+            escaped = true;
+        } else {
+            result += c;
+        }
+    }
+    return result;
+}
+
+void StringLiteralNode::dump(string indent) const {
+    cout << indent << "STRING: " << value << endl;
+}
+
 IntLiteralNode::IntLiteralNode(const string &value)
     : value(value) {
 }
@@ -473,6 +513,18 @@ shared_ptr<DecoratedASTNode> BoolLiteralNode::clone_shared() const {
     return make_shared<BoolLiteralNode>(*this);
 }
 
+StringLiteralNode::StringLiteralNode(const StringLiteralNode &other)
+    : value(other.value) {
+}
+
+unique_ptr<DecoratedASTNode> StringLiteralNode::clone() const {
+    return utils::make_unique_ptr<StringLiteralNode>(*this);
+}
+
+shared_ptr<DecoratedASTNode> StringLiteralNode::clone_shared() const {
+    return make_shared<StringLiteralNode>(*this);
+}
+
 IntLiteralNode::IntLiteralNode(const IntLiteralNode &other)
     : value(other.value) {
 }
diff --git a/src/search/parser/decorated_abstract_syntax_tree.h b/src/search/parser/decorated_abstract_syntax_tree.h
index 0094f8874..105f77bf1 100644
--- a/src/search/parser/decorated_abstract_syntax_tree.h
+++ b/src/search/parser/decorated_abstract_syntax_tree.h
@@ -157,6 +157,20 @@ class BoolLiteralNode : public DecoratedASTNode {
     BoolLiteralNode(const BoolLiteralNode &other);
 };
 
+class StringLiteralNode : public DecoratedASTNode {
+    std::string value;
+public:
+    StringLiteralNode(const std::string &value);
+
+    plugins::Any construct(ConstructContext &context) const override;
+    void dump(std::string indent) const override;
+
+    // TODO: once we get rid of lazy construction, this should no longer be necessary.
+    virtual std::unique_ptr<DecoratedASTNode> clone() const override;
+    virtual std::shared_ptr<DecoratedASTNode> clone_shared() const override;
+    StringLiteralNode(const StringLiteralNode &other);
+};
+
 class IntLiteralNode : public DecoratedASTNode {
     std::string value;
 public:
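
The unescaping loop in StringLiteralNode::construct above is easy to check in isolation. Below is a minimal standalone sketch of the same rule; the helper name unescape and the main driver are illustrative and not part of the patch:

#include <cassert>
#include <string>

// Mirrors the loop in StringLiteralNode::construct: strip the surrounding
// quotes, turn \n into a newline, and keep any other escaped character
// verbatim (the backslash is dropped).
static std::string unescape(const std::string &value) {
    // `value` is assumed to be quoted, e.g. R"("a\nb")".
    std::string result;
    result.reserve(value.length() - 2);
    bool escaped = false;
    for (char c : value.substr(1, value.size() - 2)) {
        if (escaped) {
            escaped = false;
            result += (c == 'n') ? '\n' : c;
        } else if (c == '\\') {
            escaped = true;
        } else {
            result += c;
        }
    }
    return result;
}

int main() {
    assert(unescape(R"("a\nb")") == "a\nb");    // \n becomes a newline
    assert(unescape(R"("a\tb")") == "atb");     // unknown escape: backslash dropped
    assert(unescape(R"("a\\b")") == R"(a\b)");  // \\ becomes a single backslash
    assert(unescape(R"("a\"b")") == R"(a"b)");  // \" becomes a quote
    return 0;
}
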
diff --git a/src/search/parser/lexical_analyzer.cc b/src/search/parser/lexical_analyzer.cc
index 9a3736080..9812fd3e7 100644
--- a/src/search/parser/lexical_analyzer.cc
+++ b/src/search/parser/lexical_analyzer.cc
@@ -24,12 +24,22 @@ static vector<pair<TokenType, regex>> construct_token_type_expressions() {
         {TokenType::CLOSING_BRACKET, R"(\])"},
         {TokenType::COMMA, R"(,)"},
         {TokenType::EQUALS, R"(=)"},
+        {TokenType::LET, R"(let)"},
+        {TokenType::BOOLEAN, R"(true|false)"},
+        {TokenType::STRING, R"("(\\\\|\\"|\\n|[^"\\])*")"},
+        /*
+          Floats have to be parsed before integers, so tokens like '1.2' are
+          parsed as one float token rather than an integer token '1' followed
+          by a float token '.2'.
+        */
         {TokenType::FLOAT,
          R"([+-]?(((\d*\.\d+|\d+\.)(e[+-]?\d+|[kmg]\b)?)|\d+e[+-]?\d+))"},
-        {TokenType::INTEGER,
-         R"([+-]?(infinity|\d+([kmg]\b)?))"},
-        {TokenType::BOOLEAN, R"(true|false)"},
-        {TokenType::LET, R"(let)"},
+        {TokenType::INTEGER, R"([+-]?(infinity|\d+([kmg]\b)?))"},
+        /*
+          Identifiers have to be parsed last to prevent reserved words
+          ('infinity', 'true', 'false', and 'let') from being recognized as
+          identifiers.
+        */
         {TokenType::IDENTIFIER, R"([a-zA-Z_]\w*)"}
     };
     vector<pair<TokenType, regex>> token_type_expression;
@@ -42,6 +52,24 @@ static vector<pair<TokenType, regex>> construct_token_type_expressions() {
 static const vector<pair<TokenType, regex>> token_type_expressions =
     construct_token_type_expressions();
 
+static string highlight_position(const string &text, string::const_iterator pos) {
+    ostringstream message_stream;
+    int distance_to_highlight = pos - text.begin();
+    for (const string &line : utils::split(text, "\n")) {
+        int line_length = line.size();
+        bool highlight_in_line =
+            distance_to_highlight < line_length && distance_to_highlight >= 0;
+        message_stream << (highlight_in_line ? "> " : "  ") << line << endl;
+        if (highlight_in_line) {
+            message_stream << string(distance_to_highlight + 2, ' ') << "^" << endl;
+        }
+        distance_to_highlight -= line.size() + 1;
+    }
+    string message = message_stream.str();
+    utils::rstrip(message);
+    return message;
+}
+
 TokenStream split_tokens(const string &text) {
     utils::Context context;
     vector<Token> tokens;
@@ -59,29 +87,15 @@ TokenStream split_tokens(const string &text) {
             TokenType token_type = type_and_expression.first;
             const regex &expression = type_and_expression.second;
             if (regex_search(start, end, match, expression, regex_constants::match_continuous)) {
-                tokens.push_back({utils::tolower(match[1]), token_type});
+                tokens.push_back({match[1], token_type});
                 start += match[0].length();
                 has_match = true;
                 break;
            }
        }
        if (!has_match) {
-            ostringstream error;
-            error << "Unable to recognize next token:" << endl;
-            int distance_to_error = start - text.begin();
-            for (const string &line : utils::split(text, "\n")) {
-                int line_length = line.size();
-                bool error_in_line =
-                    distance_to_error < line_length && distance_to_error >= 0;
-                error << (error_in_line ? "> " : "  ") << line << endl;
-                if (error_in_line)
-                    error << string(distance_to_error + 2, ' ') << "^" << endl;
-                distance_to_error -= line.size() + 1;
-            }
-            string message = error.str();
-            utils::rstrip(message);
-            context.error(message);
+            context.error("Unable to recognize next token:\n" + highlight_position(text, start));
        }
    }
    return TokenStream(move(tokens));
 }
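
The reordering of the token table above is load-bearing: split_tokens tries the expressions in order and takes the first one that matches at the current position (match_continuous). A minimal sketch of that dispatch follows, with a trimmed rule table and illustrative names that are not part of the parser:

#include <iostream>
#include <regex>
#include <string>
#include <utility>
#include <vector>

int main() {
    // Trimmed rule table in the patch's order: FLOAT before INTEGER,
    // IDENTIFIER last.
    std::vector<std::pair<std::string, std::regex>> rules = {
        {"FLOAT", std::regex(R"([+-]?(((\d*\.\d+|\d+\.)(e[+-]?\d+)?)|\d+e[+-]?\d+))")},
        {"INTEGER", std::regex(R"([+-]?(infinity|\d+))")},
        {"IDENTIFIER", std::regex(R"([a-zA-Z_]\w*)")},
    };
    for (std::string text : {"1.2", "infinity", "astar"}) {
        std::smatch match;
        for (const auto &[name, expression] : rules) {
            // match_continuous anchors the match at the current position,
            // just like in split_tokens.
            if (std::regex_search(text.cbegin(), text.cend(), match, expression,
                                  std::regex_constants::match_continuous)) {
                std::cout << text << " -> " << name << std::endl;
                break;
            }
        }
    }
    // Prints: "1.2 -> FLOAT", "infinity -> INTEGER", "astar -> IDENTIFIER".
    // With INTEGER tried before FLOAT, "1.2" would instead match as the
    // integer "1", leaving ".2" unconsumed.
    return 0;
}
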
"Unexpected token '" << token << "'. Expected any of the following token types: " - << utils::join(PARSE_NODE_TOKEN_TYPES, ", "); + << utils::join(parse_node_token_types, ", "); context.error(message.str()); } switch (token.type) { + case TokenType::OPENING_BRACKET: + return parse_list(tokens, context); case TokenType::LET: return parse_let(tokens, context); + case TokenType::BOOLEAN: + case TokenType::STRING: + case TokenType::INTEGER: + case TokenType::FLOAT: + return parse_literal(tokens, context); case TokenType::IDENTIFIER: if (tokens.has_tokens(2) && tokens.peek(context, 1).type == TokenType::OPENING_PARENTHESIS) { @@ -219,12 +228,6 @@ static ASTNodePtr parse_node(TokenStream &tokens, } else { return parse_literal(tokens, context); } - case TokenType::BOOLEAN: - case TokenType::INTEGER: - case TokenType::FLOAT: - return parse_literal(tokens, context); - case TokenType::OPENING_BRACKET: - return parse_list(tokens, context); default: ABORT("Unknown token type '" + token_type_name(token.type) + "'."); } diff --git a/src/search/parser/token_stream.cc b/src/search/parser/token_stream.cc index c6d689cf1..b2147f5a9 100644 --- a/src/search/parser/token_stream.cc +++ b/src/search/parser/token_stream.cc @@ -11,8 +11,18 @@ using namespace std; namespace parser { +static string case_insensitive_to_lower(const string &content, TokenType type) { + if (type == TokenType::BOOLEAN || + type == TokenType::INTEGER || + type == TokenType::FLOAT || + type == TokenType::IDENTIFIER) { + return utils::tolower(content); + } else { + return content; + } +} Token::Token(const string &content, TokenType type) - : content(content), type(type) { + : content(case_insensitive_to_lower(content, type)), type(type) { } TokenStream::TokenStream(vector &&tokens) @@ -90,16 +100,18 @@ string token_type_name(TokenType token_type) { return ","; case TokenType::EQUALS: return "="; + case TokenType::LET: + return "Let"; + case TokenType::BOOLEAN: + return "Boolean"; + case TokenType::STRING: + return "String"; case TokenType::INTEGER: return "Integer"; case TokenType::FLOAT: return "Float"; - case TokenType::BOOLEAN: - return "Boolean"; case TokenType::IDENTIFIER: return "Identifier"; - case TokenType::LET: - return "Let"; default: ABORT("Unknown token type."); } diff --git a/src/search/parser/token_stream.h b/src/search/parser/token_stream.h index 832cdafc7..71dd831f5 100644 --- a/src/search/parser/token_stream.h +++ b/src/search/parser/token_stream.h @@ -16,11 +16,12 @@ enum class TokenType { CLOSING_BRACKET, COMMA, EQUALS, + LET, + BOOLEAN, + STRING, INTEGER, FLOAT, - BOOLEAN, - IDENTIFIER, - LET + IDENTIFIER }; struct Token { diff --git a/src/search/plugins/types.cc b/src/search/plugins/types.cc index 117c139b5..c1ca2c372 100644 --- a/src/search/plugins/types.cc +++ b/src/search/plugins/types.cc @@ -292,6 +292,7 @@ BasicType TypeRegistry::NO_TYPE = BasicType(typeid(void), ""); TypeRegistry::TypeRegistry() { insert_basic_type(); + insert_basic_type(); insert_basic_type(); insert_basic_type(); } diff --git a/src/search/utils/timer.cc b/src/search/utils/timer.cc index 326777038..83d0ba681 100644 --- a/src/search/utils/timer.cc +++ b/src/search/utils/timer.cc @@ -1,6 +1,7 @@ #include "timer.h" #include +#include #include #if OPERATING_SYSTEM == LINUX || OPERATING_SYSTEM == OSX @@ -110,7 +111,7 @@ Duration Timer::reset() { } ostream &operator<<(ostream &os, const Timer &timer) { - os << timer(); + os << fixed << setprecision(6) << timer(); return os; }