Merge branch 'main' into scorpion
jendrikseipp committed Dec 8, 2023
2 parents 01f9712 + f3ce70a commit db5cd29
Showing 19 changed files with 192 additions and 77 deletions.
2 changes: 1 addition & 1 deletion build.py
@@ -89,7 +89,7 @@ def try_run(cmd):
except OSError as exc:
if exc.errno == errno.ENOENT:
print(f"Could not find '{cmd[0]}' on your PATH. For installation instructions, "
"see https://www.fast-downward.org/ObtainingAndRunningFastDownward.")
"see BUILD.md in the project root directory.")
sys.exit(1)
else:
raise
2 changes: 2 additions & 0 deletions misc/style/check-include-guard-convention.py
@@ -18,6 +18,8 @@ def check_header_files(component):
errors = []
for filename in header_files:
assert filename.endswith(".h"), filename
if "/ext/" in filename:
continue
rel_filename = os.path.relpath(filename, start=component_dir)
guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
expected = "#ifndef " + guard
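For illustration, the convention this script enforces derives the guard from the header's path relative to its component directory ('.', '/', and '-' become '_', upper-cased). A header at landmarks/landmark_sum_heuristic.h inside the search component (one of the files touched below) is therefore expected to start with:

#ifndef LANDMARKS_LANDMARK_SUM_HEURISTIC_H
#define LANDMARKS_LANDMARK_SUM_HEURISTIC_H
// ... declarations ...
#endif
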
21 changes: 16 additions & 5 deletions src/search/cmake/FindCplex.cmake
@@ -12,7 +12,16 @@
set(IMPORTED_CONFIGURATIONS "Debug" "Release")
set(HINT_PATHS ${cplex_DIR} $ENV{cplex_DIR})

add_library(cplex::cplex IMPORTED SHARED)
if(WIN32)
# On Windows we have to declare the library as SHARED to correctly
# communicate the location of the dll and impllib files.
add_library(cplex::cplex IMPORTED SHARED)
else()
# On Linux, the CPLEX installer sometimes does not provide dynamic
# libraries. If they are absent, we fall back to static ones further down,
# hence we mark the type unknown here.
add_library(cplex::cplex IMPORTED UNKNOWN)
endif()
set_target_properties(cplex::cplex PROPERTIES
IMPORTED_CONFIGURATIONS "${IMPORTED_CONFIGURATIONS}"
)
@@ -157,18 +166,20 @@ foreach(CONFIG_ORIG ${IMPORTED_CONFIGURATIONS})
list(APPEND REQUIRED_LIBRARIES CPLEX_SHARED_LIBRARY_${CONFIG} CPLEX_IMPLIB_${CONFIG})
else()
# CPLEX stores .so files in /bin
find_library(CPLEX_SHARED_LIBRARY_${CONFIG}
find_library(CPLEX_LIBRARY_${CONFIG}
NAMES
cplex${CPLEX_VERSION_NO_DOTS}
cplex
HINTS
${HINT_PATHS}/bin
${HINT_PATHS}/lib
PATH_SUFFIXES
${SUFFIXES_${CONFIG}}
)
set_target_properties(cplex::cplex PROPERTIES
IMPORTED_LOCATION_${CONFIG} ${CPLEX_SHARED_LIBRARY_${CONFIG}}
IMPORTED_LOCATION_${CONFIG} ${CPLEX_LIBRARY_${CONFIG}}
)
list(APPEND REQUIRED_LIBRARIES CPLEX_SHARED_LIBRARY_${CONFIG})
list(APPEND REQUIRED_LIBRARIES CPLEX_LIBRARY_${CONFIG})
endif()
endforeach()

@@ -186,5 +197,5 @@ mark_as_advanced(
CPLEX_VERSION_SUBMINOR CPLEX_VERSION_NO_DOTS BITWIDTH_HINTS PLATFORM_HINTS
LIBRARY_TYPE_HINTS_RELEASE LIBRARY_TYPE_HINTS_DEBUG SUFFIXES_RELEASE
SUFFIXES_DEBUG FIND_OPTIONS COMPILER_HINTS COMPILER_HINT CPLEX_IMPLIB_DEBUG
CPLEX_IMPLIB_RELEASE CPLEX_SHARED_LIBRARY_DEBUG CPLEX_SHARED_LIBRARY_RELEASE
CPLEX_IMPLIB_RELEASE CPLEX_LIBRARY_DEBUG CPLEX_LIBRARY_RELEASE
)
8 changes: 3 additions & 5 deletions src/search/landmarks/landmark.h
@@ -3,11 +3,9 @@

#include "../task_proxy.h"

#include "../utils/hash.h"
#include <unordered_set>

namespace landmarks {
using Achievers = utils::HashSet<int>;

class Landmark {
public:
Landmark(std::vector<FactPair> _facts, bool disjunctive, bool conjunctive,
@@ -33,8 +31,8 @@ class Landmark {
bool is_true_in_goal;
bool is_derived;

Achievers first_achievers;
Achievers possible_achievers;
std::unordered_set<int> first_achievers;
std::unordered_set<int> possible_achievers;

bool is_true_in_state(const State &state) const;
};
24 changes: 12 additions & 12 deletions src/search/landmarks/landmark_cost_partitioning_algorithms.cc
@@ -29,7 +29,7 @@ CostPartitioningAlgorithm::CostPartitioningAlgorithm(
: lm_graph(graph), operator_costs(operator_costs) {
}

const Achievers &CostPartitioningAlgorithm::get_achievers(
const unordered_set<int> &CostPartitioningAlgorithm::get_achievers(
const Landmark &landmark, bool past) const {
// Return relevant achievers of the landmark according to its status.
if (past) {
@@ -142,7 +142,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
for (auto &node : nodes) {
int id = node->get_id();
if (future.test(id)) {
const Achievers &achievers =
const unordered_set<int> &achievers =
get_achievers(node->get_landmark(), past.test(id));
if (achievers.empty())
return numeric_limits<double>::max();
@@ -175,7 +175,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
for (auto &node : nodes) {
int id = node->get_id();
if (future.test(id)) {
const Achievers &achievers =
const unordered_set<int> &achievers =
get_achievers(node->get_landmark(), past.test(id));
bool covered_by_action_lm = false;
for (int op_id : achievers) {
@@ -207,7 +207,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
// TODO: Iterate over Landmarks instead of LandmarkNodes
int id = node->get_id();
assert(future.test(id));
const Achievers &achievers = get_achievers(node->get_landmark(), past.test(id));
const unordered_set<int> &achievers = get_achievers(node->get_landmark(), past.test(id));
achievers_by_lm.emplace_back(achievers.begin(), achievers.end());
}
for (int lm_id : compute_landmark_order(achievers_by_lm)) {
@@ -241,7 +241,7 @@ double UniformCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
for (const LandmarkNode *node : relevant_lms) {
int id = node->get_id();
assert(future.test(id));
const Achievers &achievers = get_achievers(node->get_landmark(), past.test(id));
const unordered_set<int> &achievers = get_achievers(node->get_landmark(), past.test(id));
double min_cost = numeric_limits<double>::max();
for (int op_id : achievers) {
assert(utils::in_bounds(op_id, achieved_lms_by_op));
@@ -265,7 +265,7 @@ LandmarkCanonicalHeuristic::LandmarkCanonicalHeuristic(
: CostPartitioningAlgorithm(operator_costs, graph) {
}

static bool empty_intersection(const Achievers &x, const Achievers &y) {
static bool empty_intersection(const unordered_set<int> &x, const unordered_set<int> &y) {
for (int a : x) {
if (y.find(a) != y.end()) {
return false;
@@ -286,11 +286,11 @@ vector<vector<int>> LandmarkCanonicalHeuristic::compute_max_additive_subsets(
for (int i = 0; i < num_landmarks; ++i) {
const LandmarkNode *lm1 = relevant_landmarks[i];
int id1 = lm1->get_id();
const Achievers &achievers1 = get_achievers(lm1->get_landmark(), past_landmarks.test(id1));
const unordered_set<int> &achievers1 = get_achievers(lm1->get_landmark(), past_landmarks.test(id1));
for (int j = i + 1; j < num_landmarks; ++j) {
const LandmarkNode *lm2 = relevant_landmarks[j];
int id2 = lm2->get_id();
const Achievers &achievers2 = get_achievers(lm2->get_landmark(), past_landmarks.test(id2));
const unordered_set<int> &achievers2 = get_achievers(lm2->get_landmark(), past_landmarks.test(id2));
if (empty_intersection(achievers1, achievers2)) {
/* If the two landmarks are additive, there is an edge in the
compatibility graph. */
@@ -307,7 +307,7 @@

int LandmarkCanonicalHeuristic::compute_minimum_landmark_cost(
const LandmarkNode &lm_node, bool past) const {
const Achievers &achievers = get_achievers(lm_node.get_landmark(), past);
const unordered_set<int> &achievers = get_achievers(lm_node.get_landmark(), past);
assert(!achievers.empty());
int min_cost = numeric_limits<int>::max();
for (int op_id : achievers) {
@@ -406,7 +406,7 @@ double LandmarkPhO::compute_landmark_cost(const LandmarkNode &lm, bool past) con
/* Note that there are landmarks without achievers. Example: not-served(p)
in miconic:s1-0.pddl. The fact is true in the initial state, and no
operator achieves it. For such facts, the (infimum) cost is infinity. */
const Achievers &achievers = get_achievers(lm.get_landmark(), past);
const unordered_set<int> &achievers = get_achievers(lm.get_landmark(), past);
double min_cost = lp_solver.get_infinity();
for (int op_id : achievers) {
assert(utils::in_bounds(op_id, operator_costs));
@@ -447,7 +447,7 @@ double LandmarkPhO::get_cost_partitioned_heuristic_value(
for (int lm_id = 0; lm_id < num_cols; ++lm_id) {
const LandmarkNode *lm = lm_graph.get_node(lm_id);
if (future.test(lm_id)) {
const Achievers &achievers = get_achievers(lm->get_landmark(), past.test(lm_id));
const unordered_set<int> &achievers = get_achievers(lm->get_landmark(), past.test(lm_id));
assert(!achievers.empty());
for (int op_id : achievers) {
assert(utils::in_bounds(op_id, lp_constraints));
@@ -555,7 +555,7 @@ double OptimalCostPartitioningAlgorithm::get_cost_partitioned_heuristic_value(
for (int lm_id = 0; lm_id < num_cols; ++lm_id) {
const Landmark &landmark = lm_graph.get_node(lm_id)->get_landmark();
if (future.test(lm_id)) {
const Achievers &achievers =
const unordered_set<int> &achievers =
get_achievers(landmark, past.test(lm_id));
if (achievers.empty())
return numeric_limits<double>::max();
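To illustrate the additivity test used in compute_max_additive_subsets above, here is a minimal standalone C++ sketch with hypothetical achiever sets (not part of the commit): two landmarks whose achiever sets share no operator ID count as additive and receive an edge in the compatibility graph.

#include <iostream>
#include <unordered_set>

// Same check as the empty_intersection helper above.
static bool empty_intersection(const std::unordered_set<int> &x,
                               const std::unordered_set<int> &y) {
    for (int a : x) {
        if (y.find(a) != y.end())
            return false;
    }
    return true;
}

int main() {
    std::unordered_set<int> achievers1 = {0, 2};  // operators achieving landmark 1
    std::unordered_set<int> achievers2 = {1, 3};  // operators achieving landmark 2
    // Disjoint achiever sets: the two landmarks are additive.
    std::cout << std::boolalpha
              << empty_intersection(achievers1, achievers2) << '\n';  // prints "true"
    return 0;
}
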
3 changes: 2 additions & 1 deletion src/search/landmarks/landmark_cost_partitioning_algorithms.h
@@ -7,6 +7,7 @@

#include "../lp/lp_solver.h"

#include <unordered_set>
#include <vector>

class ConstBitsetView;
@@ -31,7 +32,7 @@ class CostPartitioningAlgorithm {
const LandmarkGraph &lm_graph;
const std::vector<int> operator_costs;

const Achievers &get_achievers(
const std::unordered_set<int> &get_achievers(
const Landmark &landmark, bool past) const;
public:
CostPartitioningAlgorithm(const std::vector<int> &operator_costs,
3 changes: 2 additions & 1 deletion src/search/landmarks/landmark_sum_heuristic.cc
@@ -1,5 +1,6 @@
#include "landmark_sum_heuristic.h"

#include "landmark.h"
#include "landmark_factory.h"
#include "landmark_status_manager.h"
#include "util.h"
@@ -43,7 +44,7 @@ LandmarkSumHeuristic::LandmarkSumHeuristic(const plugins::Options &opts)
}

int LandmarkSumHeuristic::get_min_cost_of_achievers(
Achievers &achievers) const {
const unordered_set<int> &achievers) const {
int min_cost = numeric_limits<int>::max();
for (int id : achievers) {
OperatorProxy op = get_operator_or_axiom(task_proxy, id);
4 changes: 2 additions & 2 deletions src/search/landmarks/landmark_sum_heuristic.h
@@ -1,7 +1,6 @@
#ifndef LANDMARKS_LANDMARK_SUM_HEURISTIC_H
#define LANDMARKS_LANDMARK_SUM_HEURISTIC_H

#include "landmark.h"
#include "landmark_heuristic.h"

namespace landmarks {
@@ -11,7 +10,8 @@ class LandmarkSumHeuristic : public LandmarkHeuristic {
std::vector<int> min_first_achiever_costs;
std::vector<int> min_possible_achiever_costs;

int get_min_cost_of_achievers(Achievers &achievers) const;
int get_min_cost_of_achievers(
const std::unordered_set<int> &achievers) const;
void compute_landmark_costs();

int get_heuristic_value(const State &ancestor_state) override;
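To make the role of these achiever sets concrete, here is a minimal standalone sketch with hypothetical operator costs (not part of the commit) of the min-over-achievers computation that get_min_cost_of_achievers performs per landmark:

#include <algorithm>
#include <iostream>
#include <limits>
#include <unordered_set>
#include <vector>

int main() {
    // Hypothetical operator costs, indexed by operator ID.
    std::vector<int> operator_costs = {1, 4, 2, 7};
    // Hypothetical achievers of a single landmark.
    std::unordered_set<int> achievers = {1, 3};

    int min_cost = std::numeric_limits<int>::max();
    for (int op_id : achievers)
        min_cost = std::min(min_cost, operator_costs[op_id]);
    // The cheapest achiever (operator 1, cost 4) determines the landmark's contribution.
    std::cout << min_cost << '\n';  // prints 4
    return 0;
}
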
4 changes: 2 additions & 2 deletions src/search/merge_and_shrink/merge_and_shrink_heuristic.cc
@@ -210,8 +210,8 @@ class MergeAndShrinkHeuristicFeature : public plugins::TypedFeature<Evaluator, M
"value would be half of the time allocated for the planner.\n"
"{{{\nmerge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
"merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector="
"score_based_filtering(scoring_functions=[goal_relevance,dfp,"
"total_order])),label_reduction=exact(before_shrinking=true,"
"score_based_filtering(scoring_functions=[goal_relevance(),dfp(),"
"total_order()])),label_reduction=exact(before_shrinking=true,"
"before_merging=false),max_states=50k,threshold_before_merge=1)\n}}}\n");

document_language_support("action costs", "supported");
6 changes: 3 additions & 3 deletions src/search/merge_and_shrink/shrink_fh.cc
@@ -218,7 +218,7 @@ class ShrinkFHFeature : public plugins::TypedFeature<ShrinkStrategy, ShrinkFH> {
document_note(
"Note",
"The strategy first partitions all states according to their "
"combination of f- and g-values. These partitions are then sorted, "
"combination of f- and h-values. These partitions are then sorted, "
"first according to their f-value, then according to their h-value "
"(increasing or decreasing, depending on the chosen options). "
"States sorted last are shrinked together until reaching max_states.");
@@ -229,8 +229,8 @@
"is a numerical parameter for which sensible values include 1000, "
"10000, 50000, 100000 and 200000) and the linear merge startegy "
"cg_goal_level to obtain the variant 'f-preserving shrinking of "
"transition systems', called called HHH in the IJCAI 2011 paper, see "
"bisimulation based shrink strategy. "
"transition systems', called HHH in the IJCAI 2011 paper. Also "
"see bisimulation based shrink strategy. "
"When we last ran experiments on interaction of shrink strategies "
"with label reduction, this strategy performed best when used with "
"label reduction before merging (and no label reduction before "
4 changes: 4 additions & 0 deletions src/search/parser/abstract_syntax_tree.cc
@@ -420,6 +420,8 @@ DecoratedASTNodePtr LiteralNode::decorate(DecorateContext &context) const {
switch (value.type) {
case TokenType::BOOLEAN:
return utils::make_unique_ptr<BoolLiteralNode>(value.content);
case TokenType::STRING:
return utils::make_unique_ptr<StringLiteralNode>(value.content);
case TokenType::INTEGER:
return utils::make_unique_ptr<IntLiteralNode>(value.content);
case TokenType::FLOAT:
@@ -441,6 +443,8 @@ const plugins::Type &LiteralNode::get_type(DecorateContext &context) const {
switch (value.type) {
case TokenType::BOOLEAN:
return plugins::TypeRegistry::instance()->get_type<bool>();
case TokenType::STRING:
return plugins::TypeRegistry::instance()->get_type<string>();
case TokenType::INTEGER:
return plugins::TypeRegistry::instance()->get_type<int>();
case TokenType::FLOAT:
52 changes: 52 additions & 0 deletions src/search/parser/decorated_abstract_syntax_tree.cc
@@ -218,6 +218,46 @@ void BoolLiteralNode::dump(string indent) const {
cout << indent << "BOOL: " << value << endl;
}

StringLiteralNode::StringLiteralNode(const string &value)
: value(value) {
}

plugins::Any StringLiteralNode::construct(ConstructContext &context) const {
utils::TraceBlock block(context, "Constructing string value from '" + value + "'");
if (!(value.starts_with('"') && value.ends_with('"'))) {
ABORT("String literal value is not enclosed in quotation marks"
" (this should have been caught before constructing this node).");
}
/*
We are not doing any further syntax checking. Escaped symbols other than
\n will just ignore the escaping \ (e.g., \t is treated as t, not as a
tab). Strings ending in \ will not produce an error but should be excluded
by the previous steps.
*/
string result;
result.reserve(value.length() - 2);
bool escaped = false;
for (char c : value.substr(1, value.size() - 2)) {
if (escaped) {
escaped = false;
if (c == 'n') {
result += '\n';
} else {
result += c;
}
} else if (c == '\\') {
escaped = true;
} else {
result += c;
}
}
return result;
}

void StringLiteralNode::dump(string indent) const {
cout << indent << "STRING: " << value << endl;
}

IntLiteralNode::IntLiteralNode(const string &value)
: value(value) {
}
@@ -473,6 +513,18 @@ shared_ptr<DecoratedASTNode> BoolLiteralNode::clone_shared() const {
return make_shared<BoolLiteralNode>(*this);
}

StringLiteralNode::StringLiteralNode(const StringLiteralNode &other)
: value(other.value) {
}

unique_ptr<DecoratedASTNode> StringLiteralNode::clone() const {
return utils::make_unique_ptr<StringLiteralNode>(*this);
}

shared_ptr<DecoratedASTNode> StringLiteralNode::clone_shared() const {
return make_shared<StringLiteralNode>(*this);
}

IntLiteralNode::IntLiteralNode(const IntLiteralNode &other)
: value(other.value) {
}
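A minimal standalone sketch of the unescaping behavior implemented in StringLiteralNode::construct above (hypothetical reimplementation for illustration): '\n' becomes a newline, any other escaped character simply drops the backslash, and the surrounding quotation marks are stripped.

#include <cassert>
#include <string>

// Mirrors the loop in StringLiteralNode::construct.
static std::string unescape(const std::string &quoted) {
    assert(quoted.size() >= 2 && quoted.front() == '"' && quoted.back() == '"');
    std::string result;
    result.reserve(quoted.size() - 2);
    bool escaped = false;
    for (char c : quoted.substr(1, quoted.size() - 2)) {
        if (escaped) {
            escaped = false;
            result += (c == 'n') ? '\n' : c;
        } else if (c == '\\') {
            escaped = true;
        } else {
            result += c;
        }
    }
    return result;
}

int main() {
    assert(unescape("\"a\\nb\"") == "a\nb");  // \n turns into a newline
    assert(unescape("\"a\\tb\"") == "atb");   // \t is treated as plain 't'
    assert(unescape("\"\\\\\"") == "\\");     // \\ yields a single backslash
    return 0;
}
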
14 changes: 14 additions & 0 deletions src/search/parser/decorated_abstract_syntax_tree.h
@@ -157,6 +157,20 @@ class BoolLiteralNode : public DecoratedASTNode {
BoolLiteralNode(const BoolLiteralNode &other);
};

class StringLiteralNode : public DecoratedASTNode {
std::string value;
public:
StringLiteralNode(const std::string &value);

plugins::Any construct(ConstructContext &context) const override;
void dump(std::string indent) const override;

// TODO: once we get rid of lazy construction, this should no longer be necessary.
virtual std::unique_ptr<DecoratedASTNode> clone() const override;
virtual std::shared_ptr<DecoratedASTNode> clone_shared() const override;
StringLiteralNode(const StringLiteralNode &other);
};

class IntLiteralNode : public DecoratedASTNode {
std::string value;
public:
(Diffs for the remaining 6 changed files are not shown.)
