Refactor DLC/DAWAANR/MDDS core setters (#4238)
Description of changes:

* create a new DLC struct every time a new DLC actor is set up (#4220)
* use the standard exception mechanism in the DLC/DAWAANR/MDDS core setters (#4219) and test it (see the sketch after this list)
* non-P3M methods no longer crash the system with `std::abort()` (via `errexit()`) when something unexpected happens
* reduce code duplication in DLC by disentangling DLC from P3M
  * DLC now depends on feature `DIPOLES` instead of `DP3M` (aka `DIPOLES + FFTW`)
  * ICC now depends on feature `ELECTROSTATICS` instead of `P3M` (aka `ELECTROSTATICS + FFTW`)
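
As a quick illustration of the new contract described above, here is a minimal sketch with hypothetical names (`example_set_params`, `example_caller`); the real setters changed by this commit are `dawaanr_set_params()` and `mdds_set_params()` in the diff below.

```cpp
#include <stdexcept>

// Hypothetical setter: throws instead of returning ES_ERROR.
void example_set_params(int n_replica) {
  if (n_replica < 0)
    throw std::runtime_error("Dipolar direct sum requires n_replica >= 0.");
}

// Hypothetical caller: converts the exception into a recoverable error
// instead of aborting the simulation.
void example_caller(int n_replica, int &state) {
  try {
    example_set_params(n_replica);
  } catch (std::runtime_error const &err) {
    // in the core this becomes runtimeErrorMsg() << err.what();
    state = 0;
  }
}
```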
kodiakhq[bot] committed Apr 30, 2021
2 parents c817fae + 0c809ae commit 7ad3322
Showing 17 changed files with 292 additions and 231 deletions.
55 changes: 21 additions & 34 deletions src/core/electrostatics_magnetostatics/dipole.cpp
@@ -66,23 +66,27 @@ void calc_pressure_long_range() {

void nonbonded_sanity_check(int &state) {
#ifdef DP3M
  switch (dipole.method) {
  case DIPOLAR_MDLC_P3M:
    if (mdlc_sanity_checks())
      state = 0; // fall through
  case DIPOLAR_P3M:
    if (dp3m_sanity_checks(node_grid))
      state = 0;
    break;
  case DIPOLAR_MDLC_DS:
    if (mdlc_sanity_checks())
      state = 0; // fall through
  case DIPOLAR_DS:
    if (magnetic_dipolar_direct_sum_sanity_checks())
      state = 0;
    break;
  default:
    break;
  try {
    switch (dipole.method) {
    case DIPOLAR_MDLC_P3M:
      mdlc_sanity_checks();
      // fall through
    case DIPOLAR_P3M:
      if (dp3m_sanity_checks(node_grid))
        state = 0;
      break;
    case DIPOLAR_MDLC_DS:
      mdlc_sanity_checks();
      // fall through
    case DIPOLAR_DS:
      mdds_sanity_checks(mdds_n_replica);
      break;
    default:
      break;
    }
  } catch (std::runtime_error const &err) {
    runtimeErrorMsg() << err.what();
    state = 0;
  }
#endif
}
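
For context, a self-contained sketch (with hypothetical `check_mdlc`/`check_ds` stand-ins) of the fall-through-then-catch pattern used above: an MDLC variant first validates the layer correction, then falls through to the check of its underlying solver, and any failure is reported once.

```cpp
#include <iostream>
#include <stdexcept>

// Hypothetical stand-ins for mdlc_sanity_checks() / mdds_sanity_checks().
enum class Method { MDLC_DS, DS };

void check_mdlc() { /* may throw std::runtime_error */ }
void check_ds() { /* may throw std::runtime_error */ }

bool sanity_check(Method method) {
  try {
    switch (method) {
    case Method::MDLC_DS:
      check_mdlc(); // validate the layer correction first
      // fall through
    case Method::DS:
      check_ds(); // then check the underlying solver
      break;
    }
  } catch (std::runtime_error const &err) {
    std::cerr << err.what() << '\n';
    return false;
  }
  return true;
}
```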
@@ -262,23 +266,6 @@ double calc_energy_long_range(const ParticleRange &particles) {
return energy;
}

int set_mesh() {
  switch (dipole.method) {
#ifdef DP3M
  case DIPOLAR_MDLC_P3M:
  case DIPOLAR_P3M:
    set_method_local(DIPOLAR_MDLC_P3M);
    return 0;
#endif
  case DIPOLAR_MDLC_DS:
  case DIPOLAR_DS:
    set_method_local(DIPOLAR_MDLC_DS);
    return 0;
  default:
    return 1;
  }
}

void bcast_params(const boost::mpi::communicator &comm) {
namespace mpi = boost::mpi;

1 change: 0 additions & 1 deletion src/core/electrostatics_magnetostatics/dipole.hpp
@@ -82,7 +82,6 @@ void calc_long_range_force(const ParticleRange &particles);

double calc_energy_long_range(const ParticleRange &particles);

int set_mesh();
void bcast_params(const boost::mpi::communicator &comm);

/** @brief Set the dipolar prefactor */
55 changes: 27 additions & 28 deletions src/core/electrostatics_magnetostatics/magnetic_non_p3m_methods.cpp
@@ -36,6 +36,11 @@
#include <utils/constants.hpp>
#include <utils/math/sqr.hpp>

#include <cmath>
#include <cstdio>
#include <stdexcept>
#include <vector>

/**
* Calculate dipolar energy and optionally force between two particles.
* @param[in,out] p1 First particle
@@ -131,12 +136,14 @@ double dawaanr_calculations(bool force_flag, bool energy_flag,
=============================================================================
*/

int Ncut_off_magnetic_dipolar_direct_sum = 0;

int magnetic_dipolar_direct_sum_sanity_checks() {
  /* left for the future, at this moment nothing to do */

  return 0;
}

int mdds_n_replica = 0;

void mdds_sanity_checks(int n_replica) {
  if (box_geo.periodic(0) and box_geo.periodic(1) and box_geo.periodic(2) and
      n_replica == 0) {
    throw std::runtime_error("Dipolar direct sum with replica does not "
                             "support a periodic system with zero replica.");
  }
}

double
@@ -146,12 +153,6 @@ magnetic_dipolar_direct_sum_calculations(bool force_flag, bool energy_flag,
  assert(n_nodes == 1);
  assert(force_flag || energy_flag);

  if (box_geo.periodic(0) and box_geo.periodic(1) and box_geo.periodic(2) and
      Ncut_off_magnetic_dipolar_direct_sum == 0) {
    throw std::runtime_error("Dipolar direct sum with replica does not support "
                             "a periodic system with zero replica.");
  };

  std::vector<double> x, y, z;
  std::vector<double> mx, my, mz;
  std::vector<double> fx, fy, fz;
@@ -209,9 +210,9 @@ magnetic_dipolar_direct_sum_calculations(bool force_flag, bool energy_flag,

  int NCUT[3];
  for (int i = 0; i < 3; i++) {
    NCUT[i] = box_geo.periodic(i) ? Ncut_off_magnetic_dipolar_direct_sum : 0;
    NCUT[i] = box_geo.periodic(i) ? mdds_n_replica : 0;
  }
  auto const NCUT2 = Utils::sqr(Ncut_off_magnetic_dipolar_direct_sum);
  auto const NCUT2 = Utils::sqr(mdds_n_replica);

  for (int i = 0; i < dip_particles; i++) {
    for (int j = 0; j < dip_particles; j++) {
@@ -298,42 +299,40 @@ magnetic_dipolar_direct_sum_calculations(bool force_flag, bool energy_flag,
return 0.5 * dipole.prefactor * energy;
}

int dawaanr_set_params() {
void dawaanr_set_params() {
  if (n_nodes > 1) {
    runtimeErrorMsg() << "MPI parallelization not supported by "
                      << "DipolarDirectSumCpu.";
    return ES_ERROR;
    throw std::runtime_error(
        "MPI parallelization not supported by DipolarDirectSumCpu.");
  }
  if (dipole.method != DIPOLAR_ALL_WITH_ALL_AND_NO_REPLICA) {
    Dipole::set_method_local(DIPOLAR_ALL_WITH_ALL_AND_NO_REPLICA);
  }
  // also necessary on 1 CPU, does more than just broadcasting
  mpi_bcast_coulomb_params();

  return ES_OK;
}

int mdds_set_params(int n_cut) {
void mdds_set_params(int n_replica) {
  if (n_nodes > 1) {
    runtimeErrorMsg() << "MPI parallelization not supported by "
                      << "DipolarDirectSumWithReplicaCpu.";
    return ES_ERROR;
    throw std::runtime_error(
        "MPI parallelization not supported by DipolarDirectSumWithReplicaCpu.");
  }

  Ncut_off_magnetic_dipolar_direct_sum = n_cut;

  if (Ncut_off_magnetic_dipolar_direct_sum == 0) {
  if (n_replica < 0) {
    throw std::runtime_error("Dipolar direct sum requires n_replica >= 0.");
  }
  mdds_sanity_checks(n_replica);
  if (n_replica == 0) {
    fprintf(stderr, "Careful: the number of extra replicas to take into "
                    "account during the direct sum calculation is zero\n");
  }

  mdds_n_replica = n_replica;

  if (dipole.method != DIPOLAR_DS && dipole.method != DIPOLAR_MDLC_DS) {
    Dipole::set_method_local(DIPOLAR_DS);
  }

  // also necessary on 1 CPU, does more than just broadcasting
  mpi_bcast_coulomb_params();
  return ES_OK;
}

#endif
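
A usage sketch for the refactored setter; the wrapper name `activate_direct_sum` is hypothetical, since the real call site (the script interface) is not part of this diff.

```cpp
#include <iostream>
#include <stdexcept>

// Declared in magnetic_non_p3m_methods.hpp (see below); throws on invalid input.
void mdds_set_params(int n_replica);

// Hypothetical wrapper illustrating how a caller reports the failure.
void activate_direct_sum(int n_replica) {
  try {
    mdds_set_params(n_replica);
  } catch (std::runtime_error const &err) {
    std::cerr << "Could not set up dipolar direct sum: " << err.what() << '\n';
  }
}
```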
src/core/electrostatics_magnetostatics/magnetic_non_p3m_methods.hpp
@@ -53,18 +53,16 @@
double dawaanr_calculations(bool force_flag, bool energy_flag,
ParticleRange const &particles);

/** Switch on DAWAANR magnetostatics.
* @return ES_ERROR, if not on a single CPU
*/
int dawaanr_set_params();
/** Switch on DAWAANR magnetostatics. */
void dawaanr_set_params();

/* =============================================================================
DIRECT SUM FOR MAGNETIC SYSTEMS
=============================================================================
*/

/** Sanity checks for the magnetic dipolar direct sum*/
int magnetic_dipolar_direct_sum_sanity_checks();
/** Sanity checks for the magnetic dipolar direct sum. */
void mdds_sanity_checks(int n_replica);

/** Core of the method: here you compute all the magnetic forces, torques and
* the energy for the whole system using direct sum
@@ -74,12 +72,11 @@ double magnetic_dipolar_direct_sum_calculations(bool force_flag,
ParticleRange const &particles);

/** Switch on direct sum magnetostatics.
* @param n_cut cut off for the explicit summation
* @return ES_ERROR, if not on a single CPU
* @param n_replica Number of replicas to be taken for the explicit summation
*/
int mdds_set_params(int n_cut);
void mdds_set_params(int n_replica);

extern int Ncut_off_magnetic_dipolar_direct_sum;
extern int mdds_n_replica;

#endif /*of ifdef DIPOLES */
#endif /* of ifndef MAG_NON_P3M_H */
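
The description mentions that the new exceptions are tested; those tests are not included in this excerpt. A Boost.Test-style sketch of what such a check could look like, using a stand-in with the same contract as `mdds_set_params()`:

```cpp
#define BOOST_TEST_MODULE mdds_exceptions_sketch
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>

#include <stdexcept>

// Stand-in with the same contract as mdds_set_params(); the commit's real
// tests are not reproduced here.
static void set_params_sketch(int n_replica) {
  if (n_replica < 0)
    throw std::runtime_error("Dipolar direct sum requires n_replica >= 0.");
}

BOOST_AUTO_TEST_CASE(negative_replica_throws) {
  BOOST_CHECK_THROW(set_params_sketch(-1), std::runtime_error);
  BOOST_CHECK_NO_THROW(set_params_sketch(0));
}
```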