
Commit

References for 'BlueBrain/nmodl#1403'.
GitHub Actions Bot committed Aug 19, 2024
1 parent 66d5be8 commit 398bac2
Showing 9 changed files with 1,735 additions and 637 deletions.
110 changes: 19 additions & 91 deletions global/neuron/thread_newton.cpp
@@ -243,15 +243,28 @@ namespace newton {
* @brief Solver implementation details
*
* Implementation of the Newton method for solving systems of non-linear equations using Eigen
* - newton::newton_solver is the preferred option: requires user to provide Jacobian
* - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required
* - newton::newton_solver with user, e.g. SymPy, provided Jacobian
*
* @{
*/

static constexpr int MAX_ITER = 1e3;
static constexpr int MAX_ITER = 50;
static constexpr double EPS = 1e-12;

template <int N>
bool is_converged(const Eigen::Matrix<double, N, 1>& X,
const Eigen::Matrix<double, N, N>& J,
const Eigen::Matrix<double, N, 1>& F,
double eps) {
for (Eigen::Index i = 0; i < N; ++i) {
double square_error = J(i, Eigen::all).cwiseAbs2() * (eps * X).cwiseAbs2();
if (F(i) * F(i) > square_error) {
return false;
}
}
return true;
}
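
Informally (a sketch of what the check above computes, not text from the commit), X is accepted once each residual component is no larger than the error that a relative perturbation of size eps in X would propagate through the corresponding row of the Jacobian:

\[
    F_i(X)^2 \;\le\; \sum_{j} J_{ij}^2 \, (\epsilon X_j)^2 \qquad \text{for all } i
\]

so convergence is judged component-wise and relative to the scale of X, rather than against the absolute norm of F used previously.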

/**
* \brief Newton method with user-provided Jacobian
*
@@ -273,17 +286,14 @@ EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix<double, N, 1>& X,
int max_iter = MAX_ITER) {
// Vector to store result of function F(X):
Eigen::Matrix<double, N, 1> F;
// Matrix to store jacobian of F(X):
// Matrix to store Jacobian of F(X):
Eigen::Matrix<double, N, N> J;
// Solver iteration count:
int iter = -1;
while (++iter < max_iter) {
// calculate F, J from X using user-supplied functor
functor(X, F, J);
// get error norm: here we use sqrt(|F|^2)
double error = F.norm();
if (error < eps) {
// we have converged: return iteration count
if (is_converged(X, J, F, eps)) {
return iter;
}
// In Eigen the default storage order is ColMajor.
@@ -307,88 +317,6 @@ EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix<double, N, 1>& X,
return -1;
}
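
A minimal usage sketch of the overload above (illustrative only: ExampleFunctor, the 2x2 system, and solve_example are hypothetical, and the call assumes it sits where the newton namespace opened at the top of this hunk is visible):

#include <Eigen/Dense>

// Hypothetical 2x2 system: F0 = x0^2 + x1 - 3, F1 = x0 - x1 + 1
struct ExampleFunctor {
    EIGEN_DEVICE_FUNC void operator()(const Eigen::Matrix<double, 2, 1>& X,
                                      Eigen::Matrix<double, 2, 1>& F,
                                      Eigen::Matrix<double, 2, 2>& J) const {
        F[0] = X[0] * X[0] + X[1] - 3.0;
        F[1] = X[0] - X[1] + 1.0;
        J(0, 0) = 2.0 * X[0];  // dF0/dx0
        J(0, 1) = 1.0;         // dF0/dx1
        J(1, 0) = 1.0;         // dF1/dx0
        J(1, 1) = -1.0;        // dF1/dx1
    }
};

int solve_example() {
    Eigen::Matrix<double, 2, 1> X;
    X << 1.0, 1.0;  // initial guess
    // Returns the iteration count, or -1 if max_iter is reached without convergence.
    return newton::newton_solver<2>(X, ExampleFunctor{});
}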

static constexpr double SQUARE_ROOT_ULP = 1e-7;
static constexpr double CUBIC_ROOT_ULP = 1e-5;

/**
* \brief Newton method without user-provided Jacobian
*
* Newton method without user-provided Jacobian: given initial vector X and a
* functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with
* initial value of `X` by iterating:
*
* \f[
* X_{n+1} = X_n - J(X_n)^{-1} F(X_n)
* \f]
*
* where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically
* using a symmetric finite difference approximation to the derivative.
* When \f$|F|^2 < eps^2\f$, the solution has converged.
*
* @return number of iterations (-1 if failed to converge)
*/
template <int N, typename FUNC>
EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix<double, N, 1>& X,
FUNC functor,
double eps = EPS,
int max_iter = MAX_ITER) {
// Vector to store result of function F(X):
Eigen::Matrix<double, N, 1> F;
// Temporary storage for F(X+dx)
Eigen::Matrix<double, N, 1> F_p;
// Temporary storage for F(X-dx)
Eigen::Matrix<double, N, 1> F_m;
// Matrix to store jacobian of F(X):
Eigen::Matrix<double, N, N> J;
// Solver iteration count:
int iter = 0;
while (iter < max_iter) {
// calculate F from X using user-supplied functor
functor(X, F);
// get error norm: here we use sqrt(|F|^2)
double error = F.norm();
if (error < eps) {
// we have converged: return iteration count
return iter;
}
++iter;
// calculate approximate Jacobian
for (int i = 0; i < N; ++i) {
// symmetric finite difference approximation to derivative
// df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx)
// choose dx to be ~(ULP)^{1/3}*X[i]
// https://aip.scitation.org/doi/pdf/10.1063/1.4822971
// also enforce a lower bound ~sqrt(ULP) to avoid dx being too small
double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP);
// F(X + dX)
X[i] += dX;
functor(X, F_p);
// F(X - dX)
X[i] -= 2.0 * dX;
functor(X, F_m);
F_p -= F_m;
// J = (F(X + dX) - F(X - dX)) / (2*dX)
J.col(i) = F_p / (2.0 * dX);
// restore X
X[i] += dX;
}
if (!J.IsRowMajor) {
J.transposeInPlace();
}
Eigen::Matrix<int, N, 1> pivot;
Eigen::Matrix<double, N, 1> rowmax;
// Check if J is singular
if (nmodl::crout::Crout<double>(N, J.data(), pivot.data(), rowmax.data()) < 0) {
return -1;
}
Eigen::Matrix<double, N, 1> X_solve;
nmodl::crout::solveCrout<double>(N, J.data(), F.data(), X_solve.data(), pivot.data());
X -= X_solve;
}
// If we fail to converge after max_iter iterations, return -1
return -1;
}
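
For contrast with the Jacobian overload, a sketch of how this (now removed) Jacobian-free fallback was called: the functor fills only F, and the Jacobian is approximated internally by the central differences shown in the loop. ExampleResidual and solve_example_without_jacobian are hypothetical names, and the call only makes sense against pre-commit sources that still provide this overload.

// Residual-only functor: same hypothetical 2x2 system, but no Jacobian.
struct ExampleResidual {
    void operator()(const Eigen::Matrix<double, 2, 1>& X,
                    Eigen::Matrix<double, 2, 1>& F) const {
        F[0] = X[0] * X[0] + X[1] - 3.0;
        F[1] = X[0] - X[1] + 1.0;
    }
};

int solve_example_without_jacobian() {
    Eigen::Matrix<double, 2, 1> X;
    X << 1.0, 1.0;  // initial guess
    return newton::newton_numerical_diff_solver<2>(X, ExampleResidual{});
}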

/**
* Newton method template specializations for \f$N <= 4\f$. Use explicit inverse
* of `F` instead of LU decomposition. This is faster, as there is no pivoting
@@ -407,7 +335,7 @@ EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix<double, N, 1>& X,
while (++iter < max_iter) {
functor(X, F, J);
double error = F.norm();
if (error < eps) {
if (is_converged(X, J, F, eps)) {
return iter;
}
// The inverse can be called from within OpenACC regions without any issue, as opposed to
110 changes: 19 additions & 91 deletions kinetic/coreneuron/X2Y.cpp
@@ -252,15 +252,28 @@ namespace newton {
* @brief Solver implementation details
*
* Implementation of the Newton method for solving systems of non-linear equations using Eigen
* - newton::newton_solver is the preferred option: requires user to provide Jacobian
* - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required
* - newton::newton_solver with user, e.g. SymPy, provided Jacobian
*
* @{
*/

static constexpr int MAX_ITER = 1e3;
static constexpr int MAX_ITER = 50;
static constexpr double EPS = 1e-12;

template <int N>
bool is_converged(const Eigen::Matrix<double, N, 1>& X,
const Eigen::Matrix<double, N, N>& J,
const Eigen::Matrix<double, N, 1>& F,
double eps) {
for (Eigen::Index i = 0; i < N; ++i) {
double square_error = J(i, Eigen::all).cwiseAbs2() * (eps * X).cwiseAbs2();
if (F(i) * F(i) > square_error) {
return false;
}
}
return true;
}

/**
* \brief Newton method with user-provided Jacobian
*
@@ -282,17 +295,14 @@ EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix<double, N, 1>& X,
int max_iter = MAX_ITER) {
// Vector to store result of function F(X):
Eigen::Matrix<double, N, 1> F;
// Matrix to store jacobian of F(X):
// Matrix to store Jacobian of F(X):
Eigen::Matrix<double, N, N> J;
// Solver iteration count:
int iter = -1;
while (++iter < max_iter) {
// calculate F, J from X using user-supplied functor
functor(X, F, J);
// get error norm: here we use sqrt(|F|^2)
double error = F.norm();
if (error < eps) {
// we have converged: return iteration count
if (is_converged(X, J, F, eps)) {
return iter;
}
// In Eigen the default storage order is ColMajor.
@@ -316,88 +326,6 @@ EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix<double, N, 1>& X,
return -1;
}

static constexpr double SQUARE_ROOT_ULP = 1e-7;
static constexpr double CUBIC_ROOT_ULP = 1e-5;

/**
* \brief Newton method without user-provided Jacobian
*
* Newton method without user-provided Jacobian: given initial vector X and a
* functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with
* initial value of `X` by iterating:
*
* \f[
* X_{n+1} = X_n - J(X_n)^{-1} F(X_n)
* \f]
*
* where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically
* using a symmetric finite difference approximation to the derivative.
* When \f$|F|^2 < eps^2\f$, the solution has converged.
*
* @return number of iterations (-1 if failed to converge)
*/
template <int N, typename FUNC>
EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix<double, N, 1>& X,
FUNC functor,
double eps = EPS,
int max_iter = MAX_ITER) {
// Vector to store result of function F(X):
Eigen::Matrix<double, N, 1> F;
// Temporary storage for F(X+dx)
Eigen::Matrix<double, N, 1> F_p;
// Temporary storage for F(X-dx)
Eigen::Matrix<double, N, 1> F_m;
// Matrix to store jacobian of F(X):
Eigen::Matrix<double, N, N> J;
// Solver iteration count:
int iter = 0;
while (iter < max_iter) {
// calculate F from X using user-supplied functor
functor(X, F);
// get error norm: here we use sqrt(|F|^2)
double error = F.norm();
if (error < eps) {
// we have converged: return iteration count
return iter;
}
++iter;
// calculate approximate Jacobian
for (int i = 0; i < N; ++i) {
// symmetric finite difference approximation to derivative
// df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx)
// choose dx to be ~(ULP)^{1/3}*X[i]
// https://aip.scitation.org/doi/pdf/10.1063/1.4822971
// also enforce a lower bound ~sqrt(ULP) to avoid dx being too small
double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP);
// F(X + dX)
X[i] += dX;
functor(X, F_p);
// F(X - dX)
X[i] -= 2.0 * dX;
functor(X, F_m);
F_p -= F_m;
// J = (F(X + dX) - F(X - dX)) / (2*dX)
J.col(i) = F_p / (2.0 * dX);
// restore X
X[i] += dX;
}
if (!J.IsRowMajor) {
J.transposeInPlace();
}
Eigen::Matrix<int, N, 1> pivot;
Eigen::Matrix<double, N, 1> rowmax;
// Check if J is singular
if (nmodl::crout::Crout<double>(N, J.data(), pivot.data(), rowmax.data()) < 0) {
return -1;
}
Eigen::Matrix<double, N, 1> X_solve;
nmodl::crout::solveCrout<double>(N, J.data(), F.data(), X_solve.data(), pivot.data());
X -= X_solve;
}
// If we fail to converge after max_iter iterations, return -1
return -1;
}

/**
* Newton method template specializations for \f$N <= 4\f$. Use explicit inverse
* of `F` instead of LU decomposition. This is faster, as there is no pivoting
@@ -416,7 +344,7 @@ EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix<double, N, 1>& X,
while (++iter < max_iter) {
functor(X, F, J);
double error = F.norm();
if (error < eps) {
if (is_converged(X, J, F, eps)) {
return iter;
}
// The inverse can be called from within OpenACC regions without any issue, as opposed to