Commit

Cleanup changes
zfergus committed Jan 5, 2024
1 parent 00474f4 commit a094f63
Showing 6 changed files with 25 additions and 26 deletions.
5 changes: 4 additions & 1 deletion src/polysolve/nonlinear/Criteria.hpp
@@ -1,10 +1,13 @@
#pragma once

#include <cstddef>
#include <iostream>
#include <Eigen/Core>

namespace polysolve::nonlinear
{
// Source: https://github.com/PatWie/CppNumericalSolvers/blob/7eddf28fa5a8872a956d3c8666055cac2f5a535d/include/cppoptlib/meta.h
// License: MIT

enum class Status
{
NotStarted = -1,
10 changes: 3 additions & 7 deletions src/polysolve/nonlinear/Problem.hpp
@@ -42,16 +42,12 @@ namespace polysolve::nonlinear
/// @param[out] grad Gradient of the function at x.
virtual void gradient(const TVector &x, TVector &grad) = 0;

- // TODO: Add dense Hessian
-
/// @brief Compute the Hessian of the function at x.
/// @param[in] x Degrees of freedom.
/// @param[out] hessian Hessian of the function at x.
virtual void hessian(const TVector &x, TMatrix &hessian)
{
- StiffnessMatrix sparse_hessian;
- hessian(x, sparse_hessian);
- hessian = sparse_hessian;
+ throw std::runtime_error("Dense Hessian not implemented.");
}

/// @brief Compute the Hessian of the function at x.
@@ -63,13 +59,13 @@ namespace polysolve::nonlinear
/// @param x0 Starting point.
/// @param x1 Ending point.
/// @return True if the step is valid, false otherwise.
- virtual bool is_step_valid(const TVector &x0, const TVector &x1) const { return true; }
+ virtual bool is_step_valid(const TVector &x0, const TVector &x1) { return true; }

/// @brief Determine a maximum step size from x0 to x1.
/// @param x0 Starting point.
/// @param x1 Ending point.
/// @return Maximum step size.
- virtual double max_step_size(const TVector &x0, const TVector &x1) const { return 1; }
+ virtual double max_step_size(const TVector &x0, const TVector &x1) { return 1; }

// --- Callbacks ------------------------------------------------------

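A note on the dense-Hessian hunk above: the default implementation no longer converts a sparse Hessian into the dense output, it simply throws, so a problem that actually needs the dense overload must provide it. Below is a minimal sketch of such an override; MyProblem and its quadratic energy are purely illustrative, and the member signatures and the TVector/TMatrix/StiffnessMatrix typedefs are inferred from this commit's hunks, not quoted from the full header.

#include <polysolve/nonlinear/Problem.hpp>

// Illustrative problem: f(x) = 0.5 * ||x||^2, so the Hessian is the identity.
class MyProblem : public polysolve::nonlinear::Problem
{
public:
    double value(const TVector &x) override { return 0.5 * x.squaredNorm(); }

    void gradient(const TVector &x, TVector &grad) override { grad = x; }

    // Sparse Hessian: the overload solvers normally request.
    void hessian(const TVector &x, StiffnessMatrix &hessian) override
    {
        hessian.resize(x.size(), x.size());
        hessian.setIdentity();
    }

    // Dense Hessian: the base class now throws here, so densify our own
    // sparse Hessian instead of relying on a default conversion.
    void hessian(const TVector &x, TMatrix &dense_hessian) override
    {
        StiffnessMatrix sparse_hessian;
        hessian(x, sparse_hessian); // resolves to the sparse overload above
        dense_hessian = TMatrix(sparse_hessian);
    }
};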
14 changes: 7 additions & 7 deletions src/polysolve/nonlinear/Solver.cpp
@@ -286,10 +286,10 @@
"Starting {} with {} solve f₀={:g} "
"(stopping criteria: max_iters={:d} Δf={:g} ‖∇f‖={:g} ‖Δx‖={:g})",
descent_strategy_name(), m_line_search->name(),
- objFunc.value(x), m_stop.iterations,
+ objFunc(x), m_stop.iterations,
m_stop.fDelta, m_stop.gradNorm, m_stop.xDelta);

- update_solver_info(objFunc.value(x));
+ update_solver_info(objFunc(x));
objFunc.post_step(PostStepData(m_current.iterations, solver_info, x, grad));

int f_delta_step_cnt = 0;
@@ -310,7 +310,7 @@
double energy;
{
POLYSOLVE_SCOPED_STOPWATCH("compute objective function", obj_fun_time, m_logger);
- energy = objFunc.value(x);
+ energy = objFunc(x);
}

if (!std::isfinite(energy))
@@ -522,7 +522,7 @@
m_stop.iterations, m_stop.fDelta, m_stop.gradNorm, m_stop.xDelta);

log_times();
- update_solver_info(objFunc.value(x));
+ update_solver_info(objFunc(x));
}

void Solver::reset(const int ndof)
@@ -630,10 +630,10 @@
Eigen::VectorXd x1 = x - direc * gradient_fd_eps;

objFunc.solution_changed(x2);
- double J2 = objFunc.value(x2);
+ double J2 = objFunc(x2);

objFunc.solution_changed(x1);
- double J1 = objFunc.value(x1);
+ double J1 = objFunc(x1);

double fd = (J2 - J1) / 2 / gradient_fd_eps;
double analytic = direc.dot(grad);
@@ -653,7 +653,7 @@
fd::finite_gradient(
x, [&](const Eigen::VectorXd &x_) {
objFunc.solution_changed(x_);
- return objFunc.value(x_);
+ return objFunc(x_);
},
grad_fd, fd::AccuracyOrder::SECOND, gradient_fd_eps);
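The two hunks above are the solver's finite-difference gradient check: the energy is sampled at x ± gradient_fd_eps * direc and the central difference is compared against the analytic directional derivative direc.dot(grad). A self-contained sketch of that check, using only Eigen; the function name, tolerance, and std::function interface are illustrative, not polysolve's API.

#include <Eigen/Core>
#include <algorithm>
#include <cmath>
#include <functional>

// Central-difference check of an analytic gradient along a direction:
// (f(x + eps*direc) - f(x - eps*direc)) / (2*eps) should match direc.dot(grad).
bool gradient_matches_fd(
    const std::function<double(const Eigen::VectorXd &)> &f,
    const Eigen::VectorXd &x,
    const Eigen::VectorXd &grad,
    const Eigen::VectorXd &direc,
    const double eps = 1e-7,
    const double rel_tol = 1e-5)
{
    const double J2 = f(x + eps * direc);
    const double J1 = f(x - eps * direc);
    const double fd = (J2 - J1) / (2 * eps);
    const double analytic = direc.dot(grad);
    return std::abs(fd - analytic) <= rel_tol * std::max(1.0, std::abs(analytic));
}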

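The change repeated throughout this file (and in the line-search files below) replaces objFunc.value(x) with objFunc(x). That only compiles if the problem type is directly callable; the call operator itself is not part of this diff, so the snippet below is just the assumed shape of such a forwarding overload, with a placeholder energy.

#include <Eigen/Core>

// Assumed shape of the call operator (not shown in this diff): evaluating the
// problem object forwards to value(), so objFunc(x) == objFunc.value(x).
struct CallableProblem
{
    double value(const Eigen::VectorXd &x) const { return 0.5 * x.squaredNorm(); } // placeholder energy

    double operator()(const Eigen::VectorXd &x) const { return value(x); }
};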
12 changes: 6 additions & 6 deletions src/polysolve/nonlinear/Solver.hpp
@@ -64,7 +64,7 @@

virtual ~Solver() = default;

- const TCriteria &get_stop_criteria() { return this->m_stop; }
+ const TCriteria &get_stop_criteria() { return m_stop; }
void set_stop_criteria(const TCriteria &s) { m_stop = s; }

const TCriteria &criteria() { return m_current; }
@@ -84,13 +84,13 @@

bool converged() const
{
- return this->m_status == Status::XDeltaTolerance
- || this->m_status == Status::FDeltaTolerance
- || this->m_status == Status::GradNormTolerance;
+ return m_status == Status::XDeltaTolerance
+ || m_status == Status::FDeltaTolerance
+ || m_status == Status::GradNormTolerance;
}

- size_t max_iterations() const { return this->m_stop.iterations; }
- size_t &max_iterations() { return this->m_stop.iterations; }
+ size_t max_iterations() const { return m_stop.iterations; }
+ size_t &max_iterations() { return m_stop.iterations; }
bool allow_out_of_iterations = false;

void add_strategy(const std::shared_ptr<DescentStrategy> &s) { m_strategies.push_back(s); }
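The accessors touched above (get_stop_criteria / set_stop_criteria, converged, max_iterations) are the public knobs around m_stop and m_status. A small sketch of how they fit together; the helper below is illustrative and templated so it assumes nothing about the solver beyond the members shown in this hunk, and the actual solve call is omitted.

#include <cstddef>

// Illustrative helper: tighten the iteration cap and report convergence for any
// solver exposing the accessors shown above (SolverT stands in for the Solver type).
template <typename SolverT>
bool solve_with_iteration_cap(SolverT &solver, const size_t max_iters)
{
    auto stop = solver.get_stop_criteria(); // copy of the stopping criteria
    stop.iterations = max_iters;            // the field max_iterations() reads back
    solver.set_stop_criteria(stop);

    // ... run the minimization here (the driver call is not part of this hunk) ...

    return solver.converged(); // true only for the X/F/GradNorm tolerance statuses
}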
2 changes: 1 addition & 1 deletion src/polysolve/nonlinear/line_search/Backtracking.cpp
@@ -45,7 +45,7 @@
continue;
}

- const double new_energy = objFunc.value(new_x);
+ const double new_energy = objFunc(new_x);

if (!std::isfinite(new_energy))
{
8 changes: 4 additions & 4 deletions src/polysolve/nonlinear/line_search/LineSearch.cpp
@@ -96,7 +96,7 @@

cur_iter = 0;

- initial_energy = objFunc.value(x);
+ initial_energy = objFunc(x);
if (std::isnan(initial_energy))
{
m_logger.error("Original energy in line search is nan!");
@@ -166,7 +166,7 @@
}
}

- const double cur_energy = objFunc.value(x + step_size * delta_x);
+ const double cur_energy = objFunc(x + step_size * delta_x);

const double descent_step_size = step_size;

@@ -179,7 +179,7 @@
objFunc.solution_changed(x);

// tolerance for rounding error due to multithreading
- assert(abs(initial_energy - objFunc.value(x)) < 1e-15);
+ assert(abs(initial_energy - objFunc(x)) < 1e-15);

objFunc.line_search_end();
return NaN;
@@ -211,7 +211,7 @@
while (step_size > current_min_step_size() && cur_iter < current_max_step_size_iter())
{
// Compute the new energy value without contacts
- const double energy = objFunc.value(new_x);
+ const double energy = objFunc(new_x);
const bool is_step_valid = objFunc.is_step_valid(x, new_x);

if (!std::isfinite(energy) || !is_step_valid)
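The last hunk above sits inside the loop that filters candidate step sizes: evaluate the trial point, reject it if the energy is not finite or the step is invalid, and keep shrinking. A self-contained sketch of that pattern; the halving factor, the decrease test, the defaults, and the std::function interface are illustrative, not polysolve's actual line-search parameters.

#include <Eigen/Core>
#include <cmath>
#include <functional>

// Backtracking on the step size: keep halving until the trial point has a
// finite energy, passes the validity test, and decreases the energy, or give up.
double backtrack_step_size(
    const std::function<double(const Eigen::VectorXd &)> &energy,
    const std::function<bool(const Eigen::VectorXd &, const Eigen::VectorXd &)> &is_step_valid,
    const Eigen::VectorXd &x,
    const Eigen::VectorXd &delta_x,
    double step_size,
    const double min_step_size = 1e-10,
    const int max_iters = 30)
{
    const double initial_energy = energy(x);
    for (int i = 0; i < max_iters && step_size > min_step_size; ++i)
    {
        const Eigen::VectorXd new_x = x + step_size * delta_x;
        const double new_energy = energy(new_x);
        if (std::isfinite(new_energy) && is_step_valid(x, new_x) && new_energy < initial_energy)
            return step_size; // acceptable step
        step_size *= 0.5; // reject and shrink
    }
    return 0; // no acceptable step found
}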

0 comments on commit a094f63
