Line | Branch | Exec | Source |
---|---|---|---|
1 | // -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- | ||
2 | // vi: set et ts=4 sw=4 sts=4: | ||
3 | // | ||
4 | // SPDX-FileCopyrightText: Copyright © DuMux Project contributors, see AUTHORS.md in root folder | ||
5 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
6 | // | ||
7 | /*! | ||
8 | * \file | ||
9 | * \ingroup Nonlinear | ||
10 | * \brief Levenberg-Marquardt algorithm for solving nonlinear least squares problems | ||
11 | */ | ||
12 | #ifndef DUMUX_NONLINEAR_LEASTSQUARES_HH | ||
13 | #define DUMUX_NONLINEAR_LEASTSQUARES_HH | ||
14 | |||
15 | #include <iostream> | ||
16 | #include <cmath> | ||
17 | #include <memory> | ||
18 | |||
19 | #include <dune/common/exceptions.hh> | ||
20 | #include <dune/istl/bvector.hh> | ||
21 | #include <dune/istl/matrix.hh> | ||
22 | #if HAVE_SUITESPARSE_CHOLMOD | ||
23 | #include <dune/istl/cholmod.hh> | ||
24 | #endif // HAVE_SUITESPARSE_CHOLMOD | ||
25 | #include <dune/istl/io.hh> | ||
26 | |||
27 | #include <dumux/common/parameters.hh> | ||
28 | #include <dumux/common/exceptions.hh> | ||
29 | #include <dumux/io/format.hh> | ||
30 | #include <dumux/nonlinear/newtonsolver.hh> | ||
31 | |||
32 | namespace Dumux::Optimization { | ||
33 | |||
34 | //! a solver base class | ||
35 | template<class Variables> | ||
36 | 5 | class Solver | |
37 | { | ||
38 | public: | ||
39 | virtual ~Solver() = default; | ||
40 | virtual bool apply(Variables& vars) = 0; | ||
41 | }; | ||
42 | |||
43 | } // end namespace Dumux::Optimization | ||
44 | |||
45 | #ifndef DOXYGEN // exclude from doxygen | ||
46 | // helper code for curve fitting | ||
47 | namespace Dumux::Optimization::Detail { | ||
48 | |||
49 | template<class T, class F> | ||
50 | class Assembler | ||
51 | { | ||
52 | public: | ||
53 | using Scalar = T; | ||
54 | using JacobianMatrix = Dune::Matrix<T>; | ||
55 | using ResidualType = Dune::BlockVector<T>; | ||
56 | using SolutionVector = Dune::BlockVector<T>; | ||
57 | using Variables = Dune::BlockVector<T>; | ||
58 | |||
59 | /*! | ||
60 | * \brief Assembler for the Levenberg-Marquardt linear system: [J^T J + λ diag(J^T J)] x = J^T r | ||
61 | * \param f The residual function r = f(x) to minimize the norm of (\f$ f: \mathbb{R}^n \mapsto \mathbb{R}^m \f$) | ||
62 | * \param x0 Initial guess for the parameters (vector of size \f$n\f$) | ||
63 | * \param residualSize The number of residuals (\f$m\f$) | ||
64 | */ | ||
65 | 10 | Assembler(const F& f, const SolutionVector& x0, std::size_t residualSize) | |
66 | 1/2 | 10 | : prevSol_(x0), f_(f), solSize_(x0.size()), residualSize_(residualSize) |
67 | 2/4 | 10 | , JT_(solSize_, residualSize_), regularizedJTJ_(solSize_, solSize_) |
68 | 2/4 | 10 | , residual_(residualSize_), projectedResidual_(solSize_) |
69 | { | ||
70 | 1/2 | 10 | printMatrix_ = getParamFromGroup<bool>("LevenbergMarquardt", "PrintMatrix", false); |
71 | 1/2 | 10 | baseEpsilon_ = getParamFromGroup<Scalar>("LevenbergMarquardt", "BaseEpsilon", 1e-1); |
72 | 10 | } | |
73 | |||
74 | //! initialize the linear system variables | ||
75 | 10 | void setLinearSystem() | |
76 | { | ||
77 | 10 | std::cout << "Setting up linear system with " << solSize_ << " variables and " | |
78 | 10 | << residualSize_ << " residuals." << std::endl; | |
79 | |||
80 | 2/2 | 156 | JT_ = 0.0; |
81 | 2/2 | 510 | regularizedJTJ_ = 0.0; |
82 | 2/2 | 46 | residual_ = 0.0; |
83 | 10 | projectedResidual_ = 0.0; | |
84 | 10 | } | |
85 | |||
86 | //! interface for the Newton scheme: this is J^T J + λ diag(J^T J) | ||
87 | 5/10 | 21 | JacobianMatrix& jacobian() { return regularizedJTJ_; } |
88 | |||
89 | //! interface for the Newton scheme: this is J^T r | ||
90 | 5/30 | 21 | ResidualType& residual() { return projectedResidual_; } |
91 | |||
92 | //! regularization parameter λ | ||
93 | 15 | void setLambda(const T lambda) { lambda_ = lambda; } | |
94 | 10/30 | 40 | T lambda() { return lambda_; } |
95 | |||
96 | //! assemble the right hand side of the linear system | ||
97 | 42 | void assembleResidual(const SolutionVector& x) | |
98 | { | ||
99 | 2/2 | 7484 | projectedResidual_ = 0.0; |
100 | 42 | JT_ = 0.0; | |
101 | |||
102 | // assemble residual, J^T, and projected residual, J^T r | ||
103 | 4/6 | 486 | for (auto rowIt = JT_.begin(); rowIt != JT_.end(); ++rowIt) |
104 | { | ||
105 | 148 | const auto paramIdx = rowIt.index(); | |
106 | 1/2 | 148 | const auto residual = f_(x); |
107 | 1/2 | 148 | auto p = x; |
108 | 148 | const auto eps = baseEpsilon_; | |
109 | 1/2 | 148 | p[paramIdx] = x[paramIdx] + eps; |
110 | 1/2 | 148 | auto deflectedResidual = f_(p); |
111 | 2/2 | 7548 | deflectedResidual -= residual; |
112 | 7548 | deflectedResidual /= eps; | |
113 | 2/2 | 7548 | for (auto colIt = rowIt->begin(); colIt != rowIt->end(); ++colIt) |
114 | 7400 | *colIt += deflectedResidual[colIt.index()]; | |
115 | |||
116 | 1/2 | 148 | projectedResidual_[paramIdx] = residual * deflectedResidual; |
117 | } | ||
118 | |||
119 | 1/2 | 42 | if (printMatrix_) |
120 | { | ||
121 | ✗ | std::cout << std::endl << "J^T = " << std::endl; | |
122 | ✗ | Dune::printmatrix(std::cout, JT_, "", ""); | |
123 | } | ||
124 | 42 | } | |
125 | |||
126 | //! assemble the left hand side of the linear system | ||
127 | 42 | void assembleJacobianAndResidual(const SolutionVector& x) | |
128 | { | ||
129 | 42 | assembleResidual(x); | |
130 | |||
131 | 42 | regularizedJTJ_ = 0.0; | |
132 | 2/2 | 190 | for (auto rowIt = regularizedJTJ_.begin(); rowIt != regularizedJTJ_.end(); ++rowIt) |
133 | { | ||
134 | 2/2 | 696 | for (auto colIt = rowIt->begin(); colIt != rowIt->end(); ++colIt) |
135 | { | ||
136 | 2/2 | 27948 | for (int j = 0; j < residualSize_; ++j) |
137 | 27400 | *colIt += JT_[rowIt.index()][j]*JT_[colIt.index()][j]; | |
138 | |||
139 | 2/2 | 548 | if (rowIt.index() == colIt.index()) |
140 | { | ||
141 | 1/2 | 148 | *colIt += lambda_*(*colIt); |
142 | |||
143 | 1/2 | 148 | if (*colIt == 0.0) |
144 | ✗ | *colIt = 1e-6*lambda_; | |
145 | } | ||
146 | } | ||
147 | } | ||
148 | |||
149 | 1/2 | 42 | if (printMatrix_) |
150 | { | ||
151 | ✗ | std::cout << std::endl << "J^T J + λ diag(J^T J) = " << std::endl; | |
152 | ✗ | Dune::printmatrix(std::cout, regularizedJTJ_, "", ""); | |
153 | } | ||
154 | 42 | } | |
155 | |||
156 | //! this is the actual cost function we are minimizing MSE = ||f_(x)||^2 | ||
157 | 52 | T computeMeanSquaredError(const SolutionVector& x) const | |
158 | 1/2 | 104 | { return f_(x).two_norm2(); } |
159 | |||
160 | //! initial guess to be able to reset the solver | ||
161 | 5 | const SolutionVector& prevSol() const | |
162 | 5/10 | 5 | { return prevSol_; } |
163 | |||
164 | private: | ||
165 | SolutionVector prevSol_; // initial guess | ||
166 | const F& f_; //!< the residual function to minimize the norm of | ||
167 | std::size_t solSize_, residualSize_; | ||
168 | |||
169 | JacobianMatrix JT_; // J^T | ||
170 | JacobianMatrix regularizedJTJ_; // J^T J + λ diag(J^T J) | ||
171 | ResidualType residual_; // r = f_(x) | ||
172 | ResidualType projectedResidual_; // J^T r | ||
173 | |||
174 | Scalar lambda_ = 0.0; // regularization parameter | ||
175 | Scalar baseEpsilon_; // for numerical differentiation | ||
176 | |||
177 | bool printMatrix_ = false; | ||
178 | }; | ||
179 | |||
180 | #if HAVE_SUITESPARSE_CHOLMOD | ||
181 | template<class Matrix, class Vector> | ||
182 | class CholmodLinearSolver | ||
183 | { | ||
184 | public: | ||
185 | 5/10 | 5 | void setResidualReduction(double residualReduction) {} |
186 | |||
187 | 21 | bool solve(const Matrix& A, Vector& x, const Vector& b) const | |
188 | { | ||
189 | 21 | Dune::Cholmod<Vector> solver; // matrix is symmetric | |
190 | 1/2 | 21 | solver.setMatrix(A); |
191 | 21 | Dune::InverseOperatorResult r; | |
192 | 1/2 | 21 | auto bCopy = b; |
193 | 1/2 | 21 | auto xCopy = x; |
194 | 1/2 | 21 | solver.apply(xCopy, bCopy, r); |
195 | 21 | checkResult_(xCopy, r); | |
196 | 21 | if (!r.converged ) | |
197 | ✗ | DUNE_THROW(NumericalProblem, "Linear solver did not converge."); | |
198 | 1/2 | 21 | x = xCopy; |
199 | 1/2 | 42 | return r.converged; |
200 | 42 | } | |
201 | |||
202 | 5 | auto norm(const Vector& residual) const | |
203 | 5 | { return residual.two_norm(); } | |
204 | |||
205 | private: | ||
206 | 21 | void checkResult_(Vector& x, Dune::InverseOperatorResult& result) const | |
207 | { | ||
208 | 1/2 | 21 | flatVectorForEach(x, [&](auto&& entry, std::size_t){ |
209 | using std::isnan, std::isinf; | ||
210 | 2/4 | 74 | if (isnan(entry) || isinf(entry)) |
211 | ✗ | result.converged = false; | |
212 | }); | ||
213 | } | ||
214 | }; | ||
215 | #endif // HAVE_SUITESPARSE_CHOLMOD | ||
216 | |||
217 | } // end namespace Dumux::Optimization::Detail | ||
218 | #endif // DOXYGEN | ||
219 | |||
220 | namespace Dumux::Optimization::Detail { | ||
221 | |||
222 | /*! | ||
223 | * \ingroup Nonlinear | ||
224 | * \brief A nonlinear least squares solver with \f$n\f$ model parameters and \f$m\f$ observations | ||
225 | * \tparam Assembler a class that assembles the linear system of equations in each iteration | ||
226 | * \tparam LinearSolver a class that solves the linear system of equations | ||
227 | * \note The assembler has to assemble the actual least squares problem (i.e. the normal equations) | ||
228 | */ | ||
229 | template<class Assembler, class LinearSolver> | ||
230 | 5 | class LevenbergMarquardt : private Dumux::NewtonSolver<Assembler, LinearSolver, DefaultPartialReassembler, Dune::Communication<Dune::No_Comm>> | |
231 | { | ||
232 | using ParentType = Dumux::NewtonSolver<Assembler, LinearSolver, DefaultPartialReassembler, Dune::Communication<Dune::No_Comm>>; | ||
233 | using Scalar = typename Assembler::Scalar; | ||
234 | using Variables = typename Assembler::Variables; | ||
235 | using SolutionVector = typename Assembler::SolutionVector; | ||
236 | using ResidualVector = typename Assembler::ResidualType; | ||
237 | using Backend = typename ParentType::Backend; | ||
238 | public: | ||
239 | using ParentType::ParentType; | ||
240 | |||
241 | 10 | LevenbergMarquardt(std::shared_ptr<Assembler> assembler, | |
242 | std::shared_ptr<LinearSolver> linearSolver, | ||
243 | int verbosity = 0) | ||
244 | 4/10 | 30 | : ParentType(assembler, linearSolver, {}, "", "LevenbergMarquardt", verbosity) |
245 | { | ||
246 | 1/2 | 10 | assembler->setLambda(getParamFromGroup<Scalar>(ParentType::paramGroup(), "LevenbergMarquardt.LambdaInitial", 0.001)); |
247 | 1/2 | 10 | maxLambdaDivisions_ = getParamFromGroup<std::size_t>(ParentType::paramGroup(), "LevenbergMarquardt.MaxLambdaDivisions", 10); |
248 | |||
249 | 10 | this->setUseLineSearch(); | |
250 | 10 | this->setMaxRelativeShift(1e-3); | |
251 | ✗ | } | |
252 | |||
253 | /*! | ||
254 | * \brief Solve the nonlinear least squares problem | ||
255 | * \param vars instance of the `Variables` class representing a numerical | ||
256 | * solution, defining primary and possibly secondary variables | ||
257 | * and information on the time level. | ||
258 | * \return bool true if the solver converged | ||
259 | * \post If converged, the given `Variables` will represent the solution. If the solver | ||
260 | * does not converge, the `Variables` will represent the best solution so far (lowest value of the objective function) | ||
261 | */ | ||
262 | 10 | bool apply(Variables& vars) override | |
263 | { | ||
264 | // try solving the non-linear system | ||
265 | 1/2 | 10 | for (std::size_t i = 0; i <= maxLambdaDivisions_; ++i) |
266 | { | ||
267 | // linearize & solve | ||
268 | 10 | const bool converged = ParentType::apply(vars); | |
269 | |||
270 | 1/2 | 10 | if (converged) |
271 | return true; | ||
272 | |||
273 | ✗ | else if (!converged && i < maxLambdaDivisions_) | |
274 | { | ||
275 | ✗ | if (this->verbosity() >= 1) | |
276 | ✗ | std::cout << Fmt::format("LevenbergMarquardt solver did not converge with λ = {:.2e}. ", ParentType::assembler().lambda()) | |
277 | ✗ | << Fmt::format("Retrying with λ = {:.2e}.\n", ParentType::assembler().lambda() * 9); | |
278 | |||
279 | ✗ | ParentType::assembler().setLambda(ParentType::assembler().lambda() * 9); | |
280 | } | ||
281 | |||
282 | // convergence criterion not fulfilled | ||
283 | // return best solution so far | ||
284 | else | ||
285 | { | ||
286 | ✗ | this->solutionChanged_(vars, bestSol_); | |
287 | ✗ | if (this->verbosity() >= 1) | |
288 | ✗ | std::cout << Fmt::format("Choose best solution so far with a MSE of {:.4e}", minResidual_) << std::endl; | |
289 | |||
290 | ✗ | return false; | |
291 | } | ||
292 | } | ||
293 | |||
294 | return false; | ||
295 | } | ||
296 | |||
297 | private: | ||
298 | 42 | void newtonEndStep(Variables &vars, const SolutionVector &uLastIter) override | |
299 | { | ||
300 | 42 | ParentType::newtonEndStep(vars, uLastIter); | |
301 | |||
302 | 1/2 | 42 | if (ParentType::reduction_ > ParentType::lastReduction_) |
303 | { | ||
304 | ✗ | if (ParentType::assembler().lambda() < 1e5) | |
305 | { | ||
306 | ✗ | ParentType::assembler().setLambda(ParentType::assembler().lambda() * 9); | |
307 | ✗ | if (this->verbosity() > 0) | |
308 | ✗ | std::cout << "-- Increasing λ to " << ParentType::assembler().lambda(); | |
309 | } | ||
310 | else | ||
311 | { | ||
312 | ✗ | if (this->verbosity() > 0) | |
313 | ✗ | std::cout << "-- Keeping λ at " << ParentType::assembler().lambda(); | |
314 | } | ||
315 | } | ||
316 | else | ||
317 | { | ||
318 | 4/4 | 42 | if (ParentType::reduction_ < 0.1 && ParentType::assembler().lambda() > 1e-5) |
319 | { | ||
320 | 1/2 | 20 | ParentType::assembler().setLambda(ParentType::assembler().lambda() / 11.0); |
321 | 1/2 | 20 | if (this->verbosity() > 0) |
322 | 20 | std::cout << "-- Decreasing λ to " << ParentType::assembler().lambda(); | |
323 | } | ||
324 | else | ||
325 | { | ||
326 | 1/2 | 22 | if (this->verbosity() > 0) |
327 | 22 | std::cout << "-- Keeping λ at " << ParentType::assembler().lambda(); | |
328 | } | ||
329 | } | ||
330 | |||
331 | 1/2 | 42 | if (this->verbosity() > 0) |
332 | 42 | std::cout << ", error reduction: " << ParentType::reduction_ | |
333 | 42 | << " and mean squared error: " << ParentType::residualNorm_ << std::endl; | |
334 | |||
335 | // store best solution | ||
336 | 1/2 | 42 | if (ParentType::residualNorm_ < minResidual_) |
337 | { | ||
338 | 42 | minResidual_ = ParentType::residualNorm_; | |
339 | 42 | bestSol_ = uLastIter; | |
340 | } | ||
341 | 42 | } | |
342 | |||
343 | 42 | void lineSearchUpdate_(Variables& vars, | |
344 | const SolutionVector& uLastIter, | ||
345 | const ResidualVector& deltaU) override | ||
346 | { | ||
347 | 42 | alpha_ = 1.0; | |
348 | 42 | auto uCurrentIter = uLastIter; | |
349 | |||
350 | while (true) | ||
351 | { | ||
352 | 42 | Backend::axpy(-alpha_, deltaU, uCurrentIter); | |
353 | 1/2 | 42 | this->solutionChanged_(vars, uCurrentIter); |
354 | |||
355 | 1/2 | 42 | ParentType::residualNorm_ = ParentType::assembler().computeMeanSquaredError(Backend::dofs(vars)); |
356 | 2/2 | 42 | if (ParentType::numSteps_ == 0) |
357 | 1/2 | 10 | ParentType::initialResidual_ = ParentType::assembler().computeMeanSquaredError(ParentType::assembler().prevSol()); |
358 | 42 | ParentType::reduction_ = ParentType::residualNorm_ / ParentType::initialResidual_; | |
359 | |||
360 | 1/4 | 42 | if (ParentType::reduction_ < ParentType::lastReduction_ || alpha_ <= 0.001) |
361 | { | ||
362 | 1/4 | 84 | ParentType::endIterMsgStream_ << Fmt::format(", residual reduction {:.4e}->{:.4e}@α={:.4f}", ParentType::lastReduction_, ParentType::reduction_, alpha_); |
363 | 1/2 | 42 | return; |
364 | } | ||
365 | |||
366 | // try with a smaller update and reset solution | ||
367 | ✗ | alpha_ *= 0.5; | |
368 | ✗ | uCurrentIter = uLastIter; | |
369 | } | ||
370 | 42 | } | |
371 | |||
372 | Scalar minResidual_ = std::numeric_limits<Scalar>::max(); | ||
373 | SolutionVector bestSol_; | ||
374 | std::size_t maxLambdaDivisions_ = 10; | ||
375 | Scalar alpha_ = 1.0; | ||
376 | }; | ||
377 | |||
378 | #if HAVE_SUITESPARSE_CHOLMOD | ||
379 | template<class T, class F> | ||
380 | class NonlinearLeastSquaresSolver : public Solver<typename Optimization::Detail::Assembler<T, F>::Variables> | ||
381 | { | ||
382 | using Assembler = Optimization::Detail::Assembler<T, F>; | ||
383 | using LinearSolver = Optimization::Detail::CholmodLinearSolver<typename Assembler::JacobianMatrix, typename Assembler::ResidualType>; | ||
384 | using Optimizer = Optimization::Detail::LevenbergMarquardt<Assembler, LinearSolver>; | ||
385 | public: | ||
386 | using Variables = typename Assembler::Variables; | ||
387 | |||
388 | 10 | NonlinearLeastSquaresSolver(const F& f, const Dune::BlockVector<T>& x0, std::size_t size) | |
389 | 2/6 | 30 | : solver_(std::make_unique<Optimizer>(std::make_shared<Assembler>(f, x0, size), std::make_shared<LinearSolver>(), 2)) |
390 | 10 | {} | |
391 | |||
392 | 10 | bool apply(Variables& vars) override | |
393 | 10 | { return solver_->apply(vars); } | |
394 | |||
395 | private: | ||
396 | std::unique_ptr<Optimizer> solver_; | ||
397 | }; | ||
398 | #endif // HAVE_SUITESPARSE_CHOLMOD | ||
399 | |||
400 | } // end namespace Dumux::Optimization::Detail | ||
401 | |||
402 | namespace Dumux::Optimization { | ||
403 | |||
404 | #if HAVE_SUITESPARSE_CHOLMOD | ||
405 | |||
406 | /*! | ||
407 | * \ingroup Nonlinear | ||
408 | * \brief Creates a nonlinear least squares solver with \f$n\f$ model parameters and \f$m\f$ observations | ||
409 | * For a detailed description of the mathematical background see \cite NocedalWright2006 | ||
410 | * \param f The residual functional to minimize the norm of (\f$ f: \mathbb{R}^n \mapsto \mathbb{R}^m \f$) | ||
411 | * \param x0 Initial guess for the parameters (vector of size \f$n\f$) | ||
412 | * \param size The number of observations (\f$m\f$) | ||
413 | * \return a unique pointer to the nonlinear least squares solver with a method `apply(Variables& vars)` | ||
414 | * \note The solver can be configured through the parameter group `LevenbergMarquardt` | ||
415 | */ | ||
416 | template<class T, class F> | ||
417 | 5 | auto makeNonlinearLeastSquaresSolver(const F& f, const Dune::BlockVector<T>& x0, std::size_t size) | |
418 | -> std::unique_ptr<Solver<Dune::BlockVector<T>>> | ||
419 | 10/20 | 5 | { return std::make_unique<Optimization::Detail::NonlinearLeastSquaresSolver<T, F>>(f, x0, size); } |
420 | |||
421 | #endif // HAVE_SUITESPARSE_CHOLMOD | ||
422 | |||
423 | } // end namespace Dumux::Optimization | ||
424 | |||
425 | #endif | ||
426 |
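
The public entry point of this header is `Dumux::Optimization::makeNonlinearLeastSquaresSolver` (source line 417 above), which is only compiled when SuiteSparse/CHOLMOD is available. The following is a minimal usage sketch, not taken from the DuMux sources: the model `y(t) = a*exp(b*t)`, the data values, and the include path `dumux/nonlinear/leastsquares.hh` are assumptions for illustration, and it presumes the parameter tree is set up in the usual way via `Dumux::Parameters::init` so that the `LevenbergMarquardt.*` parameters read in the constructor can be resolved.

```cpp
// Hypothetical curve-fitting sketch (not part of the header): fit the parameters
// p = (a, b) of the model y(t) = a*exp(b*t) to made-up observations by
// minimizing the norm of the residual r(p) with r_i = a*exp(b*t_i) - y_i.
#include <vector>
#include <cmath>
#include <dune/istl/bvector.hh>
#include <dumux/common/parameters.hh>
#include <dumux/nonlinear/leastsquares.hh> // assumed install path of this header

int main(int argc, char** argv)
{
    // initialize the parameter tree so LevenbergMarquardt.* parameters can be read
    Dumux::Parameters::init(argc, argv);

    // made-up observations (t_i, y_i), i = 1,...,m
    const std::vector<double> t = {0.0, 0.5, 1.0, 1.5, 2.0};
    const std::vector<double> y = {1.0, 1.3, 1.7, 2.2, 2.9};

    // residual function f: R^2 -> R^m
    const auto residual = [&](const auto& params)
    {
        Dune::BlockVector<double> r(t.size());
        for (std::size_t i = 0; i < t.size(); ++i)
            r[i] = params[0]*std::exp(params[1]*t[i]) - y[i];
        return r;
    };

    // initial guess for (a, b)
    Dune::BlockVector<double> p(2);
    p[0] = 1.0; p[1] = 0.1;

    // Levenberg-Marquardt solve: on success p holds the fitted parameters,
    // otherwise the best parameters found so far (lowest mean squared error)
    auto solver = Dumux::Optimization::makeNonlinearLeastSquaresSolver(residual, p, t.size());
    const bool converged = solver->apply(p);
    return converged ? 0 : 1;
}
```

As noted in the documentation of the factory function, the solver can then be tuned through the `LevenbergMarquardt` parameter group, e.g. `LevenbergMarquardt.LambdaInitial` or `LevenbergMarquardt.MaxLambdaDivisions`, which are read in the constructor shown above.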