1   /*
2    * Licensed to the Apache Software Foundation (ASF) under one or more
3    * contributor license agreements.  See the NOTICE file distributed with
4    * this work for additional information regarding copyright ownership.
5    * The ASF licenses this file to You under the Apache License, Version 2.0
6    * (the "License"); you may not use this file except in compliance with
7    * the License.  You may obtain a copy of the License at
8    *
9    *      https://www.apache.org/licenses/LICENSE-2.0
10   *
11   * Unless required by applicable law or agreed to in writing, software
12   * distributed under the License is distributed on an "AS IS" BASIS,
13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14   * See the License for the specific language governing permissions and
15   * limitations under the License.
16   */
17  
18  /*
19   * This is not the original file distributed by the Apache Software Foundation
20   * It has been modified by the Hipparchus project
21   */
22  package org.hipparchus.optim.nonlinear.vector.leastsquares;
23  
24  import java.util.Arrays;
25  
26  import org.hipparchus.exception.MathIllegalStateException;
27  import org.hipparchus.linear.ArrayRealVector;
28  import org.hipparchus.linear.RealMatrix;
29  import org.hipparchus.optim.ConvergenceChecker;
30  import org.hipparchus.optim.LocalizedOptimFormats;
31  import org.hipparchus.optim.nonlinear.vector.leastsquares.LeastSquaresProblem.Evaluation;
32  import org.hipparchus.util.FastMath;
33  import org.hipparchus.util.Incrementor;
34  import org.hipparchus.util.Precision;
35  
36  
37  /**
38   * This class solves a least-squares problem using the Levenberg-Marquardt
39   * algorithm.
40   *
41   * <p>This implementation <em>should</em> work even for over-determined systems
42   * (i.e. systems having more points than equations). Over-determined systems
43   * are solved by ignoring the points which have the smallest impact according
44   * to their Jacobian column norm. Only the rank of the matrix and some loop bounds
45   * are changed to implement this.</p>
46   *
47   * <p>The resolution engine is a simple translation of the MINPACK <a
48   * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
49   * changes. The changes include the over-determined resolution, the use of an
50   * inherited convergence checker, and the Q.R. decomposition which has been
51   * rewritten following the algorithm described in the
52   * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
53   * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
54   * <p>The authors of the original Fortran version are:</p>
55   * <ul>
56   * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
57   * <li>Burton S. Garbow</li>
58   * <li>Kenneth E. Hillstrom</li>
59   * <li>Jorge J. More</li>
60   * </ul>
61   * <p>The redistribution policy for MINPACK is available <a
62   * href="http://www.netlib.org/minpack/disclaimer">here</a>; for convenience, it
63   * is reproduced below.</p>
64   *
65   * <blockquote>
66   * <p>
67   *    Minpack Copyright Notice (1999) University of Chicago.
68   *    All rights reserved
69   * </p>
70   * <p>
71   * Redistribution and use in source and binary forms, with or without
72   * modification, are permitted provided that the following conditions
73   * are met:</p>
74   * <ol>
75   *  <li>Redistributions of source code must retain the above copyright
76   *      notice, this list of conditions and the following disclaimer.</li>
77   * <li>Redistributions in binary form must reproduce the above
78   *     copyright notice, this list of conditions and the following
79   *     disclaimer in the documentation and/or other materials provided
80   *     with the distribution.</li>
81   * <li>The end-user documentation included with the redistribution, if any,
82   *     must include the following acknowledgment:
83   *     <code>This product includes software developed by the University of
84   *           Chicago, as Operator of Argonne National Laboratory.</code>
85   *     Alternately, this acknowledgment may appear in the software itself,
86   *     if and wherever such third-party acknowledgments normally appear.</li>
87   * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
88   *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
89   *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
90   *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
91   *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
92   *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
93   *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
94   *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
95   *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
96   *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
97   *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
98   *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
99   *     BE CORRECTED.</strong></li>
100  * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
101  *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
102  *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
103  *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
104  *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
105  *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
106  *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
107  *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
108  *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
109  *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
110  * </ol>
111  * </blockquote>
112  *
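 * <p>A minimal usage sketch is shown below; it assumes the {@link LeastSquaresBuilder}
 * helper from this package, while {@code modelFunction}, {@code startPoint} and
 * {@code observations} are placeholders to be supplied by the caller:</p>
 * <pre>{@code
 * LeastSquaresProblem problem = new LeastSquaresBuilder().
 *                               model(modelFunction).     // a MultivariateJacobianFunction
 *                               start(startPoint).
 *                               target(observations).
 *                               maxEvaluations(1000).
 *                               maxIterations(100).
 *                               build();
 * Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
 * RealVector fittedParameters = optimum.getPoint();
 * double rms = optimum.getRMS();
 * }</pre>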
113  */
114 public class LevenbergMarquardtOptimizer implements LeastSquaresOptimizer {
115 
116     /** Twice the machine epsilon. */
117     private static final double TWO_EPS = 2 * Precision.EPSILON;
118 
119     /* configuration parameters */
120     /** Positive input variable used in determining the initial step bound. */
121     private final double initialStepBoundFactor;
122     /** Desired relative error in the sum of squares. */
123     private final double costRelativeTolerance;
124     /**  Desired relative error in the approximate solution parameters. */
125     private final double parRelativeTolerance;
126     /** Desired max cosine on the orthogonality between the function vector
127      * and the columns of the jacobian. */
128     private final double orthoTolerance;
129     /** Threshold for QR ranking. */
130     private final double qrRankingThreshold;
131 
132     /** Default constructor.
133      * <p>
134      * The default values for the algorithm settings are:
135      * <ul>
136      *  <li>Initial step bound factor: 100</li>
137      *  <li>Cost relative tolerance: 1e-10</li>
138      *  <li>Parameters relative tolerance: 1e-10</li>
139      *  <li>Orthogonality tolerance: 1e-10</li>
140      *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
141      * </ul>
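     * <p>A customized instance is typically derived from this default one through the
     * {@code withXxx} methods; a minimal sketch (the tolerance values below are only
     * illustrative) is:</p>
     * <pre>{@code
     * LevenbergMarquardtOptimizer lm = new LevenbergMarquardtOptimizer()
     *         .withCostRelativeTolerance(1.0e-12)
     *         .withParameterRelativeTolerance(1.0e-12);
     * }</pre>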
142      **/
143     public LevenbergMarquardtOptimizer() {
144         this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
145     }
146 
147     /**
148      * Construct an instance with all parameters specified.
149      *
150      * @param initialStepBoundFactor initial step bound factor
151      * @param costRelativeTolerance  cost relative tolerance
152      * @param parRelativeTolerance   parameters relative tolerance
153      * @param orthoTolerance         orthogonality tolerance
154      * @param qrRankingThreshold     threshold in the QR decomposition. Columns whose
155      *                               squared 2-norm is less than or equal to this
156      *                               threshold are considered to be all zeros.
157      */
158     public LevenbergMarquardtOptimizer(
159             final double initialStepBoundFactor,
160             final double costRelativeTolerance,
161             final double parRelativeTolerance,
162             final double orthoTolerance,
163             final double qrRankingThreshold) {
164         this.initialStepBoundFactor = initialStepBoundFactor;
165         this.costRelativeTolerance = costRelativeTolerance;
166         this.parRelativeTolerance = parRelativeTolerance;
167         this.orthoTolerance = orthoTolerance;
168         this.qrRankingThreshold = qrRankingThreshold;
169     }
170 
171     /** Build new instance with initial step bound factor.
172      * @param newInitialStepBoundFactor Positive input variable used in
173      * determining the initial step bound. This bound is set to the
174      * product of {@code newInitialStepBoundFactor} and the Euclidean norm of
175      * {@code diag * x} if non-zero, or else to {@code newInitialStepBoundFactor}
176      * itself. In most cases this factor should lie in the interval
177      * {@code (0.1, 100.0)}; {@code 100} is a generally recommended value.
179      * @return a new instance.
180      */
181     public LevenbergMarquardtOptimizer withInitialStepBoundFactor(double newInitialStepBoundFactor) {
182         return new LevenbergMarquardtOptimizer(
183                 newInitialStepBoundFactor,
184                 costRelativeTolerance,
185                 parRelativeTolerance,
186                 orthoTolerance,
187                 qrRankingThreshold);
188     }
189 
190     /** Build new instance with cost relative tolerance.
191      * @param newCostRelativeTolerance Desired relative error in the sum of squares.
192      * @return a new instance.
193      */
194     public LevenbergMarquardtOptimizer withCostRelativeTolerance(double newCostRelativeTolerance) {
195         return new LevenbergMarquardtOptimizer(
196                 initialStepBoundFactor,
197                 newCostRelativeTolerance,
198                 parRelativeTolerance,
199                 orthoTolerance,
200                 qrRankingThreshold);
201     }
202 
203     /** Build new instance with parameter relative tolerance.
204      * @param newParRelativeTolerance Desired relative error in the approximate solution
205      * parameters.
206      * @return a new instance.
207      */
208     public LevenbergMarquardtOptimizer withParameterRelativeTolerance(double newParRelativeTolerance) {
209         return new LevenbergMarquardtOptimizer(
210                 initialStepBoundFactor,
211                 costRelativeTolerance,
212                 newParRelativeTolerance,
213                 orthoTolerance,
214                 qrRankingThreshold);
215     }
216 
217     /** Build new instance with ortho tolerance.
218      * @param newOrthoTolerance Desired max cosine on the orthogonality between
219      * the function vector and the columns of the Jacobian.
220      * @return a new instance.
221      */
222     public LevenbergMarquardtOptimizer withOrthoTolerance(double newOrthoTolerance) {
223         return new LevenbergMarquardtOptimizer(
224                 initialStepBoundFactor,
225                 costRelativeTolerance,
226                 parRelativeTolerance,
227                 newOrthoTolerance,
228                 qrRankingThreshold);
229     }
230 
231     /** Build new instance with ranking threshold.
232      * @param newQRRankingThreshold Desired threshold for QR ranking.
233      * If the squared norm of a column vector is smaller than or equal to this
234      * threshold during QR decomposition, it is considered to be a zero vector
235      * and hence the rank of the matrix is reduced.
236      * @return a new instance.
237      */
238     public LevenbergMarquardtOptimizer withRankingThreshold(double newQRRankingThreshold) {
239         return new LevenbergMarquardtOptimizer(
240                 initialStepBoundFactor,
241                 costRelativeTolerance,
242                 parRelativeTolerance,
243                 orthoTolerance,
244                 newQRRankingThreshold);
245     }
246 
247     /**
248      * Gets the value of a tuning parameter.
249      * @see #withInitialStepBoundFactor(double)
250      *
251      * @return the parameter's value.
252      */
253     public double getInitialStepBoundFactor() {
254         return initialStepBoundFactor;
255     }
256 
257     /**
258      * Gets the value of a tuning parameter.
259      * @see #withCostRelativeTolerance(double)
260      *
261      * @return the parameter's value.
262      */
263     public double getCostRelativeTolerance() {
264         return costRelativeTolerance;
265     }
266 
267     /**
268      * Gets the value of a tuning parameter.
269      * @see #withParameterRelativeTolerance(double)
270      *
271      * @return the parameter's value.
272      */
273     public double getParameterRelativeTolerance() {
274         return parRelativeTolerance;
275     }
276 
277     /**
278      * Gets the value of a tuning parameter.
279      * @see #withOrthoTolerance(double)
280      *
281      * @return the parameter's value.
282      */
283     public double getOrthoTolerance() {
284         return orthoTolerance;
285     }
286 
287     /**
288      * Gets the value of a tuning parameter.
289      * @see #withRankingThreshold(double)
290      *
291      * @return the parameter's value.
292      */
293     public double getRankingThreshold() {
294         return qrRankingThreshold;
295     }
296 
297     /** {@inheritDoc} */
298     @Override
299     public Optimum optimize(final LeastSquaresProblem problem) {
300         // Pull in relevant data from the problem as locals.
301         final int nR = problem.getObservationSize(); // Number of observed data.
302         final int nC = problem.getParameterSize(); // Number of parameters.
303         // Counters.
304         final Incrementor iterationCounter = problem.getIterationCounter();
305         final Incrementor evaluationCounter = problem.getEvaluationCounter();
306         // Convergence criterion.
307         final ConvergenceChecker<Evaluation> checker = problem.getConvergenceChecker();
308 
309         // arrays shared with the other private methods
310         final int solvedCols  = FastMath.min(nR, nC);
311         /* Parameters evolution direction associated with lmPar. */
312         double[] lmDir = new double[nC];
313         /* Levenberg-Marquardt parameter. */
314         double lmPar = 0;
315 
316         // local point
317         double   delta   = 0;
318         double   xNorm   = 0;
319         double[] diag    = new double[nC];
320         double[] oldX    = new double[nC];
321         double[] oldRes  = new double[nR];
322         double[] qtf     = new double[nR];
323         double[] work1   = new double[nC];
324         double[] work2   = new double[nC];
325         double[] work3   = new double[nC];
326 
327 
328         // Evaluate the function at the starting point and calculate its norm.
329         evaluationCounter.increment();
330         //value will be reassigned in the loop
331         Evaluation current = problem.evaluate(problem.getStart());
332         double[] currentResiduals = current.getResiduals().toArray();
333         double currentCost = current.getCost();
334         double[] currentPoint = current.getPoint().toArray();
335 
336         // Outer loop.
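        // Each outer iteration re-evaluates the Jacobian, performs a QR decomposition
        // and runs the inner step-control loop, until either a convergence test returns
        // an Optimum or a tolerance-related exception is thrown.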
337         boolean firstIteration = true;
338         while (true) {
339             iterationCounter.increment();
340 
341             final Evaluation previous = current;
342 
343             // QR decomposition of the jacobian matrix
344             final InternalData internalData = qrDecomposition(current.getJacobian(), solvedCols);
345             final double[][] weightedJacobian = internalData.weightedJacobian;
346             final int[] permutation = internalData.permutation;
347             final double[] diagR = internalData.diagR;
348             final double[] jacNorm = internalData.jacNorm;
349 
350             //residuals already have weights applied
351             double[] weightedResidual = currentResiduals;
352             for (int i = 0; i < nR; i++) {
353                 qtf[i] = weightedResidual[i];
354             }
355 
356             // compute Qt.res
357             qTy(qtf, internalData);
358 
359             // now we don't need Q anymore,
360             // so let jacobian contain the R matrix with its diagonal elements
361             for (int k = 0; k < solvedCols; ++k) {
362                 int pk = permutation[k];
363                 weightedJacobian[k][pk] = diagR[pk];
364             }
365 
366             if (firstIteration) {
367                 // scale the point according to the norms of the columns
368                 // of the initial jacobian
369                 xNorm = 0;
370                 for (int k = 0; k < nC; ++k) {
371                     double dk = jacNorm[k];
372                     if (dk == 0) {
373                         dk = 1.0;
374                     }
375                     double xk = dk * currentPoint[k];
376                     xNorm  += xk * xk;
377                     diag[k] = dk;
378                 }
379                 xNorm = FastMath.sqrt(xNorm);
380 
381                 // initialize the step bound delta
382                 delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
383             }
384 
385             // check orthogonality between function vector and jacobian columns
386             double maxCosine = 0;
387             if (currentCost != 0) {
388                 for (int j = 0; j < solvedCols; ++j) {
389                     int    pj = permutation[j];
390                     double s  = jacNorm[pj];
391                     if (s != 0) {
392                         double sum = 0;
393                         for (int i = 0; i <= j; ++i) {
394                             sum += weightedJacobian[i][pj] * qtf[i];
395                         }
396                         maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
397                     }
398                 }
399             }
400             if (maxCosine <= orthoTolerance) {
401                 // Convergence has been reached.
402                 return Optimum.of(
403                         current,
404                         evaluationCounter.getCount(),
405                         iterationCounter.getCount());
406             }
407 
408             // rescale if necessary
409             for (int j = 0; j < nC; ++j) {
410                 diag[j] = FastMath.max(diag[j], jacNorm[j]);
411             }
412 
413             // Inner loop.
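            // The inner loop is the step-control loop: it adjusts the Levenberg-Marquardt
            // parameter and the step bound delta until the ratio of actual to predicted
            // cost reduction reaches 1.0e-4, i.e. until a trial step is accepted (or a
            // convergence or termination test exits the method).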
414             for (double ratio = 0; ratio < 1.0e-4;) {
415 
416                 // save the state
417                 for (int j = 0; j < solvedCols; ++j) {
418                     int pj = permutation[j];
419                     oldX[pj] = currentPoint[pj];
420                 }
421                 final double previousCost = currentCost;
422                 double[] tmpVec = weightedResidual;
423                 weightedResidual = oldRes;
424                 oldRes    = tmpVec;
425 
426                 // determine the Levenberg-Marquardt parameter
427                 lmPar = determineLMParameter(qtf, delta, diag,
428                                      internalData, solvedCols,
429                                      work1, work2, work3, lmDir, lmPar);
430 
431                 // compute the new point and the norm of the evolution direction
432                 double lmNorm = 0;
433                 for (int j = 0; j < solvedCols; ++j) {
434                     int pj = permutation[j];
435                     lmDir[pj] = -lmDir[pj];
436                     currentPoint[pj] = oldX[pj] + lmDir[pj];
437                     double s = diag[pj] * lmDir[pj];
438                     lmNorm  += s * s;
439                 }
440                 lmNorm = FastMath.sqrt(lmNorm);
441                 // on the first iteration, adjust the initial step bound.
442                 if (firstIteration) {
443                     delta = FastMath.min(delta, lmNorm);
444                 }
445 
446                 // Evaluate the function at x + p and calculate its norm.
447                 evaluationCounter.increment();
448                 current = problem.evaluate(new ArrayRealVector(currentPoint));
449                 currentResiduals = current.getResiduals().toArray();
450                 currentCost = current.getCost();
451                 currentPoint = current.getPoint().toArray();
452 
453                 // compute the scaled actual reduction
454                 double actRed = -1.0;
455                 if (0.1 * currentCost < previousCost) {
456                     double r = currentCost / previousCost;
457                     actRed = 1.0 - r * r;
458                 }
459 
460                 // compute the scaled predicted reduction
461                 // and the scaled directional derivative
462                 for (int j = 0; j < solvedCols; ++j) {
463                     int pj = permutation[j];
464                     double dirJ = lmDir[pj];
465                     work1[j] = 0;
466                     for (int i = 0; i <= j; ++i) {
467                         work1[i] += weightedJacobian[i][pj] * dirJ;
468                     }
469                 }
470                 double coeff1 = 0;
471                 for (int j = 0; j < solvedCols; ++j) {
472                     coeff1 += work1[j] * work1[j];
473                 }
474                 double pc2 = previousCost * previousCost;
475                 coeff1 /= pc2;
476                 double coeff2 = lmPar * lmNorm * lmNorm / pc2;
477                 double preRed = coeff1 + 2 * coeff2;
478                 double dirDer = -(coeff1 + coeff2);
479 
480                 // ratio of the actual to the predicted reduction
481                 ratio = (preRed == 0) ? 0 : (actRed / preRed);
482 
483                 // update the step bound
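                // (trust-region logic: a poor prediction, ratio <= 0.25, shrinks delta
                // and increases lmPar; a good one, ratio >= 0.75 or lmPar == 0, sets
                // delta to twice the step norm and halves lmPar)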
484                 if (ratio <= 0.25) {
485                     double tmp =
486                         (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
487                     if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
488                         tmp = 0.1;
489                     }
490                     delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
491                     lmPar /= tmp;
492                 } else if ((lmPar == 0) || (ratio >= 0.75)) {
493                     delta = 2 * lmNorm;
494                     lmPar *= 0.5;
495                 }
496 
497                 // test for successful iteration.
498                 if (ratio >= 1.0e-4) {
499                     // successful iteration, update the norm
500                     firstIteration = false;
501                     xNorm = 0;
502                     for (int k = 0; k < nC; ++k) {
503                         double xK = diag[k] * currentPoint[k];
504                         xNorm += xK * xK;
505                     }
506                     xNorm = FastMath.sqrt(xNorm);
507 
508                     // tests for convergence.
509                     if (checker != null && checker.converged(iterationCounter.getCount(), previous, current)) {
510                         return Optimum.of(current, evaluationCounter.getCount(), iterationCounter.getCount());
511                     }
512                 } else {
513                     // failed iteration, reset the previous values
514                     currentCost = previousCost;
515                     for (int j = 0; j < solvedCols; ++j) {
516                         int pj = permutation[j];
517                         currentPoint[pj] = oldX[pj];
518                     }
519                     tmpVec    = weightedResidual;
520                     weightedResidual = oldRes;
521                     oldRes    = tmpVec;
522                     // Reset "current" to previous values.
523                     current = previous;
524                 }
525 
526                 // Default convergence criteria.
527                 if ((FastMath.abs(actRed) <= costRelativeTolerance &&
528                      preRed <= costRelativeTolerance &&
529                      ratio <= 2.0) ||
530                     delta <= parRelativeTolerance * xNorm) {
531                     return Optimum.of(current, evaluationCounter.getCount(), iterationCounter.getCount());
532                 }
533 
534                 // tests for termination and stringent tolerances
535                 if (FastMath.abs(actRed) <= TWO_EPS &&
536                     preRed <= TWO_EPS &&
537                     ratio <= 2.0) {
538                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
539                                                         costRelativeTolerance);
540                 } else if (delta <= TWO_EPS * xNorm) {
541                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
542                                                         parRelativeTolerance);
543                 } else if (maxCosine <= TWO_EPS) {
544                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
545                                                         orthoTolerance);
546                 }
547             }
548         }
549     }
550 
551     /**
552      * Holds internal data.
553      * This structure was created so that all optimizer fields can be "final".
554      * Code should be further refactored in order not to pass around arguments
555      * that will be modified in-place (cf. the "work" arrays).
556      */
557     private static class InternalData {
558         /** Weighted Jacobian. */
559         private final double[][] weightedJacobian;
560         /** Columns permutation array. */
561         private final int[] permutation;
562         /** Rank of the Jacobian matrix. */
563         private final int rank;
564         /** Diagonal elements of the R matrix in the QR decomposition. */
565         private final double[] diagR;
566         /** Norms of the columns of the jacobian matrix. */
567         private final double[] jacNorm;
568         /** Coefficients of the Householder transforms vectors. */
569         private final double[] beta;
570 
571         /**
572          * <p>
573          * All arrays are stored by reference
574          * </p>
575          * @param weightedJacobian Weighted Jacobian.
576          * @param permutation Columns permutation array.
577          * @param rank Rank of the Jacobian matrix.
578          * @param diagR Diagonal elements of the R matrix in the QR decomposition.
579          * @param jacNorm Norms of the columns of the jacobian matrix.
580          * @param beta Coefficients of the Householder transforms vectors.
581          */
582         InternalData(double[][] weightedJacobian,
583                      int[] permutation,
584                      int rank,
585                      double[] diagR,
586                      double[] jacNorm,
587                      double[] beta) {
588             this.weightedJacobian = weightedJacobian; // NOPMD - storing array references is intentional and documented here
589             this.permutation = permutation;           // NOPMD - storing array references is intentional and documented here
590             this.rank = rank;
591             this.diagR = diagR;                       // NOPMD - storing array references is intentional and documented here
592             this.jacNorm = jacNorm;                   // NOPMD - storing array references is intentional and documented here
593             this.beta = beta;                         // NOPMD - storing array references is intentional and documented here
594         }
595     }
596 
597     /**
598      * Determines the Levenberg-Marquardt parameter.
599      *
600      * <p>This implementation is a translation in Java of the MINPACK
601      * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
602      * routine.</p>
603      * <p>This method computes the Levenberg-Marquardt parameter and stores the corresponding direction in {@code lmDir}.</p>
604      * <p>The authors of the original Fortran function are:</p>
605      * <ul>
606      *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
607      *   <li>Burton  S. Garbow</li>
608      *   <li>Kenneth E. Hillstrom</li>
609      *   <li>Jorge   J. More</li>
610      * </ul>
611      * <p>Luc Maisonobe did the Java translation.</p>
612      *
613      * @param qy Array containing qTy.
614      * @param delta Upper bound on the Euclidean norm of {@code diag * lmDir}.
615      * @param diag Diagonal matrix.
616      * @param internalData Data (modified in-place in this method).
617      * @param solvedCols Number of solved columns.
618      * @param work1 work array
619      * @param work2 work array
620      * @param work3 work array
621      * @param lmDir the "returned" LM direction will be stored in this array.
622      * @param lmPar the value of the LM parameter from the previous iteration.
623      * @return the new LM parameter
624      */
625     private double determineLMParameter(double[] qy, double delta, double[] diag,
626                                       InternalData internalData, int solvedCols,
627                                       double[] work1, double[] work2, double[] work3,
628                                       double[] lmDir, double lmPar) {
629         final double[][] weightedJacobian = internalData.weightedJacobian;
630         final int[] permutation = internalData.permutation;
631         final int rank = internalData.rank;
632         final double[] diagR = internalData.diagR;
633 
634         final int nC = weightedJacobian[0].length;
635 
636         // compute and store in x the gauss-newton direction, if the
637         // jacobian is rank-deficient, obtain a least squares solution
638         for (int j = 0; j < rank; ++j) {
639             lmDir[permutation[j]] = qy[j];
640         }
641         for (int j = rank; j < nC; ++j) {
642             lmDir[permutation[j]] = 0;
643         }
644         for (int k = rank - 1; k >= 0; --k) {
645             int pk = permutation[k];
646             double ypk = lmDir[pk] / diagR[pk];
647             for (int i = 0; i < k; ++i) {
648                 lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
649             }
650             lmDir[pk] = ypk;
651         }
652 
653         // evaluate the function at the origin, and test
654         // for acceptance of the Gauss-Newton direction
655         double dxNorm = 0;
656         for (int j = 0; j < solvedCols; ++j) {
657             int pj = permutation[j];
658             double s = diag[pj] * lmDir[pj];
659             work1[pj] = s;
660             dxNorm += s * s;
661         }
662         dxNorm = FastMath.sqrt(dxNorm);
663         double fp = dxNorm - delta;
664         if (fp <= 0.1 * delta) {
665             lmPar = 0;
666             return lmPar;
667         }
668 
669         // if the jacobian is not rank deficient, the Newton step provides
670         // a lower bound, parl, for the zero of the function,
671         // otherwise set this bound to zero
672         double sum2;
673         double parl = 0;
674         if (rank == solvedCols) {
675             for (int j = 0; j < solvedCols; ++j) {
676                 int pj = permutation[j];
677                 work1[pj] *= diag[pj] / dxNorm;
678             }
679             sum2 = 0;
680             for (int j = 0; j < solvedCols; ++j) {
681                 int pj = permutation[j];
682                 double sum = 0;
683                 for (int i = 0; i < j; ++i) {
684                     sum += weightedJacobian[i][pj] * work1[permutation[i]];
685                 }
686                 double s = (work1[pj] - sum) / diagR[pj];
687                 work1[pj] = s;
688                 sum2 += s * s;
689             }
690             parl = fp / (delta * sum2);
691         }
692 
693         // calculate an upper bound, paru, for the zero of the function
694         sum2 = 0;
695         for (int j = 0; j < solvedCols; ++j) {
696             int pj = permutation[j];
697             double sum = 0;
698             for (int i = 0; i <= j; ++i) {
699                 sum += weightedJacobian[i][pj] * qy[i];
700             }
701             sum /= diag[pj];
702             sum2 += sum * sum;
703         }
704         double gNorm = FastMath.sqrt(sum2);
705         double paru = gNorm / delta;
706         if (paru == 0) {
707             paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
708         }
709 
710         // if the input par lies outside of the interval (parl,paru),
711         // set par to the closer endpoint
712         lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
713         if (lmPar == 0) {
714             lmPar = gNorm / dxNorm;
715         }
716 
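        // Refine lmPar with the MINPACK safeguarded iteration (at most 10 passes):
        // solve for the step at the current lmPar, compare the scaled step norm
        // ||diag * lmDir|| with delta, and update the bracketing bounds parl/paru
        // until the norm is within about 10% of delta.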
717         for (int countdown = 10; countdown >= 0; --countdown) {
718 
719             // evaluate the function at the current value of lmPar
720             if (lmPar == 0) {
721                 lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
722             }
723             double sPar = FastMath.sqrt(lmPar);
724             for (int j = 0; j < solvedCols; ++j) {
725                 int pj = permutation[j];
726                 work1[pj] = sPar * diag[pj];
727             }
728             determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);
729 
730             dxNorm = 0;
731             for (int j = 0; j < solvedCols; ++j) {
732                 int pj = permutation[j];
733                 double s = diag[pj] * lmDir[pj];
734                 work3[pj] = s;
735                 dxNorm += s * s;
736             }
737             dxNorm = FastMath.sqrt(dxNorm);
738             double previousFP = fp;
739             fp = dxNorm - delta;
740 
741             // if the function is small enough, accept the current value
742             // of lmPar, also test for the exceptional cases where parl is zero
743             if (FastMath.abs(fp) <= 0.1 * delta ||
744                 (parl == 0 &&
745                  fp <= previousFP &&
746                  previousFP < 0)) {
747                 return lmPar;
748             }
749 
750             // compute the Newton correction
751             for (int j = 0; j < solvedCols; ++j) {
752                 int pj = permutation[j];
753                 work1[pj] = work3[pj] * diag[pj] / dxNorm;
754             }
755             for (int j = 0; j < solvedCols; ++j) {
756                 int pj = permutation[j];
757                 work1[pj] /= work2[j];
758                 double tmp = work1[pj];
759                 for (int i = j + 1; i < solvedCols; ++i) {
760                     work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
761                 }
762             }
763             sum2 = 0;
764             for (int j = 0; j < solvedCols; ++j) {
765                 double s = work1[permutation[j]];
766                 sum2 += s * s;
767             }
768             double correction = fp / (delta * sum2);
769 
770             // depending on the sign of the function, update parl or paru.
771             if (fp > 0) {
772                 parl = FastMath.max(parl, lmPar);
773             } else if (fp < 0) {
774                 paru = FastMath.min(paru, lmPar);
775             }
776 
777             // compute an improved estimate for lmPar
778             lmPar = FastMath.max(parl, lmPar + correction);
779         }
780 
781         return lmPar;
782     }
783 
784     /**
785      * Solve a*x = b and d*x = 0 in the least squares sense.
786      * <p>This implementation is a translation in Java of the MINPACK
787      * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
788      * routine.</p>
789      * <p>This method stores its results in {@code lmDir} and {@code lmDiag}.</p>
790      * <p>The authors of the original Fortran function are:</p>
791      * <ul>
792      *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
793      *   <li>Burton  S. Garbow</li>
794      *   <li>Kenneth E. Hillstrom</li>
795      *   <li>Jorge   J. More</li>
796      * </ul>
797      * <p>Luc Maisonobe did the Java translation.</p>
798      *
799      * @param qy array containing qTy
800      * @param diag diagonal matrix
801      * @param lmDiag diagonal elements associated with lmDir
802      * @param internalData Data (modified in-place in this method).
803      * @param solvedCols Number of solved columns.
804      * @param work work array
805      * @param lmDir the "returned" LM direction is stored in this array
806      */
807     private void determineLMDirection(double[] qy, double[] diag,
808                                       double[] lmDiag,
809                                       InternalData internalData,
810                                       int solvedCols,
811                                       double[] work,
812                                       double[] lmDir) {
813         final int[] permutation = internalData.permutation;
814         final double[][] weightedJacobian = internalData.weightedJacobian;
815         final double[] diagR = internalData.diagR;
816 
817         // copy R and Qty to preserve input and initialize s
818         //  in particular, save the diagonal elements of R in lmDir
819         for (int j = 0; j < solvedCols; ++j) {
820             int pj = permutation[j];
821             for (int i = j + 1; i < solvedCols; ++i) {
822                 weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
823             }
824             lmDir[j] = diagR[pj];
825             work[j]  = qy[j];
826         }
827 
828         // eliminate the diagonal matrix d using a Givens rotation
829         for (int j = 0; j < solvedCols; ++j) {
830 
831             // prepare the row of d to be eliminated, locating the
832             // diagonal element using p from the Q.R. factorization
833             int pj = permutation[j];
834             double dpj = diag[pj];
835             if (dpj != 0) {
836                 Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
837             }
838             lmDiag[j] = dpj;
839 
840             //  the transformations to eliminate the row of d
841             // modify only a single element of Qty
842             // beyond the first n, which is initially zero.
843             double qtbpj = 0;
844             for (int k = j; k < solvedCols; ++k) {
845                 int pk = permutation[k];
846 
847                 // determine a Givens rotation which eliminates the
848                 // appropriate element in the current row of d
849                 if (lmDiag[k] != 0) {
850 
851                     final double sin;
852                     final double cos;
853                     double rkk = weightedJacobian[k][pk];
854                     if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
855                         final double cotan = rkk / lmDiag[k];
856                         sin   = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
857                         cos   = sin * cotan;
858                     } else {
859                         final double tan = lmDiag[k] / rkk;
860                         cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
861                         sin = cos * tan;
862                     }
863 
864                     // compute the modified diagonal element of R and
865                     // the modified element of (Qty,0)
866                     weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
867                     final double temp = cos * work[k] + sin * qtbpj;
868                     qtbpj = -sin * work[k] + cos * qtbpj;
869                     work[k] = temp;
870 
871                     // accumulate the transformation in the row of s
872                     for (int i = k + 1; i < solvedCols; ++i) {
873                         double rik = weightedJacobian[i][pk];
874                         final double temp2 = cos * rik + sin * lmDiag[i];
875                         lmDiag[i] = -sin * rik + cos * lmDiag[i];
876                         weightedJacobian[i][pk] = temp2;
877                     }
878                 }
879             }
880 
881             // store the diagonal element of s and restore
882             // the corresponding diagonal element of R
883             lmDiag[j] = weightedJacobian[j][permutation[j]];
884             weightedJacobian[j][permutation[j]] = lmDir[j];
885         }
886 
887         // solve the triangular system for z, if the system is
888         // singular, then obtain a least squares solution
889         int nSing = solvedCols;
890         for (int j = 0; j < solvedCols; ++j) {
891             if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
892                 nSing = j;
893             }
894             if (nSing < solvedCols) {
895                 work[j] = 0;
896             }
897         }
898         if (nSing > 0) {
899             for (int j = nSing - 1; j >= 0; --j) {
900                 int pj = permutation[j];
901                 double sum = 0;
902                 for (int i = j + 1; i < nSing; ++i) {
903                     sum += weightedJacobian[i][pj] * work[i];
904                 }
905                 work[j] = (work[j] - sum) / lmDiag[j];
906             }
907         }
908 
909         // permute the components of z back to components of lmDir
910         for (int j = 0; j < lmDir.length; ++j) {
911             lmDir[permutation[j]] = work[j];
912         }
913     }
914 
915     /**
916      * Decompose a matrix A as A.P = Q.R using Householder transforms.
917      * <p>As suggested in the P. Lascaux and R. Theodor book
918      * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
919      * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
920      * the Householder transforms with u<sub>k</sub> unit vectors such that:
921      * <pre>
922      * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
923      * </pre>
924      * we use v<sub>k</sub> non-unit vectors such that:
925      * <pre>
926      * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
927      * </pre>
928      * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
929      * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
930      * them from the v<sub>k</sub> vectors would be costly.</p>
931      * <p>This decomposition handles rank-deficient cases since the transformations
932      * are performed in order of non-increasing column norms thanks to column
933      * pivoting. The diagonal elements of the R matrix are therefore also in
934      * order of non-increasing absolute values.</p>
935      *
936      * @param jacobian Weighted Jacobian matrix at the current point.
937      * @param solvedCols Number of solved columns.
938      * @return data used in other methods of this class.
939      * @throws MathIllegalStateException if the decomposition cannot be performed.
940      */
941     private InternalData qrDecomposition(RealMatrix jacobian, int solvedCols)
942         throws MathIllegalStateException {
943         // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
944         // hence the multiplication by -1.
945         final double[][] weightedJacobian = jacobian.scalarMultiply(-1).getData();
946 
947         final int nR = weightedJacobian.length;
948         final int nC = weightedJacobian[0].length;
949 
950         final int[] permutation = new int[nC];
951         final double[] diagR = new double[nC];
952         final double[] jacNorm = new double[nC];
953         final double[] beta = new double[nC];
954 
955         // initializations
956         for (int k = 0; k < nC; ++k) {
957             permutation[k] = k;
958             double norm2 = 0;
959             for (int i = 0; i < nR; ++i) {
960                 double akk = weightedJacobian[i][k];
961                 norm2 += akk * akk;
962             }
963             jacNorm[k] = FastMath.sqrt(norm2);
964         }
965 
966         // transform the matrix column after column
967         for (int k = 0; k < nC; ++k) {
968 
969             // select the column with the greatest norm on active components
970             int nextColumn = -1;
971             double ak2 = Double.NEGATIVE_INFINITY;
972             for (int i = k; i < nC; ++i) {
973                 double norm2 = 0;
974                 for (int j = k; j < nR; ++j) {
975                     double aki = weightedJacobian[j][permutation[i]];
976                     norm2 += aki * aki;
977                 }
978                 if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
979                     throw new MathIllegalStateException(LocalizedOptimFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
980                                                         nR, nC);
981                 }
982                 if (norm2 > ak2) {
983                     nextColumn = i;
984                     ak2        = norm2;
985                 }
986             }
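            // if even the largest remaining squared column norm does not exceed the
            // ranking threshold, the Jacobian is considered rank-deficient and the
            // decomposition stops with rank k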
987             if (ak2 <= qrRankingThreshold) {
988                 return new InternalData(weightedJacobian, permutation, k, diagR, jacNorm, beta);
989             }
990             int pk = permutation[nextColumn];
991             permutation[nextColumn] = permutation[k];
992             permutation[k] = pk;
993 
994             // choose alpha such that Hk.u = alpha ek
995             double akk = weightedJacobian[k][pk];
996             double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
997             double betak = 1.0 / (ak2 - akk * alpha);
998             beta[pk] = betak;
999 
1000             // transform the current column
1001             diagR[pk] = alpha;
1002             weightedJacobian[k][pk] -= alpha;
1003 
1004             // transform the remaining columns
1005             for (int dk = nC - 1 - k; dk > 0; --dk) {
1006                 double gamma = 0;
1007                 for (int j = k; j < nR; ++j) {
1008                     gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
1009                 }
1010                 gamma *= betak;
1011                 for (int j = k; j < nR; ++j) {
1012                     weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
1013                 }
1014             }
1015         }
1016 
1017         return new InternalData(weightedJacobian, permutation, solvedCols, diagR, jacNorm, beta);
1018     }
1019 
1020     /**
1021      * Compute the product Qt.y for some Q.R. decomposition.
1022      *
1023      * @param y vector to multiply (will be overwritten with the result)
1024      * @param internalData Data.
1025      */
1026     private void qTy(double[] y,
1027                      InternalData internalData) {
1028         final double[][] weightedJacobian = internalData.weightedJacobian;
1029         final int[] permutation = internalData.permutation;
1030         final double[] beta = internalData.beta;
1031 
1032         final int nR = weightedJacobian.length;
1033         final int nC = weightedJacobian[0].length;
1034 
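        // apply the stored Householder reflections (vectors in the lower part of
        // weightedJacobian, coefficients in beta) in column order, overwriting y
        // with Qt.y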
1035         for (int k = 0; k < nC; ++k) {
1036             int pk = permutation[k];
1037             double gamma = 0;
1038             for (int i = k; i < nR; ++i) {
1039                 gamma += weightedJacobian[i][pk] * y[i];
1040             }
1041             gamma *= beta[pk];
1042             for (int i = k; i < nR; ++i) {
1043                 y[i] -= gamma * weightedJacobian[i][pk];
1044             }
1045         }
1046     }
1047 }