/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This is not the original file distributed by the Apache Software Foundation
 * It has been modified by the Hipparchus project
 */

package org.hipparchus.optim.nonlinear.scalar.noderiv;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.hipparchus.exception.LocalizedCoreFormats;
import org.hipparchus.exception.MathIllegalArgumentException;
import org.hipparchus.exception.MathIllegalStateException;
import org.hipparchus.linear.Array2DRowRealMatrix;
import org.hipparchus.linear.EigenDecompositionSymmetric;
import org.hipparchus.linear.MatrixUtils;
import org.hipparchus.linear.RealMatrix;
import org.hipparchus.optim.ConvergenceChecker;
import org.hipparchus.optim.OptimizationData;
import org.hipparchus.optim.PointValuePair;
import org.hipparchus.optim.nonlinear.scalar.GoalType;
import org.hipparchus.optim.nonlinear.scalar.MultivariateOptimizer;
import org.hipparchus.random.RandomGenerator;
import org.hipparchus.util.FastMath;

/**
 * An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
 * for non-linear, non-convex, non-smooth, global function minimization.
 * <p>
 * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
 * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
 * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
 * optima, outliers, etc.) of the objective function. Like a
 * quasi-Newton method, the CMA-ES learns and applies a variable metric
 * on the underlying search space. Unlike a quasi-Newton method, the
 * CMA-ES neither estimates nor uses gradients, making it considerably more
 * reliable in terms of finding a good, or even close to optimal, solution.
 * <p>
 * In general, on smooth objective functions the CMA-ES is roughly ten times
 * slower than BFGS (counting objective function evaluations, no gradients provided).
 * For up to \(n=10\) variables, the derivative-free simplex
 * direct search method (Nelder and Mead) can also be faster, but it is
 * far less reliable than CMA-ES.
 * <p>
 * The CMA-ES is particularly well suited for non-separable
 * and/or badly conditioned problems. To observe the advantage of CMA compared
 * to a conventional evolution strategy, it will usually take about
 * \(30 n\) function evaluations. On difficult problems the complete
 * optimization (a single run) is expected to take <em>roughly</em> between
 * \(30 n\) and \(300 n^2\)
 * function evaluations.
 * <p>
 * This implementation is translated and adapted from the Matlab version
 * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.
 * <p>
 * For more information, please refer to the following links:
 * <ul>
 *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
 *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
 *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
 * </ul>
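 * <p>
 * A minimal usage sketch (the quadratic objective function, bounds, random
 * generator and parameter values below are purely illustrative, not defaults
 * recommended by this class; {@code MaxEval}, {@code InitialGuess} and
 * {@code SimpleBounds} come from {@code org.hipparchus.optim}, and
 * {@code ObjectiveFunction} from {@code org.hipparchus.optim.nonlinear.scalar}):
 * <pre>{@code
 * CMAESOptimizer optimizer =
 *     new CMAESOptimizer(30000, 0, true, 0, 0,
 *                        new MersenneTwister(), false, null);
 * PointValuePair result =
 *     optimizer.optimize(new MaxEval(100000),
 *                        new ObjectiveFunction(x -> x[0] * x[0] + x[1] * x[1]),
 *                        GoalType.MINIMIZE,
 *                        new InitialGuess(new double[] { 1, 1 }),
 *                        new SimpleBounds(new double[] { -5, -5 },
 *                                         new double[] { 5, 5 }),
 *                        new CMAESOptimizer.Sigma(new double[] { 0.3, 0.3 }),
 *                        new CMAESOptimizer.PopulationSize(10));
 * }</pre>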
 *
 */
public class CMAESOptimizer
    extends MultivariateOptimizer {
    // global search parameters
    /**
     * Population size, offspring number. The primary strategy parameter to play
     * with, which can be increased from its default value. Increasing the
     * population size improves global search properties at the expense of speed.
     * Speed decreases, as a rule, at most linearly with increasing population
     * size. It is advisable to begin with the default small population size.
     */
    private int lambda; // population size
    /**
     * Covariance update mechanism, default is active CMA. isActiveCMA = true
     * turns on "active CMA" with a negative update of the covariance matrix and
     * checks for positive definiteness. OPTS.CMA.active = 2 does not check for
     * pos. def. and is numerically faster. Active CMA usually speeds up the
     * adaptation.
     */
    private final boolean isActiveCMA;
    /**
     * Determines how often a new random offspring is generated in case it is
     * not feasible / beyond the defined limits, default is 0.
     */
    private final int checkFeasableCount;
    /**
     * @see Sigma
     */
    private double[] inputSigma;
    /** Number of objective variables/problem dimension. */
    private int dimension;
    /**
     * Defines the number of initial iterations, where the covariance matrix
     * remains diagonal and the algorithm has internally linear time complexity.
     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
     * this setting also exhibits linear space complexity. This can be
     * particularly useful for dimension > 100.
     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
     */
    private int diagonalOnly;
    /** Indicates whether the goal type is minimization. */
    private boolean isMinimize = true;
    /** Indicates whether statistic data is collected. */
    private final boolean generateStatistics;

    // termination criteria
    /** Maximal number of iterations allowed. */
    private final int maxIterations;
    /** Limit for fitness value. */
    private final double stopFitness;
    /** Stop if the x-change is larger than stopTolUpX. */
    private double stopTolUpX;
    /** Stop if the x-change is smaller than stopTolX. */
    private double stopTolX;
    /** Stop if the change in function value is smaller than stopTolFun. */
    private double stopTolFun;
    /** Stop if the range of the recorded best function values is smaller than stopTolHistFun. */
    private double stopTolHistFun;
    // selection strategy parameters
    /** Number of parents/points for recombination. */
    private int mu;
    /** log(mu + 0.5), stored for efficiency. */
    private double logMu2;   // NOPMD - using a field here is for performance reasons
    /** Array for weighted recombination. */
    private RealMatrix weights;
    /** Variance-effectiveness of sum w_i x_i. */
    private double mueff;

    // dynamic strategy parameters and constants
    /** Overall standard deviation - search volume. */
    private double sigma;
    /** Cumulation constant. */
    private double cc;
    /** Cumulation constant for step-size. */
    private double cs;
    /** Damping for step-size. */
    private double damps;
    /** Learning rate for rank-one update. */
    private double ccov1;
    /** Learning rate for rank-mu update. */
    private double ccovmu;
    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
    private double chiN;
    /** Learning rate for rank-one update - diagonalOnly. */
    private double ccov1Sep;
    /** Learning rate for rank-mu update - diagonalOnly. */
    private double ccovmuSep;

    // CMA internal values - updated each generation
    /** Objective variables. */
    private RealMatrix xmean;
    /** Evolution path. */
    private RealMatrix pc;
    /** Evolution path for sigma. */
    private RealMatrix ps;
    /** Norm of ps, stored for efficiency. */
    private double normps;
    /** Coordinate system. */
    private RealMatrix B;
    /** Scaling. */
    private RealMatrix D;
    /** B*D, stored for efficiency. */
    private RealMatrix BD;
    /** Diagonal of sqrt(D), stored for efficiency. */
    private RealMatrix diagD;
    /** Covariance matrix. */
    private RealMatrix C;
    /** Diagonal of C, used for diagonalOnly. */
    private RealMatrix diagC;
    /** Number of iterations already performed. */
    private int iterations;

    /** History queue of best values. */
    private double[] fitnessHistory;

    /** Random generator. */
    private final RandomGenerator random;

    /** History of sigma values. */
    private final List<Double> statisticsSigmaHistory;
    /** History of mean matrix. */
    private final List<RealMatrix> statisticsMeanHistory;
    /** History of fitness values. */
    private final List<Double> statisticsFitnessHistory;
    /** History of D matrix. */
    private final List<RealMatrix> statisticsDHistory;

    /** Simple constructor.
     * @param maxIterations Maximal number of iterations.
     * @param stopFitness Limit for the objective function value: the optimization
     * stops once a fitness smaller than {@code stopFitness} is reached (a value of
     * 0 disables this criterion).
     * @param isActiveCMA Chooses the covariance matrix update method.
     * @param diagonalOnly Number of initial iterations, where the covariance matrix
     * remains diagonal.
     * @param checkFeasableCount Determines how often new random objective variables are
     * generated in case they are out of bounds.
     * @param random Random generator.
     * @param generateStatistics Whether statistic data is collected.
     * @param checker Convergence checker.
     */
    public CMAESOptimizer(int maxIterations,
                          double stopFitness,
                          boolean isActiveCMA,
                          int diagonalOnly,
                          int checkFeasableCount,
                          RandomGenerator random,
                          boolean generateStatistics,
                          ConvergenceChecker<PointValuePair> checker) {
        super(checker);
        this.maxIterations = maxIterations;
        this.stopFitness = stopFitness;
        this.isActiveCMA = isActiveCMA;
        this.diagonalOnly = diagonalOnly;
        this.checkFeasableCount = checkFeasableCount;
        this.random = random;
        this.generateStatistics = generateStatistics;
        this.statisticsSigmaHistory = new ArrayList<>();
        this.statisticsMeanHistory = new ArrayList<>();
        this.statisticsFitnessHistory = new ArrayList<>();
        this.statisticsDHistory = new ArrayList<>();
    }

    /** Get history of sigma values.
     * @return History of sigma values
     */
    public List<Double> getStatisticsSigmaHistory() {
        return statisticsSigmaHistory;
    }

    /** Get history of mean matrix.
     * @return History of mean matrix
     */
    public List<RealMatrix> getStatisticsMeanHistory() {
        return statisticsMeanHistory;
    }

    /** Get history of fitness values.
     * @return History of fitness values
     */
    public List<Double> getStatisticsFitnessHistory() {
        return statisticsFitnessHistory;
    }

    /** Get history of D matrix.
     * @return History of D matrix
     */
    public List<RealMatrix> getStatisticsDHistory() {
        return statisticsDHistory;
    }

    /**
     * Input sigma values.
     * They define the initial coordinate-wise standard deviations for
     * sampling new search points around the initial guess.
     * It is suggested to set them to the estimated distance from the
     * initial to the desired optimum.
     * Small values induce the search to be more local (and very small
     * values are more likely to find a local optimum close to the initial
     * guess).
     * Too small values, however, might lead to early termination.
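     * <p>
     * A short illustrative example (the values are arbitrary):
     * <pre>{@code
     * // one initial standard deviation per optimized variable
     * Sigma sigma = new Sigma(new double[] { 0.5, 0.5, 0.5 });
     * }</pre>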
     */
    public static class Sigma implements OptimizationData {
        /** Sigma values. */
        private final double[] s;

        /** Simple constructor.
         * @param s Sigma values.
         * @throws MathIllegalArgumentException if any of the array entries is smaller
         * than zero.
         */
        public Sigma(double[] s)
            throws MathIllegalArgumentException {
            for (int i = 0; i < s.length; i++) {
                if (s[i] < 0) {
                    throw new MathIllegalArgumentException(LocalizedCoreFormats.NUMBER_TOO_SMALL, s[i], 0);
                }
            }

            this.s = s.clone();
        }

        /** Get sigma values.
         * @return the sigma values
         */
        public double[] getSigma() {
            return s.clone();
        }
    }

    /**
     * Population size.
     * The number of offspring is the primary strategy parameter.
     * In the absence of better clues, a good default could be an
     * integer close to {@code 4 + 3 ln(n)}, where {@code n} is the
     * number of optimized parameters.
     * Increasing the population size improves global search properties
     * at the expense of speed (which in general decreases at most
     * linearly with increasing population size).
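     * <p>
     * A sketch of that suggested default (illustrative only):
     * <pre>{@code
     * int n = 10;                                   // number of optimized parameters
     * int lambda = 4 + (int) (3 * FastMath.log(n)); // suggested population size
     * PopulationSize popSize = new PopulationSize(lambda);
     * }</pre>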
     */
    public static class PopulationSize implements OptimizationData {
        /** Population size. */
        private final int lambda;

        /** Simple constructor.
         * @param size Population size.
         * @throws MathIllegalArgumentException if {@code size <= 0}.
         */
        public PopulationSize(int size)
            throws MathIllegalArgumentException {
            if (size <= 0) {
                throw new MathIllegalArgumentException(LocalizedCoreFormats.NUMBER_TOO_SMALL_BOUND_EXCLUDED,
                                                       size, 0);
            }
            lambda = size;
        }

        /** Get population size.
         * @return the population size
         */
        public int getPopulationSize() {
            return lambda;
        }
    }

    /**
     * {@inheritDoc}
     *
     * @param optData Optimization data. In addition to those documented in
     * {@link MultivariateOptimizer#parseOptimizationData(OptimizationData[])
     * MultivariateOptimizer}, this method will register the following data:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     * @return {@inheritDoc}
     * @throws MathIllegalStateException if the maximal number of
     * evaluations is exceeded.
     * @throws MathIllegalArgumentException if the initial guess, target, and weight
     * arguments have inconsistent dimensions.
     */
    @Override
    public PointValuePair optimize(OptimizationData... optData)
        throws MathIllegalArgumentException, MathIllegalStateException {
        // Set up base class and perform computation.
        return super.optimize(optData);
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        // -------------------- Initialization --------------------------------
        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
        final FitnessFunction fitfun = new FitnessFunction();
        final double[] guess = getStartPoint();
        // number of objective variables/problem dimension
        dimension = guess.length;
        initializeCMA(guess);
        iterations = 0;
        ValuePenaltyPair valuePenalty = fitfun.value(guess);
        double bestValue = valuePenalty.value + valuePenalty.penalty;
        push(fitnessHistory, bestValue);
        PointValuePair optimum
            = new PointValuePair(getStartPoint(),
                                 isMinimize ? bestValue : -bestValue);
        PointValuePair lastResult = null;

        // -------------------- Generation Loop --------------------------------

        generationLoop:
        for (iterations = 1; iterations <= maxIterations; iterations++) {
            incrementIterationCount();

            // Generate and evaluate lambda offspring
            final RealMatrix arz = randn1(dimension, lambda);
            final RealMatrix arx = zeros(dimension, lambda);
            final double[] fitness = new double[lambda];
            final ValuePenaltyPair[] valuePenaltyPairs = new ValuePenaltyPair[lambda];
            // generate random offspring
            for (int k = 0; k < lambda; k++) {
                RealMatrix arxk = null;
                for (int i = 0; i < checkFeasableCount + 1; i++) {
                    if (diagonalOnly <= 0) {
                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                    } else {
                        arxk = xmean.add(times(diagD, arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma));
                    }
                    if (i >= checkFeasableCount ||
                        fitfun.isFeasible(arxk.getColumn(0))) {
                        break;
                    }
                    // regenerate random arguments for row
                    arz.setColumn(k, randn(dimension));
                }
                copyColumn(arxk, 0, arx, k);
                try {
                    valuePenaltyPairs[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                } catch (MathIllegalStateException e) {
                    break generationLoop;
                }
            }

            // Compute fitnesses by adding value and penalty after scaling by value range.
            double valueRange = valueRange(valuePenaltyPairs);
            for (int iValue = 0; iValue < valuePenaltyPairs.length; iValue++) {
                fitness[iValue] = valuePenaltyPairs[iValue].value + valuePenaltyPairs[iValue].penalty * valueRange;
            }

            // Sort by fitness and compute weighted mean into xmean
            final int[] arindex = sortedIndices(fitness);
            // Calculate new xmean, this is selection and recombination
            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
            final RealMatrix bestArx = selectColumns(arx, Arrays.copyOf(arindex, mu));
            xmean = bestArx.multiply(weights);
            final RealMatrix bestArz = selectColumns(arz, Arrays.copyOf(arindex, mu));
            final RealMatrix zmean = bestArz.multiply(weights);
            final boolean hsig = updateEvolutionPaths(zmean, xold);
            if (diagonalOnly <= 0) {
                updateCovariance(hsig, bestArx, arz, arindex, xold);
            } else {
                updateCovarianceDiagonalOnly(hsig, bestArz);
            }
            // Adapt step size sigma - Eq. (5)
            sigma *= FastMath.exp(FastMath.min(1, (normps / chiN - 1) * cs / damps));
            final double bestFitness = fitness[arindex[0]];
            final double worstFitness = fitness[arindex[arindex.length - 1]];
            if (bestValue > bestFitness) {
                bestValue = bestFitness;
                lastResult = optimum;
                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
                                             isMinimize ? bestFitness : -bestFitness);
                if (getConvergenceChecker() != null && lastResult != null &&
                    getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                    break generationLoop;
                }
            }
            // handle termination criteria
            // Break, if fitness is good enough
            if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
                break generationLoop;
            }
            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
            final double[] pcCol = pc.getColumn(0);
            for (int i = 0; i < dimension; i++) {
                if (sigma * FastMath.max(FastMath.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                    break;
                }
                if (i >= dimension - 1) {
                    break generationLoop;
                }
            }
            for (int i = 0; i < dimension; i++) {
                if (sigma * sqrtDiagC[i] > stopTolUpX) {
                    break generationLoop;
                }
            }
            final double historyBest = min(fitnessHistory);
            final double historyWorst = max(fitnessHistory);
            if (iterations > 2 &&
                FastMath.max(historyWorst, worstFitness) -
                FastMath.min(historyBest, bestFitness) < stopTolFun) {
                break generationLoop;
            }
            if (iterations > fitnessHistory.length &&
                historyWorst - historyBest < stopTolHistFun) {
                break generationLoop;
            }
            // condition number of the covariance matrix exceeds 1e14
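            // (diagD holds the standard deviations, i.e. the square roots of the
            // eigenvalues of C, so the ratio is compared against 1e7 = sqrt(1e14))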
            if (max(diagD) / min(diagD) > 1e7) {
                break generationLoop;
            }
            // user defined termination
            if (getConvergenceChecker() != null) {
                final PointValuePair current
                    = new PointValuePair(bestArx.getColumn(0),
                                         isMinimize ? bestFitness : -bestFitness);
                if (lastResult != null &&
                    getConvergenceChecker().converged(iterations, current, lastResult)) {
                    break generationLoop;
                }
                lastResult = current;
            }
            // Adjust step size in case of equal function values (flat fitness)
            if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
                sigma *= FastMath.exp(0.2 + cs / damps);
            }
            if (iterations > 2 && FastMath.max(historyWorst, bestFitness) -
                FastMath.min(historyBest, bestFitness) == 0) {
                sigma *= FastMath.exp(0.2 + cs / damps);
            }
            // store best in history
            push(fitnessHistory, bestFitness);
            if (generateStatistics) {
                statisticsSigmaHistory.add(sigma);
                statisticsFitnessHistory.add(bestFitness);
                statisticsMeanHistory.add(xmean.transpose());
                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
            }
        }
        return optimum;
    }

    /**
     * Scans the list of (required and optional) optimization data that
     * characterize the problem.
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     */
    @Override
    protected void parseOptimizationData(OptimizationData... optData) {
        // Allow base class to register its own data.
        super.parseOptimizationData(optData);

        // The existing values (as set by the previous call) are reused if
        // not provided in the argument list.
        for (OptimizationData data : optData) {
            if (data instanceof Sigma) {
                inputSigma = ((Sigma) data).getSigma();
                continue;
            }
            if (data instanceof PopulationSize) {
                lambda = ((PopulationSize) data).getPopulationSize();
                continue;
            }
        }

        checkParameters();
    }

    /**
     * Checks dimensions and values of boundaries and inputSigma if defined.
     */
    private void checkParameters() {
        if (inputSigma != null) {
            final double[] init = getStartPoint();

            if (inputSigma.length != init.length) {
                throw new MathIllegalArgumentException(LocalizedCoreFormats.DIMENSIONS_MISMATCH,
                                                       inputSigma.length, init.length);
            }

            final double[] lB = getLowerBound();
            final double[] uB = getUpperBound();

            for (int i = 0; i < init.length; i++) {
                if (inputSigma[i] > uB[i] - lB[i]) {
                    throw new MathIllegalArgumentException(LocalizedCoreFormats.OUT_OF_RANGE_SIMPLE,
                                                           inputSigma[i], 0, uB[i] - lB[i]);
                }
            }
        }
    }

    /**
     * Initialization of the dynamic search parameters.
     *
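     * In particular, the recombination weights are initialized as
     * \(w_i = \ln(\mu + 0.5) - \ln i,\; i = 1 \dots \mu\), and then normalized
     * so that they sum up to one, as implemented below.
     *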
     * @param guess Initial guess for the arguments of the fitness function.
     */
    private void initializeCMA(double[] guess) {
        if (lambda <= 0) {
            throw new MathIllegalArgumentException(LocalizedCoreFormats.NUMBER_TOO_SMALL_BOUND_EXCLUDED,
                                                   lambda, 0);
        }
        // initialize sigma
        final double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            sigmaArray[i][0] = inputSigma[i];
        }
        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = FastMath.log(mu + 0.5);
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1 / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4 + mueff / dimension) /
                (dimension + 4 + 2 * mueff / dimension);
        cs = (mueff + 2) / (dimension + mueff + 3.);
        damps = (1 + 2 * FastMath.max(0, FastMath.sqrt((mueff - 1) /
                                                       (dimension + 1)) - 1)) *
            FastMath.max(0.3,
                         1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = FastMath.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
                              ((dimension + 2) * (dimension + 2) + mueff));
        ccov1Sep = FastMath.min(1, ccov1 * (dimension + 1.5) / 3);
        ccovmuSep = FastMath.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
        chiN = FastMath.sqrt(dimension) *
                (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
        diagD = insigma.scalarMultiply(1 / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution paths for C and sigma
        ps = zeros(dimension, 1); // B defines the coordinate system
        normps = ps.getFrobeniusNorm();

        B = eye(dimension, dimension);
        D = ones(dimension, 1); // diagonal D defines the scaling
        BD = times(B, repmat(diagD.transpose(), dimension, 1));
        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
        final int historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
        fitnessHistory = new double[historySize]; // history of fitness values
        for (int i = 0; i < historySize; i++) {
            fitnessHistory[i] = Double.MAX_VALUE;
        }
    }

    /**
     * Update of the evolution paths ps and pc.
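     * <p>
     * The step-size path is updated as
     * \(p_\sigma \leftarrow (1 - c_s)\, p_\sigma + \sqrt{c_s (2 - c_s)\, \mu_{eff}}\; B\, z_{mean}\)
     * and, when the returned flag \(h_\sigma\) holds, the covariance path as
     * \(p_c \leftarrow (1 - c_c)\, p_c + \sqrt{c_c (2 - c_c)\, \mu_{eff}}\, (x_{mean} - x_{old}) / \sigma\),
     * as implemented below.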
     *
     * @param zmean Weighted row matrix of the Gaussian random numbers generating
     * the current offspring.
     * @param xold xmean matrix of the previous generation.
     * @return hsig flag indicating a small correction.
     */
    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
        ps = ps.scalarMultiply(1 - cs).add(
                B.multiply(zmean).scalarMultiply(
                        FastMath.sqrt(cs * (2 - cs) * mueff)));
        normps = ps.getFrobeniusNorm();
        final boolean hsig = normps /
            FastMath.sqrt(1 - FastMath.pow(1 - cs, 2 * iterations)) /
            chiN < 1.4 + 2 / ((double) dimension + 1);
        pc = pc.scalarMultiply(1 - cc);
        if (hsig) {
            pc = pc.add(xmean.subtract(xold).scalarMultiply(FastMath.sqrt(cc * (2 - cc) * mueff) / sigma));
        }
        return hsig;
    }

    /**
     * Update of the covariance matrix C for diagonalOnly > 0.
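     * <p>
     * Only the diagonal of C is adapted, combining the decayed old diagonal, a
     * rank-one term built from \(p_c^2\) (weighted by {@code ccov1Sep}) and a
     * rank-mu term built from the squared sorted random offspring (weighted by
     * {@code ccovmuSep}), as implemented below.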
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArz Fitness-sorted matrix of the Gaussian random values of the
     * current offspring.
     */
    private void updateCovarianceDiagonalOnly(boolean hsig,
                                              final RealMatrix bestArz) {
        // minor correction if hsig==false
        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
        oldFac += 1 - ccov1Sep - ccovmuSep;
        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
            .add(times(diagC, square(bestArz).multiply(weights)) // plus rank mu update
                 .scalarMultiply(ccovmuSep));
        diagD = sqrt(diagC); // replaces eig(C)
        if (diagonalOnly > 1 &&
            iterations > diagonalOnly) {
            // full covariance matrix from now on
            diagonalOnly = 0;
            B = eye(dimension, dimension);
            BD = diag(diagD);
            C = diag(diagC);
        }
    }

    /**
     * Update of the covariance matrix C.
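     * <p>
     * In the non-active case this is the standard rank-one plus rank-mu update
     * \(C \leftarrow (1 - c_{cov1} - c_{cov\mu})\, C + c_{cov1}\, p_c p_c^T
     * + c_{cov\mu} \sum_{i=1}^{\mu} w_i\, y_i y_i^T\)
     * with \(y_i = (x_{i:\lambda} - x_{old}) / \sigma\); active CMA additionally
     * subtracts a weighted update built from the worst offspring, as implemented below.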
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
     * current offspring.
     * @param arz Unsorted matrix containing the Gaussian random values of the
     * current offspring.
     * @param arindex Indices indicating the fitness-order of the current offspring.
     * @param xold xmean matrix of the previous generation.
     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
                                  final RealMatrix arz, final int[] arindex,
                                  final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                .scalarMultiply(1 / sigma); // mu difference vectors
            final RealMatrix roneu = pc.multiplyTransposed(pc)
                .scalarMultiply(ccov1); // rank one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
            oldFac += 1 - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C active CMA
                negccov = (1 - ccovmu) * 0.25 * mueff /
                    (FastMath.pow(dimension + 2, 1.5) + 2 * mueff);
                // keep at least 0.66 in all directions, small popsize are most
                // critical
                final double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                final double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                final int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg = selectColumns(arz, Arrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                final int[] idxReverse = reverse(idxnorms);
                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                final int[] idxInv = inverse(idxnorms);
                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                final double negcovMax = (1 - negminresidualvariance) /
                    square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                final RealMatrix artmp = BD.multiply(arzneg);
                final RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply( // plus rank mu update
                                              ccovmu + (1 - negalphaold) * negccov)
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())))
                    .subtract(Cneg.scalarMultiply(negccov));
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())));
            }
        }
        updateBD(negccov);
    }

    /**
     * Update B and D from C.
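     * <p>
     * The iteration-count test mirrors the lazy update schedule of the reference
     * Matlab code, which refreshes the costly eigendecomposition only every so
     * many generations, so that the average cost per generation stays at O(n^2).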
     *
     * @param negccov Negative covariance factor.
     */
    private void updateBD(double negccov) {
        if (ccov1 + ccovmu + negccov > 0 &&
            (iterations % 1. / (ccov1 + ccovmu + negccov) / dimension / 10.) < 1) {
            // to achieve O(N^2)
            C = triu(C, 0).add(triu(C, 1).transpose());
            // enforce symmetry to prevent complex numbers
            final EigenDecompositionSymmetric eig = new EigenDecompositionSymmetric(C);
            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
            D = eig.getD();
            diagD = diag(D);
            if (min(diagD) <= 0) {
                for (int i = 0; i < dimension; i++) {
                    if (diagD.getEntry(i, 0) < 0) {
                        diagD.setEntry(i, 0, 0);
                    }
                }
                final double tfac = max(diagD) / 1e14;
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            if (max(diagD) > 1e14 * min(diagD)) {
                final double tfac = max(diagD) / 1e14 - min(diagD);
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            diagC = diag(C);
            diagD = sqrt(diagD); // D contains standard deviations now
            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
        }
    }

    /**
     * Pushes the current best fitness value in a history queue.
     *
     * @param vals History queue.
     * @param val Current best fitness value.
     */
    private static void push(double[] vals, double val) {
        for (int i = vals.length - 1; i > 0; i--) {
            vals[i] = vals[i - 1];
        }
        vals[0] = val;
    }

    /**
     * Sorts fitness values.
     *
     * @param doubles Array of values to be sorted.
     * @return a sorted array of indices pointing into doubles.
     */
    private int[] sortedIndices(final double[] doubles) {
        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            dis[i] = new DoubleIndex(doubles[i], i);
        }
        Arrays.sort(dis);
        final int[] indices = new int[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            indices[i] = dis[i].index;
        }
        return indices;
    }

    /**
     * Get range of values.
     *
     * @param vpPairs Array of valuePenaltyPairs to get range from.
     * @return a double equal to maximum value minus minimum value.
     */
    private double valueRange(final ValuePenaltyPair[] vpPairs) {
        double max = Double.NEGATIVE_INFINITY;
        double min = Double.MAX_VALUE;
        for (ValuePenaltyPair vpPair : vpPairs) {
            if (vpPair.value > max) {
                max = vpPair.value;
            }
            if (vpPair.value < min) {
                min = vpPair.value;
            }
        }
        return max - min;
    }

    /**
     * Used to sort fitness values. Sorting is always in lower value first
     * order.
     */
    private static class DoubleIndex implements Comparable<DoubleIndex> {
        /** Value to compare. */
        private final double value;
        /** Index into sorted array. */
        private final int index;

        /**
         * @param value Value to compare.
         * @param index Index into sorted array.
         */
        DoubleIndex(double value, int index) {
            this.value = value;
            this.index = index;
        }

        /** {@inheritDoc} */
        @Override
        public int compareTo(DoubleIndex o) {
            return Double.compare(value, o.value);
        }

        /** {@inheritDoc} */
        @Override
        public boolean equals(Object other) {

            if (this == other) {
                return true;
            }

            if (other instanceof DoubleIndex) {
                return Double.compare(value, ((DoubleIndex) other).value) == 0;
            }

            return false;
        }

        /** {@inheritDoc} */
        @Override
        public int hashCode() {
            long bits = Double.doubleToLongBits(value);
            return (int) ((1438542 ^ (bits >>> 32) ^ bits));
        }
    }

    /**
     * Stores the value and penalty (for repair of out of bounds point).
     */
    private static class ValuePenaltyPair {
        /** Objective function value. */
        private double value;
        /** Penalty value for repair of out-of-bounds points. */
        private double penalty;

        /**
         * @param value Function value.
         * @param penalty Out-of-bounds penalty.
         */
        ValuePenaltyPair(final double value, final double penalty) {
            this.value   = value;
            this.penalty = penalty;
        }
    }

    /**
     * Evaluates the objective function, repairing out-of-bounds points when
     * needed and adding a penalty to the fitness value in that case.
     */
    private class FitnessFunction {
        /**
         * Flag indicating whether the objective variables are forced into their
         * bounds if defined.
         */
        private final boolean isRepairMode;

        /** Simple constructor.
         */
        FitnessFunction() {
            isRepairMode = true;
        }

        /**
         * @param point Normalized objective variables.
         * @return the objective value + penalty for violated bounds.
         */
        public ValuePenaltyPair value(final double[] point) {
            double value;
            double penalty = 0.0;
            if (isRepairMode) {
                double[] repaired = repair(point);
                value = CMAESOptimizer.this.computeObjectiveValue(repaired);
                penalty = penalty(point, repaired);
            } else {
                value = CMAESOptimizer.this.computeObjectiveValue(point);
            }
            value = isMinimize ? value : -value;
            penalty = isMinimize ? penalty : -penalty;
            return new ValuePenaltyPair(value, penalty);
        }

        /**
         * @param x Normalized objective variables.
         * @return {@code true} if in bounds.
         */
        public boolean isFeasible(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    return false;
                }
                if (x[i] > uB[i]) {
                    return false;
                }
            }
            return true;
        }

        /**
         * @param x Normalized objective variables.
         * @return the repaired (i.e. all in bounds) objective variables.
         */
        private double[] repair(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            final double[] repaired = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    repaired[i] = lB[i];
                } else if (x[i] > uB[i]) {
                    repaired[i] = uB[i];
                } else {
                    repaired[i] = x[i];
                }
            }
            return repaired;
        }

        /**
         * @param x Normalized objective variables.
         * @param repaired Repaired objective variables.
         * @return Penalty value according to the violation of the bounds.
         */
        private double penalty(final double[] x, final double[] repaired) {
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = FastMath.abs(x[i] - repaired[i]);
                penalty += diff;
            }
            return isMinimize ? penalty : -penalty;
        }
    }

    // -----Matrix utility functions similar to the Matlab built-in functions------

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise logarithm of m.
     */
    private static RealMatrix log(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = FastMath.log(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square root of m.
     */
    private static RealMatrix sqrt(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = FastMath.sqrt(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square of m.
     */
    private static RealMatrix square(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                d[r][c] = e * e;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return the matrix where the elements of m and n are element-wise multiplied.
     */
    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return Matrix where the elements of m and n are element-wise divided.
     */
    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param cols Columns to select.
     * @return Matrix representing the selected columns.
     */
    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
        final double[][] d = new double[m.getRowDimension()][cols.length];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < cols.length; c++) {
                d[r][c] = m.getEntry(r, cols[c]);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param k Diagonal position.
     * @return Upper triangular part of matrix.
     */
    private static RealMatrix triu(final RealMatrix m, int k) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Row matrix containing, for each column, the sum over all rows.
     */
    private static RealMatrix sumRows(final RealMatrix m) {
        final double[][] d = new double[1][m.getColumnDimension()];
        for (int c = 0; c < m.getColumnDimension(); c++) {
            double sum = 0;
            for (int r = 0; r < m.getRowDimension(); r++) {
                sum += m.getEntry(r, c);
            }
            d[0][c] = sum;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the diagonal n-by-n matrix if m is a column matrix or the column
     * matrix representing the diagonal if m is an n-by-n matrix.
     */
    private static RealMatrix diag(final RealMatrix m) {
        if (m.getColumnDimension() == 1) {
            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
            for (int i = 0; i < m.getRowDimension(); i++) {
                d[i][i] = m.getEntry(i, 0);
            }
            return new Array2DRowRealMatrix(d, false);
        } else {
            final double[][] d = new double[m.getRowDimension()][1];
            for (int i = 0; i < m.getColumnDimension(); i++) {
                d[i][0] = m.getEntry(i, i);
            }
            return new Array2DRowRealMatrix(d, false);
        }
    }

    /**
     * Copies a column from m1 to m2.
     *
     * @param m1 Source matrix.
     * @param col1 Source column.
     * @param m2 Target matrix.
     * @param col2 Target column.
     */
    private static void copyColumn(final RealMatrix m1, int col1,
                                   RealMatrix m2, int col2) {
        for (int i = 0; i < m1.getRowDimension(); i++) {
            m2.setEntry(i, col2, m1.getEntry(i, col1));
        }
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix filled with 1.
     */
    private static RealMatrix ones(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            Arrays.fill(d[r], 1);
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of 0 values out of diagonal, and 1 values on
     * the diagonal.
     */
    private static RealMatrix eye(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            if (r < m) {
                d[r][r] = 1;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of zero values.
     */
    private static RealMatrix zeros(int n, int m) {
        return new Array2DRowRealMatrix(n, m);
    }

    /**
     * @param mat Input matrix.
     * @param n Number of row replicates.
     * @param m Number of column replicates.
     * @return a matrix which replicates the input matrix in both directions.
     */
    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
        final int rd = mat.getRowDimension();
        final int cd = mat.getColumnDimension();
        final double[][] d = new double[n * rd][m * cd];
        for (int r = 0; r < n * rd; r++) {
            for (int c = 0; c < m * cd; c++) {
                d[r][c] = mat.getEntry(r % rd, c % cd);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param start Start value.
     * @param end End value.
     * @param step Step size.
     * @return a sequence as column matrix.
     */
    private static RealMatrix sequence(double start, double end, double step) {
        final int size = (int) ((end - start) / step + 1);
        final double[][] d = new double[size][1];
        double value = start;
        for (int r = 0; r < size; r++) {
            d[r][0] = value;
            value += step;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the maximum of the matrix element values.
     */
    private static double max(final RealMatrix m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (max < e) {
                    max = e;
                }
            }
        }
        return max;
    }

    /**
     * @param m Input matrix.
     * @return the minimum of the matrix element values.
     */
    private static double min(final RealMatrix m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (min > e) {
                    min = e;
                }
            }
        }
        return min;
    }

    /**
     * @param m Input array.
     * @return the maximum of the array values.
     */
    private static double max(final double[] m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (max < m[r]) {
                max = m[r];
            }
        }
        return max;
    }

    /**
     * @param m Input array.
     * @return the minimum of the array values.
     */
    private static double min(final double[] m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (min > m[r]) {
                min = m[r];
            }
        }
        return min;
    }

    /**
     * @param indices Input index array.
     * @return the inverse of the mapping defined by indices.
     */
    private static int[] inverse(final int[] indices) {
        final int[] inverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            inverse[indices[i]] = i;
        }
        return inverse;
    }

    /**
     * @param indices Input index array.
     * @return the indices in inverse order (last is first).
     */
    private static int[] reverse(final int[] indices) {
        final int[] reverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            reverse[i] = indices[indices.length - i - 1];
        }
        return reverse;
    }

    /**
     * @param size Length of random array.
     * @return an array of Gaussian random numbers.
     */
    private double[] randn(int size) {
        final double[] randn = new double[size];
        for (int i = 0; i < size; i++) {
            randn[i] = random.nextGaussian();
        }
        return randn;
    }

    /**
     * @param size Number of rows.
     * @param popSize Population size.
     * @return a 2-dimensional matrix of Gaussian random numbers.
     */
    private RealMatrix randn1(int size, int popSize) {
        final double[][] d = new double[size][popSize];
        for (int r = 0; r < size; r++) {
            for (int c = 0; c < popSize; c++) {
                d[r][c] = random.nextGaussian();
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
}
1362 }