/*
 * Licensed to the Hipparchus project under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The Hipparchus project licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.hipparchus.ode.nonstiff;

import org.hipparchus.exception.MathIllegalArgumentException;
import org.hipparchus.linear.Array2DRowRealMatrix;
import org.hipparchus.linear.RealMatrix;
import org.hipparchus.ode.EquationsMapper;
import org.hipparchus.ode.ODEStateAndDerivative;
import org.hipparchus.ode.nonstiff.interpolators.AdamsStateInterpolator;
import org.hipparchus.util.FastMath;


/**
 * This class implements explicit Adams-Bashforth integrators for Ordinary
 * Differential Equations.
 *
 * <p>Adams-Bashforth methods (in fact due to Adams alone) are explicit
 * multistep ODE solvers. This implementation is a variation of the classical
 * one: it uses adaptive stepsize to implement error control, whereas
 * classical implementations are fixed step size. The value of the state vector
 * at step n+1 is a simple combination of the value at step n and of the
 * derivatives at steps n, n-1, n-2 ... Depending on the number k of previous
 * steps one wants to use for computing the next value, different formulas
 * are available:</p>
 * <ul>
 *   <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + h y'<sub>n</sub></li>
 *   <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + h (3y'<sub>n</sub>-y'<sub>n-1</sub>)/2</li>
 *   <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + h (23y'<sub>n</sub>-16y'<sub>n-1</sub>+5y'<sub>n-2</sub>)/12</li>
 *   <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + h (55y'<sub>n</sub>-59y'<sub>n-1</sub>+37y'<sub>n-2</sub>-9y'<sub>n-3</sub>)/24</li>
 *   <li>...</li>
 * </ul>
 *
 * <p>A k-steps Adams-Bashforth method is of order k.</p>
 *
 * <p>There must be sufficient time for the {@link #setStarterIntegrator(org.hipparchus.ode.ODEIntegrator)
 * starter integrator} to take several steps between the last reset event and the end
 * of integration, otherwise an exception may be thrown during integration. The user can
 * adjust the end date of integration or the step size of the starter integrator to
 * ensure a sufficient number of steps can be completed before the end of integration.
 * </p>
 *
 * <p><strong>Implementation details</strong></p>
 *
 * <p>We define scaled derivatives s<sub>i</sub>(n) at step n as:
 * \[
 *   \left\{\begin{align}
 *   s_1(n) &= h y'_n \text{ for first derivative}\\
 *   s_2(n) &= \frac{h^2}{2} y_n'' \text{ for second derivative}\\
 *   s_3(n) &= \frac{h^3}{6} y_n''' \text{ for third derivative}\\
 *   &\cdots\\
 *   s_k(n) &= \frac{h^k}{k!} y_n^{(k)} \text{ for } k^\mathrm{th} \text{ derivative}
 *   \end{align}\right.
 * \]</p>
 *
 * <p>The definitions above use the classical representation with several previous first
 * derivatives. Let us define
 * \[
 *   q_n = [ s_1(n-1) s_1(n-2) \ldots s_1(n-(k-1)) ]^T
 * \]
 * (we omit the k index in the notation for clarity). With these definitions,
 * Adams-Bashforth methods can be written:</p>
 * \[
 *   \left\{\begin{align}
 *   k = 1: & y_{n+1} = y_n + s_1(n) \\
 *   k = 2: & y_{n+1} = y_n + \frac{3}{2} s_1(n) + [ \frac{-1}{2} ] q_n \\
 *   k = 3: & y_{n+1} = y_n + \frac{23}{12} s_1(n) + [ \frac{-16}{12} \frac{5}{12} ] q_n \\
 *   k = 4: & y_{n+1} = y_n + \frac{55}{24} s_1(n) + [ \frac{-59}{24} \frac{37}{24} \frac{-9}{24} ] q_n \\
 *   & \cdots
 *   \end{align}\right.
 * \]
 *
 * <p>Instead of using the classical representation with first derivatives only (y<sub>n</sub>,
 * s<sub>1</sub>(n) and q<sub>n</sub>), our implementation uses the Nordsieck vector with
 * higher degree scaled derivatives all taken at the same step (y<sub>n</sub>, s<sub>1</sub>(n)
 * and r<sub>n</sub>) where r<sub>n</sub> is defined as:
 * \[
 * r_n = [ s_2(n), s_3(n) \ldots s_k(n) ]^T
 * \]
 * (here again we omit the k index in the notation for clarity)
 * </p>
 *
 * <p>Taylor series formulas show that for any index offset i, s<sub>1</sub>(n-i) can be
 * computed from s<sub>1</sub>(n), s<sub>2</sub>(n) ... s<sub>k</sub>(n), the formula being exact
 * for degree k polynomials.
 * \[
 * s_1(n-i) = s_1(n) + \sum_{j\gt 0} (j+1) (-i)^j s_{j+1}(n)
 * \]
 * The previous formula can be used with several values for i to compute the transform between
 * classical representation and Nordsieck vector. The transform between r<sub>n</sub>
 * and q<sub>n</sub> resulting from the Taylor series formulas above is:
 * \[
 * q_n = s_1(n) u + P r_n
 * \]
 * where u is the [ 1 1 ... 1 ]<sup>T</sup> vector and P is the (k-1)×(k-1) matrix built
 * with the \((j+1) (-i)^j\) terms with i being the row number starting from 1 and j being
 * the column number starting from 1:
 * \[
 * P=\begin{bmatrix}
 *   -2 &  3 &   -4 &    5 & \ldots \\
 *   -4 & 12 &  -32 &   80 & \ldots \\
 *   -6 & 27 & -108 &  405 & \ldots \\
 *   -8 & 48 & -256 & 1280 & \ldots \\
 *      &    & \ldots\\
 * \end{bmatrix}
 * \]
 * </p>
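 *
 * <p>As a check on this definition, the entries of the second row of P (i = 2) are
 * \(2 \times (-2)^1 = -4\), \(3 \times (-2)^2 = 12\), \(4 \times (-2)^3 = -32\) and
 * \(5 \times (-2)^4 = 80\), which matches the matrix displayed above.</p>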
 *
 * <p>Using the Nordsieck vector has several advantages:</p>
 * <ul>
 *   <li>it greatly simplifies step interpolation as the interpolator mainly applies
 *   Taylor series formulas,</li>
 *   <li>it simplifies step changes that occur when discrete events that truncate
 *   the step are triggered,</li>
 *   <li>it allows the methods to be extended in order to support adaptive stepsize.</li>
 * </ul>
 *
 * <p>The Nordsieck vector at step n+1 is computed from the Nordsieck vector at step n as follows:</p>
 * <ul>
 *   <li>y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n) + u<sup>T</sup> r<sub>n</sub></li>
 *   <li>s<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, y<sub>n+1</sub>)</li>
 *   <li>r<sub>n+1</sub> = (s<sub>1</sub>(n) - s<sub>1</sub>(n+1)) P<sup>-1</sup> u + P<sup>-1</sup> A P r<sub>n</sub></li>
 * </ul>
 * <p>where A is a rows shifting matrix (the lower left part is an identity matrix):</p>
 * <pre>
 *        [ 0 0   ...  0 0 | 0 ]
 *        [ ---------------+---]
 *        [ 1 0   ...  0 0 | 0 ]
 *    A = [ 0 1   ...  0 0 | 0 ]
 *        [       ...      | 0 ]
 *        [ 0 0   ...  1 0 | 0 ]
 *        [ 0 0   ...  0 1 | 0 ]
 * </pre>
 *
 * <p>The P<sup>-1</sup>u vector and the P<sup>-1</sup> A P matrix do not depend on the state;
 * they only depend on k and therefore are precomputed once and for all.</p>
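 *
 * <p>As an illustration (a standalone sketch, not the integrator's actual code path), these
 * two quantities could be precomputed for a hypothetical number of steps {@code k} with the
 * Hipparchus linear algebra classes as follows:</p>
 * <pre>{@code
 * // sketch: build P, the shifting matrix A, then P^-1 u and P^-1 A P, for k steps
 * int size = k - 1;
 * double[][] pData = new double[size][size];
 * for (int i = 1; i <= size; ++i) {
 *     for (int j = 1; j <= size; ++j) {
 *         pData[i - 1][j - 1] = (j + 1) * FastMath.pow(-i, j); // (j+1) (-i)^j
 *     }
 * }
 * RealMatrix p = new Array2DRowRealMatrix(pData, false);
 *
 * double[][] aData = new double[size][size]; // shifting matrix A, ones on the sub-diagonal
 * for (int i = 1; i < size; ++i) {
 *     aData[i][i - 1] = 1.0;
 * }
 * RealMatrix a = new Array2DRowRealMatrix(aData, false);
 *
 * DecompositionSolver pSolver = new LUDecomposition(p).getSolver();
 * RealVector pInvU  = pSolver.solve(new ArrayRealVector(size, 1.0)); // P^-1 u
 * RealMatrix pInvAP = pSolver.solve(a.multiply(p));                  // P^-1 A P
 * }</pre>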
 *
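 * <p>A minimal usage sketch (the equation, tolerances, step bounds and number of steps below
 * are arbitrary placeholder values, not recommendations):</p>
 * <pre>{@code
 * // integrate y' = -y from t = 0 to t = 1 with a 4-step Adams-Bashforth scheme
 * OrdinaryDifferentialEquation ode = new OrdinaryDifferentialEquation() {
 *     public int getDimension() { return 1; }
 *     public double[] computeDerivatives(double t, double[] y) {
 *         return new double[] { -y[0] };
 *     }
 * };
 * ODEIntegrator integrator = new AdamsBashforthIntegrator(4, 1.0e-6, 0.1, 1.0e-10, 1.0e-10);
 * ODEStateAndDerivative finalState =
 *     integrator.integrate(ode, new ODEState(0.0, new double[] { 1.0 }), 1.0);
 * double yAtOne = finalState.getPrimaryState()[0]; // should be close to exp(-1)
 * }</pre>
 *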
 */
public class AdamsBashforthIntegrator extends AdamsIntegrator {

    /** Name of integration scheme. */
    public static final String METHOD_NAME = "Adams-Bashforth";

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param scalAbsoluteTolerance allowed absolute error
     * @param scalRelativeTolerance allowed relative error
     * @exception MathIllegalArgumentException if order is 1 or less
     */
    public AdamsBashforthIntegrator(final int nSteps,
                                    final double minStep, final double maxStep,
                                    final double scalAbsoluteTolerance,
                                    final double scalRelativeTolerance)
        throws MathIllegalArgumentException {
        super(METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              scalAbsoluteTolerance, scalRelativeTolerance);
    }

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param vecAbsoluteTolerance allowed absolute error
     * @param vecRelativeTolerance allowed relative error
     * @exception MathIllegalArgumentException if order is 1 or less
     */
    public AdamsBashforthIntegrator(final int nSteps,
                                    final double minStep, final double maxStep,
                                    final double[] vecAbsoluteTolerance,
                                    final double[] vecRelativeTolerance)
        throws MathIllegalArgumentException {
        super(METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              vecAbsoluteTolerance, vecRelativeTolerance);
    }

    /** {@inheritDoc} */
    @Override
    protected double errorEstimation(final double[] previousState, final double predictedTime,
                                     final double[] predictedState,
                                     final double[] predictedScaled,
                                     final RealMatrix predictedNordsieck) {

        final StepsizeHelper helper = getStepSizeHelper();
        double error = 0;
        for (int i = 0; i < helper.getMainSetDimension(); ++i) {
            final double tol = helper.getTolerance(i, FastMath.abs(predictedState[i]));

            // apply Taylor formula from high order to low order,
            // for the sake of numerical accuracy
            double variation = 0;
            int sign = predictedNordsieck.getRowDimension() % 2 == 0 ? -1 : 1;
            for (int k = predictedNordsieck.getRowDimension() - 1; k >= 0; --k) {
                variation += sign * predictedNordsieck.getEntry(k, i);
                sign = -sign;
            }
            variation -= predictedScaled[i];

            final double ratio = (predictedState[i] - previousState[i] + variation) / tol;
            error += ratio * ratio;

        }

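        // combine the per-component ratios into a single weighted RMS error measure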
        return FastMath.sqrt(error / helper.getMainSetDimension());

    }

    /** {@inheritDoc} */
    @Override
    protected AdamsStateInterpolator finalizeStep(final double stepSize, final double[] predictedState,
                                                  final double[] predictedScaled, final Array2DRowRealMatrix predictedNordsieck,
                                                  final boolean isForward,
                                                  final ODEStateAndDerivative globalPreviousState,
                                                  final ODEStateAndDerivative globalCurrentState,
                                                  final EquationsMapper equationsMapper) {
        return new AdamsStateInterpolator(getStepSize(), globalCurrentState,
                                          predictedScaled, predictedNordsieck, isForward,
                                          getStepStart(), globalCurrentState, equationsMapper);
    }

}