/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math3.ode.nonstiff;

import org.apache.commons.math3.exception.DimensionMismatchException;
import org.apache.commons.math3.exception.MaxCountExceededException;
import org.apache.commons.math3.exception.NoBracketingException;
import org.apache.commons.math3.exception.NumberIsTooSmallException;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.ode.EquationsMapper;
import org.apache.commons.math3.ode.ExpandableStatefulODE;
import org.apache.commons.math3.ode.sampling.NordsieckStepInterpolator;
import org.apache.commons.math3.util.FastMath;


/**
 * This class implements explicit Adams-Bashforth integrators for Ordinary
 * Differential Equations.
 *
 * <p>Adams-Bashforth methods (in fact due to Adams alone) are explicit
 * multistep ODE solvers. This implementation is a variation of the classical
 * one: it uses adaptive stepsize to implement error control, whereas
 * classical implementations are fixed step size. The value of the state vector
 * at step n+1 is a simple combination of the value at step n and of the
 * derivatives at steps n, n-1, n-2 ... Depending on the number k of previous
 * steps one wants to use for computing the next value, different formulas
 * are available:</p>
 * <ul>
 *   <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + h y'<sub>n</sub></li>
 *   <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + h (3y'<sub>n</sub>-y'<sub>n-1</sub>)/2</li>
 *   <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + h (23y'<sub>n</sub>-16y'<sub>n-1</sub>+5y'<sub>n-2</sub>)/12</li>
 *   <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + h (55y'<sub>n</sub>-59y'<sub>n-1</sub>+37y'<sub>n-2</sub>-9y'<sub>n-3</sub>)/24</li>
 *   <li>...</li>
 * </ul>
 *
 * <p>A k-step Adams-Bashforth method is of order k.</p>
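 *
 * <p>As a quick worked illustration of the k = 2 formula above (the numbers are
 * illustrative rounded values, not taken from any specific run): for y' = y with
 * h = 0.1, starting from the exact values y<sub>n</sub> = 1, y'<sub>n</sub> = 1 and
 * y'<sub>n-1</sub> = e<sup>-0.1</sup> ≈ 0.9048, one gets
 * y<sub>n+1</sub> = 1 + 0.1 (3 × 1 - 0.9048)/2 ≈ 1.1048, close to the exact value
 * e<sup>0.1</sup> ≈ 1.1052.</p>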
 *
 * <h3>Implementation details</h3>
 *
 * <p>We define scaled derivatives s<sub>i</sub>(n) at step n as:
 * <pre>
 * s<sub>1</sub>(n) = h y'<sub>n</sub> for first derivative
 * s<sub>2</sub>(n) = h<sup>2</sup>/2 y''<sub>n</sub> for second derivative
 * s<sub>3</sub>(n) = h<sup>3</sup>/6 y'''<sub>n</sub> for third derivative
 * ...
 * s<sub>k</sub>(n) = h<sup>k</sup>/k! y<sup>(k)</sup><sub>n</sub> for k<sup>th</sup> derivative
 * </pre></p>
 *
 * <p>The definitions above use the classical representation with several previous first
 * derivatives. Let us define
 * <pre>
 *   q<sub>n</sub> = [ s<sub>1</sub>(n-1) s<sub>1</sub>(n-2) ... s<sub>1</sub>(n-(k-1)) ]<sup>T</sup>
 * </pre>
 * (we omit the k index in the notation for clarity). With these definitions,
 * Adams-Bashforth methods can be written:
 * <ul>
 *   <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n)</li>
 *   <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + 3/2 s<sub>1</sub>(n) + [ -1/2 ] q<sub>n</sub></li>
 *   <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + 23/12 s<sub>1</sub>(n) + [ -16/12 5/12 ] q<sub>n</sub></li>
 *   <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + 55/24 s<sub>1</sub>(n) + [ -59/24 37/24 -9/24 ] q<sub>n</sub></li>
 *   <li>...</li>
 * </ul></p>
 *
 * <p>Instead of using the classical representation with first derivatives only (y<sub>n</sub>,
 * s<sub>1</sub>(n) and q<sub>n</sub>), our implementation uses the Nordsieck vector with
 * higher degree scaled derivatives all taken at the same step (y<sub>n</sub>, s<sub>1</sub>(n)
 * and r<sub>n</sub>) where r<sub>n</sub> is defined as:
 * <pre>
 * r<sub>n</sub> = [ s<sub>2</sub>(n), s<sub>3</sub>(n) ... s<sub>k</sub>(n) ]<sup>T</sup>
 * </pre>
 * (here again we omit the k index in the notation for clarity)
 * </p>
 *
 * <p>Taylor series formulas show that for any index offset i, s<sub>1</sub>(n-i) can be
 * computed from s<sub>1</sub>(n), s<sub>2</sub>(n) ... s<sub>k</sub>(n), the formula being exact
 * for degree k polynomials.
 * <pre>
 * s<sub>1</sub>(n-i) = s<sub>1</sub>(n) + ∑<sub>j>0</sub> (j+1) (-i)<sup>j</sup> s<sub>j+1</sub>(n)
 * </pre>
 * The previous formula can be used with several values for i to compute the transform between
 * classical representation and Nordsieck vector. The transform between r<sub>n</sub>
 * and q<sub>n</sub> resulting from the Taylor series formulas above is:
 * <pre>
 * q<sub>n</sub> = s<sub>1</sub>(n) u + P r<sub>n</sub>
 * </pre>
 * where u is the [ 1 1 ... 1 ]<sup>T</sup> vector and P is the (k-1)×(k-1) matrix built
 * with the (j+1) (-i)<sup>j</sup> terms with i being the row number starting from 1 and j being
 * the column number starting from 1:
 * <pre>
 *        [  -2   3   -4    5  ... ]
 *        [  -4  12  -32   80  ... ]
 *   P =  [  -6  27 -108  405  ... ]
 *        [  -8  48 -256 1280  ... ]
 *        [          ...           ]
 * </pre></p>
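 *
 * <p>As a check of this construction rule, the entry of P in row i = 2, column j = 3
 * is (j+1) (-i)<sup>j</sup> = 4 × (-2)<sup>3</sup> = -32, which indeed matches the
 * matrix shown above.</p>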
 *
 * <p>Using the Nordsieck vector has several advantages:
 * <ul>
 *   <li>it greatly simplifies step interpolation as the interpolator mainly applies
 *   Taylor series formulas,</li>
 *   <li>it simplifies step changes that occur when discrete events that truncate
 *   the step are triggered,</li>
 *   <li>it allows extending the methods in order to support adaptive stepsize.</li>
 * </ul></p>
 *
 * <p>The Nordsieck vector at step n+1 is computed from the Nordsieck vector at step n as follows:
 * <ul>
 *   <li>y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n) + u<sup>T</sup> r<sub>n</sub></li>
 *   <li>s<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, y<sub>n+1</sub>)</li>
 *   <li>r<sub>n+1</sub> = (s<sub>1</sub>(n) - s<sub>1</sub>(n+1)) P<sup>-1</sup> u + P<sup>-1</sup> A P r<sub>n</sub></li>
 * </ul>
 * where A is a rows shifting matrix (the lower left part is an identity matrix):
 * <pre>
 *        [ 0 0   ...  0 0 | 0 ]
 *        [ ---------------+---]
 *        [ 1 0   ...  0 0 | 0 ]
 *    A = [ 0 1   ...  0 0 | 0 ]
 *        [       ...      | 0 ]
 *        [ 0 0   ...  1 0 | 0 ]
 *        [ 0 0   ...  0 1 | 0 ]
 * </pre></p>
 *
 * <p>The P<sup>-1</sup>u vector and the P<sup>-1</sup> A P matrix do not depend on the state,
 * they only depend on k and therefore are precomputed once and for all.</p>
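 *
 * <p>As an illustration only (the step bounds and tolerances below are arbitrary sample
 * values, and the ODE is assumed to be supplied as a
 * {@link org.apache.commons.math3.ode.FirstOrderDifferentialEquations} implementation),
 * a typical use of this integrator could look like:
 * <pre>
 *   FirstOrderDifferentialEquations ode = new FirstOrderDifferentialEquations() {
 *       public int getDimension() { return 1; }
 *       public void computeDerivatives(double t, double[] y, double[] yDot) {
 *           yDot[0] = -y[0]; // y' = -y, exact solution y(t) = y(0) e^(-t)
 *       }
 *   };
 *   // 4-step method, min step 1.0e-8, max step 1.0, scalar tolerances 1.0e-10
 *   AdamsBashforthIntegrator integrator =
 *       new AdamsBashforthIntegrator(4, 1.0e-8, 1.0, 1.0e-10, 1.0e-10);
 *   double[] y = new double[] { 1.0 };
 *   integrator.integrate(ode, 0.0, y, 2.0, y); // integrate from t = 0 to t = 2
 * </pre></p>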
 *
 * @since 2.0
 */
public class AdamsBashforthIntegrator extends AdamsIntegrator {

    /** Integrator method name. */
    private static final String METHOD_NAME = "Adams-Bashforth";

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param scalAbsoluteTolerance allowed absolute error
     * @param scalRelativeTolerance allowed relative error
     * @exception NumberIsTooSmallException if order is 1 or less
     */
    public AdamsBashforthIntegrator(final int nSteps,
                                    final double minStep, final double maxStep,
                                    final double scalAbsoluteTolerance,
                                    final double scalRelativeTolerance)
        throws NumberIsTooSmallException {
        super(METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              scalAbsoluteTolerance, scalRelativeTolerance);
    }

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param vecAbsoluteTolerance allowed absolute error
     * @param vecRelativeTolerance allowed relative error
     * @exception IllegalArgumentException if order is 1 or less
     */
    public AdamsBashforthIntegrator(final int nSteps,
                                    final double minStep, final double maxStep,
                                    final double[] vecAbsoluteTolerance,
                                    final double[] vecRelativeTolerance)
        throws IllegalArgumentException {
        super(METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              vecAbsoluteTolerance, vecRelativeTolerance);
    }

    /** Estimate error.
     * <p>
     * The error is estimated by interpolating back to the previous state using
     * the state Taylor expansion and comparing it to the real previous state.
     * </p>
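     * <p>
     * More precisely (this restates what the loop below computes): for each component,
     * the Taylor expansion of the predicted state is evaluated one step backward as
     * <pre>
     * y<sub>n+1</sub> - s<sub>1</sub>(n+1) + ∑<sub>j>1</sub> (-1)<sup>j</sup> s<sub>j</sub>(n+1)
     * </pre>
     * the difference between this interpolated value and the known previous state
     * y<sub>n</sub> is divided by the error tolerance for that component, and the root
     * mean square of these ratios over all components is returned.
     * </p>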
     * @param previousState state vector at step start
     * @param predictedState predicted state vector at step end
     * @param predictedScaled predicted value of the scaled derivatives at step end
     * @param predictedNordsieck predicted value of the Nordsieck vector at step end
     * @return estimated normalized local discretization error
     */
    private double errorEstimation(final double[] previousState,
                                   final double[] predictedState,
                                   final double[] predictedScaled,
                                   final RealMatrix predictedNordsieck) {

        double error = 0;
        for (int i = 0; i < mainSetDimension; ++i) {
            final double yScale = FastMath.abs(predictedState[i]);
            final double tol = (vecAbsoluteTolerance == null) ?
                               (scalAbsoluteTolerance + scalRelativeTolerance * yScale) :
                               (vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * yScale);

            // apply Taylor formula from high order to low order,
            // for the sake of numerical accuracy
            double variation = 0;
            int sign = predictedNordsieck.getRowDimension() % 2 == 0 ? -1 : 1;
            for (int k = predictedNordsieck.getRowDimension() - 1; k >= 0; --k) {
                variation += sign * predictedNordsieck.getEntry(k, i);
                sign       = -sign;
            }
            variation -= predictedScaled[i];

            final double ratio  = (predictedState[i] - previousState[i] + variation) / tol;
            error              += ratio * ratio;

        }

        return FastMath.sqrt(error / mainSetDimension);

    }

    /** {@inheritDoc} */
    @Override
    public void integrate(final ExpandableStatefulODE equations, final double t)
        throws NumberIsTooSmallException, DimensionMismatchException,
               MaxCountExceededException, NoBracketingException {

        sanityChecks(equations, t);
        setEquations(equations);
        final boolean forward = t > equations.getTime();

        // initialize working arrays
        final double[] y    = equations.getCompleteState();
        final double[] yDot = new double[y.length];

        // set up an interpolator sharing the integrator arrays
        final NordsieckStepInterpolator interpolator = new NordsieckStepInterpolator();
        interpolator.reinitialize(y, forward,
                                  equations.getPrimaryMapper(), equations.getSecondaryMappers());

        // set up integration control objects
        initIntegration(equations.getTime(), y, t);

        // compute the initial Nordsieck vector using the configured starter integrator
        start(equations.getTime(), y, t);
        interpolator.reinitialize(stepStart, stepSize, scaled, nordsieck);
        interpolator.storeTime(stepStart);

        // reuse the step that was chosen by the starter integrator
        double hNew = stepSize;
        interpolator.rescale(hNew);

        // main integration loop
        isLastStep = false;
        do {

            interpolator.shift();
            final double[] predictedY      = new double[y.length];
            final double[] predictedScaled = new double[y.length];
            Array2DRowRealMatrix predictedNordsieck = null;
            double error = 10; // any value >= 1 ensures the correction loop is entered
            while (error >= 1.0) {

                // predict a first estimate of the state at step end
                final double stepEnd = stepStart + hNew;
                interpolator.storeTime(stepEnd);
                final ExpandableStatefulODE expandable = getExpandable();
                final EquationsMapper primary = expandable.getPrimaryMapper();
                primary.insertEquationData(interpolator.getInterpolatedState(), predictedY);
                int index = 0;
                for (final EquationsMapper secondary : expandable.getSecondaryMappers()) {
                    secondary.insertEquationData(interpolator.getInterpolatedSecondaryState(index), predictedY);
                    ++index;
                }

                // evaluate the derivative
                computeDerivatives(stepEnd, predictedY, yDot);

                // predict Nordsieck vector at step end
                for (int j = 0; j < predictedScaled.length; ++j) {
                    predictedScaled[j] = hNew * yDot[j];
                }
                predictedNordsieck = updateHighOrderDerivativesPhase1(nordsieck);
                updateHighOrderDerivativesPhase2(scaled, predictedScaled, predictedNordsieck);

                // evaluate error
                error = errorEstimation(y, predictedY, predictedScaled, predictedNordsieck);

                if (error >= 1.0) {
                    // reject the step and attempt to reduce error by stepsize control
                    final double factor = computeStepGrowShrinkFactor(error);
                    hNew = filterStep(hNew * factor, forward, false);
                    interpolator.rescale(hNew);

                }
            }

            stepSize = hNew;
            final double stepEnd = stepStart + stepSize;
            interpolator.reinitialize(stepEnd, stepSize, predictedScaled, predictedNordsieck);

            // discrete events handling
            interpolator.storeTime(stepEnd);
            System.arraycopy(predictedY, 0, y, 0, y.length);
            stepStart = acceptStep(interpolator, y, yDot, t);
            scaled    = predictedScaled;
            nordsieck = predictedNordsieck;
            interpolator.reinitialize(stepEnd, stepSize, scaled, nordsieck);

            if (!isLastStep) {

                // prepare next step
                interpolator.storeTime(stepStart);

                if (resetOccurred) {
                    // some events handler has triggered changes that
                    // invalidate the derivatives, we need to restart from scratch
                    start(stepStart, y, t);
                    interpolator.reinitialize(stepStart, stepSize, scaled, nordsieck);
                }

                // stepsize control for next step
                final double  factor     = computeStepGrowShrinkFactor(error);
                final double  scaledH    = stepSize * factor;
                final double  nextT      = stepStart + scaledH;
                final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
                hNew = filterStep(scaledH, forward, nextIsLast);

                final double  filteredNextT      = stepStart + hNew;
                final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
                if (filteredNextIsLast) {
                    hNew = t - stepStart;
                }

                interpolator.rescale(hNew);

            }

        } while (!isLastStep);

        // dispatch results
        equations.setTime(stepStart);
        equations.setCompleteState(y);

        resetInternalState();

    }

}