Example 4: Central Differences

This example uses the same data as the One-Sided Differences example. Agreement between the numerical and analytic derivatives should be approximately the two-thirds power of machine precision, and that agreement is achieved here. Generally this is the most accuracy one can expect from central divided differences. Note that central differences require essentially twice as many function evaluations as one-sided differences, which can be a significant cost for functions that are expensive to evaluate. This example also shows how to override evaluateF.

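For IEEE double precision the machine precision is u = 2^-52, about 2.22e-16, so the expected relative error of a central difference is roughly v = (3u)^(2/3), about 7.6e-11. The two lines below are a small illustrative sketch of that calculation (not part of the example, which hard-codes u); they use only standard Java library calls:

double u = Math.ulp(1.0);                // machine precision, about 2.22e-16
double v = Math.pow(3.0 * u, 2.0 / 3.0); // expected accuracy, about 7.6e-11
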
import com.imsl.math.*;
import java.text.*;

public class NumericalDerivativesEx4 extends NumericalDerivatives {

    static private int m = 1, n = 2;
    static private double a, b, c, v = 0.0;

    public NumericalDerivativesEx4(NumericalDerivatives.Function fcn) {
        super(fcn);
    }

    // Override evaluateF.
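    // The function being differentiated is f(y) = a*exp(b*y[0]) + c*y[0]*y[1]^2.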
    public double[] evaluateF(int varIndex, double[] y) {
        double[] valueF = new double[m];

        valueF[0] = a * Math.exp(b * y[0]) + c * y[0] * y[1] * y[1];
        return valueF;
    }

    public static void main(String args[]) {
        int[] options = new int[n];
        double u;
        double[] y = new double[n];
        double[] scale = new double[n];
        double[][] actual = new double[m][n];
        double[] re = new double[2];

        // Define data and point of evaluation:
        a = 2.5e6;
        b = 3.4e0;
        c = 4.5e0;
        y[0] = 2.1e0;
        y[1] = 3.2e0;

        // Machine precision, for measuring errors
        u = 2.220446049250313e-016;
        v = Math.pow(3.e0 * u, 2.e0 / 3.e0);
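        // v is the expected relative accuracy of central differences, about 7.6e-11.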

        // Set scaling:
        scale[0] = 1.e0;
        // Increase scale to account for large value of a.
        scale[1] = 8.e3;

        // Compute true values of partials.
        actual[0][0] = a * b * Math.exp(b * y[0]) + c * y[1] * y[1];
        actual[0][1] = 2 * c * y[0] * y[1];

        options[0] = NumericalDerivatives.CENTRAL;
        options[1] = NumericalDerivatives.CENTRAL;

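        // The Function passed to the constructor is a placeholder; its values
        // are never used because evaluateF is overridden above, so it simply
        // returns an array of zeros.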
        NumericalDerivatives.Function fcn
                = new NumericalDerivatives.Function() {
                    public double[] f(int varIndex, double[] y) {
                        return new double[m];
                    }
                };

        NumericalDerivativesEx4 derv = new NumericalDerivativesEx4(fcn);
        derv.setDifferencingMethods(options);
        derv.setScalingFactors(scale);
        double[][] jacobian = derv.evaluateJ(y);

        NumberFormat nf = NumberFormat.getInstance();
        nf.setMaximumFractionDigits(2);
        nf.setMinimumFractionDigits(2);

        PrintMatrixFormat pmf = new PrintMatrixFormat();
        pmf.setNumberFormat(nf);
        new PrintMatrix("Numerical gradient:").print(pmf, jacobian);
        new PrintMatrix("Analytic gradient:").print(pmf, actual);

        // Check the relative accuracy of the central differences.
        // They should be accurate to about two-thirds of machine precision.
        jacobian[0][0] = (jacobian[0][0] - actual[0][0]) / actual[0][0];
        jacobian[0][1] = (jacobian[0][1] - actual[0][1]) / actual[0][1];
        re[0] = jacobian[0][0];
        re[1] = jacobian[0][1];

        System.out.println("Relative accuracy:");
        System.out.println("df/dy_1       df/dy_2");
        System.out.printf(" %.2fv        %.2fv\n", re[0] / v, re[1] / v);
        System.out.printf("(%.3e)  (%.3e)\n", re[0], re[1]);
    }
}

Output

     Numerical gradient:
           0            1    
0  10,722,141,354.39  60.48  

     Analytic gradient:
           0            1    
0  10,722,141,353.42  60.48  

Relative accuracy:
df/dy_1       df/dy_2
 1.19v        0.27v
(9.100e-11)  (2.045e-11)