Experiment with the Stepleman & Winarsky method

Find a step which is close to optimal for the central finite difference formula.

References

  • R. S. Stepleman and N. D. Winarsky, "Adaptive numerical differentiation", Math. Comp. 33 (1979), 1257-1264.

import numpy as np
import pylab as pl
import numericalderivative as nd
from matplotlib.ticker import MaxNLocator

Use the method on a simple problem

In the next example, we use the algorithm on the exponential function. We create the SteplemanWinarsky algorithm using the function and the point x. Then we use the find_step() method to compute the step, using an upper bound of the step as the starting point of the algorithm. Finally, we use the compute_first_derivative() method to compute an approximate value of the first derivative using finite differences. The get_number_of_function_evaluations() method returns the number of function evaluations.

x = 1.0
algorithm = nd.SteplemanWinarsky(np.exp, x, verbose=True)
initial_step = 1.0e0
step, number_of_iterations = algorithm.find_step(initial_step)
f_prime_approx = algorithm.compute_first_derivative(step)
feval = algorithm.get_number_of_function_evaluations()
f_prime_exact = np.exp(x)  # Since the derivative of exp is exp.
print(f"Computed step = {step:.3e}")
print(f"Number of number_of_iterations = {number_of_iterations}")
print(f"f_prime_approx = {f_prime_approx}")
print(f"f_prime_exact = {f_prime_exact}")
absolute_error = abs(f_prime_approx - f_prime_exact)
print(f"Absolute error = {absolute_error:.3e}")
+ find_step()
initial_step=1.000e+00
  number_of_iterations=0, h=2.5000e-01, |FD(h_current) - FD(h_previous)|=4.4784e-01
  number_of_iterations=1, h=6.2500e-02, |FD(h_current) - FD(h_previous)|=2.6634e-02
  number_of_iterations=2, h=1.5625e-02, |FD(h_current) - FD(h_previous)|=1.6595e-03
  number_of_iterations=3, h=3.9062e-03, |FD(h_current) - FD(h_previous)|=1.0370e-04
  number_of_iterations=4, h=9.7656e-04, |FD(h_current) - FD(h_previous)|=6.4809e-06
  number_of_iterations=5, h=2.4414e-04, |FD(h_current) - FD(h_previous)|=4.0506e-07
  number_of_iterations=6, h=6.1035e-05, |FD(h_current) - FD(h_previous)|=2.5315e-08
  number_of_iterations=7, h=1.5259e-05, |FD(h_current) - FD(h_previous)|=1.5825e-09
  number_of_iterations=8, h=3.8147e-06, |FD(h_current) - FD(h_previous)|=1.1642e-10
  number_of_iterations=9, h=9.5367e-07, |FD(h_current) - FD(h_previous)|=1.1642e-10
  number_of_iterations=10, h=2.3842e-07, |FD(h_current) - FD(h_previous)|=4.6566e-10
  Stop because no monotony anymore.
Computed step = 9.537e-07
Number of iterations = 10
f_prime_approx = 2.7182818283326924
f_prime_exact = 2.718281828459045
Absolute error = 1.264e-10
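
The verbose log above shows the mechanism of the algorithm: the step is divided by a constant factor (here 4) as long as |FD(h_current) - FD(h_previous)| decreases, and the search stops at the first increase, where rounding error starts to dominate. The following is a minimal sketch of this idea, not the library's implementation, which has additional safeguards.

def central_fd(f, x, h):
    # Central finite difference approximation of f'(x).
    return (f(x + h) - f(x - h)) / (2.0 * h)


def find_step_sketch(f, x, initial_step, beta=4.0, maxiter=100):
    # Divide the step by beta while the difference between consecutive
    # approximations decreases; return the last step before it increases.
    h_previous = initial_step
    fd_previous = central_fd(f, x, h_previous)
    diff_previous = np.inf
    for _ in range(maxiter):
        h_current = h_previous / beta
        fd_current = central_fd(f, x, h_current)
        diff_current = abs(fd_current - fd_previous)
        if diff_current > diff_previous:
            return h_previous  # no monotony anymore
        h_previous, fd_previous, diff_previous = h_current, fd_current, diff_current
    return h_previous


print(find_step_sketch(np.exp, 1.0, 1.0))  # approximately 9.54e-07, as above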

Use the method on the ScaledExponentialProblem

Consider this problem.

problem = nd.ScaledExponentialProblem()
problem
DerivativeBenchmarkProblem
  • name = scaled exp
  • x = 1.0
  • f(x) = 0.9999990000005
  • f'(x) = -9.999990000004999e-07
  • f''(x) = 9.999990000005e-13
  • f^(3)(x) = -9.999990000005e-19
  • f^(4)(x) = 9.999990000004998e-25
  • f^(5)(x) = -9.999990000004998e-31
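
The values above are consistent with f(x) = exp(-x / alpha) with alpha = 1.0e6. This is an assumption based on the printed derivatives, used here only for illustration.

alpha = 1.0e6  # assumed scaling, consistent with the values printed above


def scaled_exp(x):
    # Candidate definition of the scaled exponential function.
    return np.exp(-x / alpha)


print(scaled_exp(1.0))  # 0.9999990000005, matches f(x) above
print(-scaled_exp(1.0) / alpha)  # -9.999990000005e-07, matches f'(x) above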


name = problem.get_name()
x = problem.get_x()
third_derivative = problem.get_third_derivative()
third_derivative_value = third_derivative(x)
optimum_step, absolute_error = nd.FirstDerivativeCentral.compute_step(
    third_derivative_value
)
print(f"Name = {name}, x = {x}")
print(f"Optimal step for central finite difference formula = {optimum_step}")
print(f"Minimum absolute error= {absolute_error}")
Name = scaled exp, x = 1.0
Optimal step for central finite difference formula = 6.694331732265233
Minimum absolute error = 2.2407016263779204e-17
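
These values are consistent with the classical error model of the central formula: the total error e(h) = |f'''(x)| * h ** 2 / 6 + epsilon / h, where epsilon is the absolute precision of the function values, is minimal at h* = (3 * epsilon / |f'''(x)|) ** (1 / 3). A quick check, assuming epsilon = 1.0e-16 (an assumption, but the numbers match what compute_step() returns):

epsilon = 1.0e-16  # assumed absolute precision of the function values
h_star = (3.0 * epsilon / abs(third_derivative_value)) ** (1.0 / 3.0)
e_star = h_star ** 2 * abs(third_derivative_value) / 6.0 + epsilon / h_star
print(h_star)  # 6.694..., matches the optimal step above
print(e_star)  # 2.24e-17, matches the minimum absolute error above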

Plot the error vs h

x = problem.get_x()
function = problem.get_function()
first_derivative = problem.get_first_derivative()
finite_difference = nd.FirstDerivativeCentral(function, x)
number_of_points = 200
step_array = np.logspace(-7.0, 5.0, number_of_points)
error_array = np.zeros((number_of_points))
for i in range(number_of_points):
    f_prime_approx = finite_difference.compute(step_array[i])
    error_array[i] = abs(f_prime_approx - first_derivative(x))
pl.figure()
pl.plot(step_array, error_array)
pl.plot([optimum_step] * 2, [min(error_array), max(error_array)], label=r"$h^*$")
pl.title(f"Central finite difference on {problem.get_name()} at x = {problem.get_x()}")
pl.xlabel("h")
pl.ylabel("Error")
pl.xscale("log")
pl.yscale("log")
pl.legend(bbox_to_anchor=(1, 1))
pl.tight_layout()
[Figure: Central finite difference on scaled exp at x = 1.0]
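
The curve has the usual V shape: the truncation error decreases with h while the rounding error increases. As a sanity check, the best step on the sampled grid should be close to the theoretical optimum computed above.

i_best = np.argmin(error_array)
print(step_array[i_best])  # should be within an order of magnitude of 6.69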

Use the algorithm to detect h*

function = problem.get_function()
first_derivative = problem.get_first_derivative()
x = problem.get_x()
algorithm = nd.SteplemanWinarsky(function, x, verbose=True)
initial_step = 1.0e8
h_optimal, number_of_iterations = algorithm.find_step(initial_step)
number_of_function_evaluations = algorithm.get_number_of_function_evaluations()
print("Optimum h =", h_optimal)
print("number_of_iterations =", number_of_iterations)
print("Function evaluations =", number_of_function_evaluations)
f_prime_approx = algorithm.compute_first_derivative(h_optimal)
absolute_error = abs(f_prime_approx - first_derivative(x))
print("Absolute error = ", absolute_error)
+ find_step()
initial_step=1.000e+08
  number_of_iterations=0, h=2.5000e+07, |FD(h_current) - FD(h_previous)|=1.3441e+35
  number_of_iterations=1, h=6.2500e+06, |FD(h_current) - FD(h_previous)|=1.4401e+03
  number_of_iterations=2, h=1.5625e+06, |FD(h_current) - FD(h_previous)|=3.9981e-05
  number_of_iterations=3, h=3.9062e+05, |FD(h_current) - FD(h_previous)|=4.3393e-07
  number_of_iterations=4, h=9.7656e+04, |FD(h_current) - FD(h_previous)|=2.4036e-08
  number_of_iterations=5, h=2.4414e+04, |FD(h_current) - FD(h_previous)|=1.4909e-09
  number_of_iterations=6, h=6.1035e+03, |FD(h_current) - FD(h_previous)|=9.3135e-11
  number_of_iterations=7, h=1.5259e+03, |FD(h_current) - FD(h_previous)|=5.8208e-12
  number_of_iterations=8, h=3.8147e+02, |FD(h_current) - FD(h_previous)|=3.6380e-13
  number_of_iterations=9, h=9.5367e+01, |FD(h_current) - FD(h_previous)|=2.2738e-14
  number_of_iterations=10, h=2.3842e+01, |FD(h_current) - FD(h_previous)|=1.4208e-15
  number_of_iterations=11, h=5.9605e+00, |FD(h_current) - FD(h_previous)|=8.3819e-17
  number_of_iterations=12, h=1.4901e+00, |FD(h_current) - FD(h_previous)|=9.3131e-18
  number_of_iterations=13, h=3.7253e-01, |FD(h_current) - FD(h_previous)|=3.7253e-17
  Stop because no monotony anymore.
Optimum h = 1.4901161193847656
number_of_iterations = 13
Function evaluations = 30
Absolute error =  1.9825229646960527e-17
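
The evaluation count is consistent with the cost of the central formula: each approximation uses two function evaluations, and the search computed fifteen of them (the initial step plus the fourteen steps of iterations 0 to 13).

print(2 * (1 + 14))  # 30 function evaluations, as reported above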

Plot the absolute difference depending on the step

def fd_difference(h1, h2, function, x):
    """
    Compute the difference of central difference approx. for different step sizes

    This function computes the absolute value of the difference of approximations
    evaluated at two different steps h1 and h2:

        d = abs(FD(h1) - FD(h2))

    where FD(h) is the approximation from the finite difference formula
    evaluated from the step h.

    Parameters
    ----------
    h1 : float, > 0
        The first step
    h2 : float, > 0
        The second step
    function : callable
        The function to differentiate.
    x : float
        The input point where the derivative is approximated.

    Returns
    -------
    diff_current : float
        The absolute difference between the two finite difference
        approximations.
    """
    finite_difference = nd.FirstDerivativeCentral(function, x)
    f_prime_approx_1 = finite_difference.compute(h1)
    f_prime_approx_2 = finite_difference.compute(h2)
    diff_current = abs(f_prime_approx_1 - f_prime_approx_2)
    return diff_current
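
For example, the following call compares the approximations at h = 1000 and h = 500 on the current problem (an illustrative call; the value depends on the problem and on the steps).

d = fd_difference(1.0e3, 5.0e2, problem.get_function(), problem.get_x())
print(d)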

Plot the evolution of | FD(h) - FD(h / 2) | for different values of h

number_of_points = 200
step_array = np.logspace(-7.0, 5.0, number_of_points)
diff_array = np.zeros((number_of_points))
function = problem.get_function()
for i in range(number_of_points):
    diff_array[i] = fd_difference(step_array[i], step_array[i] / 2, function, x)
pl.figure()
pl.plot(step_array, diff_array)
pl.title("F.D. difference")
pl.xlabel("h")
pl.ylabel(r"$|\operatorname{FD}(h) - \operatorname{FD}(h / 2) |$")
pl.xscale("log")
pl.yscale("log")
pl.tight_layout()
[Figure: F.D. difference]

Plot the criterion depending on the step

Plot the evolution of | FD(h) - FD(4h) | for different values of h, where consecutive steps are reduced by a factor beta = 4

number_of_points = 20
h_initial = 1.0e5
beta = 4.0
step_array = np.zeros((number_of_points))
diff_array = np.zeros((number_of_points))
function = problem.get_function()
for i in range(number_of_points):
    if i == 0:
        step_array[i] = h_initial / beta
        diff_array[i] = fd_difference(step_array[i], h_initial, function, x)
    else:
        step_array[i] = step_array[i - 1] / beta
        diff_array[i] = fd_difference(step_array[i], step_array[i - 1], function, x)
pl.figure()
pl.plot(step_array, diff_array, "o")
pl.title("F.D. difference")
pl.xlabel("h")
pl.ylabel(r"$|\operatorname{FD}(h) - \operatorname{FD}(h / 2) |$")
pl.xscale("log")
pl.yscale("log")
pl.tight_layout()
[Figure: F.D. difference]
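
In the truncation-dominated part of the curve, FD(h) - FD(beta * h) is approximately (beta ** 2 - 1) * f'''(x) * h ** 2 / 6, so dividing the step by beta = 4 divides the criterion by about beta ** 2 = 16, as the verbose logs above also show. This can be checked directly with the helper defined earlier (steps chosen for illustration).

d1 = fd_difference(1.0e3, 1.0e3 / beta, function, x)
d2 = fd_difference(1.0e3 / beta, 1.0e3 / beta ** 2, function, x)
print(d1 / d2)  # approximately beta ** 2 = 16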

Compute reference step

A rule of thumb for the starting step is h = beta * p ** (1 / 3) * x, where p is the relative precision of the function evaluations.

p = 1.0e-16  # relative precision of the function evaluations
beta = 4.0
h_reference = beta * p ** (1 / 3) * x
print("Suggested initial_step = ", h_reference)
Suggested initial_step =  1.8566355334451128e-05

Plot number of lost digits vs h

The number_of_lost_digits() method computes the number of lost digits in the approximated derivative depending on the step.
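
The exact definition is provided by the library; a plausible reading, which is an assumption but is consistent with the value printed below, is the base-10 logarithm of the ratio between the magnitude of f(x) and the magnitude of the difference f(x + h) - f(x - h) used by the central formula.

def lost_digits_sketch(f, x, h):
    # Digits cancelled when computing f(x + h) - f(x - h),
    # relative to the magnitude of f(x).
    return np.log10(abs(f(x)) / abs(f(x + h) - f(x - h)))


print(lost_digits_sketch(problem.get_function(), problem.get_x(), 1.0e4))
# approximately 1.699, close to the value printed below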

h = 1.0e4
print("Starting h = ", h)
initialize = nd.SteplemanWinarskyInitialize(algorithm)
n_digits = initialize.number_of_lost_digits(h)
print("Number of lost digits = ", n_digits)
threshold = np.log10(p ** (-1.0 / 3.0) / beta)
print("Threshold = ", threshold)

initial_step, number_of_iterations = initialize.find_initial_step(
    1.0e-5,
    1.0e7,
)
print("initial_step = ", initial_step)
print("number_of_iterations = ", number_of_iterations)

estim_step, number_of_iterations = algorithm.find_step(initial_step)
print("estim_step = ", estim_step)
print("number_of_iterations = ", number_of_iterations)
Starting h =  10000.0
Number of lost digits =  1.6989627661187812
Threshold =  4.7312733420053705
initial_step =  10000.0
number_of_iterations =  1
+ find_step()
initial_step=1.000e+04
  number_of_iterations=0, h=2.5000e+03, |FD(h_current) - FD(h_previous)|=1.5625e-11
  number_of_iterations=1, h=6.2500e+02, |FD(h_current) - FD(h_previous)|=9.7656e-13
  number_of_iterations=2, h=1.5625e+02, |FD(h_current) - FD(h_previous)|=6.1035e-14
  number_of_iterations=3, h=3.9062e+01, |FD(h_current) - FD(h_previous)|=3.8142e-15
  number_of_iterations=4, h=9.7656e+00, |FD(h_current) - FD(h_previous)|=2.4016e-16
  number_of_iterations=5, h=2.4414e+00, |FD(h_current) - FD(h_previous)|=5.6844e-18
  number_of_iterations=6, h=6.1035e-01, |FD(h_current) - FD(h_previous)|=2.2737e-17
  Stop because no monotony anymore.
estim_step =  2.44140625
number_of_iterations =  6

Plot the number of lost digits as a function of the step.

number_of_points = 200
step_array = np.logspace(-7.0, 7.0, number_of_points)
n_digits_array = np.zeros((number_of_points))
for i in range(number_of_points):
    h = step_array[i]
    n_digits_array[i] = initialize.number_of_lost_digits(h)
y_max = initialize.number_of_lost_digits(h_reference)
pl.figure()
pl.plot(step_array, n_digits_array, label="$N(h)$")
pl.plot([h_reference] * 2, [0.0, y_max], "--", label=r"$h_{ref}$")
pl.plot([initial_step] * 2, [0.0, y_max], "--", label=r"$h^{(0)}$")
pl.plot([estim_step] * 2, [0.0, y_max], "--", label=r"$h^\star$")
pl.plot(
    step_array,
    np.array([threshold] * number_of_points),
    ":",
    label=r"$T$",
)
pl.title("Number of digits lost by F.D.")
pl.xlabel("h")
pl.ylabel("$N(h)$")
pl.xscale("log")
_ = pl.legend(bbox_to_anchor=(1.0, 1.0))
pl.tight_layout()
[Figure: Number of digits lost by F.D.]

Plot the finite difference error as a function of the step, together with the initial step and the estimated step.

pl.figure()
pl.plot(step_array, error_array)
pl.plot([initial_step] * 2, [0.0, 1.0e-9], "--", label=r"$h^{(0)}$")
pl.plot([estim_step] * 2, [0.0, 1.0e-9], "--", label=r"$h^\star$")
pl.title("Finite difference")
pl.xlabel("h")
pl.ylabel("Error")
pl.xscale("log")
pl.legend(bbox_to_anchor=(1.0, 1.0))
pl.yscale("log")
pl.tight_layout()
[Figure: Finite difference]