trunc_empir.py
"""
Empirical estimation of the truncation error in a scheme
for a problem with one independent variable.
"""
import numpy as np
import scitools.std as plt

def estimate(truncation_error, T, N_0, m, makeplot=True):
    """
    Compute the truncation error in a problem with one independent
    variable, using m meshes, and estimate the convergence
    rate of the truncation error.

    The user-supplied function truncation_error(dt, N) computes
    the truncation error on a uniform mesh with N intervals of
    length dt::

        R, t, R_a = truncation_error(dt, N)

    where R holds the truncation error at the points in the array t,
    and R_a holds the corresponding theoretical truncation error
    values (None if not available). An example callback is sketched
    after this function.

    The truncation_error function is run on a series of meshes
    with 2**i*N_0 intervals, i=0,1,...,m-1.
    The values of R and R_a are restricted to the coarsest mesh,
    and based on these data, the convergence rate of R (pointwise)
    and of the time-integrated R can be estimated empirically.
    """
    N = [2**i*N_0 for i in range(m)]
    R_I = np.zeros(m) # time-integrated R values on various meshes
    R = [None]*m      # time series of R restricted to coarsest mesh
    R_a = [None]*m    # time series of R_a restricted to coarsest mesh
    dt = np.zeros(m)
    legends_R = []; legends_R_a = []  # all legends of curves

    for i in range(m):
        dt[i] = T/float(N[i])
        R[i], t, R_a[i] = truncation_error(dt[i], N[i])

        R_I[i] = np.sqrt(dt[i]*np.sum(R[i]**2))

        if i == 0:
            t_coarse = t           # the coarsest mesh

        stride = N[i]/N_0
        R[i] = R[i][::stride]      # restrict to coarsest mesh
        R_a[i] = R_a[i][::stride]

        if makeplot:
            plt.figure(1)
            plt.plot(t_coarse, R[i], log='y')
            legends_R.append('N=%d' % N[i])
            plt.hold('on')

            plt.figure(2)
            plt.plot(t_coarse, R_a[i] - R[i], log='y')
            plt.hold('on')
            legends_R_a.append('N=%d' % N[i])
    if makeplot:
        plt.figure(1)
        plt.xlabel('time')
        plt.ylabel('pointwise truncation error')
        plt.legend(legends_R)
        plt.savefig('R_series.png')
        plt.savefig('R_series.pdf')

        plt.figure(2)
        plt.xlabel('time')
        plt.ylabel('pointwise error in estimated truncation error')
        plt.legend(legends_R_a)
        plt.savefig('R_error.png')
        plt.savefig('R_error.pdf')

    # Convergence rates
    r_R_I = convergence_rates(dt, R_I)
    print 'R integrated in time; r:',
    print ' '.join(['%.1f' % r for r in r_R_I])

    R = np.array(R)  # two-dim. numpy array, shape (m, len(t_coarse))
    # r_R[n]: rate at t_coarse[n], estimated from the two finest meshes
    r_R = [convergence_rates(dt, R[:,n])[-1]
           for n in range(len(t_coarse))]

    # Plot convergence rates
    if makeplot:
        plt.figure()
        plt.plot(t_coarse, r_R)
        plt.xlabel('time')
        plt.ylabel('r')
        plt.axis([t_coarse[0], t_coarse[-1], 0, 2.5])
        plt.title(r'Pointwise rate $r$ in truncation error $\sim\Delta t^r$')
        plt.savefig('R_rate_series.png')
        plt.savefig('R_rate_series.pdf')
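
# Hypothetical example callback (a sketch for illustration, not part of the
# original module): a truncation_error function with the interface assumed
# by estimate(). It inserts the exact solution u_e(t) = I*exp(-a*t) of the
# decay ODE u' = -a*u into the Forward Euler scheme and returns the
# pointwise residual R, the mesh points t where R is sampled, and the
# leading-order theoretical error R_a = 0.5*u_e''(t)*dt. The name
# decay_FE_truncation_error and the parameters a and I are assumptions
# made here, not names from the original code.
def decay_FE_truncation_error(dt, N, a=1.0, I=1.0):
    t = np.linspace(0, N*dt, N+1)        # mesh points t_0, ..., t_N
    u_e = I*np.exp(-a*t)                 # exact solution at mesh points
    # Residual of the Forward Euler scheme at t_0, ..., t_{N-1}:
    # R^n = (u_e(t_{n+1}) - u_e(t_n))/dt + a*u_e(t_n)
    R = (u_e[1:] - u_e[:-1])/dt + a*u_e[:-1]
    # Leading-order Taylor-series estimate: R_a^n = 0.5*u_e''(t_n)*dt
    R_a = 0.5*a**2*I*np.exp(-a*t[:-1])*dt
    return R, t[:-1], R_a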

def convergence_rates(h, E):
    """
    Given a sequence of discretization parameters in the list h,
    and corresponding errors in the list E,
    compute the convergence rate of two successive (h[i], E[i])
    and (h[i+1], E[i+1]) experiments, assuming the model E=C*h^r
    (for small enough h).
    """
    from math import log
    r = [log(E[i]/E[i-1])/log(h[i]/h[i-1])
         for i in range(1, len(h))]
    return r
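
# Minimal usage sketch (an assumption for illustration, not part of the
# original file): run the estimator on the hypothetical Forward Euler
# callback defined above, and sanity-check convergence_rates on synthetic
# data following E = C*h^2 exactly, for which all rates should be 2.0.
if __name__ == '__main__':
    estimate(decay_FE_truncation_error, T=2.5, N_0=6, m=4, makeplot=False)

    h = [0.1/2**i for i in range(4)]       # halved discretization parameters
    E = [2.0*h_**2 for h_ in h]            # synthetic errors, E = 2*h^2
    r = convergence_rates(h, E)
    print 'synthetic rates:', ' '.join(['%.1f' % r_ for r_ in r])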