diff --git a/exercise3/exercise3.pdf b/exercise3/exercise3.pdf index 48eb1fe..cc41ee5 100644 Binary files a/exercise3/exercise3.pdf and b/exercise3/exercise3.pdf differ diff --git a/exercise3/exercise3.typ b/exercise3/exercise3.typ index 9dc09a2..f6e14da 100644 --- a/exercise3/exercise3.typ +++ b/exercise3/exercise3.typ @@ -52,52 +52,36 @@ $ $ then $(2)$ $ - integral_0^1 ln(x) dd(x) & = [x ln(x) - 1/2 dot x^2 / x - 1/6 dot x^3 / x^2 - - 1/12 dot x^4 / x^3 - dots.c]_0^1 \ - & = -underbracket((1/2 + 1/6 + 1/12 + dots.c), 1) - - cancel(lim_(t -> 0) t ln(t)) \ - & = -1, -$ -because with $p(x) = x^2 + 3 x + 2$ we get -$ - 2 + 6 + 12 + dots.c = p(0) + p(1) + dots.c -$ -so we can compute the telescope sum -$ - sum_(k=0)^oo 1/p(k) & = sum_(k=0)^oo 1/(k^2 + 3 k + 2) \ - & = sum_(k=0)^oo 1/((k+1)(k+2)) \ - & = sum_(k=0)^oo (1/(k+1) - 1/(k+2)) \ - & = 1 - cancel(1/2 + 1/2) - cancel(1/3 + 1/3) + dots.c \ - & = 1. + integral_0^1 ln(x) dd(x) & = [x ln(x) - x]_0^1 \ + & = (1 ln(1) - 1) - lim_(t -> 0^+) (t ln(t) - t) \ + & = -1 - 0 = -1, $ +where we used integration by parts and the fact that $lim_(t -> 0^+) t ln(t) = 0$. now we know what to expect from our estimates. recall the trapezoidal rule $ - // I_[a, b][f] approx T_[a, b][f] := (b-a)/2 (f(b) - f(a)) - I_[a, b][f] approx T_[a, b][f] := (b-a)/2 sum_(i=0)^(n-1) (f(x_(i+1)) - f(x_i)) + I_[a, b][f] approx T_[a, b][f] := (b-a)/2 (f(a) + f(b)) $ -in our case $a = 0$ and $b = 1$. for $(1) med f(x) = e^x$ we obtain +in our case $a = 0$ and $b = 1$. for $(1)$ with $f(x) = e^x$ we obtain $ - T_[0, 1][exp] = 1/2 (e - 1), + T_[0, 1][exp] = 1/2 (e^0 + e^1) = 1/2 (1 + e) = (1 + e)/2, $ which yields an error of $ - e - 1 - 1/2 (e - 1) = 1/2 (e - 1). + E_[0,1][exp] = (e - 1) - (1 + e)/2 = (2e - 2 - 1 - e)/2 = (e - 3)/2. 
$ -for $(2) med f(x) = ln(x)$ we obtain +for $(2)$ with $f(x) = ln(x)$ we obtain $ - T_[0, 1][ln] = 1/2 (lim_(t->0) ln(t) - ln(1)) = 1/2 (-oo - 0) = -oo + T_[0, 1][ln] = 1/2 (ln(0) + ln(1)) = 1/2 (-oo + 0) = -oo $ -which yields an error of $oo$. +which yields an error of $+oo$. thus we can see that using the trapezoidal rule for $ln(x)$ diverges and proves -to be unwieldy. for $exp(x)$ we can see that it was off by half the actual -value. - +to be unwieldy. for $exp(x)$ the actual error is $(e-3)/2 approx #{ calc.round((calc.e - 3) / 2, digits: 3) }$. == c) @@ -105,20 +89,278 @@ recall the upper bound of the error for the trapezoidal rule $ abs(E_[a,b][f]) <= (b-a)^3 / 12 dot max_(xi in [a,b]) abs(f''(xi)) $ -which is for $f = exp$ +which is for $f = exp$ on $[0,1]$ $ - E_[0, 1][exp] = e/12 approx #{ calc.round(calc.e / 12, digits: 3) } + abs(E_[0, 1][exp]) <= 1^3/12 dot max_(xi in [0,1]) abs(e^xi) = e/12 approx #{ calc.round(calc.e / 12, digits: 3) } $ -as opposed to our previous $1/2 (e - 1) -= #{ calc.round((calc.e - 1) / 2, digits: 3) }$. +Our actual error magnitude is $abs((e-3)/2) = (3-e)/2 approx #{ calc.round((3 - calc.e) / 2, digits: 3) }$. +Since $#{ calc.round((3 - calc.e) / 2, digits: 3) } <= #{ calc.round(calc.e / 12, digits: 3) }$, the error bound is satisfied. +== d) + +recall the error bound derived in the lecture, +$ + abs(E_[a,b][f]) <= (max_(xi in + [a,b]) abs(f^((n+1))(xi))) / ((n+1)!) integral_a^b product_(i=0)^n abs( + x + - x_i + ) + dd(x), +$ +where the integral of $f$ is taken over the interval $[a,b]$, and $f in +C^(n+1)[a,b]$. + +for the trapezoidal rule, $n = 1$, so we have $x_0 = 0$ and $x_1 = 1$. for $f(x) += e^x$: + +- $f^((n+1))(x) = f^((2))(x) = e^x$ +- $max_(xi in [0,1]) abs(e^xi) = e$ +- $(n+1)! = 2! 
= 2$ + +the integral becomes: +$ + integral_0^1 product_(i=0)^1 abs(x - x_i) dd(x) & = integral_0^1 abs(x - 0) dot abs(x - 1) dd(x) \ + & = integral_0^1 x dot abs(x - 1) dd(x) \ + & = integral_0^1 x dot (1 - x) dd(x) quad "since" x - 1 <= 0 "on" [0,1] \ + & = integral_0^1 (x - x^2) dd(x) \ + & = [x^2/2 - x^3/3]_0^1 = 1/2 - 1/3 = 1/6 +$ + +therefore, the error bound is: +$ + abs(E_[0,1][exp]) <= e/2 dot 1/6 = e/12 approx #{ calc.round(calc.e / 12, digits: 3) } +$ + +this gives the same bound as part c), confirming that both error bound formulas +are equivalent for the trapezoidal rule. the actual error +$(3-e)/2 approx #{ calc.round((3 - calc.e) / 2, digits: 3) }$ +is indeed less than this bound. == e) -telescope sum ish +to derive the composite trapezoidal rule, we divide the interval $[a,b]$ into $n$ segments of equal length $h = (b-a)/n$, with points $x_i = a + i h$ for $i = 0, 1, ..., n$. + +applying the basic trapezoidal rule to each subinterval $[x_i, x_(i+1)]$: +$ + T_([x_i, x_(i+1)])[f] = h/2 (f(x_i) + f(x_(i+1))) +$ + +the composite rule sums over all subintervals: +$ + T_([a,b])^n [f] = sum_(i=0)^(n-1) T_([x_i, x_(i+1)])[f] = sum_(i=0)^(n-1) h/2 (f(x_i) + f(x_(i+1))) +$ + +expanding this sum: +$ + T_([a,b])^n [f] = h/2 [f(x_0) + f(x_1) + f(x_1) + f(x_2) + ... + f(x_(n-1)) + f(x_n)] +$ + +we observe that $f(x_0)$ and $f(x_n)$ appear once, while $f(x_1), f(x_2), ..., f(x_(n-1))$ each appear twice. collecting terms: +$ + T_([a,b])^n [f] = h/2 (f(x_0) + f(x_n)) + h sum_(i=1)^(n-1) f(x_i) +$ + +since $x_0 = a$ and $x_n = b$, this gives us the desired composite formula. + +== f) + +to derive the error bound for the composite trapezoidal rule, we +start with the error bound for each subinterval. 
+ +for each subinterval $[x_i, x_(i+1)]$ with length $h$: +$ + |E_([x_i, x_(i+1)])[f]| <= h^3/12 max_(xi in [x_i, x_(i+1)]) |f''(xi)| +$ + +the total error for the composite rule is: +$ + |E_([a,b])^n [f]| = |sum_(i=0)^(n-1) E_([x_i, x_(i+1)])[f]| +$ + +using the triangle inequality: +$ + |sum_(i=0)^(n-1) E_([x_i, x_(i+1)])[f]| <= sum_(i=0)^(n-1) |E_([x_i, x_(i+1)])[f]| +$ + +substituting the error bound for each subinterval: +$ + sum_(i=0)^(n-1) |E_([x_i, x_(i+1)])[f]| <= sum_(i=0)^(n-1) h^3/12 max_(xi in [x_i, x_(i+1)]) |f''(xi)| +$ + +since $[x_i, x_(i+1)] subset [a,b]$, we have: +$ + max_(xi in [x_i, x_(i+1)]) |f''(xi)| <= max_(xi in [a,b]) |f''(xi)| +$ + +therefore: +$ + sum_(i=0)^(n-1) h^3/12 + max_(xi in [x_i, x_(i+1)]) + |f''(xi)| & <= h^3/12 + sum_(i=0)^(n-1) max_(xi in [a,b]) |f''(xi)| \ + & = h^3/12 dot n dot max_(xi in [a,b]) |f''(xi)| \ + & = h^2 dot (b-a)/12 max_(xi in [a,b]) |f''(xi)| \ + & = (b-a)^3/(12n^2) max_(xi in [a,b]) |f''(xi)| +$ + + +== g) + +using the error bound formula: +$ + |E_([a,b])^n [f]| <= (b-a)^3/(12n^2) max_(xi in [a,b]) |f''(xi)| +$ + +=== first problem: $|E_([0,1])^n [exp]| <= 10^(-3)$ + +for $f(x) = e^x$, we have $f''(x) = e^x$. +since $e^x$ is increasing, $max_(xi in [0,1]) |f''(xi)| = e$. + +$ + 1^3/(12n^2) dot e <= 10^(-3) +$ + +solving: $n^2 >= e/(12 dot 10^(-3)) = 1000e/12$ + +therefore $n >= sqrt(1000e/12) approx 15.05$, so $n >= 16$. + +=== second problem: $|E_([0,1])^n [ln]| <= 10^(-5)$ + +for $f(x) = ln(x)$, we have $f''(x) = -1/x^2$. +since $|f''(x)| = 1/x^2 -> infinity$ as $x -> 0^+$, +the function is not twice differentiable at $x = 0$. +the trapezoidal rule error bound does not apply. 
+ +=== third problem: $|E_([1,2])^n [e^x/x]| <= 10^(-3)$ + +for $f(x) = e^x/x$, using the quotient rule: +$f''(x) = (e^x (x^2 - 2x + 2))/x^3$ + +to find the maximum, we check: +- $f''(1) = e$ +- $f''(1.5) approx 1.66$ +- $f''(2) = e^2/4 approx 1.85$ + +since $f''(1) > f''(1.5)$ and $f''(1) > f''(2)$, +we have $max_(xi in [1,2]) |f''(xi)| = e$. + +$ + 1^3/(12n^2) dot e <= 10^(-3) +$ + +solving: $n >= sqrt(1000e/12) approx 15.05$, so $n >= 16$. + + +== h) & i) + +empirically testing shows that $n = 12$ subintervals already +achieves $|E_([0,1])^12 [exp]| <= 10^(-3)$. furthermore, we need $120$ +subintervals for error $10^(-5)$. + + +== j) + +to estimate the convergence rate, we fit the model $E_n approx C h^p$ by taking logarithms: +$ + log |E_n| approx log C + p log h +$ + +using linear regression on $(log h, log |E_n|)$ data for various values of $n$: + +for $exp(x)$ on $[0,1]$: +- estimated $p_1 approx 2.0$ + +for $sqrt(x)$ on $[0,1]$: +- estimated $p_2 approx 1.5$ + + +== k) + +for exp(x): $p_1 approx 2.0 =>$ quadratic + +for sqrt(x): $p_2 approx 1.5$ shows that $f''(x) = -1/4 x^(-3/2)$ is unbounded as $x -> 0^+$. + + +== l) + +given +$ + E_([a,b])^1 = -M/12 (b-a)^3 quad "and" quad + E_([a,b])^2 = -M/48 (b-a)^3 +$ + +we obtain $E_([a,b])^1 = 4 E_([a,b])^2$. 
+ +since +$ + E_([a,b])^1 = I_([a,b])[f] - T_([a,b])^1 quad "and" quad + E_([a,b])^2 = I_([a,b])[f] - T_([a,b])^2 +$ +we get +$ + E_([a,b])^1 - E_([a,b])^2 = T_([a,b])^2 - T_([a,b])^1 +$ + +thus + +$ + 4 E_([a,b])^2 - E_([a,b])^2 & = T_([a,b])^2 - T_([a,b])^1 \ + 3 E_([a,b])^2 & = T_([a,b])^2 - T_([a,b])^1 \ + ==> E_([a,b])^1 & = 4 E_([a,b])^2 + = 4/3 (T_([a,b])^2 - T_([a,b])^1) =: cal(E)_([a,b])^1 [f] \ + E_([a,b])^2 & = 1/3 (T_([a,b])^2 - T_([a,b])^1) =: cal(E)_([a,b])^2 [f] +$ + + +== m) + +for $f(x) = sqrt(x)$ on $[0,1]$ with 10 uniform intervals and tolerance $10^(-4)$: +#[ + #let sqrt_trap1(a, b) = calc.round((b - a) / 2 * (calc.sqrt(a) + calc.sqrt(b)), digits: 6) + #let sqrt_trap2(a, b) = { + let c = (a + b) / 2 + calc.round((b - a) / 4 * (calc.sqrt(a) + 2 * calc.sqrt(c) + calc.sqrt(b)), digits: 6) + } + #let error_est(a, b) = calc.round(calc.abs(4 / 3 * (sqrt_trap2(a, b) - sqrt_trap1(a, b))), digits: 6) + + #show table.cell.where(y: 0): set text(weight: "bold") + #table( + columns: 4, + inset: 5% + 6pt, + [Interval], [$T_([a,b])^1$], [$T_([a,b])^2$], [$|cal(E)_([a,b])^1|$], + + [$[0, 0.1]$], [#sqrt_trap1(0, 0.1)], [#sqrt_trap2(0, 0.1)], [#error_est(0, 0.1)], + [$[0.1, 0.2]$], [#sqrt_trap1(0.1, 0.2)], [#sqrt_trap2(0.1, 0.2)], [#error_est(0.1, 0.2)], + [$[0.2, 0.3]$], [#sqrt_trap1(0.2, 0.3)], [#sqrt_trap2(0.2, 0.3)], [#error_est(0.2, 0.3)], + [$[0.3, 0.4]$], [#sqrt_trap1(0.3, 0.4)], [#sqrt_trap2(0.3, 0.4)], [#error_est(0.3, 0.4)], + [$[0.4, 0.5]$], [#sqrt_trap1(0.4, 0.5)], [#sqrt_trap2(0.4, 0.5)], [#error_est(0.4, 0.5)], + [$[0.5, 0.6]$], [#sqrt_trap1(0.5, 0.6)], [#sqrt_trap2(0.5, 0.6)], [#error_est(0.5, 0.6)], + [$[0.6, 0.7]$], [#sqrt_trap1(0.6, 0.7)], [#sqrt_trap2(0.6, 0.7)], [#error_est(0.6, 0.7)], + [$[0.7, 0.8]$], [#sqrt_trap1(0.7, 0.8)], [#sqrt_trap2(0.7, 0.8)], [#error_est(0.7, 0.8)], + [$[0.8, 0.9]$], [#sqrt_trap1(0.8, 0.9)], [#sqrt_trap2(0.8, 0.9)], [#error_est(0.8, 0.9)], + [$[0.9, 1.0]$], [#sqrt_trap1(0.9, 1.0)], [#sqrt_trap2(0.9, 1.0)], 
[#error_est(0.9, 1.0)], + ) +] + +intervals needing refinement (error > $10^(-4)$): first 4 intervals + + +== n) + +implemented adaptive trapezoidal quadrature with tolerance $"tol" = 10^(-5)$: + +#table( + columns: 4, + [Function], [Adaptive Result], [Intervals Used], [Uniform (part i)], + + [$e^x$], [1.7183], [64], [120], + [$sqrt(x)$], [0.6667], [79], [-], +) + +the adaptive algorithm automatically allocates computational effort where +needed, making it more efficient than uniform refinement. -overlapping edges = problem 3 diff --git a/exercise3/main.py b/exercise3/main.py new file mode 100644 index 0000000..ea0d1c4 --- /dev/null +++ b/exercise3/main.py @@ -0,0 +1,71 @@ +import numpy as np + + +def step(a, b, n): + return (b - a) / n + + +# this is basically linspace +def interpolation_nodes(a, b, n): + h = step(a, b, n - 1) + return [a + i * h for i in range(n)] + + +def composite_trapezoidal(f, a, b, n): + h = step(a, b, n) + x = interpolation_nodes(a, b, n + 1) + return h / 2 * sum([f(x[i]) + f(x[i + 1]) for i in range(n)]) + + +def error_trapezoidal(f, a, b, n, expected): + return expected - composite_trapezoidal(f, a, b, n) + + +def format_error(f, a, b, n, expected): + return f"error for {f} from {a} to {b} with {n} subintervals is {error_trapezoidal(f, a, b, n, expected)}." 
+ + +def find_subinterval_count(f, a, b, expected, error_threshold): + n = 1 + e = lambda n: error_trapezoidal(f, a, b, n, expected) + while np.abs(e(n)) > error_threshold: + n += 1 + return n + + +def estimate_convergence_rate(f, a, b, expected): + m = 20 + errors = [np.log(abs(error_trapezoidal(f, a, b, n, expected))) for n in range(2, m)] + steps = [np.log(step(a, b, n)) for n in range(2, m)] + coeffs = np.polyfit(steps, errors, 1) # log-log regression line + p = coeffs[0] + C = np.exp(coeffs[1]) + return p, C + + +def adaptive_trapezoidal(f, a, b, tol): + T1 = (b - a) / 2 * (f(a) + f(b)) + c = (a + b) / 2 + T2 = (b - a) / 4 * (f(a) + 2 * f(c) + f(b)) + + E1 = 4 / 3 * (T2 - T1) + + if abs(E1) <= tol: + E2 = 1 / 3 * (T2 - T1) + return T2 + E2, 1 + else: + left_val, left_count = adaptive_trapezoidal(f, a, c, tol * 0.7) + right_val, right_count = adaptive_trapezoidal(f, c, b, tol * 0.7) + return left_val + right_val, left_count + right_count + + +print(format_error(np.exp, 0, 1, 16, np.e - 1)) +print(f"subintervals: {find_subinterval_count(np.exp, 0, 1, np.e - 1, 1e-5)}") +print(f"exp p: {estimate_convergence_rate(np.exp, 0, 1, np.e - 1)[0]:.5f}") +print(f"sqrt p: {estimate_convergence_rate(np.sqrt, 0, 1, 2/3)[0]:.5f}") +print(f"adaptive estimate exp: {adaptive_trapezoidal(np.exp, 0, 1, 1e-5)[0]:.5f}") +print(f"adaptive estimate sqrt: {adaptive_trapezoidal(np.sqrt, 0, 1, 1e-5)[0]:.5f}") +print(f"adaptive n subintervals exp: {adaptive_trapezoidal(np.exp, 0, 1, 1e-5)[1]:.0f}") +print( + f"adaptive n subintervals sqrt: {adaptive_trapezoidal(np.sqrt, 0, 1, 1e-5)[1]:.0f}" +) diff --git a/flake.nix b/flake.nix index 56eeb1f..dba04dd 100644 --- a/flake.nix +++ b/flake.nix @@ -16,8 +16,10 @@ buildInputs = with pkgs; [ typst typstyle + python313 python313Packages.sympy python313Packages.matplotlib + python313Packages.numpy ]; shellHook = '' echo welcome!