Numerical Analysis Overview
Leto Cavé
May 28, 2024
This document provides a comprehensive overview of various numerical methods, including rootfinding, interpolation, numerical integration, optimization, and other methods
such as statistical methods and stochastic processes. The methods are organized into a detailed table for easy comparison.
Newton's Forward Differences — Estimate intermediate values — Use divided differences — $P(x) = f(x_0) + \sum_{i=1}^{n} \Delta^i f(x_0) \prod_{j=0}^{i-1} (x - x_j)$, where $\Delta^i f(x) = \Delta^{i-1} f(x+h) - \Delta^{i-1} f(x)$ — Efficient for incremental addition of data points — Complex for higher-order differences
Spline Interpolation — Estimate intermediate values — Piecewise polynomial interpolation (cubic splines) — $S_i(x) = a_i + b_i(x - x_i) + c_i(x - x_i)^2 + d_i(x - x_i)^3$ — Smooth and continuous — More complex to implement
Numerical Integration
Trapezoidal Rule — Approximate definite integrals — Use trapezoids to approximate area — $\int_a^b f(x)\,dx \approx \frac{h}{2}\left[f(a) + 2\sum_{i=1}^{n-1} f(x_i) + f(b)\right]$ — Simple to implement — Less accurate for functions with high curvature
Steepest Ascent/Descent — Find maxima/minima — Iterate in gradient direction — $x_{n+1} = x_n + \alpha \nabla f(x_n)$ — Simple implementation — Can be slow near flat regions
Golden Section Method — Find maxima/minima — Use golden ratio to reduce interval — $\rho = \frac{1 + \sqrt{5}}{2}, \quad x_{n+1} = x_n + \frac{b - a}{\rho}$ — Efficient for unimodal functions — Limited to univariate functions
Statistical Methods
Least Squares — Minimize sum of squared errors — Solve normal equations — $\min \sum_{i=1}^{n} (y_i - f(x_i))^2$ — Widely used in regression — Sensitive to outliers
Maximum Likelihood — Estimate parameters — Maximize likelihood function — $\max \prod_{i=1}^{n} f_\theta(y_i)$ — Efficient for parameter estimation — Requires knowledge of distribution
Stochastic Processes
Markov Processes — Model random processes — Use transition matrices — $P(X_{n+1} = j \mid X_n = i) = p_{ij}$ — Models probabilistic systems — May require large state space
R Code Templates for Numerical Methods
Method — Iteration Formula — Example Function — R Code Template — Plotting with ggplot2
Rootfinding
#' Bisection method for rootfinding.
#'
#' Iteration: x_{n+1} = (a_n + b_n) / 2. The bracketing interval is halved
#' until its half-width falls below `tol`.
#'
#' @param f Continuous function with a sign change on [a, b].
#' @param a,b Endpoints of the bracketing interval (a < b).
#' @param tol Half-width of the final interval; stopping criterion.
#' @return Approximate root of f in [a, b] (midpoint of the final interval).
#' @examples
#' # library(ggplot2)
#' # f <- function(x) x^2 - 2
#' # root <- bisection(f, 0, 2)  # ~ sqrt(2)
#' # df <- data.frame(x = c(0, 2), y = f(c(0, 2)))
#' # ggplot(df, aes(x = x, y = y)) +
#' #   geom_line() +
#' #   geom_point(aes(x = root, y = 0), color = "red") +
#' #   theme_minimal()
bisection <- function(f, a, b, tol = 1e-9) {
  while ((b - a) / 2 > tol) {
    c <- (a + b) / 2
    if (f(c) == 0) return(c)  # landed exactly on a root
    # Keep the half-interval on which the sign change persists.
    if (f(a) * f(c) < 0) b <- c else a <- c
  }
  return((a + b) / 2)
}
#' Fixed-point iteration: x_{n+1} = g(x_n).
#'
#' Converges to x* with g(x*) = x* when |g'(x*)| < 1 near the fixed point.
#'
#' @param g Iteration function.
#' @param x0 Starting value.
#' @param tol Stop when successive iterates differ by less than `tol`.
#' @param max_iter Iteration cap; if reached, the last iterate is returned
#'   without warning (caller cannot distinguish non-convergence).
#' @return Approximate fixed point of g.
#' @examples
#' # root <- fixed_point(cos, 1)  # Dottie number, ~ 0.739085
fixed_point <- function(g, x0, tol = 1e-9, max_iter = 100) {
  x <- x0
  for (i in seq_len(max_iter)) {
    x_new <- g(x)
    if (abs(x_new - x) < tol) return(x_new)
    x <- x_new
  }
  return(x)
}
#' Newton-Raphson rootfinding: x_{n+1} = x_n - f(x_n) / f'(x_n).
#'
#' Quadratically convergent near a simple root; diverges or cycles if
#' f'(x_n) is (near) zero at an iterate.
#'
#' @param f Function whose root is sought.
#' @param f_prime Derivative of f.
#' @param x0 Starting value.
#' @param tol Stop when successive iterates differ by less than `tol`.
#' @param max_iter Iteration cap; the last iterate is returned if reached.
#' @return Approximate root of f.
#' @examples
#' # f <- function(x) x^2 - 2
#' # root <- newton_raphson(f, function(x) 2 * x, 1)  # ~ sqrt(2)
newton_raphson <- function(f, f_prime, x0, tol = 1e-9, max_iter = 100) {
  x <- x0
  for (i in seq_len(max_iter)) {
    x_new <- x - f(x) / f_prime(x)
    if (abs(x_new - x) < tol) return(x_new)
    x <- x_new
  }
  return(x)
}
#' Secant method: x_{n+1} = x_n - f(x_n) (x_n - x_{n-1}) / (f(x_n) - f(x_{n-1})).
#'
#' Derivative-free Newton variant using a finite-difference slope through
#' the two most recent iterates.
#'
#' @param f Function whose root is sought.
#' @param x0,x1 Two distinct starting values.
#' @param tol Stop when successive iterates differ by less than `tol`.
#' @param max_iter Iteration cap; the last iterate is returned if reached.
#' @return Approximate root of f.
#' @examples
#' # root <- secant(function(x) x^2 - 2, 1, 2)  # ~ sqrt(2)
secant <- function(f, x0, x1, tol = 1e-9, max_iter = 100) {
  for (i in seq_len(max_iter)) {
    # Secant step through (x0, f(x0)) and (x1, f(x1)).
    x2 <- x1 - f(x1) * (x1 - x0) / (f(x1) - f(x0))
    if (abs(x2 - x1) < tol) return(x2)
    x0 <- x1
    x1 <- x2
  }
  return(x1)
}
Interpolation
1 library(ggplot2)
1 lagrange <- function(x, y, x_new) { 2 x <- c(0, 1, 2)
2 n <- length(x) 3 y <- c(1, 2, 0)
3 L <- rep(1, n) 4 x_new <- seq(0, 2, length.out=100)
4 for (i in 1:n) { 5 y_new <- sapply(x_new, lagrange, x, y
Pn Polynomial inter- 5 for (j in 1:n) { ,→ )
Lagrange Polynomials P (x) = i=0 yi ℓi (x) polation of given 6 if (i != j) L[i] <- L[i] * (x_ 6 df <- data.frame(x=x_new, y=y_new)
points ,→ new - x[j]) / (x[i] - x[j]) 7 ggplot(df, aes(x=x, y=y)) +
7 } 8 geom_line() +
8 } 9 geom_point(data=data.frame(x=x, y=y
9 return(sum(y * L)) ,→ ), aes(x=x, y=y), color=’red’)
10 } ,→ +
10 theme_minimal()
Numerical Integration
#' Composite trapezoidal rule.
#'
#' Approximates  int_a^b f(x) dx  ~=  (h/2) [f(a) + 2 sum f(x_i) + f(b)]
#' on n equal subintervals of width h = (b - a) / n.
#'
#' @param f Vectorized integrand.
#' @param a,b Integration limits.
#' @param n Number of subintervals (n >= 1).
#' @return Approximate value of the integral; exact for linear f.
#' @examples
#' # integral <- trapezoidal(sin, 0, pi, 10)  # ~ 1.9835 (true value 2)
trapezoidal <- function(f, a, b, n) {
  h <- (b - a) / n
  x <- seq(a, b, length.out = n + 1)
  y <- f(x)
  # sum(y) counts both endpoints at full weight; subtract half of each.
  return(h * (sum(y) - 0.5 * (y[1] + y[n + 1])))
}
#' Composite Simpson's rule.
#'
#' Approximates  int_a^b f(x) dx  ~=
#' (h/3) [f(a) + 4 sum_{odd} f(x_i) + 2 sum_{even} f(x_i) + f(b)].
#' Requires an even number of subintervals; an odd n is bumped to n + 1.
#'
#' @param f Vectorized integrand.
#' @param a,b Integration limits.
#' @param n Requested number of subintervals (rounded up to even).
#' @return Approximate value of the integral; exact for cubics.
#' @examples
#' # integral <- simpson(sin, 0, pi, 10)  # ~ 2.000007
simpson <- function(f, a, b, n) {
  if (n %% 2 == 1) n <- n + 1  # Simpson needs an even panel count
  h <- (b - a) / n
  x <- seq(a, b, length.out = n + 1)
  y <- f(x)
  # y[1], y[n + 1] are the endpoints; interior odd nodes get weight 4,
  # interior even nodes weight 2.
  return(h / 3 * (y[1] + 4 * sum(y[seq(2, n, by = 2)]) +
                    2 * sum(y[seq(3, n - 1, by = 2)]) + y[n + 1]))
}
Optimization
1 library(ggplot2)
1 newton_optimize <- function(f_prime, 2 f <- function(x) -x^2 + 4*x
,→ f_double_prime, x0, tol=1e-9, 3 f_prime <- function(x) -2*x + 4
,→ max_iter=100) { 4 f_double_prime <- function(x) -2
2 x <- x0 5 x0 <- 1
3 for (i in 1:max_iter) { 6 optimum <- newton_optimize(f_prime, f
4 x_new <- x - f_prime(x) / f_ ,→ _double_prime, x0)
Newton-Raphson (Opti- f ′ (xn ) Maximize f (x) =
xn+1 = xn − ,→ double_prime(x) 7 df <- data.frame(x=seq(-1, 5, length.
mization) f ′′ (xn ) −x2 + 4x
5 if (abs(x_new - x) < tol) return( ,→ out=100), y=f(seq(-1, 5, length
,→ x_new) ,→ .out=100)))
6 x <- x_new 8 ggplot(df, aes(x=x, y=y)) +
7 } 9 geom_line() +
8 return(x) 10 geom_point(aes(x=optimum, y=f(
9 } ,→ optimum)), color=’red’) +
11 theme_minimal()
1 library(ggplot2)
1 steepest_ascent <- function(f, grad_f 2 f <- function(x) -x^2 + 4*x
,→ , x0, alpha = 0.01, tol = 1e-9, 3 grad_f <- function(x) -2*x + 4
,→ max_iter = 100) { 4 x0 <- 1
2 x <- x0 5 optimum <- steepest_ascent(f, grad_f,
3 for (i in 1:max_iter) { ,→ x0)
Maximize f (x) = 4 x_new <- x + alpha * grad_f(x) 6 df <- data.frame(x=seq(-1, 5, length.
Steepest Ascent/Descent xn+1 = xn + α∇f (xn )
−x2 + 4x 5 if (abs(f(x_new) - f(x)) < tol) ,→ out=100), y=f(seq(-1, 5, length
,→ return(x_new) ,→ .out=100)))
6 x <- x_new 7 ggplot(df, aes(x=x, y=y)) +
7 } 8 geom_line() +
8 return(x) 9 geom_point(aes(x=optimum, y=f(
9 } ,→ optimum)), color=’red’) +
10 theme_minimal()
#' Golden-section search for the maximum of a unimodal function on [a, b].
#'
#' Shrinks the bracket by the inverse golden ratio each iteration, reusing
#' the golden-ratio placement of the two interior probe points.
#'
#' @param f Unimodal objective (maximized).
#' @param a,b Initial bracket endpoints.
#' @param tol Stop when the bracket width |b - a| falls below `tol`.
#' @return Midpoint of the final bracket, approximating the maximizer.
#' @examples
#' # optimum <- golden_section(function(x) -x^2 + 4*x, 0, 4)  # -> ~2
golden_section <- function(f, a, b, tol = 1e-9) {
  gr <- (1 + sqrt(5)) / 2
  # Interior probes placed symmetrically at golden-ratio fractions.
  xl <- b - (b - a) / gr
  xr <- a + (b - a) / gr
  while (abs(b - a) > tol) {
    # Keep the sub-bracket that must contain the maximum.
    if (f(xl) > f(xr)) b <- xr else a <- xl
    xl <- b - (b - a) / gr
    xr <- a + (b - a) / gr
  }
  return((a + b) / 2)
}
Statistical Methods
#' Ordinary least-squares fit of a simple linear model y ~ x.
#'
#' Thin wrapper around stats::lm solving min sum (y_i - f(x_i))^2 for
#' f(x) = b0 + b1 x.
#'
#' @param x Numeric predictor vector.
#' @param y Numeric response vector, same length as x.
#' @return The fitted "lm" object (coefficients via coef(), etc.).
#' @examples
#' # model <- least_squares(c(1, 2, 3, 4, 5), c(2, 4, 5, 4, 5))
#' # ggplot(data.frame(x = x, y = y), aes(x, y)) +
#' #   geom_point() + geom_smooth(method = "lm") + theme_minimal()
least_squares <- function(x, y) {
  model <- lm(y ~ x)
  return(model)
}
#' Maximum-likelihood estimation of a normal distribution's (mu, sigma).
#'
#' Minimizes the negative log-likelihood with stats::optim (Nelder-Mead),
#' started from the sample mean and sample sd.
#' NOTE(review): sigma is optimized unconstrained, so the search may probe
#' non-positive values and emit NaN warnings from dnorm; the returned
#' optimum is still valid for well-behaved samples.
#'
#' @param data Numeric sample assumed i.i.d. normal.
#' @return Length-2 numeric vector c(mu_hat, sigma_hat).
#' @examples
#' # set.seed(123)
#' # params <- max_likelihood(rnorm(100, mean = 5, sd = 2))
max_likelihood <- function(data) {
  # Negative log-likelihood of N(mu, sigma) for the observed sample.
  log_likelihood <- function(params) {
    mu <- params[1]
    sigma <- params[2]
    -sum(dnorm(data, mean = mu, sd = sigma, log = TRUE))
  }
  result <- optim(c(mean(data), sd(data)), log_likelihood)
  return(result$par)
}
Stochastic Processes
#' Simulate a discrete-time Markov chain with P(X_{n+1}=j | X_n=i) = p_ij.
#'
#' The initial state is drawn uniformly from `states`; each subsequent
#' state is drawn from the row of `transition_matrix` for the current
#' state. Sampling is done on state *indices*, so it works for any state
#' labels (the original template indexed the matrix by state value, which
#' required states to be exactly 1..k) and n = 1 is handled (the original
#' `2:n` loop ran backwards for n = 1).
#'
#' @param transition_matrix k x k row-stochastic matrix; row i gives the
#'   transition probabilities out of states[i].
#' @param states Vector of k state labels.
#' @param n Chain length (n >= 1).
#' @return Numeric vector of n visited states.
#' @examples
#' # tm <- matrix(c(0.8, 0.2, 0.5, 0.5), nrow = 2, byrow = TRUE)
#' # chain <- simulate_markov(tm, c(1, 2), 100)
#' # ggplot(data.frame(step = 1:100, state = chain),
#' #        aes(x = step, y = state)) + geom_step() + theme_minimal()
simulate_markov <- function(transition_matrix, states, n) {
  chain <- numeric(n)
  idx <- sample(seq_along(states), 1)  # uniform initial state
  chain[1] <- states[idx]
  for (i in seq_len(n)[-1]) {          # empty when n == 1
    idx <- sample(seq_along(states), 1, prob = transition_matrix[idx, ])
    chain[i] <- states[idx]
  }
  return(chain)
}