
Taller 3 corte 3 (Clase 28 octubre).ipynb - Colab

The document contains Python code for various numerical methods, including Gaussian elimination, Jacobi, and Gauss-Seidel methods for solving linear equations. It also includes a Newton-Raphson method for solving nonlinear equations, along with examples demonstrating the application of these methods. The code utilizes libraries such as NumPy and SciPy for efficient computation with sparse matrices.


import numpy as np
from scipy.sparse import dia_matrix, coo_matrix, csr_matrix

def sparsesetup(n):
    """Build the sparse test matrix a and right-hand side b of size n (intended for even n)."""
    e = np.ones(n)
    n2 = n // 2
    # Tridiagonal part: 3 on the main diagonal, -1 on the sub- and superdiagonals
    a = dia_matrix(([-e, 3*e, -e], [-1, 0, 1]), shape=(n, n)).tocsr()
    # Antidiagonal part with entries 1/2
    c = coo_matrix((e/2, (range(n), range(n-1, -1, -1))), shape=(n, n)).tocsr()
    a = a + c
    # The two centre entries where the antidiagonal crosses the band are reset to -1
    a[n2, n2-1] = -1
    a[n2-1, n2] = -1
    b = np.zeros(n)
    b[0] = 2.5; b[-1] = 2.5; b[1:n-1] = 1.5; b[n2-1:n2+1] = 1.0
    return a, b

# --- GaussElimin ---


def gaussEliminPivot(a, b):
    """Gaussian elimination with partial pivoting, adapted for sparse input matrices."""
    a = a.toarray()  # convert to a dense matrix
    n = len(b)
    for k in range(0, n-1):
        # Partial pivoting: find the row with the largest absolute value in column k
        max_index = k + np.argmax(np.abs(a[k:, k]))
        if a[max_index, k] == 0:
            raise ValueError("The matrix is singular or nearly singular")  # no unique solution
        # Swap rows
        if max_index != k:
            a[[k, max_index]] = a[[max_index, k]]
            b[[k, max_index]] = b[[max_index, k]]
        # Elimination
        for i in range(k+1, n):
            if a[i, k] != 0.0:
                lam = a[i, k] / a[k, k]
                a[i, k+1:n] = a[i, k+1:n] - lam * a[k, k+1:n]
                b[i] = b[i] - lam * b[k]
    # Back substitution
    for k in range(n-1, -1, -1):
        b[k] = (b[k] - np.dot(a[k, k+1:n], b[k+1:n])) / a[k, k]
    return b
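Neither sparsesetup nor gaussEliminPivot is called in the printed cells; a minimal usage sketch might look like this (the size n = 10 is an illustrative assumption, not a value taken from the notebook):

a, b = sparsesetup(10)             # n = 10 is arbitrary, chosen only for illustration
x = gaussEliminPivot(a, b.copy())  # copy b because gaussEliminPivot overwrites it
print(x)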

import numpy as np
from sympy import banded, ones, Matrix, symbols
import sympy as sp

def gaussElimin(a, b):
    n = len(b)
    # Elimination phase
    for k in range(0, n-1):
        for i in range(k+1, n):
            if a[i, k] != 0.0:
                lam = a[i, k] / a[k, k]
                a[i, k+1:n] = a[i, k+1:n] - lam * a[k, k+1:n]
                b[i] = b[i] - lam * b[k]
    # Back substitution
    for k in range(n-1, -1, -1):
        b[k] = (b[k] - np.dot(a[k, k+1:n], b[k+1:n])) / a[k, k]
    return b

# --- iterEqs ---


def iterEqs(x, omega):
    # One relaxation (SOR) sweep: updates x in place and returns it
    n = len(x)
    x[0] = omega * (x[1] - x[n - 1]) / 2.0 + (1.0 - omega) * x[0]
    for i in range(1, n - 1):
        x[i] = omega * (x[i - 1] + x[i + 1]) / 2.0 + (1.0 - omega) * x[i]
    x[n - 1] = omega * (1.0 - x[0] + x[n - 2]) / 2.0 + (1.0 - omega) * x[n - 1]
    return x
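iterEqs is not used in the printed cells; a minimal driver around it might look like the sketch below (the relaxation factor omega = 1.2, the tolerance, and the system size are assumptions for illustration):

def sorSolve(n, omega=1.2, tol=1e-9, max_iter=500):
    x = np.zeros(n)
    for k in range(max_iter):
        x_old = x.copy()
        x = iterEqs(x, omega)                # one in-place relaxation sweep
        if np.linalg.norm(x - x_old) < tol:
            return x, k + 1                  # solution and number of sweeps used
    raise ValueError("SOR did not converge")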

# Jacobi method
def jacobi(A, b, tol=1e-3, max_iter=100):
    n = len(b)
    x = np.zeros(n)
    for _ in range(max_iter):
        x_ant = x.copy()
        for i in range(n):
            # Sum of the off-diagonal terms, using the previous iterate only
            s = sum(A[i, j] * x_ant[j] for j in range(n) if j != i)
            x[i] = (b[i] - s) / A[i, i]
        if np.linalg.norm(x - x_ant) < tol:
            return x
    raise ValueError("Did not converge within the maximum number of iterations")

# Gauss-Seidel method
def gauss_seidel(A, b, tol=1e-3, max_iter=100):
    n = len(b)
    x = np.zeros(n)
    for _ in range(max_iter):
        x_ant = x.copy()
        for i in range(n):
            # Sum of the off-diagonal terms, using the most recently updated values
            s = sum(A[i, j] * x[j] for j in range(n) if j != i)
            x[i] = (b[i] - s) / A[i, i]
        if np.linalg.norm(x - x_ant) < tol:
            return x
    raise ValueError("Did not converge within the maximum number of iterations")
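As a quick sanity check, both iterative solvers can be run on a small diagonally dominant system (the 3x3 example below is made up for illustration and is not part of the assignment):

A_test = np.array([[ 4.0, -1.0,  0.0],
                   [-1.0,  4.0, -1.0],
                   [ 0.0, -1.0,  4.0]])
b_test = np.array([2.0, 4.0, 10.0])
print(jacobi(A_test, b_test))        # expected to approach [1., 2., 3.]
print(gauss_seidel(A_test, b_test))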

Problem 1
a)

import numpy as np
import matplotlib.pyplot as plt
from sympy import banded

a = np.array(banded({0: (0.7071,0,0,0,0,0,0,0,0.7071), 1: (0,1,0,0,0,0,-1,0),
                     -1: (0.7071,1,-1,0,0,1,-1,1), 2: (0,0,0,0,1,0,0),
                     -2: (0,0,0,1, ...),  # -2 diagonal truncated in the printout
                     -3: (0,0,0,0,-0.5,0), 4: (-0.8660,0,0,0,0.7071)}))
b = np.array([0, -1000, 0, 0, 500, 0, 0, -500, 0])

print(gaussElimin(a, b))

[-1035.28094994   732.0471597      0.          -267.9528403
  -535.9056806    732.0471597    767.9528403    267.9528403
  -378.94617494]

b)

from numpy import ones, zeros, float64
from scipy.sparse import dia_matrix
from numpy import diag, dot, reshape

def diagonal(A):
    """Enlarge weak diagonal entries so the matrix becomes strictly diagonally dominant."""
    A = A.copy()
    n = A.shape[0]
    for i in range(n):
        row_sum = np.sum(np.abs(A[i, :])) - np.abs(A[i, i])
        if np.abs(A[i, i]) <= row_sum:
            A[i, i] = row_sum + 1
    return A

A = np.array(banded({0: (0.7071,0,0,0,0,0,0,0,0.7071), 1: (0,1,0,0,0,0,-1,0),
                     -1: (0.7071,1,-1,0,0,1,-1,1), 2: (0,0,0,0,1,0,0),
                     -2: (0,0,0,1, ...),  # -2 diagonal truncated in the printout
                     -3: (0,0,0,0,-0.5,0), 4: (-0.8660,0,0,0,0.7071)}), dtype=float64)

a = diagonal(A)

b = np.array([0, -1000, 0, 0, 500, 0, 0, -500, 0])

print(jacobi(a, b, 6).T)  # the third positional argument sets the tolerance (tol = 6)

[[ 8.31933745e+01 -4.00408487e+02  1.35634074e+02  6.52830199e+01
   1.94146363e+02  1.13757715e-01 -9.74944815e+01 -1.98553240e+02
   1.00932138e+02]]


Problem 2
def newtonRaphson2(f, x, tol=1.e-9):
    """Newton-Raphson for systems; the Jacobian is approximated by forward differences."""
    def jacobian(f, x):
        h = 1.e-4
        n = len(x)
        jac = np.zeros((n, n), dtype=float)
        f0 = np.array(f(x))
        for i in range(n):
            temp = x[i]
            x[i] = temp + h
            f1 = np.array(f(x))
            x[i] = temp
            jac[:, i] = (f1 - f0) / h
        return jac, f0

    for i in range(300):
        jac, f0 = jacobian(f, x)
        if np.linalg.norm(f0) < tol:
            return x
        # Solve J dx = -f for the Newton step, then update x
        dx = gaussElimin(jac, -f0)
        x = x + dx
        if np.linalg.norm(dx) < tol * max(np.max(np.abs(x)), 1.0):
            return x

    print("Too many iterations")


import math

def f(x):
    return [
        0.3 * x[0] - 0.2 * math.sqrt(abs(x[1] - x[0])) - 0.1 * math.sqrt(abs(x[2] - x[0])),
        0.2 * x[1] - 0.1 * math.sqrt(abs(x[3] - x[1])) - 0.2 * math.sqrt(abs(x[2] - x[1])),
        0.1 * x[2] - 0.2 * math.sqrt(abs(x[1] - x[2])) - 0.1 * math.sqrt(abs(x[3] - x[2])),
        0.2 * x[3] - 0.1 * math.sqrt(abs(x[1] - x[3])) - 0.1 * math.sqrt(abs(x[2] - x[3]))
    ]

x0 = [0, 1, 0, 0]
x = newtonRaphson2(f, x0)

print("Solution:")
x

Solution:
array([1.3587743 , 2.32410118, 5.81636998, 1.49476356])
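As a sanity check of the reported root, the residual of f at that point can be evaluated (a sketch using the values printed above):

residual = np.array(f([1.3587743, 2.32410118, 5.81636998, 1.49476356]))
print(np.linalg.norm(residual))   # should be close to zero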
