Experiments - With - Convolutional - Neural - Network - 2 - 6b.ipynb - Colaboratory

Deep Learning Doc

import numpy as np
from scipy import signal

from keras.datasets import mnist
from keras.utils import np_utils

# layer.py
class Layer:
    def __init__(self):
        self.input = None
        self.output = None

    def forward(self, input):
        # TODO: return output
        pass

    def backward(self, output_gradient, learning_rate):
        # TODO: update parameters and return input gradient
        pass

# dense.py
class Dense(Layer):
    def __init__(self, input_size, output_size):
        self.weights = np.random.randn(output_size, input_size)
        self.bias = np.random.randn(output_size, 1)

    def forward(self, input):
        self.input = input
        return np.dot(self.weights, self.input) + self.bias

    def backward(self, output_gradient, learning_rate):
        # dE/dW = dE/dY . X^T, dE/dX = W^T . dE/dY, dE/dB = dE/dY
        weights_gradient = np.dot(output_gradient, self.input.T)
        input_gradient = np.dot(self.weights.T, output_gradient)
        self.weights -= learning_rate * weights_gradient
        self.bias -= learning_rate * output_gradient
        return input_gradient
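A quick way to sanity-check Dense is to push a random column vector through it and confirm the shapes on both passes. This is a minimal sketch, not part of the original notebook; the names layer, x, y, and g are illustrative.

# illustrative shape check for Dense (not part of the original notebook)
layer = Dense(3, 2)
x = np.random.randn(3, 1)          # column-vector input
y = layer.forward(x)               # expected shape: (2, 1)
g = layer.backward(np.ones((2, 1)), learning_rate=0.01)
print(y.shape, g.shape)            # (2, 1) (3, 1)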

# convolutional.py
class Convolutional(Layer):
    def __init__(self, input_shape, kernel_size, depth):
        input_depth, input_height, input_width = input_shape
        self.depth = depth
        self.input_shape = input_shape
        self.input_depth = input_depth
        self.output_shape = (depth, input_height - kernel_size + 1, input_width - kernel_size + 1)
        self.kernels_shape = (depth, input_depth, kernel_size, kernel_size)
        self.kernels = np.random.randn(*self.kernels_shape)
        self.biases = np.random.randn(*self.output_shape)

    def forward(self, input):
        self.input = input
        self.output = np.copy(self.biases)
        for i in range(self.depth):
            for j in range(self.input_depth):
                self.output[i] += signal.correlate2d(self.input[j], self.kernels[i, j], "valid")
        return self.output

    def backward(self, output_gradient, learning_rate):
        kernels_gradient = np.zeros(self.kernels_shape)
        input_gradient = np.zeros(self.input_shape)

        for i in range(self.depth):
            for j in range(self.input_depth):
                kernels_gradient[i, j] = signal.correlate2d(self.input[j], output_gradient[i], "valid")
                input_gradient[j] += signal.convolve2d(output_gradient[i], self.kernels[i, j], "full")

        self.kernels -= learning_rate * kernels_gradient
        self.biases -= learning_rate * output_gradient
        return input_gradient
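As a hedged sanity check (not from the notebook): a single 1x28x28 input through ten 3x3 kernels should produce a 10x26x26 output, and the backward pass should return a gradient with the input's shape.

# illustrative shape check for Convolutional
conv = Convolutional((1, 28, 28), 3, 10)
out = conv.forward(np.random.randn(1, 28, 28))
print(out.shape)   # (10, 26, 26)
grad = conv.backward(np.ones(conv.output_shape), learning_rate=0.01)
print(grad.shape)  # (1, 28, 28)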

# reshape.py
class Reshape(Layer):
    def __init__(self, input_shape, output_shape):
        self.input_shape = input_shape
        self.output_shape = output_shape

    def forward(self, input):
        return np.reshape(input, self.output_shape)

    def backward(self, output_gradient, learning_rate):
        return np.reshape(output_gradient, self.input_shape)
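Reshape has no parameters; its backward pass simply undoes the forward reshape. A tiny round-trip check, illustrative and not part of the notebook:

# illustrative round-trip check for Reshape
r = Reshape((2, 3, 3), (2 * 3 * 3, 1))
col = r.forward(np.zeros((2, 3, 3)))
print(col.shape)                   # (18, 1)
print(r.backward(col, 0.1).shape)  # (2, 3, 3)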

# activation.py
class Activation(Layer):
    def __init__(self, activation, activation_prime):
        self.activation = activation
        self.activation_prime = activation_prime

    def forward(self, input):
        self.input = input
        return self.activation(self.input)

    def backward(self, output_gradient, learning_rate):
        return np.multiply(output_gradient, self.activation_prime(self.input))

# activations.py
class Tanh(Activation):
    def __init__(self):
        def tanh(x):
            return np.tanh(x)

        def tanh_prime(x):
            return 1 - np.tanh(x) ** 2

        super().__init__(tanh, tanh_prime)

class Sigmoid(Activation):
    def __init__(self):
        def sigmoid(x):
            return 1 / (1 + np.exp(-x))

        def sigmoid_prime(x):
            s = sigmoid(x)
            return s * (1 - s)

        super().__init__(sigmoid, sigmoid_prime)

class Softmax(Layer):
    def forward(self, input):
        tmp = np.exp(input)
        self.output = tmp / np.sum(tmp)
        return self.output

    def backward(self, output_gradient, learning_rate):
        # This version is faster than the one presented in the video
        n = np.size(self.output)
        return np.dot((np.identity(n) - self.output.T) * self.output, output_gradient)
        # Original formula:
        # tmp = np.tile(self.output, n)
        # return np.dot(tmp * (np.identity(n) - np.transpose(tmp)), output_gradient)

class Linear(Activation):
    def __init__(self):
        def linear(x):
            return x

        def linear_prime(x):
            return 1

        super().__init__(linear, linear_prime)
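The fast Softmax backward and the commented-out "original formula" compute the same Jacobian-vector product. A minimal numeric check (illustrative, not part of the notebook):

# illustrative check: the fast Softmax Jacobian equals the tiled version
s = Softmax()
out = s.forward(np.random.randn(5, 1))
grad = np.random.randn(5, 1)
n = np.size(out)
fast = np.dot((np.identity(n) - out.T) * out, grad)
tmp = np.tile(out, n)
orig = np.dot(tmp * (np.identity(n) - np.transpose(tmp)), grad)
print(np.allclose(fast, orig))  # True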

# losses.py
def mse(y_true, y_pred):
    return np.mean(np.power(y_true - y_pred, 2))

def mse_prime(y_true, y_pred):
    return 2 * (y_pred - y_true) / np.size(y_true)

def binary_cross_entropy(y_true, y_pred):
    return np.mean(-y_true * np.log(y_pred) - (1 - y_true) * np.log(1 - y_pred))

def binary_cross_entropy_prime(y_true, y_pred):
    return ((1 - y_true) / (1 - y_pred) - y_true / y_pred) / np.size(y_true)
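Hand-checked values for the two losses on a tiny example (illustrative, not part of the notebook):

# mse: mean of (0.1^2, 0.2^2) = 0.025
# bce: mean of (-log 0.9, -log 0.8), roughly 0.1643
y_true = np.array([[1.0], [0.0]])
y_pred = np.array([[0.9], [0.2]])
print(mse(y_true, y_pred))                   # 0.025
print(binary_cross_entropy(y_true, y_pred))  # ~0.1643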

errorlist = []

# network.py
def predict(network, input):
    output = input
    for layer in network:
        output = layer.forward(output)
    return output

def train(network, loss, loss_prime, x_train, y_train, epochs=1000, learning_rate=0.01, verbose=True):
    for e in range(epochs):
        error = 0
        for x, y in zip(x_train, y_train):
            # forward
            output = predict(network, x)

            # error
            error += loss(y, output)

            # backward
            grad = loss_prime(y, output)
            for layer in reversed(network):
                grad = layer.backward(grad, learning_rate)

        error /= len(x_train)
        errorlist.append(error)
        if verbose:
            print(f"{e + 1}/{epochs}, error={error}")

# preprocess data
def preprocess_data(x, y, limit):
    zero_index = np.where(y == 0)[0][:limit]
    one_index = np.where(y == 1)[0][:limit]
    all_indices = np.hstack((zero_index, one_index))
    all_indices = np.random.permutation(all_indices)
    x, y = x[all_indices], y[all_indices]
    x = x.reshape(len(x), 1, 28, 28)
    x = x.astype("float32") / 255
    y = np_utils.to_categorical(y)
    y = y.reshape(len(y), 2, 1)
    return x, y
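With a limit of n, preprocess_data returns at most 2n shuffled samples shaped for the network below: inputs as 1x28x28 arrays scaled to [0, 1] and labels as 2x1 one-hot columns. A quick shape check (illustrative; assumes MNIST has already been fetched by mnist.load_data()):

# illustrative shape check for preprocess_data
(x_raw, y_raw), _ = mnist.load_data()
x_small, y_small = preprocess_data(x_raw, y_raw, 5)
print(x_small.shape, y_small.shape)  # (10, 1, 28, 28) (10, 2, 1)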

class Activation_ReLU:

    # Forward pass
    def forward(self, inputs):
        # Remember input values
        self.inputs = inputs
        # Calculate output values from inputs
        self.output = np.maximum(0, inputs)

    # Backward pass
    def backward(self, dvalues):
        # Since we need to modify the original variable,
        # let's make a copy of the values first
        self.dinputs = dvalues.copy()

        # Zero gradient where input values were negative
        self.dinputs[self.inputs <= 0] = 0

class ReLu(Activation):
    def __init__(self):
        def relu(x):
            return np.maximum(0, x)

        def relu_prime(x):
            # elementwise derivative: 1 where x > 0, else 0
            return np.where(x > 0, 1, 0)

        super().__init__(relu, relu_prime)
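The notebook now holds two ReLU implementations: the stateful Activation_ReLU above and this Layer-based ReLu. On the same input their forward outputs should match; a minimal consistency check (illustrative, not part of the notebook):

# illustrative check: both ReLU variants agree on the forward pass
x = np.random.randn(4, 1)
a = Activation_ReLU()
a.forward(x)            # stores the result in a.output
b = ReLu()
print(np.allclose(a.output, b.forward(x)))  # True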

# driver code 1: MNIST binary classification using a convolutional neural network
# load MNIST from the server; limit to 1 training image and 200 test images
# per class, since we're not training on a GPU
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, y_train = preprocess_data(x_train, y_train, 1)
x_test, y_test = preprocess_data(x_test, y_test, 200)

# neural network
network = [
    Convolutional((1, 28, 28), 3, 10),
    ReLu(),
    Reshape((10, 26, 26), (10 * 26 * 26, 1)),
    Dense(10 * 26 * 26, 100),
    Sigmoid(),
    Dense(100, 2),
    Softmax()
]

# train
train(
    network,
    binary_cross_entropy,
    binary_cross_entropy_prime,
    x_train,
    y_train,
    epochs=20,
    learning_rate=0.1
)

1/20, error=2.009656030527962
2/20, error=1.0754939846524614
3/20, error=0.006409573419381769
4/20, error=0.004735627353092339
5/20, error=0.0042532975618651825
6/20, error=0.003899472309944425
7/20, error=0.0036199779644887054
8/20, error=0.003387460110956414
9/20, error=0.0031883233873882632
10/20, error=0.003014660153503465
11/20, error=0.002861272848434969
12/20, error=0.002724453573368278
13/20, error=0.0026014231378611702
14/20, error=0.0024899976200872496
15/20, error=0.0023884496536846337
16/20, error=0.0022953796912986235
17/20, error=0.0022096397561166678
18/20, error=0.0021302801688255165
19/20, error=0.002056511171881308
20/20, error=0.0019876744790849844

# driver code 1: testing
correct_outputs = 0

for x, y in zip(x_test, y_test):
    output = predict(network, x)
    if np.argmax(output) == np.argmax(y):
        correct_outputs += 1
    print(f"pred: {np.argmax(output)}, true: {np.argmax(y)}")

print(f"Correct Outputs = {correct_outputs} \nAccuracy = {(correct_outputs/len(y_test)) * 100}%")

pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 0
pred: 0, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 1
pred: 0, true: 0
pred: 0, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 0
pred: 0, true: 0
pred: 1, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 0, true: 0
pred: 1, true: 1
pred: 1, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 0
pred: 0, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 0, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 0
pred: 0, true: 0
pred: 0, true: 0
pred: 1, true: 0
pred: 1, true: 0
pred: 0, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 0, true: 0
pred: 1, true: 1
pred: 1, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 1
pred: 0, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 1, true: 0
pred: 1, true: 1
pred: 1, true: 1
pred: 0, true: 0
pred: 0, true: 0
pred: 0, true: 0
pred: 0, true: 0
pred: 1, true: 0
pred: 1, true: 0

import matplotlib.pyplot as plt

x = [i for i in range(1, 21)]

plt.plot(x, errorlist)
plt.xlabel('Epochs')
plt.ylabel('Error')
plt.show()

[Plot: training error vs. epochs]
