
DEEP LEARNING MANUAL Final

This manual walks through nine deep learning lab exercises: solving the XOR problem with a deep neural network (DNN), character recognition and face recognition with convolutional neural networks (CNNs), language modeling with a recurrent neural network (RNN), sentiment analysis with an LSTM, parts-of-speech tagging with a sequence-to-sequence architecture, machine translation with an encoder-decoder model, image augmentation with GANs, and a mini-project. Each exercise includes code for data preparation, model building, training, and evaluation, using libraries such as NumPy, Keras, PyTorch, OpenCV, and Matplotlib.


EX. NO: 1 SOLVING XOR PROBLEM USING DNN


DATE:

Program:
import numpy as np # For matrix math
import matplotlib.pyplot as plt # For plotting
import sys # For printing
X = np.array([[0, 1],[1, 0],[1, 1],[0, 0]])
y = np.array([[1],[1],[0],[0]])
num_i_units, num_h_units, num_o_units = 2, 2, 1
learning_rate = 0.01
reg_param = 0
max_iter = 5000
m = X.shape[0]  # Number of training examples
np.random.seed(1)
W1 = np.random.normal(0, 1, (num_h_units, num_i_units)) # 2x2
W2 = np.random.normal(0, 1, (num_o_units, num_h_units)) # 1x2
B1 = np.random.random((num_h_units, 1)) # 2x1
B2 = np.random.random((num_o_units, 1)) # 1x1
def sigmoid(z, derv=False):
    if derv: return z * (1 - z)
    return 1 / (1 + np.exp(-z))
def forward(x, predict=False):
    a1 = x.reshape(x.shape[0], 1)  # Getting the training example as a column vector.
    z2 = W1.dot(a1) + B1
    a2 = sigmoid(z2)
    z3 = W2.dot(a2) + B2
    a3 = sigmoid(z3)
    if predict: return a3
    return (a1, a2, a3)
dW1, dW2, dB1, dB2 = 0, 0, 0, 0
cost = np.zeros((max_iter, 1))  # Column vector to record the cost of the NN after each gradient descent iteration.
def train(_W1, _W2, _B1, _B2):  # The arguments are to bypass the UnboundLocalError
    for i in range(max_iter):
        c, dW1, dW2, dB1, dB2 = 0, 0, 0, 0, 0
        for j in range(m):
            sys.stdout.write("\rIteration: {} and {}".format(i + 1, j + 1))
            a0 = X[j].reshape(X[j].shape[0], 1)  # 2x1
            z1 = _W1.dot(a0) + _B1  # 2x2 * 2x1 + 2x1 = 2x1
            a1 = sigmoid(z1)  # 2x1
            z2 = _W2.dot(a1) + _B2  # 1x2 * 2x1 + 1x1 = 1x1
            a2 = sigmoid(z2)  # 1x1
            # Back prop.
            dz2 = a2 - y[j]  # 1x1
            dW2 += dz2 * a1.T  # 1x1 .* 1x2 = 1x2
            dz1 = np.multiply((_W2.T * dz2), sigmoid(a1, derv=True))  # (2x1 * 1x1) .* 2x1 = 2x1
            dW1 += dz1.dot(a0.T)  # 2x1 * 1x2 = 2x2
            dB1 += dz1  # 2x1
            dB2 += dz2  # 1x1
            c = c + (-(y[j] * np.log(a2)) - ((1 - y[j]) * np.log(1 - a2)))
            sys.stdout.flush()  # Updating the text.
        _W1 = _W1 - learning_rate * (dW1 / m) + ((reg_param / m) * _W1)
        _W2 = _W2 - learning_rate * (dW2 / m) + ((reg_param / m) * _W2)
        _B1 = _B1 - learning_rate * (dB1 / m)
        _B2 = _B2 - learning_rate * (dB2 / m)
        cost[i] = (c / m) + (
            (reg_param / (2 * m)) *
            (
                np.sum(np.power(_W1, 2)) +
                np.sum(np.power(_W2, 2))
            )
        )
    return (_W1, _W2, _B1, _B2)
W1, W2, B1, B2 = train(W1, W2, B1, B2)
# Assigning the axes to the different elements.
plt.plot(range(max_iter), cost)
# Labelling the x axis as the iterations axis.
plt.xlabel("Iterations")
# Labelling the y axis as the cost axis.
plt.ylabel("Cost")
# Showing the plot.
plt.show()
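
The program plots the training cost but never queries the trained network. A quick check of the learned XOR mapping (not part of the original listing) can reuse the forward() helper defined above:

# Sanity check: evaluate the trained network on every XOR input and compare
# the rounded sigmoid output against the expected label.
for x_row, target in zip(X, y):
    prediction = forward(x_row, predict=True)
    print(x_row, "->", round(float(prediction[0][0]), 3), "(expected", int(target[0]), ")")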

Output:
EX. NO: 2 CHARACTER RECOGNITION USING CNN
DATE:

Program:
pip install opencv-python
pip install keras
pip install tensorflow
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from keras.optimizers import SGD, Adam
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.utils import to_categorical
data = pd.read_csv(r"A_Z Handwritten Data.csv").astype('float32')
X = data.drop('0',axis = 1)
y = data['0']
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size = 0.2)
train_x = np.reshape(train_x.values, (train_x.shape[0], 28,28))
test_x = np.reshape(test_x.values, (test_x.shape[0], 28,28))
word_dict = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',
             13:'N',14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',23:'X',24:'Y',25:'Z'}
y_int = np.int0(y)
# Count how many samples there are for each letter (the quantity the bar plot shows)
count = np.zeros(26, dtype='int')
for label in y_int:
    count[label] += 1
alphabets = list(word_dict.values())
plt.barh(alphabets, count)
# naming the x axis
plt.xlabel("Number of elements")
# naming the y axis
plt.ylabel("Alphabets")
# giving a title
plt.title("Plotting the number of alphabets")
# Turn on the minor TICKS, which are required for the minor GRID
plt.minorticks_on()
# Customize the major grid
plt.grid(which='major', linestyle='-', linewidth='0.5', color='red')
# Customize the minor grid
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
shuff = shuffle(train_x[:100])
fig, ax = plt.subplots(3, 3, figsize=(10, 10))
axes = ax.flatten()
for i in range(9):
    _, shu = cv2.threshold(shuff[i], 30, 200, cv2.THRESH_BINARY)
    axes[i].imshow(np.reshape(shuff[i], (28, 28)), cmap=plt.get_cmap('gray'))
plt.show()
# Reshape data for model creation
train_X = train_x.reshape(train_x.shape[0],train_x.shape[1],train_x.shape[2],1)
print("The new shape of train data: ", train_X.shape)
test_X = test_x.reshape(test_x.shape[0], test_x.shape[1], test_x.shape[2], 1)
print("The new shape of test data: ", test_X.shape)
train_yOHE = to_categorical(train_y, num_classes = 26, dtype='int')
print("The new shape of train labels: ", train_yOHE.shape)
test_yOHE = to_categorical(test_y, num_classes = 26, dtype='int')
print("The new shape of test labels: ", test_yOHE.shape)
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='valid'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(128, activation="relu"))
model.add(Dense(26, activation="softmax"))
model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_X, train_yOHE, epochs=1, validation_data=(test_X, test_yOHE))
model.summary()
model.save(r'model_hand.h5')
print("The validation accuracy is :", history.history['val_accuracy'])
print("The training accuracy is :", history.history['accuracy'])
print("The validation loss is :", history.history['val_loss'])
print("The training loss is :", history.history['loss'])
# Prediction on test data
fig, axes = plt.subplots(3, 3, figsize=(8, 9))
axes = axes.flatten()
for i, ax in enumerate(axes):
    img = np.reshape(test_X[i], (28, 28))
    ax.imshow(img, cmap=plt.get_cmap('gray'))
    pred = word_dict[np.argmax(test_yOHE[i])]
    ax.set_title("Prediction: " + pred)
# Prediction on External Image
img = cv2.imread(r'test_image.jpg')
img_copy = img.copy()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (400,440))
img_copy = cv2.GaussianBlur(img_copy, (7,7), 0)
img_gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
_, img_thresh = cv2.threshold(img_gray, 100, 255, cv2.THRESH_BINARY_INV)
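
The external-image block above stops after thresholding. A minimal continuation is sketched below, assuming the thresholded image contains a single character; it resizes the image to the 28x28 input the network expects and maps the most probable class back through word_dict.

# Assumed continuation: classify the thresholded external image with the trained model.
final_img = cv2.resize(img_thresh, (28, 28))        # match the network's input size
final_img = np.reshape(final_img, (1, 28, 28, 1))   # add batch and channel dimensions
probs = model.predict(final_img)
print("Predicted character:", word_dict[np.argmax(probs)])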
Output:
EX. NO: 3 FACE RECOGNITION USING CNN
DATE:

Program:
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_lfw_people
faces = fetch_lfw_people(min_faces_per_person=100, resize=1.0, slice_=(slice(60, 188), slice(60, 188)), color=True)
class_count = len(faces.target_names)
print(faces.target_names)
print(faces.images.shape)
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, ax = plt.subplots(3, 6, figsize=(18, 10))
for i, axi in enumerate(ax.flat):
    axi.imshow(faces.images[i] / 255)  # Scale pixel values so Matplotlib doesn't clip everything above 1.0
    axi.set(xticks=[], yticks=[], xlabel=faces.target_names[faces.target[i]])
from collections import Counter
counts = Counter(faces.target)
names = {}
for key in counts.keys():
    names[faces.target_names[key]] = counts[key]
df = pd.DataFrame.from_dict(names, orient='index')
df.plot(kind='bar')
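
The model below assumes that the Keras layers are imported and that face_images, x_train, y_train, x_test and y_test already exist, none of which appear earlier in the listing. A minimal preparation step consistent with the rest of the program might look like the following sketch; the 80/20 split, the pixel normalization, and the random_state are assumptions.

# Assumed preparation: imports, normalization, one-hot labels, and a train/test split.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split

face_images = faces.images / 255.0                      # scale pixels to [0, 1]
face_labels = to_categorical(faces.target, num_classes=class_count)
x_train, x_test, y_train, y_test = train_test_split(
    face_images, face_labels, test_size=0.2, random_state=0)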
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(face_images.shape[1:])))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(class_count, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
hist = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=20, batch_size=25)
acc = hist.history['accuracy']
val_acc = hist.history['val_accuracy']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, '-', label='Training Accuracy')
plt.plot(epochs, val_acc, ':', label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.plot()
from sklearn.metrics import confusion_matrix
y_predicted = model.predict(x_test)
mat = confusion_matrix(y_test.argmax(axis=1), y_predicted.argmax(axis=1))
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False, cmap='Blues',
            xticklabels=faces.target_names, yticklabels=faces.target_names)
plt.xlabel('Predicted label')
plt.ylabel('Actual label')
import keras.utils as image
x = image.load_img('george.jpg', target_size=(face_images.shape[1:]))
plt.xticks([])
plt.yticks([])
plt.imshow(x)
x = image.img_to_array(x) / 255
x = np.expand_dims(x, axis=0)
y = model.predict(x)[0]
for i in range(len(y)):
    print(faces.target_names[i] + ': ' + str(y[i]))
EX. NO: 4 LANGUAGE MODELING USING RNN
DATE:

Program:
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string
import torch
import torch.nn as nn
all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1  # Plus EOS marker
def findFiles(path): return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in all_letters)
def categoryTensor(category):
    li = all_categories.index(category)
    tensor = torch.zeros(1, n_categories)
    tensor[0][li] = 1
    return tensor
# One-hot matrix of first to last letters (not including EOS) for input
def inputTensor(line):
    tensor = torch.zeros(len(line), 1, n_letters)
    for li in range(len(line)):
        letter = line[li]
        tensor[li][0][all_letters.find(letter)] = 1
    return tensor
# ``LongTensor`` of second letter to end (EOS) for target
def targetTensor(line):
    letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
    letter_indexes.append(n_letters - 1)  # EOS
    return torch.LongTensor(letter_indexes)
# Make category, input, and target tensors from a random category, line pair
def randomTrainingExample():
    category, line = randomTrainingPair()
    category_tensor = categoryTensor(category)
    input_line_tensor = inputTensor(line)
    target_line_tensor = targetTensor(line)
    return category_tensor, input_line_tensor, target_line_tensor
criterion = nn.NLLLoss()
learning_rate = 0.0005
def train(category_tensor, input_line_tensor, target_line_tensor):
    target_line_tensor.unsqueeze_(-1)
    hidden = rnn.initHidden()
    rnn.zero_grad()
    loss = 0
    for i in range(input_line_tensor.size(0)):
        output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)
        l = criterion(output, target_line_tensor[i])
        loss += l
    loss.backward()
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)
    return output, loss.item() / input_line_tensor.size(0)
import time
import math
def timeSince(since):
    now = time.time()
    s = now - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def readLines(filename):
    with open(filename, encoding='utf-8') as some_file:
        return [unicodeToAscii(line.strip()) for line in some_file]
# Build the category_lines dictionary, a list of lines per category
category_lines = {}
all_categories = []
for filename in findFiles('data/names/*.txt'):
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    category_lines[category] = readLines(filename)
n_categories = len(all_categories)
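
The next line instantiates an RNN class, and randomTrainingExample() above calls a randomTrainingPair() helper, neither of which is defined in the listing. The sketch below is one way to fill that gap, modeled on the PyTorch character-level generation tutorial this program follows; the layer sizes and dropout rate are assumptions.

import random

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # The category, the current letter, and the hidden state are concatenated.
        self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)
        self.o2o = nn.Linear(hidden_size + output_size, output_size)
        self.dropout = nn.Dropout(0.1)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, category, input, hidden):
        input_combined = torch.cat((category, input, hidden), 1)
        hidden = self.i2h(input_combined)
        output = self.i2o(input_combined)
        output_combined = torch.cat((hidden, output), 1)
        output = self.o2o(output_combined)
        output = self.dropout(output)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)

# Pick a random category and a random name from that category.
def randomChoice(l):
    return l[random.randint(0, len(l) - 1)]

def randomTrainingPair():
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    return category, line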
rnn = RNN(n_letters, 128, n_letters)
n_iters = 100000
print_every = 5000
plot_every = 500
all_losses = []
total_loss = 0 # Reset every ``plot_every`` ``iters``
start = time.time()
for iter in range(1, n_iters + 1):
    output, loss = train(*randomTrainingExample())
    total_loss += loss
    if iter % print_every == 0:
        print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, loss))
    if iter % plot_every == 0:
        all_losses.append(total_loss / plot_every)
        total_loss = 0
import matplotlib.pyplot as plt
plt.figure()
plt.plot(all_losses)
max_length = 20
# Sample from a category and starting letter
def sample(category, start_letter='A'):
    with torch.no_grad():  # no need to track history in sampling
        category_tensor = categoryTensor(category)
        input = inputTensor(start_letter)
        hidden = rnn.initHidden()
        output_name = start_letter
        for i in range(max_length):
            output, hidden = rnn(category_tensor, input[0], hidden)
            topv, topi = output.topk(1)
            topi = topi[0][0]
            if topi == n_letters - 1:
                break
            else:
                letter = all_letters[topi]
                output_name += letter
            input = inputTensor(letter)
        return output_name
# Get multiple samples from one category and multiple starting letters
def samples(category, start_letters='ABC'):
    for start_letter in start_letters:
        print(sample(category, start_letter))
samples('Russian', 'RUS')
samples('German', 'GER')
samples('Spanish', 'SPA')
samples('Chinese', 'CHI')

Output:
EXP. NO: 5 SENTIMENT ANALYSIS USING LSTM
DATE:

Program:
pip install Keras-Preprocessing
import re
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras_preprocessing.sequence import pad_sequences
import keras
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import math
import nltk
data = pd.read_csv('IMDB Dataset.csv')
data
def remove_tags(string):
    removelist = ""
    result = re.sub('<.*?>', '', string)                    # remove HTML tags
    result = re.sub('https://.*', '', result)               # remove URLs
    result = re.sub(r'[^\w'+removelist+']', ' ', result)    # remove non-alphanumeric characters
    result = result.lower()
    return result
data['review']=data['review'].apply(lambda cw : remove_tags(cw))
nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
data['review'] = data['review'].apply(lambda x: ' '.join([word for word in x.split() if word not in stop_words]))
import nltk
nltk.download()
#we want to download 'wordnet' and 'omw-1.4' from nltk
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
    st = ""
    for w in w_tokenizer.tokenize(text):
        st = st + lemmatizer.lemmatize(w) + " "
    return st
data['review'] = data.review.apply(lemmatize_text)
data
s = 0.0
for i in data['review']:
    word_list = i.split()
    s = s + len(word_list)
print("Average length of each review : ", s/data.shape[0])
pos = 0
for i in range(data.shape[0]):
    if data.iloc[i]['sentiment'] == 'positive':
        pos = pos + 1
neg = data.shape[0] - pos
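
The model and evaluation code below rely on vocab_size, embedding_dim, max_length, a fitted tokenizer, and padded train/test splits (train_padded, train_labels, test_padded, test_labels), none of which are defined in the listing. A minimal sketch of that missing step follows; the hyperparameter values and the 80/20 split are assumptions.

# Assumed preprocessing: encode labels, split the data, tokenize and pad the reviews.
reviews = data['review'].values
labels = np.where(data['sentiment'].values == 'positive', 1, 0)

train_sentences, test_sentences, train_labels, test_labels = train_test_split(
    reviews, labels, test_size=0.2, random_state=42)

vocab_size = 3000        # assumed vocabulary size
embedding_dim = 100      # assumed embedding dimension
max_length = 200         # assumed maximum review length (in tokens)

tokenizer = Tokenizer(num_words=vocab_size, oov_token='<OOV>')
tokenizer.fit_on_texts(train_sentences)

train_padded = pad_sequences(tokenizer.texts_to_sequences(train_sentences),
                             padding='post', maxlen=max_length)
test_padded = pad_sequences(tokenizer.texts_to_sequences(test_sentences),
                            padding='post', maxlen=max_length)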
# model initialization
model = keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
input_length=max_length),
keras.layers.Bidirectional(keras.layers.LSTM(64)),
keras.layers.Dense(24, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
# compile model
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# model summary
model.summary()
num_epochs = 5
history = model.fit(train_padded, train_labels,
epochs=num_epochs, verbose=1,
validation_split=0.1)
prediction = model.predict(test_padded)
# Get labels based on probability 1 if p>= 0.5 else 0
pred_labels = []
for i in prediction:
    if i >= 0.5:
        pred_labels.append(1)
    else:
        pred_labels.append(0)
print("Accuracy of prediction on test set : ",
accuracy_score(test_labels,pred_labels))
# reviews on which we need to predict
sentence = ["The movie was very touching and heart whelming",
"I have never seen a terrible movie like this",
"the movie plot is terrible but it had good acting"]
# convert to a sequence
sequences = tokenizer.texts_to_sequences(sentence)
# pad the sequence
padded = pad_sequences(sequences, padding='post', maxlen=max_length)
# Get labels based on probability 1 if p>= 0.5 else 0
prediction = model.predict(padded)
pred_labels = []
for i in prediction:
    if i >= 0.5:
        pred_labels.append(1)
    else:
        pred_labels.append(0)
for i in range(len(sentence)):
    print(sentence[i])
    if pred_labels[i] == 1:
        s = 'Positive'
    else:
        s = 'Negative'
    print("Predicted sentiment : ", s)

Output:
EXP. NO: 6 PARTS OF SPEECH TAGGING USING SEQUENCE TO SEQUENCE ARCHITECTURE

DATE:

Program:
import numpy as np
import pandas as pd
import json
import functools as fc
from sklearn.metrics import accuracy_score
train = pd.read_csv('data1/train', sep='\t', names=['index', 'word', 'POS'])
train.head()
word = train['word'].values.tolist()
index = train['index'].values.tolist()
pos = train['POS'].values.tolist()
vocab = {}
for i in range(len(word)):
    if word[i] in vocab:
        vocab[word[i]] += 1
    else:
        vocab[word[i]] = 1
# replace rare words with <unk> (threshold = 3)
vocab2 = {}
num_unk = 0
for w in vocab:
    if vocab[w] >= 3:
        vocab2[w] = vocab[w]
    else:
        num_unk += vocab[w]
# sort the vocabulary by occurrences of words
vocab_sorted = sorted(vocab.items(), key=lambda item: item[1], reverse=True)
# write the sorted vocabulary to vocab file
#with open('recap/vocab.txt', 'w') as vocab_file:
with open('output/vocab_frequent', 'w') as vocab_file:
    # the format of the vocab is word index occurrence
    # we add <unk> to the top of the vocabulary manually
    vocab_file.write('<unk>' + '\t' + str(0) + '\t' + str(num_unk) + '\n')
    for i in range(len(vocab_sorted)):
        vocab_file.write(vocab_sorted[i][0] + '\t' + str(i+1) + '\t' + str(vocab_sorted[i][1]) + '\n')
print(f'The total size of my vocabulary is {len(vocab_sorted)}\n')
print(f'The total occurrences of <unk> is {num_unk}\n')
Task 2: Model Learning
# build a vocabulary list with only frequent words (i.e. occur no less than 3 times)
vocab_ls = list(vocab2.keys())
# write the frequent words into a text file
#with open('recap/vocab_frequent.txt', 'w') as output:
with open('output/vocal_frequent', 'w') as output:
    for w in vocab_ls:  # use w here so the word list defined above is not overwritten
        output.write(w + '\n')
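
The Viterbi fragment further down indexes transition and emission dictionaries (with keys of the form 'tag2|tag1' and 'word|tag') that are never built in the listing. The sketch below shows one way the missing 'Model Learning' step could estimate them by maximum likelihood from the training lists; the sentence-boundary convention (the index column restarting at 1) is an assumption about the data format.

# Assumed HMM parameter estimation: transition['t2|t1'] = P(t2 | t1) and
# emission['w|t'] = P(w | t), computed by maximum likelihood from the training data.
from collections import defaultdict

tag_count = defaultdict(int)
transition_count = defaultdict(int)
emission_count = defaultdict(int)

for i in range(len(word)):
    w = word[i] if word[i] in vocab2 else '<unk>'   # rare words are mapped to <unk>
    t = pos[i]
    tag_count[t] += 1
    emission_count[w + '|' + t] += 1
    if index[i] != 1:                               # assumed: index == 1 marks a sentence start
        transition_count[pos[i] + '|' + pos[i-1]] += 1

transition = {k: v / tag_count[k.split('|')[1]] for k, v in transition_count.items()}
emission = {k: v / tag_count[k.split('|')[1]] for k, v in emission_count.items()}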
# replace non-frequent words in word with <unk>
# Inner step of the viterbi() decoder: keep the best probability for tag p_prime
# at position i of the sentence and remember the best previous tag in pre_pos.
                if temp > seq[i][p_prime]:
                    seq[i][p_prime] = temp
                    pre_pos[i][p_prime] = p
            except:
                pass
        else:
            try:
                seq[i][p_prime] = seq[i-1][p] * \
                    transition[p_prime + '|' + p] * \
                    emission[sentence[i] + '|' + p_prime]
                pre_pos[i][p_prime] = p
            except:
                seq[i][p_prime] = 0
# after we get the maximum probability for every possible pos at every position of a sentence,
# we can trace backward to find out our prediction on the pos for the sentence.
seq_predict = []
# The pos of the last word in the sentence is the one with the highest probability
# after predicting the pos of the last word in the sentence, we can iterate through pre_pos to predict
# the pos of the remaining words in the input sentence in the reverse order
# the highest probability
prob_max = max(seq[len(sentence)-1].values())
# the index of the highest probability
index_max = list(seq[len(sentence)-1].values()).index(prob_max)
# the pos of the highest probability
pos_max = list(seq[len(sentence)-1].keys())[index_max]
seq_predict.append(pos_max)
# iterate through pre_pos
for i in range(len(sentence)-1, 0, -1):
    # for some rare ss or sx pairs, there is no corresponding key in the
    # transition or emission dictionary. In this case, we need to set manually
    # the pos to 'UNK' at those positions
    try:
        pos_max = pre_pos[i][pos_max]
        seq_predict.append(pos_max)
    except:
        seq_predict.append('UNK')
# The final seq_predict should be the reverse of the original
seq_predict = [seq_predict[i] for i in range(len(seq_predict)-1, -1, -1)]
# use viterbi to predict pos for dev
pos_viterbi = [viterbi(s) for s in word_dev2]
# merge the list of sublists to a single list
pos_viterbi = fc.reduce(lambda a, b: a + b, pos_viterbi)
pos_dev = fc.reduce(lambda a, b: a + b, pos_dev2)
acc = accuracy_score(pos_dev, pos_viterbi)
print('The prediction accuracy on the dev data is {:.2f}%'.format(acc * 100))

Output:
EXP. NO: 7 MACHINE TRANSLATION USING ENCODER-DECODER MODEL
DATE:

Program:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
data_path = 'fra.txt'
num_samples = 10000  # number of samples to train on (value assumed; not given in the listing)
# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
    lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
    input_text, target_text, _ = line.split('\t')
    # We use "tab" as the "start sequence" character
    # for the targets, and "\n" as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    for char in input_text:
        if char not in input_characters:
            input_characters.add(char)
    for char in target_text:
        if char not in target_characters:
            target_characters.add(char)
input_characters=sorted(list(input_characters))
target_characters=sorted(list(target_characters))
num_encoder_tokens=len(input_characters)
num_decoder_tokens=len(target_characters)
max_encoder_seq_length=max([len(txt) for txt in input_texts])
max_decoder_seq_length=max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
input_token_index = dict([(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict([(char, i) for i, char in enumerate(target_characters)])
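
The decoding loop below uses encoder_input_data and a decode_sequence() helper that the listing never defines. The following condensed sketch, based on the standard Keras character-level seq2seq example this program appears to follow, fills in the one-hot vectorization, the training model, the inference models, and decode_sequence(); latent_dim, batch_size, and the number of epochs are assumptions.

from keras.models import Model
from keras.layers import Input, LSTM, Dense

latent_dim = 256   # assumed size of the LSTM state

# One-hot encode the data.
encoder_input_data = np.zeros((len(input_texts), max_encoder_seq_length, num_encoder_tokens), dtype='float32')
decoder_input_data = np.zeros((len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')
decoder_target_data = np.zeros((len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.0
    for t, char in enumerate(target_text):
        decoder_input_data[i, t, target_token_index[char]] = 1.0
        if t > 0:
            # decoder_target_data is ahead of decoder_input_data by one timestep.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.0

# Training model: the encoder states initialise the decoder.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder_outputs, state_h, state_c = LSTM(latent_dim, return_state=True)(encoder_inputs)
encoder_states = [state_h, state_c]
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=64, epochs=10, validation_split=0.2)

# Inference models for decoding one character at a time.
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs, state_h, state_c])

reverse_target_char_index = dict((i, char) for char, i in target_token_index.items())

def decode_sequence(input_seq):
    # Encode the input, then generate the translation character by character.
    states_value = encoder_model.predict(input_seq, verbose=0)
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, target_token_index['\t']] = 1.0   # start-of-sequence character
    decoded_sentence = ''
    while True:
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value, verbose=0)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        if sampled_char == '\n' or len(decoded_sentence) > max_decoder_seq_length:
            break
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.0
        states_value = [h, c]
    return decoded_sentence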
for seq_index in range(100):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', input_texts[seq_index])
    print('Decoded sentence:', decoded_sentence)
Output:
EX. NO: 8 IMAGE AUGMENTATION USING GANs
DATE:

Program:
import os
import numpy as np
import keras.utils as image
import matplotlib.pyplot as plt
%matplotlib inline
def load_images_from_path(path, label):
    images = []
    labels = []
    for file in os.listdir(path):
        img = image.load_img(os.path.join(path, file), target_size=(224, 224, 3))
        images.append(image.img_to_array(img))
        labels.append(label)
    return images, labels
def show_images(images):
    fig, axes = plt.subplots(1, 8, figsize=(20, 20), subplot_kw={'xticks': [], 'yticks': []})
    for i, ax in enumerate(axes.flat):
        ax.imshow(images[i] / 255)
x_train = []
y_train = []
x_test = []
y_test = []
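
The prediction code below expects populated training lists, class_labels, preprocess_input, a trained model, and an image x, none of which are set up in the listing. The sketch below fills those gaps under stated assumptions: the arctic-wildlife directory layout implied by the sample path further down, three assumed class names, and a small CNN classifier chosen purely for illustration.

# Assumed setup: load the three classes, build and train a small CNN, and load
# an image for the prediction block that follows.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.utils import to_categorical
from keras.applications.mobilenet_v2 import preprocess_input   # scales pixels to [-1, 1]

class_labels = ['arctic_fox', 'polar_bear', 'walrus']           # assumed class names
for label, name in enumerate(class_labels):
    images, labels = load_images_from_path(f'arctic-wildlife/train/{name}', label)   # assumed paths
    x_train += images
    y_train += labels
    images, labels = load_images_from_path(f'arctic-wildlife/test/{name}', label)
    x_test += images
    y_test += labels

x_train, x_test = preprocess_input(np.array(x_train)), preprocess_input(np.array(x_test))
y_train_encoded = to_categorical(y_train)
y_test_encoded = to_categorical(y_test)

model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(224, 224, 3)),
    MaxPooling2D(2, 2),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(2, 2),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(len(class_labels), activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train_encoded, validation_data=(x_test, y_test_encoded), epochs=5, batch_size=32)

# Load an image to feed into the prediction block below (reusing the sample path from later in the listing).
x = image.load_img('arctic-wildlife/samples/walrus/walrus_143.png', target_size=(224, 224))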
x = image.img_to_array(x)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
predictions = model.predict(x)
for i, label in enumerate(class_labels):
    print(f'{label}: {predictions[0][i]}')
x = image.load_img('arctic-wildlife/samples/walrus/walrus_143.png', target_size=(224, 224))
plt.xticks([])
plt.yticks([])
plt.imshow(x)
x = image.img_to_array(x)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
predictions = model.predict(x)
for i, label in enumerate(class_labels):
    print(f'{label}: {predictions[0][i]}')
Output:
EX. NO: 9 MINI- PROJECT
DATE:

Program:
import tensorflow
import pandas as pd
from tensorflow.python.keras import utils
from keras.models import Sequential
from keras.layers import Dense, Flatten, InputLayer
import keras
import imageio # To read images
from PIL import Image # For image resizing
from sklearn.model_selection import train_test_split
df = pd.read_csv("rit.csv")
input_num_units = (32, 32, 3)
hidden_num_units = 500
output_num_units = 2
epochs = 5
batch_size = 128
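
The model.fit() calls below expect train_x and train_y, which the listing never builds. The sketch below is one assumed preparation step: it presumes rit.csv has a 'filename' column pointing to image files and an integer 'label' column, resizes each image to 32x32, and one-hot encodes the labels.

# Assumed data preparation: read each image listed in the CSV, resize it to
# 32x32, scale it, and one-hot encode the two-class labels.
import numpy as np
from keras.utils import to_categorical

images = []
for fname in df['filename']:                        # assumed column name
    img = imageio.imread(fname)                     # read the image file
    img = np.array(Image.fromarray(img).resize((32, 32)))
    images.append(img.astype('float32') / 255.0)    # scale pixels to [0, 1]

x_all = np.stack(images)
y_all = to_categorical(df['label'].values, num_classes=output_num_units)   # assumed integer labels

train_x, val_x, train_y, val_y = train_test_split(x_all, y_all, test_size=0.2, random_state=0)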
model = Sequential([
InputLayer(input_shape=input_num_units),
Flatten(),
Dense(units=hidden_num_units, activation='relu'),
Dense(units=output_num_units, activation='softmax'),
])
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_x, train_y, batch_size=batch_size, epochs=epochs, verbose=1)
model.fit(train_x, train_y, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.2)

Output:
