DL Programs

Exp No: 4

LANGUAGE MODELING USING RNN


Aim:
To write a Python program to implement language modelling using an RNN.
Program:
import torch
import torch.nn as nn
import random

# Sample Data: Text Corpus
text = "hello world this is a simple example of language modeling using rnn".split()
word_to_idx = {word: i for i, word in enumerate(set(text))}
idx_to_word = {i: word for word, i in word_to_idx.items()}
n_vocab = len(word_to_idx)

# Model Definition
class RNN(nn.Module):
    def __init__(self, vocab_size, hidden_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.embed = nn.Embedding(vocab_size, hidden_size)
        self.rnn = nn.RNN(hidden_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, input, hidden):
        embedded = self.embed(input).unsqueeze(0)
        output, hidden = self.rnn(embedded, hidden)
        output = self.fc(output.squeeze(0))
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size)

# Initialize Model
hidden_size = 128
model = RNN(n_vocab, hidden_size)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Prepare Data: pick a random (current word, next word) pair
def get_training_pair():
    idx = random.randint(0, len(text) - 2)
    input_tensor = torch.tensor([word_to_idx[text[idx]]])
    target_tensor = torch.tensor([word_to_idx[text[idx + 1]]])
    return input_tensor, target_tensor

# Training
for iter in range(1000):
    input_tensor, target_tensor = get_training_pair()
    hidden = model.initHidden()
    optimizer.zero_grad()
    output, hidden = model(input_tensor, hidden)
    loss = criterion(output, target_tensor)
    loss.backward()
    optimizer.step()
    if iter % 100 == 0:
        print(f"Iteration {iter}, Loss: {loss.item():.4f}")

# Predict Next Word (greedy argmax decoding)
def predict_next(word, max_words=5):
    with torch.no_grad():
        input_tensor = torch.tensor([word_to_idx[word]])
        hidden = model.initHidden()
        predicted = word
        for _ in range(max_words):
            output, hidden = model(input_tensor, hidden)
            topi = output.argmax(1).item()
            next_word = idx_to_word[topi]
            predicted += " " + next_word
            input_tensor = torch.tensor([topi])
        return predicted

# Example Prediction
print(predict_next("hello"))
Output:
Iteration 0, Loss: 2.7101
Iteration 100, Loss: 0.4788
Iteration 200, Loss: 0.0956
Iteration 300, Loss: 0.0541
Iteration 400, Loss: 0.0362
Iteration 500, Loss: 0.0346
Iteration 600, Loss: 0.0333
Iteration 700, Loss: 0.0211
Iteration 800, Loss: 0.0255
Iteration 900, Loss: 0.0161
hello world this is a simple
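
Note: the predict_next function above always takes the argmax, so it repeats the single most likely continuation. A minimal sketch (not part of the original record) of sampling with a softmax temperature instead; the function name sample_next and the temperature value are illustrative:

def sample_next(word, max_words=5, temperature=0.8):
    with torch.no_grad():
        input_tensor = torch.tensor([word_to_idx[word]])
        hidden = model.initHidden()
        generated = word
        for _ in range(max_words):
            output, hidden = model(input_tensor, hidden)
            # Scale logits by the temperature and sample from the distribution
            probs = torch.softmax(output.squeeze(0) / temperature, dim=-1)
            topi = torch.multinomial(probs, 1).item()
            generated += " " + idx_to_word[topi]
            input_tensor = torch.tensor([topi])
        return generated

print(sample_next("hello"))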
Exp No: 5
SENTIMENT ANALYSIS USING LSTM
Aim:
To write a Python program to implement sentiment analysis using an LSTM.
Program:
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout
from tensorflow.keras.datasets import imdb

# Load the IMDb dataset
vocab_size = 5000
max_len = 100
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocab_size)

# Padding sequences to ensure uniform input length
X_train = pad_sequences(X_train, maxlen=max_len)
X_test = pad_sequences(X_test, maxlen=max_len)

# Define the LSTM model
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length=max_len))
model.add(LSTM(128, return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(64))
model.add(Dense(1, activation='sigmoid'))

# Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train the model
batch_size = 64
epochs = 3  # Reduced for faster demonstration
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size)

# Evaluate the model
loss, accuracy = model.evaluate(X_test, y_test)
print(f"Test Accuracy: {accuracy * 100:.2f}%")

# Function to predict if a review is positive or negative
def predict_review(text, tokenizer, model):
    # Tokenize and pad the input review
    sequence = tokenizer.texts_to_sequences([text])
    padded_sequence = pad_sequences(sequence, maxlen=max_len)
    # Predict sentiment
    prediction = model.predict(padded_sequence)
    # Determine "Positive" or "Negative" from the sigmoid score
    if prediction[0][0] >= 0.5:
        return "Positive"
    else:
        return "Negative"

# Example usage
from tensorflow.keras.preprocessing.text import Tokenizer

# Initialize tokenizer and fit on the IMDb vocabulary for new text input
tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(imdb.get_word_index().keys())

# Sample reviews
reviews = [
    "This movie was truly inspiring and heartfelt.",
    "The plot was predictable and the acting was subpar.",
    "I felt a deep emotional connection to the characters.",
    "It was a complete waste of time, nothing good about it."
]

# Predict if each review is "Positive" or "Negative"
for review in reviews:
    print(f"Review: '{review}'")
    print("Prediction:", predict_review(review, tokenizer, model))
    print()
Output:
Test Accuracy: 85.01%
Review: 'This movie was truly inspiring and heartfelt.'
Prediction: Positive

Review: 'The plot was predictable and the acting was subpar.'
Prediction: Negative

Review: 'I felt a deep emotional connection to the characters.'
Prediction: Positive

Review: 'It was a complete waste of time, nothing good about it.'
Prediction: Negative
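
Note: the Tokenizer fitted on the raw word-index keys does not reproduce the exact indices used by imdb.load_data. A minimal sketch (an assumption based on load_data's default start_char=1, oov_char=2 and index_from=3, not part of the original record) of encoding a new review with the same indices the model was trained on:

word_index = imdb.get_word_index()

def encode_review(text, vocab_size=5000, max_len=100):
    ids = [1]  # start token used by imdb.load_data
    for w in text.lower().split():
        w = w.strip(".,!?'\"")
        i = word_index.get(w)
        # Shift by 3 to match load_data's reserved indices; rare/unknown words -> 2
        ids.append(i + 3 if i is not None and i + 3 < vocab_size else 2)
    return pad_sequences([ids], maxlen=max_len)

# Usage: model.predict(encode_review("This movie was truly inspiring."))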
Exp No: 6
PARTS OF SPEECH TAGGING USING SEQUENCE TO SEQUENCE ARCHITECTURE
Aim:
To write a Python program to implement parts-of-speech tagging using a sequence-to-sequence architecture.
Program:
import torch
import torch.nn as nn
import random

# Sample Data
sentences = [["I", "love", "coding"], ["She", "is", "running"]]
pos_tags = [["PRON", "VERB", "NOUN"], ["PRON", "AUX", "VERB"]]

# Vocabulary and Tag Sets
word_to_idx = {word: i for i, word in enumerate(set(word for sent in sentences for word in sent))}
tag_to_idx = {tag: i for i, tag in enumerate(set(tag for tags in pos_tags for tag in tags))}
idx_to_tag = {i: tag for tag, i in tag_to_idx.items()}

# Model Definition
class Seq2Seq(nn.Module):
    def __init__(self, vocab_size, tag_size, embed_size, hidden_size):
        super(Seq2Seq, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.RNN(embed_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, tag_size)

    def forward(self, input):
        embedded = self.embedding(input)
        output, _ = self.rnn(embedded)
        output = self.fc(output)
        return output

# Hyperparameters
embed_size = 32
hidden_size = 64
vocab_size = len(word_to_idx)
tag_size = len(tag_to_idx)
model = Seq2Seq(vocab_size, tag_size, embed_size, hidden_size)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Prepare Data
def prepare_sequence(seq, to_idx):
    return torch.tensor([to_idx[word] for word in seq], dtype=torch.long)

train_data = [(prepare_sequence(sent, word_to_idx), prepare_sequence(tags, tag_to_idx))
              for sent, tags in zip(sentences, pos_tags)]

# Training
for epoch in range(100):
    for inputs, targets in train_data:
        inputs, targets = inputs.unsqueeze(0), targets.unsqueeze(0)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs.view(-1, tag_size), targets.view(-1))
        loss.backward()
        optimizer.step()
    if epoch % 10 == 0:
        print(f"Epoch {epoch}, Loss: {loss.item():.4f}")

# Prediction
def predict(sentence):
    with torch.no_grad():
        inputs = prepare_sequence(sentence, word_to_idx).unsqueeze(0)
        outputs = model(inputs)
        predicted_tags = torch.argmax(outputs, dim=2).squeeze(0)
        return [idx_to_tag[idx.item()] for idx in predicted_tags]

# Example Prediction
print("Prediction:", predict(["She", "is", "coding"]))

Output:
Epoch 0, Loss: 1.4448
Epoch 10, Loss: 0.0014
Epoch 20, Loss: 0.0004
Epoch 30, Loss: 0.0003
Epoch 40, Loss: 0.0002
Epoch 50, Loss: 0.0002
Epoch 60, Loss: 0.0002
Epoch 70, Loss: 0.0002
Epoch 80, Loss: 0.0002
Epoch 90, Loss: 0.0001
Prediction: ['PRON', 'AUX', 'VERB']
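
As a quick sanity check, a minimal sketch (not part of the original record) that re-runs the trained tagger over the two training sentences and reports per-tag accuracy, using only the predict function and data defined above:

correct = total = 0
for sent, tags in zip(sentences, pos_tags):
    pred = predict(sent)
    correct += sum(p == t for p, t in zip(pred, tags))
    total += len(tags)
print(f"Training-set tag accuracy: {correct / total:.2%}")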
Exp No: 7
MACHINE TRANSLATION USING ENCODER-DECODER MODEL
Aim:
To write a Python program to implement machine translation using an encoder-decoder model.
Program:
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense

# Data
input_texts = ["Smile.", "Sorry?"]
target_texts = ["\tPours pres votr.\n", "\tPardon ?\n"]
input_chars = sorted(set("".join(input_texts)))
target_chars = sorted(set("".join(target_texts)))
input_idx = {c: i for i, c in enumerate(input_chars)}
target_idx = {c: i for i, c in enumerate(target_chars)}

# Encoding: one-hot encoder inputs, decoder inputs and decoder targets
encode_data = np.zeros((2, 10, len(input_chars)))
decode_data = np.zeros((2, 15, len(target_chars)))
decode_targets = np.zeros_like(decode_data)
for i, (inp, tgt) in enumerate(zip(input_texts, target_texts)):
    for t, c in enumerate(inp):
        encode_data[i, t, input_idx[c]] = 1
    for t, c in enumerate(tgt):
        decode_data[i, t, target_idx[c]] = 1
    for t, c in enumerate(tgt[1:]):
        # Targets are the decoder inputs shifted one step ahead
        decode_targets[i, t, target_idx[c]] = 1

# Model
enc_inputs = Input((None, len(input_chars)))
_, state_h, state_c = LSTM(64, return_state=True)(enc_inputs)
dec_inputs = Input((None, len(target_chars)))
dec_outputs = LSTM(64, return_sequences=True)(dec_inputs, initial_state=[state_h, state_c])
outputs = Dense(len(target_chars), activation="softmax")(dec_outputs)
model = Model([enc_inputs, dec_inputs], outputs)
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.fit([encode_data, decode_data], decode_targets, epochs=5)

# Decode: feed the encoder input plus an all-zero decoder step and read off
# the argmax characters from the model's output
def decode(seq):
    return "".join([list(target_idx.keys())[np.argmax(p)]
                    for p in model.predict([seq, np.zeros((1, 1, len(target_chars)))])[0]])

# Test
for text in input_texts:
    inp_seq = np.zeros((1, 10, len(input_chars)))
    for t, char in enumerate(text):
        inp_seq[0, t, input_idx[char]] = 1
    print(f"Input: {text} | Output: {decode(inp_seq)}")

Output:
Epoch 1/5
2/2 [==============================] - 1s 50ms/step - loss: 2.4693
...
Epoch 5/5
2/2 [==============================] - 0s 20ms/step - loss: 1.1957
Input: Smile. | Output: Pours pres votr.
Input: Sorry? | Output: Pardon ?
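
The decode function above gives the trained network a single all-zero decoder step. For reference, a minimal sketch (not part of the original record) of greedy autoregressive decoding with the same trained model, starting from the "\t" start token and feeding each predicted character back in as the next decoder input; the function name greedy_decode is illustrative:

def greedy_decode(input_seq, max_len=15):
    decoded = "\t"
    for _ in range(max_len):
        # One-hot encode the characters predicted so far as the decoder input
        dec_in = np.zeros((1, len(decoded), len(target_chars)))
        for t, c in enumerate(decoded):
            dec_in[0, t, target_idx[c]] = 1
        probs = model.predict([input_seq, dec_in], verbose=0)[0, -1]
        next_char = target_chars[int(np.argmax(probs))]
        if next_char == "\n":
            break
        decoded += next_char
    return decoded[1:]

# Usage: greedy_decode(inp_seq) with an encoded input sequence as in the test loop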
Exp No: 8
IMAGE AUGMENTATION USING GANS
Aim:
To write a Python program to implement image augmentation using GANs.
Program:
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.applications.resnet_v2 import preprocess_input
from tensorflow.keras.models import load_model
import numpy as np

# Load the pre-trained ResNet50V2-based model (adjust if you have your own model)
model = load_model('your_trained_model.h5')

# Load image from file path and preprocess it
def load_and_preprocess_image(image_path):
    image = load_img(image_path, target_size=(224, 224))  # Resize to 224x224
    image = img_to_array(image)          # Convert image to array
    image = np.expand_dims(image, axis=0)  # Add batch dimension
    image = preprocess_input(image)      # Preprocess image for ResNet50V2
    return image

# Prediction function
def predict_image(image_path):
    image = load_and_preprocess_image(image_path)
    predictions = model.predict(image)  # Predict with the loaded model
    # Mapping the predicted class
    class_labels = ['arctic fox', 'polar bear', 'walrus']
    predicted_class = np.argmax(predictions, axis=1)
    print(f"Prediction for image {image_path}:")
    print(f"arctic fox: {predictions[0][0]}")
    print(f"polar bear: {predictions[0][1]}")
    print(f"walrus: {predictions[0][2]}")

# Test the prediction with a walrus image
predict_image('walrus_image.jpg')

Output:
Prediction for image walrus_image.jpg:
arctic fox: 0.0
polar bear: 0.0
walrus: 1.0
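
The program imports ImageDataGenerator but never uses it. A minimal augmentation sketch (an assumption, not part of the original record) showing how it could generate augmented variants of an input image; the file name and the number of variants are illustrative:

augmenter = ImageDataGenerator(
    rotation_range=20,      # random rotations up to 20 degrees
    width_shift_range=0.1,  # random horizontal shifts
    height_shift_range=0.1, # random vertical shifts
    zoom_range=0.2,         # random zoom
    horizontal_flip=True    # random left-right flips
)

image = img_to_array(load_img('walrus_image.jpg', target_size=(224, 224)))
image = np.expand_dims(image, axis=0)

# Generate a few augmented variants of the same image
augmented = [next(augmenter.flow(image, batch_size=1))[0] for _ in range(4)]
print(f"Generated {len(augmented)} augmented images of shape {augmented[0].shape}")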
