0% found this document useful (0 votes)
11 views

vertopal.com_T1-7

tasks

Uploaded by

shivavibe2002
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
11 views

vertopal.com_T1-7

tasks

Uploaded by

shivavibe2002
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 16

# Train and evaluate a Perceptron classifier on the Iris dataset.
# (The original extraction split `from sklearn.datasets import load_iris`
# across three lines, which is a syntax error — repaired here.)
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score

# Load the Iris dataset (features in X, integer class labels in y).
iris = load_iris()
X = iris.data
y = iris.target

# Split the data into training and testing sets (70/30 split).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1)

# Standardize the features; fit the scaler on training data only
# to avoid leaking test-set statistics into training.
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)

# Train the Perceptron model.
ppn = Perceptron(max_iter=40, eta0=0.1, random_state=1)
ppn.fit(X_train_std, y_train)

# Predict the test set results.
y_pred = ppn.predict(X_test_std)

# Evaluate the accuracy.
print(f'Accuracy: {accuracy_score(y_test, y_pred):.2f}')

Accuracy: 0.87

import numpy as np
import matplotlib.pyplot as plt

# Sigmoid function
def sigmoid(x):
    """Logistic sigmoid: maps any real input (scalar or ndarray) into (0, 1)."""
    # Original extraction lost the body indentation (syntax error); restored.
    return 1 / (1 + np.exp(-x))

# Tanh function
def tanh(x):
    """Hyperbolic tangent: maps input (scalar or ndarray) into (-1, 1)."""
    # Original extraction lost the body indentation (syntax error); restored.
    return np.tanh(x)

# ReLU function
def relu(x):
    """Rectified linear unit: elementwise max(0, x)."""
    # Original extraction lost the body indentation (syntax error); restored.
    return np.maximum(0, x)

# Leaky ReLU function
def leaky_relu(x, alpha=0.01):
    """Leaky ReLU: x where x > 0, otherwise alpha * x (keeps a small gradient
    for negative inputs instead of zeroing them out)."""
    # Original extraction lost the body indentation (syntax error); restored.
    return np.where(x > 0, x, x * alpha)
# Plot the four activation functions over a symmetric input range so their
# shapes can be compared on one set of axes.
x = np.linspace(-10, 10, 100)

plt.figure(figsize=(10, 8))
curves = (
    (sigmoid, 'Sigmoid'),
    (tanh, 'Tanh'),
    (relu, 'ReLU'),
    (leaky_relu, 'Leaky ReLU'),
)
for activation, curve_label in curves:
    plt.plot(x, activation(x), label=curve_label)
plt.title('Activation Functions')
plt.legend()
plt.grid(True)
plt.show()

# Train a small fully-connected network on a synthetic binary-classification
# dataset and report its test accuracy.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Generate a binary classification dataset (1000 samples, 20 features).
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2,
                           random_state=1)

# Split the data into training and testing sets (70/30 split).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1)

# Standardize the features; fit on training data only to avoid leakage.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Build the neural network model. An explicit Input layer replaces the
# deprecated `input_dim` argument — the original run emitted a Keras
# UserWarning recommending exactly this change.
model = Sequential([
    Input(shape=(20,)),
    Dense(64, activation='relu'),
    Dense(64, activation='relu'),
    Dense(1, activation='sigmoid')
])

# Compile the model for binary classification.
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model.
model.fit(X_train, y_train, epochs=100, batch_size=10, verbose=1)

# Evaluate the model on the held-out test set.
loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f'Test Accuracy: {accuracy:.2f}')

D:\DLLL\DLL\Lib\site-packages\keras\src\layers\core\dense.py:87:
UserWarning: Do not pass an `input_shape`/`input_dim` argument to a
layer. When using Sequential models, prefer using an `Input(shape)`
object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer,
**kwargs)

Epoch 1/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.6569 - loss:
0.6405
Epoch 2/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8191 - loss:
0.4395
Epoch 3/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8473 - loss:
0.3610
Epoch 4/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8700 - loss:
0.2950
Epoch 5/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8967 - loss:
0.2586
Epoch 6/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8922 - loss:
0.2791
Epoch 7/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8928 - loss:
0.2515
Epoch 8/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8985 - loss:
0.2550
Epoch 9/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9081 - loss:
0.2352
Epoch 10/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9310 - loss:
0.2097
Epoch 11/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9373 - loss:
0.1976
Epoch 12/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9322 - loss:
0.1954
Epoch 13/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9413 - loss:
0.1761
Epoch 14/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9303 - loss:
0.1954
Epoch 15/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9541 - loss:
0.1506
Epoch 16/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9446 - loss:
0.1719
Epoch 17/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9468 - loss:
0.1678
Epoch 18/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9549 - loss:
0.1652
Epoch 19/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9589 - loss:
0.1474
Epoch 20/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9499 - loss:
0.1471
Epoch 21/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9697 - loss:
0.1297
Epoch 22/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9748 - loss:
0.1237
Epoch 23/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9768 - loss:
0.1087
Epoch 24/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9764 - loss:
0.1181
Epoch 25/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9757 - loss:
0.0978
Epoch 26/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9780 - loss:
0.0940
Epoch 27/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9864 - loss:
0.0815
Epoch 28/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9871 - loss:
0.0804
Epoch 29/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9882 - loss:
0.0678
Epoch 30/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9920 - loss:
0.0571
Epoch 31/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9943 - loss:
0.0519
Epoch 32/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9952 - loss:
0.0489
Epoch 33/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9915 - loss:
0.0441
Epoch 34/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9944 - loss:
0.0420
Epoch 35/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9948 - loss:
0.0306
Epoch 36/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9984 - loss:
0.0334
Epoch 37/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9988 - loss:
0.0282
Epoch 38/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9969 - loss:
0.0252
Epoch 39/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9988 - loss:
0.0245
Epoch 40/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0184
Epoch 41/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0154
Epoch 42/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0140
Epoch 43/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0156
Epoch 44/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0129
Epoch 45/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0119
Epoch 46/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0091
Epoch 47/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0098
Epoch 48/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0100
Epoch 49/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0079
Epoch 50/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
0.0085
Epoch 51/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0061
Epoch 52/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0060
Epoch 53/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0056
Epoch 54/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0054
Epoch 55/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0050
Epoch 56/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0040
Epoch 57/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0044
Epoch 58/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0039
Epoch 59/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0037
Epoch 60/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0034
Epoch 61/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0034
Epoch 62/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0033
Epoch 63/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0029
Epoch 64/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0029
Epoch 65/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0026
Epoch 66/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0021
Epoch 67/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0021
Epoch 68/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0018
Epoch 69/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0016
Epoch 70/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0017
Epoch 71/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0016
Epoch 72/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0016
Epoch 73/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
0.0014
Epoch 74/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0014
Epoch 75/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0012
Epoch 76/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0013
Epoch 77/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0011
Epoch 78/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
0.0011
Epoch 79/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
9.9367e-04
Epoch 80/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
9.3148e-04
Epoch 81/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
9.9012e-04
Epoch 82/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
8.2529e-04
Epoch 83/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
9.2186e-04
Epoch 84/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
8.1299e-04
Epoch 85/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
8.0663e-04
Epoch 86/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
7.7992e-04
Epoch 87/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
6.0982e-04
Epoch 88/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
5.8337e-04
Epoch 89/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
6.2005e-04
Epoch 90/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
5.8850e-04
Epoch 91/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - accuracy: 1.0000 - loss:
5.3105e-04
Epoch 92/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
5.2282e-04
Epoch 93/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
4.5882e-04
Epoch 94/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
4.8520e-04
Epoch 95/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss:
4.2262e-04
Epoch 96/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
4.1262e-04
Epoch 97/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
3.7000e-04
Epoch 98/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
3.8988e-04
Epoch 99/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
3.9310e-04
Epoch 100/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss:
3.2804e-04
Test Accuracy: 0.80

# Compare GRU and LSTM recurrent networks on IMDB sentiment classification.
# (The original extraction broke the load_data assignments across lines,
# which is a syntax error outside brackets — repaired here.)
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, GRU, LSTM, Dense

# Load the IMDB dataset, keeping only the `max_features` most frequent words.
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad/truncate sequences so every review has the same length.
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Build the GRU model. `input_length` is deprecated in modern Keras (the
# original run emitted a UserWarning saying to remove it), so it is omitted.
gru_model = Sequential([
    Embedding(max_features, 128),
    GRU(128, return_sequences=True),
    GRU(128),
    Dense(1, activation='sigmoid')
])

# Compile the GRU model for binary classification.
gru_model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])

# Train the GRU model, holding out 20% of training data for validation.
gru_model.fit(x_train, y_train, epochs=10, batch_size=32,
              validation_split=0.2)

# Evaluate the GRU model on the test set.
gru_loss, gru_accuracy = gru_model.evaluate(x_test, y_test)
print(f'GRU Test Accuracy: {gru_accuracy:.2f}')

# Build the LSTM model with the same architecture for a fair comparison.
lstm_model = Sequential([
    Embedding(max_features, 128),
    LSTM(128, return_sequences=True),
    LSTM(128),
    Dense(1, activation='sigmoid')
])

# Compile the LSTM model.
lstm_model.compile(optimizer='adam', loss='binary_crossentropy',
                   metrics=['accuracy'])

# Train the LSTM model with the same schedule as the GRU.
lstm_model.fit(x_train, y_train, epochs=10, batch_size=32,
               validation_split=0.2)

# Evaluate the LSTM model on the test set.
lstm_loss, lstm_accuracy = lstm_model.evaluate(x_test, y_test)
print(f'LSTM Test Accuracy: {lstm_accuracy:.2f}')

Downloading data from https://storage.googleapis.com/tensorflow/tf-


keras-datasets/imdb.npz
17464789/17464789 ━━━━━━━━━━━━━━━━━━━━ 5s 0us/step
Epoch 1/10

D:\DLLL\DLL\Lib\site-packages\keras\src\layers\core\embedding.py:90:
UserWarning: Argument `input_length` is deprecated. Just remove it.
warnings.warn(

625/625 ━━━━━━━━━━━━━━━━━━━━ 426s 675ms/step - accuracy: 0.6963 -


loss: 0.5475 - val_accuracy: 0.8776 - val_loss: 0.2865
Epoch 2/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 361s 577ms/step - accuracy: 0.9234 -
loss: 0.1987 - val_accuracy: 0.8986 - val_loss: 0.2472
Epoch 3/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 370s 592ms/step - accuracy: 0.9599 -
loss: 0.1109 - val_accuracy: 0.8862 - val_loss: 0.3061
Epoch 4/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 365s 585ms/step - accuracy: 0.9780 -
loss: 0.0649 - val_accuracy: 0.8800 - val_loss: 0.3690
Epoch 5/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 1268s 2s/step - accuracy: 0.9897 - loss:
0.0346 - val_accuracy: 0.8772 - val_loss: 0.4095
Epoch 6/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 388s 621ms/step - accuracy: 0.9928 -
loss: 0.0203 - val_accuracy: 0.8798 - val_loss: 0.5286
Epoch 7/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 478s 679ms/step - accuracy: 0.9945 -
loss: 0.0164 - val_accuracy: 0.8770 - val_loss: 0.6116
Epoch 8/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 549s 877ms/step - accuracy: 0.9942 -
loss: 0.0172 - val_accuracy: 0.8726 - val_loss: 0.6035
Epoch 9/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 413s 661ms/step - accuracy: 0.9956 -
loss: 0.0130 - val_accuracy: 0.8828 - val_loss: 0.6268
Epoch 10/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 405s 648ms/step - accuracy: 0.9967 -
loss: 0.0075 - val_accuracy: 0.8780 - val_loss: 0.6703
782/782 ━━━━━━━━━━━━━━━━━━━━ 134s 171ms/step - accuracy: 0.8635 -
loss: 0.7443
GRU Test Accuracy: 0.87
Epoch 1/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 431s 684ms/step - accuracy: 0.7186 -
loss: 0.5298 - val_accuracy: 0.8610 - val_loss: 0.3262
Epoch 2/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 448s 718ms/step - accuracy: 0.8893 -
loss: 0.2852 - val_accuracy: 0.7778 - val_loss: 0.4798
Epoch 3/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 449s 719ms/step - accuracy: 0.8647 -
loss: 0.3316 - val_accuracy: 0.6588 - val_loss: 0.6006
Epoch 4/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 466s 746ms/step - accuracy: 0.8898 -
loss: 0.2702 - val_accuracy: 0.8588 - val_loss: 0.3653
Epoch 5/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 687s 1s/step - accuracy: 0.9489 - loss:
0.1414 - val_accuracy: 0.8678 - val_loss: 0.3932
Epoch 6/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 790s 1s/step - accuracy: 0.9633 - loss:
0.1045 - val_accuracy: 0.8536 - val_loss: 0.4131
Epoch 7/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 748s 1s/step - accuracy: 0.9642 - loss:
0.1008 - val_accuracy: 0.8598 - val_loss: 0.4098
Epoch 8/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 738s 1s/step - accuracy: 0.9696 - loss:
0.0897 - val_accuracy: 0.8656 - val_loss: 0.4937
Epoch 9/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 770s 1s/step - accuracy: 0.9859 - loss:
0.0480 - val_accuracy: 0.8578 - val_loss: 0.4825
Epoch 10/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 754s 1s/step - accuracy: 0.9857 - loss:
0.0457 - val_accuracy: 0.8462 - val_loss: 0.5925
782/782 ━━━━━━━━━━━━━━━━━━━━ 319s 408ms/step - accuracy: 0.8414 -
loss: 0.6129
LSTM Test Accuracy: 0.84

# Topic classification on the Reuters newswire dataset with a dense network.
# (The original extraction broke the load_data assignment across lines,
# which is a syntax error outside brackets — repaired here.)
from tensorflow.keras.datasets import reuters
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Input
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical

# Load the Reuters dataset, keeping the `max_words` most frequent words.
max_words = 10000
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words)

# Vectorize each article as a binary bag-of-words over the vocabulary.
tokenizer = Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')

# Convert integer topic labels to one-hot categorical format.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# Build the model. An explicit Input layer replaces the deprecated
# `input_shape` argument on the first Dense layer (matching the Keras
# UserWarning seen elsewhere in this document's runs).
model = Sequential([
    Input(shape=(max_words,)),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(46, activation='softmax')  # 46 Reuters topics
])

# Compile the model for multi-class classification.
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model, holding out 20% of training data for validation.
model.fit(x_train, y_train, epochs=5, batch_size=32,
          validation_split=0.2)

# Evaluate the model; `score` is [loss, accuracy].
score = model.evaluate(x_test, y_test, batch_size=32)
print(f'Test Accuracy: {score[1]:.2f}')

Downloading data from https://storage.googleapis.com/tensorflow/tf-


keras-datasets/reuters.npz
2110848/2110848 ━━━━━━━━━━━━━━━━━━━━ 2s 1us/step
Epoch 1/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 21s 63ms/step - accuracy: 0.5543 - loss:
1.9647 - val_accuracy: 0.7657 - val_loss: 1.0688
Epoch 2/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 14s 61ms/step - accuracy: 0.8114 - loss:
0.8275 - val_accuracy: 0.7974 - val_loss: 0.9105
Epoch 3/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 14s 61ms/step - accuracy: 0.8856 - loss:
0.4954 - val_accuracy: 0.8102 - val_loss: 0.8875
Epoch 4/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 14s 60ms/step - accuracy: 0.9202 - loss:
0.3315 - val_accuracy: 0.8180 - val_loss: 0.9512
Epoch 5/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 14s 60ms/step - accuracy: 0.9420 - loss:
0.2663 - val_accuracy: 0.8147 - val_loss: 0.9796
71/71 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - accuracy: 0.8156 - loss:
1.0390
Test Accuracy: 0.81

# Handwritten-digit classification on MNIST with a small CNN.
# (The original extraction broke the layers import and the `/ 255`
# normalization across lines — both syntax errors, repaired here.)
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, MaxPooling2D, Flatten,
                                     Dense, Dropout, Input)
from tensorflow.keras.utils import to_categorical

# Load the MNIST dataset.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Reshape to (n, 28, 28, 1) and scale pixel values from [0, 255] to [0, 1].
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32') / 255

# Convert labels to one-hot categorical format (10 digit classes).
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Build the CNN model. An explicit Input layer replaces the deprecated
# `input_shape` argument — the original run emitted a Keras UserWarning
# recommending exactly this change.
model = Sequential([
    Input(shape=(28, 28, 1)),
    Conv2D(32, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax')
])

# Compile the model for multi-class classification.
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model, holding out 20% of training data for validation.
model.fit(x_train, y_train, epochs=10, batch_size=128,
          validation_split=0.2)

# Evaluate the model on the test set.
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_acc:.2f}')

Downloading data from https://storage.googleapis.com/tensorflow/tf-


keras-datasets/mnist.npz
11490434/11490434 ━━━━━━━━━━━━━━━━━━━━ 2s 0us/step

D:\DLLL\DLL\Lib\site-packages\keras\src\layers\convolutional\
base_conv.py:107: UserWarning: Do not pass an
`input_shape`/`input_dim` argument to a layer. When using Sequential
models, prefer using an `Input(shape)` object as the first layer in
the model instead.
super().__init__(activity_regularizer=activity_regularizer,
**kwargs)

Epoch 1/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 24s 56ms/step - accuracy: 0.7800 - loss:
0.7019 - val_accuracy: 0.9766 - val_loss: 0.0779
Epoch 2/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 20s 54ms/step - accuracy: 0.9629 - loss:
0.1246 - val_accuracy: 0.9837 - val_loss: 0.0572
Epoch 3/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 21s 55ms/step - accuracy: 0.9757 - loss:
0.0824 - val_accuracy: 0.9862 - val_loss: 0.0489
Epoch 4/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 21s 55ms/step - accuracy: 0.9792 - loss:
0.0705 - val_accuracy: 0.9872 - val_loss: 0.0443
Epoch 5/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 20s 54ms/step - accuracy: 0.9823 - loss:
0.0555 - val_accuracy: 0.9887 - val_loss: 0.0403
Epoch 6/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 20s 54ms/step - accuracy: 0.9860 - loss:
0.0477 - val_accuracy: 0.9876 - val_loss: 0.0420
Epoch 7/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 20s 53ms/step - accuracy: 0.9866 - loss:
0.0444 - val_accuracy: 0.9898 - val_loss: 0.0371
Epoch 8/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 21s 54ms/step - accuracy: 0.9871 - loss:
0.0394 - val_accuracy: 0.9902 - val_loss: 0.0360
Epoch 9/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 26s 68ms/step - accuracy: 0.9913 - loss:
0.0298 - val_accuracy: 0.9911 - val_loss: 0.0347
Epoch 10/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 20s 54ms/step - accuracy: 0.9896 - loss:
0.0308 - val_accuracy: 0.9905 - val_loss: 0.0356
313/313 ━━━━━━━━━━━━━━━━━━━━ 3s 8ms/step - accuracy: 0.9897 - loss:
0.0311
Test Accuracy: 0.99

# Sentiment classification on IMDB with a single-layer SimpleRNN.
# (The original extraction broke the load_data assignment across lines,
# which is a syntax error outside brackets — repaired here.)
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense

# Load the IMDB dataset, keeping only the `max_features` most frequent words.
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad/truncate sequences so every review has the same length.
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Build the Simple RNN model. `input_length` is deprecated in modern Keras
# (a UserWarning in this document's runs says to remove it), so it is omitted.
model = Sequential([
    Embedding(max_features, 128),
    SimpleRNN(128),
    Dense(1, activation='sigmoid')
])

# Compile the model for binary classification.
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model, holding out 20% of training data for validation.
model.fit(x_train, y_train, epochs=3, batch_size=32,
          validation_split=0.2)

# Evaluate the model on the test set.
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_acc:.2f}')

Epoch 1/3
625/625 ━━━━━━━━━━━━━━━━━━━━ 62s 95ms/step - accuracy: 0.5335 - loss:
0.6886 - val_accuracy: 0.5994 - val_loss: 0.6500
Epoch 2/3
625/625 ━━━━━━━━━━━━━━━━━━━━ 80s 91ms/step - accuracy: 0.6730 - loss:
0.6060 - val_accuracy: 0.6544 - val_loss: 0.6207
Epoch 3/3
625/625 ━━━━━━━━━━━━━━━━━━━━ 63s 100ms/step - accuracy: 0.7287 - loss:
0.5338 - val_accuracy: 0.6736 - val_loss: 0.6298
782/782 ━━━━━━━━━━━━━━━━━━━━ 24s 31ms/step - accuracy: 0.6723 - loss:
0.6265
Test Accuracy: 0.67

You might also like