ANN Detection Technique
ANN Detection Technique
Semester: 8th
In [1]:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import itertools
import random
from tensorflow.keras.preprocessing.image import ImageDataGenerator
In [ ]:
!git clone https://github.com/umair54321/personality-traits.git
In [ ]:
# Set up directories
train_dir = '/content/personality-traits/DataSet/train'
validation_dir = '/content/personality-traits/DataSet/validation'
test_dir = '/content/personality-traits/DataSet/test'
In [ ]:
# Preprocessing and data augmentation
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
In [ ]:
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(160, 160), # Resize images to 150x150
batch_size=32,
class_mode='categorical' # Adjust class mode as needed
)
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=32,
class_mode='categorical'
)
In [ ]:
# def load_and_preprocess_data(file_paths):
#
#
#
# return np.array(X_all), np.array(Y_all)
In [ ]:
# # Load and combine dataset from multiple files
# pickle_file_paths = [
# '/content/drive/MyDrive/Colab Notebooks/Training-test-data/train_clselfie_v8_1.pi
# # '/content/drive/MyDrive/Colab Notebooks/Training-test-data/train_clselfie_v8_1.
# # '/content/drive/MyDrive/Colab Notebooks/Training-test-data/train_clselfie_v8_2.
# # '/content/drive/MyDrive/Colab Notebooks/Training-test-data/train_clselfie_v8_3.
# ]
# X, Y = load_and_preprocess_data(pickle_file_paths)
In [ ]:
# # Split the data into train, validation, and test sets
# train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=0.3, random_state
# val_X, test_X, val_Y, test_Y = train_test_split(test_X, test_Y, test_size=0.5, random
In [ ]:
# Configure the hyperparameters
learning_rate = 0.001,
num_classes = 5 # Adjust based on your dataset
input_shape = (160,160,3) # Assumes (height, width, channels)
image_size = 160 # Resize if necessary
patch_size = 16 # Adjust based on your image size
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [
projection_dim * 2,
projection_dim,
]
transformer_layers = 8
mlp_head_units = [2048, 1024]
In [ ]:
# # Data augmentation
# data_augmentation = keras.Sequential(
# [
# layers.Resizing(image_size, image_size),
# layers.RandomFlip("horizontal"),
# layers.RandomRotation(factor=0.02),
# layers.RandomZoom(height_factor=0.2, width_factor=0.2),
# layers.Normalization(),
# ],
# name="data_augmentation",
# )
# # Compute the mean and variance of the training data for normalization.
# data_augmentation.layers[-1].adapt(train_X)
In [ ]:
def mlp(x, hidden_units, dropout_rate):
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
In [ ]:
# Implement patch creation as a layer
class Patches(layers.Layer):
def init (self, patch_size):
super(). init ()
self.patch_size = patch_size
In [ ]:
x_train,y_train = train_generator.next()
x_test,y_test = validation_generator.next()
x_train.shape
In [ ]:
plt.figure(figsize=(4, 4))
x = train_generator.next()
image = x[0][0]
plt.imshow(image)
plt.axis('off')
resized_image = tf.image.resize(
tf.convert_to_tensor([image]), size = (image_size, image_size)
)
patches = Patches(patch_size)(resized_image)
print(f'Image size: {image_size} X {image_size}')
print(f'Patch size: {patch_size} X {patch_size}')
print(f'Patches per image: {patches.shape[1]}')
print(f'Elements per patch: {patches.shape[-1]}')
n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))
In [ ]:
class PatchEncoder(layers.Layer):
def
self.num_patches = num_patches
self.projection = layers.Dense(units=projection_dim)
input_dim=num_patches, output_dim=projection_dim
In [ ]:
# Build the ViT model
def create_vit_classifier():
inputs = layers.Input(shape=input_shape)
# Create patches.
patches = Patches(patch_size)(inputs)
# Encode patches.
encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
In [ ]:
model = create_vit_classifier()
model.summary()
Model: "model_6"
========================================================================================
==========
Total params: 15933893 (60.78 MB)
Trainable params: 15933893 (60.78 MB)
Non-trainable params: 0 (0.00 Byte)
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 11/19
12/5/23, 6:19 PM personalitiy_with_VIT
In [ ]:
class BalancedAccuracy(tf.keras.metrics.Metric):
def init (self, name='balanced_accuracy', **kwargs):
super(BalancedAccuracy, self). init (name=name, **kwargs)
self.standard_accuracy = tf.keras.metrics.BinaryAccuracy()
self.total = self.add_weight(name="total", initializer="zeros")
self.count = self.add_weight(name="count", initializer="zeros")
def result(self):
standard_acc = self.standard_accuracy.result()
scaling_factor = tf.math.sqrt(1.0 - standard_acc)
return tf.clip_by_value(imbalance_adjustment+standard_acc, 0, 1)
def reset_state(self):
self.standard_accuracy.reset_states()
self.total.assign(0)
self.count.assign(0)
In [ ]:
def run_experiment(model):
# Compile the model with a fixed learning rate
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.001),
loss=keras.losses.BinaryCrossentropy(from_logits=False),
metrics=[BalancedAccuracy()]
)
return history
In [ ]:
def convert_to_binary(predictions, threshold=0.5):
return (predictions > threshold).astype(int)
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 12/19
12/5/23, 6:19 PM personalitiy_with_VIT
idx = np.random.choice(range(len(test_X)))
ax.imshow(test_X[idx])
bin_pred = test_pred_classes[idx]
true_labels = test_true_classes[idx]
ax.set_title(f"True: {true_labels}, Pred: {bin_pred}, Raw Pred: {np.round(raw_p
ax.axis("off")
plt.tight_layout()
In [ ]:
# Train the model
vit_classifier = create_vit_classifier()
# vit_classifier.summary()
history = run_experiment(vit_classifier)
Epoch 1/50
1/1 [==============================] - 28s 28s/step - loss: 1.1884 - balanced_accuracy:
0.5498 - val_loss: 1.2929 - val_balanced_accuracy: 0.7921
Epoch 2/50
1/1 [==============================] - 0s 140ms/step - loss: 0.8470 - balanced_accuracy:
0.8520 - val_loss: 1.9566 - val_balanced_accuracy: 0.8161
Epoch 3/50
1/1 [==============================] - 0s 141ms/step - loss: 2.2790 - balanced_accuracy:
0.8161 - val_loss: 2.4583 - val_balanced_accuracy: 0.5376
Epoch 4/50
1/1 [==============================] - 0s 140ms/step - loss: 1.8066 - balanced_accuracy:
0.6955 - val_loss: 1.0109 - val_balanced_accuracy: 0.7921
Epoch 5/50
1/1 [==============================] - 0s 140ms/step - loss: 1.2271 - balanced_accuracy:
0.8401 - val_loss: 1.5577 - val_balanced_accuracy: 0.8281
Epoch 6/50
1/1 [==============================] - 0s 143ms/step - loss: 2.0102 - balanced_accuracy:
0.8401 - val_loss: 1.2960 - val_balanced_accuracy: 0.6592
Epoch 7/50
1/1 [==============================] - 0s 146ms/step - loss: 1.0184 - balanced_accuracy:
0.8281 - val_loss: 1.4156 - val_balanced_accuracy: 0.7921
Epoch 8/50
1/1 [==============================] - 0s 115ms/step - loss: 0.9947 - balanced_accuracy:
0.8639 - val_loss: 0.8329 - val_balanced_accuracy: 0.8041
Epoch 9/50
1/1 [==============================] - 0s 142ms/step - loss: 1.4243 - balanced_accuracy:
0.7680 - val_loss: 1.1880 - val_balanced_accuracy: 0.7921
Epoch 10/50
1/1 [==============================] - 0s 140ms/step - loss: 1.0453 - balanced_accuracy:
0.8401 - val_loss: 2.0739 - val_balanced_accuracy: 0.7921
Epoch 11/50
1/1 [==============================] - 0s 104ms/step - loss: 1.2825 - balanced_accuracy:
0.8041 - val_loss: 1.7147 - val_balanced_accuracy: 0.7921
Epoch 12/50
1/1 [==============================] - 0s 139ms/step - loss: 0.8026 - balanced_accuracy:
0.9114 - val_loss: 1.2836 - val_balanced_accuracy: 0.8041
Epoch 13/50
1/1 [==============================] - 0s 104ms/step - loss: 0.9669 - balanced_accuracy:
0.8281 - val_loss: 0.9139 - val_balanced_accuracy: 0.7680
Epoch 14/50
1/1 [==============================] - 0s 104ms/step - loss: 0.8565 - balanced_accuracy:
0.8161 - val_loss: 1.3898 - val_balanced_accuracy: 0.6470
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 13/19
12/5/23, 6:19 PM personalitiy_with_VIT
Epoch 15/50
1/1 [==============================] - 0s 146ms/step - loss: 1.0001 - balanced_accuracy:
0.8281 - val_loss: 1.3538 - val_balanced_accuracy: 0.7318
Epoch 16/50
1/1 [==============================] - 0s 149ms/step - loss: 0.9054 - balanced_accuracy:
0.7800 - val_loss: 1.6595 - val_balanced_accuracy: 0.8161
Epoch 17/50
1/1 [==============================] - 0s 147ms/step - loss: 0.6849 - balanced_accuracy:
0.8877 - val_loss: 1.4094 - val_balanced_accuracy: 0.8281
Epoch 18/50
1/1 [==============================] - 0s 103ms/step - loss: 0.9553 - balanced_accuracy:
0.8996 - val_loss: 1.2483 - val_balanced_accuracy: 0.8401
Epoch 19/50
1/1 [==============================] - 0s 105ms/step - loss: 0.8920 - balanced_accuracy:
0.8520 - val_loss: 1.0668 - val_balanced_accuracy: 0.8401
Epoch 20/50
1/1 [==============================] - 0s 109ms/step - loss: 0.9314 - balanced_accuracy:
0.8877 - val_loss: 0.8210 - val_balanced_accuracy: 0.8161
Epoch 21/50
1/1 [==============================] - 0s 112ms/step - loss: 0.7278 - balanced_accuracy:
0.8520 - val_loss: 0.9103 - val_balanced_accuracy: 0.8520
Epoch 22/50
1/1 [==============================] - 0s 142ms/step - loss: 0.7356 - balanced_accuracy:
0.8758 - val_loss: 1.5541 - val_balanced_accuracy: 0.8161
Epoch 23/50
1/1 [==============================] - 0s 141ms/step - loss: 0.4175 - balanced_accuracy:
0.8996 - val_loss: 2.0443 - val_balanced_accuracy: 0.7921
Epoch 24/50
1/1 [==============================] - 0s 102ms/step - loss: 0.6070 - balanced_accuracy:
0.8639 - val_loss: 1.5228 - val_balanced_accuracy: 0.8041
Epoch 25/50
1/1 [==============================] - 0s 135ms/step - loss: 1.1553 - balanced_accuracy:
0.8639 - val_loss: 0.9599 - val_balanced_accuracy: 0.6834
Epoch 26/50
1/1 [==============================] - 0s 98ms/step - loss: 1.0610 - balanced_accuracy:
0.7318 - val_loss: 0.9751 - val_balanced_accuracy: 0.8281
Epoch 27/50
1/1 [==============================] - 0s 100ms/step - loss: 0.5176 - balanced_accuracy:
0.9349 - val_loss: 1.3711 - val_balanced_accuracy: 0.8161
Epoch 28/50
1/1 [==============================] - 0s 101ms/step - loss: 0.6289 - balanced_accuracy:
0.8996 - val_loss: 1.3199 - val_balanced_accuracy: 0.8401
Epoch 29/50
1/1 [==============================] - 0s 100ms/step - loss: 0.5660 - balanced_accuracy:
0.8758 - val_loss: 1.2180 - val_balanced_accuracy: 0.8041
Epoch 30/50
1/1 [==============================] - 0s 136ms/step - loss: 0.7440 - balanced_accuracy:
0.8639 - val_loss: 1.1841 - val_balanced_accuracy: 0.7559
Epoch 31/50
1/1 [==============================] - 0s 99ms/step - loss: 0.8129 - balanced_accuracy:
0.8161 - val_loss: 1.2620 - val_balanced_accuracy: 0.8161
Epoch 32/50
1/1 [==============================] - 0s 103ms/step - loss: 0.6070 - balanced_accuracy:
0.8996 - val_loss: 2.0693 - val_balanced_accuracy: 0.8041
Epoch 33/50
1/1 [==============================] - 0s 140ms/step - loss: 1.1855 - balanced_accuracy:
0.8639 - val_loss: 1.4341 - val_balanced_accuracy: 0.8041
Epoch 34/50
1/1 [==============================] - 0s 103ms/step - loss: 0.4768 - balanced_accuracy:
0.8639 - val_loss: 0.9001 - val_balanced_accuracy: 0.8161
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 14/19
12/5/23, 6:19 PM personalitiy_with_VIT
Epoch 35/50
1/1 [==============================] - 0s 103ms/step - loss: 0.6302 - balanced_accuracy:
0.8758 - val_loss: 0.8172 - val_balanced_accuracy: 0.7800
Epoch 36/50
1/1 [==============================] - 0s 143ms/step - loss: 0.6734 - balanced_accuracy:
0.7680 - val_loss: 1.2039 - val_balanced_accuracy: 0.8041
Epoch 37/50
1/1 [==============================] - 0s 136ms/step - loss: 0.5008 - balanced_accuracy:
0.8996 - val_loss: 1.7680 - val_balanced_accuracy: 0.8041
Epoch 38/50
1/1 [==============================] - 0s 102ms/step - loss: 0.6049 - balanced_accuracy:
0.9349 - val_loss: 1.8223 - val_balanced_accuracy: 0.8161
Epoch 39/50
1/1 [==============================] - 0s 99ms/step - loss: 0.6112 - balanced_accuracy:
0.8996 - val_loss: 1.9003 - val_balanced_accuracy: 0.8161
Epoch 40/50
1/1 [==============================] - 0s 97ms/step - loss: 0.7249 - balanced_accuracy:
0.8758 - val_loss: 1.2465 - val_balanced_accuracy: 0.8401
Epoch 41/50
1/1 [==============================] - 0s 107ms/step - loss: 0.5592 - balanced_accuracy:
0.9114 - val_loss: 0.9633 - val_balanced_accuracy: 0.8161
Epoch 42/50
1/1 [==============================] - 0s 141ms/step - loss: 0.5266 - balanced_accuracy:
0.8877 - val_loss: 0.8047 - val_balanced_accuracy: 0.7680
Epoch 43/50
1/1 [==============================] - 0s 102ms/step - loss: 0.3356 - balanced_accuracy:
0.9114 - val_loss: 0.7882 - val_balanced_accuracy: 0.8041
Epoch 44/50
1/1 [==============================] - 0s 101ms/step - loss: 0.6323 - balanced_accuracy:
0.8401 - val_loss: 0.9537 - val_balanced_accuracy: 0.8041
Epoch 45/50
1/1 [==============================] - 0s 101ms/step - loss: 0.4009 - balanced_accuracy:
0.8996 - val_loss: 1.8994 - val_balanced_accuracy: 0.7439
Epoch 46/50
1/1 [==============================] - 0s 99ms/step - loss: 0.6047 - balanced_accuracy:
0.8639 - val_loss: 2.5157 - val_balanced_accuracy: 0.7800
Epoch 47/50
1/1 [==============================] - 0s 138ms/step - loss: 0.8430 - balanced_accuracy:
0.8281 - val_loss: 2.4378 - val_balanced_accuracy: 0.7921
Epoch 48/50
1/1 [==============================] - 0s 96ms/step - loss: 0.5966 - balanced_accuracy:
0.9114 - val_loss: 1.8166 - val_balanced_accuracy: 0.7680
Epoch 49/50
1/1 [==============================] - 0s 140ms/step - loss: 0.4953 - balanced_accuracy:
0.9231 - val_loss: 1.5470 - val_balanced_accuracy: 0.8041
Epoch 50/50
1/1 [==============================] - 0s 135ms/step - loss: 0.2758 - balanced_accuracy:
0.9114 - val_loss: 1.6120 - val_balanced_accuracy: 0.8161
1/1 [==============================] - 0s 50ms/step - loss: 1.6120 - balanced_accuracy:
0.8161
Test accuracy: 81.61%
In [ ]:
plt.figure(figsize=(12, 5))
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 15/19
12/5/23, 6:19 PM personalitiy_with_VIT
plt.title('Training and Validation Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.xlabel('Epoch')
test_predictions = vit_classifier.predict(test_X)
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 16/19
12/5/23, 6:19 PM personalitiy_with_VIT
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 17/19
12/5/23, 6:19 PM personalitiy_with_VIT
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 18/19
12/5/23, 6:19 PM personalitiy_with_VIT
In [ ]:
# Multilabel confusion matrix
from sklearn.metrics import multilabel_confusion_matrix
import seaborn as sns
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20, 10)) # Adjust for better fit
axes = axes.flatten() # Flatten the matrix of axes
if len(class_names) % 2 != 0:
fig.delaxes(axes[-1])
plt.tight_layout()
plt.show()
In [ ]:
#Classification report
from sklearn.metrics import classification_report
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Make sure to replace 'target_names' with the actual names of your classes
target_names = ['Extraversion', 'Agreeableness', 'Conscientiousness', 'Neuroticism', 'O
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 19/19
12/5/23, 6:19 PM personalitiy_with_VIT
df_report = pd.DataFrame(report).transpose()
plt.figure(figsize=(10, 8))
sns.heatmap(selected_metrics, annot=True, fmt=".2f", cmap="Blues")
plt.title('Classification Report')
In [ ]:
file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 20/19