Lab
i) Xavier Initialization:
Xavier initialization scales the initial weights according to the number of input and output units of each layer, which helps prevent vanishing or exploding gradients. In Keras, you can use the glorot_uniform initializer for Xavier initialization.
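As a minimal sketch, the initializer can be passed explicitly to a layer via kernel_initializer:

from keras.layers import Dense

# Xavier (Glorot) uniform initialization for the layer's weights
layer = Dense(256, activation='relu', kernel_initializer='glorot_uniform')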
# MLP with two hidden layers; input_dim=3072 expects flattened 32x32x3 images
from keras.models import Sequential
from keras.layers import Dense

modelMLP = Sequential()
modelMLP.add(Dense(256, activation='relu', input_dim=3072))
modelMLP.add(Dense(256, activation='relu'))
modelMLP.add(Dense(10, activation='softmax'))
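Since input_dim=3072 corresponds to a flattened 32x32x3 image (e.g. CIFAR-10), the images must be flattened before training. A minimal sketch, assuming xtrain/xtest hold the image arrays (names assumed, matching the CNN section below):

# flatten each 32x32x3 image into a 3072-dimensional vector
xtrain = xtrain.reshape(len(xtrain), 3072)
xtest = xtest.reshape(len(xtest), 3072)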
For CNN:
# preprocessing data: cast to float and scale pixel values to [0, 1]
xtrain = xtrain.astype('float32')
xtest = xtest.astype('float32')
xtrain = xtrain / 255
xtest = xtest / 255
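Conv2D with input_shape=(28, 28, 1) expects an explicit channel axis; if the images load as (28, 28) arrays (as MNIST does), a minimal reshaping sketch:

# add a single channel dimension: (N, 28, 28) -> (N, 28, 28, 1)
xtrain = xtrain.reshape(-1, 28, 28, 1)
xtest = xtest.reshape(-1, 28, 28, 1)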
# building model: three Conv+Pool blocks, then a small classifier head
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

model = Sequential()
model.add(Conv2D(filters=25, kernel_size=(3, 3),
                 activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
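The softmax output with categorical_crossentropy (used in the compile step below) assumes one-hot encoded labels. A minimal sketch, assuming integer class labels ytrain/ytest:

from keras.utils import to_categorical

# one-hot encode integer class labels for categorical_crossentropy
ytrain = to_categorical(ytrain, 10)
ytest = to_categorical(ytest, 10)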
TRANSFER LEARNING:
import tensorflow as tf
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.models import Sequential
from keras.layers import Flatten, Dense

# Load the VGG16 convolutional base pre-trained on ImageNet and freeze it.
# input_shape is an assumption here; set it to match your own data.
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
base_model.trainable = False

model = Sequential()
model.add(base_model)
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
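preprocess_input is imported above but never applied; VGG16 expects its own preprocessing rather than plain /255 scaling. A minimal sketch, assuming xtrain/xtest are raw pixel arrays in [0, 255]:

# apply VGG16's preprocessing (channel-wise mean subtraction, BGR ordering)
xtrain = preprocess_input(xtrain)
xtest = preprocess_input(xtest)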
EARLY STOPPING
from keras.callbacks import EarlyStopping

model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# stop training once validation accuracy stops improving for 5 epochs
es = EarlyStopping(monitor='val_accuracy', mode='max',
                   patience=5, restore_best_weights=True)
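The callback only takes effect when passed to fit, and monitoring val_accuracy requires validation data. A minimal sketch, with epochs and validation_split as assumed values:

history = model.fit(xtrain, ytrain,
                    validation_split=0.2,  # held-out data for val_accuracy
                    epochs=50,             # upper bound; early stopping cuts this short
                    callbacks=[es])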
# Optionally, you can compare the predicted values with the actual values
actual_values = nifty_data.values[1608 + n_input_nifty:]
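A minimal comparison sketch; predicted_values is hypothetical here and would come from model.predict on the corresponding test windows:

import numpy as np

# hypothetical: predicted_values from model.predict on the test inputs
mae = np.mean(np.abs(predicted_values - actual_values))
print('MAE:', mae)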