ML Briefing
In [ ]: ####################Supervised Learning####################
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_iris, fetch_california_housing  # load_boston was removed in scikit-learn 1.2
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, plot_tree
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.svm import SVC, SVR
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.metrics import accuracy_score, mean_squared_error
# Load the datasets (load_boston was removed in scikit-learn 1.2; the
# California housing data stands in as the regression example)
iris = load_iris()
housing = fetch_california_housing()
# Split the datasets into features and target variables
x1, y1 = iris.data, iris.target
x2, y2 = housing.data, housing.target
# California housing has ~20k rows; subsample so the kernel models
# (SVR in particular) train in reasonable time
rng = np.random.RandomState(42)
subset = rng.choice(len(x2), size=2000, replace=False)
x2, y2 = x2[subset], y2[subset]
# Split the datasets into training and testing sets
x1_train, x1_test, y1_train, y1_test = train_test_split(x1, y1, test_size=0.2, random_state=42)
x2_train, x2_test, y2_train, y2_test = train_test_split(x2, y2, test_size=0.2, random_state=42)
# Preprocessing: Standardize features for algorithms that require it
scaler = StandardScaler()
x2_train_scaled = scaler.fit_transform(x2_train)
x2_test_scaled = scaler.transform(x2_test)
########## Classification Algorithms
print("Classification Algorithms:")
# 1 - Logistic Regression
lr_clf = LogisticRegression(max_iter=1000, random_state=42)
lr_clf.fit(x1_train, y1_train)
y1_lr = lr_clf.predict(x1_test)
accuracy_lr = accuracy_score(y1_test, y1_lr)
print(f"1 - Logistic Regression Accuracy: {accuracy_lr:.4f}")
# 2 - Naive Bayes Classifier
nb_clf = GaussianNB()
nb_clf.fit(x1_train, y1_train)
y1_nb = nb_clf.predict(x1_test)
accuracy_nb = accuracy_score(y1_test, y1_nb)
print(f"2 - Naive Bayes Classifier Accuracy: {accuracy_nb:.4f}")
# Regression Algorithms (example with Linear Regression on the housing dataset)
print("Regression Algorithms:")
# Linear Regression
lr_reg = LinearRegression()
lr_reg.fit(x2_train_scaled, y2_train)
y2_lr = lr_reg.predict(x2_test_scaled)
mse_lr = mean_squared_error(y2_test, y2_lr)
print(f"Linear Regression Mean Squared Error: {mse_lr:.4f}")
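Ridge, Lasso, ElasticNet, and PolynomialFeatures are imported above, but their cells fall on pages missing from this export (the algorithm numbering also jumps straight to 7 below). A minimal reconstruction sketch, reusing the scaled split defined above; the hyperparameters are illustrative defaults, not the original notebook's values:
In [ ]: # Regularized linear models (reconstructed sketch)
for name, reg in [("Ridge", Ridge(alpha=1.0)),
                  ("Lasso", Lasso(alpha=0.1)),
                  ("ElasticNet", ElasticNet(alpha=0.1, l1_ratio=0.5))]:
    reg.fit(x2_train_scaled, y2_train)
    mse = mean_squared_error(y2_test, reg.predict(x2_test_scaled))
    print(f"{name} MSE: {mse:.4f}")
# PolynomialFeatures expands the inputs with squared and interaction
# terms so a linear model can fit curved relationships
poly = PolynomialFeatures(degree=2, include_bias=False)
x2_train_poly = poly.fit_transform(x2_train_scaled)
x2_test_poly = poly.transform(x2_test_scaled)
poly_reg = LinearRegression().fit(x2_train_poly, y2_train)
mse_poly = mean_squared_error(y2_test, poly_reg.predict(x2_test_poly))
print(f"Polynomial (degree 2) Linear Regression MSE: {mse_poly:.4f}")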
In [ ]: ########## Algorithms that can be used for both regression and classification
print("\nAlgorithms for Both Regression and Classification:")
# 7 - Decision Trees
# Classification
dt_clf = DecisionTreeClassifier(random_state=42)
dt_clf.fit(x1_train, y1_train)
y1_dt = dt_clf.predict(x1_test)
accuracy_dt = accuracy_score(y1_test, y1_dt)
print(f"7 - Decision Tree Classifier Accuracy: {accuracy_dt:.4f}")
# Regression
dt_reg = DecisionTreeRegressor(random_state=42)
dt_reg.fit(x2_train_scaled, y2_train)
y2_dt = dt_reg.predict(x2_test_scaled)
mse_dt = mean_squared_error(y2_test, y2_dt)
print(f"7 - Decision Tree Regressor MSE: {mse_dt:.4f}")
# 8 - Random Forest
# Classification
rf_clf = RandomForestClassifier(n_estimators=100, random_state=42)
rf_clf.fit(x1_train, y1_train)
y1_rf = rf_clf.predict(x1_test)
accuracy_rf = accuracy_score(y1_test, y1_rf)
print(f"8 - Random Forest Classifier Accuracy: {accuracy_rf:.4f}")
# Regression
rf_reg = RandomForestRegressor(n_estimators=100, random_state=42)
rf_reg.fit(x2_train_scaled, y2_train)
y2_rf = rf_reg.predict(x2_test_scaled)
mse_rf = mean_squared_error(y2_test, y2_rf)
print(f"8 - Random Forest Regressor MSE: {mse_rf:.4f}")
# 9 - Gradient Boosting Machines (GBM)
# Classification
gb_clf = GradientBoostingClassifier(n_estimators=100, random_state=42)
gb_clf.fit(x1_train, y1_train)
y1_gb = gb_clf.predict(x1_test)
accuracy_gb = accuracy_score(y1_test, y1_gb)
print(f"9 - Gradient Boosting Classifier Accuracy: {accuracy_gb:.4f}")
# Regression
gb_reg = GradientBoostingRegressor(n_estimators=100, random_state=42)
gb_reg.fit(x2_train_scaled, y2_train)
y2_gb = gb_reg.predict(x2_test_scaled)
mse_gb = mean_squared_error(y2_test, y2_gb)
print(f"9 - Gradient Boosting Regressor MSE: {mse_gb:.4f}")
# 10 - Support Vector Machines (SVM)
# Classification
svm_clf = SVC(kernel='linear', random_state=42)
svm_clf.fit(x1_train, y1_train)
y1_svm = svm_clf.predict(x1_test)
accuracy_svm = accuracy_score(y1_test, y1_svm)
print(f"10 - SVM Classifier Accuracy: {accuracy_svm:.4f}")
# Regression
svm_reg = SVR(kernel='linear')
svm_reg.fit(x2_train_scaled, y2_train)
y2_svm = svm_reg.predict(x2_test_scaled)
mse_svm = mean_squared_error(y2_test, y2_svm)
print(f"10 - SVM Regressor MSE: {mse_svm:.4f}")
# 11 - K-Nearest Neighbors (KNN)
# Classification
knn_clf = KNeighborsClassifier()
knn_clf.fit(x1_train, y1_train)
y1_knn = knn_clf.predict(x1_test)
accuracy_knn = accuracy_score(y1_test, y1_knn)
print(f"11 - K-Nearest Neighbors Classifier Accuracy: {accuracy_knn:.4f}")
# Regression
knn_reg = KNeighborsRegressor()
knn_reg.fit(x2_train_scaled, y2_train)
y2_knn = knn_reg.predict(x2_test_scaled)
mse_knn = mean_squared_error(y2_test, y2_knn)
print(f"11 - K-Nearest Neighbors Regressor MSE: {mse_knn:.4f}")
# 12 - Neural Networks
# Classification
nn_clf = MLPClassifier(max_iter=1000, random_state=42)
nn_clf.fit(x1_train, y1_train)
y1_nn = nn_clf.predict(x1_test)
accuracy_nn = accuracy_score(y1_test, y1_nn)
print(f"12 - Neural Network Classifier Accuracy: {accuracy_nn:.4f}")
# Regression
nn_reg = MLPRegressor(max_iter=1000, random_state=42)
nn_reg.fit(x2_train_scaled, y2_train)
y2_nn = nn_reg.predict(x2_test_scaled)
mse_nn = mean_squared_error(y2_test, y2_nn)
print(f"12 - Neural Network Regressor MSE: {mse_nn:.4f}")
# (fragment: the start of this evaluation cell is on pages missing from the export)
from sklearn.metrics import classification_report, confusion_matrix
rf_reg.fit(x2_train_scaled, y2_train)
housing_rf_preds = rf_reg.predict(x2_test_scaled)
print("Random Forest Regressor:")
print(f"MSE: {mean_squared_error(y2_test, housing_rf_preds):.4f}")
# Gradient Boosting Machines (GBM)
# Classification
gb = GradientBoostingClassifier(n_estimators=100, random_state=42)
gb.fit(x1_train, y1_train)
iris_gb_preds = gb.predict(x1_test)
print("Gradient Boosting Classifier:")
print(confusion_matrix(y1_test, iris_gb_preds))
print(classification_report(y1_test, iris_gb_preds))
# Regression
gb_reg = GradientBoostingRegressor(n_estimators=100, random_state=42)
gb_reg.fit(x2_train_scaled, y2_train)
housing_gb_preds = gb_reg.predict(x2_test_scaled)
print("Gradient Boosting Regressor:")
print(f"MSE: {mean_squared_error(y2_test, housing_gb_preds):.4f}")
# Support Vector Machines (SVM)
# Classification
svm = SVC(kernel='linear', random_state=42)
svm.fit(x1_train, y1_train)
iris_svm_preds = svm.predict(x1_test)
print("SVM Classifier:")
print(confusion_matrix(y1_test, iris_svm_preds))
print(classification_report(y1_test, iris_svm_preds))
# Regression
svm_reg = SVR(kernel='linear')
svm_reg.fit(x2_train_scaled, y2_train)
housing_svm_preds = svm_reg.predict(x2_test_scaled)
print("SVM Regressor:")
print(f"MSE: {mean_squared_error(y2_test, housing_svm_preds):.4f}")
# K-Nearest Neighbors (KNN)
# Classification
knn = KNeighborsClassifier()
knn.fit(x1_train, y1_train)
iris_knn_preds = knn.predict(x1_test)
print("K-Nearest Neighbors Classifier:")
print(confusion_matrix(y1_test, iris_knn_preds))
print(classification_report(y1_test, iris_knn_preds))
# Regression
knn_reg = KNeighborsRegressor()
knn_reg.fit(x2_train_scaled, y2_train)
housing_knn_preds = knn_reg.predict(x2_test_scaled)
print("K-Nearest Neighbors Regressor:")
print(f"MSE: {mean_squared_error(y2_test, housing_knn_preds):.4f}")
# Neural Networks
# Classification
nn = MLPClassifier(max_iter=1000, random_state=42)
nn.fit(x1_train, y1_train)
iris_nn_preds = nn.predict(x1_test)
print("Neural Network Classifier:")
print(confusion_matrix(y1_test, iris_nn_preds))
print(classification_report(y1_test, iris_nn_preds))
# Regression
nn_reg = MLPRegressor(max_iter=1000, random_state=42)
nn_reg.fit(x2_train_scaled, y2_train)
housing_nn_preds = nn_reg.predict(x2_test_scaled)
print("Neural Network Regressor:")
print(f"MSE: {mean_squared_error(y2_test, housing_nn_preds):.4f}")
In [ ]: ########## Visualization
from sklearn.tree import plot_tree
# Visualize Decision Tree Classifier
plt.figure(figsize=(15, 10))
plot_tree(dt_clf, filled=True, feature_names=iris.feature_names, class_names=iris.target_names)
plt.title("Decision Tree Classifier")
plt.show()
# Visualize Decision Tree Regressor
plt.figure(figsize=(15, 10))
plot_tree(dt_reg, filled=True, feature_names=housing.feature_names)
plt.title("Decision Tree Regressor")
plt.show()
# Visualize Random Forest Classifier (first tree only)
plt.figure(figsize=(15, 10))
plot_tree(rf_clf.estimators_[0], filled=True, feature_names=iris.feature_names, class_names=iris.target_names)
plt.title("Random Forest Classifier")
plt.show()
# Visualize Random Forest Regressor (first tree only)
plt.figure(figsize=(15, 10))
plot_tree(rf_reg.estimators_[0], filled=True, feature_names=housing.feature_names)
plt.title("Random Forest Regressor")
plt.show()
# Visualize Gradient Boosting Classifier (first tree only; GBM fits
# regression trees to gradients, so there are no class labels to display)
plt.figure(figsize=(15, 10))
plot_tree(gb.estimators_[0][0], filled=True, feature_names=iris.feature_names)
plt.title("Gradient Boosting Classifier")
plt.show()
# Visualize Gradient Boosting Regressor (first tree only)
plt.figure(figsize=(15, 10))
plot_tree(gb_reg.estimators_[0][0], filled=True, feature_names=housing.feature_names)
plt.title("Gradient Boosting Regressor")
plt.show()
# Plotting the decision boundaries for SVM Classifier
# (refit on the two sepal features so the model can predict on a 2-D mesh;
# the svm above was trained on all four iris features)
svm_2d = SVC(kernel='linear', random_state=42).fit(x1[:, :2], y1)
plt.figure(figsize=(10, 6))
plt.title("SVM Classifier Decision Boundaries")
plt.xlabel("Sepal Length")
plt.ylabel("Sepal Width")
h = .02  # step size in the mesh
x_min, x_max = x1[:, 0].min() - 1, x1[:, 0].max() + 1
y_min, y_max = x1[:, 1].min() - 1, x1[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = svm_2d.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
plt.scatter(x1[:, 0], x1[:, 1], c=y1, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
plt.show()
# Plotting predicted vs actual prices for SVM Regressor
plt.figure(figsize=(10, 6))
plt.title("SVM Regressor Predictions vs Actual")
plt.xlabel("Actual Prices")
plt.ylabel("Predicted Prices")
plt.scatter(y2_test, housing_svm_preds)
plt.plot([min(y2_test), max(y2_test)], [min(y2_test), max(y2_test)], color='red', linestyle='--')
plt.show()
# Visualize K-Nearest Neighbors Classifier (2D only; refit on the two
# sepal features, as above, so predictions work on the mesh)
knn_2d = KNeighborsClassifier().fit(x1[:, :2], y1)
plt.figure(figsize=(10, 6))
plt.title("KNN Classifier Decision Boundaries")
plt.xlabel("Sepal Length")
plt.ylabel("Sepal Width")
h = .02  # step size in the mesh
x_min, x_max = x1[:, 0].min() - 1, x1[:, 0].max() + 1
y_min, y_max = x1[:, 1].min() - 1, x1[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = knn_2d.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
plt.scatter(x1[:, 0], x1[:, 1], c=y1, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
plt.show()
# Plotting predicted vs actual prices for KNN Regressor
plt.figure(figsize=(10, 6))
plt.title("KNN Regressor Predictions vs Actual")
plt.xlabel("Actual Prices")
plt.ylabel("Predicted Prices")
plt.scatter(y2_test, housing_knn_preds)
plt.plot([min(y2_test), max(y2_test)], [min(y2_test), max(y2_test)], color='red', linestyle='--')
plt.show()
In [ ]: ####################Unsupervised Learning####################
# (the page with this cell's header and imports is missing from the
#  export; these are the imports the code below requires)
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
from sklearn.decomposition import PCA, FastICA
from sklearn.manifold import TSNE
from sklearn.ensemble import IsolationForest
# Load dataset
iris = load_iris()
X = iris.data
y = iris.target
# Feature scaling
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# 1-K-Means Clustering
kmeans = KMeans(n_clusters=3, random_state=42)
kmeans_pred = kmeans.fit_predict(X_scaled)
# 2-Hierarchical Clustering
hierarchical = AgglomerativeClustering(n_clusters=3)
hierarchical_pred = hierarchical.fit_predict(X_scaled)
# 3-DBSCAN
dbscan = DBSCAN(eps=0.5, min_samples=5)
dbscan_pred = dbscan.fit_predict(X_scaled)
# 4-PCA
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_scaled)
# 5-ICA
ica = FastICA(n_components=2)
X_ica = ica.fit_transform(X_scaled)
# 6-t-SNE
tsne = TSNE(n_components=2, perplexity=30, random_state=42)
X_tsne = tsne.fit_transform(X_scaled)
# 7-Isolation Forest
isolation_forest = IsolationForest(random_state=42)
isolation_forest_pred = isolation_forest.fit_predict(X_scaled)
# Visualization
plt.figure(figsize=(16, 16))
# PCA plot
plt.subplot(3, 3, 1)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=kmeans_pred, cmap='viridis')
plt.title('PCA (K-Means)')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
# ICA plot
plt.subplot(3, 3, 2)
plt.scatter(X_ica[:, 0], X_ica[:, 1], c=kmeans_pred, cmap='viridis')
plt.title('ICA (K-Means)')
plt.xlabel('Independent Component 1')
plt.ylabel('Independent Component 2')
# t-SNE plot
plt.subplot(3, 3, 3)
plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=kmeans_pred, cmap='viridis')
plt.title('t-SNE (K-Means)')
# Hierarchical Clustering plot (hierarchical_pred is computed above but
# was never shown; subplot 4 was left empty)
plt.subplot(3, 3, 4)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=hierarchical_pred, cmap='viridis')
plt.title('PCA (Hierarchical)')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
# DBSCAN plot
plt.subplot(3, 3, 5)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=dbscan_pred, cmap='viridis')
plt.title('PCA (DBSCAN)')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
# Isolation Forest plot
plt.subplot(3, 3, 6)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=isolation_forest_pred, cmap='viridis')
plt.title('PCA (Isolation Forest)')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.tight_layout()
plt.show()
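Since iris ships with ground-truth species, a quick sanity check of the clusterings against the true labels is natural here (not in the original notebook; an adjusted Rand index of 1.0 means perfect agreement, 0 is chance level):
In [ ]: # Compare each clustering against the true species labels
from sklearn.metrics import adjusted_rand_score
for name, pred in [("K-Means", kmeans_pred),
                   ("Hierarchical", hierarchical_pred),
                   ("DBSCAN", dbscan_pred)]:
    print(f"{name} adjusted Rand index: {adjusted_rand_score(y, pred):.3f}")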
In [ ]: ####################Deep Learning
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, LSTM, GRU, SimpleRNN, Dropout
from tensorflow.keras.layers import Input, Reshape, Conv2DTranspose, UpSampling2D, Lambda, Concatenate
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import SparseCategoricalCrossentropy, MeanSquaredError, BinaryCrossentropy
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import plot_model
from tensorflow.keras.initializers import RandomNormal
########## Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
########## Normalize the images
x_train = x_train / 255.0
x_test = x_test / 255.0
########## Reshape data for CNN
x_train_cnn = np.expand_dims(x_train, axis=-1)
x_test_cnn = np.expand_dims(x_test, axis=-1)
########## Reshape data for RNN
x_train_rnn = x_train.reshape(-1, 28, 28)
x_test_rnn = x_test.reshape(-1, 28, 28)
########## Define some constants
input_shape = x_train_cnn.shape[1:]
num_classes = 10
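Pages 23-34 of the export, which presumably defined and trained the networks, are missing. Given the imports above, a minimal sketch of the CNN classifier they likely contained; the architecture and epoch count are assumptions, not the original's:
In [ ]: # Reconstructed sketch: a small CNN classifier on MNIST
cnn = Sequential([
    Input(shape=input_shape),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
cnn.compile(optimizer=Adam(), loss=SparseCategoricalCrossentropy(), metrics=['accuracy'])
cnn.fit(x_train_cnn, y_train, epochs=3, batch_size=128,
        validation_split=0.1, callbacks=[EarlyStopping(patience=1)])
print("CNN test accuracy:", cnn.evaluate(x_test_cnn, y_test, verbose=0)[1])
# An RNN counterpart would read each image as a 28-step sequence of rows, e.g.
# Sequential([Input(shape=(28, 28)), LSTM(64), Dense(num_classes, activation='softmax')])
# trained on x_train_rnn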
In [ ]: #####Label Propagation:
from sklearn.semi_supervised import LabelPropagation
from sklearn.metrics import classification_report, confusion_matrix
# The cell that built the semi-supervised split is on pages missing from
# this export; a typical setup masks part of the iris training labels
# with -1, which scikit-learn's semi-supervised API treats as "unlabeled"
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)
rng = np.random.RandomState(42)
y_train = y_train.copy()
y_train[rng.rand(len(y_train)) < 0.5] = -1  # hide half the training labels
# Initialize the Label Propagation model
label_prop_model = LabelPropagation()
# Train the model
label_prop_model.fit(X_train, y_train)
# Predict the labels of the test set
y_pred = label_prop_model.predict(X_test)
# Evaluate the model
print("Label Propagation:")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
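LabelSpreading is the closely related, regularized variant (it softly clamps the known labels instead of fixing them); assuming the same split as above, it is a drop-in swap:
In [ ]: # Variant: Label Spreading
from sklearn.semi_supervised import LabelSpreading
label_spread_model = LabelSpreading(alpha=0.2)
label_spread_model.fit(X_train, y_train)
print("Label Spreading:")
print(classification_report(y_test, label_spread_model.predict(X_test)))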
In [ ]: #####Self-Training:
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.ensemble import RandomForestClassifier

class SelfTrainingClassifier(BaseEstimator, ClassifierMixin):
    def __init__(self, base_classifier=None, threshold=0.75):
        self.base_classifier = base_classifier
        self.threshold = threshold

    def fit(self, X, y):
        y = np.asarray(y).copy()  # work on a copy; don't mutate the caller's labels
        clf = clone(self.base_classifier) if self.base_classifier is not None \
            else RandomForestClassifier()
        clf.fit(X[y != -1], y[y != -1])
        while True:
            y_prob = clf.predict_proba(X)
            unlabeled_idx = np.where(y == -1)[0]
            if len(unlabeled_idx) == 0:
                break
            pseudo_labels = clf.classes_[np.argmax(y_prob[unlabeled_idx], axis=1)]
            max_prob = np.max(y_prob[unlabeled_idx], axis=1)
            confident = max_prob > self.threshold
            if not confident.any():
                break
            # the original used chained indexing (y[mask][mask2] = ...),
            # which assigns into a temporary copy and silently does nothing
            y[unlabeled_idx[confident]] = pseudo_labels[confident]
            # refit on labeled + pseudo-labeled rows only (never on -1)
            clf.fit(X[y != -1], y[y != -1])
        self.classifier_ = clf
        return self

    def predict(self, X):
        return self.classifier_.predict(X)
# Initialize the Self-Training model
self_training_model = SelfTrainingClassifier()
# Train the model
self_training_model.fit(X_train, y_train)
# Predict the labels of the test set
y_pred = self_training_model.predict(X_test)
# Evaluate the model
print("Self-Training:")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
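scikit-learn also ships a built-in implementation of this idea; the first constructor argument is named base_estimator in older releases and estimator in newer ones, so it is passed positionally here:
In [ ]: # Built-in alternative (sklearn >= 0.24)
from sklearn.semi_supervised import SelfTrainingClassifier as SkSelfTraining
sk_self_training = SkSelfTraining(RandomForestClassifier(), threshold=0.75)
sk_self_training.fit(X_train, y_train)
print("Self-Training (scikit-learn built-in):")
print(classification_report(y_test, sk_self_training.predict(X_test)))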
In [ ]: #####Co-Training:
from sklearn.base import clone
from sklearn.tree import DecisionTreeClassifier

def co_training(X_train, y_train, base_classifier=None, num_iterations=10):
    if base_classifier is None:
        base_classifier = DecisionTreeClassifier(random_state=42)
    y_train = np.asarray(y_train).copy()  # don't mutate the caller's labels
    # split the features into two disjoint "views" (iris: 2 features each;
    # requires an even number of columns)
    views = np.split(X_train, 2, axis=1)
    # clone() gives each view its own estimator (the original reused one object)
    classifiers = [clone(base_classifier) for _ in views]
    for clf, view in zip(classifiers, views):
        clf.fit(view[y_train != -1], y_train[y_train != -1])
    for _ in range(num_iterations):
        for i, (clf, view) in enumerate(zip(classifiers, views)):
            pseudo_labels = clf.predict(view)
            unlabeled_mask = y_train == -1
            y_train[unlabeled_mask] = pseudo_labels[unlabeled_mask]
            for j in range(len(classifiers)):
                if j != i:
                    classifiers[j].fit(views[j], y_train)
    return classifiers
# Train the Co-Training model
co_training_classifiers = co_training(X_train, y_train)
# Predict the labels of the test set
views_test = np.split(X_test, 2, axis=1)
y_preds = [clf.predict(view) for clf, view in zip(co_training_classifiers, views_test)]
y_pred = np.array(y_preds).mean(axis=0).round().astype(int)
# Evaluate the model
print("Co-Training:")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
In [ ]: #####Pseudo-Labeling:
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.ensemble import RandomForestClassifier

class PseudoLabelingClassifier(BaseEstimator, ClassifierMixin):
    def __init__(self, base_classifier=None, threshold=0.75):
        self.base_classifier = base_classifier
        self.threshold = threshold

    def fit(self, X, y):
        y = np.asarray(y).copy()  # don't mutate the caller's labels
        clf = clone(self.base_classifier) if self.base_classifier is not None \
            else RandomForestClassifier()
        clf.fit(X[y != -1], y[y != -1])
        y_prob = clf.predict_proba(X)
        pseudo_labels = clf.classes_[np.argmax(y_prob, axis=1)]
        max_prob = np.max(y_prob, axis=1)
        # adopt confident predictions only where the label is actually missing
        confident = (max_prob > self.threshold) & (y == -1)
        y[confident] = pseudo_labels[confident]
        # refit on labeled + pseudo-labeled rows (unconfident -1 rows stay out)
        clf.fit(X[y != -1], y[y != -1])
        self.classifier_ = clf
        return self

    def predict(self, X):
        return self.classifier_.predict(X)
# Initialize the Pseudo-Labeling model
pseudo_labeling_model = PseudoLabelingClassifier()
# Train the model
pseudo_labeling_model.fit(X_train, y_train)
# Predict the labels of the test set
y_pred = pseudo_labeling_model.predict(X_test)
# Evaluate the model
print("Pseudo-Labeling:")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))