AIML PRACTICALS
(Established in 1947 as Government Engineering College, Jabalpur; declared autonomous by the Government of Madhya Pradesh and RGPV, Bhopal)
# Importing libraries
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# Load Data
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/housetrain.csv')
df_float = df.select_dtypes(include=['float64']).copy()
# Regression function: fit a linear model and return it with its training MSE
def regression_analysis(feature, target):
    lr = LinearRegression()
    lr.fit(feature, target)
    mse = mean_squared_error(target, lr.predict(feature))
    return lr, mse
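A minimal usage sketch (not in the original listing): fit the model to each float-typed feature against the sale price. The 'SalePrice' column name and the mean-imputation of missing values are assumptions about this dataset.
# Hypothetical usage; 'SalePrice' target and mean-imputation are assumptions
target = df['SalePrice']
for col in df_float.columns:
    feature = df_float[[col]].fillna(df_float[col].mean())  # impute NaNs so fit() succeeds
    model, mse = regression_analysis(feature, target)
    print(f"{col}: MSE = {mse:.2f}")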
# Importing libraries
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# Load dataset
df = pd.read_csv("/kaggle/input/cardataset/data.csv")
# corr(numeric_only=True) keeps recent pandas from failing on the text columns
print(df.head(), "\n", df.shape, "\n", df.corr(numeric_only=True), "\n", df.describe())
# Set X and y
X = df[['Weight', 'Volume']]
y = df['CO2']
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=100)
# Model training
reg_model = LinearRegression().fit(X_train, y_train)
print('Intercept:', reg_model.intercept_)
print('Coefficients:', dict(zip(X.columns, reg_model.coef_)))
# Predictions
y_pred = reg_model.predict(X_test)
reg_model_diff = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
print(reg_model_diff)
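To use the fitted model on unseen data, a single prediction looks like this (the weight and volume values below are made up for illustration):
# Hypothetical new car: made-up weight (kg) and engine volume (cm3)
new_car = pd.DataFrame({'Weight': [1300], 'Volume': [1300]})
print('Predicted CO2:', reg_model.predict(new_car))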
# Metrics
mae = metrics.mean_absolute_error(y_test, y_pred)
mse = metrics.mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
print('MAE:', mae, '\nMSE:', mse, '\nRMSE:', rmse)
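As an optional sanity check on fit quality (not part of the original listing), the coefficient of determination can be reported alongside these metrics:
# Optional extra: R^2 on the test set
from sklearn.metrics import r2_score
print('R2:', r2_score(y_test, y_pred))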
# Feature scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Importing libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from matplotlib.colors import ListedColormap
# Loading the dataset
df = pd.read_csv('Social_Network_Ads.csv')
X = df.iloc[:, 2:4].values
y = df.iloc[:, 4].values
# Splitting the dataset and scaling features
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting KNN classifier
clf = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
clf.fit(X_train, y_train)
# Predicting the test set results
y_pred = clf.predict(X_test)
# Confusion matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
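Overall accuracy can be read directly off the confusion matrix (a small optional check, not in the original listing):
# Accuracy = (TP + TN) / total = trace of cm over its sum
print('Accuracy:', np.trace(cm) / cm.sum())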
# Visualizing the training set results
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, clf.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('red', 'green')))
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
plt.title('KNN Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
Output :
Experiment-7: Apply Hierarchical Clustering to solve the given problem:
# Importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
# Load dataset
df = pd.read_csv('Mall_Customers.csv')
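The imports above set up a dendrogram and agglomerative clustering, but the hierarchical step itself is missing from the listing. A minimal sketch follows, assuming the usual choice of the Annual Income and Spending Score columns (columns 3 and 4 of Mall_Customers.csv) as the feature matrix:
# Assumed feature matrix: Annual Income and Spending Score columns
X = df.iloc[:, [3, 4]].values
# Dendrogram with Ward linkage to judge the number of clusters
dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean distance')
plt.show()
# Agglomerative clustering with the cluster count suggested by the dendrogram (assumed 5)
hc = AgglomerativeClustering(n_clusters=5, linkage='ward')
y_hc = hc.fit_predict(X)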
# Applying KMeans clustering to the data with the optimal number of clusters
# (5, as determined from the elbow method)
from sklearn.cluster import KMeans
clf = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10, random_state=0)
y_kmeans = clf.fit_predict(X)
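The segmentation can be inspected visually; the plot below is an optional addition, reusing the feature matrix X defined above:
# Scatter each cluster and overlay the fitted centroids
for c in range(5):
    plt.scatter(X[y_kmeans == c, 0], X[y_kmeans == c, 1], s=20, label=f'Cluster {c + 1}')
plt.scatter(clf.cluster_centers_[:, 0], clf.cluster_centers_[:, 1],
            s=100, c='black', marker='x', label='Centroids')
plt.title('Customer segments (K-Means)')
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.show()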
Output :
Experiment-9: Implement Perceptrons to solve the given problem:
# Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
# Compute accuracy
def compute_accuracy(model, X, y):
    # Fraction of samples whose predicted label matches the true one
    return sum(model.forward(x) == y_true for x, y_true in zip(X, y)) / len(y)
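The Perceptron model that compute_accuracy expects is missing from the listing. A minimal sketch is given below, assuming a forward(x) method that returns a 0/1 label and the classic perceptron learning rule; the class name, learning rate, and epoch count are assumptions:
# Minimal perceptron sketch (assumed interface: forward(x) -> 0 or 1)
class Perceptron:
    def __init__(self, n_features, lr=0.1):
        self.w = np.zeros(n_features)
        self.b = 0.0
        self.lr = lr

    def forward(self, x):
        # Step activation on the affine score w.x + b
        return 1 if np.dot(self.w, x) + self.b >= 0 else 0

    def fit(self, X, y, epochs=20):
        # Classic perceptron rule: move weights by the signed error
        for _ in range(epochs):
            for x, y_true in zip(X, y):
                err = y_true - self.forward(x)
                self.w += self.lr * err * x
                self.b += self.lr * err

# Hypothetical usage on a toy dataset from make_classification
X, y = make_classification(n_samples=200, n_features=2, n_informative=2,
                           n_redundant=0, n_clusters_per_class=1, random_state=0)
model = Perceptron(n_features=2)
model.fit(X, y)
print('Training accuracy:', compute_accuracy(model, X, y))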
Output :
Experiment-10: Apply Error Backpropagation Algorithm to solve the given problem:
# Importing libraries
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# Load dataset
data = load_iris()
X = data.data
y = pd.get_dummies(data.target).values
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
# Sigmoid activation
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
# Accuracy: fraction of rows where the predicted class matches the true class
def accuracy(pred, true):
    return np.mean(np.argmax(pred, axis=1) == np.argmax(true, axis=1))
# Network sizes and hyperparameters (hidden_size, learning_rate and iterations
# are assumed values; the original listing does not give them)
input_size = X.shape[1]    # 4 features
hidden_size = 8
output_size = y.shape[1]   # 3 classes
learning_rate = 0.01
iterations = 5000
# Initialize weights
W1 = np.random.normal(scale=0.5, size=(input_size, hidden_size))
W2 = np.random.normal(scale=0.5, size=(hidden_size, output_size))
# Training
for itr in range(iterations):
    # Feedforward
    A1 = sigmoid(np.dot(X_train, W1))
    A2 = sigmoid(np.dot(A1, W2))
    # Backpropagation: dW2 and dW1 are the layer error terms (deltas)
    dW2 = (A2 - y_train) * A2 * (1 - A2)
    dW1 = np.dot(dW2, W2.T) * A1 * (1 - A1)
    # Gradient-descent weight updates (missing from the original listing)
    W2 -= learning_rate * np.dot(A1.T, dW2)
    W1 -= learning_rate * np.dot(X_train.T, dW1)
# Test Accuracy
A1_test = sigmoid(np.dot(X_test, W1))
A2_test = sigmoid(np.dot(A1_test, W2))
acc = accuracy(A2_test, y_test)
print(f"Accuracy: {acc}")
Output: