This document contains implementations of three machine learning algorithms: k-Nearest Neighbor for classifying the Iris dataset, Locally Weighted Regression for fitting data points and visualizing the results, and Q-learning for navigating a simple grid environment. Each section includes Python code demonstrating data generation, training, and evaluation, together with the expected output.


8. Implement the k-Nearest Neighbor algorithm to classify the Iris dataset, printing both correct and incorrect predictions.

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from collections import Counter

def euclidean_distance(p1, p2):
    return np.sqrt(np.sum((p1 - p2) ** 2))

def knn_classify(train_X, train_y, test_X, k=3):
    predictions = []
    for test_point in test_X:
        # Distance from the query point to every training point
        distances = [euclidean_distance(test_point, train_point) for train_point in train_X]
        # Indices of the k closest training points
        k_indices = np.argsort(distances)[:k]
        k_nearest_labels = [train_y[i] for i in k_indices]
        # Majority vote among the k nearest labels
        most_common = Counter(k_nearest_labels).most_common(1)[0][0]
        predictions.append(most_common)
    return np.array(predictions)

# Load the Iris dataset
iris = datasets.load_iris()
X, y = iris.data, iris.target

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Predict using k-NN
k = 3
y_pred = knn_classify(X_train, y_train, X_test, k)

# Print correct and incorrect predictions
for i in range(len(y_test)):
    if y_pred[i] == y_test[i]:
        print(f'Correct: Predicted {y_pred[i]}, Actual {y_test[i]}')
    else:
        print(f'Incorrect: Predicted {y_pred[i]}, Actual {y_test[i]}')
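
To quantify overall performance beyond the per-sample printout, the following minimal sketch (reusing the y_pred, y_test, and k variables from above) reports the test accuracy:

# Overall accuracy: fraction of test samples classified correctly
accuracy = np.mean(y_pred == y_test)
print(f'Accuracy with k={k}: {accuracy * 100:.2f}%')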

9. Develop a program to implement the non-parametric Locally Weighted Regression algorithm, fitting data points and visualizing results.

import numpy as np
import matplotlib.pyplot as plt

# Generate sample data
np.random.seed(42)
X = np.linspace(0, 10, 100)  # 100 data points between 0 and 10
y = np.sin(X) + np.random.normal(scale=0.1, size=X.shape)  # Sine function with noise
X = X.reshape(-1, 1)  # Reshape X to a column vector

# Locally Weighted Regression function
def locally_weighted_regression(X_train, y_train, X_query, tau=0.5):
    m = X_train.shape[0]
    y_pred = np.zeros(X_query.shape[0])  # Placeholder for predictions

    for i, x_q in enumerate(X_query):
        # Compute weights using a Gaussian kernel centred on the query point
        weights = np.exp(-np.sum((X_train - x_q) ** 2, axis=1) / (2 * tau ** 2))
        W = np.diag(weights)  # Convert to a diagonal weight matrix

        # Weighted least squares solution: theta = (X^T W X)^(-1) X^T W y
        X_bias = np.c_[np.ones((m, 1)), X_train]  # Add bias term (column of 1s)
        theta = np.linalg.inv(X_bias.T @ W @ X_bias) @ X_bias.T @ W @ y_train

        # Build the query point with its bias term: [1, x_q]
        x_q_bias = np.array([1, x_q.item()])
        y_pred[i] = x_q_bias @ theta  # Predict output for this query point

    return y_pred

# Define query points for prediction
X_query = np.linspace(0, 10, 100).reshape(-1, 1)  # Points to predict
tau = 0.5  # Bandwidth parameter

# Get predictions
y_pred = locally_weighted_regression(X, y, X_query, tau)

# Plot results
plt.scatter(X, y, label="Training Data", color="blue", alpha=0.6)
plt.plot(X_query, y_pred, label=f"LWR Prediction (tau={tau})", color="red", linewidth=2)
plt.xlabel("X")
plt.ylabel("y")
plt.title("Locally Weighted Regression")
plt.legend()
plt.show()
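
The bandwidth tau controls how local each fit is: a small tau chases the noise, while a large tau approaches a single global linear fit. As a minimal sketch (reusing locally_weighted_regression, X, y, and X_query from above; the tau values here are illustrative), the effect can be visualized by overlaying several bandwidths:

# Compare several illustrative bandwidths on the same data
plt.scatter(X, y, label="Training Data", color="blue", alpha=0.3)
for t in [0.1, 0.5, 2.0]:
    plt.plot(X_query, locally_weighted_regression(X, y, X_query, tau=t), label=f"tau={t}")
plt.xlabel("X")
plt.ylabel("y")
plt.title("Effect of bandwidth tau on LWR")
plt.legend()
plt.show()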

10. Implement a Q-learning algorithm to navigate a simple grid environment, defining the reward structure and analyzing agent performance.

import numpy as np
import random

# Define the environment (3x3 grid)
GRID_SIZE = 3
GOAL = (2, 2)
START = (0, 0)
ACTIONS = ["up", "down", "left", "right"]
ACTION_MAP = {"up": (-1, 0), "down": (1, 0), "left": (0, -1), "right": (0, 1)}

# Initialize Q-table: one Q-value per (row, column, action)
Q_table = np.zeros((GRID_SIZE, GRID_SIZE, len(ACTIONS)))

# Q-learning parameters
alpha = 0.1     # Learning rate
gamma = 0.9     # Discount factor
epsilon = 0.2   # Exploration rate
episodes = 200  # Training episodes

# Function to get the next state and reward for an action
def get_next_state(state, action):
    r, c = state
    dr, dc = ACTION_MAP[action]
    new_r, new_c = r + dr, c + dc
    if 0 <= new_r < GRID_SIZE and 0 <= new_c < GRID_SIZE:
        return (new_r, new_c), -0.1  # Small step penalty
    return state, -1  # Penalty for hitting a wall

# Train the agent; each episode ends when the goal is reached
for _ in range(episodes):
    state = START
    while state != GOAL:
        # Epsilon-greedy action selection
        if random.uniform(0, 1) < epsilon:
            action = random.choice(ACTIONS)
        else:
            action = ACTIONS[np.argmax(Q_table[state])]
        next_state, reward = get_next_state(state, action)
        # Q-learning update rule
        a_idx = ACTIONS.index(action)
        best_next = np.max(Q_table[next_state])
        Q_table[state][a_idx] += alpha * (reward + gamma * best_next - Q_table[state][a_idx])
        state = next_state

# Print the final learned policy
print("Optimal Policy Grid:")
for r in range(GRID_SIZE):
    for c in range(GRID_SIZE):
        if (r, c) == GOAL:
            print("⭐", end=" ")
        else:
            best_action = ACTIONS[np.argmax(Q_table[r, c])]
            print({"up": "⬆️", "down": "⬇️", "left": "⬅️", "right": "➡️"}[best_action], end=" ")
    print()

Output:
Optimal Policy Grid:
⬇️ ➡️ ⬇️
➡️ ⬇️ ⬇️
➡️ ➡️ ⭐
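
To analyze the trained agent's performance, the following minimal sketch (reusing Q_table, START, GOAL, ACTIONS, GRID_SIZE, and get_next_state from above) follows the greedy policy from the start state and reports the resulting path; the step cap is a safeguard added here in case the policy loops:

# Roll out the learned greedy policy and measure the path to the goal
state, path = START, [START]
while state != GOAL and len(path) < GRID_SIZE * GRID_SIZE:
    action = ACTIONS[np.argmax(Q_table[state])]
    state, _ = get_next_state(state, action)
    path.append(state)
print(f"Greedy path ({len(path) - 1} steps): {path}")

For this 3x3 grid, a well-trained policy reaches the goal from (0, 0) in 4 steps.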
