The document contains code snippets for implementing various artificial intelligence and machine learning algorithms, including: 1) a two-player tic-tac-toe game; 2) minimax search with alpha-beta pruning; 3) an Expectation-Maximization algorithm for Gaussian mixture modeling; 4) K-means clustering with synthetic data visualization; 5) support vector machine classification on the Iris dataset; and further programs covering Bayesian networks, hidden Markov models, speech recognition, handwritten digit recognition, best-first search, and k-nearest-neighbor classification.

KCA 351 Artificial Intelligence Lab

1. Tic-Tac-Toe Program

def print_board(board):
    for row in board:
        print(" | ".join(row))
        print("-" * 5)

def check_winner(board, player):
    # Check rows, columns, and diagonals for a winner
    for i in range(3):
        if all(board[i][j] == player for j in range(3)) or all(board[j][i] == player for j in range(3)):
            return True
    if all(board[i][i] == player for i in range(3)) or all(board[i][2 - i] == player for i in range(3)):
        return True
    return False

def is_board_full(board):
    return all(board[i][j] != ' ' for i in range(3) for j in range(3))

def tic_tac_toe():
    board = [[' ' for _ in range(3)] for _ in range(3)]
    current_player = 'X'
    while True:
        print_board(board)
        # Get the player's move; reject non-numeric input instead of crashing
        try:
            row = int(input(f"Player {current_player}, enter row (0, 1, or 2): "))
            col = int(input(f"Player {current_player}, enter column (0, 1, or 2): "))
        except ValueError:
            print("Please enter 0, 1, or 2.")
            continue
        # Check if the chosen position is on the board and still empty
        if 0 <= row < 3 and 0 <= col < 3 and board[row][col] == ' ':
            board[row][col] = current_player
            # Check for a winner
            if check_winner(board, current_player):
                print_board(board)
                print(f"Player {current_player} wins!")
                break
            # Check for a tie
            if is_board_full(board):
                print_board(board)
                print("It's a tie!")
                break
            # Switch to the other player
            current_player = 'O' if current_player == 'X' else 'X'
        else:
            print("Invalid move. Try again.")

if __name__ == "__main__":
    tic_tac_toe()

2. Alpha-Beta Pruning

import math

def minimax_alpha_beta_pruning(node, depth, alpha, beta, maximizing_player):
    if depth == 0 or node_is_terminal(node):
        return evaluate_node(node)
    if maximizing_player:
        max_eval = -math.inf
        for child in generate_children(node):
            eval_child = minimax_alpha_beta_pruning(child, depth - 1, alpha, beta, False)
            max_eval = max(max_eval, eval_child)
            alpha = max(alpha, eval_child)
            if beta <= alpha:
                break  # beta cutoff: the minimizer already has a better option elsewhere
        return max_eval
    else:
        min_eval = math.inf
        for child in generate_children(node):
            eval_child = minimax_alpha_beta_pruning(child, depth - 1, alpha, beta, True)
            min_eval = min(min_eval, eval_child)
            beta = min(beta, eval_child)
            if beta <= alpha:
                break  # alpha cutoff: the maximizer already has a better option elsewhere
        return min_eval

def node_is_terminal(node):
    # Check if the node is a terminal node (end of the game).
    # Implement this based on your specific game logic; as written,
    # every node is treated as terminal, so the search stops at the root.
    return True

def evaluate_node(node):
    # Evaluate the current state of the game.
    # Implement this based on your specific game logic.
    return 0

def generate_children(node):
    # Generate possible moves or states for the given node.
    # Implement this based on your specific game logic.
    return []

# Example usage:
initial_node = "root"  # Replace with the initial state of your game
depth_limit = 3        # Set the depth limit for the search tree
result = minimax_alpha_beta_pruning(initial_node, depth_limit, -math.inf, math.inf, True)
print("Optimal value:", result)

3. EM Algorithm

import numpy as np
from scipy.stats import norm

def em_algorithm(data, num_components, max_iterations=100, tolerance=1e-4):
    # Initialization
    n = len(data)
    weights = np.ones(num_components) / num_components
    means = np.linspace(min(data), max(data), num_components)
    variances = np.ones(num_components) * np.var(data)

    for _ in range(max_iterations):
        # E-step: compute responsibilities
        responsibilities = np.zeros((n, num_components))
        for k in range(num_components):
            responsibilities[:, k] = weights[k] * norm.pdf(data, means[k], np.sqrt(variances[k]))
        responsibilities /= responsibilities.sum(axis=1, keepdims=True)

        # M-step: update parameters
        Nk = responsibilities.sum(axis=0)
        weights = Nk / n
        old_means = means
        means = (responsibilities.T @ data) / Nk
        # Broadcast each data point against each component mean: (n, 1) - (K,) -> (n, K)
        variances = (responsibilities * (data[:, None] - means) ** 2).sum(axis=0) / Nk

        # Check for convergence: stop when the means stop moving
        if np.max(np.abs(means - old_means)) < tolerance:
            break

    return weights, means, variances

# Generate synthetic data: a 30/70 mixture of N(-5, 1) and N(5, 1)
np.random.seed(42)
data = np.concatenate([np.random.normal(-5, 1, 300), np.random.normal(5, 1, 700)])

# Run EM algorithm
num_components = 2
estimated_weights, estimated_means, estimated_variances = em_algorithm(data, num_components)

# Display results
print("Estimated Weights:", estimated_weights)
print("Estimated Means:", estimated_means)
print("Estimated Variances:", estimated_variances)


4. K-Means Clustering

import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# Generate synthetic data
np.random.seed(42)
X, _ = make_blobs(n_samples=300, centers=4, random_state=42)

# Apply K-means clustering
num_clusters = 4
kmeans = KMeans(n_clusters=num_clusters, random_state=42)
kmeans.fit(X)
labels = kmeans.labels_
centers = kmeans.cluster_centers_

# Plot the original data and cluster centers
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', alpha=0.7, edgecolors='k')
plt.scatter(centers[:, 0], centers[:, 1], c='red', marker='X', s=200, label='Centers')
plt.title('K-means Clustering')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.legend()
plt.show()
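
num_clusters is fixed at 4 here because the blobs were generated with 4 centers. When the true count is unknown, one common heuristic (sketched below as an addition to the lab code) is the elbow method: plot the K-means inertia over a range of k and pick the point where the curve flattens.

inertias = []
for k in range(1, 9):
    km = KMeans(n_clusters=k, n_init=10, random_state=42).fit(X)
    inertias.append(km.inertia_)
plt.plot(range(1, 9), inertias, marker='o')
plt.xlabel('Number of clusters k')
plt.ylabel('Inertia (within-cluster sum of squares)')
plt.title('Elbow Method')
plt.show()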

5. Support Vector Machine

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

# Load the Iris dataset
iris = datasets.load_iris()
X = iris.data[:, :2]  # Selecting the first two features for simplicity
y = iris.target

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Create a Support Vector Machine (SVM) classifier
svm_classifier = SVC(kernel='linear', C=1.0)
svm_classifier.fit(X_train, y_train)

# Make predictions on the test set
y_pred = svm_classifier.predict(X_test)

# Calculate and print the accuracy of the model
accuracy = accuracy_score(y_test, y_pred)
print(f'Accuracy: {accuracy * 100:.2f}%')

# Plot the decision boundary
def plot_decision_boundary(X, y, model, title):
    h = 0.02  # step size in the mesh
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm, edgecolors='k')
    plt.title(title)
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.show()

# Plot the decision boundary and data points
plot_decision_boundary(X_train, y_train, svm_classifier, 'SVM Decision Boundary (Training Set)')
plot_decision_boundary(X_test, y_test, svm_classifier, 'SVM Decision Boundary (Test Set)')
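
Restricting the model to the first two features keeps the boundary easy to plot but discards information. A quick comparison (an addition to the lab code) trained on all four Iris measurements typically scores noticeably higher:

X_full = iris.data  # all four features instead of the first two
Xf_train, Xf_test, yf_train, yf_test = train_test_split(X_full, y, test_size=0.2, random_state=42)
clf_full = SVC(kernel='linear', C=1.0).fit(Xf_train, yf_train)
print(f"Accuracy with all 4 features: {accuracy_score(yf_test, clf_full.predict(Xf_test)) * 100:.2f}%")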


6. Bayesian Networks

import pandas as pd
from pgmpy.models import BayesianModel  # named BayesianNetwork in recent pgmpy releases
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.inference import VariableElimination

# Define the structure of the Bayesian Network
model = BayesianModel([('A', 'C'), ('B', 'C')])

# Generate some sample data (pgmpy's fit expects a pandas DataFrame)
data = pd.DataFrame({
    'A': [0, 1, 0, 1, 0, 1, 1, 0],
    'B': [0, 0, 1, 1, 1, 0, 0, 1],
    'C': [0, 1, 1, 1, 0, 0, 1, 1],
})

# Fit the model with the data using Maximum Likelihood Estimation
model.fit(data, estimator=MaximumLikelihoodEstimator)

# Print the CPDs (Conditional Probability Distributions)
for cpd in model.get_cpds():
    print(cpd)

# Perform inference using Variable Elimination
inference = VariableElimination(model)
query_result = inference.query(variables=['C'], evidence={'A': 1, 'B': 0})
print(query_result)
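
The query returns the posterior distribution P(C | A=1, B=0), computed by variable elimination from the CPDs that were just fitted.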
7. Hidden Markov Model

import numpy as np
from hmmlearn import hmm

# Define the HMM model: 2 hidden states, 3 observable symbols.
# Note: this targets the older hmmlearn API; recent releases provide
# hmm.CategoricalHMM for discrete observations like these.
model = hmm.MultinomialHMM(n_components=2)

# Set the initial state probabilities
model.startprob_ = np.array([0.5, 0.5])

# Set the state transition matrix
model.transmat_ = np.array([[0.7, 0.3], [0.4, 0.6]])

# Set the emission probabilities (2 states x 3 symbols)
model.emissionprob_ = np.array([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]])

# Generate some sample observations
obs, states = model.sample(n_samples=100)

# Fit the model to the observations (this re-estimates the parameters set above)
model.fit(obs)

# Predict the hidden states for new observations (Viterbi decoding)
new_obs = np.array([[0, 1, 2, 1, 0]]).T
hidden_states = model.predict(new_obs)
print("Predicted Hidden States:", hidden_states)


8. Speech Recognition

import speech_recognition as sr

def recognize_speech():
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something:")
        audio = recognizer.listen(source, timeout=5)
    try:
        print("You said: " + recognizer.recognize_google(audio))
    except sr.UnknownValueError:
        print("Google Web Speech API could not understand audio")
    except sr.RequestError as e:
        print(f"Could not request results from Google Web Speech API; {e}")

if __name__ == "__main__":
    recognize_speech()
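
Note: sr.Microphone requires the PyAudio package in addition to SpeechRecognition, and recognize_google sends the captured audio to Google's Web Speech API, so an internet connection is needed.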
9. Handwritten Digit Recognition

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt

# Load scikit-learn's digits dataset (8x8 images, a small MNIST-like set)
digits = datasets.load_digits()

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target,
                                                    test_size=0.2, random_state=42)

# Create an SVM classifier
clf = SVC(kernel='linear')

# Train the classifier
clf.fit(X_train, y_train)

# Make predictions on the test set
predictions = clf.predict(X_test)

# Calculate accuracy
accuracy = accuracy_score(y_test, predictions)
print(f"Accuracy: {accuracy * 100:.2f}%")

# Visualize some predictions
fig, axes = plt.subplots(2, 5, figsize=(10, 5))
for i, ax in enumerate(axes.flat):
    ax.imshow(X_test[i].reshape(8, 8), cmap='gray')
    ax.set_title(f"Prediction: {predictions[i]}")
    ax.axis('off')
plt.show()
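
Accuracy alone hides which digits get confused with which. A short addition using scikit-learn's confusion matrix makes the per-class errors visible:

from sklearn.metrics import confusion_matrix

# Rows are true digits, columns are predicted digits
print(confusion_matrix(y_test, predictions))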
10. Best-First Search

from queue import PriorityQueue

class Graph:
    def __init__(self):
        self.graph = {}

    def add_edge(self, node, neighbors):
        self.graph[node] = neighbors

def best_first_search(graph, start, goal):
    visited = set()
    priority_queue = PriorityQueue()
    priority_queue.put((0, start))
    while not priority_queue.empty():
        current_cost, current_node = priority_queue.get()
        if current_node not in visited:
            print("Visiting:", current_node)
            visited.add(current_node)
            if current_node == goal:
                print("Goal reached!")
                break
            for neighbor, cost in graph.graph[current_node]:
                if neighbor not in visited:
                    priority_queue.put((cost, neighbor))

# Example usage
if __name__ == "__main__":
    g = Graph()
    g.add_edge("A", [("B", 2), ("C", 1)])
    g.add_edge("B", [("D", 3), ("E", 4)])
    g.add_edge("C", [("F", 5)])
    g.add_edge("D", [])
    g.add_edge("E", [])
    g.add_edge("F", [])
    print("Best-First Search:")
    best_first_search(g, "A", "D")
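
The queue above is keyed on the single edge cost, so this is a greedy best-first search: it never tracks accumulated path cost. A small variant (an illustrative addition, reusing the Graph class and PriorityQueue from above) that accumulates cost along the path turns it into uniform-cost search:

def uniform_cost_search(graph, start, goal):
    visited = set()
    pq = PriorityQueue()
    pq.put((0, start))  # priority = total cost accumulated from the start node
    while not pq.empty():
        total_cost, node = pq.get()
        if node in visited:
            continue
        visited.add(node)
        if node == goal:
            print(f"Goal {goal} reached with total cost {total_cost}")
            return
        for neighbor, cost in graph.graph[node]:
            if neighbor not in visited:
                pq.put((total_cost + cost, neighbor))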


11. Write a program that can predict whether a given fruit is an apple or an orange,
using a simple pattern-recognition algorithm called k-nearest neighbors (k-NN).

from collections import Counter

def find_nearest_neighbors(fruit, data, k):
    # Distance = number of attributes that differ (a Hamming distance);
    # zip stops at the label-free test fruit, so labels are not compared
    def distance(example):
        return sum(1 for a, b in zip(fruit, example) if a != b)
    return sorted(data, key=distance)[:k]

def predict(fruit):
    # Find the k nearest neighbors of the fruit (k=3: odd, so no vote ties)
    nearest_neighbors = find_nearest_neighbors(fruit, training_data, k=3)
    # Count the class labels among the nearest neighbors
    votes = Counter(neighbor[-1] for neighbor in nearest_neighbors)
    # Predict the majority class among the nearest neighbors
    return votes.most_common(1)[0][0]

# Create a training dataset: [color, shape, label]
training_data = [
    ['red', 'round', 'apple'],
    ['red', 'oval', 'apple'],
    ['yellow', 'round', 'orange'],
    ['yellow', 'oval', 'orange'],
]

# Create a test fruit
test_fruit = ['red', 'round']

# Predict the label of the test fruit
prediction = predict(test_fruit)
print(prediction)

Output:
apple
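
The features here are categorical strings, so Euclidean distance is not defined; the find_nearest_neighbors helper instead counts mismatched attributes. With only four training examples and two classes, an even k can split the vote (k=4 pits two apples against two oranges), which is why an odd k such as 3 is used.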
