Practical File (2)
# Fragment of the BFS (breadth-first search) practical; only the main loop survives in this copy
while queue:
    current_node = queue.popleft()
    print(current_node, end=' ')
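Only the loop above survives from the graph-traversal practical; a minimal complete sketch, assuming the graph is an adjacency-list dictionary (the example graph is illustrative, not from the original file):

from collections import deque

def bfs(graph, start):
    # Visit nodes level by level, printing each node as it is dequeued
    visited = {start}
    queue = deque([start])
    while queue:
        current_node = queue.popleft()
        print(current_node, end=' ')
        for neighbour in graph[current_node]:
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)

if __name__ == "__main__":
    graph = {'A': ['B', 'C'], 'B': ['D'], 'C': ['D'], 'D': []}
    bfs(graph, 'A')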
def print_board(board):
    for row in board:
        print(" | ".join(row))
        print("-" * 5)

def check_winner(board):
    # Check rows
    for row in board:
        if all(cell == row[0] for cell in row) and row[0] != ' ':
            return True
    # Check columns
    for col in range(3):
        if all(board[row][col] == board[0][col] for row in range(3)) and board[0][col] != ' ':
            return True
    # Check diagonals
    if board[0][0] == board[1][1] == board[2][2] != ' ' or board[0][2] == board[1][1] == board[2][0] != ' ':
        return True
    return False

def is_board_full(board):
    return all(cell != ' ' for row in board for cell in row)
def tic_tac_toe():
    board = [[' ' for _ in range(3)] for _ in range(3)]
    current_player = 'X'
    while True:
        print_board(board)
        # Read the current player's move as "row col" (0-2)
        row, col = map(int, input(f"Player {current_player}, enter row and column (0-2): ").split())
        if not (0 <= row < 3 and 0 <= col < 3) or board[row][col] != ' ':
            print("Invalid move, try again.")
            continue
        board[row][col] = current_player
        if check_winner(board):
            print_board(board)
            print(f"Player {current_player} wins!")
            break
        elif is_board_full(board):
            print_board(board)
            print("It's a tie!")
            break
        current_player = 'O' if current_player == 'X' else 'X'

if __name__ == "__main__":
    tic_tac_toe()
from queue import PriorityQueue

class PuzzleNode:
    def __init__(self, state, parent=None, move=None):
        self.state = state
        self.parent = parent
        self.move = move
        self.cost = self.calculate_cost()

    def calculate_cost(self):
        # A* heuristic: Manhattan distance of every tile from its goal position
        cost = 0
        for i in range(3):
            for j in range(3):
                if self.state[i][j] != 0:
                    row, col = divmod(self.state[i][j] - 1, 3)
                    cost += abs(row - i) + abs(col - j)
        return cost

    def __lt__(self, other):
        # Lets PriorityQueue order nodes by heuristic cost
        return self.cost < other.cost
def print_board(board):
    for row in board:
        print(" ".join(map(str, row)))
    print()

def get_blank_position(board):
    for i in range(3):
        for j in range(3):
            if board[i][j] == 0:
                return i, j

def is_goal_state(board):
    return board == [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
def generate_neighbors(node):
    i, j = get_blank_position(node.state)
    neighbors = []
    for move_i, move_j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
        new_i, new_j = i + move_i, j + move_j
        if 0 <= new_i < 3 and 0 <= new_j < 3:
            new_state = [row[:] for row in node.state]
            new_state[i][j], new_state[new_i][new_j] = new_state[new_i][new_j], new_state[i][j]
            neighbors.append(PuzzleNode(new_state, node, (i, j)))
    return neighbors
def trace_solution(node):
    path = []
    while node:
        path.append((node.move[0] * 3 + node.move[1]) if node.move else None)
        node = node.parent
    return path[::-1]
def solve_puzzle(initial_state):
    start_node = PuzzleNode(initial_state)
    frontier = PriorityQueue()
    frontier.put(start_node)
    explored = set()
    while not frontier.empty():
        current_node = frontier.get()
        if is_goal_state(current_node.state):
            solution_path = trace_solution(current_node)
            return solution_path
        explored.add(tuple(map(tuple, current_node.state)))
        # Expand the current node and queue unexplored neighbours
        for neighbor in generate_neighbors(current_node):
            if tuple(map(tuple, neighbor.state)) not in explored:
                frontier.put(neighbor)
    return None
if __name__ == "__main__":
    # Example initial state (you can modify this)
    initial_state = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
    print("Initial State:")
    print_board(initial_state)
    solution_path = solve_puzzle(initial_state)
    if solution_path:
        print("Solution Path:")
        for move in solution_path:
            if move is not None:
                print(f"Move {move + 1}")
    else:
        print("No solution found.")
class WaterJugProblem:
    def __init__(self, jug1_capacity, jug2_capacity, target_amount):
        self.jug1_capacity = jug1_capacity
        self.jug2_capacity = jug2_capacity
        self.target_amount = target_amount
        self.state = (0, 0)  # Initial state: both jugs empty

    def solve(self):
        # Breadth-first search over (jug1, jug2) states, remembering each state's parent
        visited_states = set()
        parents = {self.state: None}
        frontier = [self.state]
        while frontier:
            current_state = frontier.pop(0)
            if self.is_goal_state(current_state):
                return self.trace_solution(current_state, parents)
            visited_states.add(current_state)
            next_states = [
                self.fill_jug1(current_state),
                self.fill_jug2(current_state),
                self.empty_jug1(current_state),
                self.empty_jug2(current_state),
                self.pour_jug1_to_jug2(current_state),
                self.pour_jug2_to_jug1(current_state),
            ]
            for state in next_states:
                if state not in visited_states and state not in parents:
                    parents[state] = current_state
                    frontier.append(state)
        return None
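    # The goal test, the jug operators and the path reconstruction used by solve()
    # are missing in this copy of the file. The methods below are a minimal
    # reconstruction (assumption: each state is a (jug1, jug2) tuple and parents
    # is the mapping built in solve()); they belong inside WaterJugProblem.
    def is_goal_state(self, state):
        # Goal: either jug holds exactly the target amount
        return self.target_amount in state

    def fill_jug1(self, state):
        return (self.jug1_capacity, state[1])

    def fill_jug2(self, state):
        return (state[0], self.jug2_capacity)

    def empty_jug1(self, state):
        return (0, state[1])

    def empty_jug2(self, state):
        return (state[0], 0)

    def pour_jug1_to_jug2(self, state):
        amount = min(state[0], self.jug2_capacity - state[1])
        return (state[0] - amount, state[1] + amount)

    def pour_jug2_to_jug1(self, state):
        amount = min(state[1], self.jug1_capacity - state[0])
        return (state[0] + amount, state[1] - amount)

    def trace_solution(self, state, parents):
        # Walk parent links back to the initial state, then reverse the path
        path = []
        while state is not None:
            path.append(state)
            state = parents[state]
        return path[::-1]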
if __name__ == "__main__":
    # Example: Jug1 capacity = 4, Jug2 capacity = 3, target amount = 2
    water_jug_problem = WaterJugProblem(4, 3, 2)
    solution_path = water_jug_problem.solve()
    if solution_path:
        print("Solution Path:")
        for state in solution_path:
            print(f"Jug1: {state[0]}, Jug2: {state[1]}")
    else:
        print("No solution found.")
import itertools
import math

def calculate_distance(city1, city2):
    # Euclidean distance between two (x, y) points
    return math.hypot(city1[0] - city2[0], city1[1] - city2[1])

def nearest_neighbor(cities):
    num_cities = len(cities)
    unvisited = set(range(1, num_cities))  # Start from city 0, so exclude it from the unvisited set
    path = [0]  # Start from city 0
    current_city = 0
    while unvisited:
        nearest_city = min(unvisited, key=lambda city: calculate_distance(cities[current_city], cities[city]))
        path.append(nearest_city)
        unvisited.remove(nearest_city)
        current_city = nearest_city
    return path
def tsp_bruteforce(cities):
    num_cities = len(cities)
    all_permutations = itertools.permutations(range(num_cities))
    min_distance = float('inf')
    best_path = None
    # Evaluate the total tour length (returning to the start) of every permutation
    for perm in all_permutations:
        distance = sum(calculate_distance(cities[perm[i]], cities[perm[(i + 1) % num_cities]]) for i in range(num_cities))
        if distance < min_distance:
            min_distance = distance
            best_path = perm
    return best_path
if __name__ == "__main__":
    # Example: Cities represented as (x, y) coordinates
    cities = [(0, 0), (1, 2), (2, 4), (3, 1)]
    print("Nearest neighbour path:", nearest_neighbor(cities))
    print("Brute force path:", tsp_bruteforce(cities))
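The tower_of_hanoi function called in the block below does not appear in this copy of the file; a standard recursive sketch:

def tower_of_hanoi(n, source, destination, auxiliary):
    # Move n disks from source to destination, using auxiliary as the spare peg
    if n == 1:
        print(f"Move disk 1 from {source} to {destination}")
        return
    tower_of_hanoi(n - 1, source, auxiliary, destination)
    print(f"Move disk {n} from {source} to {destination}")
    tower_of_hanoi(n - 1, auxiliary, destination, source)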
if __name__ == "__main__":
    # Example: 3 disks on peg A, moving to peg C with the help of peg B
    num_disks = 3
    tower_of_hanoi(num_disks, 'A', 'C', 'B')
class MonkeyBananaProblem:
    def __init__(self):
        # Grid positions (row, column) of the monkey, the box and the banana
        self.state = {'monkey': (1, 1), 'box': (2, 2), 'banana': (4, 4)}

    def get_possible_actions(self, state):
        # Push actions assumed; the start of this method is not shown in the source
        possible_actions = ['push up', 'push down', 'push left', 'push right']
        if state['monkey'] == state['box']:
            possible_actions.append('climb box')
        return possible_actions
    def apply_action(self, state, action):
        new_state = dict(state)  # copy so the original state is not modified
        if 'push' in action:
            direction = action.split()[-1]
            if direction == 'up':
                new_state['box'] = (state['box'][0] - 1, state['box'][1])
            elif direction == 'down':
                new_state['box'] = (state['box'][0] + 1, state['box'][1])
            elif direction == 'left':
                new_state['box'] = (state['box'][0], state['box'][1] - 1)
            elif direction == 'right':
                new_state['box'] = (state['box'][0], state['box'][1] + 1)
        return new_state
def solve_monkey_banana_problem():
    problem = MonkeyBananaProblem()
    current_state = problem.state
    print("Initial State:")
    print(current_state)
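    # The rest of the solving routine is missing in this copy. A minimal greedy
    # sketch (assumption, not the original logic): push the box until it sits
    # under the banana, then climb it.
    while current_state['box'] != current_state['banana']:
        if current_state['box'][0] < current_state['banana'][0]:
            action = 'push down'
        elif current_state['box'][0] > current_state['banana'][0]:
            action = 'push up'
        elif current_state['box'][1] < current_state['banana'][1]:
            action = 'push right'
        else:
            action = 'push left'
        current_state = problem.apply_action(current_state, action)
        print(f"Action: {action} -> {current_state}")
    print("Action: climb box -> the monkey reaches the banana")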
if __name__ == "__main__":
    solve_monkey_banana_problem()
from PIL import Image
import pytesseract  # assumes the Tesseract OCR engine is installed

def recognize_characters(image_path):
    # Open the image file and extract its text with Tesseract OCR
    img = Image.open(image_path)
    text = pytesseract.image_to_string(img)
    return text
if __name__ == "__main__":
    # Example: Replace 'your_image_path.png' with the path to your image file
    image_path = 'your_image_path.png'
    result = recognize_characters(image_path)
    print("Recognized Characters:")
    print(result)
class BayesianNetwork:
    def __init__(self):
        # Priors P(I) and P(S), and the conditional table P(P = true | I, S)
        self.nodes = {'I': 0.7, 'S': 0.8,
                      'P': {'I': {True: {'S': {True: 0.9, False: 0.2}},
                                  False: {'S': {True: 0.6, False: 0.1}}}}}
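    # No query routine appears in this copy of the file. A minimal sketch
    # (assumption): compute P(P = true) by enumerating the parent values I and S.
    # Usage, e.g.: print(BayesianNetwork().query_p())
    def query_p(self):
        total = 0.0
        for i_val, p_i in [(True, self.nodes['I']), (False, 1 - self.nodes['I'])]:
            for s_val, p_s in [(True, self.nodes['S']), (False, 1 - self.nodes['S'])]:
                total += p_i * p_s * self.nodes['P']['I'][i_val]['S'][s_val]
        return total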
if __name__ == "__main__":
    # Create a Bayesian Network
    bayesian_network = BayesianNetwork()
% Linear Regression using Gradient Descent
clear; clc;
% Data (X: input, y: output)
X = [1; 2; 3; 4; 5];
y = [2; 4; 6; 8; 10];
% Initial parameters
theta = rand(2,1);
alpha = 0.01;
iterations = 1000;
m = length(y);
X = [ones(m,1), X];   % Add a column of ones so theta = [intercept; slope]
% Gradient descent
for iter = 1:iterations
    h = X * theta; % Hypothesis
    theta = theta - (alpha/m) * X' * (h - y);
end
% Display results
disp('Theta values:'), disp(theta);
disp('Predicted values:'), disp(X * theta);
o/p
Theta values:
0.0
2.0
Predicted values:
2.0000
4.0000
6.0000
8.0000
10.0000
% Logistic Regression using Gradient Descent
clear; clc;
X = [1; 2; 3; 4; 5];
y = [0; 0; 0; 1; 1];
% Initial parameters
theta = rand(2,1);
m = length(y);
X = [ones(m,1), X];
% Sigmoid function
sigmoid = @(z) 1 ./ (1 + exp(-z));
alpha = 0.01;
iterations = 1000;
% Gradient descent
for iter = 1:iterations
    h = sigmoid(X * theta);
    theta = theta - (alpha/m) * X' * (h - y);
end
% Display results
disp('Theta values:'), disp(theta);
disp('Predicted values:'), disp(round(sigmoid(X * theta)));
o/p
Theta values:
-6.3
3.1
Predicted values:
% K-Nearest Neighbors
clear; clc;
% Training data and labels (example values; the original data is not shown in this copy)
X_train = [1 1; 1 2; 2 2; 5 5; 6 5; 6 6];
y_train = [0; 0; 0; 1; 1; 1];
% Test data
X_test = [1.5 1.5];
% K value
K = 3;
% Euclidean distance from the test point to every training point
distances = sqrt(sum((X_train - X_test).^2, 2));
% Labels of the K closest training points
[~, sorted_idx] = sort(distances);
nearest_neighbors = y_train(sorted_idx(1:K));
% Majority vote
prediction = mode(nearest_neighbors);
% Display prediction
disp('Predicted class:'), disp(prediction);
o/p
Predicted class:
0
o/p
Predicted label:
'setosa'
15. Neural Network (Simple Feedforward)
% Simple Neural Network
clear; clc;
% Input data (X) and output labels (y)
X = [0 0; 0 1; 1 0; 1 1];
y = [0; 1; 1; 0]; % XOR problem
% Define neural network
net = feedforwardnet(2); % 2 hidden neurons
net = train(net, X', y');
% Show the trained network's outputs for the four XOR inputs
predictions = net(X');
disp('Predictions:'), disp(predictions);
% Convolutional Neural Network (CNN)
layers = [
    imageInputLayer([28 28 1])
    convolution2dLayer(3, 8, 'Padding', 'same')
    reluLayer
    maxPooling2dLayer(2, 'Stride', 2)
    fullyConnectedLayer(10)
    softmaxLayer
    classificationLayer];
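% The data loading and training code is missing in this copy. A minimal sketch,
% assuming the DigitDataset images that ship with Deep Learning Toolbox:
digitDatasetPath = fullfile(matlabroot, 'toolbox', 'nnet', 'nndemos', 'nndatasets', 'DigitDataset');
imds = imageDatastore(digitDatasetPath, 'IncludeSubfolders', true, 'LabelSource', 'foldernames');
options = trainingOptions('sgdm', 'MaxEpochs', 3, 'Verbose', true);
net = trainNetwork(imds, layers, options);
disp('Training completed.');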
o/p
Epoch 1/3
...
Training completed.
% K-Means Clustering
clear; clc;
% Data points
X = [1 1; 2 2; 3 3; 6 6; 7 7; 8 8];
% Number of clusters
K = 2;
% Apply K-means
[idx, C] = kmeans(X, K);
% Display results
disp('Cluster indices:'), disp(idx);
disp('Cluster centers:'), disp(C);
Cluster centers:
2.0000 2.0000
7.0000 7.0000
% SVM Classifier
clear; clc;
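% The classifier code is missing in this copy. A minimal sketch using the
% built-in fisheriris data (the test measurements below are illustrative):
load fisheriris
% fitcsvm is a two-class SVM, so keep only versicolor and virginica
idx = ~strcmp(species, 'setosa');
svm_model = fitcsvm(meas(idx, :), species(idx));
X_test = [5.9 3.0 4.2 1.5];   % measurements of a typical versicolor flower
predicted_label = predict(svm_model, X_test);
disp('Predicted label:'), disp(predicted_label);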
o/p
Predicted label:
'versicolor'
% Number of variables
nVars = 2;
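% The objective function and solver call are missing in this copy. A minimal
% sketch using the Global Optimization Toolbox genetic algorithm (the objective
% below is illustrative, chosen so its minimum sits at the reported [3, 2]):
fitnessFcn = @(x) (x(1) - 3)^2 + (x(2) - 2)^2;
[x_opt, fval] = ga(fitnessFcn, nVars);
% Display results
disp('Optimal solution:'), disp(x_opt);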
o/p
Optimal solution:
3.0000 2.0000
% Q-Learning (Reinforcement Learning)
clear; clc;
% Define parameters
states = 1:5; % 5 states
actions = 1:2; % 2 actions (left, right)
Q = zeros(length(states), length(actions)); % Initialize Q-table
gamma = 0.9; % Discount factor
alpha = 0.1; % Learning rate
epsilon = 0.1; % Exploration rate
% Reward matrix
R = [-1, 0, -1, -1, 10]; % Reward for each state
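% The training loop is missing in this copy. A minimal sketch (assumptions:
% the agent starts in state 1, state 5 is the terminal goal, action 1 moves
% left and action 2 moves right); the Q-update below runs inside this loop.
for episode = 1:100
    state = 1;
    while state ~= 5
        % Epsilon-greedy action selection
        if rand < epsilon
            action = randi(length(actions));
        else
            [~, action] = max(Q(state, :));
        end
        % Apply the action, clamped to the state range, and read its reward
        nextState = min(max(state + (2*action - 3), 1), length(states));
        reward = R(nextState);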
        % Update Q-value
        Q(state, action) = Q(state, action) + alpha * ...
            (reward + gamma * max(Q(nextState, :)) - Q(state, action));
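        state = nextState;
    end
end
% Display the learned Q-table
disp('Learned Q-table:'), disp(Q);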