AIML
# Candidate Elimination
import numpy as np
import pandas as pd

# Training data path is a placeholder; adjust to your CSV of attributes + yes/no label
data = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/trainingdata.csv")
concepts = np.array(data.iloc[:, :-1])
target = np.array(data.iloc[:, -1])

def learn(concepts, target):
    # S starts as the first instance; G starts maximally general
    specific_h = concepts[0].copy()
    general_h = [['?' for _ in range(len(specific_h))] for _ in range(len(specific_h))]
    for i, h in enumerate(concepts):
        print("\nInstance", i + 1, "is", h)
        if target[i] == "yes":
            print("Instance is Positive")
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    specific_h[x] = '?'
                    general_h[x][x] = '?'
        if target[i] == "no":
            print("Instance is Negative")
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    general_h[x][x] = specific_h[x]
                else:
                    general_h[x][x] = '?'
    return specific_h, general_h

specific_hypothesis, general_hypothesis = learn(concepts, target)
print("\nFinal Specific hypothesis:", specific_hypothesis)
print("Final General hypotheses:", [g for g in general_hypothesis if any(v != '?' for v in g)])
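To try the algorithm without a CSV on Drive, a minimal sketch with a hand-coded EnjoySport-style training set (these attribute values are illustrative, not the original lab data):

concepts = np.array([
    ['sunny', 'warm', 'normal', 'strong', 'warm', 'same'],
    ['sunny', 'warm', 'high',   'strong', 'warm', 'same'],
    ['rainy', 'cold', 'high',   'strong', 'warm', 'change'],
    ['sunny', 'warm', 'high',   'strong', 'cool', 'change'],
])
target = np.array(['yes', 'yes', 'no', 'yes'])
s, g = learn(concepts, target)
print(s)  # expected: ['sunny' 'warm' '?' 'strong' '?' '?']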
#3 Decision Tree
import pandas as pd
import math
import numpy as np
data = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/PlayTennis.csv")
data = data.drop(columns=["Unnamed: 0"])
features = [feat for feat in data.columns if feat != "PlayTennis"]
class Node:
    def __init__(self):
        self.children = []
        self.value = ""
        self.isLeaf = False
        self.pred = ""

def entropy(examples):
    # Count positive and negative PlayTennis outcomes
    pos = 0.0
    neg = 0.0
    for _, row in examples.iterrows():
        if row["PlayTennis"] == "Yes":
            pos += 1
        else:
            neg += 1
    if pos == 0.0 or neg == 0.0:
        return 0.0
    else:
        p = pos / (pos + neg)
        n = neg / (pos + neg)
        return -(p * math.log(p, 2) + n * math.log(n, 2))
def info_gain(examples, attr):
    # Gain = entropy of the set minus the weighted entropy of its partitions
    uniq = np.unique(examples[attr])
    gain = entropy(examples)
    for u in uniq:
        subdata = examples[examples[attr] == u]
        gain -= (len(subdata) / len(examples)) * entropy(subdata)
    return gain

def ID3(examples, attrs):
    root = Node()
    # Split on the attribute with the highest information gain
    max_gain = 0.0
    max_feat = ""
    for feature in attrs:
        gain = info_gain(examples, feature)
        if gain > max_gain:
            max_gain = gain
            max_feat = feature
    root.value = max_feat
    uniq = np.unique(examples[max_feat])
    for u in uniq:
        subdata = examples[examples[max_feat] == u]
        if entropy(subdata) == 0.0:
            newNode = Node()
            newNode.isLeaf = True
            newNode.value = u
            newNode.pred = np.unique(subdata["PlayTennis"])[0]
            root.children.append(newNode)
        else:
            dummyNode = Node()
            dummyNode.value = u
            new_attrs = attrs.copy()
            new_attrs.remove(max_feat)
            child = ID3(subdata, new_attrs)
            dummyNode.children.append(child)
            root.children.append(dummyNode)
    return root
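A minimal driver to build and display the tree, assuming the `data` and `features` defined above (the `printTree` helper here is hypothetical, not part of the original program):

def printTree(root, depth=0):
    # Indentation mirrors tree depth; leaves show the predicted class
    if root.isLeaf:
        print("  " * depth + str(root.value), "->", root.pred)
    else:
        print("  " * depth + str(root.value))
        for child in root.children:
            printTree(child, depth + 1)

root = ID3(data, features)
printTree(root)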
import numpy as np
# Sigmoid Function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Variable initialization
epoch = 5 # Setting training iterations
lr = 0.1 # Setting learning rate
# Backpropagation
EO = y - output
outgrad = derivatives_sigmoid(output)
d_output = EO * outgrad
EH = d_output.dot(wout.T)
hiddengrad = derivatives_sigmoid(hlayer_act) # how much hidden layer wts
contributed to error
d_hiddenlayer = EH * hiddengrad
print("-----------Epoch-", i + 1, "Starts----------")
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n", output)
print("-----------Epoch-", i + 1, "Ends----------\n")
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
dataset = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/tennisdata.csv")
X = dataset.iloc[:, [0, 1]].values
y = dataset.iloc[:, 2].values

# Encode the two categorical feature columns with separate encoders
le1 = LabelEncoder()
le2 = LabelEncoder()
X[:, 0] = le1.fit_transform(X[:, 0])
X[:, 1] = le2.fit_transform(X[:, 1])
X = X.astype(float)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)  # split parameters are an assumption

clf = GaussianNB()
clf.fit(X_train, y_train)
y_pred1 = clf.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred1))
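seaborn and matplotlib are imported above but never used; one plausible use is a confusion-matrix heatmap (a sketch, assuming the split above):

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred1)
sns.heatmap(cm, annot=True, fmt='d')  # counts per (actual, predicted) pair
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()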
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, recall_score, precision_score, confusion_matrix

dataset = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/document.csv")
# Assumed layout: every column except the last is a feature, the last is the label
X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]
X = X.values
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

# Standardise features before fitting the Gaussian model
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)

classifier1 = GaussianNB()
classifier1.fit(X_train, y_train)
y_pred1 = classifier1.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred1))
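recall_score and precision_score are imported but never called; for a two-class label they can be reported as below (average='binary' assumes two classes; use average='macro' for more):

print("Recall:", recall_score(y_test, y_pred1, average='binary'))
print("Precision:", precision_score(y_test, y_pred1, average='binary'))
print("Confusion matrix:\n", confusion_matrix(y_test, y_pred1))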
#7 Bayesian Network
import numpy as np
import pandas as pd
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianNetwork
from pgmpy.inference import VariableElimination
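Only the imports survive for this program; a minimal sketch of fitting and querying a network with pgmpy, on a tiny synthetic dataset (the variables, structure, and data are illustrative, not the original lab's):

# Hypothetical binary data: rain, sprinkler, and whether the grass is wet
data = pd.DataFrame({
    'rain':      np.random.randint(0, 2, 500),
    'sprinkler': np.random.randint(0, 2, 500),
})
data['grass_wet'] = ((data['rain'] | data['sprinkler']) &
                     (np.random.rand(500) < 0.9)).astype(int)

model = BayesianNetwork([('rain', 'grass_wet'), ('sprinkler', 'grass_wet')])
model.fit(data, estimator=MaximumLikelihoodEstimator)  # learn CPDs from the data

infer = VariableElimination(model)
print(infer.query(variables=['grass_wet'], evidence={'rain': 1}))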
# K-Means and GMM clustering on the iris data
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture

# Path is a placeholder; the CSV is assumed to have Petal_Length/Petal_Width
# feature columns and a numeric class label (0, 1, 2) in its last column
dataset = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/iris.csv")
X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]

plt.figure(figsize=(14, 7))
colormap = np.array(['red', 'lime', 'black'])

plt.subplot(1, 3, 1)
plt.title('Real')
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y.astype(int)])
model=KMeans(n_clusters=3, random_state=0).fit(X)
plt.subplot(1,3,2)
plt.title('KMeans')
plt.scatter(X.Petal_Length,X.Petal_Width,c=colormap[model.labels_])
gmm=GaussianMixture(n_components=3, random_state=0).fit(X)
y_cluster_gmm=gmm.predict(X)
plt.subplot(1,3,3)
plt.title('GMM Classification')
plt.scatter(X.Petal_Length,X.Petal_Width,c=colormap[y_cluster_gmm])
plt.show()
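A numeric comparison of the two clusterings against the true labels can be added with the adjusted Rand index (not part of the original program):

from sklearn.metrics import adjusted_rand_score
print("KMeans ARI:", adjusted_rand_score(y, model.labels_))
print("GMM ARI:", adjusted_rand_score(y, y_cluster_gmm))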
#9 K-Nearest Neighbours
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.datasets import load_iris
# Load dataset
iris = load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names)
y = pd.Series(iris.target)
print(X.head())

# Split, then fit the classifier (k=3 and the split parameters are assumptions)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.25, random_state=0)
classifier = KNeighborsClassifier(n_neighbors=3)
classifier.fit(Xtrain, ytrain)

# Make predictions
ypred = classifier.predict(Xtest)
# Print results
i = 0
print("\n-------------------------------------------------------------------------")
print('%-25s %-25s %-25s' % ('Original Label', 'Predicted Label', 'Correct/Wrong'))
print("-------------------------------------------------------------------------")
for label in ytest:
    print('%-25s %-25s' % (label, ypred[i]), end="")
    if label == ypred[i]:
        print(' %-25s' % ('Correct'))
    else:
        print(' %-25s' % ('Wrong'))
    i = i + 1
print("-------------------------------------------------------------------------")
print("Accuracy:", metrics.accuracy_score(ytest, ypred))
% solve(Node, Solution): find an acyclic path (in reverse order) from Node to a goal
solve(Node, Solution) :-
    depthfirst([], Node, Solution).

% depthfirst(Path, Node, Solution): extend Path through Node to a goal, avoiding cycles
depthfirst(Path, Node, [Node | Path]) :-
    goal(Node).
depthfirst(Path, Node, Sol) :-
    s(Node, Node1),            % s/2 (successor) and goal/1 are problem-specific
    \+ member(Node1, Path),    % keep the path acyclic
    depthfirst([Node | Path], Node1, Sol).

% Initialization Directive (main/0 must be supplied with the problem definition)
:- initialization(main).
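For comparison, a hedged Python rendering of the same acyclic depth-first search, with a toy successor function standing in for s/2 and goal/1:

def solve(node, successors, is_goal):
    # Returns an acyclic path from node to a goal, in reverse order (as in the Prolog version)
    def depthfirst(path, node):
        if is_goal(node):
            return [node] + path
        for nxt in successors(node):
            if nxt not in path:  # keep the path acyclic
                result = depthfirst([node] + path, nxt)
                if result:
                    return result
        return None
    return depthfirst([], node)

# Toy graph: step up by 1 or 2 from 0 until reaching 5
print(solve(0, lambda n: [n + 1, n + 2], lambda n: n == 5))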
#14 Best-First Search
import heapq

class Node:
    def __init__(self, state, parent, cost, heuristic):
        self.state = state
        self.parent = parent
        self.cost = cost
        self.heuristic = heuristic

    def __lt__(self, other):
        # heapq compares nodes via this: greedy best-first orders by heuristic alone
        return self.heuristic < other.heuristic

def best_first_search(start, goal, heuristic, get_neighbors):
    open_list = []
    closed_list = set()
    heapq.heappush(open_list, Node(start, None, 0, heuristic(start, goal)))
    while open_list:
        current_node = heapq.heappop(open_list)
        if current_node.state == goal:
            return reconstruct_path(current_node)
        if current_node.state in closed_list:
            continue
        closed_list.add(current_node.state)
        for neighbor, step_cost in get_neighbors(current_node.state):
            if neighbor not in closed_list:
                heapq.heappush(open_list, Node(neighbor, current_node,
                                               current_node.cost + step_cost,
                                               heuristic(neighbor, goal)))
    return None
def reconstruct_path(node):
    path = []
    while node:
        path.append(node.state)
        node = node.parent
    return path[::-1]

def get_neighbors(state):
    # 4-connected moves on a 5x5 grid, each with unit step cost
    neighbors = []
    x, y = state
    moves = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    for move in moves:
        neighbor = (x + move[0], y + move[1])
        if 0 <= neighbor[0] < 5 and 0 <= neighbor[1] < 5:
            neighbors.append((neighbor, 1))
    return neighbors
def manhattan_distance(state, goal):
    return abs(state[0] - goal[0]) + abs(state[1] - goal[1])

start = (0, 0)
goal = (4, 4)
path = best_first_search(start, goal, manhattan_distance, get_neighbors)
print("Path found:", path)
% Production Rules
% A route is either a direct road, or a road followed by a further route
route(Town1, Town2, Distance) :-
    road(Town1, Town2, Distance).
route(Town1, Town2, Distance) :-
    road(Town1, X, Dist1),
    route(X, Town2, Dist2),
    Distance is Dist1 + Dist2.

% Domains
% town = symbol
% distance = integer

% Predicates
% nondeterm road(town, town, distance)
% nondeterm route(town, town, distance)

% Clauses
road("tampa", "houston", 200).
road("gordon", "tampa", 300).
road("houston", "gordon", 100).
road("houston", "kansas_city", 120).
road("gordon", "kansas_city", 130).

% Initialization Directive (the sample query in main/0 is an assumption)
main :-
    route("tampa", "kansas_city", Distance),
    write(Distance), nl.
:- initialization(main).