AIML Lab — collected code listings: graph-search algorithms and machine-learning programs
# Input example: unweighted adjacency lists for a six-node DAG.
graph = dict(
    A=['B', 'C'],
    B=['D', 'E'],
    C=['F'],
    D=[],
    E=['F'],
    F=[],
)
start, goal = 'A', 'F'
print(bfs(graph, start, goal))  # bfs is defined elsewhere in these notes
```
def dijkstra(graph, start, goal):
    """Return the cost of the cheapest path from `start` to `goal`.

    `graph` maps each node to a list of (neighbor, edge_weight) pairs.
    Returns float('inf') when `goal` is unreachable.

    NOTE(review): the `def` header had been lost from this fragment, leaving
    a bare `return` at module level (a SyntaxError); it is restored here.
    """
    pq = [(0, start)]        # min-heap of (cost-so-far, node)
    dist = {start: 0}        # best known cost to each discovered node
    while pq:
        cost, node = heapq.heappop(pq)
        if node == goal:
            return cost      # first pop of goal is the optimal cost
        for neighbor, weight in graph[node]:
            new_cost = cost + weight
            # Relax the edge only when it improves on the best known cost.
            if neighbor not in dist or new_cost < dist[neighbor]:
                dist[neighbor] = new_cost
                heapq.heappush(pq, (new_cost, neighbor))
    return float('inf')
# Input example: weighted adjacency lists as (neighbor, cost) pairs.
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': [],
}
start, goal = 'A', 'F'
# Cheapest route is A -> C -> F with total cost 5.
print(dijkstra(graph, start, goal))
``` Output: 5  (cheapest path A → C → F costs 4 + 1 = 5; the previously stated 3 is the cost to D, not F)
# Input example: run the depth-first traversal from 'A' toward 'F'.
graph = dict(
    A=['B', 'C'],
    B=['D', 'E'],
    C=['F'],
    D=[],
    E=['F'],
    F=[],
)
start, goal = 'A', 'F'
print(dfs(graph, start, goal))  # dfs is defined elsewhere in these notes
```
# Input example: weighted graph plus a heuristic table (0 at the goal).
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': [],
}
H = {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 1, 'F': 0}
h = H.__getitem__  # same lookup behavior as the original lambda
start, goal = 'A', 'F'
print(a_star(graph, start, goal, h))
```
# Input example: iterative-deepening DFS capped at depth 3.
graph = dict(
    A=['B', 'C'],
    B=['D', 'E'],
    C=['F'],
    D=[],
    E=['F'],
    F=[],
)
start, goal = 'A', 'F'
max_depth = 3
print(iddfs(graph, start, goal, max_depth))
```
# Input example: weighted graph, heuristic table, and a memory (node) cap.
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': [],
}
H = {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 1, 'F': 0}
h = H.__getitem__  # same lookup behavior as the original lambda
start, goal = 'A', 'F'
memory_limit = 10
print(memory_bounded_a_star(graph, start, goal, h, memory_limit))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import pandas as pd

# Read the table; the last column is the label, the rest are features.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# Hold out 30% of the rows for evaluation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Fit a Gaussian Naive Bayes classifier and score it on the hold-out set.
model = GaussianNB().fit(X_train, y_train)  # fit() returns the estimator
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
```python
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import MaximumLikelihoodEstimator

# Observations used to estimate the conditional probability tables.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file

# Network structure A -> B -> C.  # Replace with your structure
model = BayesianNetwork([('A', 'B'), ('B', 'C')])

# Learn the CPDs by maximum likelihood, then display them.
model.fit(data, estimator=MaximumLikelihoodEstimator)
print(model.get_cpds())
```
# NOTE(review): this script used pd/train_test_split/LinearRegression/
# mean_squared_error without importing them (NameError); imports added.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X = data.iloc[:, :-1]  # Features
y = data.iloc[:, -1]  # Target
# Split data: 70/30 with a fixed seed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=42)
# Train model: ordinary least-squares linear regression
model = LinearRegression()
model.fit(X_train, y_train)
# Predict and report hold-out mean squared error
y_pred = model.predict(X_test)
print("Mean Squared Error:", mean_squared_error(y_test, y_pred))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Features are every column but the last; the last column is the class label.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# 70/30 train/test split with a fixed seed.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Fit a decision tree (default settings) and report hold-out accuracy.
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load the table and split off the label column (last) from the features.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# Reserve 30% of rows for testing; seed fixed so the split is repeatable.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Random forest with library defaults; score on the hold-out rows.
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
Here are the concise implementations for each of the requested tasks:
```python
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import pandas as pd
# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target
# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=42)
# Train model
model = SVC()
model.fit(X_train, y_train)
# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X = data.iloc[:, :-1]  # Features
y = data.iloc[:, -1]  # Target
# Split data: 70/30 with a fixed seed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=42)
# Train model: bag 10 decision trees on bootstrap samples.
# NOTE: the `base_estimator` keyword was renamed to `estimator` in
# scikit-learn 1.2 and removed in 1.4; the current name is used here.
model = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10,
                          random_state=42)
model.fit(X_train, y_train)
# Predict and report hold-out accuracy
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
```python
from sklearn.cluster import KMeans
import pandas as pd

# Cluster the feature columns (all but the last) into three groups.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X = data.iloc[:, :-1]

# Fit k-means with a fixed seed, then assign every row to its centroid.
model = KMeans(n_clusters=3, random_state=42)
model.fit(X)
clusters = model.predict(X)
print("Cluster labels:", clusters)
```
```python
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import ExpectationMaximization
# NOTE(review): Sequential/Dense were used below without being imported.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Load dataset
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
# Define structure
model = BayesianNetwork([('A', 'B'), ('B', 'C')])  # Replace with your structure
# NOTE(review): this Bayesian network is never fitted before `model` is
# rebound to a Keras model below — an EM fit step was presumably lost from
# the notes; confirm against the original source.

# Load dataset
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X = data.iloc[:, :-1].values  # Features
y = data.iloc[:, -1].values  # Target
# Define model: a small fully-connected binary classifier
model = Sequential([
    Dense(64, input_dim=X.shape[1], activation='relu'),
    Dense(32, activation='relu'),
    # Change to 'softmax' for multi-class classification.
    # (This comment had been hard-wrapped into the code, leaving the bare
    # word `classification` as a statement — a syntax error; fixed.)
    Dense(1, activation='sigmoid')
])
# Compile model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train model
model.fit(X, y, epochs=10, batch_size=32, validation_split=0.2)
# Evaluate model
loss, accuracy = model.evaluate(X, y)
print("Accuracy:", accuracy)
```
```python
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
# Parenthesized: the original import list was hard-wrapped mid-statement,
# leaving `Embedding` alone on the next line (a SyntaxError).
from tensorflow.keras.layers import (
    Dense, Conv2D, MaxPooling2D, Flatten, LSTM, Embedding,
)


def build_model():
    """Build and return a small dense softmax classifier.

    NOTE(review): the original fragment kept only the tail of this function
    (the final two Dense layers and `return model`); the header and the start
    of the Sequential list were lost — confirm against the source notes.
    """
    model = Sequential([
        Dense(64, activation='relu'),
        Dense(10, activation='softmax'),  # Adjust for number of classes
    ])
    return model
```python
import heapq  # used by the Dijkstra implementation in these notes

# Input example: (neighbor, edge-cost) adjacency lists.
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': [],
}
start, goal = 'A', 'F'
print(dijkstra(graph, start, goal))
```
```python
# Input example: run the traversal-order BFS from node 'A'.
graph = dict(
    A=['B', 'C'],
    B=['D', 'E'],
    C=['F'],
    D=[],
    E=['F'],
    F=[],
)
start = 'A'
bfs(graph, start)
```
```python
def dfs(graph, start, visited=None):
    """Recursive depth-first traversal from `start`, printing each node once.

    `visited` is threaded through the recursion; callers normally omit it.

    NOTE(review): the recursive call on unvisited neighbors had been dropped
    from this fragment, leaving an empty `if` body (a syntax error); it is
    restored here, matching the complete copy later in these notes.
    """
    if visited is None:
        visited = set()
    visited.add(start)
    print(start, end=' ')
    for neighbor in graph[start]:
        if neighbor not in visited:
            dfs(graph, neighbor, visited)


# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start = 'A'
dfs(graph, start)
```
### 5. A* Algorithm for Informed Search
```python
import heapq  # used by the A* implementation in these notes

# Input example: weighted graph plus a heuristic table (0 at the goal).
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': [],
}
H = {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 1, 'F': 0}
h = H.__getitem__  # same lookup behavior as the original lambda
start, goal = 'A', 'F'
print(a_star(graph, start, goal, h))
```
```python
import heapq
```python
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import pandas as pd

# Load the table; split off the label column (last) from the features.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X = data.iloc[:, :-1]
y = data.iloc[:, -1]

# 30% of rows held out for testing; seed fixed so the split is repeatable.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Gaussian Naive Bayes: per-class normal likelihoods on each feature.
model = GaussianNB()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
### 3. Bayesian Network for Medical Diagnosis (Heart Disease Data Set)
```python
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import MaximumLikelihoodEstimator, BayesianEstimator
from pgmpy.inference import VariableElimination

# Load dataset
data = pd.read_csv('heart_disease.csv')  # Replace with your dataset file
# Define structure.
# NOTE(review): `model` was used below without ever being defined; a minimal
# structure over the variables referenced in the query is supplied here —
# replace with the real network for the heart-disease data set.
model = BayesianNetwork([('Age', 'HeartDisease'), ('Cholesterol', 'HeartDisease')])
# Train model
model.fit(data, estimator=MaximumLikelihoodEstimator)
# Inference: P(HeartDisease | Age=50, Cholesterol=240)
inference = VariableElimination(model)
query_result = inference.query(variables=['HeartDisease'],
                               evidence={'Age': 50, 'Cholesterol': 240})
print(query_result)
```
```python
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import pandas as pd

# Last column is the regression target; everything before it is a feature.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# Reserve 30% of the rows for evaluation (fixed seed).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Fit ordinary least squares and report hold-out MSE.
model = LinearRegression().fit(X_train, y_train)  # fit() returns the estimator
y_pred = model.predict(X_test)
print("Mean Squared Error:", mean_squared_error(y_test, y_pred))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
# NOTE(review): X and y were used below without being defined (NameError);
# the feature/target split used throughout these scripts is restored here.
X = data.iloc[:, :-1]  # Features
y = data.iloc[:, -1]  # Target
# Split data: 70/30 with a fixed seed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=42)
# Train model
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
# Predict and report hold-out accuracy
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Features are every column but the last; the last column is the class label.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# 70/30 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# A 100-tree forest, seeded for a deterministic ensemble.
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import pandas as pd

# Read the table; last column is the class label, the rest are features.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# Hold out 30% of the rows for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Support vector classifier with library defaults (RBF kernel).
model = SVC()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("SVM Classification Accuracy:", accuracy_score(y_test, y_pred))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
import pandas as pd

# Regression variant: last column is a numeric target.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# 70/30 split, seeded for repeatability.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Support vector regression with library defaults; report hold-out MSE.
model = SVR()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("SVM Regression Mean Squared Error:", mean_squared_error(y_test, y_pred))
```
```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Split the table into features (all but last column) and the class label.
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# Reserve 30% of the rows for testing (fixed seed).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Seeded 100-tree random forest, scored on the hold-out rows.
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Random Forest Accuracy:", accuracy_score(y_test, y_pred))
```
#### Gradient Boosting
```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X = data.iloc[:, :-1]  # Features
# NOTE(review): `y` was used below without being defined (NameError); the
# target extraction used throughout these scripts is restored here.
y = data.iloc[:, -1]  # Target
# Split data: 70/30 with a fixed seed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=42)
# Train model: 100 boosting stages, seeded for reproducibility
model = GradientBoostingClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)
# Predict and report hold-out accuracy
y_pred = model.predict(X_test)
print("Gradient Boosting Accuracy:", accuracy_score(y_test, y_pred))
```
```python
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import ExpectationMaximization
# Load dataset for EM parameter learning.
# NOTE(review): the fragment ends here — the network structure definition and
# the `model.fit(data, estimator=ExpectationMaximization)` step appear to be
# missing; confirm against the source notes.
data = pd.read_csv('dataset.csv') # Replace with your dataset file
```
```python
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# NOTE(review): `model`, `X_train`, and `y_train` are used below but never
# defined in this fragment — the model construction and data-loading lines
# were presumably lost; confirm against the source notes.
# Compile model: binary cross-entropy implies a sigmoid output layer.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train model for 10 epochs in mini-batches of 10.
model.fit(X_train, y_train, epochs=10, batch_size=10)
```
```python
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
# Define model: a 20-feature input, two hidden layers, 3-way softmax output.
model = Sequential([
Dense(128, input_dim=20, activation='relu'),
Dense(64, activation='relu'),
Dense(3, activation='softmax')
])
# Compile model: categorical cross-entropy expects one-hot labels
# (hence the `to_categorical` import — though it is never called here).
model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
# Train model.
# NOTE(review): `X_train`/`y_train` are never defined in this fragment — the
# data-preparation lines were presumably lost; confirm against the source.
model.fit(X_train, y_train, epochs=20, batch_size=32, validation_split=0.2)
```
#### BFS
```python
from collections import deque


def bfs(graph, start):
    """Breadth-first traversal from `start`, printing each node once.

    NOTE(review): the original fragment kept only the two innermost lines of
    this function (`visited.add(...)` / `queue.append(...)`); the standard
    printing BFS they belong to is reconstructed here — confirm against the
    source notes.
    """
    visited = {start}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        print(node, end=' ')
        for neighbor in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)


# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start = 'A'
bfs(graph, start)
```
#### DFS
```python
def dfs(graph, start, visited=None):
    """Depth-first traversal from `start`, printing each node exactly once.

    `visited` is threaded through the recursion; callers normally omit it.
    """
    if visited is None:
        visited = set()
    visited.add(start)
    print(start, end=' ')
    for nxt in graph[start]:
        if nxt in visited:
            continue  # already printed on an earlier branch
        dfs(graph, nxt, visited)


# Input example (same adjacency lists, written as keyword pairs)
graph = dict(
    A=['B', 'C'],
    B=['D', 'E'],
    C=['F'],
    D=[],
    E=['F'],
    F=[],
)
start = 'A'
dfs(graph, start)
```
```python
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import pandas as pd

# Multiple regression: several feature columns, one numeric target (last).
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# 70/30 split with a fixed seed.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)

# Fit and score on the hold-out rows.
model = LinearRegression().fit(X_train, y_train)  # fit() returns the estimator
y_pred = model.predict(X_test)
print("Multiple Regression Mean Squared Error:",
      mean_squared_error(y_test, y_pred))