# AIML Lab

This document provides implementations of search algorithms (BFS, DFS, Dijkstra's, A*, IDDFS, and memory-bounded A*) and machine learning techniques (Naive Bayes, Bayesian networks, regression, decision trees, random forests, SVMs, clustering, and neural networks). Each section includes a Python snippet demonstrating how to load data, train a model, and evaluate its performance.


### 1. BFS (Breadth-First Search)

```python
from collections import deque

def bfs(graph, start, goal):
    queue = deque([start])
    visited = {start}
    while queue:
        node = queue.popleft()
        if node == goal:
            return True
        for neighbor in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)
    return False

# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start, goal = 'A', 'F'
print(bfs(graph, start, goal))
```
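
The function above only reports reachability. If the actual route is needed, BFS can record each node's parent and walk back from the goal. A minimal sketch of that variant, reusing `deque` and `graph` from the block above (the function name `bfs_path` is illustrative):

```python
def bfs_path(graph, start, goal):
    """Return the fewest-edge path from start to goal, or None."""
    parent = {start: None}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        if node == goal:
            # Walk parent links back from the goal to reconstruct the path
            path = []
            while node is not None:
                path.append(node)
                node = parent[node]
            return path[::-1]
        for neighbor in graph[node]:
            if neighbor not in parent:
                parent[neighbor] = node
                queue.append(neighbor)
    return None

print(bfs_path(graph, 'A', 'F'))  # ['A', 'C', 'F']
```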

### 2. Shortest Path with Cost Function (Dijkstra's Algorithm)


```python
import heapq

def dijkstra(graph, start, goal):
    pq = [(0, start)]
    dist = {start: 0}
    while pq:
        cost, node = heapq.heappop(pq)
        if node == goal:
            return cost
        for neighbor, weight in graph[node]:
            new_cost = cost + weight
            if neighbor not in dist or new_cost < dist[neighbor]:
                dist[neighbor] = new_cost
                heapq.heappush(pq, (new_cost, neighbor))
    return float('inf')

# Input example
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': []
}
start, goal = 'A', 'F'
print(dijkstra(graph, start, goal))
```

Output: `5` (shortest path A -> C -> F, cost 4 + 1).

### 3. DFS (Depth-First Search)


```python
def dfs(graph, start, goal, visited=None):
    if visited is None:
        visited = set()
    visited.add(start)
    if start == goal:
        return True
    for neighbor in graph[start]:
        if neighbor not in visited:
            if dfs(graph, neighbor, goal, visited):
                return True
    return False

# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start, goal = 'A', 'F'
print(dfs(graph, start, goal))
```

### 4. A* (A-Star Search)


```python
import heapq

def a_star(graph, start, goal, h):
    pq = [(0 + h(start), 0, start)]
    dist = {start: 0}
    while pq:
        _, cost, node = heapq.heappop(pq)
        if node == goal:
            return cost
        for neighbor, weight in graph[node]:
            new_cost = cost + weight
            if neighbor not in dist or new_cost < dist[neighbor]:
                dist[neighbor] = new_cost
                heapq.heappush(pq, (new_cost + h(neighbor), new_cost, neighbor))
    return float('inf')

# Input example
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': []
}
h = lambda x: {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 1, 'F': 0}[x]
start, goal = 'A', 'F'
print(a_star(graph, start, goal, h))
```
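
A* is only guaranteed to return an optimal cost when the heuristic is admissible, i.e. it never overestimates the true remaining cost. As a quick sanity check for the example above, the sketch below compares `h` against the true remaining costs to `F`, which can be read off the small graph by hand (e.g. A -> C -> F = 4 + 1 = 5; the `true_cost` table is worked out manually, not computed by the search):

```python
# True remaining cost from each node to 'F' in the example graph, by hand:
# A: 5 (A->C->F), B: 7 (B->E->F), C: 1, D: unreachable, E: 2, F: 0
true_cost = {'A': 5, 'B': 7, 'C': 1, 'D': float('inf'), 'E': 2, 'F': 0}
h = lambda x: {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 1, 'F': 0}[x]

# Admissible means h(n) <= true_cost[n] for every node n
for node, cost in true_cost.items():
    assert h(node) <= cost, f"h overestimates at {node}"
print("h is admissible for this graph")
```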

### 5. Limited Memory Task (Iterative Deepening Depth-First Search)


```python
def iddfs(graph, start, goal, max_depth):
    def dls(node, depth):
        if depth == 0 and node == goal:
            return True
        if depth > 0:
            for neighbor in graph[node]:
                if dls(neighbor, depth - 1):
                    return True
        return False

    for depth in range(max_depth + 1):
        if dls(start, depth):
            return True
    return False

# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start, goal = 'A', 'F'
max_depth = 3
print(iddfs(graph, start, goal, max_depth))
```
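
Because IDDFS re-runs the depth-limited search with increasing limits, it naturally discovers the shallowest depth at which the goal appears. A small variant of the function above that returns that depth instead of a boolean, reusing `graph` from the previous block (the name `iddfs_depth` is illustrative):

```python
def iddfs_depth(graph, start, goal, max_depth):
    """Return the shallowest depth at which goal is found, or None."""
    def dls(node, depth):
        if depth == 0:
            return node == goal
        return any(dls(n, depth - 1) for n in graph[node])

    for depth in range(max_depth + 1):
        if dls(start, depth):
            return depth
    return None

print(iddfs_depth(graph, 'A', 'F', 3))  # 2, via A -> C -> F
```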

### 6. Memory-Bounded A* (Simplified MA*)


```python
import heapq

def memory_bounded_a_star(graph, start, goal, h, memory_limit):
    pq = [(0 + h(start), 0, start)]
    dist = {start: 0}
    # Simplified memory bound: stop once the frontier reaches memory_limit entries
    while pq and len(pq) < memory_limit:
        _, cost, node = heapq.heappop(pq)
        if node == goal:
            return cost
        for neighbor, weight in graph[node]:
            new_cost = cost + weight
            if neighbor not in dist or new_cost < dist[neighbor]:
                dist[neighbor] = new_cost
                heapq.heappush(pq, (new_cost + h(neighbor), new_cost, neighbor))
    return float('inf')

# Input example
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': []
}
h = lambda x: {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 1, 'F': 0}[x]
start, goal = 'A', 'F'
memory_limit = 10
print(memory_bounded_a_star(graph, start, goal, h, memory_limit))
```
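
With a generous limit the search behaves like plain A*; with a very small one it gives up before reaching the goal and returns infinity. A quick check, reusing `graph` and `h` from the block above (the outputs follow from tracing the frontier sizes by hand):

```python
print(memory_bounded_a_star(graph, 'A', 'F', h, memory_limit=10))  # 5
print(memory_bounded_a_star(graph, 'A', 'F', h, memory_limit=2))   # inf
```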

### 1. Naive Bayes Model for Data Set Analysis

```python
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = GaussianNB()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
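
The snippet above assumes a local `dataset.csv`. For a fully runnable check, here is the same pipeline on scikit-learn's bundled Iris data; swapping the CSV for `load_iris` is the only change:

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

# Iris ships with scikit-learn, so no dataset file is needed
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

model = GaussianNB()
model.fit(X_train, y_train)
print("Accuracy:", accuracy_score(y_test, model.predict(X_test)))
```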

### 2. Bayesian Networks for Probability Relationship Check

```python
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import MaximumLikelihoodEstimator

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file

# Define structure
model = BayesianNetwork([('A', 'B'), ('B', 'C')]) # Replace with your structure

# Train model
model.fit(data, estimator=MaximumLikelihoodEstimator)

# Check probability
print(model.get_cpds())
```
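
pgmpy's maximum-likelihood fit expects discrete data whose column names match the network's nodes. A self-contained sketch with synthetic binary data, under the assumption that the illustrative structure A -> B -> C and the random columns stand in for your real dataset:

```python
import numpy as np
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import MaximumLikelihoodEstimator

# Synthetic binary observations for the nodes A, B, C
rng = np.random.default_rng(0)
data = pd.DataFrame(rng.integers(0, 2, size=(500, 3)), columns=['A', 'B', 'C'])

model = BayesianNetwork([('A', 'B'), ('B', 'C')])
model.fit(data, estimator=MaximumLikelihoodEstimator)
for cpd in model.get_cpds():
    print(cpd)
```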

### 3. Regression Models for Outcome Prediction


```python
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = LinearRegression()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Mean Squared Error:", mean_squared_error(y_test, y_pred))
```
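
To run the regression pipeline without a dataset file, scikit-learn can generate a synthetic problem. A minimal sketch, assuming the sample counts and noise level are just illustrative:

```python
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Synthetic regression problem: 100 samples, 5 features, Gaussian noise
X, y = make_regression(n_samples=100, n_features=5, noise=10.0, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

model = LinearRegression()
model.fit(X_train, y_train)
print("Mean Squared Error:", mean_squared_error(y_test, model.predict(X_test)))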

### 4. Decision Trees for Output Prediction

```python
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = DecisionTreeClassifier()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```
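
To inspect what the fitted tree actually learned, scikit-learn can render its split rules as plain text. A short addition to the script above, assuming `model` and `X` from that block and that your CSV has named columns:

```python
from sklearn.tree import export_text

# Print the learned decision rules; feature names must match the CSV columns
rules = export_text(model, feature_names=list(X.columns))
print(rules)
```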

### 5. Random Forests for Data Set Analysis

```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X = data.iloc[:, :-1]  # Features
y = data.iloc[:, -1]  # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = RandomForestClassifier()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```

Here are the concise implementations for each of the requested tasks:

### 1. Support Vector Machine (SVM) for Classification

```python
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = SVC()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```

### 2. Ensembling Techniques (Bagging)

```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model (the keyword was named base_estimator= before scikit-learn 1.2)
model = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10, random_state=42)
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```

### 3. Clustering Algorithms (K-Means)

```python
from sklearn.cluster import KMeans
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features

# Train model
model = KMeans(n_clusters=3, random_state=42)
model.fit(X)

# Predict clusters
clusters = model.predict(X)
print("Cluster labels:", clusters)
```
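
K-Means requires `n_clusters` to be chosen up front. A common heuristic is the elbow method: fit a range of k values and watch where the within-cluster sum of squares (`inertia_`) stops dropping sharply. A sketch reusing `X` and the `KMeans` import from the block above:

```python
# Elbow method: inspect inertia for a range of cluster counts
for k in range(2, 8):
    km = KMeans(n_clusters=k, random_state=42)
    km.fit(X)
    print(f"k={k}: inertia={km.inertia_:.2f}")
```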

### 4. EM Algorithm for Bayesian Networks

```python
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import ExpectationMaximization

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file

# Define structure
model = BayesianNetwork([('A', 'B'), ('B', 'C')]) # Replace with your structure

# Train model using EM (suited to data with missing values or latent variables)
model.fit(data, estimator=ExpectationMaximization)

# Check learned parameters
print(model.get_cpds())
```

### 5. Neural Network Model for Pattern Optimization (using Keras)


```python
import numpy as np
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1].values # Features
y = data.iloc[:, -1].values # Target

# Define model
model = Sequential([
    Dense(64, input_dim=X.shape[1], activation='relu'),
    Dense(32, activation='relu'),
    Dense(1, activation='sigmoid')  # use 'softmax' for multi-class classification
])

# Compile model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train model
model.fit(X, y, epochs=10, batch_size=32, validation_split=0.2)

# Evaluate model
loss, accuracy = model.evaluate(X, y)
print("Accuracy:", accuracy)
```

Here are the requested implementations:

### 1. Deep-Learning Neural Networks: Experimenting with Different Architectures

```python
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, LSTM, Embedding
import numpy as np

# Example for an image classification model (CNN)
def cnn_model(input_shape):
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax')  # Adjust for number of classes
    ])
    return model

# Example for a text classification model (RNN)
def rnn_model(input_length, vocab_size):
    model = Sequential([
        Embedding(vocab_size, 64, input_length=input_length),
        LSTM(128, return_sequences=True),
        LSTM(128),
        Dense(64, activation='relu'),
        Dense(1, activation='sigmoid')  # Adjust for binary/multi-class classification
    ])
    return model

# Example for a basic MLP model
def mlp_model(input_shape):
    model = Sequential([
        Dense(128, activation='relu', input_shape=input_shape),
        Dense(64, activation='relu'),
        Dense(1, activation='sigmoid')  # Adjust for binary/multi-class classification
    ])
    return model

# Compile and train a model (example)
input_shape = (28, 28, 1)  # Example input shape for image data
model = cnn_model(input_shape)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Assuming X_train, y_train are prepared
# model.fit(X_train, y_train, epochs=10, batch_size=32, validation_split=0.2)
```
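
To exercise the CNN end to end, one option is the MNIST digits set that ships with Keras, whose 28x28 grayscale images match the example input shape. A sketch reusing `tf` and `cnn_model` from the block above (a single epoch, just to verify the pipeline runs):

```python
# Load MNIST, add a channel axis, and scale pixels to [0, 1]
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train[..., None] / 255.0
X_test = X_test[..., None] / 255.0

model = cnn_model((28, 28, 1))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=1, batch_size=64, validation_split=0.1)
print("Test accuracy:", model.evaluate(X_test, y_test, verbose=0)[1])
```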

### 2. Shortest Path with Cost Function (Dijkstra's Algorithm)

```python
import heapq

def dijkstra(graph, start, goal):
    pq = [(0, start)]
    dist = {start: 0}
    while pq:
        cost, node = heapq.heappop(pq)
        if node == goal:
            return cost
        for neighbor, weight in graph[node]:
            new_cost = cost + weight
            if neighbor not in dist or new_cost < dist[neighbor]:
                dist[neighbor] = new_cost
                heapq.heappush(pq, (new_cost, neighbor))
    return float('inf')

# Input example
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': []
}
start, goal = 'A', 'F'
print(dijkstra(graph, start, goal))
```

### 3. BFS (Breadth-First Search) with Adjacency List

```python

from collections import deque

def bfs(graph, start):
    queue = deque([start])
    visited = set([start])
    while queue:
        node = queue.popleft()
        print(node, end=' ')
        for neighbor in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)

# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start = 'A'
bfs(graph, start)
```

### 4. DFS (Depth-First Search) with Adjacency List

```python
def dfs(graph, start, visited=None):
    if visited is None:
        visited = set()
    visited.add(start)
    print(start, end=' ')
    for neighbor in graph[start]:
        if neighbor not in visited:
            dfs(graph, neighbor, visited)

# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start = 'A'
dfs(graph, start)
```

### 5. A* Algorithm for Informed Search

```python
import heapq

def a_star(graph, start, goal, h):
    pq = [(0 + h(start), 0, start)]
    dist = {start: 0}
    while pq:
        _, cost, node = heapq.heappop(pq)
        if node == goal:
            return cost
        for neighbor, weight in graph[node]:
            new_cost = cost + weight
            if neighbor not in dist or new_cost < dist[neighbor]:
                dist[neighbor] = new_cost
                heapq.heappush(pq, (new_cost + h(neighbor), new_cost, neighbor))
    return float('inf')

# Input example
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': []
}
h = lambda x: {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 1, 'F': 0}[x]
start, goal = 'A', 'F'
print(a_star(graph, start, goal, h))
```

Here are the implementations for each requested task:

### 1. Memory-Bounded A* Algorithm

```python
import heapq

def memory_bounded_a_star(graph, start, goal, h, memory_limit):
    pq = [(0 + h(start), 0, start)]
    dist = {start: 0}
    # Simplified memory bound: stop once the frontier reaches memory_limit entries
    while pq and len(pq) < memory_limit:
        _, cost, node = heapq.heappop(pq)
        if node == goal:
            return cost
        for neighbor, weight in graph[node]:
            new_cost = cost + weight
            if neighbor not in dist or new_cost < dist[neighbor]:
                dist[neighbor] = new_cost
                heapq.heappush(pq, (new_cost + h(neighbor), new_cost, neighbor))
    return float('inf')

# Input example
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('D', 2), ('E', 5)],
    'C': [('F', 1)],
    'D': [],
    'E': [('F', 2)],
    'F': []
}
h = lambda x: {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 1, 'F': 0}[x]
start, goal = 'A', 'F'
memory_limit = 10
print(memory_bounded_a_star(graph, start, goal, h, memory_limit))
```

### 2. Gaussian Naive Bayes Model

```python
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = GaussianNB()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```

### 3. Bayesian Network for Medical Diagnosis (Heart Disease Data Set)

```python
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import MaximumLikelihoodEstimator, BayesianEstimator
from pgmpy.inference import VariableElimination

# Load dataset
data = pd.read_csv('heart_disease.csv') # Replace with your dataset file

# Define structure (Example: ['Age', 'Cholesterol', 'HeartDisease'])
model = BayesianNetwork([('Age', 'HeartDisease'), ('Cholesterol', 'HeartDisease')])

# Train model
model.fit(data, estimator=MaximumLikelihoodEstimator)

# Inference (evidence values must match states that appear in the data)
inference = VariableElimination(model)
query_result = inference.query(variables=['HeartDisease'], evidence={'Age': 50, 'Cholesterol': 240})
print(query_result)
```
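
pgmpy's maximum-likelihood estimator treats every distinct value as a separate state, so continuous columns such as Age and Cholesterol are usually binned before fitting, and the evidence then uses the bin labels. A sketch using pandas, assuming the bin edges and labels are illustrative and that `data`, `BayesianNetwork`, `MaximumLikelihoodEstimator`, and `VariableElimination` come from the block above:

```python
# Discretize continuous columns; evidence must then use these bin labels
data['Age'] = pd.cut(data['Age'], bins=[0, 40, 60, 120],
                     labels=['young', 'middle', 'old'])
data['Cholesterol'] = pd.cut(data['Cholesterol'], bins=[0, 200, 240, 600],
                             labels=['normal', 'borderline', 'high'])

model = BayesianNetwork([('Age', 'HeartDisease'), ('Cholesterol', 'HeartDisease')])
model.fit(data, estimator=MaximumLikelihoodEstimator)

inference = VariableElimination(model)
print(inference.query(variables=['HeartDisease'],
                      evidence={'Age': 'middle', 'Cholesterol': 'high'}))
```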

### 4. Linear Regression

```python
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = LinearRegression()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Mean Squared Error:", mean_squared_error(y_test, y_pred))
```

### 5. Decision Trees and Random Forests

#### Decision Tree

```python
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv')  # Replace with your dataset file
X = data.iloc[:, :-1]  # Features
y = data.iloc[:, -1]  # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = DecisionTreeClassifier()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```

#### Random Forest

```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
```

Here are the Python programs for each requested task:

### 1. SVM Models for Classification and Regression

#### SVM Classification

```python
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = SVC()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("SVM Classification Accuracy:", accuracy_score(y_test, y_pred))
```

#### SVM Regression

```python
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = SVR()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("SVM Regression Mean Squared Error:", mean_squared_error(y_test, y_pred))
```

### 2. Random Forest and Gradient Boosting

#### Random Forest

```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Random Forest Accuracy:", accuracy_score(y_test, y_pred))
```

#### Gradient Boosting

```python
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1]  # Features
y = data.iloc[:, -1]  # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = GradientBoostingClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Gradient Boosting Accuracy:", accuracy_score(y_test, y_pred))
```

### 3. EM Algorithm for Bayesian Networks

```python
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import ExpectationMaximization

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file

# Define structure (Example: ['A', 'B', 'C'])
model = BayesianNetwork([('A', 'B'), ('B', 'C')])

# Train model using EM (suited to data with missing values or latent variables)
model.fit(data, estimator=ExpectationMaximization)

# Check learned parameters
print(model.get_cpds())
```

### 4. Simple Neural Network Model (Keras)

```python
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Generate some sample data
X_train = np.random.rand(100, 10)
y_train = np.random.randint(2, size=100)

# Define model
model = Sequential([
    Dense(64, input_dim=10, activation='relu'),
    Dense(1, activation='sigmoid')
])

# Compile model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train model
model.fit(X_train, y_train, epochs=10, batch_size=10)
```

### 5. Deep Neural Network Model (MLP for Multi-Class Classification)

```python
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical

# Generate some sample data
X_train = np.random.rand(1000, 20)
y_train = to_categorical(np.random.randint(3, size=1000), num_classes=3)

# Define model
model = Sequential([
    Dense(128, input_dim=20, activation='relu'),
    Dense(64, activation='relu'),
    Dense(3, activation='softmax')
])

# Compile model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train model
model.fit(X_train, y_train, epochs=20, batch_size=32, validation_split=0.2)
```

### 6. Breadth-First Search (BFS) and Depth-First Search (DFS)

#### BFS

```python
from collections import deque

def bfs(graph, start):
    queue = deque([start])
    visited = set([start])
    while queue:
        node = queue.popleft()
        print(node, end=' ')
        for neighbor in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)

# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start = 'A'
bfs(graph, start)
```

#### DFS

```python
def dfs(graph, start, visited=None):
    if visited is None:
        visited = set()
    visited.add(start)
    print(start, end=' ')
    for neighbor in graph[start]:
        if neighbor not in visited:
            dfs(graph, neighbor, visited)

# Input example
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
start = 'A'
dfs(graph, start)
```

### 7. Multiple Regression

```python
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import pandas as pd

# Load dataset
data = pd.read_csv('dataset.csv') # Replace with your dataset file
X = data.iloc[:, :-1] # Features
y = data.iloc[:, -1] # Target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train model
model = LinearRegression()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)
print("Multiple Regression Mean Squared Error:", mean_squared_error(y_test,
y_pred))

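
The point of multiple regression is that each feature gets its own coefficient. To inspect the fitted relationship, a short addition to the script above (assumes `model` and the DataFrame `X` from that block):

```python
# Per-feature coefficients and the intercept of the fitted model
for name, coef in zip(X.columns, model.coef_):
    print(f"{name}: {coef:.4f}")
print("Intercept:", model.intercept_)
```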