The document contains various Python code snippets demonstrating data manipulation with pandas, basic programming concepts, and machine learning techniques using libraries such as NumPy and scikit-learn. It covers topics such as DataFrames, loops, functions, and neural network training, along with examples of data visualization with matplotlib and the implementation of activation functions in neural networks.

import pandas as pd

df = pd.read_csv('employee_salary.csv')
df.head()

[Colab dataframe summary for df: 100 rows with two numeric columns — YearsExperience (min 1, max 19, 19 unique values) and Salary (min ≈ 26,646, max ≈ 77,916, std ≈ 11,826).]

d = {
    'a': 1,
    'b': 2
}
d.update({'c': 1})
for k, v in d.items():
    print(k, v)

# Look up a key entered by the user
for k, v in d.items():
    if k == input():
        print(v)

a 1
b 2
c 1
c
b
2
a

d = {}

while True:
    z = int(input("1-add, 2- search"))
    if z == 1:
        n = input("name :")
        c = int(input("contact :"))
        d.update({n: c})
    else:
        n = input("name: ")
        for k, v in d.items():
            if k == n:
                print(v)

1-add, 2- search1
name :subhash
contact :1234567
1-add, 2- search1
name :xyz
contact :0987654
1-add, 2- search2
name: subhash
1234567

---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-1-6e6121975a20> in <cell line: 3>()
      3 while True:
      4
----> 5     z = int(input("1-add, 2- search"))
      6     if z == 1:
      7         n = input("name :")

KeyboardInterrupt: Interrupted by user

Python Basics
# Integer
x = 10
print(type(x)) # <class 'int'>

# Float
y = 3.14
print(type(y)) # <class 'float'>

# String
z = "Hello"
print(type(z)) # <class 'str'>

# Boolean
is_valid = True
print(type(is_valid)) # <class 'bool'>

<class 'int'>
<class 'float'>
<class 'str'>
<class 'bool'>

# Conditional Statements
x = 10
if x > 5:
    print("x is greater than 5")
elif x == 5:
    print("x is equal to 5")
else:
    print("x is less than 5")

# For Loop
for i in range(5):
    print(i)

# While Loop
i = 0
while i < 5:
    print(i)
    i += 1

x is greater than 5
0
1
2
3
4
0
1
2
3
4

# Function Definition
def greet(name):
    return f"Hello, {name}!"

print(greet("Alice"))

# Using Built-in Functions


numbers = [1, 2, 3, 4, 5]
print(len(numbers)) # 5
print(sum(numbers)) # 15

Hello, Alice!
5
15

my_list = [1, 2, 3, 4]
my_list.append(5)
print(my_list) # [1, 2, 3, 4, 5]

my_list[0] = 10
print(my_list) # [10, 2, 3, 4, 5]

[1, 2, 3, 4, 5]
[10, 2, 3, 4, 5]

my_tuple = (1, 2, 3, 4)
# my_tuple[0] = 10 # Will raise an error
print(my_tuple)

(1, 2, 3, 4)

my_dict = {'name': 'Alice', 'age': 25}


print(my_dict['name']) # Alice

my_dict['age'] = 26
print(my_dict) # {'name': 'Alice', 'age': 26}

Alice
{'name': 'Alice', 'age': 26}

my_set = {1, 2, 3, 3, 4}
print(my_set) # {1, 2, 3, 4} (duplicates removed)

{1, 2, 3, 4}

Machine Learning
import numpy as np

# Creating a NumPy array


array = np.array([1, 2, 3, 4, 5])
print("Array:", array)

# Array operations
print("Array * 2:", array * 2)
print("Mean of array:", np.mean(array))
print("Reshaped array:", array.reshape(5, 1))

Array: [1 2 3 4 5]
Array * 2: [ 2 4 6 8 10]
Mean of array: 3.0
Reshaped array: [[1]
[2]
[3]
[4]
[5]]

import matplotlib.pyplot as plt


import numpy as np
# Sample data
x = np.linspace(0, 10, 100)
y = np.sin(x)

# Plotting the graph


plt.plot(x, y, label='Sine Wave1')
plt.title('Sine Wave')
plt.xlabel('X-axis')
plt.ylabel('Y-axis')
plt.legend()
plt.show()

import pandas as pd

# Creating a DataFrame
data = {'Name': ['John', 'Anna', 'Peter', 'Linda'],
'Age': [28, 24, 35, 32],
'City': ['New York', 'Paris', 'Berlin', 'London']}

df = pd.DataFrame(data)

# Display the DataFrame


print(df)

# Basic Operations
print("Mean Age:", df['Age'].mean())
print("Data from London:\n", df[df['City'] == 'London'])

    Name  Age      City
0   John   28  New York
1   Anna   24     Paris
2  Peter   35    Berlin
3  Linda   32    London
Mean Age: 29.75
Data from London:
     Name  Age    City
3  Linda   32  London

from sklearn.model_selection import train_test_split


from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression

# Creating a dataset
X, y = make_regression(n_samples=100, n_features=1, noise=10)

# Splitting the dataset into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2, random_state=42)

# Creating and training the model


model = LinearRegression()
model.fit(X_train, y_train)

# Predicting and evaluating the model


y_pred = model.predict(X_test)
print("Model coefficients:", model.coef_)
print("Model intercept:", model.intercept_)

Model coefficients: [5.3624582]


Model intercept: -0.40869979495289066
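
The cell above only reports the fitted coefficients. As a quick sanity check, a small added sketch like the following (using scikit-learn's mean_squared_error and r2_score on the X_test/y_test split already created above) evaluates the fit on the held-out data:

from sklearn.metrics import mean_squared_error, r2_score

# Evaluate the fitted line on the 20% held-out split from above
print("Test MSE:", mean_squared_error(y_test, y_pred))
print("Test R^2:", r2_score(y_test, y_pred))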

import numpy as np

# Sigmoid activation function for output layer
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of sigmoid for backpropagation
# (expects the sigmoid *output*, not the raw pre-activation)
def sigmoid_derivative(x):
    return x * (1 - x)

# ReLU activation function for hidden layer
def relu(x):
    return np.maximum(0, x)

# Derivative of ReLU for backpropagation
def relu_derivative(x):
    return np.where(x > 0, 1, 0)

# Initialize data
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Input (XOR problem)
y = np.array([[0], [1], [1], [0]])              # Target output (XOR labels)

# Set seed for reproducibility


np.random.seed(42)

# Initialize weights and biases


input_size = 2
hidden_size = 2
output_size = 1

# Random weights for input to hidden layer and hidden to output layer
W1 = np.random.randn(input_size, hidden_size)
W2 = np.random.randn(hidden_size, output_size)

# Random biases for hidden and output layers


b1 = np.random.randn(1, hidden_size)
b2 = np.random.randn(1, output_size)

# Learning rate
lr = 0.1
epochs = 10000

# Training the neural network using gradient descent
for epoch in range(epochs):
    # Forward propagation
    hidden_input = np.dot(X, W1) + b1              # Input to hidden layer
    hidden_output = relu(hidden_input)             # Activation in hidden layer
    output_input = np.dot(hidden_output, W2) + b2  # Input to output layer
    predicted_output = sigmoid(output_input)       # Activation in output layer

    # Compute the error
    error = y - predicted_output

    # Backpropagation
    d_predicted_output = error * sigmoid_derivative(predicted_output)      # Derivative at output
    error_hidden_layer = d_predicted_output.dot(W2.T)                      # Error contribution from output to hidden
    d_hidden_output = error_hidden_layer * relu_derivative(hidden_output)  # Derivative at hidden layer

    # Updating weights and biases
    W2 += hidden_output.T.dot(d_predicted_output) * lr
    b2 += np.sum(d_predicted_output, axis=0, keepdims=True) * lr
    W1 += X.T.dot(d_hidden_output) * lr
    b1 += np.sum(d_hidden_output, axis=0, keepdims=True) * lr

    # Print error every 1000 epochs
    if epoch % 1000 == 0:
        print(f'Epoch {epoch}, Error: {np.mean(np.abs(error))}')

# Final predictions
print("\nFinal predictions after training:")
print(predicted_output)

Epoch 0, Error: 0.5010149108269109


Epoch 1000, Error: 0.5000000645563429
Epoch 2000, Error: 0.5000000156953752
Epoch 3000, Error: 0.5000000045733671
Epoch 4000, Error: 0.5000000014419256
Epoch 5000, Error: 0.5000000004715042
Epoch 6000, Error: 0.5000000001568123
Epoch 7000, Error: 0.5000000000525605
Epoch 8000, Error: 0.5000000000176792
Epoch 9000, Error: 0.5000000000059556

Final predictions after training:


[[0.49971352]
[0.4998213 ]
[0.5001344 ]
[0.50024217]]
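
Note that in this run the mean error plateaus at 0.5 and all four predictions sit near 0.5, i.e. the network never learns XOR; with this particular random initialization the ReLU hidden units can output zero for every input, after which no gradient reaches W1. One common fix for this toy setup, sketched below as an illustrative variation (not part of the original), is to reuse the sigmoid activation for the hidden layer as well:

# Re-initialize and retrain with a sigmoid hidden layer (illustrative fix)
W1 = np.random.randn(input_size, hidden_size)
W2 = np.random.randn(hidden_size, output_size)
b1 = np.zeros((1, hidden_size))
b2 = np.zeros((1, output_size))

for epoch in range(epochs):
    hidden_output = sigmoid(np.dot(X, W1) + b1)
    predicted_output = sigmoid(np.dot(hidden_output, W2) + b2)

    error = y - predicted_output
    d_out = error * sigmoid_derivative(predicted_output)
    d_hidden = d_out.dot(W2.T) * sigmoid_derivative(hidden_output)

    W2 += hidden_output.T.dot(d_out) * lr
    b2 += np.sum(d_out, axis=0, keepdims=True) * lr
    W1 += X.T.dot(d_hidden) * lr
    b1 += np.sum(d_hidden, axis=0, keepdims=True) * lr

# With a sigmoid hidden layer the error typically falls well below 0.5,
# though the learning rate and epoch count may still need tuning.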

import numpy as np

# Sigmoid activation function for output layer
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of sigmoid for backpropagation
def sigmoid_derivative(x):
    return x * (1 - x)

# ReLU activation function for hidden layer
def relu(x):
    return np.maximum(0, x)

# Derivative of ReLU for backpropagation
def relu_derivative(x):
    return np.where(x > 0, 1, 0)

# Data: Age, Salary


X = np.array([[22, 30000],
[25, 50000],
[47, 75000],
[52, 100000],
[46, 25000],
[29, 40000]])

# Normalize the features (Age and Salary)


X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)

# Labels: Did the person buy the product?


y = np.array([[0], [1], [1], [1], [0], [0]])

# Set seed for reproducibility


np.random.seed(42)

# Initialize weights and biases


input_size = 2
hidden_size = 3 # Use 3 neurons in the hidden layer
output_size = 1

# Random weights for input to hidden layer and hidden to output layer
W1 = np.random.randn(input_size, hidden_size)
W2 = np.random.randn(hidden_size, output_size)

# Random biases for hidden and output layers


b1 = np.random.randn(1, hidden_size)
b2 = np.random.randn(1, output_size)

# Learning rate
lr = 0.1
epochs = 10000

# Training the neural network using gradient descent
for epoch in range(epochs):
    # Forward propagation
    hidden_input = np.dot(X, W1) + b1              # Input to hidden layer
    hidden_output = relu(hidden_input)             # Activation in hidden layer
    output_input = np.dot(hidden_output, W2) + b2  # Input to output layer
    predicted_output = sigmoid(output_input)       # Activation in output layer

    # Compute the error
    error = y - predicted_output

    # Backpropagation
    d_predicted_output = error * sigmoid_derivative(predicted_output)      # Derivative at output
    error_hidden_layer = d_predicted_output.dot(W2.T)                      # Error contribution from output to hidden
    d_hidden_output = error_hidden_layer * relu_derivative(hidden_output)  # Derivative at hidden layer

    # Updating weights and biases
    W2 += hidden_output.T.dot(d_predicted_output) * lr
    b2 += np.sum(d_predicted_output, axis=0, keepdims=True) * lr
    W1 += X.T.dot(d_hidden_output) * lr
    b1 += np.sum(d_hidden_output, axis=0, keepdims=True) * lr

    # Print error every 1000 epochs
    if epoch % 1000 == 0:
        print(f'Epoch {epoch}, Error: {np.mean(np.abs(error))}')

# Final predictions
print("\nFinal predictions after training (probabilities):")
print(predicted_output)

# Converting probabilities to binary output (0 or 1)


binary_output = np.where(predicted_output >= 0.5, 1, 0)
print("\nFinal binary predictions (0 = No, 1 = Yes):")
print(binary_output)

Epoch 0, Error: 0.35194752589829115


Epoch 1000, Error: 0.2303648044953486
Epoch 2000, Error: 0.22749559727662402
Epoch 3000, Error: 0.033073592235613666
Epoch 4000, Error: 0.019497272437606416
Epoch 5000, Error: 0.0147671098330691
Epoch 6000, Error: 0.012245524634297155
Epoch 7000, Error: 0.010636587912924274
Epoch 8000, Error: 0.009501372997931473
Epoch 9000, Error: 0.008655504496544402

Final predictions after training (probabilities):


[[1.85242936e-03]
[9.68690602e-01]
[9.99835717e-01]
[9.99995714e-01]
[5.89124130e-10]
[1.45757143e-02]]

Final binary predictions (0 = No, 1 = Yes):


[[0]
[1]
[1]
[1]
[0]
[0]]

# Importing necessary libraries


import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Example dataset: Features and target variable


# You can replace this with your own dataset
data = {
'Size': [1500, 2000, 2500, 3000, 3500],
'Bedrooms': [3, 4, 3, 5, 4],
'Age': [10, 15, 20, 5, 8],
'Price': [300000, 400000, 500000, 600000, 700000]
}

# Convert to DataFrame
df = pd.DataFrame(data)

# Features (independent variables) and target (dependent variable)


X = df[['Size', 'Bedrooms', 'Age']] # Features
y = df['Price'] # Target

# Splitting the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2, random_state=42)

# Create and train the model


model = LinearRegression()
model.fit(X_train, y_train)

# Predicting on test set


y_pred = model.predict(X_test)

# Evaluating the model


mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)

# Print results
print("Predicted values:", y_pred)
print("Actual values:", y_test.values)
print("Root Mean Squared Error:", rmse)

# You can also predict for new data


new_data = [[2800, 4, 6]]  # Example: new house of size 2800 sqft, 4 bedrooms, and 6 years old
new_prediction = model.predict(new_data)
print("Prediction for new data:", new_prediction)

Predicted values: [400000.]


Actual values: [400000]
Root Mean Squared Error: 5.820766091346741e-11
Prediction for new data: [560000.]

/usr/local/lib/python3.10/dist-packages/sklearn/base.py:493:
UserWarning: X does not have valid feature names, but LinearRegression
was fitted with feature names
warnings.warn(

# calc
def add(a, b):
    return a + b

def sub(a, b):
    return a - b

def mul(a, b):
    return a * b

def div(a, b):
    return a / b

# calctr
def add(a, b):
    return a + b

def add(a, b):
    return (a + b, a - b)

a = 1
b = 2
x, y = add(a, b)

l = [1, 2, 3]

d = {'a': 1, 'b': 2}
d.update({'c': 3})
d

# Set scratch work: add() needs an element, remove() fails on a missing one,
# and {} creates a dict, so set() is used for empty sets
s = set()
s.add(1)
s.remove(1)

s1 = set()
s2 = set()
s = s1.union(s2)

S = set()
s = set()
s.issubset(S)

l = []
l = [[12, 3], [4, 5], (1, 3)]

{'a': 1, 'b': 2, 'c': 3}

#sentiment analys

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
import nltk
from nltk.corpus import stopwords
import re

# Download stopwords
nltk.download('stopwords')

[nltk_data] Downloading package stopwords to /root/nltk_data...


[nltk_data] Unzipping corpora/stopwords.zip.

True

# Sample dataset
data = {'Review': ['I love this product!', 'This is terrible, I hate it', 'Not bad, but could be better',
                   'Absolutely wonderful!', 'Worst purchase ever', 'I am so happy with this item',
                   'Very disappointing', 'The product is okay, nothing special', 'I enjoy using this daily',
                   'It broke after one use. Horrible!'],
        'Sentiment': ['positive', 'negative', 'neutral', 'positive', 'negative', 'positive',
                      'negative', 'neutral', 'positive', 'negative']}

df = pd.DataFrame(data)

# Display the dataset
print(df.head())

Review Sentiment
0 I love this product! positive
1 This is terrible, I hate it negative
2 Not bad, but could be better neutral
3 Absolutely wonderful! positive
4 Worst purchase ever negative

# Function to clean the text
def clean_text(text):
    text = re.sub(r'[^a-zA-Z\s]', '', text)  # Remove special characters and numbers
    text = text.lower()                      # Convert to lowercase
    text = text.split()                      # Tokenize the text
    text = [word for word in text if word not in stopwords.words('english')]  # Remove stopwords
    return ' '.join(text)

# Apply the cleaning function to the reviews
df['Cleaned_Review'] = df['Review'].apply(clean_text)

# Display the cleaned dataset
print(df[['Review', 'Cleaned_Review']].head())
Review Cleaned_Review
0 I love this product! love product
1 This is terrible, I hate it terrible hate
2 Not bad, but could be better bad could better
3 Absolutely wonderful! absolutely wonderful
4 Worst purchase ever worst purchase ever

# Convert text into numerical form using TF-IDF


vectorizer = TfidfVectorizer(max_features=1000)
X = vectorizer.fit_transform(df['Cleaned_Review']).toarray()

# Encode the target labels


y = df['Sentiment'].map({'positive': 1, 'negative': 0, 'neutral': 2})

# Split the data into training and test sets


X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2, random_state=42)

# Train a Logistic Regression model


model = LogisticRegression(max_iter=100)
model.fit(X_train, y_train)

# Make predictions on the test set


y_pred = model.predict(X_test)

# Evaluate the model


print("Accuracy:", accuracy_score(y_test, y_pred))
# print("\nClassification Report:\n", classification_report(y_test, y_pred, target_names=['Negative', 'Positive', 'Neutral']))

Accuracy: 0.5

# Function to predict sentiment of a new review
def predict_sentiment(review):
    cleaned_review = clean_text(review)
    vectorized_review = vectorizer.transform([cleaned_review]).toarray()
    prediction = model.predict(vectorized_review)[0]

    if prediction == 1:
        return "Positive"
    elif prediction == 0:
        return "Negative"
    else:
        return "Neutral"

# Test with a new review


new_review = "This product is awesome, I love it!"
print(f"Review: {new_review}")
print(f"Predicted Sentiment: {predict_sentiment(new_review)}")
Review: This product is awesome, I love it!
Predicted Sentiment: Positive

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt

(train_images, _), (_, _) = tf.keras.datasets.mnist.load_data()

# Normalize the images to the range [-1, 1] for better GAN performance
train_images = (train_images - 127.5) / 127.5
train_images = np.expand_dims(train_images, axis=-1)

BUFFER_SIZE = 60000
BATCH_SIZE = 256

# Create batches of data
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)

Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step

def build_generator():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    return model

def build_discriminator():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16

# Random noise seed for generating images during training progress


seed = tf.random.normal([num_examples_to_generate, noise_dim])

@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))

def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch)

        # Generate images after each epoch
        generate_and_save_images(generator, epoch + 1, seed)

def generate_and_save_images(model, epoch, test_input):
    predictions = model(test_input, training=False)

    fig = plt.figure(figsize=(4, 4))

    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i+1)
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')

    plt.savefig(f'image_at_epoch_{epoch:04d}.png')
    plt.show()

generator = build_generator()
discriminator = build_discriminator()

train(train_dataset, EPOCHS)

/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
/usr/local/lib/python3.10/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-831272d6fd65> in <cell line: 4>()
      2 discriminator = build_discriminator()
      3
----> 4 train(train_dataset, EPOCHS)

<ipython-input-6-f139a6fe64b9> in train(dataset, epochs)
     28     for epoch in range(epochs):
     29         for image_batch in dataset:
---> 30             train_step(image_batch)

KeyboardInterrupt:

import tensorflow as tf
from tensorflow.keras import layers, models

# Simple Sequential model


model = models.Sequential([
layers.Dense(64, activation='relu', input_shape=(784,)),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()

/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/
dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim`
argument to a layer. When using Sequential models, prefer using an
`Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer,
**kwargs)

Model: "sequential"

 Layer (type)      Output Shape   Param #
 dense (Dense)     (None, 64)      50,240
 dense_1 (Dense)   (None, 64)       4,160
 dense_2 (Dense)   (None, 10)         650

 Total params: 55,050 (215.04 KB)
 Trainable params: 55,050 (215.04 KB)
 Non-trainable params: 0 (0.00 B)

model = models.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28,
1)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()

/usr/local/lib/python3.10/dist-packages/keras/src/layers/
convolutional/base_conv.py:107: UserWarning: Do not pass an
`input_shape`/`input_dim` argument to a layer. When using Sequential
models, prefer using an `Input(shape)` object as the first layer in
the model instead.
super().__init__(activity_regularizer=activity_regularizer,
**kwargs)

Model: "sequential_1"

 Layer (type)                     Output Shape         Param #
 conv2d (Conv2D)                  (None, 26, 26, 32)       320
 max_pooling2d (MaxPooling2D)     (None, 13, 13, 32)         0
 conv2d_1 (Conv2D)                (None, 11, 11, 64)    18,496
 max_pooling2d_1 (MaxPooling2D)   (None, 5, 5, 64)           0
 conv2d_2 (Conv2D)                (None, 3, 3, 64)      36,928
 flatten (Flatten)                (None, 576)                0
 dense_3 (Dense)                  (None, 64)            36,928
 dense_4 (Dense)                  (None, 10)               650

 Total params: 93,322 (364.54 KB)
 Trainable params: 93,322 (364.54 KB)
 Non-trainable params: 0 (0.00 B)

model = models.Sequential([
layers.LSTM(128, input_shape=(100, 1), return_sequences=True),
layers.LSTM(128),
layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=['accuracy'])
model.summary()

/usr/local/lib/python3.10/dist-packages/keras/src/layers/rnn/
rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim`
argument to a layer. When using Sequential models, prefer using an
`Input(shape)` object as the first layer in the model instead.
super().__init__(**kwargs)

Model: "sequential_2"

 Layer (type)      Output Shape        Param #
 lstm (LSTM)       (None, 100, 128)     66,560
 lstm_1 (LSTM)     (None, 128)         131,584
 dense_5 (Dense)   (None, 1)               129

 Total params: 198,273 (774.50 KB)
 Trainable params: 198,273 (774.50 KB)
 Non-trainable params: 0 (0.00 B)

from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

text = "Machine learning is fascinating!"


inputs = tokenizer(text, return_tensors="pt")
outputs = model(**inputs)

print(outputs)

/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_token.py:89: UserWarning: The secret `HF_TOKEN` does not exist in your Colab secrets. To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session. You will be able to reuse this secret in all of your notebooks. Please note that authentication is recommended but still optional to access public models or datasets.
  warnings.warn(


/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:1601: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884
  warnings.warn(


BaseModelOutputWithPoolingAndCrossAttentions(
  last_hidden_state=tensor(
    [[[ 0.1215,  0.1279, -0.1553,  ..., -0.2384,  0.1134,  0.4655],
      [-0.1434,  0.0242, -0.1768,  ..., -0.2693,  0.3880,  0.5866],
      ...,
      [ 0.7277,  0.1579, -0.2494,  ...,  0.0396, -0.7772, -0.0927]]],
    grad_fn=<NativeLayerNormBackward0>),
  pooler_output=tensor(
    [[-0.9058, -0.4572, -0.7117,  ..., -0.6099, -0.7171,  0.9117]],
    grad_fn=<TanhBackward0>),
  hidden_states=None, past_key_values=None, attentions=None, cross_attentions=None)

import torch
import torch.nn as nn

class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 784),
            nn.Tanh()
        )

    def forward(self, x):
        return self.fc(x)

class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(784, 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.fc(x)

generator = Generator()
discriminator = Discriminator()

import numpy as np

# Initialize parameters
Q = np.zeros((5, 5))  # Example state-action table
alpha = 0.1
gamma = 0.9
epsilon = 0.1

# Q-Learning update rule
def update_q_table(state, action, reward, next_state):
    best_next_action = np.argmax(Q[next_state])
    Q[state, action] += alpha * (reward + gamma * Q[next_state, best_next_action] - Q[state, action])

# Sample usage
update_q_table(0, 1, 10, 2)  # Sample state-action-reward-next_state
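
The cell defines epsilon but never uses it; a minimal sketch of epsilon-greedy action selection over the same 5x5 Q table (the surrounding environment and reward logic are assumed, not part of the original) could look like this:

def choose_action(state):
    # Explore with probability epsilon, otherwise exploit the best known action
    if np.random.rand() < epsilon:
        return np.random.randint(Q.shape[1])
    return int(np.argmax(Q[state]))

print("Chosen action for state 0:", choose_action(0))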

from transformers import pipeline

# Load pre-trained sentiment analysis pipeline


nlp = pipeline("sentiment-analysis")

text = "AI is revolutionizing the world!"


result = nlp(text)
print(result)

No model was supplied, defaulted to distilbert/distilbert-base-uncased-finetuned-sst-2-english and revision af0f99b (https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english).
Using a pipeline without specifying a model name and revision in production is not recommended.


/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:1601: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884
  warnings.warn(
Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.

[{'label': 'POSITIVE', 'score': 0.9994089603424072}]
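
The pipeline warning above appears because no checkpoint was pinned; a hedged variation that names the same default DistilBERT checkpoint explicitly and scores several texts at once (the second sentence is an illustrative example):

nlp = pipeline("sentiment-analysis",
               model="distilbert/distilbert-base-uncased-finetuned-sst-2-english")
print(nlp(["AI is revolutionizing the world!", "This interface is confusing and slow."]))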

from sklearn.metrics import confusion_matrix
import numpy as np

# Simulated true labels and predicted labels (with bias)
true_labels = np.array([1, 0, 1, 0, 1, 0, 1, 0])
predicted_labels = np.array([1, 0, 1, 0, 0, 0, 0, 0])

conf_matrix = confusion_matrix(true_labels, predicted_labels)

print("Confusion Matrix:")
print(conf_matrix)

# Further code can be added to measure bias-related metrics like disparate impact, etc.

Confusion Matrix:
[[4 0]
[2 2]]
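
From the 2x2 confusion matrix above, the usual classification metrics can be read off directly; a short sketch using the same conf_matrix:

tn, fp, fn, tp = conf_matrix.ravel()
accuracy = (tp + tn) / conf_matrix.sum()
precision = tp / (tp + fp)
recall = tp / (tp + fn)  # true positive rate / sensitivity
print(f"Accuracy: {accuracy:.2f}, Precision: {precision:.2f}, Recall: {recall:.2f}")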

from flask import Flask, request, jsonify
import joblib

app = Flask(__name__)

# Load trained model
model = joblib.load('model.pkl')

@app.route('/predict', methods=['POST'])
def predict():
    data = request.json
    prediction = model.predict([data['input']])
    return jsonify({'prediction': prediction.tolist()})

if __name__ == '__main__':
    app.run(debug=True)

---------------------------------------------------------------------------
FileNotFoundError                         Traceback (most recent call last)
<ipython-input-10-9f42a7ddfac5> in <cell line: 7>()
      5
      6 # Load trained model
----> 7 model = joblib.load('model.pkl')
      8
      9 @app.route('/predict', methods=['POST'])

/usr/local/lib/python3.10/dist-packages/joblib/numpy_pickle.py in load(filename, mmap_mode)
    648             obj = _unpickle(fobj)
    649     else:
--> 650         with open(filename, 'rb') as f:
    651             with _read_fileobject(f, filename, mmap_mode) as fobj:
    652                 if isinstance(fobj, str):

FileNotFoundError: [Errno 2] No such file or directory: 'model.pkl'
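
The FileNotFoundError simply means no model.pkl exists in the working directory yet. A minimal sketch that creates one first (a stand-in model trained on made-up data, purely illustrative) lets the Flask cell load something:

from sklearn.linear_model import LinearRegression
import numpy as np
import joblib

# Fit a trivial stand-in model and persist it as model.pkl
X_demo = np.array([[1.0], [2.0], [3.0]])
y_demo = np.array([2.0, 4.0, 6.0])
joblib.dump(LinearRegression().fit(X_demo, y_demo), 'model.pkl')

Once the server is running, the /predict endpoint can then be exercised with, for example, requests.post('http://127.0.0.1:5000/predict', json={'input': [4.0]}), where the payload shape is an assumption matching model.predict([data['input']]).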

!pip install qiskit-aer # Install the Aer provider package

# Import statements modified


from qiskit import QuantumCircuit, transpile
from qiskit_aer import Aer # Import Aer from qiskit_aer
from qiskit.visualization import plot_histogram

# Simple quantum circuit


qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

# Run the circuit


simulator = Aer.get_backend('qasm_simulator')
compiled_circuit = transpile(qc, simulator)
result = simulator.run(compiled_circuit).result()

counts = result.get_counts(qc)
print(counts)

plot_histogram(counts)

Requirement already satisfied: qiskit-aer in /usr/local/lib/python3.10/dist-packages (0.15.1)
Requirement already satisfied: qiskit>=1.1.0, numpy>=1.16.3, scipy>=1.0, psutil>=5 and the remaining qiskit-aer dependencies in /usr/local/lib/python3.10/dist-packages
{'11': 508, '00': 516}

!pip install qiskit-aer

Collecting qiskit-aer
  Downloading qiskit_aer-0.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (12.3 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 12.3/12.3 MB 56.8 MB/s eta 0:00:00
Requirement already satisfied: qiskit>=1.1.0, numpy>=1.16.3, scipy>=1.0, psutil>=5 and the remaining qiskit-aer dependencies in /usr/local/lib/python3.10/dist-packages

#employee salary AI implemetation

# Required Libraries
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import joblib # To save and load models

# Load Dataset (assuming a CSV file with 'YearsExperience' and 'Salary' columns)
# Replace 'path_to_salary_data.csv' with the actual path to your dataset
df = pd.read_csv('employee_salary.csv')

# Explore dataset
print(df.head())

# Split dataset into features (X) and target variable (y)


X = df[['YearsExperience']] # Independent variable
y = df['Salary'] # Dependent variable

# Split dataset into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2, random_state=42)

# Create and train a Linear Regression model


model = LinearRegression()
model.fit(X_train, y_train)

# Predict on test data


y_pred = model.predict(X_test)

# Evaluate the model


mse = mean_squared_error(y_test, y_pred)
print(f'Mean Squared Error: {mse}')
print(f'Model Coefficient: {model.coef_}')
print(f'Model Intercept: {model.intercept_}')

# Save the trained model to a file


joblib.dump(model, 'salary_prediction_model.pkl')
print("Model saved to salary_prediction_model.pkl")

# Load the saved model and make new predictions


loaded_model = joblib.load('salary_prediction_model.pkl')

# Predict salary for a new employee with 5 years of experience
years_experience = np.array([[5]])  # New data point
predicted_salary = loaded_model.predict(years_experience)
print(f'Predicted salary for 5 years of experience: {predicted_salary[0]}')

YearsExperience Salary
0 13 52828.389532
1 16 60186.294170
2 1 28637.697761
3 4 36202.234192
4 4 33934.268590
Mean Squared Error: 22229486.69120074
Model Coefficient: [1917.65565688]
Model Intercept: 31506.15770479451
Model saved to salary_prediction_model.pkl
Predicted salary for 5 years of experience: 41094.43598917406

/usr/local/lib/python3.10/dist-packages/sklearn/base.py:493:
UserWarning: X does not have valid feature names, but LinearRegression
was fitted with feature names
warnings.warn(

# Load the saved model and make new predictions
loaded_model = joblib.load('salary_prediction_model.pkl')

# Predict salary for a new employee with 20 years of experience
years_experience = np.array([[20]])  # New data point
predicted_salary = loaded_model.predict(years_experience)
print(f'Predicted salary for 20 years of experience: {predicted_salary[0]}')

Predicted salary for 20 years of experience: 69859.2708423127

/usr/local/lib/python3.10/dist-packages/sklearn/base.py:493:
UserWarning: X does not have valid feature names, but LinearRegression
was fitted with feature names
warnings.warn(

#deep learning

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Create a basic feedforward neural network


model = Sequential([
Dense(32, activation='relu', input_shape=(10,)),
Dense(64, activation='relu'),
Dense(1, activation='linear') # Regression output
])

# Compile the model


model.compile(optimizer='adam', loss='mse')
# Print summary
model.summary()

/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/
dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim`
argument to a layer. When using Sequential models, prefer using an
`Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer,
**kwargs)

Model: "sequential"

 Layer (type)      Output Shape   Param #
 dense (Dense)     (None, 32)         352
 dense_1 (Dense)   (None, 64)       2,112
 dense_2 (Dense)   (None, 1)           65

 Total params: 2,529 (9.88 KB)
 Trainable params: 2,529 (9.88 KB)
 Non-trainable params: 0 (0.00 B)

import torch
import torch.nn as nn
import torch.optim as optim

# Simple PyTorch Feedforward Network
class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(10, 32)
        self.fc2 = nn.Linear(32, 1)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)  # No activation for regression
        return x

# Model initialization
model = SimpleNN()
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Example training step
def train_step(x, y):
    optimizer.zero_grad()
    output = model(x)
    loss = loss_fn(output, y)
    loss.backward()
    optimizer.step()

# x and y are assumed to be torch tensors
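
A brief usage sketch with random tensors, assuming shapes that match the 10-feature input and the scalar regression target:

x = torch.randn(8, 10)  # batch of 8 samples with 10 features
y = torch.randn(8, 1)   # matching regression targets
train_step(x, y)        # one forward/backward pass and optimizer update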

from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten

# CNN model for image classification


model = Sequential([
Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(128, activation='relu'),
Dense(10, activation='softmax') # Multiclass output
])

model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()

/usr/local/lib/python3.10/dist-packages/keras/src/layers/
convolutional/base_conv.py:107: UserWarning: Do not pass an
`input_shape`/`input_dim` argument to a layer. When using Sequential
models, prefer using an `Input(shape)` object as the first layer in
the model instead.
super().__init__(activity_regularizer=activity_regularizer,
**kwargs)

Model: "sequential_1"

 Layer (type)                     Output Shape          Param #
 conv2d (Conv2D)                  (None, 62, 62, 32)        896
 max_pooling2d (MaxPooling2D)     (None, 31, 31, 32)          0
 conv2d_1 (Conv2D)                (None, 29, 29, 64)     18,496
 max_pooling2d_1 (MaxPooling2D)   (None, 14, 14, 64)          0
 flatten (Flatten)                (None, 12544)               0
 dense_3 (Dense)                  (None, 128)         1,605,760
 dense_4 (Dense)                  (None, 10)              1,290

 Total params: 1,626,442 (6.20 MB)
 Trainable params: 1,626,442 (6.20 MB)
 Non-trainable params: 0 (0.00 B)

from tensorflow.keras.layers import LSTM

# LSTM for sequence data


model = Sequential([
    LSTM(50, activation='relu', input_shape=(10, 1)),  # 10 timesteps, 1 feature
    Dense(1)
])

model.compile(optimizer='adam', loss='mse')
model.summary()

/usr/local/lib/python3.10/dist-packages/keras/src/layers/rnn/
rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim`
argument to a layer. When using Sequential models, prefer using an
`Input(shape)` object as the first layer in the model instead.
super().__init__(**kwargs)
Model: "sequential_2"

 Layer (type)      Output Shape   Param #
 lstm (LSTM)       (None, 50)      10,400
 dense_5 (Dense)   (None, 1)           51

 Total params: 10,451 (40.82 KB)
 Trainable params: 10,451 (40.82 KB)
 Non-trainable params: 0 (0.00 B)

from tensorflow.keras.layers import Input

# Autoencoder model
input_img = Input(shape=(784,))
encoded = Dense(128, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)

autoencoder = tf.keras.Model(input_img, decoded)


autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.summary()

Model: "functional_3"

┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━┓
┃ Layer (type)               ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━┩
│ input_layer_3 (InputLayer) │ (None, 784)  │       0 │
│ dense_6 (Dense)            │ (None, 128)  │ 100,480 │
│ dense_7 (Dense)            │ (None, 784)  │ 101,136 │
└────────────────────────────┴──────────────┴─────────┘

Total params: 201,616 (787.56 KB)

Trainable params: 201,616 (787.56 KB)

Non-trainable params: 0 (0.00 B)
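
A hedged sketch of how this autoencoder could be trained, assuming MNIST digits flattened to 784 values and scaled to [0, 1]; the epoch count and batch size are illustrative:

from tensorflow.keras.datasets import mnist

(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0

autoencoder.fit(x_train, x_train,                 # input and target are the same image
                epochs=5, batch_size=256,
                validation_data=(x_test, x_test))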

import torch.nn as nn

# Define the generator


class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 28*28),  # Output for a 28x28 image (flattened)
            nn.Tanh()
        )

    def forward(self, x):
        return self.model(x)

# Define the discriminator


class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Linear(28*28, 512),
nn.LeakyReLU(0.2),
nn.Linear(512, 256),
nn.LeakyReLU(0.2),
nn.Linear(256, 1),
nn.Sigmoid()
)

    def forward(self, x):
        return self.model(x)

# Initialize models
generator = Generator()
discriminator = Discriminator()
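
The notebook defines the two networks but does not train them. Below is a minimal sketch of a single adversarial training step, assuming the real images are a (batch, 784) tensor of flattened 28x28 images scaled to [-1, 1]; the optimizers, learning rate, and batch size are illustrative assumptions:

import torch
import torch.nn as nn
import torch.optim as optim

criterion = nn.BCELoss()
opt_g = optim.Adam(generator.parameters(), lr=0.0002)
opt_d = optim.Adam(discriminator.parameters(), lr=0.0002)

batch_size = 64
real_images = torch.rand(batch_size, 28 * 28) * 2 - 1   # stand-in for a batch of real data
real_labels = torch.ones(batch_size, 1)
fake_labels = torch.zeros(batch_size, 1)

# Discriminator step: real images should score 1, generated images 0
noise = torch.randn(batch_size, 100)
fake_images = generator(noise)
d_loss = criterion(discriminator(real_images), real_labels) + \
         criterion(discriminator(fake_images.detach()), fake_labels)
opt_d.zero_grad()
d_loss.backward()
opt_d.step()

# Generator step: try to make the discriminator score fakes as real
g_loss = criterion(discriminator(fake_images), real_labels)
opt_g.zero_grad()
g_loss.backward()
opt_g.step()
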
# Neural network from scratch

import numpy as np

# Activation function (Sigmoid)


def sigmoid(x):
return 1 / (1 + np.exp(-x))

# Derivative of the sigmoid function for backpropagation


def sigmoid_derivative(x):
return x * (1 - x)

# Input data (X) - 4 samples with 2 features


X = np.array([[0, 0],
[0, 1],
[1, 0],
[1, 1]])

# Output labels (y) - binary classification (XOR problem)


y = np.array([[0], [1], [1], [0]])

# Initialize weights randomly with mean 0


np.random.seed(1)
input_layer_neurons = X.shape[1] # 2 input features
hidden_layer_neurons = 3 # 3 neurons in hidden layer
output_neurons = 1 # 1 output neuron

# Random weights initialization


weights_input_hidden = np.random.uniform(size=(input_layer_neurons,
hidden_layer_neurons))
weights_hidden_output = np.random.uniform(size=(hidden_layer_neurons,
output_neurons))

# Learning rate
learning_rate = 0.1

# Training the neural network


for epoch in range(10000):
    # Feedforward
    hidden_layer_input = np.dot(X, weights_input_hidden)  # Input to hidden layer
    hidden_layer_output = sigmoid(hidden_layer_input)  # Activation function

    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)  # Input to output layer
    predicted_output = sigmoid(output_layer_input)  # Activation function

    # Backpropagation
    error = y - predicted_output  # Error in output

    # Derivatives for weights between hidden and output layer
    d_predicted_output = error * sigmoid_derivative(predicted_output)

    # Error at hidden layer (backpropagated)
    error_hidden_layer = d_predicted_output.dot(weights_hidden_output.T)

    # Derivatives for weights between input and hidden layer
    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)

    # Updating the weights
    weights_hidden_output += hidden_layer_output.T.dot(d_predicted_output) * learning_rate
    weights_input_hidden += X.T.dot(d_hidden_layer) * learning_rate

    if epoch % 1000 == 0:
        loss = np.mean(np.abs(error))
        print(f'Epoch {epoch}, Loss: {loss}')

# Final prediction
print("\nFinal Prediction after training:")
print(predicted_output)

Epoch 0, Loss: 0.49979824553598173


Epoch 1000, Loss: 0.5000142138825419
Epoch 2000, Loss: 0.499990969342896
Epoch 3000, Loss: 0.4999478394501804
Epoch 4000, Loss: 0.4998252806592769
Epoch 5000, Loss: 0.4992831784249153
Epoch 6000, Loss: 0.4946935392946492
Epoch 7000, Loss: 0.46020091737274127
Epoch 8000, Loss: 0.39967539690720133
Epoch 9000, Loss: 0.30069202394171557

Final Prediction after training:


[[0.20693508]
[0.82108664]
[0.81948912]
[0.1716522 ]]
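
One thing the network above omits is bias terms; adding them usually helps a small sigmoid network converge on XOR much sooner. A hedged sketch of the same forward pass with biases added (the bias variables are an assumption, not part of the original code):

bias_hidden = np.zeros((1, hidden_layer_neurons))
bias_output = np.zeros((1, output_neurons))

hidden_layer_output = sigmoid(np.dot(X, weights_input_hidden) + bias_hidden)
predicted_output = sigmoid(np.dot(hidden_layer_output, weights_hidden_output) + bias_output)

# Inside the training loop, the biases would be updated alongside the weights:
# bias_output += np.sum(d_predicted_output, axis=0, keepdims=True) * learning_rate
# bias_hidden += np.sum(d_hidden_layer, axis=0, keepdims=True) * learning_rate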

import numpy as np

# Sigmoid activation function


def sigmoid(x):
return 1 / (1 + np.exp(-x))

# Derivative of sigmoid function for backpropagation


def sigmoid_derivative(x):
return x * (1 - x)

# Generate random dataset


# Input: years of experience, education level (encoded), role type (encoded)
X = np.array([[2, 1, 0],    # Example 1: 2 years of experience, education level 1, role type 0
              [3, 2, 1],    # Example 2: 3 years of experience, education level 2, role type 1
              [5, 3, 1],    # Example 3: 5 years of experience, education level 3, role type 1
              [7, 2, 2]])   # Example 4: 7 years of experience, education level 2, role type 2

# Salary output (in thousands) for the above examples
y = np.array([[50], [60], [80], [100]])

# Initialize weights randomly


np.random.seed(1)
input_layer_neurons = X.shape[1] # 3 input features
hidden_layer_neurons = 5  # Arbitrary choice: 5 neurons in hidden layer
output_neurons = 1  # 1 output (predicted salary)

# Random weight initialization


weights_input_hidden = np.random.uniform(size=(input_layer_neurons,
hidden_layer_neurons))
weights_hidden_output = np.random.uniform(size=(hidden_layer_neurons,
output_neurons))

# Learning rate
learning_rate = 0.01

# Training loop
for epoch in range(10000):
    # Feedforward step
    hidden_layer_input = np.dot(X, weights_input_hidden)
    hidden_layer_output = sigmoid(hidden_layer_input)

    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)
    predicted_output = sigmoid(output_layer_input)

    # Backpropagation step
    error = y - predicted_output  # Error calculation

    d_predicted_output = error * sigmoid_derivative(predicted_output)

    # Output layer gradients
    error_hidden_layer = d_predicted_output.dot(weights_hidden_output.T)  # Backpropagate error
    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)  # Hidden layer gradients

    # Update the weights
    weights_hidden_output += hidden_layer_output.T.dot(d_predicted_output) * learning_rate
    weights_input_hidden += X.T.dot(d_hidden_layer) * learning_rate

    # Print loss every 1000 epochs
    if epoch % 1000 == 0:
        loss = np.mean(np.abs(error))
        print(f'Epoch {epoch}, Loss: {loss}')

# Final prediction
print("\nPredicted Salary after training:")
print(predicted_output * 100)  # Multiply by 100 to scale to salary in thousands

Epoch 0, Loss: 71.65983932975507


Epoch 1000, Loss: 71.50008525469275
Epoch 2000, Loss: 71.50004227997762
Epoch 3000, Loss: 71.50002805078817
Epoch 4000, Loss: 71.50002096697293
Epoch 5000, Loss: 71.50001673024053
Epoch 6000, Loss: 71.50001391286202
Epoch 7000, Loss: 71.5000119046198
Epoch 8000, Loss: 71.50001040108484
Epoch 9000, Loss: 71.50000923344872

Predicted Salary after training:


[[99.99827203]
[99.99939304]
[99.99950257]
[99.99951178]]
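
The predictions above saturate near 100 because the sigmoid output layer can only produce values in (0, 1) while the targets range from 50 to 100, so every output is pushed to its maximum. A hedged sketch of one fix (an assumption, not part of the original code): min-max scale the inputs, scale the targets into (0, 1), and undo the scaling when reporting.

X_norm = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))   # per-feature min-max scaling
y_scaled = y / 100.0                                             # salaries 50-100 -> 0.50-1.00

# Train with X_norm and y_scaled using the same loop as above, then report
# predictions with predicted_output * 100 to get back to salary in thousands.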

import numpy as np
import pandas as pd

# Activation function: Sigmoid


def sigmoid(x):
return 1 / (1 + np.exp(-x))

# Derivative of sigmoid function for backpropagation


def sigmoid_derivative(x):
return x * (1 - x)

# Load the dataset (replace with your actual file path)


data = pd.read_csv('employee_salary.csv')

# Assume the dataset has the columns 'YearsExperience' and 'Salary'
X = data[['YearsExperience']].values  # Features: years of experience (input)
y = data[['Salary']].values / 1000  # Output: Salary (divided by 1000 to normalize)

# Normalize the input (years of experience) between 0 and 1
X = (X - np.min(X)) / (np.max(X) - np.min(X))

# Initialize weights randomly
np.random.seed(1)
input_layer_neurons = 1  # 1 input feature (experience)
hidden_layer_neurons = 5  # Arbitrary choice: 5 neurons in hidden layer
output_neurons = 1  # 1 output (predicted salary)

# Random weight initialization
weights_input_hidden = np.random.uniform(size=(input_layer_neurons, hidden_layer_neurons))
weights_hidden_output = np.random.uniform(size=(hidden_layer_neurons, output_neurons))

# Learning rate
learning_rate = 0.01

# Training loop
for epoch in range(10000):
    # Feedforward step
    hidden_layer_input = np.dot(X, weights_input_hidden)
    hidden_layer_output = sigmoid(hidden_layer_input)

    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)
    predicted_output = sigmoid(output_layer_input)

    # Backpropagation step
    error = y - predicted_output  # Error calculation

    d_predicted_output = error * sigmoid_derivative(predicted_output)

    # Output layer gradients
    error_hidden_layer = d_predicted_output.dot(weights_hidden_output.T)  # Backpropagate error
    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)  # Hidden layer gradients

    # Update the weights
    weights_hidden_output += hidden_layer_output.T.dot(d_predicted_output) * learning_rate
    weights_input_hidden += X.T.dot(d_hidden_layer) * learning_rate

    # Print loss every 1000 epochs
    if epoch % 1000 == 0:
        loss = np.mean(np.abs(error))
        print(f'Epoch {epoch}, Loss: {loss}')

# Final prediction
print("\nPredicted Salary after training (in thousands):")
print(predicted_output * 1000)  # Multiply by 1000 to scale back to actual salary

Epoch 0, Loss: 48.51377912400656


Epoch 1000, Loss: 48.20845630214606
Epoch 2000, Loss: 48.20845630148357
Epoch 3000, Loss: 48.208456300829724
Epoch 4000, Loss: 48.208456300184295
Epoch 5000, Loss: 48.20845629954712
Epoch 6000, Loss: 48.208456298918094
Epoch 7000, Loss: 48.208456298296994
Epoch 8000, Loss: 48.2084562976837
Epoch 9000, Loss: 48.20845629707808

Predicted Salary after training (in thousands):


[[999.99998873]
[999.99999512]
[999.9995937 ]
[999.99983755]
[999.99983755]
[999.99995145]
[999.99997315]
[999.99999781]
[999.99988015]
[999.9999345 ]
[999.99998873]
[999.99970081]
[999.9999345 ]
[999.99995145]
[999.99999357]
[999.99999715]
[999.99991147]
[999.99999151]
[999.99996394]
[999.99997315]
[999.99999628]
[999.99991147]
[999.99999512]
[999.99999512]
[999.9995937 ]
[999.99999781]
[999.99983755]
[999.99999715]
[999.99999357]
[999.99995145]
[999.9995937 ]
[999.99970081]
[999.99997315]
[999.9995937 ]
[999.99997996]
[999.99983755]
[999.99998499]
[999.99999781]
[999.99977961]
[999.9995937 ]
[999.9995937 ]
[999.99988015]
[999.99991147]
[999.9999345 ]
[999.99996394]
[999.99999715]
[999.99999512]
[999.99988015]
[999.99997315]
[999.99997996]
[999.99970081]
[999.99970081]
[999.99995145]
[999.99997315]
[999.99983755]
[999.9999345 ]
[999.99998499]
[999.99999357]
[999.99999781]
[999.9995937 ]
[999.99999357]
[999.99983755]
[999.99998873]
[999.99997996]
[999.99998499]
[999.99988015]
[999.9999345 ]
[999.99988015]
[999.99999512]
[999.99983755]
[999.99998873]
[999.99988015]
[999.99996394]
[999.99999357]
[999.99999512]
[999.99983755]
[999.99999512]
[999.99999151]
[999.99999628]
[999.99999715]
[999.99991147]
[999.99997315]
[999.99983755]
[999.9995937 ]
[999.99991147]
[999.9995937 ]
[999.99999715]
[999.99999781]
[999.99988015]
[999.99977961]
[999.99999628]
[999.99983755]
[999.99977961]
[999.99997996]
[999.99999151]
[999.99999628]
[999.99995145]
[999.99997315]
[999.9995937 ]
[999.99997996]]

import numpy as np
import pandas as pd

# Sigmoid activation function


def sigmoid(x):
return 1 / (1 + np.exp(-x))

# Derivative of the sigmoid function for backpropagation


def sigmoid_derivative(x):
return x * (1 - x)

# Load the dataset (replace with your actual file path)


data = pd.read_csv('employee_salary.csv')

# Assume the dataset has the columns 'YearsExperience' and 'Salary'
X = data[['YearsExperience']].values  # Features: years of experience (input)
y = data[['Salary']].values / 1000  # Output: Salary (divided by 1000 to normalize)

# Normalize the input (years of experience) between 0 and 1
X = (X - np.min(X)) / (np.max(X) - np.min(X))

# Initialize weights randomly
np.random.seed(1)
input_layer_neurons = 1  # 1 input feature (experience)
hidden_layer_neurons = 5  # Arbitrary choice: 5 neurons in the hidden layer
output_neurons = 1  # 1 output (predicted salary)

# Random weight initialization
weights_input_hidden = np.random.uniform(size=(input_layer_neurons, hidden_layer_neurons))
weights_hidden_output = np.random.uniform(size=(hidden_layer_neurons, output_neurons))

# Learning rate
learning_rate = 0.01

# Training loop
for epoch in range(10000):
    # Feedforward step
    hidden_layer_input = np.dot(X, weights_input_hidden)
    hidden_layer_output = sigmoid(hidden_layer_input)

    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)
    predicted_output = sigmoid(output_layer_input)

    # Backpropagation step
    error = y - predicted_output  # Error calculation

    d_predicted_output = error * sigmoid_derivative(predicted_output)

    # Output layer gradients
    error_hidden_layer = d_predicted_output.dot(weights_hidden_output.T)  # Backpropagate error
    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)  # Hidden layer gradients

    # Update the weights
    weights_hidden_output += hidden_layer_output.T.dot(d_predicted_output) * learning_rate
    weights_input_hidden += X.T.dot(d_hidden_layer) * learning_rate

    # Print loss every 1000 epochs
    if epoch % 1000 == 0:
        loss = np.mean(np.abs(error))
        print(f'Epoch {epoch}, Loss: {loss}')

# Final prediction (choose a single predicted output for each input)
final_prediction = predicted_output[-1]  # Get the last prediction (for the last input)

print("\nPredicted Salary for the last employee (in thousands):")
print(final_prediction[0] * 1000)  # Multiply by 1000 to scale back to actual salary

Epoch 0, Loss: 48.51377912400656


Epoch 1000, Loss: 48.20845630214606
Epoch 2000, Loss: 48.20845630148357
Epoch 3000, Loss: 48.208456300829724
Epoch 4000, Loss: 48.208456300184295
Epoch 5000, Loss: 48.20845629954712
Epoch 6000, Loss: 48.208456298918094
Epoch 7000, Loss: 48.208456298296994
Epoch 8000, Loss: 48.2084562976837
Epoch 9000, Loss: 48.20845629707808

Predicted Salary for the last employee (in thousands):


999.9999799566601

import numpy as np
import pandas as pd

# Sigmoid activation function


def sigmoid(x):
return 1 / (1 + np.exp(-x))

# Derivative of the sigmoid function for backpropagation


def sigmoid_derivative(x):
return x * (1 - x)

# Load the dataset (replace with your actual file path)


data = pd.read_csv('employee_salary.csv')

# Assume the dataset has the columns 'YearsExperience' and 'Salary'
X = data[['YearsExperience']].values  # Features: years of experience (input)
y = data[['Salary']].values / 1000  # Output: Salary (divided by 1000 to normalize)

# Normalize the input (years of experience) between 0 and 1
X = (X - np.min(X)) / (np.max(X) - np.min(X))

# Initialize weights randomly
np.random.seed(1)
input_layer_neurons = 1  # 1 input feature (experience)
hidden_layer_neurons = 5  # Arbitrary choice: 5 neurons in the hidden layer
output_neurons = 1  # 1 output (predicted salary)

# Random weight initialization
weights_input_hidden = np.random.uniform(size=(input_layer_neurons, hidden_layer_neurons))
weights_hidden_output = np.random.uniform(size=(hidden_layer_neurons, output_neurons))

# Learning rate
learning_rate = 0.01

# Training loop
for epoch in range(10000):
    # Feedforward step
    hidden_layer_input = np.dot(X, weights_input_hidden)
    hidden_layer_output = sigmoid(hidden_layer_input)

    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)
    predicted_output = sigmoid(output_layer_input)

    # Backpropagation step
    error = y - predicted_output  # Error calculation

    d_predicted_output = error * sigmoid_derivative(predicted_output)

    # Output layer gradients
    error_hidden_layer = d_predicted_output.dot(weights_hidden_output.T)  # Backpropagate error
    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)  # Hidden layer gradients

    # Update the weights
    weights_hidden_output += hidden_layer_output.T.dot(d_predicted_output) * learning_rate
    weights_input_hidden += X.T.dot(d_hidden_layer) * learning_rate

    # Print loss every 1000 epochs
    if epoch % 1000 == 0:
        loss = np.mean(np.abs(error))
        print(f'Epoch {epoch}, Loss: {loss}')

# Final prediction for a new input (different experience value)
new_experience = np.array([[20]])  # New input: 20 years of experience

# Normalize the new input based on the training data scaling
# Note: X was already rescaled to [0, 1] above, so np.min(X) is 0 and np.max(X) is 1 here
new_experience_normalized = (new_experience - np.min(X)) / (np.max(X) - np.min(X))

# Feedforward to predict the salary for the new experience value
hidden_layer_input_new = np.dot(new_experience_normalized, weights_input_hidden)
hidden_layer_output_new = sigmoid(hidden_layer_input_new)
output_layer_input_new = np.dot(hidden_layer_output_new, weights_hidden_output)
predicted_salary_new = sigmoid(output_layer_input_new)

# Print the predicted salary for the new input (after scaling back)
print("\nPredicted Salary for 20 years of experience (in thousands):")
print(predicted_salary_new[0][0] * 1000)  # Multiply by 1000 to scale back to actual salary

Epoch 0, Loss: 48.51377912400656


Epoch 1000, Loss: 48.20845630214606
Epoch 2000, Loss: 48.20845630148357
Epoch 3000, Loss: 48.208456300829724
Epoch 4000, Loss: 48.208456300184295
Epoch 5000, Loss: 48.20845629954712
Epoch 6000, Loss: 48.208456298918094
Epoch 7000, Loss: 48.208456298296994
Epoch 8000, Loss: 48.2084562976837
Epoch 9000, Loss: 48.20845629707808

Predicted Salary for 20 years of experience (in thousands):


999.9999999998348

#NLP
!pip install nltk spacy

Requirement already satisfied: nltk in /usr/local/lib/python3.10/dist-


packages (3.8.1)
Requirement already satisfied: spacy in
/usr/local/lib/python3.10/dist-packages (3.7.5)
Requirement already satisfied: click in
/usr/local/lib/python3.10/dist-packages (from nltk) (8.1.7)
Requirement already satisfied: joblib in
/usr/local/lib/python3.10/dist-packages (from nltk) (1.4.2)
Requirement already satisfied: regex>=2021.8.3 in
/usr/local/lib/python3.10/dist-packages (from nltk) (2024.9.11)
Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-
packages (from nltk) (4.66.5)
Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in
/usr/local/lib/python3.10/dist-packages (from spacy) (3.0.12)
Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in
/usr/local/lib/python3.10/dist-packages (from spacy) (1.0.5)
Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in
/usr/local/lib/python3.10/dist-packages (from spacy) (1.0.10)
Requirement already satisfied: cymem<2.1.0,>=2.0.2 in
/usr/local/lib/python3.10/dist-packages (from spacy) (2.0.8)
Requirement already satisfied: preshed<3.1.0,>=3.0.2 in
/usr/local/lib/python3.10/dist-packages (from spacy) (3.0.9)
Requirement already satisfied: thinc<8.3.0,>=8.2.2 in
/usr/local/lib/python3.10/dist-packages (from spacy) (8.2.5)
Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in
/usr/local/lib/python3.10/dist-packages (from spacy) (1.1.3)
Requirement already satisfied: srsly<3.0.0,>=2.4.3 in
/usr/local/lib/python3.10/dist-packages (from spacy) (2.4.8)
Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in
/usr/local/lib/python3.10/dist-packages (from spacy) (2.0.10)
Requirement already satisfied: weasel<0.5.0,>=0.1.0 in
/usr/local/lib/python3.10/dist-packages (from spacy) (0.4.1)
Requirement already satisfied: typer<1.0.0,>=0.3.0 in
/usr/local/lib/python3.10/dist-packages (from spacy) (0.12.5)
Requirement already satisfied: requests<3.0.0,>=2.13.0 in
/usr/local/lib/python3.10/dist-packages (from spacy) (2.32.3)
Requirement already satisfied: pydantic!=1.8,!=1.8.1,<3.0.0,>=1.7.4 in
/usr/local/lib/python3.10/dist-packages (from spacy) (2.9.2)
Requirement already satisfied: jinja2 in
/usr/local/lib/python3.10/dist-packages (from spacy) (3.1.4)
Requirement already satisfied: setuptools in
/usr/local/lib/python3.10/dist-packages (from spacy) (75.1.0)
Requirement already satisfied: packaging>=20.0 in
/usr/local/lib/python3.10/dist-packages (from spacy) (24.1)
Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in
/usr/local/lib/python3.10/dist-packages (from spacy) (3.4.1)
Requirement already satisfied: numpy>=1.19.0 in
/usr/local/lib/python3.10/dist-packages (from spacy) (1.26.4)
Requirement already satisfied: language-data>=1.2 in
/usr/local/lib/python3.10/dist-packages (from langcodes<4.0.0,>=3.2.0-
>spacy) (1.2.0)
Requirement already satisfied: annotated-types>=0.6.0 in
/usr/local/lib/python3.10/dist-packages (from pydantic!=1.8,!
=1.8.1,<3.0.0,>=1.7.4->spacy) (0.7.0)
Requirement already satisfied: pydantic-core==2.23.4 in
/usr/local/lib/python3.10/dist-packages (from pydantic!=1.8,!
=1.8.1,<3.0.0,>=1.7.4->spacy) (2.23.4)
Requirement already satisfied: typing-extensions>=4.6.1 in
/usr/local/lib/python3.10/dist-packages (from pydantic!=1.8,!
=1.8.1,<3.0.0,>=1.7.4->spacy) (4.12.2)
Requirement already satisfied: charset-normalizer<4,>=2 in
/usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.13.0-
>spacy) (3.4.0)
Requirement already satisfied: idna<4,>=2.5 in
/usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.13.0-
>spacy) (3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in
/usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.13.0-
>spacy) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in
/usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.13.0-
>spacy) (2024.8.30)
Requirement already satisfied: blis<0.8.0,>=0.7.8 in
/usr/local/lib/python3.10/dist-packages (from thinc<8.3.0,>=8.2.2-
>spacy) (0.7.11)
Requirement already satisfied: confection<1.0.0,>=0.0.1 in
/usr/local/lib/python3.10/dist-packages (from thinc<8.3.0,>=8.2.2-
>spacy) (0.1.5)
Requirement already satisfied: shellingham>=1.3.0 in
/usr/local/lib/python3.10/dist-packages (from typer<1.0.0,>=0.3.0-
>spacy) (1.5.4)
Requirement already satisfied: rich>=10.11.0 in
/usr/local/lib/python3.10/dist-packages (from typer<1.0.0,>=0.3.0-
>spacy) (13.9.2)
Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in
/usr/local/lib/python3.10/dist-packages (from weasel<0.5.0,>=0.1.0-
>spacy) (0.19.0)
Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in
/usr/local/lib/python3.10/dist-packages (from weasel<0.5.0,>=0.1.0-
>spacy) (7.0.5)
Requirement already satisfied: MarkupSafe>=2.0 in
/usr/local/lib/python3.10/dist-packages (from jinja2->spacy) (3.0.2)
Requirement already satisfied: marisa-trie>=0.7.7 in
/usr/local/lib/python3.10/dist-packages (from language-data>=1.2-
>langcodes<4.0.0,>=3.2.0->spacy) (1.2.1)
Requirement already satisfied: markdown-it-py>=2.2.0 in
/usr/local/lib/python3.10/dist-packages (from rich>=10.11.0-
>typer<1.0.0,>=0.3.0->spacy) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in
/usr/local/lib/python3.10/dist-packages (from rich>=10.11.0-
>typer<1.0.0,>=0.3.0->spacy) (2.18.0)
Requirement already satisfied: wrapt in
/usr/local/lib/python3.10/dist-packages (from smart-
open<8.0.0,>=5.2.1->weasel<0.5.0,>=0.1.0->spacy) (1.16.0)
Requirement already satisfied: mdurl~=0.1 in
/usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0-
>rich>=10.11.0->typer<1.0.0,>=0.3.0->spacy) (0.1.2)

import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize

text = "Natural Language Processing is exciting!"


tokens = word_tokenize(text)
print(tokens)
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data] Unzipping tokenizers/punkt.zip.

['Natural', 'Language', 'Processing', 'is', 'exciting', '!']
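
A small companion sketch (the example sentence is illustrative): NLTK can also split text into sentences with sent_tokenize, using the same punkt resource downloaded above.

from nltk.tokenize import sent_tokenize

sample = "NLP is fun. It powers chatbots, search engines, and translators."
print(sent_tokenize(sample))
# ['NLP is fun.', 'It powers chatbots, search engines, and translators.']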

#word embedding
import spacy
nlp = spacy.load("en_core_web_md")

doc = nlp("I love programming in Python")


for token in doc:
print(token.text, token.vector[:5]) # Print the first 5 elements
of the word vector

I [ -1.8607 0.15804 -4.1425 -8.6359 -16.955 ]


love [ 2.0565 -3.2259 -5.7364 -6.146 0.15748]
programming [-1.7244 -1.7096 1.1533 -0.63927 2.6726 ]
in [-3.7766 0.69426 -3.3805 2.705 8.6019 ]
Python [-1.2606 0.065898 6.0885 -0.22722 0.83154 ]
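
A short follow-up sketch (the example words are illustrative): the vectors in en_core_web_md also support similarity comparisons, which is the usual reason to load the medium model.

word1 = nlp("python")
word2 = nlp("programming")
word3 = nlp("banana")

print(word1.similarity(word2))   # expected to be relatively high - related concepts
print(word1.similarity(word3))   # expected to be lower - unrelated concepts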

!python -m spacy download en_core_web_md

Collecting en-core-web-md==3.7.1
Downloading
https://github.com/explosion/spacy-models/releases/download/en_core_we
b_md-3.7.1/en_core_web_md-3.7.1-py3-none-any.whl (42.8 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 42.8/42.8 MB 12.1 MB/s eta
0:00:00
Requirement already satisfied: spacy<3.8.0,>=3.7.2 in
/usr/local/lib/python3.10/dist-packages (from en-core-web-md==3.7.1)
(3.7.5)
Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (3.0.12)
Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (1.0.5)
Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (1.0.10)
Requirement already satisfied: cymem<2.1.0,>=2.0.2 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (2.0.8)
Requirement already satisfied: preshed<3.1.0,>=3.0.2 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (3.0.9)
Requirement already satisfied: thinc<8.3.0,>=8.2.2 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (8.2.5)
Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (1.1.3)
Requirement already satisfied: srsly<3.0.0,>=2.4.3 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (2.4.8)
Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (2.0.10)
Requirement already satisfied: weasel<0.5.0,>=0.1.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (0.4.1)
Requirement already satisfied: typer<1.0.0,>=0.3.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (0.12.5)
Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (4.66.5)
Requirement already satisfied: requests<3.0.0,>=2.13.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (2.32.3)
Requirement already satisfied: pydantic!=1.8,!=1.8.1,<3.0.0,>=1.7.4 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (2.9.2)
Requirement already satisfied: jinja2 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (3.1.4)
Requirement already satisfied: setuptools in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (75.1.0)
Requirement already satisfied: packaging>=20.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (24.1)
Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (3.4.1)
Requirement already satisfied: numpy>=1.19.0 in
/usr/local/lib/python3.10/dist-packages (from spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (1.26.4)
Requirement already satisfied: language-data>=1.2 in
/usr/local/lib/python3.10/dist-packages (from langcodes<4.0.0,>=3.2.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (1.2.0)
Requirement already satisfied: annotated-types>=0.6.0 in
/usr/local/lib/python3.10/dist-packages (from pydantic!=1.8,!
=1.8.1,<3.0.0,>=1.7.4->spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1)
(0.7.0)
Requirement already satisfied: pydantic-core==2.23.4 in
/usr/local/lib/python3.10/dist-packages (from pydantic!=1.8,!
=1.8.1,<3.0.0,>=1.7.4->spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1)
(2.23.4)
Requirement already satisfied: typing-extensions>=4.6.1 in
/usr/local/lib/python3.10/dist-packages (from pydantic!=1.8,!
=1.8.1,<3.0.0,>=1.7.4->spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1)
(4.12.2)
Requirement already satisfied: charset-normalizer<4,>=2 in
/usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.13.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (3.4.0)
Requirement already satisfied: idna<4,>=2.5 in
/usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.13.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in
/usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.13.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in
/usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.13.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (2024.8.30)
Requirement already satisfied: blis<0.8.0,>=0.7.8 in
/usr/local/lib/python3.10/dist-packages (from thinc<8.3.0,>=8.2.2-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (0.7.11)
Requirement already satisfied: confection<1.0.0,>=0.0.1 in
/usr/local/lib/python3.10/dist-packages (from thinc<8.3.0,>=8.2.2-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (0.1.5)
Requirement already satisfied: click>=8.0.0 in
/usr/local/lib/python3.10/dist-packages (from typer<1.0.0,>=0.3.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (8.1.7)
Requirement already satisfied: shellingham>=1.3.0 in
/usr/local/lib/python3.10/dist-packages (from typer<1.0.0,>=0.3.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (1.5.4)
Requirement already satisfied: rich>=10.11.0 in
/usr/local/lib/python3.10/dist-packages (from typer<1.0.0,>=0.3.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (13.9.2)
Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in
/usr/local/lib/python3.10/dist-packages (from weasel<0.5.0,>=0.1.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (0.19.0)
Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in
/usr/local/lib/python3.10/dist-packages (from weasel<0.5.0,>=0.1.0-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (7.0.5)
Requirement already satisfied: MarkupSafe>=2.0 in
/usr/local/lib/python3.10/dist-packages (from jinja2-
>spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1) (3.0.2)
Requirement already satisfied: marisa-trie>=0.7.7 in
/usr/local/lib/python3.10/dist-packages (from language-data>=1.2-
>langcodes<4.0.0,>=3.2.0->spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1)
(1.2.1)
Requirement already satisfied: markdown-it-py>=2.2.0 in
/usr/local/lib/python3.10/dist-packages (from rich>=10.11.0-
>typer<1.0.0,>=0.3.0->spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1)
(3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in
/usr/local/lib/python3.10/dist-packages (from rich>=10.11.0-
>typer<1.0.0,>=0.3.0->spacy<3.8.0,>=3.7.2->en-core-web-md==3.7.1)
(2.18.0)
Requirement already satisfied: wrapt in
/usr/local/lib/python3.10/dist-packages (from smart-
open<8.0.0,>=5.2.1->weasel<0.5.0,>=0.1.0->spacy<3.8.0,>=3.7.2->en-
core-web-md==3.7.1) (1.16.0)
Requirement already satisfied: mdurl~=0.1 in
/usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0-
>rich>=10.11.0->typer<1.0.0,>=0.3.0->spacy<3.8.0,>=3.7.2->en-core-web-
md==3.7.1) (0.1.2)
Installing collected packages: en-core-web-md
Successfully installed en-core-web-md-3.7.1
✔ Download and installation successful
You can now load the package via spacy.load('en_core_web_md')
⚠ Restart to reload dependencies
If you are in a Jupyter or Colab notebook, you may need to restart
Python in
order to load all the package's dependencies. You can do this by
selecting the
'Restart kernel' or 'Restart runtime' option.

doc = nlp("Apple is looking at buying U.K. startup for $1 billion.")


for ent in doc.ents:
print(ent.text, ent.label_)

Apple ORG
U.K. GPE
$1 billion MONEY

#nlp with transformers
from transformers import pipeline

generator = pipeline('text-generation', model='gpt2')


print(generator("Natural Language Processing is", max_length=30,
num_return_sequences=1))

/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/
_token.py:89: UserWarning:
The secret `HF_TOKEN` does not exist in your Colab secrets.
To authenticate with the Hugging Face Hub, create a token in your
settings tab (https://huggingface.co/settings/tokens), set it as
secret in your Google Colab and restart your session.
You will be able to reuse this secret in all of your notebooks.
Please note that authentication is recommended but still optional to
access public models or datasets.
warnings.warn(
{"model_id":"6e8cd9bf68754077ad0f2d91be22cce9","version_major":2,"vers
ion_minor":0}

{"model_id":"7e63de39db8a46cfa36f293c671b67b3","version_major":2,"vers
ion_minor":0}

{"model_id":"2ab65f49a8654615a8c9160715388afb","version_major":2,"vers
ion_minor":0}

{"model_id":"9ee3a9316e194286862912e1ba0827a5","version_major":2,"vers
ion_minor":0}

{"model_id":"921395cc87664c36a7c75ff61b0dadef","version_major":2,"vers
ion_minor":0}

{"model_id":"ecaba4e3edd44b2a9e145c5c59652baa","version_major":2,"vers
ion_minor":0}

{"model_id":"23988d2fb7c34cf99d7c8638c1b12c6b","version_major":2,"vers
ion_minor":0}

/usr/local/lib/python3.10/dist-packages/transformers/
tokenization_utils_base.py:1601: FutureWarning:
`clean_up_tokenization_spaces` was not set. It will be set to `True`
by default. This behavior will be depracted in transformers v4.45, and
will be then set to `False` by default. For more details check this
issue: https://github.com/huggingface/transformers/issues/31884
warnings.warn(
Hardware accelerator e.g. GPU is available in the environment, but no
`device` argument is passed to the `Pipeline` object. Model will be on
CPU.
Truncation was not explicitly activated but `max_length` is provided a
specific value, please use `truncation=True` to explicitly truncate
examples to max length. Defaulting to 'longest_first' truncation
strategy. If you encode pairs of sequences (GLUE-style) with the
tokenizer you can select this strategy more precisely by providing a
specific strategy to `truncation`.
Setting `pad_token_id` to `eos_token_id`:50256 for open-end
generation.

[{'generated_text': "Natural Language Processing is the most popular course in Microsoft Word (and, admittedly, most Word books), one of the best. It's the perfect course"}]

#sentiment analysis

import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import string
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
import pandas as pd

df = pd.read_csv("IMDB Dataset.csv")
# Function to preprocess text
def preprocess_text(text):
    # Tokenize and lower the text
    tokens = word_tokenize(text.lower())

    # Remove punctuation and stopwords
    tokens = [word for word in tokens if word.isalpha() and word not in stopwords.words('english')]

    # Lemmatize the words
    lemmatizer = WordNetLemmatizer()
    tokens = [lemmatizer.lemmatize(word) for word in tokens]

    return ' '.join(tokens)

# Apply preprocessing to the reviews column


df['processed_review'] = df['review'].apply(preprocess_text)

[nltk_data] Downloading package punkt to /root/nltk_data...


[nltk_data] Package punkt is already up-to-date!
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
[nltk_data] Downloading package wordnet to /root/nltk_data...
[nltk_data] Package wordnet is already up-to-date!

import matplotlib.pyplot as plt


import seaborn as sns

# Plot sentiment distribution


sns.countplot(x='sentiment', data=df)
plt.title('Distribution of Sentiments')
plt.show()
from sklearn.feature_extraction.text import TfidfVectorizer

# Create the TF-IDF vectorizer


tfidf = TfidfVectorizer(max_features=3000)  # Keep the top 3000 most common words
X = tfidf.fit_transform(df['processed_review'])
y = df['sentiment']

from sklearn.model_selection import train_test_split


from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3, random_state=42)

# Train the logistic regression model


model = LogisticRegression()
model.fit(X_train, y_train)

# Predict on the test set


y_pred = model.predict(X_test)
# Evaluate the model
print("Accuracy:", accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))

Accuracy: 0.8297587131367292
precision recall f1-score support

negative 0.86 0.80 0.83 383


positive 0.80 0.87 0.83 363

accuracy 0.83 746


macro avg 0.83 0.83 0.83 746
weighted avg 0.83 0.83 0.83 746

from sklearn.metrics import confusion_matrix


import seaborn as sns

# Plot the confusion matrix


conf_matrix = confusion_matrix(y_test, y_pred)
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
            xticklabels=['Negative', 'Positive'],
            yticklabels=['Negative', 'Positive'])  # binary sentiment: negative/positive
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
from sklearn.model_selection import GridSearchCV

# Example of tuning Logistic Regression's regularization parameter (C)


parameters = {'C': [0.01, 0.1, 1, 10]}
grid_search = GridSearchCV(LogisticRegression(), parameters, cv=5)
grid_search.fit(X_train, y_train)

print("Best parameters:", grid_search.best_params_)

Best parameters: {'C': 10}
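
A small follow-on sketch (the review text is illustrative): once the vectorizer and classifier are fitted, a brand-new review can be scored by pushing it through the same preprocessing and TF-IDF transform.

new_review = "The film was beautifully shot and the acting was superb."
new_vec = tfidf.transform([preprocess_text(new_review)])

print(model.predict(new_vec))          # predicted label, e.g. 'positive'
print(model.predict_proba(new_vec))    # class probabilities for [negative, positive]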

#nlg
import pandas as pd

# Sample data
data = {
'Date': ['2024-01-01', '2024-01-01', '2024-01-02', '2024-01-02'],
'Product': ['Product A', 'Product B', 'Product A', 'Product C'],
'Units Sold': [120, 150, 130, 90],
'Revenue': [1200, 2100, 1300, 900],
'Region': ['North', 'South', 'West', 'East']
}
df = pd.DataFrame(data)

# Aggregate data by product


product_sales = df.groupby('Product').agg({'Units Sold': 'sum',
'Revenue': 'sum'}).reset_index()

# Aggregate data by region


region_sales = df.groupby('Region').agg({'Units Sold': 'sum',
'Revenue': 'sum'}).reset_index()

print(product_sales)
print(region_sales)

Product Units Sold Revenue


0 Product A 250 2500
1 Product B 150 2100
2 Product C 90 900
Region Units Sold Revenue
0 East 90 900
1 North 120 1200
2 South 150 2100
3 West 130 1300

def generate_sales_report(product_sales, region_sales):
    # Generate product sales summary
    product_summary = "Product Sales Summary:\n"
    for index, row in product_sales.iterrows():
        product_summary += f"- {row['Product']} sold {row['Units Sold']} units, generating a revenue of ${row['Revenue']}.\n"

    # Generate region sales summary
    region_summary = "\nRegional Sales Summary:\n"
    for index, row in region_sales.iterrows():
        region_summary += f"- {row['Region']} region sold {row['Units Sold']} units, generating ${row['Revenue']} in revenue.\n"

    # Combine summaries
    report = product_summary + region_summary
    return report

# Generate report
sales_report = generate_sales_report(product_sales, region_sales)
print(sales_report)

Product Sales Summary:


- Product A sold 250 units, generating a revenue of $2500.
- Product B sold 150 units, generating a revenue of $2100.
- Product C sold 90 units, generating a revenue of $900.

Regional Sales Summary:


- East region sold 90 units, generating $900 in revenue.
- North region sold 120 units, generating $1200 in revenue.
- South region sold 150 units, generating $2100 in revenue.
- West region sold 130 units, generating $1300 in revenue.

# Optional: Add email functionality to send reports


import smtplib
from email.mime.text import MIMEText

def send_email(report, recipient):
    msg = MIMEText(report)
    msg['Subject'] = 'Sales Report'
    msg['From'] = '[email protected]'
    msg['To'] = recipient

    with smtplib.SMTP('smtp.example.com') as server:
        server.login('[email protected]', 'password')
        server.sendmail(msg['From'], [msg['To']], msg.as_string())

# Send the report via email


send_email(sales_report, '[email protected]')

#Generative AI

!pip install transformers


!pip install torch

Requirement already satisfied: transformers in


/usr/local/lib/python3.10/dist-packages (4.44.2)
Requirement already satisfied: filelock in
/usr/local/lib/python3.10/dist-packages (from transformers) (3.16.1)
Requirement already satisfied: huggingface-hub<1.0,>=0.23.2 in
/usr/local/lib/python3.10/dist-packages (from transformers) (0.24.7)
Requirement already satisfied: numpy>=1.17 in
/usr/local/lib/python3.10/dist-packages (from transformers) (1.26.4)
Requirement already satisfied: packaging>=20.0 in
/usr/local/lib/python3.10/dist-packages (from transformers) (24.1)
Requirement already satisfied: pyyaml>=5.1 in
/usr/local/lib/python3.10/dist-packages (from transformers) (6.0.2)
Requirement already satisfied: regex!=2019.12.17 in
/usr/local/lib/python3.10/dist-packages (from transformers)
(2024.9.11)
Requirement already satisfied: requests in
/usr/local/lib/python3.10/dist-packages (from transformers) (2.32.3)
Requirement already satisfied: safetensors>=0.4.1 in
/usr/local/lib/python3.10/dist-packages (from transformers) (0.4.5)
Requirement already satisfied: tokenizers<0.20,>=0.19 in
/usr/local/lib/python3.10/dist-packages (from transformers) (0.19.1)
Requirement already satisfied: tqdm>=4.27 in
/usr/local/lib/python3.10/dist-packages (from transformers) (4.66.5)
Requirement already satisfied: fsspec>=2023.5.0 in
/usr/local/lib/python3.10/dist-packages (from huggingface-
hub<1.0,>=0.23.2->transformers) (2024.6.1)
Requirement already satisfied: typing-extensions>=3.7.4.3 in
/usr/local/lib/python3.10/dist-packages (from huggingface-
hub<1.0,>=0.23.2->transformers) (4.12.2)
Requirement already satisfied: charset-normalizer<4,>=2 in
/usr/local/lib/python3.10/dist-packages (from requests->transformers)
(3.4.0)
Requirement already satisfied: idna<4,>=2.5 in
/usr/local/lib/python3.10/dist-packages (from requests->transformers)
(3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in
/usr/local/lib/python3.10/dist-packages (from requests->transformers)
(2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in
/usr/local/lib/python3.10/dist-packages (from requests->transformers)
(2024.8.30)
Requirement already satisfied: torch in
/usr/local/lib/python3.10/dist-packages (2.4.1+cu121)
Requirement already satisfied: filelock in
/usr/local/lib/python3.10/dist-packages (from torch) (3.16.1)
Requirement already satisfied: typing-extensions>=4.8.0 in
/usr/local/lib/python3.10/dist-packages (from torch) (4.12.2)
Requirement already satisfied: sympy in
/usr/local/lib/python3.10/dist-packages (from torch) (1.13.3)
Requirement already satisfied: networkx in
/usr/local/lib/python3.10/dist-packages (from torch) (3.4.1)
Requirement already satisfied: jinja2 in
/usr/local/lib/python3.10/dist-packages (from torch) (3.1.4)
Requirement already satisfied: fsspec in
/usr/local/lib/python3.10/dist-packages (from torch) (2024.6.1)
Requirement already satisfied: MarkupSafe>=2.0 in
/usr/local/lib/python3.10/dist-packages (from jinja2->torch) (3.0.2)
Requirement already satisfied: mpmath<1.4,>=1.1.0 in
/usr/local/lib/python3.10/dist-packages (from sympy->torch) (1.3.0)

from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load pre-trained model and tokenizer


model_name = 'gpt2'  # You can use 'gpt2-medium', 'gpt2-large', etc. for bigger models
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# Set the model to evaluation mode


model.eval()
/usr/local/lib/python3.10/dist-packages/transformers/
tokenization_utils_base.py:1601: FutureWarning:
`clean_up_tokenization_spaces` was not set. It will be set to `True`
by default. This behavior will be depracted in transformers v4.45, and
will be then set to `False` by default. For more details check this
issue: https://github.com/huggingface/transformers/issues/31884
warnings.warn(

GPT2LMHeadModel(
(transformer): GPT2Model(
(wte): Embedding(50257, 768)
(wpe): Embedding(1024, 768)
(drop): Dropout(p=0.1, inplace=False)
(h): ModuleList(
(0-11): 12 x GPT2Block(
(ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
(attn): GPT2SdpaAttention(
(c_attn): Conv1D()
(c_proj): Conv1D()
(attn_dropout): Dropout(p=0.1, inplace=False)
(resid_dropout): Dropout(p=0.1, inplace=False)
)
(ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
(mlp): GPT2MLP(
(c_fc): Conv1D()
(c_proj): Conv1D()
(act): NewGELUActivation()
(dropout): Dropout(p=0.1, inplace=False)
)
)
)
(ln_f): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
)
(lm_head): Linear(in_features=768, out_features=50257, bias=False)
)

import torch

def generate_text(prompt, max_length=100, temperature=0.7):
    # Tokenize input prompt
    inputs = tokenizer.encode(prompt, return_tensors='pt')

    # Generate text with GPT-2
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_length=max_length,
            temperature=temperature,  # Controls creativity (lower values = less random)
            num_return_sequences=1,   # Number of sequences to generate
            no_repeat_ngram_size=2,   # Prevents repeating n-grams
            top_p=0.95,               # Nucleus sampling, helps in generating coherent text
            top_k=50                  # Limits the number of choices at each step
        )

    # Decode the generated text
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text

# Input your prompt


prompt = "Once upon a time in a faraway land"
generated_text = generate_text(prompt, max_length=100)

# Print the generated text


print(generated_text)

/usr/local/lib/python3.10/dist-packages/transformers/generation/
configuration_utils.py:567: UserWarning: `do_sample` is set to
`False`. However, `temperature` is set to `0.7` -- this flag is only
used in sample-based generation modes. You should set `do_sample=True`
or unset `temperature`.
warnings.warn(
/usr/local/lib/python3.10/dist-packages/transformers/generation/config
uration_utils.py:572: UserWarning: `do_sample` is set to `False`.
However, `top_p` is set to `0.95` -- this flag is only used in sample-
based generation modes. You should set `do_sample=True` or unset
`top_p`.
warnings.warn(
The attention mask and the pad token id were not set. As a
consequence, you may observe unexpected behavior. Please pass your
input's `attention_mask` to obtain reliable results.
Setting `pad_token_id` to `eos_token_id`:50256 for open-end
generation.
The attention mask is not set and cannot be inferred from input
because pad token is same as eos token. As a consequence, you may
observe unexpected behavior. Please pass your input's `attention_mask`
to obtain reliable results.

Once upon a time in a faraway land, the world was a land of peace and
harmony. The world of the gods was the land that was to be the home of
all.

The world that had been the center of civilization was now a world
where the Gods were to rule. They were the ones who had to make the
most of their power. And they were not the only ones. There were many
other gods, too. But the one who was most powerful was none other than

# Experiment with different prompts and parameters
prompt = "The future of AI is"
generated_text = generate_text(prompt, max_length=200,
temperature=1.0)
print(generated_text)

The attention mask and the pad token id were not set. As a
consequence, you may observe unexpected behavior. Please pass your
input's `attention_mask` to obtain reliable results.
Setting `pad_token_id` to `eos_token_id`:50256 for open-end
generation.

The future of AI is uncertain. The future is not yet clear. But it is


certainly possible that AI will be able to solve many of the problems
that we face today.

The Future of Artificial Intelligence


...
(1) The Future Of Artificial Life. (2) Artificial intelligence will
become a reality. It will not be a "new" technology. Rather, it will
evolve into a new kind of technology that will enable us to live in a
world where we can live with dignity and respect for the human being.
We will live a life of dignity, respect, and dignity for ourselves and
for others. This is the future. And it's not just the people who will
have to suffer. There will also be people in the world who are not
human. They will suffer because they are human, because of their own
choices, or because their lives are in danger. These are the
consequences of our choices. In the end, we will all be human beings.
If

#GEN AI project
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load pre-trained GPT-2 model and tokenizer


model_name = 'gpt2'  # Can change to 'gpt2-medium' or larger models for more powerful results
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# Set the model to evaluation mode


model.eval()

/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/
_token.py:89: UserWarning:
The secret `HF_TOKEN` does not exist in your Colab secrets.
To authenticate with the Hugging Face Hub, create a token in your
settings tab (https://huggingface.co/settings/tokens), set it as
secret in your Google Colab and restart your session.
You will be able to reuse this secret in all of your notebooks.
Please note that authentication is recommended but still optional to
access public models or datasets.
warnings.warn(

{"model_id":"3d54c24b14374c419fd831c3e154bae6","version_major":2,"vers
ion_minor":0}

{"model_id":"8bbfdb23ab944fc2a344c59a69870904","version_major":2,"vers
ion_minor":0}

{"model_id":"cf457aced37e48b9a61326a1af342e0b","version_major":2,"vers
ion_minor":0}

{"model_id":"0d1f21709422421f993e4420f12e07d2","version_major":2,"vers
ion_minor":0}

{"model_id":"b460ce814baa4806a77b89ac2d15e7b7","version_major":2,"vers
ion_minor":0}

{"model_id":"547d3c73d40c4d3c90cdd7640d2d870b","version_major":2,"vers
ion_minor":0}

{"model_id":"529130c46bfe4c738f101dc143acfe5f","version_major":2,"vers
ion_minor":0}

/usr/local/lib/python3.10/dist-packages/transformers/
tokenization_utils_base.py:1601: FutureWarning:
`clean_up_tokenization_spaces` was not set. It will be set to `True`
by default. This behavior will be depracted in transformers v4.45, and
will be then set to `False` by default. For more details check this
issue: https://github.com/huggingface/transformers/issues/31884
warnings.warn(

GPT2LMHeadModel(
(transformer): GPT2Model(
(wte): Embedding(50257, 768)
(wpe): Embedding(1024, 768)
(drop): Dropout(p=0.1, inplace=False)
(h): ModuleList(
(0-11): 12 x GPT2Block(
(ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
(attn): GPT2SdpaAttention(
(c_attn): Conv1D()
(c_proj): Conv1D()
(attn_dropout): Dropout(p=0.1, inplace=False)
(resid_dropout): Dropout(p=0.1, inplace=False)
)
(ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
(mlp): GPT2MLP(
(c_fc): Conv1D()
(c_proj): Conv1D()
(act): NewGELUActivation()
(dropout): Dropout(p=0.1, inplace=False)
)
)
)
(ln_f): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
)
(lm_head): Linear(in_features=768, out_features=50257, bias=False)
)

def create_story_prompt(character_name, traits, setting):
    prompt = f"Create a story about a character named {character_name}. "
    prompt += f"{character_name} is {traits}. "
    prompt += f"The story is set in {setting}. "
    prompt += "Here's how the story begins: "
    return prompt

import torch

def generate_story(prompt, max_length=200, temperature=0.7):


inputs = tokenizer.encode(prompt, return_tensors='pt')

# Generate text
with torch.no_grad():
output = model.generate(
inputs,
max_length=max_length,
temperature=temperature,
top_p=0.9,
top_k=50,
num_return_sequences=1
)

# Decode the output


story = tokenizer.decode(output[0], skip_special_tokens=True)
return story

# Input: Character details


character_name = input("Enter character's name: ")
traits = input("Describe your character (e.g., brave, shy, clever): ")
setting = input("Where is the story set? (e.g., forest, city, fantasy world): ")

# Create prompt
prompt = create_story_prompt(character_name, traits, setting)

# Generate the story


story = generate_story(prompt, max_length=200, temperature=0.7)
print("\nGenerated Story:\n")
print(story)

Enter character's name: Prajwal


Describe your character (e.g., brave, shy, clever): shy
Where is the story set? (e.g., forest, city, fantasy world): forest

/usr/local/lib/python3.10/dist-packages/transformers/generation/
configuration_utils.py:567: UserWarning: `do_sample` is set to
`False`. However, `temperature` is set to `0.7` -- this flag is only
used in sample-based generation modes. You should set `do_sample=True`
or unset `temperature`.
warnings.warn(
/usr/local/lib/python3.10/dist-packages/transformers/generation/config
uration_utils.py:572: UserWarning: `do_sample` is set to `False`.
However, `top_p` is set to `0.9` -- this flag is only used in sample-
based generation modes. You should set `do_sample=True` or unset
`top_p`.
warnings.warn(
The attention mask and the pad token id were not set. As a
consequence, you may observe unexpected behavior. Please pass your
input's `attention_mask` to obtain reliable results.
Setting `pad_token_id` to `eos_token_id`:50256 for open-end
generation.
The attention mask is not set and cannot be inferred from input
because pad token is same as eos token. As a consequence, you may
observe unexpected behavior. Please pass your input's `attention_mask`
to obtain reliable results.

Generated Story:

Create a story about a character named Prajwal. Prajwal is shy. The


story is set in forest. Here's how the story begins: A young boy
named Prajwal is a young boy. He is a boy who is a boy. He is a boy
who is a boy. Prajwal is a boy who is a boy. Prajwal is a boy who is a
boy. Prajwal is a boy who is a boy. Prajwal is a boy who is a boy.
Prajwal is a boy who is a boy. Prajwal is a boy who is a boy. Prajwal
is a boy who is a boy. Prajwal is a boy who is a boy. Prajwal is a boy
who is a boy. Prajwal is a boy who is a boy. Prajwal is a boy who is a
boy. Prajwal is a boy who is a boy. Prajwal is a
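
The generated story collapses into repetition because generate_story decodes greedily (the warnings above confirm sampling is off) and sets no repetition constraints. A hedged variant (parameter values are illustrative) that enables sampling, blocks repeated n-grams, and passes the attention mask the warnings ask for:

def generate_story_sampled(prompt, max_length=200, temperature=0.7):
    enc = tokenizer(prompt, return_tensors='pt')
    with torch.no_grad():
        output = model.generate(
            enc['input_ids'],
            attention_mask=enc['attention_mask'],
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,             # sampling avoids the greedy repetition loop
            no_repeat_ngram_size=3,     # block verbatim 3-gram repeats
            max_length=max_length,
            temperature=temperature,
            top_p=0.9,
            top_k=50,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(generate_story_sampled(prompt, max_length=200, temperature=0.7))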

#use hugging face

from transformers import pipeline


pipe = pipeline("question-answering", model="deepset/roberta-base-squad2")

{"model_id":"1e13c0c9bef64362a6ca97a030ef9b0b","version_major":2,"vers
ion_minor":0}

{"model_id":"92516f2e2c824bce926801de5ec8bbf3","version_major":2,"vers
ion_minor":0}

{"model_id":"b43b43791365448381b84434c1cf9408","version_major":2,"vers
ion_minor":0}

{"model_id":"cadf58dd9517476d8911a42e17bdbece","version_major":2,"vers
ion_minor":0}

{"model_id":"073965278521493a94df8753c7dc4123","version_major":2,"vers
ion_minor":0}

{"model_id":"1892996bb7074026b8de5386b441a348","version_major":2,"vers
ion_minor":0}

Hardware accelerator e.g. GPU is available in the environment, but no


`device` argument is passed to the `Pipeline` object. Model will be on
CPU.

from transformers import AutoTokenizer, AutoModelForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
model = AutoModelForQuestionAnswering.from_pretrained("deepset/roberta-base-squad2")

from transformers import pipeline

# Load a pre-trained question-answering pipeline


pipe = pipeline("question-answering", model="deepset/roberta-base-squad2")

# Define the context (the text from which the model will answer questions)
context = """
Hugging Face is a technology company based in New York and Paris.
It is known for creating tools that democratize artificial
intelligence by making
models and datasets accessible to developers worldwide.
"""

# Ask a question based on the context


result = pipe(question="Where is Hugging Face based?",
context=context)
# Print the result
print(result)

Hardware accelerator e.g. GPU is available in the environment, but no


`device` argument is passed to the `Pipeline` object. Model will be on
CPU.

{'score': 0.9414467811584473, 'start': 47, 'end': 65, 'answer': 'New York and Paris'}

# Normal steps in ML programming


# Read the data with read_csv()
# Split the data into train and test sets (e.g., 70:30 or 80:20)
import pandas as pd
import numpy as np

df = pd.read_csv("employee_salary.csv")
df.head()

{"summary":"{\n \"name\": \"df\",\n \"rows\": 100,\n \"fields\": [\


n {\n \"column\": \"YearsExperience\",\n \"properties\":
{\n \"dtype\": \"number\",\n \"std\": 5,\n
\"min\": 1,\n \"max\": 19,\n \"num_unique_values\": 19,\
n \"samples\": [\n 13,\n 10,\n 18\n
],\n \"semantic_type\": \"\",\n \"description\": \"\"\n
}\n },\n {\n \"column\": \"Salary\",\n \"properties\":
{\n \"dtype\": \"number\",\n \"std\":
11826.176470961964,\n \"min\": 26646.236892447287,\n
\"max\": 77915.72387431971,\n \"num_unique_values\": 100,\n
\"samples\": [\n 34196.958506322684,\n
54530.223291376926,\n 59361.473785062175\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\
n }\n ]\n}","type":"dataframe","variable_name":"df"}

x = df['YearsExperience'].values

y = df['Salary']

----------------------------------------------------------------------
-----
ValueError Traceback (most recent call
last)
<ipython-input-17-56a6a2b0b003> in <cell line: 2>()
1 x = df['YearsExperience'].values
----> 2 x = x.reshape(30, 1)
3 y = df['Salary']
4
5 y

ValueError: cannot reshape array of size 100 into shape (30,1)

x = x.values.reshape(-1, 1)
y = y.values.reshape(-1, 1)

----------------------------------------------------------------------
-----
AttributeError Traceback (most recent call
last)
<ipython-input-12-be81df8663df> in <cell line: 1>()
----> 1 x = x.values.reshape(-1, 1)
2 y = y.values.reshape(-1, 1)

AttributeError: 'numpy.ndarray' object has no attribute 'values'

y_pre = model.predict([14])  # NOTE: 'model' has not been defined or fitted at this point
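
The cells above stop at errors and no regression model is actually fitted. A hedged sketch of the flow the notes describe (the 80:20 split and LinearRegression are assumed choices, not fixed by the notes):

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

x = df['YearsExperience'].values.reshape(-1, 1)   # 100 rows, so reshape(-1, 1) rather than (30, 1)
y = df['Salary'].values

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

model = LinearRegression()
model.fit(x_train, y_train)

print(model.score(x_test, y_test))   # R^2 on the held-out data
print(model.predict([[14]]))         # predicted salary for 14 years of experience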

# Use a pipeline as a high-level helper


from transformers import pipeline

pipe = pipeline("question-answering", model="deepset/roberta-base-squad2")

{"model_id":"262cf4a731ce4d0ca173a518db0bc5f0","version_major":2,"vers
ion_minor":0}

{"model_id":"33b9a2375d3f4a3ca9e7e291370e7f61","version_major":2,"vers
ion_minor":0}

{"model_id":"542e62df216c4abba4e1791bb73fc753","version_major":2,"vers
ion_minor":0}

{"model_id":"5caabc08145e4952b7d3228eb4b5f130","version_major":2,"vers
ion_minor":0}

{"model_id":"cfaf796834264a04beeba567b7e132ee","version_major":2,"vers
ion_minor":0}

{"model_id":"f5ae44d932a54390be768f15963eeb92","version_major":2,"vers
ion_minor":0}
/usr/local/lib/python3.10/dist-packages/transformers/
tokenization_utils_base.py:1601: FutureWarning:
`clean_up_tokenization_spaces` was not set. It will be set to `True`
by default. This behavior will be depracted in transformers v4.45, and
will be then set to `False` by default. For more details check this
issue: https://github.com/huggingface/transformers/issues/31884
warnings.warn(
Hardware accelerator e.g. GPU is available in the environment, but no
`device` argument is passed to the `Pipeline` object. Model will be on
CPU.

# Load model directly


from transformers import AutoTokenizer, AutoModelForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
model = AutoModelForQuestionAnswering.from_pretrained("deepset/roberta-base-squad2")
