import pandas as pd

df = pd.read_csv('employee_salary.csv')
df.head()
d = {
    'a': 1,
    'b': 2
}
d.update({'c': 1})
for k, v in d.items():
    print(k, v)
a 1
b 2
c 1
d = {}
while True:
    z = int(input("1-add, 2- search"))
    if z == 1:
        n = input("name :")
        c = int(input("contact :"))
        d.update({n: c})
    else:
        n = input("name: ")
        for k, v in d.items():
            if k == n:
                print(v)
1-add, 2- search1
name :subhash
contact :1234567
1-add, 2- search1
name :xyz
contact :0987654
1-add, 2- search2
name: subhash
1234567
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-1-6e6121975a20> in <cell line: 3>()
      3 while True:
      4
----> 5     z = int(input("1-add, 2- search"))
      6     if z == 1:
      7         n = input("name :")

/usr/local/lib/python3.10/dist-packages/ipykernel/kernelbase.py in raw_input(self, prompt)
    849                 "raw_input was called, but this frontend does not support input requests."
    850             )
--> 851         return self._input_request(str(prompt),
    852             self._parent_ident,
    853             self._parent_header,

/usr/local/lib/python3.10/dist-packages/ipykernel/kernelbase.py in _input_request(self, prompt, ident, parent, password)
    893         except KeyboardInterrupt:
    894             # re-raise KeyboardInterrupt, to truncate traceback
--> 895             raise KeyboardInterrupt("Interrupted by user") from None
    896         except Exception as e:
    897             self.log.warning("Invalid Message:", exc_info=True)
Python Basics
# Integer
x = 10
print(type(x)) # <class 'int'>
# Float
y = 3.14
print(type(y)) # <class 'float'>
# String
z = "Hello"
print(type(z)) # <class 'str'>
# Boolean
is_valid = True
print(type(is_valid)) # <class 'bool'>
<class 'int'>
<class 'float'>
<class 'str'>
<class 'bool'>
# Conditional Statements
x = 10
if x > 5:
    print("x is greater than 5")
elif x == 5:
    print("x is equal to 5")
else:
    print("x is less than 5")

# For Loop
for i in range(5):
    print(i)

# While Loop
i = 0
while i < 5:
    print(i)
    i += 1
x is greater than 5
0
1
2
3
4
0
1
2
3
4
# Function Definition
def greet(name):
    return f"Hello, {name}!"

print(greet("Alice"))
Hello, Alice!
my_list = [1, 2, 3, 4]
my_list.append(5)
print(my_list) # [1, 2, 3, 4, 5]
my_list[0] = 10
print(my_list) # [10, 2, 3, 4, 5]
[1, 2, 3, 4, 5]
[10, 2, 3, 4, 5]
my_tuple = (1, 2, 3, 4)
# my_tuple[0] = 10 # Will raise an error
print(my_tuple)
(1, 2, 3, 4)
my_dict = {'name': 'Alice', 'age': 25}  # creation cell reconstructed; starting age inferred from the update below
print(my_dict['name'])  # Alice
my_dict['age'] = 26
print(my_dict)  # {'name': 'Alice', 'age': 26}
Alice
{'name': 'Alice', 'age': 26}
my_set = {1, 2, 3, 3, 4}
print(my_set) # {1, 2, 3, 4} (duplicates removed)
{1, 2, 3, 4}
Machine Learning
import numpy as np

# Array creation (reconstructed from the printed output below) and operations
array = np.array([1, 2, 3, 4, 5])
print("Array:", array)
print("Array * 2:", array * 2)
print("Mean of array:", np.mean(array))
print("Reshaped array:", array.reshape(5, 1))
Array: [1 2 3 4 5]
Array * 2: [ 2 4 6 8 10]
Mean of array: 3.0
Reshaped array: [[1]
[2]
[3]
[4]
[5]]
import pandas as pd

# Creating a DataFrame
data = {'Name': ['John', 'Anna', 'Peter', 'Linda'],
        'Age': [28, 24, 35, 32],
        'City': ['New York', 'Paris', 'Berlin', 'London']}
df = pd.DataFrame(data)

# Basic Operations
print("Mean Age:", df['Age'].mean())
print("Data from London:\n", df[df['City'] == 'London'])
from sklearn.datasets import make_regression

# Creating a dataset
X, y = make_regression(n_samples=100, n_features=1, noise=10)
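The rest of this cell was lost in extraction; a minimal sketch of fitting a line to the synthetic data, using only what is imported above plus LinearRegression:

from sklearn.linear_model import LinearRegression

# Fit a line to the synthetic data (sketch, not the original cell)
reg = LinearRegression().fit(X, y)
print("R^2 on the training data:", reg.score(X, y))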
import numpy as np

# Sigmoid and its derivative (reconstructed; the original definitions were lost)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

# Initialize data
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Input (XOR problem)
y = np.array([[0], [1], [1], [0]])              # Target output (XOR labels)

# Layer sizes (assumed: 2 inputs, 2 hidden units, 1 output)
input_size, hidden_size, output_size = 2, 2, 1

# Random weights for input to hidden layer and hidden to output layer
W1 = np.random.randn(input_size, hidden_size)
W2 = np.random.randn(hidden_size, output_size)

# Learning rate
lr = 0.1
epochs = 10000

# Training loop (feedforward and weight updates reconstructed)
for epoch in range(epochs):
    hidden_output = sigmoid(X @ W1)
    predicted_output = sigmoid(hidden_output @ W2)
    error = y - predicted_output
    # Backpropagation
    d_predicted_output = error * sigmoid_derivative(predicted_output)  # Derivative at output
    d_hidden = (d_predicted_output @ W2.T) * sigmoid_derivative(hidden_output)
    W2 += lr * hidden_output.T @ d_predicted_output
    W1 += lr * X.T @ d_hidden

# Final predictions
print("\nFinal predictions after training:")
print(predicted_output)
# Convert to DataFrame (`data` comes from an earlier cell lost in extraction)
df = pd.DataFrame(data)

# (The train/test split and LinearRegression fit that produced y_pred, y_test,
# and rmse were in cells lost in extraction.)

# Print results
print("Predicted values:", y_pred)
print("Actual values:", y_test.values)
print("Root Mean Squared Error:", rmse)
/usr/local/lib/python3.10/dist-packages/sklearn/base.py:493: UserWarning: X does not have valid feature names, but LinearRegression was fitted with feature names
  warnings.warn(
# calc
def add(a, b):
    return a + b

def sub(a, b):
    return a - b

def mul(a, b):
    return a * b

def div(a, b):
    return a / b
# calctr
def add(a, b):
    return a + b

# Redefining add so it returns both the sum and the difference as a tuple
def add(a, b):
    return (a + b, a - b)

a = 1
b = 2
x, y = add(a, b)
l = [1,2,3]
d = {'a':1, 'b':2}
d.update({'c':3})
d
s = set()
s.add(1)     # add() needs an element; the original call was missing its argument
s.remove(1)
# Note: {} creates an empty dict, not a set; example elements added so the set
# operations below actually run
s1 = {1, 2}
s2 = {2, 3}
s = s1.union(s2)

S = {1, 2, 3}
s = {1, 2}
s.issubset(S)
l = []
l = [[12,3], [4,5],(1,3)]
# sentiment analysis
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
import nltk
from nltk.corpus import stopwords
import re
# Download stopwords
nltk.download('stopwords')
True
# Sample dataset
data = {'Review': ['I love this product!', 'This is terrible, I hate it',
                   'Not bad, but could be better', 'Absolutely wonderful!',
                   'Worst purchase ever', 'I am so happy with this item',
                   'Very disappointing', 'The product is okay, nothing special',
                   'I enjoy using this daily', 'It broke after one use. Horrible!'],
        'Sentiment': ['positive', 'negative', 'neutral', 'positive', 'negative',
                      'positive', 'negative', 'neutral', 'positive', 'negative']}
df = pd.DataFrame(data)
                         Review Sentiment
0          I love this product!  positive
1   This is terrible, I hate it  negative
2  Not bad, but could be better   neutral
3         Absolutely wonderful!  positive
4           Worst purchase ever  negative
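The vectorizing and training cell did not survive extraction; a minimal sketch of the step that produces the accuracy below, assuming TF-IDF features and the LogisticRegression imported above (the split and parameters are assumptions):

# Sketch: TF-IDF + logistic regression on the string labels
X_train, X_test, y_train, y_test = train_test_split(
    df['Review'], df['Sentiment'], test_size=0.2, random_state=42)
vectorizer = TfidfVectorizer(stop_words='english')
X_train_tfidf = vectorizer.fit_transform(X_train)
X_test_tfidf = vectorizer.transform(X_test)

clf = LogisticRegression()
clf.fit(X_train_tfidf, y_train)
y_pred = clf.predict(X_test_tfidf)
print("Accuracy:", accuracy_score(y_test, y_pred))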
Accuracy: 0.5
# Map a numeric prediction back to a label (the enclosing def line was lost;
# the function name is assumed)
def label_from_prediction(prediction):
    if prediction == 1:
        return "Positive"
    elif prediction == 0:
        return "Negative"
    else:
        return "Neutral"
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt

# Load MNIST (reconstructed; the loading line was lost in extraction)
(train_images, _), (_, _) = tf.keras.datasets.mnist.load_data()

# Normalize the images to the range [-1, 1] for better GAN performance
train_images = (train_images - 127.5) / 127.5
train_images = np.expand_dims(train_images, axis=-1)

BUFFER_SIZE = 60000
BATCH_SIZE = 256

# Batch and shuffle the data (reconstructed; train_dataset is used by train() below)
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
def build_generator():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # Remaining upsampling layers reconstructed (standard DCGAN: 7x7 -> 14x14 -> 28x28)
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    return model
def build_discriminator():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

# Discriminator loss reconstructed (standard GAN objective: real -> 1, fake -> 0)
def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    # Forward passes and losses (reconstructed; the GradientTape block was lost)
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
# Image-saving helper (the def line and the call producing `predictions` were
# lost; the signature follows the standard DCGAN pattern)
def generate_and_save_images(model, epoch, test_input):
    predictions = model(test_input, training=False)
    plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i+1)
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.savefig(f'image_at_epoch_{epoch:04d}.png')
    plt.show()
generator = build_generator()
discriminator = build_discriminator()
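The train() function called below did not survive extraction; a minimal sketch following the same DCGAN pattern (the fixed seed for the image grid is an assumption):

seed = tf.random.normal([num_examples_to_generate, noise_dim])

def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch)
        # Save a grid of generated digits after each epoch
        generate_and_save_images(generator, epoch + 1, seed)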
train(train_dataset, EPOCHS)
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
/usr/local/lib/python3.10/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-831272d6fd65> in <cell line: 4>()
      2 discriminator = build_discriminator()
      3
----> 4 train(train_dataset, EPOCHS)

(TensorFlow-internal frames omitted: error_handler -> __call__ -> _call ->
call_function -> _call_flat -> call_preflattened -> call_flat ->
context.call_function -> quick_execute)

KeyboardInterrupt:
import tensorflow as tf
from tensorflow.keras import layers, models

# Model reconstructed from the summary below (Dense 64 -> 64 -> 10; the 50,240
# parameters of the first layer imply 784 input features)
model = models.Sequential([
    layers.Dense(64, activation='relu', input_shape=(784,)),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ dense (Dense) │ (None, 64) │
50,240 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_1 (Dense) │ (None, 64) │
4,160 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_2 (Dense) │ (None, 10) │
650 │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
/usr/local/lib/python3.10/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ conv2d (Conv2D) │ (None, 26, 26, 32) │
320 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ max_pooling2d (MaxPooling2D) │ (None, 13, 13, 32) │
0 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ conv2d_1 (Conv2D) │ (None, 11, 11, 64) │
18,496 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ max_pooling2d_1 (MaxPooling2D) │ (None, 5, 5, 64) │
0 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ conv2d_2 (Conv2D) │ (None, 3, 3, 64) │
36,928 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ flatten (Flatten) │ (None, 576) │
0 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_3 (Dense) │ (None, 64) │
36,928 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_4 (Dense) │ (None, 10) │
650 │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
model = models.Sequential([
    layers.LSTM(128, input_shape=(100, 1), return_sequences=True),
    layers.LSTM(128),
    layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
/usr/local/lib/python3.10/dist-packages/keras/src/layers/rnn/rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(**kwargs)
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ lstm (LSTM) │ (None, 100, 128) │
66,560 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ lstm_1 (LSTM) │ (None, 128) │
131,584 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_5 (Dense) │ (None, 1) │
129 │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

# Tokenize a sentence and run it through BERT (reconstructed; the original
# input text was lost in extraction, so the sample sentence is an assumption)
inputs = tokenizer("Hello, how are you?", return_tensors='pt')
outputs = model(**inputs)
print(outputs)
/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_token.py:89: UserWarning:
The secret `HF_TOKEN` does not exist in your Colab secrets.
To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.
You will be able to reuse this secret in all of your notebooks.
Please note that authentication is recommended but still optional to access public models or datasets.
  warnings.warn(
{"model_id":"28cdd872b10846f6b93ff29399b1712f","version_major":2,"vers
ion_minor":0}
{"model_id":"012a7bbf33de4c848a7d3a4b935ec370","version_major":2,"vers
ion_minor":0}
{"model_id":"7d1f0c3f0f0d4b1f9dde82ed478e5ce3","version_major":2,"vers
ion_minor":0}
{"model_id":"1be4b838a4f640b984dbaa1918a636fb","version_major":2,"vers
ion_minor":0}
/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:1601: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884
  warnings.warn(
{"model_id":"511383744f1b4383b48438b60e7dbfe3","version_major":2,"vers
ion_minor":0}
BaseModelOutputWithPoolingAndCrossAttentions(
    last_hidden_state=tensor([[[ 0.1215,  0.1279, -0.1553,  ..., -0.2384,  0.1134,  0.4655],
                               [-0.1434,  0.0242, -0.1768,  ..., -0.2693,  0.3880,  0.5866],
                               [-0.5255,  0.2367, -0.1766,  ..., -1.0957,  0.0183,  0.1786],
                               ...,
                               [ 0.7277,  0.1579, -0.2494,  ...,  0.0396, -0.7772, -0.0927]]],
                             grad_fn=<NativeLayerNormBackward0>),
    pooler_output=tensor([[-0.9058, -0.4572, -0.7117,  0.6969,  ..., -0.7171,  0.9117]],
                         grad_fn=<TanhBackward0>),
    hidden_states=None, past_key_values=None, attentions=None, cross_attentions=None)
import torch
import torch.nn as nn

class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 784),
            nn.Tanh()
        )

    def forward(self, x):  # forward pass added so the module is usable
        return self.fc(x)

class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(784, 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 1),
            nn.Sigmoid()
        )

    def forward(self, x):  # forward pass added so the module is usable
        return self.fc(x)

generator = Generator()
discriminator = Discriminator()
import numpy as np

# Initialize parameters
Q = np.zeros((5, 5))  # Example state-action table
alpha = 0.1    # learning rate
gamma = 0.9    # discount factor
epsilon = 0.1  # exploration rate

# Q-learning update (function body reconstructed from the standard update rule)
def update_q_table(state, action, reward, next_state):
    Q[state, action] += alpha * (reward + gamma * np.max(Q[next_state]) - Q[state, action])

# Sample usage
update_q_table(0, 1, 10, 2)  # Sample state-action-reward-next_state
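epsilon is defined above but the action-selection step is missing; a small epsilon-greedy sketch over the same table layout (the helper name is an assumption):

# Epsilon-greedy action selection over the actions of a state row (sketch)
def choose_action(state):
    if np.random.rand() < epsilon:
        return np.random.randint(Q.shape[1])  # explore: random action
    return int(np.argmax(Q[state]))           # exploit: best known action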
{"model_id":"82b832892ddd4e44a30437c44462b66f","version_major":2,"vers
ion_minor":0}
{"model_id":"a4c70858e7264fbcbfaf1a6a99d9fe9f","version_major":2,"vers
ion_minor":0}
{"model_id":"9589324be965400cbcca79f128d79130","version_major":2,"vers
ion_minor":0}
{"model_id":"0ec0297a7433454eb9fbe499726a7b5f","version_major":2,"vers
ion_minor":0}
Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.
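The evaluation cell that produced the confusion matrix below was lost; a minimal sketch assuming sklearn's confusion_matrix applied to held-out labels y_test and predictions y_pred from the classifier above:

from sklearn.metrics import confusion_matrix

# Sketch: y_test and y_pred are assumed to come from the earlier evaluation
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))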
Confusion Matrix:
[[4 0]
[2 2]]
from flask import Flask, request, jsonify
import joblib

app = Flask(__name__)

# Load trained model
model = joblib.load('model.pkl')

@app.route('/predict', methods=['POST'])
def predict():
    data = request.json
    prediction = model.predict([data['input']])
    return jsonify({'prediction': prediction.tolist()})

if __name__ == '__main__':
    app.run(debug=True)
---------------------------------------------------------------------------
FileNotFoundError                         Traceback (most recent call last)
<ipython-input-10-9f42a7ddfac5> in <cell line: 7>()
      5
      6 # Load trained model
----> 7 model = joblib.load('model.pkl')
      8
      9 @app.route('/predict', methods=['POST'])

/usr/local/lib/python3.10/dist-packages/joblib/numpy_pickle.py in load(filename, mmap_mode)
    648             obj = _unpickle(fobj)
    649     else:
--> 650         with open(filename, 'rb') as f:
    651             with _read_fileobject(f, filename, mmap_mode) as fobj:
    652                 if isinstance(fobj, str):

FileNotFoundError: [Errno 2] No such file or directory: 'model.pkl'
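The circuit construction for the cell below was lost in extraction; a minimal sketch, assuming a two-qubit Bell-state circuit run on the qiskit-aer simulator installed below:

from qiskit import QuantumCircuit, transpile
from qiskit.visualization import plot_histogram
from qiskit_aer import AerSimulator

# Bell-state circuit: Hadamard on qubit 0, then CNOT, then measure both qubits
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

# Run on the Aer simulator
simulator = AerSimulator()
result = simulator.run(transpile(qc, simulator)).result()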
counts = result.get_counts(qc)
print(counts)
plot_histogram(counts)
!pip install qiskit-aer

Collecting qiskit-aer
  Downloading qiskit_aer-0.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (8.0 kB)
Requirement already satisfied: qiskit>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from qiskit-aer) (1.2.4)
(remaining "Requirement already satisfied" lines omitted)
Downloading qiskit_aer-0.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (12.3 MB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 12.3/12.3 MB 56.8 MB/s eta 0:00:00
# Required Libraries
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import joblib  # To save and load models

# Load dataset (reconstructed; the read_csv line was lost in extraction)
df = pd.read_csv('employee_salary.csv')

# Explore dataset
print(df.head())
/usr/local/lib/python3.10/dist-packages/sklearn/base.py:493: UserWarning: X does not have valid feature names, but LinearRegression was fitted with feature names
  warnings.warn(
#deep learning
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
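The model-definition cell is missing; reconstructed from the summary below (Dense 32 → 64 → 1, where the 352 parameters of the first layer imply 10 input features; the activations and loss are assumptions):

# Reconstruction; 352 = 10*32 + 32 implies a 10-feature input
model = Sequential([
    Dense(32, activation='relu', input_shape=(10,)),
    Dense(64, activation='relu'),
    Dense(1)
])
model.compile(optimizer='adam', loss='mse')
model.summary()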
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ dense (Dense) │ (None, 32) │
352 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_1 (Dense) │ (None, 64) │
2,112 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_2 (Dense) │ (None, 1) │
65 │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
import torch
import torch.nn as nn
import torch.optim as optim
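The Keras CNN compiled below is missing its definition cell; reconstructed from the summary that follows (896 first-layer parameters and a 62×62 first conv output imply 64×64×3 inputs; `models`/`layers` come from the earlier Keras import):

# Reconstruction from the summary below
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])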
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
/usr/local/lib/python3.10/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ conv2d (Conv2D) │ (None, 62, 62, 32) │
896 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ max_pooling2d (MaxPooling2D) │ (None, 31, 31, 32) │
0 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ conv2d_1 (Conv2D) │ (None, 29, 29, 64) │
18,496 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ max_pooling2d_1 (MaxPooling2D) │ (None, 14, 14, 64) │
0 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ flatten (Flatten) │ (None, 12544) │
0 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_3 (Dense) │ (None, 128) │
1,605,760 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_4 (Dense) │ (None, 10) │
1,290 │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
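The LSTM model compiled below is also missing its definition; reconstructed from the summary that follows (10,400 = 4·(50·(1+50)+50) implies LSTM(50) on univariate inputs; the sequence length is an assumption):

# Reconstruction from the summary below
model = models.Sequential([
    layers.LSTM(50, input_shape=(10, 1)),  # sequence length assumed; only the feature dim is implied
    layers.Dense(1)
])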
model.compile(optimizer='adam', loss='mse')
model.summary()
/usr/local/lib/python3.10/dist-packages/keras/src/layers/rnn/rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(**kwargs)
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ lstm (LSTM) │ (None, 50) │
10,400 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_5 (Dense) │ (None, 1) │
51 │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

# Autoencoder model
input_img = Input(shape=(784,))
encoded = Dense(128, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)

# Model assembly reconstructed to match the functional summary below
autoencoder = Model(input_img, decoded)
autoencoder.summary()
Model: "functional_3"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ input_layer_3 (InputLayer) │ (None, 784) │
0 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_6 (Dense) │ (None, 128) │
100,480 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense_7 (Dense) │ (None, 784) │
101,136 │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
import torch.nn as nn
# Initialize models
generator = Generator()
discriminator = Discriminator()
# Neural network from scratch
import numpy as np

# Sigmoid activation (reconstructed; the original definition was lost)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# (The training data X, y and the weight matrices weights_input_hidden /
# weights_hidden_output were initialized in cells lost in extraction.)

# Learning rate
learning_rate = 0.1

for epoch in range(10000):
    # Feedforward
    hidden_layer_output = sigmoid(np.dot(X, weights_input_hidden))
    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)  # Input to output layer
    predicted_output = sigmoid(output_layer_input)  # Activation function

    # Backpropagation
    error = y - predicted_output  # Error in output

    if epoch % 1000 == 0:
        loss = np.mean(np.abs(error))
        print(f'Epoch {epoch}, Loss: {loss}')

# Final prediction
print("\nFinal Prediction after training:")
print(predicted_output)
import numpy as np

# Learning rate
learning_rate = 0.01

# Training loop (the weight-update step of backpropagation was lost in extraction)
for epoch in range(10000):
    # Feedforward step
    hidden_layer_input = np.dot(X, weights_input_hidden)
    hidden_layer_output = sigmoid(hidden_layer_input)
    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)
    predicted_output = sigmoid(output_layer_input)

    # Backpropagation step
    error = y - predicted_output  # Error calculation

# Final prediction
print("\nPredicted Salary after training:")
print(predicted_output * 100)  # Multiply by 100 to scale to salary in thousands
import numpy as np
import pandas as pd

# Learning rate
learning_rate = 0.01

# Training loop (the weight-update step was again lost in extraction)
for epoch in range(10000):
    # Feedforward step
    hidden_layer_input = np.dot(X, weights_input_hidden)
    hidden_layer_output = sigmoid(hidden_layer_input)
    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)
    predicted_output = sigmoid(output_layer_input)

    # Backpropagation step
    error = y - predicted_output  # Error calculation

# Final prediction
print("\nPredicted Salary after training (in thousands):")
print(predicted_output * 1000)  # Multiply by 1000 to scale back to actual salary
import numpy as np
import pandas as pd

# Learning rate
learning_rate = 0.01

# Training loop (weight updates lost in extraction, as above)
for epoch in range(10000):
    # Feedforward step
    hidden_layer_input = np.dot(X, weights_input_hidden)
    hidden_layer_output = sigmoid(hidden_layer_input)
    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)
    predicted_output = sigmoid(output_layer_input)

    # Backpropagation step
    error = y - predicted_output  # Error calculation

# Forward pass for a new input (reconstructed; the scaling applied to the
# 6-years input is an assumption)
X_new = np.array([[6]])
hidden_new = sigmoid(np.dot(X_new, weights_input_hidden))
predicted_salary_new = sigmoid(np.dot(hidden_new, weights_hidden_output))

# Print the predicted salary for the new input (after scaling back)
print("\nPredicted Salary for 6 years of experience (in thousands):")
print(predicted_salary_new[0][0] * 1000)  # Multiply by 1000 to scale back to actual salary
#NLP
!pip install nltk spacy
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
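word_tokenize is imported but its usage cell did not survive extraction; a one-line example (the sample sentence is illustrative):

# Example usage of word_tokenize
print(word_tokenize("Natural Language Processing with NLTK is fun!"))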
# word embedding
import spacy

# Download the medium English model (command reconstructed from the log below)
!python -m spacy download en_core_web_md
nlp = spacy.load("en_core_web_md")
Collecting en-core-web-md==3.7.1
  Downloading https://github.com/explosion/spacy-models/releases/download/en_core_web_md-3.7.1/en_core_web_md-3.7.1-py3-none-any.whl (42.8 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 42.8/42.8 MB 12.1 MB/s eta 0:00:00
Requirement already satisfied: spacy<3.8.0,>=3.7.2 in /usr/local/lib/python3.10/dist-packages (from en-core-web-md==3.7.1) (3.7.5)
(remaining "Requirement already satisfied" lines omitted)
Installing collected packages: en-core-web-md
Successfully installed en-core-web-md-3.7.1
✔ Download and installation successful
You can now load the package via spacy.load('en_core_web_md')
⚠ Restart to reload dependencies
If you are in a Jupyter or Colab notebook, you may need to restart Python in
order to load all the package's dependencies. You can do this by selecting the
'Restart kernel' or 'Restart runtime' option.
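The NER cell that produced the entities below was lost; a minimal reconstruction using the classic spaCy sample sentence, which matches the printed entities exactly:

# Named-entity recognition example (sentence matches the output below)
doc = nlp("Apple is looking at buying U.K. startup for $1 billion")
for ent in doc.ents:
    print(ent.text, ent.label_)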
Apple ORG
U.K. GPE
$1 billion MONEY
# NLP with transformers
from transformers import pipeline
{"model_id":"6e8cd9bf68754077ad0f2d91be22cce9","version_major":2,"vers
ion_minor":0}
{"model_id":"7e63de39db8a46cfa36f293c671b67b3","version_major":2,"vers
ion_minor":0}
{"model_id":"2ab65f49a8654615a8c9160715388afb","version_major":2,"vers
ion_minor":0}
{"model_id":"9ee3a9316e194286862912e1ba0827a5","version_major":2,"vers
ion_minor":0}
{"model_id":"921395cc87664c36a7c75ff61b0dadef","version_major":2,"vers
ion_minor":0}
{"model_id":"ecaba4e3edd44b2a9e145c5c59652baa","version_major":2,"vers
ion_minor":0}
{"model_id":"23988d2fb7c34cf99d7c8638c1b12c6b","version_major":2,"vers
ion_minor":0}
Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.
Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.
Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.
# sentiment analysis
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import string

nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')

import pandas as pd
df = pd.read_csv("IMDB Dataset.csv")

# Function to preprocess text
def preprocess_text(text):
    # Tokenize and lower the text
    tokens = word_tokenize(text.lower())
    # Remaining steps reconstructed: drop stopwords/punctuation, lemmatize, re-join
    stop_words = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    tokens = [lemmatizer.lemmatize(t) for t in tokens
              if t not in stop_words and t not in string.punctuation]
    return ' '.join(tokens)
Accuracy: 0.8297587131367292
precision recall f1-score support
# NLG (natural language generation)
import pandas as pd
# Sample data
data = {
'Date': ['2024-01-01', '2024-01-01', '2024-01-02', '2024-01-02'],
'Product': ['Product A', 'Product B', 'Product A', 'Product C'],
'Units Sold': [120, 150, 130, 90],
'Revenue': [1200, 2100, 1300, 900],
'Region': ['North', 'South', 'West', 'East']
}
df = pd.DataFrame(data)
# Aggregate revenue by product and by region (reconstructed; the groupby cell was lost)
product_sales = df.groupby('Product')['Revenue'].sum()
region_sales = df.groupby('Region')['Revenue'].sum()
print(product_sales)
print(region_sales)

def generate_sales_report(product_sales, region_sales):
    # Sentence templates are reconstructions; only the combine step survived
    product_summary = ' '.join(
        f"{p} generated ${r} in revenue." for p, r in product_sales.items())
    region_summary = ' '.join(
        f"The {g} region brought in ${r}." for g, r in region_sales.items())
    # Combine summaries
    report = product_summary + ' ' + region_summary
    return report

# Generate report
sales_report = generate_sales_report(product_sales, region_sales)
print(sales_report)
#Generative AI
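The loading cell for this section was lost; a minimal reconstruction that prints the GPT-2 architecture shown below (the checkpoint name 'gpt2' is inferred from the printed 12-block, 768-dim model):

from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.eval()
print(model)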
GPT2LMHeadModel(
(transformer): GPT2Model(
(wte): Embedding(50257, 768)
(wpe): Embedding(1024, 768)
(drop): Dropout(p=0.1, inplace=False)
(h): ModuleList(
(0-11): 12 x GPT2Block(
(ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
(attn): GPT2SdpaAttention(
(c_attn): Conv1D()
(c_proj): Conv1D()
(attn_dropout): Dropout(p=0.1, inplace=False)
(resid_dropout): Dropout(p=0.1, inplace=False)
)
(ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
(mlp): GPT2MLP(
(c_fc): Conv1D()
(c_proj): Conv1D()
(act): NewGELUActivation()
(dropout): Dropout(p=0.1, inplace=False)
)
)
)
(ln_f): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
)
(lm_head): Linear(in_features=768, out_features=50257, bias=False)
)
import torch
/usr/local/lib/python3.10/dist-packages/transformers/generation/configuration_utils.py:567: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.7` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`.
  warnings.warn(
/usr/local/lib/python3.10/dist-packages/transformers/generation/configuration_utils.py:572: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.95` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`.
  warnings.warn(
The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.
Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.
The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.
Once upon a time in a faraway land, the world was a land of peace and
harmony. The world of the gods was the land that was to be the home of
all.
The world that had been the center of civilization was now a world
where the Gods were to rule. They were the ones who had to make the
most of their power. And they were not the only ones. There were many
other gods, too. But the one who was most powerful was none other than
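The generate_text helper used below did not survive extraction; a minimal sketch, assuming the GPT-2 model and tokenizer loaded above (the sampling flags without do_sample=True mirror what the UserWarnings above imply about the original code):

def generate_text(prompt, max_length=100, temperature=0.7):
    # Encode the prompt and generate a continuation (sketch, not the original)
    inputs = tokenizer.encode(prompt, return_tensors='pt')
    with torch.no_grad():
        output = model.generate(inputs, max_length=max_length,
                                temperature=temperature, top_p=0.95,
                                top_k=50, num_return_sequences=1)
    return tokenizer.decode(output[0], skip_special_tokens=True)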
# Experiment with different prompts and parameters
prompt = "The future of AI is"
generated_text = generate_text(prompt, max_length=200, temperature=1.0)
print(generated_text)
The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.
Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.
#GEN AI project
from transformers import GPT2LMHeadModel, GPT2Tokenizer
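The loading step for this project was lost in extraction; a minimal reconstruction using the imports above (the 'gpt2' checkpoint is an assumption consistent with the earlier section):

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.eval()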
{"model_id":"3d54c24b14374c419fd831c3e154bae6","version_major":2,"vers
ion_minor":0}
{"model_id":"8bbfdb23ab944fc2a344c59a69870904","version_major":2,"vers
ion_minor":0}
{"model_id":"cf457aced37e48b9a61326a1af342e0b","version_major":2,"vers
ion_minor":0}
{"model_id":"0d1f21709422421f993e4420f12e07d2","version_major":2,"vers
ion_minor":0}
{"model_id":"b460ce814baa4806a77b89ac2d15e7b7","version_major":2,"vers
ion_minor":0}
{"model_id":"547d3c73d40c4d3c90cdd7640d2d870b","version_major":2,"vers
ion_minor":0}
{"model_id":"529130c46bfe4c738f101dc143acfe5f","version_major":2,"vers
ion_minor":0}
import torch

# Generation helper (the enclosing def and the decode step were lost in
# extraction; the wrapper and its name are assumptions)
def generate_story(prompt, max_length=200, temperature=0.7):
    inputs = tokenizer.encode(prompt, return_tensors='pt')
    # Generate text
    with torch.no_grad():
        output = model.generate(
            inputs,
            max_length=max_length,
            temperature=temperature,
            top_p=0.9,
            top_k=50,
            num_return_sequences=1
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)
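The prompt-building helper and its inputs called below were lost; a hypothetical sketch (names, template, and sample values are all assumptions):

# Hypothetical helper and sample inputs matching the call below
def create_story_prompt(character_name, traits, setting):
    return (f"Write a short story about {character_name}, who is "
            f"{', '.join(traits)}, set in {setting}.")

character_name = "Aria"
traits = ["brave", "curious"]
setting = "a faraway land"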
# Create prompt
prompt = create_story_prompt(character_name, traits, setting)
/usr/local/lib/python3.10/dist-packages/transformers/generation/configuration_utils.py:567: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.7` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`.
  warnings.warn(
/usr/local/lib/python3.10/dist-packages/transformers/generation/configuration_utils.py:572: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.9` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`.
  warnings.warn(
The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.
Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.
The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.
Generated Story:
{"model_id":"1e13c0c9bef64362a6ca97a030ef9b0b","version_major":2,"vers
ion_minor":0}
{"model_id":"92516f2e2c824bce926801de5ec8bbf3","version_major":2,"vers
ion_minor":0}
{"model_id":"b43b43791365448381b84434c1cf9408","version_major":2,"vers
ion_minor":0}
{"model_id":"cadf58dd9517476d8911a42e17bdbece","version_major":2,"vers
ion_minor":0}
{"model_id":"073965278521493a94df8753c7dc4123","version_major":2,"vers
ion_minor":0}
{"model_id":"1892996bb7074026b8de5386b441a348","version_major":2,"vers
ion_minor":0}
tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-
squad2")
model =
AutoModelForQuestionAnswering.from_pretrained("deepset/roberta-base-
squad2")
# Define the context (the text from which the model will answer
questions)
context = """
Hugging Face is a technology company based in New York and Paris.
It is known for creating tools that democratize artificial
intelligence by making
models and datasets accessible to developers worldwide.
"""
df = pd.read_csv("employee_salary.csv")
df.head()
x = df['YearsExperience'].values
y = df['Salary']
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-17-56a6a2b0b003> in <cell line: 2>()
      1 x = df['YearsExperience'].values
----> 2 x = x.reshape(30, 1)
      3 y = df['Salary']
      4
      5 y
x = x.values.reshape(-1, 1)
y = y.values.reshape(-1, 1)

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-12-be81df8663df> in <cell line: 1>()
----> 1 x = x.values.reshape(-1, 1)
      2 y = y.values.reshape(-1, 1)
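The two tracebacks above come from shape handling: x is already a NumPy array after .values, so it has no .values attribute, and reshape(30, 1) fails if the column length is not exactly 30. The robust fix:

# x is already an ndarray, so reshape it directly; y is still a Series
x = x.reshape(-1, 1)
y = y.values.reshape(-1, 1)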
# (The model-fitting cell was lost; a LinearRegression fit on x, y is assumed)
model = LinearRegression().fit(x, y)
y_pre = model.predict([[14]])  # predict expects a 2-D input
{"model_id":"262cf4a731ce4d0ca173a518db0bc5f0","version_major":2,"vers
ion_minor":0}
{"model_id":"33b9a2375d3f4a3ca9e7e291370e7f61","version_major":2,"vers
ion_minor":0}
{"model_id":"542e62df216c4abba4e1791bb73fc753","version_major":2,"vers
ion_minor":0}
{"model_id":"5caabc08145e4952b7d3228eb4b5f130","version_major":2,"vers
ion_minor":0}
{"model_id":"cfaf796834264a04beeba567b7e132ee","version_major":2,"vers
ion_minor":0}
{"model_id":"f5ae44d932a54390be768f15963eeb92","version_major":2,"vers
ion_minor":0}
Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.
tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-
squad2")
model =
AutoModelForQuestionAnswering.from_pretrained("deepset/roberta-base-
squad2")