IRT Lab Programs
# Display the token list produced by the tokenization step above.
print("\nTokens:", tokens, sep="\n")
_________________________________
LAB-5: Implementation of Text Processing Model
# Import necessary libraries
from google.colab import files
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
import nltk
# NOTE(review): this excerpt is the body of a text-preprocessing function whose
# `def` line is missing from this extract; `tokens`, `stop_words`, and `stemmer`
# (presumably the PorterStemmer imported above — confirm against the full lab
# program) must be defined before these lines run.
# Remove stopwords
filtered_tokens = [word for word in tokens if word not in
stop_words]
# Apply stemming
stemmed_tokens = [stemmer.stem(word) for word in filtered_tokens]
return stemmed_tokens
# Show the preprocessed tokens produced by the text-processing step.
print("\nProcessed Tokens:", processed_tokens, sep="\n")
_________________________________
LAB-6: Implementation of Neural Network Model
# Partition the padded sequences and labels into train / test splits,
# then configure the model for binary classification.
train_data, test_data = padded_sequences[:train_size], padded_sequences[train_size:]
train_labels, test_labels = labels[:train_size], labels[train_size:]
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Step 6: Train the Model
print("\nTraining the model...")
# Ensure batch_size is not larger than the training set
# (fix: this explanatory comment had been wrapped onto a bare, uncommented
# line, which broke the script).
batch_size = min(len(train_data), 32)
history = model.fit(train_data, train_labels, epochs=5,
                    validation_data=(test_data, test_labels),
                    batch_size=batch_size)
# Show up to five sample predictions next to their ground-truth labels.
# (fix: the f-string had been split mid-literal across two lines and the
# loop body lost its indentation in the extract.)
print("\nSample Predictions:")
for i in range(min(5, len(test_data))):
    # texts is indexed from train_size because test_data holds the tail split.
    print(f"Text: {texts[train_size + i]}, Actual Label: {test_labels[i]}, Predicted Label: {predictions[i][0]}")
class ScalableIndexer:
    """Holds an inverted index mapping each word to the line numbers it occurs on.

    NOTE(review): only the constructor is visible in this excerpt; the
    indexing/search methods are presumably defined later in the lab program.
    """

    def __init__(self):
        # Dictionary for word-to-line mapping; defaultdict(list) lets callers
        # append line numbers without pre-creating the key.
        # (fix: the trailing word "mapping" of this comment had been wrapped
        # onto its own uncommented line, which broke the class body.)
        self.index = defaultdict(list)
# Display results of the search: which lines matched, and their contents.
# (fix: the if/else and loop bodies had lost all indentation in the extract,
# which made the fragment an IndentationError; structure restored.)
if search_results:
    print(f"Found '{search_term}' in lines: {search_results}")
    print("\nLines containing the term:")
    for line_num in search_results:
        # line_num is 0-based; report 1-based line numbers to the user.
        print(f"Line {line_num + 1}: {file_content[line_num].strip()}")
else:
    print(f"'{search_term}' not found in the file.")
INPUT:
Create your own .TXT file, add a few lines to it,
then search for a term in that TXT file.