
TPCV1+2+3+4.ipynb - Colaboratory

DOUAA KHILA 3 ID

TP1

Requirements

!pip install opencv-python

Requirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (4.8.0.76)


Requirement already satisfied: numpy>=1.21.2 in /usr/local/lib/python3.10/dist-packages (from opencv-python) (1.23.5)

pip install split-folders

Requirement already satisfied: split-folders in /usr/local/lib/python3.10/dist-packages (0.5.1)

import cv2
import os

import imageio
import matplotlib.pyplot as plt
import random
import shutil
import pandas as pd
import numpy as np
from fastai.vision.all import *

import matplotlib.pyplot as plt


from skimage import color

from google.colab import drive


drive.mount('/content/drive')

Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).

Visualize pictures

path = "/content/drive/MyDrive/tpcv/Db2_b"

images = []

# Walk through the directory and read the images
for filename in os.listdir(path):
    if filename.endswith(".tif"):
        chemin_image = os.path.join(path, filename)
        image = cv2.imread(chemin_image)
        if image is not None:
            images.append(image)
        else:
            print(f"error: {chemin_image}")

def display_images(images):
    for i, img in enumerate(images):
        plt.subplot(1, len(images), i + 1)
        plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        plt.axis('off')
    plt.show()

# Display images
display_images(images[1:6])


Splitting the dataset

# Specify the path to your original dataset (all your images)


input_folder = "/content/drive/MyDrive/tpcv/Db2_b"

# Specify the path where you want to save the split dataset
output_folder = "/content/drive/MyDrive/tpcv/data"

# Define the desired split ratio (e.g., 80% train, 20% test)
split_ratio = 0.8 # 80% train, 20% test

# List all files in the input folder (assumes all files are images)
all_files = os.listdir(input_folder)

# Shuffle the list of files randomly


random.shuffle(all_files)

# Calculate the number of files for training and testing


split_index = int(len(all_files) * split_ratio)

# Split the list of files into train and test sets


train_files = all_files[:split_index]
test_files = all_files[split_index:]

# Create the train and test directories in the output folder


train_folder = os.path.join(output_folder, "train")
test_folder = os.path.join(output_folder, "test")

os.makedirs(train_folder, exist_ok=True)
os.makedirs(test_folder, exist_ok=True)

# Copy the training files to the train folder


for file in train_files:
    src_path = os.path.join(input_folder, file)
    dst_path = os.path.join(train_folder, file)
    shutil.copy(src_path, dst_path)

# Copy the testing files to the test folder


for file in test_files:
    src_path = os.path.join(input_folder, file)
    dst_path = os.path.join(test_folder, file)
    shutil.copy(src_path, dst_path)
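The split-folders package installed at the top is not actually used by the copy loops above. A minimal sketch of the one-call alternative, assuming the images are organised into one sub-folder per class (which is what splitfolders expects); note that a two-way ratio is written to train/ and val/ sub-folders of the output directory.

import splitfolders

# One-call alternative to the manual shuffle/copy above (assumes class sub-folders).
splitfolders.ratio(input_folder, output=output_folder, seed=42, ratio=(0.8, 0.2))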

dataset summary

import os
import matplotlib.pyplot as plt
from PIL import Image

# Specify the path to your dataset


dataset_path = "/content/drive/MyDrive/tpcv/data"

# Initialize variables for summary statistics


total_images = 0
min_width = float('inf')
max_width = 0
min_height = float('inf')
max_height = 0

# Lists to store image widths and heights for visualization


image_widths = []
image_heights = []

# Iterate through the dataset


for root, _, files in os.walk(dataset_path):
    for file in files:
        if file.endswith(('.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tif')):
            file_path = os.path.join(root, file)

            # Open the image using PIL
            with Image.open(file_path) as img:
                width, height = img.size

            # Update summary statistics
            total_images += 1
            min_width = min(min_width, width)
            max_width = max(max_width, width)
            min_height = min(min_height, height)
            max_height = max(max_height, height)
            image_widths.append(width)
            image_heights.append(height)

# Print summary statistics


print(f"Total images: {total_images}")
print(f"Minimum width: {min_width}px")
print(f"Maximum width: {max_width}px")
print(f"Minimum height: {min_height}px")
print(f"Maximum height: {max_height}px")

# Create histograms for image widths and heights


plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.hist(image_widths, bins=20, edgecolor='k', alpha=0.7)
plt.title("Image Widths")
plt.xlabel("Width (pixels)")
plt.ylabel("Frequency")

plt.subplot(1, 2, 2)
plt.hist(image_heights, bins=20, edgecolor='k', alpha=0.7)
plt.title("Image Heights")
plt.xlabel("Height (pixels)")
plt.ylabel("Frequency")

plt.tight_layout()
plt.show()


Total images: 639


Minimum width: 256px
Maximum width: 256px
Minimum height: 364px
Maximum height: 364px
def data_info(direct):
    files = [file.strip() for file in os.listdir('/content/drive/MyDrive/tpcv/data/' + direct)]
    filenames = []
    for file in files:
        add = [file[:-4], file[0]]
        filenames.append(add)
    return filenames

df_train = data_info(direct = 'train')


df_test = data_info(direct = 'test')
train = pd.DataFrame(df_train, columns =['filename', 'label'])
test = pd.DataFrame(df_test, columns = ['filename', 'label'])
display(train.head(), test.head())

  filename label
0    101_6     1
1    107_4     1
2    105_5     1
3    107_7     1
4    109_1     1

  filename label
0    107_3     1
1    106_2     1
2    101_2     1
3    105_7     1
4    106_5     1

print('train:', len(train))
print('test:', len(test))

train: 80
test: 79

Binarization of pictures

import os
import cv2
import matplotlib.pyplot as plt

# Define the input folder path and the binarization threshold


input_folder = "/content/drive/MyDrive/tpcv/data/train"

threshold_value = 128

# Define the function to convert an image to binary


def convert_to_binary(image):
    grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary_image = cv2.threshold(grayscale_image, threshold_value, 255, cv2.THRESH_BINARY)
    return binary_image
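A fixed threshold of 128 may not suit every fingerprint image. A minimal variant (not part of the TP) that lets Otsu's method choose the threshold per image via the cv2.THRESH_OTSU flag:

def convert_to_binary_otsu(image):
    # Hypothetical variant of convert_to_binary: Otsu picks the threshold automatically.
    grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    otsu_value, binary_image = cv2.threshold(grayscale_image, 0, 255,
                                             cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binary_image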

import os
import cv2
import matplotlib.pyplot as plt

# Define the folder containing your images


input_folder = "/content/drive/MyDrive/tpcv/data/train"

# Create a list to keep track of processed images


processed_images = []

# Iterate through all image files in the input folder


for filename in os.listdir(input_folder):
    if filename.endswith('.tif') or filename.endswith('.jpg'):
        image_path = os.path.join(input_folder, filename)
        image = cv2.imread(image_path)

        # Binarize the image using the convert_to_binary function defined above
        binary_image = convert_to_binary(image)

        # Append the original and binarized images to the processed_images list
        processed_images.append((image, binary_image))

# Display the original and binarized images for the first three examples
for i in range(min(3, len(processed_images))):
    original_image = cv2.cvtColor(processed_images[i][0], cv2.COLOR_BGR2RGB)
    binary_image = processed_images[i][1]

    plt.figure(figsize=(12, 6))

    plt.subplot(1, 2, 1)
    plt.title('Before Binarization')
    plt.imshow(original_image)
    plt.axis('off')

    plt.subplot(1, 2, 2)
    plt.title('After Binarization')
    plt.imshow(binary_image, cmap='gray')
    plt.axis('off')

    plt.show()


IMG_PATH = "/content/drive/MyDrive/tpcv/data/train/101_7.tif"

imgArray = cv2.imread(IMG_PATH)
plt.imshow(imgArray)

plt.show()

imgArray.shape

(364, 256, 3)

visualize color channels


np.random.seed(42)

# data loader
data = ImageDataLoaders.from_folder(
path,
train=".",
valid_pct=0.2,
item_tfms=RandomResizedCrop(512, min_scale=0.75),
bs=32,
batch_tfms=[*aug_transforms(size=256, max_warp=0), Normalize.from_stats(*imagenet_stats)],
num_workers=0
)

data.show_batch(nrows=3, figsize=(7,8))

convertedArray = cv2.cvtColor(imgArray, cv2.COLOR_BGR2RGB)

plt.subplots(figsize=(15,10))
plt.imshow(convertedArray)
plt.show()


fig, ((ax1,ax2), (ax3,ax4)) =plt.subplots(2,2,figsize=(14,10))

ax1.imshow(convertedArray[:,:,0], cmap="Reds_r"); ax1.set_title("R", size=20)


ax2.imshow(convertedArray[:,:,1], cmap="Greens_r"); ax2.set_title("G", size=20)
ax3.imshow(convertedArray[:,:,2], cmap="Blues_r"); ax3.set_title("B", size=20)

ax4.axis("off"); plt.tight_layout(); plt.show()

fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 4))

# Plot histogram for the Red channel


ax1.hist(convertedArray[:, :, 0].flatten(), color="r", bins=200)
ax1.set_title("R", size=20)

# Plot histogram for the Green channel


ax2.hist(convertedArray[:, :, 1].flatten(), color="g", bins=200)
ax2.set_title("G", size=20)

# Plot histogram for the Blue channel


ax3.hist(convertedArray[:, :, 2].flatten(), color="b", bins=200)
ax3.set_title("B", size=20)

plt.tight_layout()
plt.show()

# Extract the horizontal section at a valid row (e.g., row 200)


row_index = 200 # Choose a valid row index within the array size
horSection = convertedArray[row_index, :, :]

# Plot the profiles


plt.figure(figsize=(16, 5))
plt.plot(horSection[:, 0], label="R", color="#e74c3c")
plt.plot(horSection[:, 1], label="G", color="#16a085")
plt.plot(horSection[:, 2], label="B", color="#3498db")

plt.xlabel("X")
plt.legend()
plt.show()

TP2

Filters

import cv2
import os

pathB = "/content/drive/MyDrive/tpcv/BONNE"

imagesB = []

# Walk through the directory and read the images
for filename in os.listdir(pathB):
    if filename.endswith(".tif"):
        chemin_image = os.path.join(pathB, filename)
        image = cv2.imread(chemin_image)
        if image is not None:
            imagesB.append(image)
        else:
            print(f"error: {chemin_image}")

import cv2
import os

pathM = "/content/drive/MyDrive/tpcv/MAUVAISE"

imagesM = []

# Walk through the directory and read the images
for filename in os.listdir(pathM):
    if filename.endswith(".tif"):
        chemin_image = os.path.join(pathM, filename)
        image = cv2.imread(chemin_image)
        if image is not None:
            imagesM.append(image)
        else:
            print(f"error: {chemin_image}")


Bad images

display_images(imagesM)

Good images

display_images(imagesB)


Averaging (mean) filter

(5,5)

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/MAUVAISE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the averaging filter with a 5x5 kernel (adjust as needed)
            image_filtree = cv2.blur(image, (5, 5))

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/BONNE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the averaging filter with a 5x5 kernel (adjust as needed)
            image_filtree = cv2.blur(image, (5, 5))

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

(3,3)

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/BONNE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the averaging filter with a 3x3 kernel (adjust as needed)
            image_filtree = cv2.blur(image, (3, 3))

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

Gaussian filter

(5,5) 0

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/BONNE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the Gaussian filter with a 5x5 kernel and sigma = 0
            image_filtree = cv2.GaussianBlur(image, (5, 5), 0)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

(5,5) 0.25

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/BONNE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the Gaussian filter with a 5x5 kernel and sigma = 0.25
            image_filtree = cv2.GaussianBlur(image, (5, 5), 0.25)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

(5,5) 3.5

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/BONNE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the Gaussian filter with a 5x5 kernel and sigma = 3.5
            image_filtree = cv2.GaussianBlur(image, (5, 5), 3.5)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

(5,5) 15

import cv2
import os
import matplotlib.pyplot as plt

filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the Gaussian filter with a 5x5 kernel and sigma = 15
            image_filtree = cv2.GaussianBlur(image, (5, 5), 15)
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

Bilateral filter

d=3, sigmaColor=75, sigmaSpace=75

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/MAUVAISE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the bilateral filter (adjust the parameters as needed)
            image_filtree = cv2.bilateralFilter(image, d=3, sigmaColor=75, sigmaSpace=75)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

d=9, sigmaColor=75, sigmaSpace=75

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/MAUVAISE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the bilateral filter (adjust the parameters as needed)
            image_filtree = cv2.bilateralFilter(image, d=9, sigmaColor=75, sigmaSpace=75)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

d=15, sigmaColor=75, sigmaSpace=75

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/MAUVAISE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the bilateral filter (adjust the parameters as needed)
            image_filtree = cv2.bilateralFilter(image, d=15, sigmaColor=75, sigmaSpace=75)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

Median filter

kernel 5

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/MAUVAISE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the median filter with a kernel size of 5 (adjust as needed)
            image_filtree = cv2.medianBlur(image, 5)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

kernel 7

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/MAUVAISE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the median filter with a kernel size of 7 (adjust as needed)
            image_filtree = cv2.medianBlur(image, 7)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

kernel 3

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathM = "/content/drive/MyDrive/tpcv/MAUVAISE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathM):
    chemin_image = os.path.join(pathM, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply the median filter with a kernel size of 3 (adjust as needed)
            image_filtree = cv2.medianBlur(image, 3)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

Min-max filter

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathB = "/content/drive/MyDrive/tpcv/BONNE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathB):
    chemin_image = os.path.join(pathB, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Apply a median filter with a kernel size of 7 (adjust as needed)
            image_filtree = cv2.medianBlur(image, 7)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()
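The cell above actually applies a median filter; the true min and max filters of this section's title correspond to grayscale erosion and dilation, which the next two cells build on. A minimal hedged sketch on a single picture, assuming image still holds the last image loaded in the loop above:

# Min filter = local minimum (erosion); max filter = local maximum (dilation).
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
min_filtered = cv2.erode(image, kernel)   # darkest value in each 7x7 window
max_filtered = cv2.dilate(image, kernel)  # brightest value in each 7x7 window

fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(cv2.cvtColor(min_filtered, cv2.COLOR_BGR2RGB)); axes[0].set_title("Min filter")
axes[1].imshow(cv2.cvtColor(max_filtered, cv2.COLOR_BGR2RGB)); axes[1].set_title("Max filter")
for ax in axes:
    ax.axis('off')
plt.show()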

Min filter (erosion)

import cv2
import os
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathB = "/content/drive/MyDrive/tpcv/BONNE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathB):
    chemin_image = os.path.join(pathB, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image
        image = cv2.imread(chemin_image)

        if image is not None:
            # Minimum filter: erosion with a 7x7 rectangular structuring element
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
            image_filtree = cv2.erode(image, kernel)

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(cv2.cvtColor(filtered_images[i], cv2.COLOR_BGR2RGB))
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

Filter: dilation minus erosion (morphological gradient)

import cv2
import os
import numpy as np
import matplotlib.pyplot as plt

# Define the path to the folder containing the images


pathB = "/content/drive/MyDrive/tpcv/BONNE"

# Initialize a list to store the filtered images


filtered_images = []

# Loop over all the image files in the folder
for nom_fichier in os.listdir(pathB):
    chemin_image = os.path.join(pathB, nom_fichier)

    # Check that the file is an image (TIFF format)
    if nom_fichier.lower().endswith(".tif"):
        # Load the image in grayscale
        image = cv2.imread(chemin_image, cv2.IMREAD_GRAYSCALE)

        if image is not None:
            # Min-max filter via morphological operations: dilation minus erosion
            kernel = np.ones((7, 7), np.uint8)
            dilation = cv2.dilate(image, kernel, iterations=1)
            erosion = cv2.erode(image, kernel, iterations=1)
            image_filtree = dilation - erosion

            # Add the filtered image to the list
            filtered_images.append(image_filtree)

# Create a single figure with three subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# Display the first three filtered images side by side
for i in range(3):
    axes[i].imshow(filtered_images[i], cmap='gray')
    axes[i].set_title(f"Filtered image {i+1}")
    axes[i].axis('off')

plt.show()

FOURIER TRANSFORM

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Define the path to the image file


path_to_image = "/content/drive/MyDrive/tpcv/BONNE/105_5.tif" # Replace with your image path

# Load the image


imaget = cv2.imread(path_to_image)

# Convert the image to grayscale
if len(imaget.shape) == 3:
    image_gray = cv2.cvtColor(imaget, cv2.COLOR_BGR2GRAY)
else:
    image_gray = imaget

# Perform the 2D Fourier transform
f_transform = np.fft.fft2(image_gray)
f_transform_shifted = np.fft.fftshift(f_transform)

# Compute the magnitude spectrum of the centred transform (log scale for visualization)
magnitude_spectrum = np.log(np.abs(f_transform_shifted) + 1)

# Display the original image and the magnitude spectrum
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.imshow(cv2.cvtColor(imaget, cv2.COLOR_BGR2RGB))
plt.title("Original Image")
plt.axis('off')

plt.subplot(122)
plt.imshow(magnitude_spectrum, cmap='gray')
plt.title("Magnitude Spectrum (Fourier Transform)")
plt.axis('off')

plt.show()

FILTERS ON THE SPECTRUM

High-pass + FFT

rows, cols = image_gray.shape
crow,ccol = rows//2 , cols//2
f = np.fft.fft2(image_gray)
fshift = np.fft.fftshift(f)
fshift[crow-30:crow+31, ccol-30:ccol+31] = 0
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.real(img_back)
plt.subplot(131),plt.imshow(imaget, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(img_back, cmap = 'gray')
plt.title('Image after HPF'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(img_back)
plt.title('Result in JET'), plt.xticks([]), plt.yticks([])
plt.show()

import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread, imshow
from skimage.color import rgb2hsv, rgb2gray, rgb2yuv
from skimage import color, exposure, transform
from skimage.exposure import equalize_hist

dark_image_grey = rgb2gray(imaget)
plt.figure(num=None, figsize=(8, 6), dpi=80)
plt.imshow(dark_image_grey, cmap='gray');

dark_image_grey_fourier = np.fft.fftshift(np.fft.fft2(dark_image_grey))
plt.figure(num=None, figsize=(8, 6), dpi=80)
plt.imshow(np.log(abs(dark_image_grey_fourier)), cmap='gray');


def fourier_masker_ver(image, i):
    f_size = 15
    dark_image_grey_fourier = np.fft.fftshift(np.fft.fft2(rgb2gray(image)))
    dark_image_grey_fourier[:225, 235:240] = i
    dark_image_grey_fourier[-225:, 235:240] = i
    fig, ax = plt.subplots(1, 3, figsize=(15, 15))
    ax[0].imshow(np.log(abs(dark_image_grey_fourier)), cmap='gray')
    ax[0].set_title('Masked Fourier', fontsize=f_size)
    ax[1].imshow(rgb2gray(image), cmap='gray')
    ax[1].set_title('Greyscale Image', fontsize=f_size)
    ax[2].imshow(abs(np.fft.ifft2(dark_image_grey_fourier)), cmap='gray')
    ax[2].set_title('Transformed Greyscale Image', fontsize=f_size)

fourier_masker_ver(imaget, 1)

High-pass

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Define the image path
path_to_image = "/content/drive/MyDrive/tpcv/BONNE/105_5.tif"  # Replace with your image path

# Load the image
image = cv2.imread(path_to_image)

# Convert the image to grayscale
if len(image.shape) == 3:
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
    image_gray = image

# Apply the 2D Fourier transform
f_transform = np.fft.fft2(image_gray)
f_transform_shifted = np.fft.fftshift(f_transform)

# Build a mask for a high-pass filter (keep the high frequencies)
rows, cols = image_gray.shape
crow, ccol = rows // 2, cols // 2  # Centre of the spectrum

# Mask with 1s in the high-frequency region and 0s in the low-frequency region
mask = np.ones((rows, cols), np.uint8)
mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0

# Apply the mask to the Fourier spectrum
f_transform_shifted_highpass = f_transform_shifted * mask

# Shift the filtered spectrum back to its original layout
f_transform_highpass = np.fft.ifftshift(f_transform_shifted_highpass)

# Inverse Fourier transform to obtain the filtered image
image_highpass = np.abs(np.fft.ifft2(f_transform_highpass))

# Display the original image and the high-pass filtered image
plt.figure(figsize=(12, 6))
plt.subplot(121)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.title("Original Image")
plt.axis('off')

plt.subplot(122)
plt.imshow(image_highpass, cmap='gray')
plt.title("High-Pass Filtered Image")
plt.axis('off')

plt.show()

Low-pass filter

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Define the image path
path_to_image = "/content/drive/MyDrive/tpcv/BONNE/105_5.tif"  # Replace with your image path

# Load the image
image = cv2.imread(path_to_image)

# Convert the image to grayscale if it is not already
if len(image.shape) == 3:
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
    image_gray = image

# Perform a 2D Fourier transform
f_transform = np.fft.fft2(image_gray)
f_transform_shifted = np.fft.fftshift(f_transform)

# Low-pass filter: remove the high frequencies and keep only the low ones
rows, cols = image_gray.shape
crow, ccol = rows // 2, cols // 2  # Centre of the spectrum
mask = np.zeros((rows, cols), np.uint8)
mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1  # Keep a square low-frequency region

# Apply the mask to the spectrum
f_transform_filtered = f_transform_shifted * mask

# Inverse Fourier transform to obtain the filtered image
image_filtered = np.abs(np.fft.ifft2(np.fft.ifftshift(f_transform_filtered))).astype(np.uint8)

# Display the original image and the filtered image
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.title("Original Image")
plt.axis('off')

plt.subplot(122)
plt.imshow(image_filtered, cmap='gray')
plt.title("Filtered Image (Low-Pass)")
plt.axis('off')

plt.show()

Laplacian filter

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Define the image path
path_to_image = "/content/drive/MyDrive/tpcv/BONNE/105_5.tif"  # Replace with your image path

# Load the image
image = cv2.imread(path_to_image)

# Convert the image to grayscale if it is not already
if len(image.shape) == 3:
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
    image_gray = image

# Apply the Laplacian filter
image_laplacian = cv2.Laplacian(image_gray, cv2.CV_64F)

# Convert the Laplacian result back to an 8-bit grayscale image
image_laplacian = cv2.convertScaleAbs(image_laplacian)

# Display the original image and the filtered image
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.title("Original Image")
plt.axis('off')

plt.subplot(122)
plt.imshow(image_laplacian, cmap='gray')
plt.title("Filtered Image (Laplacian)")
plt.axis('off')

plt.show()

Gaussian filter (frequency domain)

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Define the image path
path_to_image = "/content/drive/MyDrive/tpcv/BONNE/105_5.tif"  # Replace with your image path

# Load the image
image = cv2.imread(path_to_image)

# Convert the image to grayscale if it is not already
if len(image.shape) == 3:
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
    image_gray = image

# Perform a 2D Fourier transform
f_transform = np.fft.fft2(image_gray)
f_transform_shifted = np.fft.fftshift(f_transform)

# Gaussian filter parameters
sigma = 10  # Standard deviation of the Gaussian filter

# Build the Gaussian filter in the frequency domain
rows, cols = image_gray.shape
crow, ccol = rows // 2, cols // 2  # Centre of the spectrum
x = np.arange(cols) - ccol
y = np.arange(rows) - crow
X, Y = np.meshgrid(x, y)
gaussian_filter = np.exp(-(X**2 + Y**2) / (2 * sigma**2))

# Apply the Gaussian filter to the frequency spectrum
f_transform_filtered = f_transform_shifted * gaussian_filter

# Inverse Fourier transform to obtain the filtered image
image_filtered = np.abs(np.fft.ifft2(np.fft.ifftshift(f_transform_filtered))).astype(np.uint8)

# Display the original image and the filtered image
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.title("Original Image")
plt.axis('off')

plt.subplot(122)
plt.imshow(image_filtered, cmap='gray')
plt.title("Filtered Image (Gaussian)")
plt.axis('off')

plt.show()

compare filters


import cv2
import numpy as np
import matplotlib.pyplot as plt

# Define the path to the image


path_to_image = "/content/drive/MyDrive/tpcv/BONNE/105_5.tif" # Replace with your image path

# Load the image


image = cv2.imread(path_to_image)

# Convert the image to grayscale if it's not already in grayscale


if len(image.shape) == 3:
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
    image_gray = image

# Perform a 2D Fourier transformation


f_transform = np.fft.fft2(image_gray)
f_transform_shifted = np.fft.fftshift(f_transform)

# Define the threshold values for the high-pass and low-pass filters
high_pass_threshold = 10 # Adjust the threshold for the high-pass filter
low_pass_threshold = 50 # Adjust the threshold for the low-pass filter

# Create a high-pass filter


rows, cols = image_gray.shape
crow, ccol = rows // 2, cols // 2 # Center of the spectrum
x = np.arange(cols) - ccol
y = np.arange(rows) - crow
X, Y = np.meshgrid(x, y)
high_pass_filter = 1 - np.exp(-(X**2 + Y**2) / (2 * high_pass_threshold**2))

# Create a low-pass filter


low_pass_filter = np.exp(-(X**2 + Y**2) / (2 * low_pass_threshold**2))

# Apply the filters to the frequency domain


f_transform_high_pass = f_transform_shifted * high_pass_filter
f_transform_low_pass = f_transform_shifted * low_pass_filter

# Perform an inverse Fourier transformation to get the filtered images


image_high_pass = np.abs(np.fft.ifft2(np.fft.ifftshift(f_transform_high_pass))).astype(np.uint8)
image_low_pass = np.abs(np.fft.ifft2(np.fft.ifftshift(f_transform_low_pass))).astype(np.uint8)

# Display the original image and the filtered images


plt.figure(figsize=(15, 5))

plt.subplot(131)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.title("Original Image")
plt.axis('off')

plt.subplot(132)
plt.imshow(image_high_pass, cmap='gray')
plt.title("High-Pass Filtered Image")
plt.axis('off')

plt.subplot(133)
plt.imshow(image_low_pass, cmap='gray')
plt.title("Low-Pass Filtered Image")
plt.axis('off')

plt.show()


Thresholding

import numpy as np
import matplotlib.pyplot as plt

# Assuming the grayscale image is available in the variable image_gray
THRESHOLD1 = image_gray.mean()

image1 = (image_gray > THRESHOLD1).astype(np.uint8) * 255

# Display the thresholded image
plt.imshow(image1, cmap='gray')
plt.show()

Comparison of the Roberts, Sobel and Prewitt filters

vertical_robert_filter = np.array([[1,0],[0,-1]])
horizontal_robert_filter = np.array([[0,1],[-1,0]])

vertical_sobel_filter = np.array([[-1,0,1],[-2,0,2],[-1,0,1]])
horizontal_sobel_filter = np.array([[-1,-2,-1],[0,0,0],[1,2,1]])

vertical_prewitt_filter = np.array([[-1,0,1],[-1,0,1],[-1,0,1]])
horizontal_prewitt_filter = np.array([[-1,-1,-1],[0,0,0],[1,1,1]])

print("vertical robert filter\n",vertical_robert_filter )


print("horizontal robert filter\n",horizontal_robert_filter)
print("vertical sobel filter: \n", vertical_sobel_filter)
print("horizontal sobel filter: \n", horizontal_sobel_filter)

print("vertical prewitt filter: \n", vertical_prewitt_filter)


print("horizontal prewitt filter: \n", horizontal_prewitt_filter)

vertical robert filter
 [[ 1  0]
 [ 0 -1]]
horizontal robert filter
 [[ 0  1]
 [-1  0]]
vertical sobel filter:
 [[-1  0  1]
 [-2  0  2]
 [-1  0  1]]
horizontal sobel filter:
 [[-1 -2 -1]
 [ 0  0  0]
 [ 1  2  1]]
vertical prewitt filter:
 [[-1  0  1]
 [-1  0  1]
 [-1  0  1]]
horizontal prewitt filter:
 [[-1 -1 -1]
 [ 0  0  0]
 [ 1  1  1]]

from scipy.ndimage import convolve
from PIL import Image

gray_img = Image.fromarray(processed_images[0][1])

convolved_img1 = convolve(gray_img, vertical_robert_filter)
convolved_img1 = convolve(convolved_img1, horizontal_robert_filter)

convolved_img2 = convolve(gray_img, vertical_sobel_filter)
convolved_img2 = convolve(convolved_img2, horizontal_sobel_filter)

convolved_img3 = convolve(gray_img, vertical_prewitt_filter)
convolved_img3 = convolve(convolved_img3, horizontal_prewitt_filter)

fig, axes = plt.subplots(1,3,figsize = (12,12));


axes[0].set_title("Robert");
axes[0].imshow(convolved_img1);
axes[1].set_title("Sobel");
axes[1].imshow(convolved_img2);
axes[2].set_title("Prewitt");
axes[2].imshow(convolved_img3);
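Note that convolving with the vertical and then the horizontal kernel, as above, composes the two kernels rather than combining the two directional responses. A hedged sketch of the more common gradient-magnitude combination (the names gray_arr, gx, gy and sobel_magnitude are illustrative, not from the TP):

# Combine the two directional Sobel responses into a gradient magnitude.
gray_arr = np.asarray(gray_img, dtype=float)

gx = convolve(gray_arr, vertical_sobel_filter)    # response to horizontal intensity changes
gy = convolve(gray_arr, horizontal_sobel_filter)  # response to vertical intensity changes
sobel_magnitude = np.hypot(gx, gy)                # |G| = sqrt(gx**2 + gy**2)

plt.imshow(sobel_magnitude, cmap='gray')
plt.title("Sobel gradient magnitude")
plt.axis('off')
plt.show()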

TP3

segmentation

def imageHist(image):
    _, axis = plt.subplots(ncols=2, figsize=(12, 3))
    if image.ndim == 2:
        # Grayscale image
        axis[0].imshow(image, cmap=plt.get_cmap('gray'))
        axis[0].set_title('Grayscale Image')
        axis[1].set_title('Histogram')
        hist = exposure.histogram(image)
        axis[1].plot(hist[0])
    else:
        # Color image
        axis[0].imshow(image, cmap='gray')
        axis[0].set_title('Colored Image')
        axis[1].set_title('Histogram')
        rgbcolors = ['red', 'green', 'blue']
        for i, mycolor in enumerate(rgbcolors):
            axis[1].plot(exposure.histogram(image[..., i])[0], color=mycolor)


imageHist(image)

import glob

import imageio.v3 as iio

import matplotlib.pyplot as plt


import numpy as np
import skimage as ski

# convert the image to grayscale


gray_shapes = ski.color.rgb2gray(image)

fig, ax = plt.subplots()
plt.imshow(gray_shapes, cmap="gray")

<matplotlib.image.AxesImage at 0x7fe55449f4c0>

# create a histogram of the blurred grayscale image


histogram, bin_edges = np.histogram(gray_shapes, bins=256, range=(0.0, 1.0))

fig, ax = plt.subplots()
plt.plot(bin_edges[0:-1], histogram)
plt.title("Grayscale Histogram")
plt.xlabel("grayscale value")
plt.ylabel("pixels")
plt.xlim(0, 1.0)


(0.0, 1.0)

# create a mask based on the threshold


t = 0.7
binary_mask = gray_shapes > t

fig, ax = plt.subplots()
plt.imshow(binary_mask, cmap="gray")

<matplotlib.image.AxesImage at 0x7fe55437ecb0>

# show the histogram of the blurred image


histogram, bin_edges = np.histogram(binary_mask, bins=256, range=(0.0, 1.0))
fig, ax = plt.subplots()
plt.plot(bin_edges[0:-1], histogram)
plt.title("Graylevel histogram")
plt.xlabel("gray value")
plt.ylabel("pixel count")
plt.xlim(0, 1.0)

<__array_function__ internals>:180: RuntimeWarning: Converting input from bool to <cl


(0.0, 1.0)


Automatic thresholding (Otsu)

# perform automatic thresholding
totsu = ski.filters.threshold_otsu(gray_shapes)
print("Found automatic threshold t = {}.".format(totsu))

otsuimg = gray_shapes > totsu

fig, ax = plt.subplots()
plt.imshow(otsuimg , cmap="gray")

<matplotlib.image.AxesImage at 0x7fe54cbfd780>

# show the histogram of the blurred image


histogram, bin_edges = np.histogram(otsuimg, bins=256, range=(0.0, 1.0))
fig, ax = plt.subplots()
plt.plot(bin_edges[0:-1], histogram)
plt.title("Graylevel histogram")
plt.xlabel("gray value")
plt.ylabel("pixel count")
plt.xlim(0, 1.0)

(0.0, 1.0)

Sauvola thresholding

import numpy as np
import cv2
from skimage import io
from skimage.filters import threshold_sauvola
from skimage.color import rgb2gray
from google.colab.patches import cv2_imshow

# Convert the image to grayscale


gray_image = rgb2gray(image)

window_size = 55 # You can adjust this parameter


thresh_sauvola = threshold_sauvola(gray_image, window_size=window_size)
print(thresh_sauvola)
sauvola_image = gray_image > thresh_sauvola

# Display the thresholded image


cv2_imshow( sauvola_image.astype(np.uint8) * 255)
cv2.waitKey(0)
cv2.destroyAllWindows()

[[0.73856931 0.7386687 0.73857471 ... 0.75979349 0.75979437 0.7598798 ]


[0.73861336 0.73871283 0.73864369 ... 0.75973204 0.7597298 0.7598092 ]
[0.73862971 0.73872054 0.7386549 ... 0.75974887 0.75974211 0.75981007]
...
[0.77656527 0.77665248 0.77676007 ... 0.78355237 0.78352835 0.7835885 ]
[0.7765044 0.77659611 0.7766708 ... 0.78356766 0.78353654 0.78360093]
[0.77637828 0.77645789 0.77653486 ... 0.78366243 0.78364047 0.78370167]]

# show the histogram of the blurred image


histogram, bin_edges = np.histogram(sauvola_image, bins=256, range=(0.0, 1.0))
fig, ax = plt.subplots()
plt.plot(bin_edges[0:-1], histogram)
plt.title("sauvolahistogram")
plt.xlabel("gray value")
plt.ylabel("pixel count")
plt.xlim(0, 1.0)


<__array_function__ internals>:180: RuntimeWarning: Converting input from bool to <cl


(0.0, 1.0)

Wolf thresholding

import numpy as np
import cv2
from skimage import io
from skimage.filters import threshold_local
from skimage.color import rgb2gray

# Convert the image to grayscale


gray_image = rgb2gray(image)

# Apply Wolf's thresholding


window_size = 55 # You can adjust this parameter
thresh_wolf = threshold_local(gray_image, block_size=window_size, method='gaussian', offset=0.1)
wolf_image = gray_image > thresh_wolf

# Display the thresholded image


cv2_imshow(wolf_image.astype(np.uint8) * 255)
cv2.waitKey(0)
cv2.destroyAllWindows()

# show the histogram of the blurred image


histogram, bin_edges = np.histogram(wolf_image, bins=256, range=(0.0, 1.0))
fig, ax = plt.subplots()
plt.plot(bin_edges[0:-1], histogram)
plt.title("wolf histogram")
plt.xlabel("gray value")
plt.ylabel("pixel count")
plt.xlim(0, 1.0)


(0.0, 1.0)

Niblack thresholding

import numpy as np
import cv2
from skimage import io
from skimage.filters import threshold_niblack
from skimage.color import rgb2gray

# Convert the image to grayscale


gray_image1 = rgb2gray(image)

# Apply Niblack's thresholding


window_size = 25 # You can adjust this parameter
k = -0.2 # You can adjust this parameter
thresh_niblack = threshold_niblack(gray_image1, window_size=window_size, k=k)
niblack_image = gray_image1 > thresh_niblack

# Display the thresholded image


cv2_imshow(niblack_image.astype(np.uint8) * 255)
cv2.waitKey(0)
cv2.destroyAllWindows()

# show the histogram of the blurred image


histogram, bin_edges = np.histogram(niblack_image, bins=256, range=(0.0, 1.0))
fig, ax = plt.subplots()
plt.plot(bin_edges[0:-1], histogram)
plt.title("niblack histogram")
plt.xlabel("gray value")
plt.ylabel("pixel count")
plt.xlim(0, 1.0)


(0.0, 1.0)

k-means

k=3

import cv2
import numpy as np
from sklearn.cluster import KMeans

image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Reshape the image into a 2D array of pixels


pixels = image.reshape((-1, 3))

# Define the number of clusters (segments) you want


n_clusters = 3

# Apply K-Means clustering


kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(pixels)
labels = kmeans.predict(pixels)
segmented_image = kmeans.cluster_centers_[labels].reshape(image.shape).astype(np.uint8)

# Display the segmented image


cv2_imshow( cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR))
cv2.waitKey(0)
cv2.destroyAllWindows()

/usr/local/lib/python3.10/dist-packages/sklearn/cluster/_kmeans.py:870: FutureWarning
warnings.warn(
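The FutureWarning above comes from leaving n_init at its default value, which scikit-learn announces will change. A hedged variant (parameter values are illustrative) that pins it explicitly and fixes the seed so the segmentation is reproducible and warning-free:

# Same clustering with n_init and random_state set explicitly.
kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
labels = kmeans.fit_predict(pixels)
segmented_image = kmeans.cluster_centers_[labels].reshape(image.shape).astype(np.uint8)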

import cv2
import numpy as np
from sklearn.cluster import KMeans

image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Reshape the image into a 2D array of pixels


pixels = image.reshape((-1, 3))

# Define the number of clusters (segments) you want


n_clusters = 3 # Adjust as needed

# Apply K-Means clustering


kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(pixels)
labels = kmeans.predict(pixels)

# Create a color map for the clusters


cluster_colors = [(0, 255, 0), (255, 0, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255)] # You can define your own colors

# Create an empty segmented image


segmented_image = np.zeros_like(image)

# Assign each pixel to its cluster color


for i in range(n_clusters):
cluster_mask = (labels == i).reshape(image.shape[:-1])
segmented_image[cluster_mask] = cluster_colors[i]

# Display the segmented image


cv2_imshow( cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR))
cv2.waitKey(0)
cv2.destroyAllWindows()

/usr/local/lib/python3.10/dist-packages/sklearn/cluster/_kmeans.py:870: FutureWarning
warnings.warn(

k=7

# Define the number of clusters (segments) you want


n_clusters = 7

# Apply K-Means clustering


kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(pixels)
labels = kmeans.predict(pixels)
segmented_image = kmeans.cluster_centers_[labels].reshape(image.shape).astype(np.uint8)

# Display the segmented image


cv2_imshow( cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR))
cv2.waitKey(0)
cv2.destroyAllWindows()


/usr/local/lib/python3.10/dist-packages/sklearn/cluster/_kmeans.py:870: FutureWarning: The default value of `n_init` will change fr


warnings.warn(

k=200

# Define the number of clusters (segments) you want


n_clusters = 200

# Apply K-Means clustering


kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(pixels)
labels = kmeans.predict(pixels)
segmented_image = kmeans.cluster_centers_[labels].reshape(image.shape).astype(np.uint8)

# Display the segmented image


cv2_imshow( cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR))
cv2.waitKey(0)
cv2.destroyAllWindows()

<ipython-input-236-bdfaae762c6e>:6: ConvergenceWarning: Number of distinct clusters


kmeans.fit(pixels)
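
The three runs above fix the number of clusters by hand (k = 3, 7, 200). A quick, hedged way to choose k more systematically is the elbow method: fit K-Means for a range of k values and plot the inertia (within-cluster sum of squared distances). This is a minimal sketch reusing the pixels array defined above; the tested range of k is arbitrary.

import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# Fit K-Means for several values of k and record the inertia of each fit
inertias = []
k_values = range(2, 11)
for k in k_values:
    km = KMeans(n_clusters=k, n_init=10, random_state=0)
    km.fit(pixels)
    inertias.append(km.inertia_)

# The "elbow" of this curve is a reasonable choice for k
plt.plot(list(k_values), inertias, marker='o')
plt.xlabel('number of clusters k')
plt.ylabel('inertia (within-cluster SSE)')
plt.title('Elbow curve for K-Means on pixel values')
plt.show()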

Gradient

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load the image in grayscale


image = cv2.imread(path_to_image, cv2.IMREAD_GRAYSCALE)

# Apply Sobel, Prewitt, and Roberts operators


sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=3)
sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=3)
prewitt_x = cv2.filter2D(image, -1, np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]))
prewitt_y = cv2.filter2D(image, -1, np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]))
roberts_x = cv2.filter2D(image, -1, np.array([[-1, 0], [0, 1]]))
roberts_y = cv2.filter2D(image, -1, np.array([[0, -1], [1, 0]]))

# Apply Canny edge detection


canny = cv2.Canny(image, 100, 200)

# Plot the results


plt.figure(figsize=(12, 8))

plt.subplot(2, 3, 1)
plt.imshow(image, cmap='gray')
plt.title('Original Image')
plt.axis('off')

plt.subplot(2, 3, 2)
plt.imshow(np.abs(sobel_x), cmap='gray')
plt.title('Sobel X')
plt.axis('off')

plt.subplot(2, 3, 3)
plt.imshow(np.abs(sobel_y), cmap='gray')
plt.title('Sobel Y')
plt.axis('off')

plt.subplot(2, 3, 4)
plt.imshow(np.abs(prewitt_x), cmap='gray')
plt.title('Prewitt X')
plt.axis('off')

plt.subplot(2, 3, 5)
plt.imshow(np.abs(prewitt_y), cmap='gray')
plt.title('Prewitt Y')
plt.axis('off')

plt.subplot(2, 3, 6)
plt.imshow(canny, cmap='gray')
plt.title('Canny')
plt.axis('off')

plt.tight_layout()
plt.show()
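
The Roberts responses above are computed but not displayed, and Sobel X and Y are shown separately. As a small complementary sketch (reusing sobel_x and sobel_y from the cell above, nothing else assumed), the two Sobel responses can be combined into a single gradient-magnitude map that is easier to compare with the Canny result.

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Combine the horizontal and vertical Sobel responses into one magnitude map
gradient_magnitude = np.sqrt(sobel_x ** 2 + sobel_y ** 2)

# Rescale to 0-255 for display
gradient_magnitude = cv2.normalize(gradient_magnitude, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

plt.imshow(gradient_magnitude, cmap='gray')
plt.title('Sobel gradient magnitude')
plt.axis('off')
plt.show()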


Contour detection

# Define the kernel for erosion and dilation


kernel = np.ones((15, 15), np.uint8)  # erosion/dilation expect a structuring element, not a size tuple

# Erosion
erosion = cv2.erode(canny, kernel, iterations=1)

# Dilation
dilation = cv2.dilate(canny, kernel, iterations=1)

# Display the Canny image, erosion, and dilation results


cv2_imshow(canny)
cv2_imshow(erosion)
cv2_imshow(dilation)

#Detect contours
contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
print(contours[0])


[[[138 356]]

[[137 357]]

[[136 357]]

[[135 357]]

[[134 357]]

[[134 358]]

[[134 359]]

[[134 360]]

[[134 361]]

[[135 361]]

[[136 361]]

[[137 361]]

[[138 360]]

[[139 360]]

[[140 360]]

[[141 360]]

[[142 361]]

[[143 361]]

[[144 361]]

[[144 360]]

[[144 359]]

[[144 358]]

[[144 357]]

[[143 357]]

[[142 356]]

[[141 356]]

[[140 356]]

[[139 356]]]

height, width = image.shape[:2]  # shape[:2] gives (height, width) for both grayscale and color images


min_x, min_y = width, height
max_x, max_y = 0, 0

for contour, hier in zip(contours, hierarchy[0]):  # hierarchy has shape (1, N, 4)


(x, y, w, h) = cv2.boundingRect(contour)
min_x = min(x, min_x)
max_x = max(x + w, max_x)
min_y = min(y, min_y)
max_y = max(y + h, max_y)

if w > 80 and h > 80:


cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)

if max_x - min_x > 0 and max_y - min_y > 0:


fin = cv2.rectangle(image, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2)
#plt.imshow(fin)
final=cv2.drawContours(image, contours,-1,(0,0,255),6)
plt.imshow(final,cmap = 'gray')
plt.imsave('/content/new.png',final)


import cv2
from google.colab.patches import cv2_imshow
import numpy as np

# Load image and HSV color threshold


image = cv2.imread('/content/new.png')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([0, 0, 80])
upper = np.array([255, 255, 255])
mask = cv2.inRange(hsv, lower, upper)

# Find contours on the color threshold mask


contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Initialize variables for minimum and maximum coordinates


height, width = image.shape[:2]
min_x, min_y = width, height
max_x, max_y = 0, 0

# Loop through contours, draw rectangles, and update min/max coordinates


for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
min_x = min(x, min_x)
max_x = max(x + w, max_x)
min_y = min(y, min_y)
max_y = max(y + h, max_y)

if w > 80 and h > 80:


cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)

# Check if a bounding box is formed


if max_x - min_x > 0 and max_y - min_y > 0:
fin = cv2.rectangle(image, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2)

# Display and save the final image


cv2_imshow(fin)
cv2.imwrite('/content/final_image.png', fin)

True

#fingerprint
import cv2
from PIL import ImageOps
from google.colab.patches import cv2_imshow
import matplotlib.pyplot as plt
import skimage
from skimage import measure, morphology
from skimage.color import label2rgb
from skimage.measure import regionprops
from PIL import Image, ImageDraw, ImageOps
img = cv2.imread('/content/final_image.png', 0)
img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1]
# Extract Blobs
blobs = img > img.mean()
blobs_labels = measure.label(blobs, background=1)
image_label_overlay = label2rgb(blobs_labels, image=img,bg_label=0)
total_area = 0
counter = 0
average = 0.0
for region in regionprops(blobs_labels):
if region.area >70:
total_area = total_area + region.area
counter = counter + 1
# Threshold
average = (total_area/counter)
a4_constant = ((average/100.0)*250.0)+100
b = morphology.remove_small_objects(blobs_labels, a4_constant)
plt.imsave('pre_version.png', b)

# read the pre-version


img2 = cv2.imread('pre_version.png',cv2.IMREAD_GRAYSCALE)
img2 = cv2.threshold(img2, 50, 250, cv2.THRESH_BINARY_INV)[1]
cv2_imshow(img2)
plt.imsave('/content/fingerprints.png',img2)


#signature
import cv2
from google.colab.patches import cv2_imshow
import matplotlib.pyplot as plt
import skimage
from skimage import measure, morphology
from skimage.color import label2rgb
from skimage.measure import regionprops
img = cv2.imread('/content/final_image.png', 0)
#cv2_imshow(img)
img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1]
# Extract Blobs
blobs = img > img.mean()
blobs_labels = measure.label(blobs, background=1)
image_label_overlay = label2rgb(blobs_labels, image=img)
b = morphology.remove_small_objects(blobs_labels, 230)
plt.imsave('pre_version.png', b)
# read the pre-version
img2 = cv2.imread('pre_version.png', 0)
hsv = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
lower = np.array([0, 0, 80])
upper = np.array([255, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY_INV)[1]
cv2_imshow(mask)
plt.imsave('/content/signature.png',mask)

TP4

Detail detection

from google.colab.patches import cv2_imshow


# Adaptive thresholding for binarization
thresh2 = cv2.adaptiveThreshold(dilation, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

# Morphological operations (optional) to remove noise


kernel = np.ones((3, 3), np.uint8)
thresh2 = cv2.morphologyEx(thresh2, cv2.MORPH_CLOSE, kernel)

cv2_imshow( thresh2)
cv2.waitKey(0)
cv2.destroyAllWindows()


# Apply morphological operations to remove small noise


kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(canny , cv2.MORPH_OPEN, kernel, iterations=2)

# Find contours in the opened image


contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Create a mask for the largest fingerprint contour


mask = np.zeros_like(canny )

# Find the largest contour


if contours:
largest_contour = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [largest_contour], -1, 255, thickness=cv2.FILLED)

# Keep the fingerprint region intact


result = np.full_like(canny, 255)
result = cv2.bitwise_and(result, result, mask=mask)
cv2_imshow( result)

import cv2
import numpy as np

# Apply morphological operations to remove small noise


kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(thresh2, cv2.MORPH_OPEN, kernel, iterations=2)

# Find contours in the opened image


contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Create a mask for the contours


mask = np.zeros_like(thresh2)

for contour in contours:


cv2.drawContours(mask, [contour], -1, 255, thickness=cv2.FILLED)

# Apply the mask to the original image


result = cv2.bitwise_and(thresh2, thresh2, mask=mask)

cv2_imshow( result)


# Apply morphological operations to remove small noise


kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(thresh2, cv2.MORPH_OPEN, kernel, iterations=2)

# Find contours in the opened image


contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Create a mask for the fingerprint contours


mask = np.zeros_like(thresh2)

for contour in contours:


cv2.drawContours(mask, [contour], -1, 255, thickness=cv2.FILLED)

# Invert the mask to keep everything outside the contours


mask = cv2.bitwise_not(mask)

# Set everything outside the contours to zero


result = cv2.bitwise_and(thresh2, thresh2, mask=mask)

cv2_imshow( result)

# Apply morphological operations to remove small noise


kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(thresh2, cv2.MORPH_OPEN, kernel, iterations=2)

# Find contours in the opened image


contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Create a mask for the fingerprint contours


mask = np.zeros_like(thresh2)

for contour in contours:


cv2.drawContours(mask, [contour], -1, 255, thickness=cv2.FILLED)

# Invert the mask to keep everything outside the contours


mask = cv2.bitwise_not(mask)

# Perform erosion to further clean the fingerprint region


erosion_kernel = np.ones((3, 3), np.uint8)
mask = cv2.erode(mask, erosion_kernel, iterations=1)

# Restore the image by filling the non-fingerprint regions with a specified value (e.g., 255 for white)
restored_img = np.full_like(thresh2, 255) # Create a white image with the same size as the original

# Keep the fingerprint region intact


restored_img = cv2.bitwise_and(thresh2, restored_img , mask=mask)

cv2_imshow( restored_img)


# Apply morphological operations to remove small noise


kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(thresh2, cv2.MORPH_OPEN, kernel, iterations=2)

# Find contours in the opened image


contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Create a mask for the fingerprint contours


mask = np.zeros_like(thresh2)

for contour in contours:


cv2.drawContours(mask, [contour], -1, 255, thickness=cv2.FILLED)

# Invert the mask to keep everything outside the contours


mask = cv2.bitwise_not(mask)

# First, perform erosion to remove small noise


erosion_kernel = np.ones((3, 3), np.uint8)
erosion_mask = cv2.erode(mask, erosion_kernel, iterations=1)

# Then, perform dilation to restore and smooth the fingerprint region


dilation_kernel = np.ones((5, 5), np.uint8)
restored_mask = cv2.dilate(erosion_mask, dilation_kernel, iterations=1)

# Restore the image by filling the non-fingerprint regions with a specified value (e.g., 255 for white)
restored_img = np.full_like(thresh2, 255) # Create a white image with the same size as the original

# Keep the fingerprint region intact


restored_img = cv2.bitwise_and(restored_img, thresh2, mask=restored_mask)

# Show the restored image


cv2_imshow(restored_img)
cv2.waitKey(0)
cv2.destroyAllWindows()

!pip install fingerprint_enhancer

Requirement already satisfied: fingerprint_enhancer in /usr/local/lib/python3.10/dist-packages (0.0.13)


Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from fingerprint_enhancer) (1.23.5)
Requirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (from fingerprint_enhancer) (4.8.0.76)
Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from fingerprint_enhancer) (1.11.3)

import fingerprint_enhancer # Load the library

img = cv2.imread('/content/drive/MyDrive/tpcv/BONNE/105_5.tif')
out = fingerprint_enhancer.enhance_Fingerprint(img) # enhance the fingerprint image

cv2_imshow( out)

Termination and bifurcation detection

import cv2
import numpy as np

# Perform skeletonization to extract the ridge pattern


skeleton = cv2.ximgproc.thinning(out, thinningType=cv2.ximgproc.THINNING_GUOHALL)

# Detect terminations and bifurcations using hit-or-miss transform


kernel_termination = np.array([
[0, 0, 0],
[1, 1, 0],
[0, 1, 0]
], dtype=np.uint8)

kernel_bifurcation = np.array([
[0, 1, 0],
[1, 1, 1],
[0, 1, 0]
], dtype=np.uint8)

# Apply hit-or-miss transform


terminations = cv2.morphologyEx(skeleton, cv2.MORPH_HITMISS, kernel_termination)
bifurcations = cv2.morphologyEx(skeleton, cv2.MORPH_HITMISS, kernel_bifurcation)

# Find the coordinates of terminations and bifurcations


termination_coords = np.transpose(np.where(terminations > 0))
bifurcation_coords = np.transpose(np.where(bifurcations > 0))

# Draw circles around terminations and bifurcations


for x, y in termination_coords:
cv2.circle(out, (y, x), 3, (0, 0, 255), -1)

for x, y in bifurcation_coords:
cv2.circle(out, (y, x), 3, (0, 255, 0), -1)

# Save the image with detected features


cv2.imwrite('fingerprint_with_features.png', out)
# Display the image with detected features
cv2_imshow( out)
print(x,y)
print(termination_coords)
print(bifurcation_coords)


282 64
[[ 96 205]
[ 96 207]
[117 218]
[120 194]
[161 88]
[173 133]
[234 90]
[261 69]
[282 64]]
[]

Minutiae detection

import cv2
from skimage import morphology
import numpy as np
import os

def detect_minutiae(image_path):
if not os.path.isfile(image_path):
raise FileNotFoundError(f"Image not found at path: {image_path}")

# Load the fingerprint image in grayscale


img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

if img is None:
raise Exception(f"Failed to load image at path: {image_path}")

# Binarize the image using thresholding


_, binarized_img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# Apply a morphological skeletonization to the binarized image


skeleton = morphology.skeletonize(binarized_img / 255)

# Find minutiae (ridge endings and bifurcations)


minutiae = cv2.findContours((skeleton * 255).astype('uint8'), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
minutiae = [c[0] for c in minutiae[0]]

return minutiae

# Define the path to the first image in the "train_folder"


train_folder_path = "/content/drive/MyDrive/tpcv/data/train"
first_image = os.listdir(train_folder_path)[25]
image_path = os.path.join(train_folder_path, first_image)

minutiae = detect_minutiae(image_path)

from google.colab.patches import cv2_imshow
# Load the original image
original_img = cv2.imread(image_path)

# Draw circles at the locations of minutiae


for point in minutiae:
center = (int(point[0][0]), int(point[0][1])) # Convert to integer coordinates
cv2.circle(original_img, center, 3, (0, 255, 0), -1)

# Display the image with minutiae


cv2_imshow( original_img)
cv2.waitKey(0)
cv2.destroyAllWindows()


for minutia_location in minutiae:


x_coordinate = minutia_location[0][0]
y_coordinate = minutia_location[0][1]

# Calculate theta (rotation angle)


theta = np.arctan2(y_coordinate, x_coordinate)

print(f"x={x_coordinate}, y={y_coordinate}, theta={theta}")

x=103, y=183, theta=1.0581474631177334


x=107, y=180, theta=1.034471807729759
x=135, y=179, theta=0.924619042410104
x=128, y=179, theta=0.9500185788432532
x=92, y=179, theta=1.096038063896483
x=196, y=178, theta=0.7373069300828965
x=177, y=176, theta=0.7825653097856106
x=37, y=176, theta=1.3635864691949358
x=204, y=175, theta=0.7090298233567726
x=180, y=174, theta=0.7684506335910434
x=179, y=172, theta=0.7654577867631893
x=133, y=172, theta=0.9125768696810034
x=119, y=171, theta=0.9628230097985966
x=202, y=170, theta=0.6995878987009462
x=95, y=169, theta=1.0586878442795182
x=28, y=169, theta=1.4066073419839134
x=185, y=168, theta=0.7372767021644002
x=220, y=166, theta=0.6464038582601841
x=186, y=163, theta=0.7195907450054881
x=126, y=162, theta=0.9097531579442097
x=109, y=162, theta=0.9785323854050311
x=150, y=160, theta=0.8176450458327023
x=109, y=153, theta=0.9517844639425573
x=106, y=153, theta=0.9649118738723885
x=9, y=153, theta=1.512040504079174
x=202, y=152, theta=0.6450833859538322
x=188, y=152, theta=0.6799088548723576
x=128, y=152, theta=0.870903457075653
x=112, y=152, theta=0.9357695914045828
x=149, y=150, theta=0.7887426325329198
x=173, y=147, theta=0.7043262508522844
x=132, y=145, theta=0.8322951604356077
x=18, y=145, theta=1.4472902284788582
x=134, y=143, theta=0.8178777121849488
x=22, y=143, theta=1.4181469983996315
x=129, y=142, theta=0.8333318977961075
x=114, y=142, theta=0.8943401203873141
x=152, y=141, theta=0.7478731245671852
x=132, y=141, theta=0.8183532610117464
x=138, y=140, theta=0.7925922838877404
x=55, y=137, theta=1.189032088710714
x=136, y=134, theta=0.7779908914662833
x=208, y=133, theta=0.5689038024772447
x=136, y=132, theta=0.7704738984073634
x=114, y=131, theta=0.8546748795962951
x=165, y=130, theta=0.6673061349783543
x=101, y=130, theta=0.910285922944416
x=132, y=129, theta=0.7739044166824178
x=78, y=129, theta=1.0269638704927742
x=143, y=128, theta=0.7301040317438175
x=164, y=127, theta=0.6589290070957053
x=81, y=125, theta=0.9958283667439219
x=181, y=124, theta=0.6006441551256168
x=139, y=119, theta=0.7080335039694751
x=130, y=118, theta=0.7370487767455455
x=130, y=111, theta=0.7067227211273577
x=75, y=104, theta=0.9460137802932868
x=129, y=0, theta=0.0
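
The theta printed above is only the polar angle of each minutia's coordinates with respect to the image origin, not the local ridge direction. A hedged alternative is to estimate the dominant orientation in a small window around each minutia from the image gradients. This is a sketch only: it reuses original_img and minutiae from the cells above, and the window half-size of 8 pixels is an arbitrary choice.

import cv2
import numpy as np

gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)

def local_ridge_orientation(x, y, half_window=8):
    # Doubled-angle average of the local gradient direction; the ridge runs perpendicular to it
    ys = slice(max(y - half_window, 0), y + half_window + 1)
    xs = slice(max(x - half_window, 0), x + half_window + 1)
    wx, wy = gx[ys, xs], gy[ys, xs]
    return 0.5 * np.arctan2(2.0 * np.sum(wx * wy), np.sum(wx ** 2 - wy ** 2))

for point in minutiae[:10]:
    x, y = int(point[0][0]), int(point[0][1])
    print(f"x={x}, y={y}, local gradient orientation={local_ridge_orientation(x, y):.3f} rad")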

Feature extraction

def display_histogram(image_path):
img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
hist = cv2.calcHist([img], [0], None, [256], [0, 256])

plt.plot(hist)
plt.xlabel('Pixel Value')
plt.ylabel('Frequency')
plt.title('Histogram')
plt.show()

input_Bonne_path = "/content/drive/MyDrive/tpcv/BONNE"
input_Mauv_path = "/content/drive/MyDrive/tpcv/MAUVAISE"
output_Bonne_path_fourier2 = "/content/drive/MyDrive/tpcv/seg_Bonne"
output_Mauv_path_fourier2 = "/content/drive/MyDrive/tpcv/seg_Mauv"

# Display the histogram of the first image in the folder


display_histogram(os.path.join(input_Bonne_path, os.listdir(input_Bonne_path)[0]))

F1: orientation vector for image processing

import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import skew, kurtosis
import os

def calculate_orientation(image):
# Calculate gradient using Sobel operators
gradient_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=3)
gradient_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=3)

# Calculate orientation map


orientation_map = np.arctan2(gradient_y, gradient_x)

return orientation_map

def calculate_histogram(orientation_map):
# Flatten the orientation map
flat_orientation = orientation_map.flatten()

# Calculate histogram
hist, bins = np.histogram(flat_orientation, bins=180, range=[-np.pi, np.pi])

return hist, bins

def calculate_first_order_statistics(hist):
# Calculate first-order statistics
mean = np.mean(hist)
variance = np.var(hist)
skewness = skew(hist)
kurt = kurtosis(hist)

# Calculate central moments


moments = [np.mean((hist - mean)**i) for i in range(1, 5)]

return mean, variance, skewness, kurt, moments

def plot_orientation_histogram(orientation_map):

# Calculate histogram
hist, bins = calculate_histogram(orientation_map)
mean, variance, skewness, kurt, moments = calculate_first_order_statistics(hist)
print("F1 : ",[mean, variance, skewness, kurt, moments])
print("**********")
print(f"Mean: {mean}")
print(f"Variance: {variance}")
print(f"Skewness: {skewness}")
print(f"Kurtosis: {kurt}")
print(f"Central Moments: {moments}")

# Define the folder path


input_folder_path = "/content/drive/MyDrive/tpcv/BONNE" # Change this to your input folder
os.makedirs("primitive_1", exist_ok=True)

# Iterate through the images in the input folder


for filename in os.listdir(input_folder_path):
if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tif', '.bmp')): # Adjust the image extensions as needed
image_path = os.path.join(input_folder_path, filename)
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

# Step 1: Calculate the orientation map


orientation_map = calculate_orientation(image)

# Step 2: Plot the histogram and calculate first-order statistics


print(f"Processing image: {filename}")
plot_orientation_histogram(orientation_map)
print("\n")

Processing image: 102_4.tif


F1 : [517.6888888888889, 635002.5920987654, 10.915885466825653, 131.6856891971536, [-3.536923840228054e-14, 635002.5920987654, 552
**********
Mean: 517.6888888888889
Variance: 635002.5920987654
Skewness: 10.915885466825653
Kurtosis: 131.6856891971536
Central Moments: [-3.536923840228054e-14, 635002.5920987654, 5523599955.004959, 54309080408060.24]

Processing image: 105_5.tif


F1 : [517.6888888888889, 60996.480987654315, 2.8082130855596765, 11.683704042814195, [2.021099337273174e-14, 60996.480987654315, 4
**********

Mean: 517.6888888888889
Variance: 60996.480987654315
Skewness: 2.8082130855596765
Kurtosis: 11.683704042814195
Central Moments: [2.021099337273174e-14, 60996.480987654315, 42304564.56792319, 54631758924.57806]

Processing image: 105_3.tif


F1 : [517.6888888888889, 205651.0809876543, 8.188397028562278, 84.95030531222528, [2.021099337273174e-14, 205651.0809876543, 76365
**********
Mean: 517.6888888888889
Variance: 205651.0809876543
Skewness: 8.188397028562278
Kurtosis: 84.95030531222528
Central Moments: [2.021099337273174e-14, 205651.0809876543, 763651812.7945898, 3719626599823.5312]

F2: texture feature vector for image processing

def calculate_texture_features(image):
# Convert the image to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Calculate gradient magnitude


gradient_magnitude = cv2.magnitude(cv2.Sobel(gray_image, cv2.CV_64F, 1, 0), cv2.Sobel(gray_image, cv2.CV_64F, 0, 1))

# Calculate texture features


mean_intensity = np.mean(gray_image)
std_intensity = np.std(gray_image)
mean_gradient = np.mean(gradient_magnitude)
std_gradient = np.std(gradient_magnitude)

# Calculate contrast, homogeneity, correlation, and energy


contrast = np.mean((gray_image - mean_intensity) ** 2)
homogeneity = np.mean(1 / (1 + (gray_image - mean_intensity) ** 2))
correlation = np.mean((gray_image - mean_intensity) * (gradient_magnitude - mean_gradient)) / (std_intensity * std_gradient)
energy = np.mean(gradient_magnitude ** 2)

return contrast, homogeneity, correlation, energy

# Define the folder path


input_folder_path = "/content/drive/MyDrive/tpcv/BONNE" # Change this to your input folder
os.makedirs("primitive_2", exist_ok=True)

# Iterate through the images in the input folder


for filename in os.listdir(input_folder_path):
if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tif', '.bmp')): # Adjust the image extensions as needed
image_path = os.path.join(input_folder_path, filename)
image = cv2.imread(image_path)

# Step 1: Calculate texture features


contrast, homogeneity, correlation, energy = calculate_texture_features(image)

# Step 2: Print or use the features as needed


print(f"Texture Features for {filename}:")
print("F2 : ",[contrast, homogeneity, correlation, energy])
print(f"Contrast: {contrast}")
print(f"Homogeneity: {homogeneity}")
print(f"Correlation: {correlation}")
print(f"Energy: {energy}")
print("\n")

Texture Features for 102_4.tif:


F2 : [4558.328778070897, 0.00368667603842239, -0.0030299549566865746, 98286.81187757554]
Contrast: 4558.328778070897
Homogeneity: 0.00368667603842239
Correlation: -0.0030299549566865746
Energy: 98286.81187757554

Texture Features for 105_5.tif:


F2 : [4064.158482141013, 0.003550382507692227, -0.010139746381047872, 62447.85946085165]
Contrast: 4064.158482141013
Homogeneity: 0.003550382507692227
Correlation: -0.010139746381047872
Energy: 62447.85946085165

Texture Features for 105_3.tif:


F2 : [4127.718636271256, 0.003354058374801654, 0.02166119875189265, 83951.09746308379]

Contrast: 4127.718636271256
Homogeneity: 0.003354058374801654
Correlation: 0.02166119875189265
Energy: 83951.09746308379
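
The contrast, homogeneity, correlation and energy above are computed directly from intensity and gradient statistics. A common alternative is to derive the same descriptors from a gray-level co-occurrence matrix. This is a hedged sketch: it assumes a recent scikit-image (graycomatrix/graycoprops; older releases spell them greycomatrix/greycoprops), reuses input_folder_path from the cell above, and the distance/angle settings are arbitrary.

import os
import cv2
import numpy as np
from skimage.feature import graycomatrix, graycoprops

def glcm_texture_features(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Co-occurrence matrix for a one-pixel offset in four directions
    glcm = graycomatrix(gray, distances=[1], angles=[0, np.pi/4, np.pi/2, 3*np.pi/4],
                        levels=256, symmetric=True, normed=True)
    return {prop: graycoprops(glcm, prop).mean()
            for prop in ('contrast', 'homogeneity', 'correlation', 'energy')}

example = cv2.imread(os.path.join(input_folder_path, os.listdir(input_folder_path)[0]))
print(glcm_texture_features(example))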

**Skeletonization of the images**

from skimage import morphology

def apply_skeletonization(input_folder, output_folder):


os.makedirs(output_folder, exist_ok=True)

for filename in os.listdir(input_folder):


img_path = os.path.join(input_folder, filename)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)

# Apply binary thresholding (if not already binary)


_, binary_img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)

# Apply skeletonization
skeleton = morphology.skeletonize(binary_img // 255)

output_path = os.path.join(output_folder, filename)


cv2.imwrite(output_path, (skeleton * 255).astype('uint8'))

# Define the folder paths


input_folder_path = "/content/drive/MyDrive/tpcv/data/train"
output_folder_path = "/content/drive/MyDrive/tpcv/data/skeleton_train"

# Create folder for skeletonized images


apply_skeletonization(input_folder_path, output_folder_path)

import os
import cv2
import matplotlib.pyplot as plt

def load_images_from_folder(folder_path):
images = []
for filename in os.listdir(folder_path):
img_path = os.path.join(folder_path, filename)
img = cv2.imread(img_path)
if img is not None:
images.append(img)
return images

# Specify the folder path


folder_path = "/content/drive/MyDrive/tpcv/data/skeleton_train"

# Load images from the folder


images = load_images_from_folder(folder_path)

# Visualize the images


plt.figure(figsize=(15, 5))
for i in range(min(len(images), 5)): # Display at most 5 images for better visualization
plt.subplot(1, 5, i + 1)
plt.imshow(cv2.cvtColor(images[i], cv2.COLOR_BGR2RGB)) # Convert BGR to RGB for matplotlib
plt.axis('off')

plt.show()


F3: geometric and texture primitives computed on the skeletonized images

def calculate_geometric_primitives(contour):
# Calculate area and perimeter
area = cv2.contourArea(contour)
perimeter = cv2.arcLength(contour, True)

# Calculate bounding box


x, y, w, h = cv2.boundingRect(contour)
aspect_ratio = w / float(h)

# Calculate solidity
hull = cv2.convexHull(contour)
hull_area = cv2.contourArea(hull)
solidity = float(area) / hull_area

# Calculate extent
extent = float(area) / (w * h)

return area, perimeter, aspect_ratio, solidity, extent

def calculate_texture_primitives(image):
# Convert the image to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Calculate Laplacian for smoothness


laplacian = cv2.Laplacian(gray_image, cv2.CV_64F)
smoothness = np.mean(np.abs(laplacian))

# Calculate uniformity
uniformity = np.std(gray_image)

return smoothness, uniformity

# Define the folder path


input_folder_path = "/content/drive/MyDrive/tpcv/data/skeleton_train" # Change this to your input folder
os.makedirs("Primitive_3", exist_ok=True)

# Iterate through the images in the input folder


for filename in os.listdir(input_folder_path):
if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tif', '.bmp')): # Adjust the image extensions as needed
image_path = os.path.join(input_folder_path, filename)
image = cv2.imread(image_path)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find contours
contours, _ = cv2.findContours(gray_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Assuming the largest contour represents the object of interest


largest_contour = max(contours, key=cv2.contourArea)

# Step 1: Calculate geometric primitives


superficie, perimetre,aspect_ratio, solidity, extent = calculate_geometric_primitives(largest_contour)

# Step 2: Calculate texture primitives


lissage, uniformite = calculate_texture_primitives(image)

# Step 3: Print or use the primitives as needed


print(f"Primitives Geometriques et de Texture for {filename}:")
print("F3 : ",[superficie, perimetre,aspect_ratio,solidity,extent,lissage, uniformite])
print(f"superficie: {superficie}")
print(f"Perimetre: {perimetre}")
print(f"Ratio d'aspect : {aspect_ratio}")
print(f"Solidité : {solidity}")
print(f"Étendue : {extent}")
print(f"lissage: {lissage}")
print(f"uniformite: {uniformite}\n")

Étendue : 0.13018384401114205
lissage: 129.09812843406593
uniformite: 72.52514896233994

Primitives Geometriques et de Texture for 108_3.tif:


F3 : [80568.5, 1325.7371484041214, 0.7191011235955056, 0.970026969105926, 0.8840469188904494, 134.0320763221154, 73.70575828691
superficie: 80568.5
Perimetre: 1325.7371484041214
Ratio d'aspect : 0.7191011235955056
Solidité : 0.970026969105926
Étendue : 0.8840469188904494
lissage: 134.0320763221154
uniformite: 73.7057582869193

Primitives Geometriques et de Texture for 102_3.tif:


F3 : [44098.0, 7803.533837795258, 0.7047353760445683, 0.5183639644299207, 0.4855164213284596, 187.53380408653845, 88.4084982046
superficie: 44098.0
Perimetre: 7803.533837795258
Ratio d'aspect : 0.7047353760445683
Solidité : 0.5183639644299207
Étendue : 0.4855164213284596
lissage: 187.53380408653845
uniformite: 88.40849820467056

Primitives Geometriques et de Texture for 103_7.tif:


F3 : [33447.5, 6758.257580280304, 0.7067039106145251, 0.3984264255679043, 0.3692836796431647, 138.2052712912088, 76.63699906638
superficie: 33447.5
Perimetre: 6758.257580280304
Ratio d'aspect : 0.7067039106145251
Solidité : 0.3984264255679043
Étendue : 0.3692836796431647
lissage: 138.2052712912088
uniformite: 76.63699906638678

Primitives Geometriques et de Texture for 102_1.tif:


F3 : [33079.0, 8848.086346149445, 0.7211267605633803, 0.38773919413919417, 0.3639854753521127, 183.32229781936815, 87.323756349
superficie: 33079.0
Perimetre: 8848.086346149445
Ratio d'aspect : 0.7211267605633803
Solidité : 0.38773919413919417
Étendue : 0.3639854753521127
lissage: 183.32229781936815
uniformite: 87.32375634932212

def calculate_first_order_primitives(image):
# Calculate Mean, Variance, Skewness, Kurtosis, and Moments
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
mean_value = np.mean(gray_image)
variance_value = np.var(gray_image)
skewness_value = np.mean((gray_image - mean_value) ** 3) / np.power(variance_value, 1.5)
kurtosis_value = np.mean((gray_image - mean_value) ** 4) / np.power(variance_value, 2) - 3
moments = cv2.moments(gray_image)

return mean_value, variance_value, skewness_value, kurtosis_value, moments

def calculate_geometric_texture_primitives(image):
# Calculate geometric primitives: Area, Perimeter, Aspect Ratio, Solidity, Extent
# Calculate texture primitives: Smoothness, Uniformity
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find contours
contours, _ = cv2.findContours(gray_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Assuming the largest contour represents the object of interest


largest_contour = max(contours, key=cv2.contourArea)

area = cv2.contourArea(largest_contour)
perimeter = cv2.arcLength(largest_contour, True)
x, y, w, h = cv2.boundingRect(largest_contour)
aspect_ratio = w / float(h)

# Calculate convex hull


hull = cv2.convexHull(largest_contour)
hull_area = cv2.contourArea(hull)
solidity = float(area) / hull_area
extent = float(area) / (w * h)

smoothness, uniformity = calculate_texture_primitives(image)

return area, perimeter, aspect_ratio, solidity, extent, smoothness, uniformity


Fusion of the F1, F2 and F3 vectors

# Lists to store fused features


fused_concatenation = []
input_folder_path="/content/drive/MyDrive/tpcv/data/train"
# Iterate through the images in the input folder
for filename in os.listdir(input_folder_path):
if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tif', '.bmp')): # Adjust the image extensions as needed
image_path = os.path.join(input_folder_path, filename)
image = cv2.imread(image_path)

# Step 1: Calculate features from each set


f1_features = calculate_first_order_primitives(image)
f2_features = calculate_texture_features(image)
f3_features = calculate_geometric_texture_primitives(image)

# Step 2: Fusion with concatenation


fused_features = np.concatenate((f1_features, f2_features, f3_features))
fused_concatenation.append(fused_features)

# Display or use the fused features as needed


print("Fused Features with Concatenation Operator:")
print(fused_concatenation)

36743.80713427198, 92565.0, 1236.0, 0.7032967032967034, 1.0,


0.9933572287087912, 50.10354781936813, 57.27392665316477],
dtype=object), array([206.2601090315934, 3835.7361722752, -0.8248674616107767,
-1.107544131262112,
{'m00': 19220142.0, 'm10': 2499025605.0, 'm01': 3484378216.0, 'm20': 438590201153.0, 'm11': 451297022649.0, 'm02': 861298
3835.7361722752, 0.005291207277921444, -0.3542952461138544,
79460.5482915522, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 59.76332846840659, 61.93332037179341],
dtype=object), array([190.57577481112637, 3759.378895025526, -0.3921762004850753,
-1.6163756379216998,
{'m00': 17758613.0, 'm10': 2286428507.0, 'm01': 3185072328.0, 'm20': 400913828095.0, 'm11': 411009858803.0, 'm02': 790142
3759.378895025526, 0.004969496848116193, -0.1806884106098766,
68721.07552798763, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 62.27786959134615, 61.313774105216574],
dtype=object), array([165.3980726304945, 3824.107313421235, 0.5074620173593878,
-1.5841565906701134,
{'m00': 15412454.0, 'm10': 1973120855.0, 'm01': 2795644249.0, 'm20': 346260068189.0, 'm11': 354995318734.0, 'm02': 701659
3824.107313421235, 0.0038760133774384206, 0.09806717840037521,
74543.25931490384, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 62.89485319368132, 61.83936701989466],
dtype=object), array([198.3078854739011, 3814.894839948422, -0.5904093952288996,
-1.4414540630687995,
{'m00': 18479122.0, 'm10': 2321336623.0, 'm01': 3438350199.0, 'm20': 400178569887.0, 'm11': 432419091493.0, 'm02': 859004
3814.894839948422, 0.004531114674494156, -0.26974743761661013,
98554.05406507554, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 78.95565762362638, 61.76483497871926],
dtype=object), array([174.39167668269232, 5145.884428490245, 0.06306561716151467,
-1.8910443763811733,
{'m00': 16250514.0, 'm10': 2107035351.0, 'm01': 2872864616.0, 'm20': 378133433175.0, 'm11': 385273792777.0, 'm02': 706931
5145.884428490245, 0.0023848906600699325, -0.07482758513854702,
57438.40831043956, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 50.61946256868132, 71.73482019556643],
dtype=object), array([160.95112894917582, 4446.480333976159, 0.5145295652250044,
-1.5985798621446625,
{'m00': 14998070.0, 'm10': 1951444301.0, 'm01': 2792453372.0, 'm20': 350508997391.0, 'm11': 371684963281.0, 'm02': 707852
4446.480333976159, 0.0031749842717778577, 0.01818963041992507,
52883.581129807695, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 50.15613195398352, 66.68193408994793],
dtype=object), array([177.74720982142858, 4305.02072538798, -0.037805842659319716,
-1.760754752054827,
{'m00': 16563196.0, 'm10': 2241175144.0, 'm01': 2972514651.0, 'm20': 398931197334.0, 'm11': 402268544654.0, 'm02': 738949
4305.02072538798, 0.005139912339780355, -0.051897444293339846,
93276.41354739011, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 72.61758456387362, 65.61265674691111],
dtype=object), array([163.52381310096155, 3911.622672977431, 0.22895589654718895,
-1.654911537634435,
{'m00': 15237803.0, 'm10': 2029227739.0, 'm01': 2763995712.0, 'm20': 362475386405.0, 'm11': 369421564561.0, 'm02': 696145
3911.622672977431, 0.004140291571902547, 0.0822851211740403,
61054.88259787088, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 61.22512448489011, 62.542966614779566],
dtype=object), array([170.81988324175825, 4105.048988241862, 0.19354267920391552,
-1.7421275224122825,
{'m00': 15917680.0, 'm10': 2117557859.0, 'm01': 2757107700.0, 'm20': 375031780881.0, 'm11': 368157067601.0, 'm02': 676351
4105.048988241862, 0.004150518875453758, 0.03887202467750776,
84699.20690247252, 92565.0, 1236.0, 0.7032967032967034, 1.0,
0.9933572287087912, 68.6620127918956, 64.07065621828656],
dtype=object)]
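
Note that each fused vector is an object array, because F1 ends with the cv2.moments dictionary. Before handing the fused vectors to a classifier it helps to expand that dictionary into plain numbers and put all features on a comparable scale. This is a minimal sketch assuming fused_concatenation from the cell above; the use of StandardScaler is an assumption, not part of the original pipeline.

import numpy as np
from sklearn.preprocessing import StandardScaler

def to_numeric_vector(fused):
    # Expand dict entries (cv2.moments) into their values, keep scalars, flatten anything else
    flat = []
    for item in fused:
        if isinstance(item, dict):
            flat.extend(item.values())
        elif np.isscalar(item):
            flat.append(item)
        else:
            flat.extend(np.ravel(item))
    return np.array(flat, dtype=float)

X_fused = np.vstack([to_numeric_vector(f) for f in fused_concatenation])
X_fused_scaled = StandardScaler().fit_transform(X_fused)
print(X_fused_scaled.shape)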

Machine learning for classification


from sklearn.feature_selection import SelectKBest, f_classif


from skimage import io
import os

# Define the folder path


image_folder_path = "/content/drive/MyDrive/tpcv/data/train" # Change this to your image folder path

# Load images and labels (replace this with your actual data loading code)
image_files = [os.path.join(image_folder_path, file) for file in os.listdir(image_folder_path)]
X = np.array([io.imread(file) for file in image_files])
y = np.random.randint(2, size=len(X)) # Example labels (replace with your actual labels)

# Flatten the images


X_flatten = X.reshape(len(X), -1)

# Select features based on Fisher's Score


k_best = SelectKBest(f_classif, k=10) # Select top 10 features (adjust 'k' as needed)
X_fisher = k_best.fit_transform(X_flatten, y)

# Display or use the selected features as needed


print("Selected Features using Fisher's Score:")
print(X_fisher.shape)

Selected Features using Fisher's Score:


(80, 10)
/usr/local/lib/python3.10/dist-packages/sklearn/feature_selection/_univariate_selection.py:112: UserWarning: Features [ 255 511
3327 3583 3839 4095 4351 4607 4863 5119 5375 5631 5887 6143
6399 6655 6911 7167 7423 7679 7935 8191 8447 8703 8959 9215
9471 9727 9983 10239 10495 10751 11007 11263 11519 11775 12031 12287
12543 12799 13055 13311 13567 13823 14079 14335 14591 14847 15103 15359
15615 15871 16127 16383 16639 16895 17151 17407 17663 17919 18175 18431
18687 18943 19199 19455 19711 19967 20223 20479 20735 20991 21247 21503
21759 22015 22271 22527 22783 23039 23295 23551 23807 24063 24319 24575
24831 25087 25343 25599 25855 26111 26367 26623 26879 27135 27391 27647
27903 28159 28415 28671 28927 29183 29439 29695 29951 30207 30463 30719
30975 31231 31487 31743 31999 32255 32511 32767 33023 33279 33535 33791
34047 34303 34559 34815 35071 35327 35583 35839 36095 36351 36607 36863
37119 37375 37631 37887 38143 38399 38655 38911 39167 39423 39679 39935
40191 40447 40703 40959 41215 41471 41727 41983 42239 42495 42751 43007
43263 43519 43775 44031 44287 44543 44799 45055 45311 45567 45823 46079
46335 46591 46719 46847 47103 47359 47615 47871 48127 48383 48639 48895
49151 49407 49663 49919 50175 50431 50687 50943 51199 51455 51711 51967
52223 52479 52735 52991 53247 53503 53759 54015 54271 54527 54783 55039
55295 55551 55807 56063 56319 56575 56831 57087 57343 57599 57855 58111
58367 58623 58879 59135 59391 59647 59903 60159 60415 60671 60927 61183
61439 61695 61951 62207 62463 62719 62975 63231 63487 63743 63999 64255
64511 64767 65023 65279 65535 65791 66047 66303 66559 66815 67071 67327
67583 67839 68095 68351 68607 68863 69119 69375 69631 69887 70143 70399
70655 70911 71167 71423 71679 71935 72191 72447 72703 72959 73215 73471
73727 73983 74239 74495 74751 75007 75263 75519 75775 76031 76287 76543
76799 77055 77311 77567 77823 78079 78335 78591 78847 79103 79359 79615
79871 80127 80383 80639 80895 81151 81407 81663 81919 82175 82431 82615
82687 82943 83199 83455 83711 83967 84223 84479 84735 84991 85247 85503
85759 86015 86271 86527 86783 87039 87295 87551 87807 88063 88319 88575
88831 89087 89343 89599 89855 90111 90367 90623 90879 91135 91391 91647
91903 92159 92415 92671 92927 93183] are constant.
warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)
/usr/local/lib/python3.10/dist-packages/sklearn/feature_selection/_univariate_selection.py:113: RuntimeWarning: invalid value encou
f = msb / msw

# Get the indices of the selected features


selected_feature_indices = k_best.get_support(indices=True)

# Display or use the selected feature indices as needed


print("Indices of Selected Features:")
print(selected_feature_indices)

Indices of Selected Features:


[54223 64386 64897 72818 73074 75849 76105 87672 89702 89957]
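
f_classif ranks each pixel by its one-way ANOVA F-value (often loosely called a Fisher score). As a sanity check, the same statistic can be computed by hand for one of the selected pixels. A sketch assuming X_flatten, y and selected_feature_indices from the cells above:

import numpy as np
from sklearn.feature_selection import f_classif

def anova_f(feature, labels):
    groups = [feature[labels == c] for c in np.unique(labels)]
    grand_mean = feature.mean()
    # Between-group and within-group mean squares
    msb = sum(len(g) * (g.mean() - grand_mean) ** 2 for g in groups) / (len(groups) - 1)
    msw = sum(((g - g.mean()) ** 2).sum() for g in groups) / (len(feature) - len(groups))
    return msb / msw

idx = selected_feature_indices[0]
manual_f = anova_f(X_flatten[:, idx].astype(float), y)
sklearn_f = f_classif(X_flatten[:, [idx]].astype(float), y)[0][0]
print(f"feature {idx}: manual F = {manual_f:.3f}, sklearn F = {sklearn_f:.3f}")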

https://colab.research.google.com/drive/1fW5yI8419PqZCHz7Ke2CB4cA5o6I_S8s#scrollTo=sJ--gN94v-e8&printMode=true 66/70
11/25/23, 7:40 PM TPCV1+2+3+4.ipynb - Colaboratory
from sklearn.decomposition import PCA
from skimage import io
import os

# Load images and labels (replace this with your actual data loading code)
image_files = [os.path.join(image_folder_path, file) for file in os.listdir(image_folder_path)]
X = np.array([io.imread(file) for file in image_files])

# Flatten the images


X_flatten = X.reshape(len(X), -1)

# Apply PCA
pca = PCA(n_components=10) # Set the number of components you want to keep (adjust as needed)
X_pca = pca.fit_transform(X_flatten)

# Display or use the transformed features as needed


print("Transformed Features using PCA:")
print(X_pca.shape)

Transformed Features using PCA:


(80, 10)
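
To see how much of the pixel variance the 10 retained components actually capture, the fitted PCA object exposes explained_variance_ratio_. A short check reusing pca from the cell above:

import numpy as np

# Variance fraction captured by each component, and the running total
print("Explained variance ratio:", np.round(pca.explained_variance_ratio_, 3))
print("Cumulative:", np.round(np.cumsum(pca.explained_variance_ratio_), 3))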

from sklearn.model_selection import train_test_split


from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectKBest, f_classif
from skimage import io
import os

# Load images and labels (replace this with your actual data loading code)
image_files = [os.path.join(image_folder_path, file) for file in os.listdir(image_folder_path)]
X = np.array([io.imread(file) for file in image_files])
y = np.random.randint(2, size=len(X)) # Example labels (replace with your actual labels)

# Flatten the images


X_flatten = X.reshape(len(X), -1)

# Select features based on Fisher's Score


k_best = SelectKBest(f_classif, k=10) # Select top 10 features (adjust 'k' as needed)
X_selected = k_best.fit_transform(X_flatten, y)

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X_selected, y, test_size=0.2, random_state=42)

# Train a KNN classifier


knn_classifier = KNeighborsClassifier(n_neighbors=3) # You can adjust the number of neighbors
knn_classifier.fit(X_train, y_train)

# Make predictions on the test set


y_pred = knn_classifier.predict(X_test)

# Evaluate the classifier


accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy*100," %")

Accuracy: 68.75 %
/usr/local/lib/python3.10/dist-packages/sklearn/feature_selection/_univariate_selection.py:112: UserWarning: Features [ 255 511 ... 92927 93183] are constant.
warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)
/usr/local/lib/python3.10/dist-packages/sklearn/feature_selection/_univariate_selection.py:113: RuntimeWarning: invalid value encou
f = msb / msw

from sklearn.model_selection import train_test_split


from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
from skimage import io
import os

# Load images and labels (replace this with your actual data loading code)
image_files = [os.path.join(image_folder_path, file) for file in os.listdir(image_folder_path)]
X = np.array([io.imread(file) for file in image_files])
y = np.random.randint(2, size=len(X)) # Example labels (replace with your actual labels)

# Flatten the images


X_flatten = X.reshape(len(X), -1)

# Apply PCA
pca = PCA(n_components=10) # Set the number of components you want to keep (adjust as needed)
X_pca = pca.fit_transform(X_flatten)

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X_pca, y, test_size=0.2, random_state=42)

# Train a KNN classifier


knn_classifier = KNeighborsClassifier(n_neighbors=3) # You can adjust the number of neighbors
knn_classifier.fit(X_train, y_train)

# Make predictions on the test set


y_pred = knn_classifier.predict(X_test)

# Evaluate the classifier


accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy*100," %")

Accuracy: 37.5 %

from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectKBest, f_classif
from skimage import io
import os

# Define the folder path


image_folder_path = "/content/drive/MyDrive/tpcv/data/train" # Change this to your image folder path

# Load images and labels (replace this with your actual data loading code)
image_files = [os.path.join(image_folder_path, file) for file in os.listdir(image_folder_path)]
X = np.array([io.imread(file) for file in image_files])
y = np.random.randint(2, size=len(X)) # Example labels (replace with your actual labels)

# Flatten the images


X_flatten = X.reshape(len(X), -1)

# Select features based on Fisher's Score


k_best = SelectKBest(f_classif, k=10) # Select top 10 features (adjust 'k' as needed)
X_selected = k_best.fit_transform(X_flatten, y)

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X_selected, y, test_size=0.2, random_state=42)

# Train an SVM classifier


svm_classifier = SVC(kernel='linear', C=1, probability=True) # Set probability to True
svm_classifier.fit(X_train, y_train)

# Make predictions on the test set


y_pred = svm_classifier.predict(X_test)

# Evaluate the classifier


accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100, " %")

/usr/local/lib/python3.10/dist-packages/sklearn/feature_selection/_univariate_selection.py:112: UserWarning: Features [ 255 511 ... 92927 93183] are constant.
warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)
/usr/local/lib/python3.10/dist-packages/sklearn/feature_selection/_univariate_selection.py:113: RuntimeWarning: invalid value encou
f = msb / msw
Accuracy: 75.0 %

from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectKBest, f_classif
from skimage import io
import os

# Define the folder path


image_folder_path = "/content/drive/MyDrive/tpcv/data/train" # Change this to your image folder path

# Load images and labels (replace this with your actual data loading code)
image_files = [os.path.join(image_folder_path, file) for file in os.listdir(image_folder_path)]
X = np.array([io.imread(file) for file in image_files])
y = np.random.randint(2, size=len(X)) # Example labels (replace with your actual labels)

# Flatten the images


X_flatten = X.reshape(len(X), -1)

# Select features based on Fisher's Score


k_best = SelectKBest(f_classif, k=10) # Select top 10 features (adjust 'k' as needed)
X_selected = k_best.fit_transform(X_flatten, y)

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X_selected, y, test_size=0.2, random_state=42)

# Train a Random Forest classifier


random_forest_classifier = RandomForestClassifier(n_estimators=100, random_state=42) # You can adjust other parameters
random_forest_classifier.fit(X_train, y_train)

# Make predictions on the test set
y_pred = random_forest_classifier.predict(X_test)

# Evaluate the classifier
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100, " %")
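
With only 80 images and randomly generated example labels, a single 80/20 split gives very noisy accuracy figures (the 68.75 %, 37.5 % and 75.0 % above are essentially chance). A more stable estimate comes from k-fold cross-validation. This is a sketch assuming X_selected and y from the cells above; it becomes meaningful once real quality labels (e.g. BONNE / MAUVAISE) replace the random ones.

from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier

classifiers = {
    "KNN": KNeighborsClassifier(n_neighbors=3),
    "SVM": SVC(kernel='linear', C=1),
    "Random Forest": RandomForestClassifier(n_estimators=100, random_state=42),
}

# 5-fold cross-validated accuracy for each classifier on the Fisher-selected features
for name, clf in classifiers.items():
    scores = cross_val_score(clf, X_selected, y, cv=5, scoring='accuracy')
    print(f"{name}: {scores.mean() * 100:.1f} % (+/- {scores.std() * 100:.1f})")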