Python with AI for tracking with an overlay

The document outlines a Python program that implements a cup tracking application using OpenCV, TensorFlow, and Tkinter. It creates an overlay window to visually track a cup's position on the screen, utilizing template matching and a neural network model for detection. The program allows users to start tracking by right-clicking on the cup and can be exited by pressing the 'Q' key.
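
A minimal sketch of the core detection step the program relies on, assuming opencv-python and pyautogui are installed; the find_template helper and the 0.6 threshold here are illustrative, not part of the original script:

import cv2
import numpy as np
import pyautogui

def find_template(template_bgr, threshold=0.6):
    # Grab the current screen and convert to BGR, the colour order OpenCV expects.
    screenshot = np.array(pyautogui.screenshot())
    frame = cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGR)
    # Normalised cross-correlation between the template and the frame.
    result = cv2.matchTemplate(frame, template_bgr, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(result)
    if max_val < threshold:
        return None
    h, w = template_bgr.shape[:2]
    # Return the centre of the best match in screen coordinates.
    return (max_loc[0] + w // 2, max_loc[1] + h // 2)

The full program below applies the same idea, but restricts the search to a window around the last known position and double-checks candidates with a small TensorFlow model.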


import numpy as np
import cv2
import pyautogui
import tkinter as tk
import win32gui
import win32con
import win32api
import time
import tensorflow as tf

class OverlayWindow:
    def __init__(self):
        # Semi-transparent, borderless, always-on-top tkinter window used as the marker.
        self.root = tk.Tk()
        self.root.attributes('-alpha', 0.6)
        self.root.attributes('-topmost', True)
        self.root.overrideredirect(True)
        self.root.configure(bg='#FF0000')

        # Make the window layered and click-through (WS_EX_TRANSPARENT) so it
        # never intercepts mouse events meant for the window underneath.
        self.hwnd = win32gui.GetParent(self.root.winfo_id())
        extended_style = win32gui.GetWindowLong(self.hwnd, win32con.GWL_EXSTYLE)
        win32gui.SetWindowLong(self.hwnd, win32con.GWL_EXSTYLE,
                               extended_style | win32con.WS_EX_TRANSPARENT |
                               win32con.WS_EX_LAYERED)

        self.size = 35
        self.root.withdraw()

    def show(self, x, y):
        # Position the overlay on the target, then make it visible and raised.
        self.move(x, y)
        self.root.deiconify()
        self.root.lift()
        self.root.update()

    def move(self, x, y):
        # Centre the square overlay on (x, y).
        x = int(x - self.size / 2)
        y = int(y - self.size / 2)
        self.root.geometry(f"{self.size}x{self.size}+{x}+{y}")
        self.root.update()

class CupTracker:
    def __init__(self):
        self.overlay = OverlayWindow()
        self.tracking_active = False
        self.cup_position = None
        self.initial_position = None
        self.initial_template = None
        pyautogui.PAUSE = 0

        # TensorFlow model initialisation: a small CNN used as a secondary
        # check on candidate positions found by template matching.
        self.model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(40, 40, 3)),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Conv2D(64, 3, activation='relu'),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(1, activation='sigmoid')
        ])
        self.model.compile(optimizer='adam', loss='binary_crossentropy')

    def get_cursor_pos(self):
        return win32gui.GetCursorPos()

    def start_tracking(self, x, y):
        # Remember where the user clicked and capture a template around that point.
        self.initial_position = (x, y)
        self.cup_position = (x, y)

        screenshot = np.array(pyautogui.screenshot())
        frame = cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGR)
        y1, y2 = max(0, y - 20), min(frame.shape[0], y + 20)
        x1, x2 = max(0, x - 20), min(frame.shape[1], x + 20)
        self.initial_template = frame[y1:y2, x1:x2]

        # Quick training of the model on the initial template
        template_resized = cv2.resize(self.initial_template, (40, 40))
        self.model.fit(np.array([template_resized]), np.array([1]), epochs=1,
                       verbose=0)

        self.overlay.show(x, y)
        self.tracking_active = True

    def detect_cup(self, frame, target_pos):
        # Search for the initial template in a window around the last known position.
        if target_pos is None or self.initial_template is None:
            return None

        try:
            x, y = target_pos
            search_width = 400
            x1 = max(0, x - search_width // 2)
            y1 = max(0, y - search_width // 2)
            x2 = min(frame.shape[1], x + search_width // 2)
            y2 = min(frame.shape[0], y + search_width // 2)

            search_area = frame[y1:y2, x1:x2]

            if search_area.size == 0:
                return target_pos

            # Classic template matching
            result = cv2.matchTemplate(search_area, self.initial_template,
                                       cv2.TM_CCOEFF_NORMED)
            _, max_val, _, max_loc = cv2.minMaxLoc(result)

            if max_val > 0.6:
                h, w = self.initial_template.shape[:2]
                cx = x1 + max_loc[0] + w // 2
                cy = y1 + max_loc[1] + h // 2

                # Verification with TensorFlow
                roi = frame[max(0, cy - 20):min(frame.shape[0], cy + 20),
                            max(0, cx - 20):min(frame.shape[1], cx + 20)]
                if roi.size > 0:
                    roi_resized = cv2.resize(roi, (40, 40))
                    confidence = self.model.predict(np.array([roi_resized]),
                                                    verbose=0)[0][0]
                    if confidence > 0.5:
                        return (cx, cy)

            return target_pos
        except Exception as e:
            print(f"Error: {e}")
            return target_pos

    def track_game(self):
        print("Right-click on the cup to track")
        print("Press Q to quit")

        last_right_click = False

        try:
            while True:
                current_right_click = win32api.GetKeyState(win32con.VK_RBUTTON) < 0

                # Start tracking on the rising edge of a right-click.
                if current_right_click and not last_right_click and not self.tracking_active:
                    x, y = self.get_cursor_pos()
                    self.start_tracking(x, y)
                last_right_click = current_right_click

                if self.tracking_active:
                    screenshot = np.array(pyautogui.screenshot())
                    frame = cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGR)

                    new_pos = self.detect_cup(frame, self.cup_position)
                    if new_pos:
                        self.cup_position = new_pos
                        self.overlay.move(new_pos[0], new_pos[1])

                if win32api.GetAsyncKeyState(ord('Q')) & 0x8000:
                    break

        finally:
            self.overlay.root.destroy()

if __name__ == "__main__":
    tracker = CupTracker()
    tracker.track_game()
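
The script is Windows-only, since it relies on win32gui, win32con and win32api from pywin32. A plausible way to install the remaining dependencies, assuming a standard CPython environment:

pip install numpy opencv-python pyautogui pywin32 tensorflow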
