From 241121a7d12031ac788a2e4a00baaec4083e5713 Mon Sep 17 00:00:00 2001
From: ccunatbrule <32840852+DARKNAGAN@users.noreply.github.com>
Date: Tue, 28 May 2024 15:59:57 +0200
Subject: [PATCH] CARIA.2.0 Previous CARIA repo: add new models. Work on
 multiple AIs.

...

---
 .../NOTWORKING.txt                            |   0
 .../Camera_Identification_RoadSign/modele.py  |  84 ++++++++++
 .../Camera_Identification_RoadSign/predict.py |  47 ++++++
 .../Camera_Identification_User/identifie.py   |  51 ++++++
 .../common.py                                 |  67 ++++++++
 .../extract_panneau.py                        |  79 ++++++++++
 .../houghcircles.py                           |  37 +++++
 .../lire_panneau.py                           | 140 +++++++++++++++++
 .../real_time_yolo.py                         |  78 ++++++++++
 server-ia/Camera_Ligne_Detection/canny.py     |  53 +++++++
 .../franchissement_ligne.py                   | 147 ++++++++++++++++++
 server-ia/Camera_Ligne_Detection/gradient.py  |  37 +++++
 server-ia/Identification_Yolov3/NOTE.txt      |   3 +
 .../Identification_Yolov3/yolo_opencv.py      |  78 ++++++++++
 .../yolo_opencv_video.py                      |  92 +++++++++++
 .../identify-RoadSign-speed.py                |  90 +++++++++++
 .../identify-RoadSign-speed2.py               |  97 ++++++++++++
 .../Identify_RoadSignV1/identify-RoadSign.py  |  76 +++++++++
 18 files changed, 1256 insertions(+)
 create mode 100644 server-ia/Camera_Identification_RoadSign/NOTWORKING.txt
 create mode 100644 server-ia/Camera_Identification_RoadSign/modele.py
 create mode 100644 server-ia/Camera_Identification_RoadSign/predict.py
 create mode 100644 server-ia/Camera_Identification_User/identifie.py
 create mode 100644 server-ia/Camera_Identification_VitesseRoadSign/common.py
 create mode 100644 server-ia/Camera_Identification_VitesseRoadSign/extract_panneau.py
 create mode 100644 server-ia/Camera_Identification_VitesseRoadSign/houghcircles.py
 create mode 100644 server-ia/Camera_Identification_VitesseRoadSign/lire_panneau.py
 create mode 100644 server-ia/Camera_Identification_Yolov3-Tiny/real_time_yolo.py
 create mode 100644 server-ia/Camera_Ligne_Detection/canny.py
 create mode 100644 server-ia/Camera_Ligne_Detection/franchissement_ligne.py
 create mode 100644 server-ia/Camera_Ligne_Detection/gradient.py
 create mode 100644 server-ia/Identification_Yolov3/NOTE.txt
 create mode 100644 server-ia/Identification_Yolov3/yolo_opencv.py
 create mode 100644 server-ia/Identification_Yolov3/yolo_opencv_video.py
 create mode 100644 server-ia/Identify_RoadSignV1/identify-RoadSign-speed.py
 create mode 100644 server-ia/Identify_RoadSignV1/identify-RoadSign-speed2.py
 create mode 100644 server-ia/Identify_RoadSignV1/identify-RoadSign.py

diff --git a/server-ia/Camera_Identification_RoadSign/NOTWORKING.txt b/server-ia/Camera_Identification_RoadSign/NOTWORKING.txt
new file mode 100644
index 0000000..e69de29
diff --git a/server-ia/Camera_Identification_RoadSign/modele.py b/server-ia/Camera_Identification_RoadSign/modele.py
new file mode 100644
index 0000000..550ae4f
--- /dev/null
+++ b/server-ia/Camera_Identification_RoadSign/modele.py
@@ -0,0 +1,84 @@
+import cv2
+import glob
+import numpy as np
+import json
+import requests
+
+# Load the TensorFlow model
+import tensorflow as tf
+model = tf.keras.models.load_model('server-ia/data/modeles/traffic_signs_model/')
+
+# Path to the test images
+# path = 'server-ia/data/videos/autoroute.mp4'
+path = 'server-ia/data/images/panneaux/France_road_sign_B14_(5).svg.png'
+
+images = glob.glob(path)
+
+# Initialize the list for the image data
+data = []
+
+# Load the images
+for im in images[:5]:  # Only load the first 5 images for this example
+    try:
+        # Read the image and resize it to the model's input size
+        image = cv2.imread(im)
+        image = cv2.resize(image, (30, 30))
+
+        # Add the image to the data list
+        data.append(image)  # note: predict.py scales inputs to [0, 1]; apply the same scaling here if the model expects it
+    except Exception as e:
+        print(f"Error while loading image {im}: {e}")
+
+# Convert the data list to a numpy array
+test_imgs = np.array(data)
+
+# Run the predictions on the images
+predictions = model.predict(test_imgs)
+
+# Print the predictions
+print(predictions)
+
+# Dictionary mapping class indices to traffic sign labels.
+classes = { 0:'Speed limit (20km/h)',
+            1:'Speed limit (30km/h)',
+            2:'Speed limit (50km/h)',
+            3:'Speed limit (60km/h)',
+            4:'Speed limit (70km/h)',
+            5:'Speed limit (80km/h)',
+            6:'End of speed limit (80km/h)',
+            7:'Speed limit (100km/h)',
+            8:'Speed limit (120km/h)',
+            9:'No passing',
+            10:'No passing veh over 3.5 tons',
+            11:'Right-of-way at intersection',
+            12:'Priority road',
+            13:'Yield',
+            14:'Stop',
+            15:'No vehicles',
+            16:'Veh > 3.5 tons prohibited',
+            17:'No entry',
+            18:'General caution',
+            19:'Dangerous curve left',
+            20:'Dangerous curve right',
+            21:'Double curve',
+            22:'Bumpy road',
+            23:'Slippery road',
+            24:'Road narrows on the right',
+            25:'Road work',
+            26:'Traffic signals',
+            27:'Pedestrians',
+            28:'Children crossing',
+            29:'Bicycles crossing',
+            30:'Beware of ice/snow',
+            31:'Wild animals crossing',
+            32:'End speed + passing limits',
+            33:'Turn right ahead',
+            34:'Turn left ahead',
+            35:'Ahead only',
+            36:'Go straight or right',
+            37:'Go straight or left',
+            38:'Keep right',
+            39:'Keep left',
+            40:'Roundabout mandatory',
+            41:'End of no passing',
+            42:'End no passing veh > 3.5 tons' }
\ No newline at end of file
diff --git a/server-ia/Camera_Identification_RoadSign/predict.py b/server-ia/Camera_Identification_RoadSign/predict.py
new file mode 100644
index 0000000..4590f4e
--- /dev/null
+++ b/server-ia/Camera_Identification_RoadSign/predict.py
@@ -0,0 +1,47 @@
+import cv2
+import numpy as np
+import tensorflow as tf
+
+# Path to the directory containing the SavedModel
+model_dir = "server-ia/data/modeles/traffic_signs_model/1"
+
+# Load the SavedModel
+model = tf.keras.models.load_model(model_dir)
+
+# Image preprocessing function
+def preprocess_image(image):
+    # Resize the image to the input size expected by the model
+    processed_image = cv2.resize(image, (30, 30))
+    # Make sure the image is in the format expected by the model
+    processed_image = processed_image.astype("float32") / 255.0  # Normalization
+    processed_image = np.expand_dims(processed_image, axis=0)  # Add a batch dimension
+    return processed_image
+
+# Sign detection function
+def detect_signs(video_path):
+    cap = cv2.VideoCapture(video_path)
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Preprocess the frame
+        processed_frame = preprocess_image(frame)
+
+        # Run a prediction with the model
+        predictions = model.predict(processed_frame)
+
+        # Parse the predictions and draw the results on the frame
+        # Replace this part with your own sign detection and display code
+
+        # Display the video with the detections
+        cv2.imshow('Video', frame)
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
+
+    cap.release()
+    cv2.destroyAllWindows()
+
+# Run sign detection on the video
+video_path = "server-ia/data/videos/autoroute.mp4"
+detect_signs(video_path)
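
predict.py leaves the prediction-parsing step as a placeholder. A minimal sketch of what that step could look like, assuming the model outputs one score per class indexed like the `classes` dictionary defined in modele.py (that shared indexing is an assumption; the threshold value is illustrative):

import numpy as np

def label_prediction(predictions, classes, threshold=0.6):
    """Return (label, score) for the best class, or (None, score) below threshold."""
    scores = np.asarray(predictions)[0]   # first (and only) image in the batch
    best = int(np.argmax(scores))
    score = float(scores[best])
    if score < threshold:
        return None, score                # not confident enough to report a sign
    return classes.get(best, "unknown"), score

# Possible usage inside detect_signs(), after model.predict(processed_frame):
# label, score = label_prediction(predictions, classes)
# if label is not None:
#     cv2.putText(frame, "{} ({:.2f})".format(label, score), (30, 30),
#                 cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
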
diff --git a/server-ia/Camera_Identification_User/identifie.py b/server-ia/Camera_Identification_User/identifie.py
new file mode 100644
index 0000000..525075f
--- /dev/null
+++ b/server-ia/Camera_Identification_User/identifie.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+import cv2
+import pickle
+import numpy as np
+
+min_size=50
+
+face_cascade= cv2.CascadeClassifier("server-ia/data/haarcascades/haarcascade_frontalface_alt2.xml")
+recognizer=cv2.face.LBPHFaceRecognizer_create()
+recognizer.read("server-ia/data/modeles/camera_identification_user/trainner.yml")
+id_image=0
+color_info=(255, 255, 255)
+color_ko=(0, 0, 255)
+color_ok=(0, 255, 0)
+url = "http://192.168.52.194:8081"
+
+with open("server-ia/data/modeles/camera_identification_user/labels.pickle", "rb") as f:
+    og_labels=pickle.load(f)
+    labels={v:k for k, v in og_labels.items()}
+
+cap=cv2.VideoCapture(0)
+while True:
+    ret, frame=cap.read()
+    tickmark=cv2.getTickCount()
+    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    faces=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4, minSize=(min_size, min_size))
+    for (x, y, w, h) in faces:
+        roi_gray=gray[y:y+h, x:x+w]
+        #id_, conf=recognizer.predict(cv2.resize(roi_gray, (min_size, min_size)))
+        id_, conf=recognizer.predict(roi_gray)
+        if conf<=95:
+            color=color_ok
+            name=labels[id_]
+        else:
+            color=color_ko
+            name="Inconnu"
+        label=name+" "+'{:5.2f}'.format(conf)
+        cv2.putText(frame, label, (x, y-10), cv2.FONT_HERSHEY_DUPLEX, 1, color_info, 1, cv2.LINE_AA)
+        cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
+    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
+    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, color_info, 2)
+    cv2.imshow('L42Project', frame)
+    key=cv2.waitKey(1)&0xFF
+    if key==ord('q'):
+        break
+    if key==ord('a'):
+        for cpt in range(100):
+            ret, frame=cap.read()
+
+cv2.destroyAllWindows()
+print("Fin")
diff --git a/server-ia/Camera_Identification_VitesseRoadSign/common.py b/server-ia/Camera_Identification_VitesseRoadSign/common.py
new file mode 100644
index 0000000..a0572c8
--- /dev/null
+++ b/server-ia/Camera_Identification_VitesseRoadSign/common.py
@@ -0,0 +1,67 @@
+import tensorflow as tf
+from tensorflow.keras import layers, models
+import os
+import cv2
+
+size=42
+dir_images_panneaux="server-ia/Camera_Identification_VitesseRoadSign/images_panneaux"
+dir_images_autres_panneaux="server-ia/Camera_Identification_VitesseRoadSign/images_autres_panneaux"
+dir_images_sans_panneaux="server-ia/Camera_Identification_VitesseRoadSign/images_sans_panneaux"
+
+def panneau_model(nbr_classes):
+    model=tf.keras.Sequential()
+
+    model.add(layers.Input(shape=(size, size, 3), dtype='float32'))
+
+    model.add(layers.Conv2D(128, 3, strides=1))
+    model.add(layers.Dropout(0.2))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Activation('relu'))
+
+    model.add(layers.Conv2D(128, 3, strides=1))
+    model.add(layers.Dropout(0.2))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Activation('relu'))
+
+    model.add(layers.MaxPool2D(pool_size=2, strides=2))
+
+    model.add(layers.Conv2D(256, 3, strides=1))
+    model.add(layers.Dropout(0.3))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Activation('relu'))
+
+    model.add(layers.Conv2D(256, 3, strides=1))
+    model.add(layers.Dropout(0.4))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Activation('relu'))
+
+    model.add(layers.MaxPool2D(pool_size=2, strides=2))
+
+    model.add(layers.Flatten())
+    model.add(layers.Dense(512, activation='relu'))
+    model.add(layers.Dropout(0.5))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Dense(nbr_classes, activation='sigmoid'))
+
+    return model
+
+def lire_images_panneaux(dir_images_panneaux, size=None):
+    tab_panneau=[]
+    tab_image_panneau=[]
+
+    if not os.path.exists(dir_images_panneaux):
+        quit("Le repertoire d'image n'existe pas: {}".format(dir_images_panneaux))
+
+    files=os.listdir(dir_images_panneaux)
+    if not files:
+        quit("Le repertoire d'image est vide: {}".format(dir_images_panneaux))
+
+    for file in sorted(files):
+        if file.endswith("png"):
+            tab_panneau.append(file.split(".")[0])
+            image=cv2.imread(dir_images_panneaux+"/"+file)
+            if size is not None:
+                image=cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
+            tab_image_panneau.append(image)
+
+    return tab_panneau, tab_image_panneau
\ No newline at end of file
diff --git a/server-ia/Camera_Identification_VitesseRoadSign/extract_panneau.py b/server-ia/Camera_Identification_VitesseRoadSign/extract_panneau.py
new file mode 100644
index 0000000..de17364
--- /dev/null
+++ b/server-ia/Camera_Identification_VitesseRoadSign/extract_panneau.py
@@ -0,0 +1,79 @@
+import cv2
+import os
+import numpy as np
+import random
+# Tuto25[OpenCV] Lecture des panneaux de vitesse p.1 (houghcircles) 16min
+
+size=42
+video_dir = "server-ia/data/videos"
+
+# List every file in the video directory
+l = os.listdir(video_dir)
+
+for video in l:
+    if not video.endswith("mp4"):
+        continue
+    cap = cv2.VideoCapture(video_dir + "/" + video)
+
+    print("video:", video)
+    while True:
+        # Grab one frame from the video
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Resize the frame to a reasonable display size
+        f_h, f_w, f_c = frame.shape
+        frame = cv2.resize(frame, (int(f_w / 1.5), int(f_h / 1.5)))
+
+        # Extract a region of interest (ROI) from the frame
+        image = frame[200:400, 700:1000]
+
+        # Top-left corner of the overlay rectangle
+        start_point = (600, 50)
+        # Bottom-right
+        # corner of the
+        # overlay rectangle
+        end_point = (800, 450)
+        # Color in BGR
+        color = (255, 255, 255)
+        # Line thickness
+        thickness = 1
+
+        # Draw a rectangle around the ROI (fixed coordinates)
+        cv2.rectangle(frame, start_point, end_point, color, thickness)
+
+        # Convert the ROI to grayscale for circle detection
+        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+        # Detect circles in the ROI
+        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=30, param2=60, minRadius=5, maxRadius=45)
+        if circles is not None:
+            circles = np.int16(np.around(circles))
+            for i in circles[0, :]:
+                if i[2] != 0:
+                    cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 4)
+                    # Crop the road sign using the circle's center and radius
+                    panneau = cv2.resize(
+                        image[max(0, i[1] - i[2]):i[1] + i[2], max(0, i[0] - i[2]):i[0] + i[2]],
+                        (size, size)) / 255
+                    cv2.imshow("panneau", panneau)
+
+        # Show the name of the current video file
+        cv2.putText(frame, "fichier:" + video, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+
+        # Show the frame with the rectangle and the detected sign
+        cv2.imshow("Video", frame)
+
+        # Wait for a key press and handle the bound actions
+        key = cv2.waitKey(1) & 0xFF
+        if key == ord('q'):
+            quit()
+        if key == ord('a'):
+            for cpt in range(100):
+                ret, frame = cap.read()
+        if key == ord('f'):
+            break
+
+# Close every OpenCV window
+cv2.destroyAllWindows()
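
The training script that produces the checkpoint restored by lire_panneau.py is not part of this patch. A hedged sketch of how common.py's helpers could be wired together for training; the label layout (one class per reference image), the optimizer, and the epoch count are all assumptions:

import numpy as np
import tensorflow as tf
from common import panneau_model, lire_images_panneaux, dir_images_panneaux, size

# One reference image per class (assumption; real training needs many samples per class)
tab_panneau, tab_image_panneau = lire_images_panneaux(dir_images_panneaux, size=size)
x = np.array(tab_image_panneau, dtype="float32") / 255.0
y = tf.keras.utils.to_categorical(range(len(tab_panneau)), num_classes=len(tab_panneau))

model = panneau_model(len(tab_panneau))
# binary_crossentropy matches the per-class sigmoid output layer in panneau_model
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(x, y, epochs=10, batch_size=8)

# Save weights the way lire_panneau.py restores them (tf.train.Checkpoint)
checkpoint = tf.train.Checkpoint(model_panneau=model)
checkpoint.save("server-ia/data/modeles/road_sign_speed_trainers/ckpt")
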
diff --git a/server-ia/Camera_Identification_VitesseRoadSign/houghcircles.py b/server-ia/Camera_Identification_VitesseRoadSign/houghcircles.py
new file mode 100644
index 0000000..51868b0
--- /dev/null
+++ b/server-ia/Camera_Identification_VitesseRoadSign/houghcircles.py
@@ -0,0 +1,37 @@
+import cv2
+import numpy as np
+# Tuto25[OpenCV] Lecture des panneaux de vitesse p.1 (houghcircles) 14min
+param1=30
+param2=55
+dp=1.0
+video_path = "server-ia/data/videos/ville.mp4"
+
+cap=cv2.VideoCapture(video_path)
+
+while True:
+    ret, frame=cap.read()
+    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    circles=cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp, 20, param1=param1, param2=param2, minRadius=10, maxRadius=50)
+    if circles is not None:
+        circles=np.around(circles).astype(np.int32)
+        for i in circles[0, :]:
+            if i[2]!=0:
+                cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 4)
+    cv2.putText(frame, "[i|k]dp: {:4.2f} [o|l]param1: {:d} [p|m]param2: {:d}".format(dp, param1, param2), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
+    cv2.imshow("Video", frame)
+    key=cv2.waitKey(1)&0xFF
+    if key==ord('q'):
+        quit()
+    if key==ord('i'):
+        dp=min(10, dp+0.1)
+    if key==ord('k'):
+        dp=max(0.1, dp-0.1)
+    if key==ord('o'):
+        param1=min(255, param1+1)
+    if key==ord('l'):
+        param1=max(1, param1-1)
+    if key==ord('p'):
+        param2=min(255, param2+1)
+    if key==ord('m'):
+        param2=max(1, param2-1)
+cv2.destroyAllWindows()
diff --git a/server-ia/Camera_Identification_VitesseRoadSign/lire_panneau.py b/server-ia/Camera_Identification_VitesseRoadSign/lire_panneau.py
new file mode 100644
index 0000000..d8c0b08
--- /dev/null
+++ b/server-ia/Camera_Identification_VitesseRoadSign/lire_panneau.py
@@ -0,0 +1,140 @@
+import tensorflow as tf
+from tensorflow.keras import layers, models
+import cv2
+import os
+import numpy as np
+import random
+
+th1=30
+th2=55
+size=42
+video_dir="server-ia/data/videos/"
+dir_images_panneaux="server-ia/data/images/panneaux"
+
+def panneau_model(nbr_classes):
+    model=tf.keras.Sequential()
+
+    model.add(layers.Input(shape=(size, size, 3), dtype='float32'))
+
+    model.add(layers.Conv2D(128, 3, strides=1))
+    model.add(layers.Dropout(0.2))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Activation('relu'))
+
+    model.add(layers.Conv2D(128, 3, strides=1))
+    model.add(layers.Dropout(0.2))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Activation('relu'))
+
+    model.add(layers.MaxPool2D(pool_size=2, strides=2))
+
+    model.add(layers.Conv2D(256, 3, strides=1))
+    model.add(layers.Dropout(0.3))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Activation('relu'))
+
+    model.add(layers.Conv2D(256, 3, strides=1))
+    model.add(layers.Dropout(0.4))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Activation('relu'))
+
+    model.add(layers.MaxPool2D(pool_size=2, strides=2))
+
+    model.add(layers.Flatten())
+    model.add(layers.Dense(512, activation='relu'))
+    model.add(layers.Dropout(0.5))
+    model.add(layers.BatchNormalization())
+    model.add(layers.Dense(nbr_classes, activation='sigmoid'))
+
+    return model
+
+def lire_images_panneaux(dir_images_panneaux, size=None):
+    tab_panneau=[]
+    tab_image_panneau=[]
+
+    if not os.path.exists(dir_images_panneaux):
+        quit("Le repertoire d'image n'existe pas: {}".format(dir_images_panneaux))
+
+    files=os.listdir(dir_images_panneaux)
+    if not files:
+        quit("Le repertoire d'image est vide: {}".format(dir_images_panneaux))
+
+    for file in sorted(files):
+        if file.endswith("png"):
+            tab_panneau.append(file.split(".")[0])
+            image=cv2.imread(dir_images_panneaux+"/"+file)
+            if size is not None:
+                image=cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
+            tab_image_panneau.append(image)
+
+    return tab_panneau, tab_image_panneau
+
+tab_panneau, tab_image_panneau=lire_images_panneaux(dir_images_panneaux)
+
+model_panneau=panneau_model(len(tab_panneau))
+checkpoint=tf.train.Checkpoint(model_panneau=model_panneau)
+checkpoint.restore(tf.train.latest_checkpoint("server-ia/data/modeles/road_sign_speed_trainers/"))
+
+l=os.listdir(video_dir)
+random.shuffle(l)
+
+for video in l:
+    if not video.endswith("mp4"):
+        continue
+    cap=cv2.VideoCapture(video_dir+"/"+video)
+
+    print("video:", video)
+    id_panneau=-1
+    while True:
+        ret, frame=cap.read()
+        if not ret:
+            break
+        f_h, f_w, f_c=frame.shape
+        frame=cv2.resize(frame, (int(f_w/1.5), int(f_h/1.5)))
+
+        image=frame[200:400, 700:1000]
+
+        # Top-left corner of the overlay rectangle
+        start_point = (600, 50)
+        # Bottom-right
+        # corner of the
+        # overlay rectangle
+        end_point = (800, 450)
+        # Color in BGR
+        color = (255, 255, 255)
+        # Line thickness
+        thickness = 1
+
+        cv2.rectangle(frame, start_point, end_point, color, thickness)
+
+        gray=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+        circles=cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=th1, param2=th2, minRadius=5, maxRadius=45)
+        if circles is not None:
+            circles=np.int16(np.around(circles))
+            for i in circles[0,:]:
+                if i[2]!=0:
+                    panneau=cv2.resize(image[max(0, i[1]-i[2]):i[1]+i[2], max(0, i[0]-i[2]):i[0]+i[2]], (size, size))/255
+                    cv2.imshow("panneau", panneau)
+                    prediction=model_panneau(np.array([panneau]), training=False)
+                    print("Prediction:", prediction)
+                    if np.any(np.greater(prediction[0], 0.6)):
+                        id_panneau=np.argmax(prediction[0])
+                        print(" -> C'est un panneau:", tab_panneau[id_panneau], "KM/H")
+                        h, w, c=tab_image_panneau[id_panneau].shape
+                    else:
+                        print(" -> Ce n'est pas un panneau")
+                    if id_panneau!=-1:
+                        frame[0:h, 0:w, :]=tab_image_panneau[id_panneau]
+        cv2.putText(frame, "fichier:"+video, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+        cv2.imshow("Video", frame)
+        key=cv2.waitKey(1)&0xFF
+        if key==ord('q'):
+            quit()
+        if key==ord('a'):
+            for cpt in range(100):
+                ret, frame=cap.read()
+        if key==ord('f'):
+            break
+
+cv2.destroyAllWindows()
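
extract_panneau.py and lire_panneau.py clamp the circle crop only on the top/left edge with max(0, ...); when a circle sits near the right or bottom border of the ROI the slice can come back empty and cv2.resize then raises. A sketch of a crop helper that clamps all four edges (not part of the original patch):

import cv2

def crop_circle(image, cx, cy, r, size):
    """Crop a square around a Hough circle, clamped to the image borders."""
    h, w = image.shape[:2]
    y0, y1 = max(0, cy - r), min(h, cy + r)
    x0, x1 = max(0, cx - r), min(w, cx + r)
    if y1 <= y0 or x1 <= x0:
        return None  # circle entirely outside the image
    return cv2.resize(image[y0:y1, x0:x1], (size, size)) / 255

# Possible usage inside the circle loop:
# panneau = crop_circle(image, i[0], i[1], i[2], size)
# if panneau is not None:
#     cv2.imshow("panneau", panneau)
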
diff --git a/server-ia/Camera_Identification_Yolov3-Tiny/real_time_yolo.py b/server-ia/Camera_Identification_Yolov3-Tiny/real_time_yolo.py
new file mode 100644
index 0000000..13cff6a
--- /dev/null
+++ b/server-ia/Camera_Identification_Yolov3-Tiny/real_time_yolo.py
@@ -0,0 +1,78 @@
+import cv2
+import numpy as np
+import time
+
+# Load Yolo
+net = cv2.dnn.readNet("server-ia/data/modeles/yolov3-tiny/yolov3-tiny.weights", "server-ia/data/modeles/yolov3-tiny/yolov3-tiny.cfg")
+classes = []
+with open("server-ia/data/modeles/yolov3-tiny/coco.names", "r") as f:
+    classes = [line.strip() for line in f.readlines()]
+layer_names = net.getLayerNames()
+output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
+colors = np.random.uniform(0, 255, size=(len(classes), 3))
+
+# Loading image
+cap = cv2.VideoCapture(0)
+
+font = cv2.FONT_HERSHEY_PLAIN
+starting_time = time.time()
+frame_id = 0
+while True:
+    _, frame = cap.read()
+    frame_id += 1
+
+    height, width, channels = frame.shape
+
+    # Detecting objects
+    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
+
+    net.setInput(blob)
+    outs = net.forward(output_layers)
+
+    # Showing information on the screen
+    class_ids = []
+    confidences = []
+    boxes = []
+    for out in outs:
+        for detection in out:
+            scores = detection[5:]
+            class_id = np.argmax(scores)
+            confidence = scores[class_id]
+            if confidence > 0.5:
+                # Object detected
+                center_x = int(detection[0] * width)
+                center_y = int(detection[1] * height)
+                w = int(detection[2] * width)
+                h = int(detection[3] * height)
+
+                # Rectangle coordinates
+                x = int(center_x - w / 2)
+                y = int(center_y - h / 2)
+
+                boxes.append([x, y, w, h])
+                confidences.append(float(confidence))
+                class_ids.append(class_id)
+
+    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
+
+    for i in range(len(boxes)):
+        if i in indexes:
+            x, y, w, h = boxes[i]
+            label = str(classes[class_ids[i]])
+            confidence = confidences[i]
+            color = colors[class_ids[i]]
+            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
+            cv2.putText(frame, label + " " + str(round(confidence, 2)), (x, y + 30), font, 3, color, 3)
+
+    elapsed_time = time.time() - starting_time
+    fps = frame_id / elapsed_time
+    cv2.putText(frame, "FPS: " + str(round(fps, 2)), (10, 50), font, 4, (0, 0, 0), 3)
+    cv2.imshow("Image", frame)
+    key = cv2.waitKey(1)
+    if key == 27:
+        break
+
+cap.release()
+cv2.destroyAllWindows()
diff --git a/server-ia/Camera_Ligne_Detection/canny.py b/server-ia/Camera_Ligne_Detection/canny.py
new file mode 100644
index 0000000..25e3d6a
--- /dev/null
+++ b/server-ia/Camera_Ligne_Detection/canny.py
@@ -0,0 +1,53 @@
+import cv2
+import numpy as np
+
+th1=75
+th2=150
+stop=0
+k=3
+cap=cv2.VideoCapture(0)
+while True:
+    if not stop:
+        ret, frame=cap.read()
+        if not ret:
+            quit()
+        image=frame.copy()
+
+    cv2.putText(image, "[u|j]th1: {:d} [i|k]th2: {:d} [y|h]blur: {:d}".format(th1, th2, k), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1)
+    cv2.putText(image, "[a]>> [s]stop [q]quit", (10, 70), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1)
+    cv2.imshow("image", image)
+    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    if k!=1:
+        gray=cv2.blur(gray, (k, k))
+    gray_canny=cv2.Canny(gray, th1, th2)
+    cv2.imshow("blur", gray)
+    cv2.imshow("canny", gray_canny)
+
+    if not stop:
+        key=cv2.waitKey(10)&0xFF
+    else:
+        key=cv2.waitKey()
+        image=frame.copy()
+
+    if key==ord('q'):
+        break
+    if key==ord('y'):
+        k=min(255, k+2)
+    if key==ord('h'):
+        k=max(1, k-2)
+    if key==ord('u'):
+        th1=min(255, th1+1)
+    if key==ord('j'):
+        th1=max(0, th1-1)
+    if key==ord('i'):
+        th2=min(255, th2+1)
+    if key==ord('k'):
+        th2=max(0, th2-1)
+    if key==ord('s'):
+        stop=not stop
+    if key==ord('a'):
+        for cpt in range(200):
+            ret, frame=cap.read()
+        image=frame.copy()
+cap.release()
+cv2.destroyAllWindows()
diff --git a/server-ia/Camera_Ligne_Detection/franchissement_ligne.py b/server-ia/Camera_Ligne_Detection/franchissement_ligne.py
new file mode 100644
index 0000000..00db062
--- /dev/null
+++ b/server-ia/Camera_Ligne_Detection/franchissement_ligne.py
@@ -0,0 +1,147 @@
+import cv2
+import numpy as np
+import time
+
+ymin=400
+ymax=401
+xmin1=20
+xmax1=120
+xmin2=550
+xmax2=620
+
+def point(capteur):
+    s1=len(capteur)-1
+    s2=len(capteur)-1
+    for i in range(len(capteur)):
+        if capteur[i]!=0:
+            s1=i
+            break
+    if s1!=len(capteur)-1:
+        for i in range(len(capteur)-1, s1-1, -1):
+            if capteur[i]!=0:
+                s2=i
+                break
+        return int((s1+s2)/2)
+    return -1
+
+s1_old=0
+s2_old=0
+s1=0
+s2=0
+s1_time=0
+s2_time=0
+th1=75
+th2=150
+stop=0
+k=3
+url = "http://192.168.74.194:8081"
+
+cap=cv2.VideoCapture(url)
+while True:
+    if not stop:
+        ret, frame=cap.read()
+        if not ret:
+            quit()
+        image=frame.copy()
+
+    gray1=cv2.cvtColor(frame[ymin:ymax, xmin1:xmax1], cv2.COLOR_BGR2GRAY)
+    if k!=1:
+        gray1=cv2.blur(gray1, (k, k))
+    capteur1=cv2.Canny(gray1, th1, th2)
+
+    gray2=cv2.cvtColor(frame[ymin:ymax, xmin2:xmax2], cv2.COLOR_BGR2GRAY)
+    if k!=1:
+        gray2=cv2.blur(gray2, (k, k))
+    capteur2=cv2.Canny(gray2, th1, th2)
+
+    cv2.rectangle(image, (xmin1, ymin), (xmax1, ymax), (0, 0, 255), 1)
+    cv2.rectangle(image, (xmin2, ymin), (xmax2, ymax), (0, 0, 255), 1)
+
+    s1=point(capteur1[0])
+    if s1!=-1:
+        cv2.circle(image, (s1+xmin1, ymin), 3, (0, 255, 0), 3)
+        s1_old=s1
+        s1_time=time.time()
+    else:
+        if time.time()-s1_time<1:
+            cv2.circle(image, (s1_old+xmin1, ymin), 3, (100, 255, 255), 3)
+            s1=s1_old
+        else:
+            s1=-1
+
+    s2=point(capteur2[0])
+    if s2!=-1:
+        cv2.circle(image, (s2+xmin2, ymin), 3, (0, 255, 0), 3)
+        s2_old=s2
+        s2_time=time.time()
+    else:
+        if time.time()-s2_time<1:
+            cv2.circle(image, (s2_old+xmin2, ymin), 3, (100, 255, 255), 3)
+            s2=s2_old
+        else:
+            s2=-1
+
+    if s1!=-1 and s2!=-1:
+        s2_=abs(xmax2-xmin2-s2)
+        if abs(s2_-s1)>20:
+            c=(0, max(0, 255-10*int(abs(s1-s2_)/2)), min(255, 10*int(abs(s1-s2_)/2)))
+            cv2.circle(image, (int((xmax2-xmin1)/2)+xmin1, ymax-25), 5, c, 7)
+            cv2.arrowedLine(image, (int((xmax2-xmin1)/2)+xmin1, ymax-25), (int((xmax2-xmin1)/2)+xmin1+2*int((s1-s2_)/2), ymax-25), c, 3, tipLength=0.4)
+        else:
+            cv2.putText(image, "OK", (int((xmax2-xmin1)/2)+xmin1-15, ymax-16), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 1)
+
+    cv2.putText(image, "[u|j]th1: {:d} [i|k]th2: {:d} [y|h]blur: {:d}".format(th1, th2, k), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1)
+    cv2.putText(image, "[a]>> [s]stop [q]quit", (10, 70), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1)
+    cv2.imshow("image", image)
+    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    if k!=1:
+        gray=cv2.blur(gray, (k, k))
+    cv2.imshow("blur", gray)
+    gray_canny=cv2.Canny(gray, th1, th2)
+    cv2.imshow("canny", gray_canny)
+
+    if not stop:
+        key=cv2.waitKey(20)&0xFF
+    else:
+        key=cv2.waitKey()
+        image=frame.copy()
+
+    if key==ord('q'):
+        break
+    if key==ord('m'):
+        ymin+=1
+        ymax+=1
+    if key==ord('p'):
+        ymin-=1
+        ymax-=1
+    if key==ord('o'):
+        xmin1+=1
+        xmax1+=1
+        xmin2+=1
+        xmax2+=1
+    if key==ord('l'):
+        xmin1-=1
+        xmax1-=1
+        xmin2-=1
+        xmax2-=1
+    if key==ord('y'):
+        k=min(255, k+2)
+    if key==ord('h'):
+        k=max(1, k-2)
+    if key==ord('u'):
+        th1=min(255, th1+1)
+    if key==ord('j'):
+        th1=max(0, th1-1)
+    if key==ord('i'):
+        th2=min(255, th2+1)
+    if key==ord('k'):
+        th2=max(0, th2-1)
+    if key==ord('s'):
+        stop=not stop
+    if key==ord('a'):
+        for cpt in range(200):
+            ret, frame=cap.read()
+        image=frame.copy()
+
+cap.release()
+cv2.destroyAllWindows()
diff --git a/server-ia/Camera_Ligne_Detection/gradient.py b/server-ia/Camera_Ligne_Detection/gradient.py
new file mode 100644
index 0000000..ebf3c96
--- /dev/null
+++ b/server-ia/Camera_Ligne_Detection/gradient.py
@@ -0,0 +1,37 @@
+import cv2
+import numpy as np
+
+stop=0
+cap=cv2.VideoCapture(0)
+while True:
+    if not stop:
+        ret, frame=cap.read()
+        if not ret:
+            quit()
+        image=frame.copy()
+
+    cv2.imshow("image", image)
+    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+    grad_x=cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=5)
+    cv2.imshow("grad x", grad_x)
+
+    grad_y=cv2.Sobel(gray, cv2.CV_8U, 0, 1, ksize=5)
+    cv2.imshow("grad y", grad_y)
+
+    if not stop:
+        key=cv2.waitKey(10)&0xFF
+    else:
+        key=cv2.waitKey()
+        image=frame.copy()
+
+    if key==ord('q'):
+        break
+    if key==ord('s'):
+        stop=not stop
+    if key==ord('a'):
+        for cpt in range(200):
+            ret, frame=cap.read()
+        image=frame.copy()
+cap.release()
+cv2.destroyAllWindows()
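
gradient.py computes the Sobel gradients directly in cv2.CV_8U, which silently clips negative gradient values to 0, so each window only shows edges on one side. A common variant, shown here as a sketch, computes in a signed float depth first and converts back for display:

import cv2

def sobel_abs(gray, dx, dy, ksize=5):
    grad = cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=ksize)  # keep negative responses
    return cv2.convertScaleAbs(grad)                         # |grad| scaled back to 8-bit

# Possible usage in gradient.py's loop:
# cv2.imshow("grad x", sobel_abs(gray, 1, 0))
# cv2.imshow("grad y", sobel_abs(gray, 0, 1))
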
diff --git a/server-ia/Identification_Yolov3/NOTE.txt b/server-ia/Identification_Yolov3/NOTE.txt
new file mode 100644
index 0000000..08b7bd1
--- /dev/null
+++ b/server-ia/Identification_Yolov3/NOTE.txt
@@ -0,0 +1,3 @@
+python server-ia/Identification_Yolov3/yolo_opencv.py --image server-ia/data/images/dog.jpg --config server-ia/data/modeles/yolov3/yolov3.cfg --weights server-ia/data/modeles/yolov3/yolov3.weights --classes server-ia/data/modeles/yolov3/yolov3.txt
+python server-ia/Identification_Yolov3/yolo_opencv_video.py --video cam --config server-ia/data/modeles/yolov3/yolov3.cfg --weights server-ia/data/modeles/yolov3/yolov3.weights --classes server-ia/data/modeles/yolov3/yolov3.txt
+python server-ia/Identification_Yolov3/yolo_opencv_video.py --video server-ia/data/videos/ville.mp4 --config server-ia/data/modeles/yolov3/yolov3.cfg --weights server-ia/data/modeles/yolov3/yolov3.weights --classes server-ia/data/modeles/yolov3/yolov3.txt
\ No newline at end of file
diff --git a/server-ia/Identification_Yolov3/yolo_opencv.py b/server-ia/Identification_Yolov3/yolo_opencv.py
new file mode 100644
index 0000000..e1bf247
--- /dev/null
+++ b/server-ia/Identification_Yolov3/yolo_opencv.py
@@ -0,0 +1,78 @@
+import cv2
+import argparse
+import numpy as np
+
+ap = argparse.ArgumentParser()
+ap.add_argument('-i', '--image', required=True,
+                help='path to input image')
+ap.add_argument('-c', '--config', required=True,
+                help='path to yolo config file')
+ap.add_argument('-w', '--weights', required=True,
+                help='path to yolo pre-trained weights')
+ap.add_argument('-cl', '--classes', required=True,
+                help='path to text file containing class names')
+args = ap.parse_args()
+
+
+def get_output_layers(net):
+    layer_names = net.getLayerNames()
+    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
+    return output_layers
+
+
+def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
+    label = str(classes[class_id])
+    color = COLORS[class_id]
+    cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
+    cv2.putText(img, label, (x-10, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
+image = cv2.imread(args.image)
+Width = image.shape[1]
+Height = image.shape[0]
+scale = 0.00392
+classes = None
+
+with open(args.classes, 'r') as f:
+    classes = [line.strip() for line in f.readlines()]
+COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
+net = cv2.dnn.readNet(args.weights, args.config)
+blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
+net.setInput(blob)
+outs = net.forward(get_output_layers(net))
+class_ids = []
+confidences = []
+boxes = []
+conf_threshold = 0.5
+nms_threshold = 0.4
+
+for out in outs:
+    for detection in out:
+        scores = detection[5:]
+        class_id = np.argmax(scores)
+        confidence = scores[class_id]
+        if confidence > conf_threshold:
+            center_x = int(detection[0] * Width)
+            center_y = int(detection[1] * Height)
+            w = int(detection[2] * Width)
+            h = int(detection[3] * Height)
+            x = center_x - w / 2
+            y = center_y - h / 2
+            class_ids.append(class_id)
+            confidences.append(float(confidence))
+            boxes.append([x, y, w, h])
+
+indices = np.array(cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)).flatten()
+
+for i in indices:
+    i = int(i)
+    box = boxes[i]
+    x = box[0]
+    y = box[1]
+    w = box[2]
+    h = box[3]
+    draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x+w), round(y+h))
+
+cv2.imshow("object detection", image)
+cv2.waitKey()
+
+cv2.imwrite("object-detection.jpg", image)
+cv2.destroyAllWindows()
diff --git a/server-ia/Identification_Yolov3/yolo_opencv_video.py b/server-ia/Identification_Yolov3/yolo_opencv_video.py
new file mode 100644
index 0000000..eb457fc
--- /dev/null
+++ b/server-ia/Identification_Yolov3/yolo_opencv_video.py
@@ -0,0 +1,92 @@
+import cv2
+import argparse
+import numpy as np
+
+ap = argparse.ArgumentParser()
+ap.add_argument('-v', '--video', required=True,
+                help='path to input video file or "cam" for webcam')
+ap.add_argument('-c', '--config', required=True,
+                help='path to yolo config file')
+ap.add_argument('-w', '--weights', required=True,
+                help='path to yolo pre-trained weights')
+ap.add_argument('-cl', '--classes', required=True,
+                help='path to text file containing class names')
+args = ap.parse_args()
+
+def get_output_layers(net):
+    layer_names = net.getLayerNames()
+    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
+    return output_layers
+
+def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
+    label = str(classes[class_id])
+    color = COLORS[class_id]
+    cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
+    cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
+classes = None
+with open(args.classes, 'r') as f:
+    classes = [line.strip() for line in f.readlines()]
+
+COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
+
+net = cv2.dnn.readNet(args.weights, args.config)
+
+# If webcam is chosen, use camera capture
+if args.video == 'cam':
+    cap = cv2.VideoCapture(0)
+else:
+    cap = cv2.VideoCapture(args.video)
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    Width = frame.shape[1]
+    Height = frame.shape[0]
+    scale = 0.00392
+
+    blob = cv2.dnn.blobFromImage(frame, scale, (416, 416), (0, 0, 0), True, crop=False)
+    net.setInput(blob)
+    outs = net.forward(get_output_layers(net))
+
+    class_ids = []
+    confidences = []
+    boxes = []
+    conf_threshold = 0.5
+    nms_threshold = 0.4
+
+    for out in outs:
+        for detection in out:
+            scores = detection[5:]
+            class_id = np.argmax(scores)
+            confidence = scores[class_id]
+            if confidence > conf_threshold:
+                center_x = int(detection[0] * Width)
+                center_y = int(detection[1] * Height)
+                w = int(detection[2] * Width)
+                h = int(detection[3] * Height)
+                x = center_x - w / 2
+                y = center_y - h / 2
+                class_ids.append(class_id)
+                confidences.append(float(confidence))
+                boxes.append([x, y, w, h])
+
+    indices = np.array(cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)).flatten()
+
+    for i in indices:
+        i = int(i)
+        box = boxes[i]
+        x = box[0]
+        y = box[1]
+        w = box[2]
+        h = box[3]
+        draw_prediction(frame, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h))
+
+    cv2.imshow("object detection", frame)
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
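
The output-layer lookup and NMS index handling are duplicated across real_time_yolo.py, yolo_opencv.py and yolo_opencv_video.py, and the return shapes of getUnconnectedOutLayers() and NMSBoxes() changed around OpenCV 4.5.4 (flat int arrays instead of Nx1 arrays). A shared helper module that tolerates both shapes could look like this (a sketch, not part of the patch):

import cv2
import numpy as np

def get_output_layers(net):
    """Names of the unconnected output layers, for any OpenCV 4.x."""
    layer_names = net.getLayerNames()
    ids = np.asarray(net.getUnconnectedOutLayers()).flatten()
    return [layer_names[i - 1] for i in ids]

def nms_indices(boxes, confidences, conf_threshold=0.5, nms_threshold=0.4):
    """Kept-box indices as a plain list of ints, for any OpenCV 4.x."""
    idx = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    return [] if len(idx) == 0 else np.asarray(idx).flatten().tolist()
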
diff --git a/server-ia/Identify_RoadSignV1/identify-RoadSign-speed.py b/server-ia/Identify_RoadSignV1/identify-RoadSign-speed.py
new file mode 100644
index 0000000..237f2f7
--- /dev/null
+++ b/server-ia/Identify_RoadSignV1/identify-RoadSign-speed.py
@@ -0,0 +1,90 @@
+import cv2
+from tensorflow.keras.models import load_model
+import numpy as np
+
+# Load the saved model
+model = load_model("server-ia/data/modeles/RoadSign/modele_signaux_routiers.h5")
+
+# Function that loads the class names from a file
+def load_class_names(file_path):
+    with open(file_path, 'r') as file:
+        class_names = [line.strip() for line in file.readlines()]
+    return class_names
+
+# Path of the file containing the class names
+class_names_file = "server-ia/data/modeles/RoadSign/class_names.txt"
+
+# Load the class names from the file
+class_names = load_class_names(class_names_file)
+
+# Function that detects road signs in an image
+def detect_sign(image):
+    # Preprocess the image
+    preprocessed_img = preprocess_image(image)
+
+    # Run a prediction with the model
+    predictions = model.predict(preprocessed_img)
+
+    # Get the index of the predicted class
+    predicted_class_index = np.argmax(predictions)
+
+    # Look up the name of the predicted class
+    predicted_class_name = class_names[predicted_class_index]
+
+    return predicted_class_name
+
+# Image preprocessing function
+def preprocess_image(image):
+    # Resize the image to the dimensions expected by the model
+    scaled_image = cv2.resize(image, (224, 224))
+    scaled_image = scaled_image.astype("float") / 255.0
+
+    # Add a batch dimension to match the model's input shape
+    preprocessed_img = np.expand_dims(scaled_image, axis=0)
+
+    return preprocessed_img
+
+# Select the video source or the camera
+# For a video file
+video_path = "server-ia/data/videos/autoroute.mp4"
+cap = cv2.VideoCapture(video_path)
+
+# For the camera
+# cap = cv2.VideoCapture(0)
+# Number of frames to skip between two processed frames
+skip_frames = 5
+
+# Loop reading frames from the video or the camera
+frame_count = 0
+while cap.isOpened():
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    # Increment the frame counter
+    frame_count += 1
+
+    # Every skip_frames frames, process the frame and display it
+    if frame_count % skip_frames == 0:
+        # Reset the frame counter
+        frame_count = 0
+
+        # Detect road signs in the frame
+        predicted_class_name = detect_sign(frame)
+
+        # Draw a box around each detected object and print the class name above it
+        # (here a fixed box is drawn just to give an example)
+        x_min, y_min, x_max, y_max = 900, 50, 1200, 650  # Bounding-box coordinates (replace with real values)
+        cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
+        cv2.putText(frame, predicted_class_name, (x_min, y_min - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+        # Display the frame
+        cv2.imshow('Frame', frame)
+
+    # Press 'q' to quit
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# Release the capture and close the windows
+cap.release()
+cv2.destroyAllWindows()
diff --git a/server-ia/Identify_RoadSignV1/identify-RoadSign-speed2.py b/server-ia/Identify_RoadSignV1/identify-RoadSign-speed2.py
new file mode 100644
index 0000000..1cd49c7
--- /dev/null
+++ b/server-ia/Identify_RoadSignV1/identify-RoadSign-speed2.py
@@ -0,0 +1,97 @@
+import cv2
+from tensorflow.keras.models import load_model
+import numpy as np
+
+# Load the saved model
+model = load_model("server-ia/data/modeles/RoadSign/modele_signaux_routiers.h5")
+
+# Function that loads the class names from a file
+def load_class_names(file_path):
+    with open(file_path, 'r') as file:
+        class_names = [line.strip() for line in file.readlines()]
+    return class_names
+
+# Path of the file containing the class names
+class_names_file = "server-ia/data/modeles/RoadSign/class_names.txt"
+
+# Load the class names from the file
+class_names = load_class_names(class_names_file)
+
+# Image preprocessing function
+def preprocess_image(image):
+    # Resize the image to the dimensions expected by the model
+    scaled_image = cv2.resize(image, (224, 224))
+    scaled_image = scaled_image.astype("float") / 255.0
+
+    # Add a batch dimension to match the model's input shape
+    preprocessed_img = np.expand_dims(scaled_image, axis=0)
+
+    return preprocessed_img
+
+# Function that returns the bounding-box coordinates
+def obtenir_coordonnees_boite(predictions):
+    # TO BE DEFINED: the classification model only outputs class scores, not boxes
+    # Placeholder that pretends predictions is a list of [x_min, y_min, x_max, y_max] boxes
+    return predictions[0]  # return the coordinates of the first bounding box
+
+# Function that detects road signs in an image
+def detect_sign(image):
+    # Preprocess the image
+    preprocessed_img = preprocess_image(image)
+
+    # Run a prediction with the model
+    predictions = model.predict(preprocessed_img)
+
+    # Get the index of the predicted class
+    predicted_class_index = np.argmax(predictions)
+
+    # Look up the name of the predicted class
+    predicted_class_name = class_names[predicted_class_index]
+
+    return predicted_class_name, predictions
+
+# Select the video source or the camera
+# For a video file
+video_path = "server-ia/data/videos/autoroute.mp4"
+cap = cv2.VideoCapture(video_path)
+
+# For the camera
+# cap = cv2.VideoCapture(0)
+# Number of frames to skip between two processed frames
+skip_frames = 5
+
+# Loop reading frames from the video or the camera
+frame_count = 0
+while cap.isOpened():
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    # Increment the frame counter
+    frame_count += 1
+
+    # Every skip_frames frames, process the frame and display it
+    if frame_count % skip_frames == 0:
+        # Reset the frame counter
+        frame_count = 0
+
+        # Detect road signs in the frame
+        predicted_class_name, predictions = detect_sign(frame)
+
+        # Get the bounding-box coordinates of the detected object
+        x_min, y_min, x_max, y_max = obtenir_coordonnees_boite(predictions)
+
+        # Draw a box around each detected object and print the class name above it
+        cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
+        cv2.putText(frame, predicted_class_name, (x_min, y_min - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+        # Display the frame
+        cv2.imshow('Frame', frame)
+
+    # Press 'q' to quit
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# Release the capture and close the windows
+cap.release()
+cv2.destroyAllWindows()
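
obtenir_coordonnees_boite() is still a stub: the classifier in this patch only returns class scores, never box coordinates. One hypothetical way to get candidate boxes without a detection model is to propose regions first (for example red-ish, roughly square blobs for speed signs) and classify each crop; all thresholds below are illustrative assumptions, not tuned values:

import cv2
import numpy as np

def propose_sign_boxes(frame, min_area=400):
    """Propose (x_min, y_min, x_max, y_max) boxes around red-ish blobs."""
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # red wraps around the hue axis, so combine two hue ranges
    mask = cv2.inRange(hsv, (0, 80, 80), (10, 255, 255)) | \
           cv2.inRange(hsv, (170, 80, 80), (180, 255, 255))
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if w * h >= min_area and 0.5 < w / h < 2.0:  # keep roughly square blobs
            boxes.append((x, y, x + w, y + h))
    return boxes

# Each proposed box can then be cropped and passed to detect_sign() instead of
# classifying the whole frame.
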
diff --git a/server-ia/Identify_RoadSignV1/identify-RoadSign.py b/server-ia/Identify_RoadSignV1/identify-RoadSign.py
new file mode 100644
index 0000000..8f5bbda
--- /dev/null
+++ b/server-ia/Identify_RoadSignV1/identify-RoadSign.py
@@ -0,0 +1,76 @@
+import cv2
+from tensorflow.keras.models import load_model
+import numpy as np
+
+# Load the saved model
+model = load_model("server-ia/data/modeles/RoadSign/modele_signaux_routiers.h5")
+
+# Function that loads the class names from a file
+def load_class_names(file_path):
+    with open(file_path, 'r') as file:
+        class_names = [line.strip() for line in file.readlines()]
+    return class_names
+
+# Path of the file containing the class names
+class_names_file = "server-ia/data/modeles/RoadSign/class_names.txt"
+
+# Load the class names from the file
+class_names = load_class_names(class_names_file)
+
+# Function that detects road signs in an image
+def detect_sign(image):
+    # Preprocess the image
+    preprocessed_img = preprocess_image(image)
+
+    # Run a prediction with the model
+    predictions = model.predict(preprocessed_img)
+
+    # Get the index of the predicted class
+    predicted_class_index = np.argmax(predictions)
+
+    # Look up the name of the predicted class
+    predicted_class_name = class_names[predicted_class_index]
+
+    return predicted_class_name
+
+# Image preprocessing function
+def preprocess_image(image):
+    # Resize the image to the dimensions expected by the model
+    scaled_image = cv2.resize(image, (224, 224))
+    scaled_image = scaled_image.astype("float") / 255.0
+
+    # Add a batch dimension to match the model's input shape
+    preprocessed_img = np.expand_dims(scaled_image, axis=0)
+
+    return preprocessed_img
+
+# Select the video source or the camera
+# For a video file
+video_path = "server-ia/data/videos/autoroute.mp4"
+cap = cv2.VideoCapture(video_path)
+
+# For the camera
+# cap = cv2.VideoCapture(0)
+
+# Loop reading frames from the video or the camera
+while cap.isOpened():
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    # Detect road signs in the frame
+    predicted_class_name = detect_sign(frame)
+
+    # Draw the result on the frame
+    cv2.putText(frame, predicted_class_name, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+
+    # Display the frame
+    cv2.imshow('Frame', frame)
+
+    # Press 'q' to quit
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# Release the capture and close the windows
+cap.release()
+cv2.destroyAllWindows()
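
identify-RoadSign.py runs model.predict() on every decoded frame, which dominates the loop time; the speed variants skip frames but still fully decode them with read(). cap.grab() advances the stream without decoding, which is a cheaper way to skip (a sketch; the skip count of 5 is an arbitrary assumption):

import cv2

def read_every_nth(cap, n=5):
    """Decode one frame, then cheaply discard the next n-1 without decoding."""
    ret, frame = cap.read()
    for _ in range(n - 1):
        cap.grab()
    return ret, frame

# Possible usage in the main loop:
# while cap.isOpened():
#     ret, frame = read_every_nth(cap, n=5)
#     if not ret:
#         break
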