CARIA.2.0
Previous CARIA repo: added a new model. Work on multiple AIs. ...
server-ia/Camera_Identification_VitesseRoadSign/common.py (normal file, 67 additions)
@@ -0,0 +1,67 @@
import tensorflow as tf
from tensorflow.keras import layers, models
import os
import cv2

size=42
dir_images_panneaux="server-ia/Camera_Identification_VitesseRoadSign/images_panneaux"
dir_images_autres_panneaux="server-ia/Camera_Identification_VitesseRoadSign/images_autres_panneaux"
dir_images_sans_panneaux="server-ia/Camera_Identification_VitesseRoadSign/images_sans_panneaux"

def panneau_model(nbr_classes):
    # CNN classifier for size x size RGB crops of road signs
    model=tf.keras.Sequential()

    model.add(layers.Input(shape=(size, size, 3), dtype='float32'))

    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    model.add(layers.Conv2D(256, 3, strides=1))
    model.add(layers.Dropout(0.3))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Conv2D(256, 3, strides=1))
    model.add(layers.Dropout(0.4))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(nbr_classes, activation='sigmoid'))

    return model


def lire_images_panneaux(dir_images_panneaux, size=None):
    # Returns the sign labels (taken from the file names) and the matching images
    tab_panneau=[]
    tab_image_panneau=[]

    if not os.path.exists(dir_images_panneaux):
        quit("Le repertoire d'image n'existe pas: {}".format(dir_images_panneaux))

    files=os.listdir(dir_images_panneaux)
    # os.listdir never returns None; an empty list means the directory is empty
    if not files:
        quit("Le repertoire d'image est vide: {}".format(dir_images_panneaux))

    for file in sorted(files):
        if file.endswith("png"):
            tab_panneau.append(file.split(".")[0])
            image=cv2.imread(dir_images_panneaux+"/"+file)
            if size is not None:
                # interpolation must be passed as a keyword argument
                # (the third positional argument of cv2.resize is dst)
                image=cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau
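common.py only defines the model and the image loader; the training loop itself is not part of this commit. The sketch below is a hypothetical illustration of how these helpers and the checkpoint expected by lire_panneau.py could fit together. The optimizer, loss, the x_train/y_train variables and the assumption that the script sits next to common.py are illustrative choices, not code from this repository.

# Hypothetical training sketch (not part of this commit); assumes it sits next to common.py.
import tensorflow as tf
from common import panneau_model, lire_images_panneaux, dir_images_panneaux

tab_panneau, _ = lire_images_panneaux(dir_images_panneaux, size=42)
model = panneau_model(len(tab_panneau))
# Sigmoid output plus the 0.6 threshold used at inference time suggests a per-class binary loss
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# x_train: float32 crops of shape (N, 42, 42, 3) scaled to [0, 1]
# y_train: one-hot rows of shape (N, len(tab_panneau))
# model.fit(x_train, y_train, epochs=10, batch_size=32)

# lire_panneau.py restores a tf.train.Checkpoint from this directory, so training
# would save one in the same format:
checkpoint = tf.train.Checkpoint(model_panneau=model)
checkpoint.save("server-ia/data/modeles/road_sign_speed_trainers/ckpt")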
@@ -0,0 +1,79 @@
import cv2
import os
import numpy as np
# Tuto25[OpenCV] Lecture des panneaux de vitesse p.1 (houghcircles) 16min

size=42
video_dir = "server-ia/data/videos"

# List every file in the video directory
l = os.listdir(video_dir)

for video in l:
    if not video.endswith("mp4"):
        continue
    cap = cv2.VideoCapture(video_dir + "/" + video)

    print("video:", video)
    while True:
        # Grab one frame from the video
        ret, frame = cap.read()
        if not ret:
            break

        # Resize the frame for a comfortable display;
        # frame.shape is (height, width, channels) and cv2.resize expects (width, height)
        f_h, f_w, f_c = frame.shape
        frame = cv2.resize(frame, (int(f_w / 1.5), int(f_h / 1.5)))

        # Extract the region of interest (ROI) from the frame
        image = frame[200:400, 700:1000]

        # Top left corner of the rectangle
        start_point = (600, 50)
        # Bottom right corner of the rectangle
        end_point = (800, 450)
        # Color in BGR
        color = (255, 255, 255)
        # Line thickness
        thickness = 1

        # Draw a rectangle around the ROI
        cv2.rectangle(frame, start_point, end_point, color, thickness)

        # Convert the ROI to grayscale for circle detection
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Detect circles in the ROI
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=30, param2=60, minRadius=5, maxRadius=45)
        if circles is not None:
            circles = np.int16(np.around(circles))
            for i in circles[0, :]:
                if i[2] != 0:
                    cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 4)
                    # Extract the road sign crop from the circle position and radius
                    panneau = cv2.resize(
                        image[max(0, i[1] - i[2]):i[1] + i[2], max(0, i[0] - i[2]):i[0] + i[2]],
                        (size, size)) / 255
                    cv2.imshow("panneau", panneau)

        # Show the name of the current video file
        cv2.putText(frame, "fichier:" + video, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)

        # Show the frame with the rectangle and the detected sign
        cv2.imshow("Video", frame)

        # Wait for a key press and handle the associated actions
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            quit()
        if key == ord('a'):
            # Skip ahead 100 frames
            for cpt in range(100):
                ret, frame = cap.read()
        if key == ord('f'):
            # Move on to the next video
            break

    cap.release()

# Close every OpenCV window
cv2.destroyAllWindows()
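The script above only displays the candidate crops. To turn them into training images they would have to be written to disk at some point; the small helper below is a hypothetical addition (the output directory and the idea of binding it to a key such as ord('s') are assumptions, not part of this commit).

# Hypothetical helper (not in this commit): save a detected crop to disk so it
# can be sorted and labelled later. 'crop' is the [0, 1] float image produced
# in the loop above; cv2.imwrite expects 8-bit values.
import os
import time
import cv2
import numpy as np

def sauver_panneau(crop, out_dir="server-ia/data/images/captures"):
    os.makedirs(out_dir, exist_ok=True)
    path = "{}/capture_{}.png".format(out_dir, int(time.time() * 1000))
    cv2.imwrite(path, (crop * 255).astype(np.uint8))
    return path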
@@ -0,0 +1,37 @@
import cv2
import numpy as np
# Tuto25[OpenCV] Lecture des panneaux de vitesse p.1 (houghcircles) 14min

# HoughCircles parameters, adjustable from the keyboard while the video plays
param1=30
param2=55
dp=1.0
video_path = "server-ia/data/videos/ville.mp4"

cap=cv2.VideoCapture(video_path)

while True:
    ret, frame=cap.read()
    if not ret:
        break
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    circles=cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp, 20, param1=param1, param2=param2, minRadius=10, maxRadius=50)
    if circles is not None:
        circles=np.around(circles).astype(np.int32)
        for i in circles[0, :]:
            if i[2]!=0:
                cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 4)
    cv2.putText(frame, "[i|k]dp: {:4.2f} [o|l]param1: {:d} [p|m]param2: {:d}".format(dp, param1, param2), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
    cv2.imshow("Video", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        quit()
    if key==ord('i'):
        dp=min(10, dp+0.1)
    if key==ord('k'):
        dp=max(0.1, dp-0.1)
    if key==ord('o'):
        param1=min(255, param1+1)
    if key==ord('l'):
        param1=max(1, param1-1)
    if key==ord('p'):
        param2=min(255, param2+1)
    if key==ord('m'):
        param2=max(1, param2-1)

cap.release()
cv2.destroyAllWindows()
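Adjusting param1/param2 one key press at a time works, but OpenCV trackbars are a common alternative for this kind of interactive tuning. Below is a hedged sketch of the same loop driven by trackbars (same video path; this is an illustration, not code from this commit, and it leaves dp fixed since trackbars only hold integers).

# Hypothetical alternative: tune HoughCircles with OpenCV trackbars instead of keys.
import cv2
import numpy as np

cv2.namedWindow("Video")
cv2.createTrackbar("param1", "Video", 30, 255, lambda v: None)
cv2.createTrackbar("param2", "Video", 55, 255, lambda v: None)

cap = cv2.VideoCapture("server-ia/data/videos/ville.mp4")
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    p1 = max(1, cv2.getTrackbarPos("param1", "Video"))
    p2 = max(1, cv2.getTrackbarPos("param2", "Video"))
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=p1, param2=p2, minRadius=10, maxRadius=50)
    if circles is not None:
        for x, y, r in np.around(circles[0]).astype(np.int32):
            cv2.circle(frame, (x, y), r, (0, 255, 0), 4)
    cv2.imshow("Video", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()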
server-ia/Camera_Identification_VitesseRoadSign/lire_panneau.py (normal file, 140 additions)
@@ -0,0 +1,140 @@
import tensorflow as tf
from tensorflow.keras import layers, models
import cv2
import os
import numpy as np
import random

th1=30
th2=55
size=42
video_dir="server-ia/data/videos/"
dir_images_panneaux="server-ia/data/images/panneaux"

def panneau_model(nbr_classes):
    # Same CNN classifier as in common.py
    model=tf.keras.Sequential()

    model.add(layers.Input(shape=(size, size, 3), dtype='float32'))

    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    model.add(layers.Conv2D(256, 3, strides=1))
    model.add(layers.Dropout(0.3))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Conv2D(256, 3, strides=1))
    model.add(layers.Dropout(0.4))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(nbr_classes, activation='sigmoid'))

    return model


def lire_images_panneaux(dir_images_panneaux, size=None):
    tab_panneau=[]
    tab_image_panneau=[]

    if not os.path.exists(dir_images_panneaux):
        quit("Le repertoire d'image n'existe pas: {}".format(dir_images_panneaux))

    files=os.listdir(dir_images_panneaux)
    # os.listdir never returns None; an empty list means the directory is empty
    if not files:
        quit("Le repertoire d'image est vide: {}".format(dir_images_panneaux))

    for file in sorted(files):
        if file.endswith("png"):
            tab_panneau.append(file.split(".")[0])
            image=cv2.imread(dir_images_panneaux+"/"+file)
            if size is not None:
                # interpolation must be passed as a keyword argument
                image=cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau


tab_panneau, tab_image_panneau=lire_images_panneaux(dir_images_panneaux)

model_panneau=panneau_model(len(tab_panneau))
checkpoint=tf.train.Checkpoint(model_panneau=model_panneau)
# Use forward slashes: the backslashes in the original string ("\r" in particular)
# were interpreted as escape sequences and broke the path
checkpoint.restore(tf.train.latest_checkpoint("server-ia/data/modeles/road_sign_speed_trainers/"))

l=os.listdir(video_dir)
random.shuffle(l)

for video in l:
    if not video.endswith("mp4"):
        continue
    cap=cv2.VideoCapture(video_dir+"/"+video)

    print("video:", video)
    id_panneau=-1
    while True:
        ret, frame=cap.read()
        if not ret:
            break
        # frame.shape is (height, width, channels); cv2.resize expects (width, height)
        f_h, f_w, f_c=frame.shape
        frame=cv2.resize(frame, (int(f_w/1.5), int(f_h/1.5)))

        # Region of interest used for circle detection
        image=frame[200:400, 700:1000]

        # Top left corner of the rectangle
        start_point = (600, 50)
        # Bottom right corner of the rectangle
        end_point = (800, 450)
        # Color in BGR
        color = (255, 255, 255)
        # Line thickness
        thickness = 1

        cv2.rectangle(frame, start_point, end_point, color, thickness)

        gray=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        circles=cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=th1, param2=th2, minRadius=5, maxRadius=45)
        if circles is not None:
            circles=np.int16(np.around(circles))
            for i in circles[0, :]:
                if i[2]!=0:
                    # Crop the candidate sign and classify it
                    panneau=cv2.resize(image[max(0, i[1]-i[2]):i[1]+i[2], max(0, i[0]-i[2]):i[0]+i[2]], (size, size))/255
                    cv2.imshow("panneau", panneau)
                    prediction=model_panneau(np.array([panneau]), training=False)
                    print("Prediction:", prediction)
                    if np.any(np.greater(prediction[0], 0.6)):
                        id_panneau=np.argmax(prediction[0])
                        print(" -> C'est un panneau:", tab_panneau[id_panneau], "KM/H")
                        # shape is (height, width, channels)
                        h, w, c=tab_image_panneau[id_panneau].shape
                    else:
                        print(" -> Ce n'est pas un panneau")
        if id_panneau!=-1:
            # Overlay the reference image of the last recognised sign in the top left corner
            frame[0:h, 0:w, :]=tab_image_panneau[id_panneau]
        cv2.putText(frame, "fichier:"+video, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
        cv2.imshow("Video", frame)
        key=cv2.waitKey(1)&0xFF
        if key==ord('q'):
            quit()
        if key==ord('a'):
            # Skip ahead 100 frames
            for cpt in range(100):
                ret, frame=cap.read()
        if key==ord('f'):
            # Move on to the next video
            break

    cap.release()

cv2.destroyAllWindows()
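lire_panneau.py switches the overlaid sign as soon as a single frame crosses the 0.6 threshold, so one spurious detection changes the display. A common remedy is to vote over the last few frames; the helper below is a hypothetical illustration of that idea (class name, window size and usage are assumptions, not code from this commit).

# Hypothetical smoothing helper: keep the last N predicted class ids and only
# accept a sign once it wins a strict majority of the recent frames.
from collections import Counter, deque

class VotePanneau:
    def __init__(self, n=10):
        self.historique = deque(maxlen=n)

    def ajouter(self, id_panneau):
        # id_panneau is the per-frame result, -1 when nothing crossed the threshold
        self.historique.append(id_panneau)
        id_majoritaire, votes = Counter(self.historique).most_common(1)[0]
        if id_majoritaire != -1 and votes > len(self.historique) // 2:
            return id_majoritaire
        return -1

# Usage sketch: feed every per-frame result into ajouter() and overlay the value
# it returns instead of the raw id_panneau from the current frame.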