CARIA.2.1
Update for the final presentation; major changes from the previous version.
@@ -1,208 +0,0 @@
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import cv2
import os
import time
import dataset

size = 42
dir_images_panneaux = "server-trainer/images/road_sign_speed_trainers/panneaux"
dir_images_autres_panneaux = "server-trainer/images/road_sign_speed_trainers/autres_panneaux"
dir_images_genere_sans_panneaux = "server-trainer/images/road_sign_speed_trainers/genere_sans_panneaux"

batch_size = 128
nbr_entrainement = 1  # 20 for a full training run

def panneau_model(nbr_classes):
    model = tf.keras.Sequential()

    model.add(layers.Input(shape=(size, size, 3), dtype='float32'))

    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    model.add(layers.Conv2D(256, 3, strides=1))
    model.add(layers.Dropout(0.3))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Conv2D(256, 3, strides=1))
    model.add(layers.Dropout(0.4))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    # One sigmoid output per class: every output stays low for "no sign" crops.
    model.add(layers.Dense(nbr_classes, activation='sigmoid'))

    return model

def lire_images_panneaux(dir_images_panneaux, size=None):
    tab_panneau = []
    tab_image_panneau = []

    if not os.path.exists(dir_images_panneaux):
        quit("The image directory does not exist: {}".format(dir_images_panneaux))

    files = os.listdir(dir_images_panneaux)
    if not files:
        quit("The image directory is empty: {}".format(dir_images_panneaux))

    for file in sorted(files):
        if file.endswith("png"):
            tab_panneau.append(file.split(".")[0])
            image = cv2.imread(os.path.join(dir_images_panneaux, file))
            if size is not None:
                image = cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau

tab_panneau, tab_image_panneau = lire_images_panneaux(dir_images_panneaux, size)

tab_images = np.array([]).reshape(0, size, size, 3)
tab_labels = np.array([]).reshape(0, len(tab_image_panneau))

# Positive examples: 120 augmented variants per reference sign, one-hot labels.
id = 0
for image in tab_image_panneau:
    lot = []
    for _ in range(120):
        lot.append(dataset.modif_img(image))
    lot = np.array(lot)
    tab_images = np.concatenate((tab_images, lot))
    tab_labels = np.concatenate([tab_labels, np.repeat([np.eye(len(tab_image_panneau))[id]], len(lot), axis=0)])
    id += 1

# Negative examples: other road signs, labelled all-zero.
files = os.listdir(dir_images_autres_panneaux)
if not files:
    quit("The image directory is empty: {}".format(dir_images_autres_panneaux))

nbr = 0
for file in files:
    lot = []
    if file.endswith("png"):
        path = os.path.join(dir_images_autres_panneaux, file)
        image = cv2.resize(cv2.imread(path), (size, size), interpolation=cv2.INTER_LANCZOS4)
        for _ in range(700):
            lot.append(dataset.modif_img(image))
        lot = np.array(lot)
        tab_images = np.concatenate([tab_images, lot])
        nbr += len(lot)

tab_labels = np.concatenate([tab_labels, np.repeat([np.full(len(tab_image_panneau), 0)], nbr, axis=0)])

# More negatives: random crops without any sign, up to half the dataset so far.
nbr_np = int(len(tab_images) / 2)

id = 1
nbr = 0
tab = []
for cpt in range(nbr_np):
    file = dir_images_genere_sans_panneaux + "/{:d}.png".format(id)
    if not os.path.isfile(file):
        break
    image = cv2.resize(cv2.imread(file), (size, size))
    tab.append(image)
    id += 1
    nbr += 1

tab_images = np.concatenate([tab_images, tab])
tab_labels = np.concatenate([tab_labels, np.repeat([np.full(len(tab_image_panneau), 0)], nbr, axis=0)])

tab_panneau = np.array(tab_panneau)
tab_images = np.array(tab_images, dtype=np.float32) / 255
tab_labels = np.array(tab_labels, dtype=np.float32)

train_images, test_images, train_labels, test_labels = train_test_split(tab_images, tab_labels, test_size=0.10)

train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(batch_size)

print("train_images", len(train_images))
print("test_images", len(test_images))

@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model_panneau(images)
        loss = my_loss(labels, predictions)
    gradients = tape.gradient(loss, model_panneau.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model_panneau.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)

def train(train_ds, nbr_entrainement):
    for entrainement in range(nbr_entrainement):
        start = time.time()
        for images, labels in train_ds:
            train_step(images, labels)
        message = 'Epoch {:04d}: loss: {:6.4f}, accuracy: {:7.4f}%, time: {:7.4f}'
        print(message.format(entrainement + 1,
                             train_loss.result(),
                             train_accuracy.result() * 100,
                             time.time() - start))
        train_loss.reset_states()
        train_accuracy.reset_states()
        test(test_ds)

def my_loss(labels, preds):
    # Flatten to one column so the binary cross-entropy averages over every output unit.
    labels_reshape = tf.reshape(labels, (-1, 1))
    preds_reshape = tf.reshape(preds, (-1, 1))
    result = loss_object(labels_reshape, preds_reshape)
    return result

def test(test_ds):
    start = time.time()
    for test_images, test_labels in test_ds:
        predictions = model_panneau(test_images)
        t_loss = my_loss(test_labels, predictions)
        test_loss(t_loss)
        test_accuracy(test_labels, predictions)
    message = ' >>> Test: loss: {:6.4f}, accuracy: {:7.4f}%, time: {:7.4f}'
    print(message.format(test_loss.result(),
                         test_accuracy.result() * 100,
                         time.time() - start))
    test_loss.reset_states()
    test_accuracy.reset_states()

optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.BinaryCrossentropy()
train_loss = tf.keras.metrics.Mean()
train_accuracy = tf.keras.metrics.BinaryAccuracy()
test_loss = tf.keras.metrics.Mean()
test_accuracy = tf.keras.metrics.BinaryAccuracy()
model_panneau = panneau_model(len(tab_panneau))
checkpoint = tf.train.Checkpoint(model_panneau=model_panneau)

print("Training")
train(train_ds, nbr_entrainement)
checkpoint.save(file_prefix="server-ia/data/modeles/road_sign_speed_trainers/modele_panneau")

quit()  # Remove this line to run the visual check below.

for i in range(len(test_images)):
    prediction = model_panneau(np.array([test_images[i]]))
    print("prediction", prediction[0])
    if np.sum(prediction[0]) < 0.6:
        print("Not a sign")
    else:
        print("Sign detected:", tab_panneau[np.argmax(prediction[0])])
    cv2.imshow("image", test_images[i])
    if cv2.waitKey() & 0xFF == ord('q'):
        break
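For reference, a minimal sketch of restoring the checkpoint saved above for inference; it reuses the script's own definitions (panneau_model, tab_panneau, test_images), and the directory is the one passed to checkpoint.save:

# Sketch: rebuild the model, then restore the latest saved checkpoint.
model_panneau = panneau_model(len(tab_panneau))
checkpoint = tf.train.Checkpoint(model_panneau=model_panneau)
checkpoint.restore(tf.train.latest_checkpoint(
    "server-ia/data/modeles/road_sign_speed_trainers")).expect_partial()
prediction = model_panneau(np.array([test_images[0]]))  # illustrative input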
@@ -1,31 +0,0 @@
import os

# Run from the Darknet directory.
os.chdir("path/to/your/darknet")

# YOLOv3 training for road signs.
# Darknet runs its whole training schedule (max_batches in the .cfg) in a
# single invocation, so there is no need to loop over epochs from Python.

# Dataset locations (for reference; darknet reads the actual paths from data/obj.data)
train_data = "server-trainer/images/road_sign_trainers/train_speed"
valid_data = "server-trainer/images/road_sign_trainers/test_speed"

# Training parameters (must match the values in yolov3_custom_train.cfg)
batch_size = 64
subdivisions = 16

# Training command: -map tracks mAP during training, -gpus 0 selects the first
# GPU (drop it for a CPU-only build).
train_command = "./darknet detector train data/obj.data cfg/yolov3_custom_train.cfg yolov3.weights -map -dont_show -gpus 0"
os.system(train_command)

# Evaluate mAP with the final weights written to the backup directory
os.system("./darknet detector map data/obj.data cfg/yolov3_custom_test.cfg backup/yolov3_custom_train_final.weights")

print("Training finished!")
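For context, the darknet commands above assume a data/obj.data file roughly like the sketch below; the values are illustrative and must be adapted to the dataset:

# data/obj.data (sketch)
# classes = 2
# train   = data/train.txt
# valid   = data/test.txt
# names   = data/obj.names
# backup  = backup/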
@@ -1,33 +0,0 @@
import cv2
import os
import numpy as np
import pickle

image_dir = "server-trainer/images/avatars/"
current_id = 0
label_ids = {}
x_train = []
y_labels = []

# Each sub-directory of image_dir is one person; its name becomes the label.
for root, dirs, files in os.walk(image_dir):
    if len(files):
        label = root.split("/")[-1]
        for file in files:
            if file.endswith("jpg"):
                path = os.path.join(root, file)
                if label not in label_ids:
                    label_ids[label] = current_id
                    current_id += 1
                id_ = label_ids[label]
                image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
                x_train.append(image)
                y_labels.append(id_)

with open("server-ia/data/modeles/camera_identification_user/labels.pickle", "wb") as f:
    pickle.dump(label_ids, f)

y_labels = np.array(y_labels)
# LBPH lives in the cv2.face module (opencv-contrib-python); train() accepts a
# list of grayscale images, which may differ in size.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(x_train, y_labels)
recognizer.save("server-ia/data/modeles/camera_identification_user/trainner.yml")
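A minimal inference sketch for the artifacts saved above; predict() returns a label id and a distance (lower means a closer match), and the input file name here is hypothetical:

import cv2
import pickle

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("server-ia/data/modeles/camera_identification_user/trainner.yml")
with open("server-ia/data/modeles/camera_identification_user/labels.pickle", "rb") as f:
    id_to_label = {v: k for k, v in pickle.load(f).items()}

face = cv2.imread("avatar_test.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical input
id_, distance = recognizer.predict(face)
print(id_to_label[id_], distance)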
@@ -0,0 +1,117 @@
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
from sklearn.utils import shuffle
import os
import cv2
import csv
import dataset_params_edition as dataset

# Image size
size = 60
# Total number of images to generate
nombre_images_a_generer = 2000
# Directory containing the reference sign images
dir_images_panneaux = "server-trainer/images/autres_panneaux"
dir_images_genere_panneaux = "server-trainer/images/genere_autres_panneaux"
csv_file_path = "server-trainer/images/genere_autres_panneaux_labels.csv"

# Read the sign images from the given directory
def lire_images_panneaux(dir_images_panneaux, size=None):
    print(f"Reading images from directory: {dir_images_panneaux}")
    tab_panneau = []
    tab_image_panneau = []

    if not os.path.exists(dir_images_panneaux):
        quit(f"The image directory does not exist: {dir_images_panneaux}")

    files = os.listdir(dir_images_panneaux)

    if not files:
        quit(f"The image directory is empty: {dir_images_panneaux}")

    for file in sorted(files):
        if file.endswith("png"):
            tab_panneau.append(file.split(".")[0])
            image = cv2.imread(os.path.join(dir_images_panneaux, file))
            if size is not None:
                image = cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau

# Remove previously generated images and the old CSV file
if os.path.exists(dir_images_genere_panneaux):
    for file in os.listdir(dir_images_genere_panneaux):
        file_path = os.path.join(dir_images_genere_panneaux, file)
        if os.path.isfile(file_path):
            os.remove(file_path)

if os.path.exists(csv_file_path):
    os.remove(csv_file_path)

tab_panneau, tab_image_panneau = lire_images_panneaux(dir_images_panneaux, size)
print(f"Images read: {len(tab_image_panneau)}")

tab_images = np.array([]).reshape(0, size, size, 3)
tab_labels = []

images_par_panneau = nombre_images_a_generer // len(tab_image_panneau)
print(f"Images to generate per sign: {images_par_panneau}")

print("Generating augmented images...")
csv_data = []  # Rows for the CSV file
for id, image in enumerate(tab_image_panneau):
    lot = []
    for i in range(images_par_panneau):
        lot.append(dataset.modif_img(image))
    lot = np.array(lot)
    tab_images = np.concatenate([tab_images, lot])
    tab_labels = np.concatenate([tab_labels, np.full(len(lot), id)])

print(f"Total images generated: {len(tab_images)}")

tab_panneau = np.array(tab_panneau)
tab_images = np.array(tab_images, dtype=np.float32) / 255
tab_labels = np.array(tab_labels).reshape([-1, 1])

tab_images, tab_labels = shuffle(tab_images, tab_labels)
print("Data shuffled.")

print(f"Saving generated images to: {dir_images_genere_panneaux}")
if not os.path.exists(dir_images_genere_panneaux):
    os.makedirs(dir_images_genere_panneaux)

for i in range(len(tab_images)):
    # Unique file name from the index and the sign name
    file_name = "{}_{}.png".format(i, tab_panneau[int(tab_labels[i])])
    # Back to the 0-255 range before writing
    cv2.imwrite(os.path.join(dir_images_genere_panneaux, file_name), (tab_images[i] * 255.0).astype(np.uint8))

    # One CSV row per generated file ("OP" + the file index)
    label = "OP" + str(i)
    csv_data.append([file_name, label])

print("All images saved.")
print(f"Total images saved: {len(tab_images)}")

# Write the labels to the CSV file
print(f"Writing the labels to the CSV file: {csv_file_path}")
with open(csv_file_path, mode='w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(["filename", "label"])  # Header
    writer.writerows(csv_data)

print("CSV file generated.")

print("Displaying the generated images...")
for i in range(len(tab_images)):
    cv2.imshow("panneau", tab_images[i])
    print(f"label: {tab_labels[i][0]}, panneau: {tab_panneau[int(tab_labels[i])]}")
    key = cv2.waitKey(0) & 0xFF
    if key == ord('q'):
        print("Exit requested by the user.")
        break

cv2.destroyAllWindows()
print("Done.")
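For reference, a short sketch of reading the generated CSV back into (filename, label) pairs, e.g. to feed a training pipeline; only the standard-library csv module is needed:

import csv

with open("server-trainer/images/genere_autres_panneaux_labels.csv") as f:
    rows = list(csv.DictReader(f))  # header: filename, label
print(len(rows), rows[0]["filename"], rows[0]["label"])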
@@ -0,0 +1,97 @@
import cv2
import numpy as np
import random
import os

# Crop size
size = 60
# Video source and output directory
video = "server-ia/data/videos/autoroute.mp4"
dir_images_genere_sans_panneaux = "server-trainer/images/genere_sans_panneaux"

# Total number of images to generate
nbr_image = 2000

# Create the output directory if it does not exist
if not os.path.isdir(dir_images_genere_sans_panneaux):
    os.makedirs(dir_images_genere_sans_panneaux)
    print(f"Directory created: {dir_images_genere_sans_panneaux}")

# Check that the video exists
if not os.path.exists(video):
    print(f"Video not found: {video}")
    quit()

print(f"Video found: {video}")

cap = cv2.VideoCapture(video)
if not cap.isOpened():
    print(f"Error opening video: {video}")
    quit()

# Number of crops to take from each frame, spread over the whole video
nbr_frame_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
nbr_image_par_frame = int(nbr_image / nbr_frame_total) + 1

print(f"Images to generate: {nbr_image}")
print(f"Images to generate per frame: {nbr_image_par_frame}")

id = 0

# Track frame dimensions for the summary printed below
dimensions = []
min_w = float('inf')
max_w = 0
min_h = float('inf')
max_h = 0

while True:
    ret, frame = cap.read()
    if not ret:
        print("End of video or read error.")
        break

    h, w, c = frame.shape

    # Update the minimum and maximum dimensions
    min_w = min(min_w, w)
    max_w = max(max_w, w)
    min_h = min(min_h, h)
    max_h = max(max_h, h)

    # Record this frame's dimensions
    dimensions.append((w, h, c))

    if w < size or h < size:
        continue  # frame too small to crop

    for _ in range(nbr_image_par_frame):
        # Random size x size crop
        x = random.randint(0, w - size)
        y = random.randint(0, h - size)
        img = frame[y:y + size, x:x + size]

        # Save the crop
        img_filename = os.path.join(dir_images_genere_sans_panneaux, f"{id}.png")
        cv2.imwrite(img_filename, img)
        id += 1

        if id >= nbr_image:
            print(f"Target number of images reached: {nbr_image}")
            break

    if id >= nbr_image:
        break

cap.release()

# Report how many images were saved and where
print(f"Total images saved: {id}")
print(f"Output directory: {dir_images_genere_sans_panneaux}")

# Overall dimension statistics
average_w = np.mean([w for w, h, c in dimensions])
average_h = np.mean([h for w, h, c in dimensions])

print(f"Minimum dimensions: {min_w}x{min_h}")
print(f"Maximum dimensions: {max_w}x{max_h}")
print(f"Average dimensions: {average_w:.2f}x{average_h:.2f}")

print("Resources released, processing finished.")
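A quick sanity-check sketch for the generated crops; file names follow the sequential {id}.png pattern written above, starting at 0.png:

import os
import cv2

d = "server-trainer/images/genere_sans_panneaux"
sample = cv2.imread(os.path.join(d, "0.png"))
print(len(os.listdir(d)), "files; sample shape:", None if sample is None else sample.shape)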
@@ -0,0 +1,117 @@
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
from sklearn.utils import shuffle
import os
import cv2
import csv
import dataset_params_edition as dataset

# Image size
size = 60
# Total number of images to generate
nombre_images_a_generer = 2000
# Directory containing the reference sign images
dir_images_panneaux = "server-trainer/images/vitesse_panneaux"
dir_images_genere_panneaux = "server-trainer/images/genere_vitesse_panneaux"
csv_file_path = "server-trainer/images/genere_vitesse_panneaux_labels.csv"

# Read the sign images from the given directory
def lire_images_panneaux(dir_images_panneaux, size=None):
    print(f"Reading images from directory: {dir_images_panneaux}")
    tab_panneau = []
    tab_image_panneau = []

    if not os.path.exists(dir_images_panneaux):
        quit(f"The image directory does not exist: {dir_images_panneaux}")

    files = os.listdir(dir_images_panneaux)

    if not files:
        quit(f"The image directory is empty: {dir_images_panneaux}")

    for file in sorted(files):
        if file.endswith("png"):
            tab_panneau.append(file.split(".")[0])
            image = cv2.imread(os.path.join(dir_images_panneaux, file))
            if size is not None:
                image = cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau

# Remove previously generated images and the old CSV file
if os.path.exists(dir_images_genere_panneaux):
    for file in os.listdir(dir_images_genere_panneaux):
        file_path = os.path.join(dir_images_genere_panneaux, file)
        if os.path.isfile(file_path):
            os.remove(file_path)

if os.path.exists(csv_file_path):
    os.remove(csv_file_path)

tab_panneau, tab_image_panneau = lire_images_panneaux(dir_images_panneaux, size)
print(f"Images read: {len(tab_image_panneau)}")

tab_images = np.array([]).reshape(0, size, size, 3)
tab_labels = []

images_par_panneau = nombre_images_a_generer // len(tab_image_panneau)
print(f"Images to generate per sign: {images_par_panneau}")

print("Generating augmented images...")
csv_data = []  # Rows for the CSV file
for id, image in enumerate(tab_image_panneau):
    lot = []
    for i in range(images_par_panneau):
        lot.append(dataset.modif_img(image))
    lot = np.array(lot)
    tab_images = np.concatenate([tab_images, lot])
    tab_labels = np.concatenate([tab_labels, np.full(len(lot), id)])

print(f"Total images generated: {len(tab_images)}")

tab_panneau = np.array(tab_panneau)
tab_images = np.array(tab_images, dtype=np.float32) / 255
tab_labels = np.array(tab_labels).reshape([-1, 1])

tab_images, tab_labels = shuffle(tab_images, tab_labels)
print("Data shuffled.")

print(f"Saving generated images to: {dir_images_genere_panneaux}")
if not os.path.exists(dir_images_genere_panneaux):
    os.makedirs(dir_images_genere_panneaux)

for i in range(len(tab_images)):
    # Unique file name from the index and the sign name
    file_name = "{}_{}.png".format(i, tab_panneau[int(tab_labels[i])])
    # Back to the 0-255 range before writing
    cv2.imwrite(os.path.join(dir_images_genere_panneaux, file_name), (tab_images[i] * 255.0).astype(np.uint8))

    # One CSV row per generated file ("VP" + the file index)
    label = "VP" + str(i)
    csv_data.append([file_name, label])

print("All images saved.")
print(f"Total images saved: {len(tab_images)}")

# Write the labels to the CSV file
print(f"Writing the labels to the CSV file: {csv_file_path}")
with open(csv_file_path, mode='w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(["filename", "label"])  # Header
    writer.writerows(csv_data)

print("CSV file generated.")

print("Displaying the generated images...")
for i in range(len(tab_images)):
    cv2.imshow("panneau", tab_images[i])
    print(f"label: {tab_labels[i][0]}, panneau: {tab_panneau[int(tab_labels[i])]}")
    key = cv2.waitKey(0) & 0xFF
    if key == ord('q'):
        print("Exit requested by the user.")
        break

cv2.destroyAllWindows()
print("Done.")
@@ -1,8 +1,9 @@
import numpy as np
import cv2
import random
# dataset for train_panneau.py
import multiprocessing
from multiprocessing import Pool

def bruit(image_orig):
    # Additive noise with a random amplitude
    h, w, c = image_orig.shape
    n = np.random.randn(h, w, c) * random.randint(5, 30)
@@ -28,7 +29,10 @@ def modif_img(img):
    if np.random.randint(2):
        # Random perspective warp: jitter each corner by up to +/-a pixels
        a = int(max(w, h) / 5) + 1
        pts1 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
        pts2 = np.float32([[0 + random.randint(-a, a), 0 + random.randint(-a, a)],
                           [w - random.randint(-a, a), 0 + random.randint(-a, a)],
                           [0 + random.randint(-a, a), h - random.randint(-a, a)],
                           [w - random.randint(-a, a), h - random.randint(-a, a)]])
        M = cv2.getPerspectiveTransform(pts1, pts2)
        img = cv2.warpPerspective(img, M, (w, h))
@@ -71,12 +75,18 @@ def modif_img(img):
    return img

def _modif_img_from_bytes(args):
    # Rebuild the image from raw bytes inside the worker process, then augment it.
    # The copy() matters: np.frombuffer returns a read-only array.
    image_bytes, shape = args
    img = np.frombuffer(image_bytes, dtype=np.uint8).reshape(shape).copy()
    return modif_img(img)

def create_lot_img(image, nbr, nbr_thread=None):
    # Generate nbr augmented copies of image using a process pool.
    if nbr_thread is None:
        nbr_thread = multiprocessing.cpu_count()
    # Pass the image as bytes plus its shape so it pickles cleanly to workers.
    image_bytes = np.array(image, dtype=np.uint8).tobytes()
    with Pool(nbr_thread) as p:
        lot_result = p.map(_modif_img_from_bytes, [(image_bytes, image.shape)] * nbr)
    return lot_result
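A minimal usage sketch for create_lot_img (the module name dataset_params_edition matches the import used by the generation scripts above); the __main__ guard matters because multiprocessing re-imports the module on platforms that spawn workers:

import cv2
import dataset_params_edition as dataset

if __name__ == "__main__":
    image = cv2.imread("panneau.png")         # hypothetical input file
    lot = dataset.create_lot_img(image, 100)  # 100 augmented variants
    print(len(lot), lot[0].shape)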
@@ -1,85 +0,0 @@
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
from sklearn.utils import shuffle
import os
import cv2
import dataset
### GENERATES IMAGES WITH SIGNS FROM A MEDIA FILE ###
# Tuto25[OpenCV] Lecture des panneaux de vitesse p.1 (houghcircles) 28min

# Image size
size = 42
# Directory containing the reference sign images
dir_images_panneaux = "server-trainer/images/road_sign_speed_trainers/panneaux"
dir_images_genere_panneaux = "server-trainer/images/road_sign_speed_trainers/genere_panneaux"

# Read the sign images from the given directory
def lire_images_panneaux(dir_images_panneaux, size=None):
    tab_panneau = []
    tab_image_panneau = []

    # Check that the directory exists
    if not os.path.exists(dir_images_panneaux):
        quit("The image directory does not exist: {}".format(dir_images_panneaux))

    # List all files in the directory
    files = os.listdir(dir_images_panneaux)

    # Quit if the directory is empty
    if not files:
        quit("The image directory is empty: {}".format(dir_images_panneaux))

    # Walk through the files in the directory
    for file in sorted(files):
        # Only PNG files are used
        if file.endswith("png"):
            # The file name (without extension) becomes the class name
            tab_panneau.append(file.split(".")[0])

            # Read the image and resize it if a size is given
            image = cv2.imread(os.path.join(dir_images_panneaux, file))
            if size is not None:
                image = cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau

# Read the sign images
tab_panneau, tab_image_panneau = lire_images_panneaux(dir_images_panneaux, size)

# Arrays for the images and labels
tab_images = np.array([]).reshape(0, size, size, 3)
tab_labels = []

# Generate the training data: 100 augmented variants per sign
id = 0
for image in tab_image_panneau:
    lot = []
    for _ in range(100):
        lot.append(dataset.modif_img(image))
    lot = np.array(lot)
    tab_images = np.concatenate([tab_images, lot])
    tab_labels = np.concatenate([tab_labels, np.full(len(lot), id)])
    id += 1

# Convert the arrays and normalise the images
tab_panneau = np.array(tab_panneau)
tab_images = np.array(tab_images, dtype=np.float32) / 255
tab_labels = np.array(tab_labels).reshape([-1, 1])

# Shuffle the data
tab_images, tab_labels = shuffle(tab_images, tab_labels)

# Make sure the output directory exists, then save the generated images
if not os.path.isdir(dir_images_genere_panneaux):
    os.makedirs(dir_images_genere_panneaux)
for i in range(len(tab_images)):
    # Unique file name from the index and the sign name
    file_name = "{:d}_{}.png".format(i, tab_panneau[int(tab_labels[i])])
    # Back to the 0-255 range before writing
    cv2.imwrite(os.path.join(dir_images_genere_panneaux, file_name), (tab_images[i] * 255.0).astype(np.uint8))

# Display the images with their labels
for i in range(len(tab_images)):
    cv2.imshow("panneau", tab_images[i])
    print("label", tab_labels[i], "panneau", tab_panneau[int(tab_labels[i])])
    if cv2.waitKey() & 0xFF == ord('q'):
        quit()
@@ -1,39 +0,0 @@
import cv2
import numpy as np
import random
import os
### GENERATES IMAGES WITHOUT SIGNS FROM A MEDIA FILE ###
# YT: Tuto25[Tensorflow2] Lecture des panneaux de vitesse p.2 - 4min30

size = 42
video = "server-trainer/videos/autoroute.mp4"
dir_images_genere_sans_panneaux = "server-trainer/images/road_sign_speed_trainers/genere_sans_panneaux"

if not os.path.isdir(dir_images_genere_sans_panneaux):
    os.makedirs(dir_images_genere_sans_panneaux)

if not os.path.exists(video):
    print("Video not found:", video)
    quit()

cap = cv2.VideoCapture(video)

id = 0
nbr_image = 1500

# Number of random crops per frame, spread over the whole video
nbr_image_par_frame = int(nbr_image / cap.get(cv2.CAP_PROP_FRAME_COUNT)) + 1

while True:
    ret, frame = cap.read()
    if not ret:
        quit()
    h, w, c = frame.shape

    for cpt in range(nbr_image_par_frame):
        # Random size x size crop
        x = random.randint(0, w - size)
        y = random.randint(0, h - size)
        img = frame[y:y + size, x:x + size]
        cv2.imwrite(dir_images_genere_sans_panneaux + "/{:d}.png".format(id), img)
        id += 1
        if id == nbr_image:
            quit()
BIN  server-trainer/images/Thumbs.db (new file)
(90 new image files added under server-trainer/images/, 1.3-5.7 KiB each; binary previews omitted)