{ "cells": [ { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "#Tous les codes sont basés sur l'environnement suivant\n", "#python 3.7\n", "#opencv 3.1.0\n", "#pytorch 1.4.0\n", "\n", "import torch\n", "from torch.autograd import Variable\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import cv2\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import random\n", "import math\n", "import pickle\n", "import random\n", "from PIL import Image\n", "import sys\n", "from glob import glob\n", "from IPython.display import clear_output\n", "from datetime import datetime\n", "import json\n", "from time import time\n", "from PIL import Image\n", "from torchvision import transforms" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "# Les fonctions dans ce bloc ne sont pas utilisées par le réseau, mais certaines fonctions d'outils\n", "\n", "# Les fonctions de ce bloc se trouvent dans le programme d'apprentissage \n", "# “Apprentissage_MSELoss_avec_GPU“\n", "# et les commentaires détaillés se trouvent dans le programme d'apprentissage\n", "\n", "def tensor_imshow(im_tensor,cannel):\n", " b,c,h,w=im_tensor.shape\n", " if c==1:\n", " plt.imshow(im_tensor.squeeze().detach().numpy())\n", " else:\n", " plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:])\n", " \n", "def get_training_fragment(frag_size,im):\n", " h,w,c=im.shape\n", " n=random.randint(0,int(h/frag_size)-1)\n", " m=random.randint(0,int(w/frag_size)-1)\n", " \n", " shape=frag_size/4\n", " vt_h=math.ceil((h+1)/shape)\n", " vt_w=math.ceil((w+1)/shape)\n", " vt=np.zeros([vt_h,vt_w])\n", " vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)\n", " vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)\n", " vt[vt_h_po,vt_w_po]=1\n", " vt = np.float32(vt)\n", " vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))\n", " \n", " return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt\n", "\n", "def write_result_in_file(result,file_name):\n", " n=0\n", " with open(file_name,'w') as file:\n", " for i in range(len(result)):\n", " while n=2 and m>=2:# Si n=m=1,Notre réseau n'a plus besoin de plus de couches pour agréger les cartes de corrélation\n", " self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n", " self.shift2.weight=kernel_shift_ini(n,m)\n", " self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n", " self.add2.weight=kernel_add_ini(n,m)\n", " \n", " n=int(n/2)\n", " m=int(m/2)\n", " if n>=2 and m>=2:\n", " self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n", " self.shift3.weight=kernel_shift_ini(n,m)\n", " self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n", " self.add3.weight=kernel_add_ini(n,m)\n", " \n", " def get_descripteur(self,img,using_cuda):\n", " # Utilisez Conv1 pour calculer le descripteur,\n", " descripteur_img=self.Relu(self.conv1(img))\n", " b,c,h,w=descripteur_img.shape\n", " couche_constante = 0.5 * torch.ones([b, 1, h, w])\n", " if using_cuda:\n", " couche_constante=couche_constante.cuda()\n", " # Ajouter une couche constante pour éviter la division par 0 lors de la normalisation\n", " descripteur_img = torch.cat((descripteur_img,couche_constante),1)\n", " # la normalisation\n", " descripteur_img_norm = F.normalize(descripteur_img) #/torch.norm(descripteur_img,dim=1, keepdim = True)\n", " return descripteur_img_norm\n", " \n", " def forward(self,img,frag,using_cuda):\n", " psize=4\n", " 
# Utilisez Conv1 pour calculer le descripteur,\n", " descripteur_input2=self.get_descripteur(frag,using_cuda)\n", " descripteur_input1=self.get_descripteur(img,using_cuda)\n", " \n", " b,c,h,w=frag.shape\n", " n=int(h/psize)\n", " m=int(w/psize)\n", " \n", " db,dc,dh,dw = descripteur_input1.shape\n", " \n", " #######################################\n", " # Calculer la carte de corrélation par convolution pour les n*m patchs plus petit.\n", " for i in range(n):\n", " for j in range(m):\n", " if i==0 and j==0:\n", " map_corre=F.conv2d(descripteur_input1.view(1,db*dc,dh,dw),get_patch(descripteur_input2,psize,i,j),padding=2,groups=db)\n", "\n", " map_corre=map_corre.view(db,1,map_corre.size(2),map_corre.size(3))\n", " else:\n", " a=F.conv2d(descripteur_input1.view(1,db*dc,dh,dw),get_patch(descripteur_input2,psize,i,j),padding=2, groups=db)\n", " a=a.view(db,1,a.size(2),a.size(3))\n", " map_corre=torch.cat((map_corre,a),1)\n", " \n", " ########################################\n", " # Étape de polymérisation\n", " map_corre,idx1=self.maxpooling(map_corre)\n", " map_corre=self.shift1(map_corre)\n", " map_corre=self.add1(map_corre)\n", " \n", " #########################################\n", " # Répétez l'étape d'agrégation jusqu'à obtenir le graphique de corrélation du patch d'entrée\n", " n=int(n/2)\n", " m=int(m/2)\n", " if n>=2 and m>=2:\n", " map_corre,idx2=self.maxpooling(map_corre)\n", " map_corre=self.shift2(map_corre)\n", " map_corre=self.add2(map_corre)\n", " \n", " \n", " n=int(n/2)\n", " m=int(m/2)\n", " if n>=2 and m>=2:\n", " map_corre,idx3=self.maxpooling(map_corre)\n", " map_corre=self.shift3(map_corre)\n", " map_corre=self.add3(map_corre)\n", " else:\n", " idx3=idx2\n", "\n", " #b,c,h,w=map_corre.shape\n", " # Normalisation de la division par maximum\n", " map_corre=map_corre/map_corre.max()\n", " # Normalisation SoftMax\n", " #map_corre=(F.softmax(map_corre.reshape(b,1,h*w,1),dim=2)).reshape(b,c,h,w)\n", " return map_corre, idx1, idx2, idx3" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [], "source": [ "# Les fonctions de ce bloc sont utilisées pour appliquer le réseau à des fragments (pas à des patchs carrés)\n", "\n", "\n", "# Cette fonction permet de sélectionner un ensemble de patchs carrés à partir d'un fragment\n", "# Le paramètre “frag_size” fait ici référence à la taille du patch d'entrée carré (16 * 16)\n", "# Le paramètre “seuillage” limite la proportion de pixels non noirs dans chaque patch\n", "# Le paramètre “limite” peut limiter le nombre de correctifs trouvés dans chaque fragment\n", "def get_patch_list(frag,frag_size,limite,seuillage):\n", " n=0\n", " m=0\n", " h,w,c=frag.shape\n", " patch_list=[]\n", " position_list=[]\n", " for i in range(4):\n", " if len(patch_list)>limite and limite!=0:\n", " break\n", " for j in range(4):\n", " if len(patch_list)>limite and limite!=0:\n", " break\n", " n_offset=i*4 # n offset\n", " m_offset=j*4 # m offset\n", " n=0\n", " while n+frag_size+n_offset0:\n", " rot_frag=math.atan(tan_rot)*(180/math.pi)\n", " else:\n", " rot_frag=math.atan(tan_rot)*(180/math.pi)+180\n", " rot_frag=-rot_frag\n", " if rot_frag>0:\n", " rot_frag-=360\n", " return centre[0][0],centre[1][0],rot_frag\n", "\n", "# Vérifiez les résultats de Ransac en avec des changements de distance euclidienne\n", "def test_frag(inline,frag,fres):\n", " itera=10\n", " frag_inline=[]\n", " fres_inline=[]\n", " # Metter les coordonnées du point inline dans \"frag_inline[]\",et \"fres_inline[]\"\n", " for i in range(np.size(inline,0)):\n", " if 
inline[i]==1:\n", " frag_inline.append([frag[i][0],frag[i][1]])\n", " fres_inline.append([fres[i][0],fres[i][1]])\n", " p=[]\n", " \n", " # Faites une boucle dix fois, \n", " # sélectionnez à chaque fois deux paires correspondantes inline \n", " # calculer le changement de leur distance euclidienne\n", " for i in range(itera):\n", " point_test=selectionner_points(2,np.size(frag_inline,0))\n", " diff_x_frag=frag_inline[point_test[1]][0]-frag_inline[point_test[0]][0]\n", " diff_y_frag=frag_inline[point_test[1]][1]-frag_inline[point_test[0]][1]\n", " diff_frag=math.sqrt(math.pow(diff_x_frag,2)+math.pow(diff_y_frag,2))\n", " \n", " diff_x_fres=fres_inline[point_test[1]][0]-fres_inline[point_test[0]][0]\n", " diff_y_fres=fres_inline[point_test[1]][1]-fres_inline[point_test[0]][1]\n", " diff_fres=math.sqrt(math.pow(diff_x_fres,2)+math.pow(diff_y_fres,2))\n", " if diff_frag !=0:\n", " fsf=diff_fres/diff_frag\n", " p.append([fsf])\n", " result=np.mean(p)\n", " return result\n", "\n", "def frag_match(frag,img,position):\n", " \n", " frag_size=frag.size\n", " centre_frag=creer_point(frag_size[1]/2,frag_size[0]/2)\n", " \n", " retained_matches = []\n", " frag=[]\n", " fres=[]\n", " \n", " for i in range(len(position)):\n", " frag.append([float(position[i][0]),float(position[i][1])])\n", " fres.append([float(position[i][2]),float(position[i][3])])\n", " \n", " if np.size(frag)>0:\n", " # Calculer la matrice de transformation affine à l'aide de la méthode Ransac\n", " h,inline=cv2.estimateAffinePartial2D(np.array(frag),np.array(fres))\n", " # Si “h” n'est pas sous la forme de matrice 2 * 3, la matrice de transformation affine n'est pas trouvée\n", " if np.size(h)!=6:\n", " return ([-1])\n", " else:\n", " x,y,rot=position_rotation(h,centre_frag)\n", " pourcenttage=sum(inline)/np.size(frag,0)\n", " # Le nombre de points inline doit être supérieur à un certain nombre\n", " if sum(inline)>3:\n", " p=test_frag(inline,frag,fres)\n", " # La distance euclidienne entre les points correspondants ne doit pas trop changer, \n", " # sinon cela prouve que le résultat de Ransac est incorrect\n", " # ici,le changement de la distance euclidienne sont entre 0.7 et 1.3\n", " if abs(p-1)<0.3:\n", " # Ce n'est qu'alors que Ransac renvoie le résultat correct\n", " return([round(x),round(y),round(rot,3)])\n", " else:\n", " return ([-2])\n", " else:\n", " return ([-3])\n", " else:\n", " return ([-4]) " ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Fresque 1, fragment 10/200 (5.0%)\n", "Temps par fragment: 5.46. 
ETA = 1.04e+03s\n" ] }, { "ename": "KeyboardInterrupt", "evalue": "", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0;31m#rotation=0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;31m#rotation_base=0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 53\u001b[0;31m \u001b[0mscore_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mpositions_patchs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrun_net_v3\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnet\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfrag\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfrag_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m60\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0.7\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0musing_cuda\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrotation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 54\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0mfrag_position\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfrag_match\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfrag\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mpositions_patchs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m\u001b[0m in \u001b[0;36mrun_net_v3\u001b[0;34m(net, img, frag, frag_size, limite, seuillage, using_cuda, rotation)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mres\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0midx1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0midx2\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0midx3\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnet\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg_tensor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfrag_tensor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0musing_cuda\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0musing_cuda\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0mres\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mres\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0midx1\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0midx1\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0midx2\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0midx2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } ], "source": [ "if __name__==\"__main__\":\n", " \n", " # Variable du réseau\n", " frag_size=16\n", " using_cuda=True\n", " for fresque_id in [1]:\n", " #fresque_id = 2\n", "\n", " # Variable des données\n", " base_dir = './training_data_small/'\n", " fresque_filename = base_dir+'fresque_small{}.ppm'\n", " fresque_filename_wild = base_dir+'fresque_small*.ppm'\n", " fragment_filename = 
base_dir+'fragments/fresque{}/frag_bench_{:05}.ppm'\n", " fragments_filename_wild = base_dir+'fragments/fresque{}/frag_bench_*.ppm'\n", " vt_filename = base_dir+'fragments/fresque{}/vt/frag_bench_{:05}_vt.txt'\n", " net_filename = \"./trained_net/net_trainned_with-cross-entropy_02-10_07-30_0115\"\n", " \n", " expe_id = int(net_filename.split(\"_\")[-1]) # ID de l'expérience, à ajouter à tout les fichiers écrits pour identifier les résultats d'une même expérience.\n", " date = datetime.now().strftime(\"%m-%d_%H-%M\")\n", " results_filename = './results_bench/results_bench_f{}_{}_{:04}'.format(fresque_id,date,expe_id)\n", "\n", " # Chargement du réseau\n", " net=load_net(net_filename)\n", " net.maxpooling=nn.MaxPool2d(3,stride=2, padding=1, return_indices=True)\n", "\n", " # Charge la fresque en mémoire\n", " img=Image.open(fresque_filename.format(fresque_id))\n", " \n", " #N_fragments = 20\n", " N_fragments = len(glob(fragments_filename_wild.format(fresque_id)))\n", " print(fragments_filename_wild.format(fresque_id))\n", " print(N_fragments)\n", "\n", " # Crée les tableau de résultats\n", " distances, matched, positions, verite_terrain = [],[],[],[]\n", " tailles = []\n", "\n", " time_old = time()\n", " # Parcour tout les fragments de bench de cette fresque\n", " for fragment_id in range(N_fragments):\n", " clear_output(wait=True)\n", " print(\"Fresque {}, fragment {}/{} ({:.3}%)\".format(fresque_id,fragment_id,N_fragments,(fragment_id/N_fragments*100)))\n", " delta = time()-time_old\n", " print(\"Temps par fragment: {:.3}. ETA = {:.3}s\".format(delta,(N_fragments-fragment_id)*delta))\n", " time_old = time()\n", " frag = Image.open(fragment_filename.format(fresque_id,fragment_id))\n", "\n", " # Faites pivoter les pièces de 20 degrés à chaque fois pour correspondre, répétez 18 fois\n", " for i in [0,17]:\n", " rotation=20*i\n", " #rotation=0\n", " #rotation_base=0\n", " score_list,positions_patchs=run_net_v3(net,img,frag,frag_size,60,0.7,using_cuda,rotation)\n", " \n", " frag_position=frag_match(frag,img,positions_patchs)\n", " # Lorsque Ransac obtient le bon résultat, sortez de la boucle\n", " if len(frag_position)==3:\n", " rotation_base=i*20\n", " break\n", " # Si Ransac trouve une solution, la variable renvoyé est une liste de deux positions et une rotation\n", " if len(frag_position)==3:\n", " \n", " # MATCHED\n", " matched.append(1)\n", "\n", " # POSITION\n", " frag_position[2]=rotation_base-360-frag_position[2]\n", " if frag_position[2]>0:\n", " frag_position[2]=frag_position[2]-360\n", " positions.append([frag_position[0],frag_position[1],round(frag_position[2],3)])\n", "\n", " # VERITE TERRAIN\n", " with open(vt_filename.format(fresque_id,fragment_id), 'r') as f:\n", " data_vt = f.read().splitlines()\n", " verite_terrain.append([int(data_vt[2]),int(data_vt[3]),frag.size[0],frag.size[1]])\n", "\n", " # DISTANCE\n", " distances.append(np.linalg.norm([float(data_vt[3])-float(frag_position[0]),float(data_vt[2])-float(frag_position[1])]))\n", " else:\n", " matched.append(0)\n", " distances.append(-1)\n", " positions.append([])\n", " verite_terrain.append([])\n", "\n", " del frag\n", "\n", " meta = {'date':date,'base_dir':base_dir,'fresque_id':fresque_id,'fresque_taille':img.size,'N_fragments': N_fragments,'expe_id': expe_id}\n", " res = {'meta':meta, 'matched':matched,'distances':distances,'positions':positions,'vt':verite_terrain}\n", "\n", " with open(results_filename,'w') as f:\n", " f.write(json.dumps(res))\n", "\n", " print(\"Sauvegardé dans {}\".format(results_filename))" ] }, { 
"cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Sauvegardé dans ./results_bench/results_bench_f1_03-12_12-41_0115\n" ] } ], "source": [ "meta = {'date':date,'base_dir':base_dir,'fresque_id':fresque_id,'fresque_taille':img.size,'N_fragments': N_fragments,'expe_id': expe_id}\n", "res = {'meta':meta, 'matched':matched,'distances':distances,'positions':positions,'vt':verite_terrain}\n", "\n", "with open(results_filename,'w') as f:\n", " f.write(json.dumps(res))\n", "\n", "print(\"Sauvegardé dans {}\".format(results_filename))" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "20" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Clear GPU memory \n", "import gc\n", "torch.cuda.empty_cache()\n", "gc.collect()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.3" } }, "nbformat": 4, "nbformat_minor": 4 }