Add the benchmark with minibatches
This commit is contained in:
parent
200a55eed2
commit
f1d693b678
1 changed file with 748 additions and 0 deletions
748
Benchmark_MB-Backtracking-16-32.ipynb
Normal file
@ -0,0 +1,748 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"# All of the code is based on the following environment:\n",
"# python 3.7\n",
"# opencv 3.1.0\n",
"# pytorch 1.4.0\n",
"\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import cv2\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import random\n",
"import math\n",
"import pickle\n",
"import sys\n",
"import json\n",
"from glob import glob\n",
"from IPython.display import clear_output\n",
"from datetime import datetime\n",
"from time import time\n",
"from PIL import Image\n",
"from torchvision import transforms"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# The functions in this block are not used by the network itself; they are utility functions.\n",
"\n",
"# They also appear in the training program\n",
"# \"Apprentissage_MSELoss_avec_GPU\",\n",
"# where they are commented in detail.\n",
"\n",
"def tensor_imshow(im_tensor,channel):\n",
"    b,c,h,w=im_tensor.shape\n",
"    if c==1:\n",
"        plt.imshow(im_tensor.squeeze().detach().numpy())\n",
"    else:\n",
"        plt.imshow(im_tensor.squeeze().detach().numpy()[channel,:])\n",
"\n",
"def get_training_fragment(frag_size,im):\n",
"    # Cut a random frag_size x frag_size fragment out of the image and build the\n",
"    # corresponding ground-truth map (a single 1 at the fragment's center)\n",
"    h,w,c=im.shape\n",
"    n=random.randint(0,int(h/frag_size)-1)\n",
"    m=random.randint(0,int(w/frag_size)-1)\n",
"    \n",
"    shape=frag_size/4\n",
"    vt_h=math.ceil((h+1)/shape)\n",
"    vt_w=math.ceil((w+1)/shape)\n",
"    vt=np.zeros([vt_h,vt_w])\n",
"    vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)\n",
"    vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)\n",
"    vt[vt_h_po,vt_w_po]=1\n",
"    vt = np.float32(vt)\n",
"    vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))\n",
"    \n",
"    return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt\n",
"\n",
"def write_result_in_file(result,file_name):\n",
"    n=0\n",
"    with open(file_name,'w') as file:\n",
"        for i in range(len(result)):\n",
"            while n<result[i][0]:\n",
"                s=str(n)\n",
"                n=n+1\n",
"                s=s+\"\\n\"\n",
"                file.write(s)\n",
"            s=str(result[i][0])+\" \"+str(result[i][1])+\" \"+str(result[i][2])+\" \"+str(result[i][3])\n",
"            s=s+\"\\n\"\n",
"            n=n+1\n",
"            file.write(s)\n",
"\n",
"def img2tensor(im):\n",
"    im=np.array(im,dtype=\"float32\")\n",
"    tensor_cv = torch.from_numpy(np.transpose(im, (2, 0, 1)))\n",
"    im_tensor=tensor_cv.unsqueeze(0)\n",
"    return im_tensor\n",
"\n",
"def show_coordonnee(position_pred):\n",
"    # Return the total score of the correlation map and the (mean) coordinates of its maximum\n",
"    map_corre=position_pred.squeeze().detach().numpy()\n",
"    score=sum(sum(map_corre))\n",
"    h,w=map_corre.shape\n",
"    max_value=map_corre.max()\n",
"    coordonnee=np.where(map_corre==max_value)\n",
"    return score,coordonnee[0].mean(),coordonnee[1].mean()\n",
"\n",
"def test_fragment32_32(frag,seuillage):\n",
"    # Keep a patch only if the proportion of black pixels stays below the threshold\n",
"    a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]\n",
"    mask = (a == 0)\n",
"    arr_new = a[mask]\n",
"    return arr_new.size/a.size<=(1-seuillage)\n",
"\n",
"def save_net(file_path,net):\n",
"    with open(file_path, 'wb') as pkl_file:\n",
"        pickle.dump(net,pkl_file)\n",
"\n",
"def load_net(file_path):\n",
"    with open(file_path, 'rb') as pkl_file:\n",
"        return pickle.load(pkl_file)\n",
"\n",
"def show_mem(txt):\n",
"    # Print total / reserved / allocated GPU memory, in MB\n",
"    t = torch.cuda.get_device_properties(0).total_memory\n",
"    r = torch.cuda.memory_reserved(0)\n",
"    a = torch.cuda.memory_allocated(0)\n",
"    print(\"---- {} ----\".format(txt))\n",
"    print(\"- Total: {} -\".format(t/1e6))\n",
"    print(\"- Reserved: {} -\".format(r/1e6))\n",
"    print(\"- Allocated: {} -\".format(a/1e6))\n",
"    print(\"---- ----- ----\\n\")"
]
},
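{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sketch (not part of the original benchmark): get_training_fragment() on a\n",
"# dummy image. The ground-truth map has one cell per frag_size/4 step and a\n",
"# single 1 at the fragment's center.\n",
"_im = np.random.rand(64,64,3).astype('float32')\n",
"_frag,_vt = get_training_fragment(16,_im)\n",
"print(_frag.shape, _vt.shape, int(_vt.sum()))\n"
]
},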
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"# Create DeepMatch-style weights as the initial value of Conv1 (optional)\n",
"def ini():\n",
"    kernel=torch.zeros([8,3,3,3])\n",
"    array_0=np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype='float32')\n",
"    array_1=np.array([[2,1,0],[1,0,-1],[0,-1,-2]],dtype='float32')\n",
"    array_2=np.array([[1,0,-1],[2,0,-2],[1,0,-1]],dtype='float32')\n",
"    array_3=np.array([[0,-1,-2],[1,0,-1],[2,1,0]],dtype='float32')\n",
"    array_4=np.array([[-1,-2,-1],[0,0,0],[1,2,1]],dtype='float32')\n",
"    array_5=np.array([[-2,-1,0],[-1,0,1],[0,1,2]],dtype='float32')\n",
"    array_6=np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype='float32')\n",
"    array_7=np.array([[0,1,2],[-1,0,1],[-2,-1,0]],dtype='float32')\n",
"    for i in range(3):\n",
"        kernel[0,i,:]=torch.from_numpy(array_0)\n",
"        kernel[1,i,:]=torch.from_numpy(array_1)\n",
"        kernel[2,i,:]=torch.from_numpy(array_2)\n",
"        kernel[3,i,:]=torch.from_numpy(array_3)\n",
"        kernel[4,i,:]=torch.from_numpy(array_4)\n",
"        kernel[5,i,:]=torch.from_numpy(array_5)\n",
"        kernel[6,i,:]=torch.from_numpy(array_6)\n",
"        kernel[7,i,:]=torch.from_numpy(array_7)\n",
"    return torch.nn.Parameter(kernel,requires_grad=True)\n",
"\n",
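"# Added sanity check (a minimal sketch, not in the original notebook):\n",
"# ini() returns the Sobel kernel rotated in 45-degree steps, i.e. 8 oriented\n",
"# edge filters copied over the 3 input channels.\n",
"assert ini().shape == (8, 3, 3, 3)\n",
"\n",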
"# Calculer le poids initial de la couche convolutive add\n",
|
||||||
|
"# n, m signifie qu'il y a n * m sous-patches dans le patch d'entrée\n",
|
||||||
|
"# Par exemple, le patch d'entrée est 16 * 16, pour les patchs 4 * 4 de la première couche, n = 4, m = 4\n",
|
||||||
|
"# pour les patchs 8 * 8 de la deuxième couche, n = 2, m = 2\n",
|
||||||
|
"def kernel_add_ini(n,m):\n",
|
||||||
|
" input_canal=int(n*m)\n",
|
||||||
|
" output_canal=int(n/2)*int(m/2)\n",
|
||||||
|
" for i in range(int(n/2)):\n",
|
||||||
|
" for j in range(int(m/2)):\n",
|
||||||
|
" kernel_add=np.zeros([1,input_canal],dtype='float32')\n",
|
||||||
|
" kernel_add[0,i*2*m+j*2]=1\n",
|
||||||
|
" kernel_add[0,i*2*m+j*2+1]=1\n",
|
||||||
|
" kernel_add[0,(i*2+1)*m+j*2]=1\n",
|
||||||
|
" kernel_add[0,(i*2+1)*m+j*2+1]=1\n",
|
||||||
|
" if i==0 and j==0:\n",
|
||||||
|
" add=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
|
||||||
|
" else:\n",
|
||||||
|
" add_=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
|
||||||
|
" add=torch.cat((add,add_),0)\n",
|
||||||
|
" return torch.nn.Parameter(add,requires_grad=False) \n",
|
||||||
|
"\n",
|
||||||
|
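"# Added sanity check (sketch): for a 16x16 input patch (n = m = 4), 'add' is a\n",
"# 4x16x1x1 kernel; as a 1x1 convolution it sums each 2x2 block of correlation\n",
"# maps into a single parent channel.\n",
"assert kernel_add_ini(4,4).shape == (4, 16, 1, 1)\n",
"\n",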
"# Calculer le poids initial de la couche convolutive shift\n",
|
||||||
|
"# shift+add Peut réaliser l'étape de l'agrégation\n",
|
||||||
|
"# Voir ci-dessus pour les paramètres n et m. \n",
|
||||||
|
"# Pour des étapes plus détaillées, veuillez consulter mon rapport de stage\n",
|
||||||
|
"def kernel_shift_ini(n,m):\n",
|
||||||
|
" input_canal=int(n*m)\n",
|
||||||
|
" output_canal=int(n*m)\n",
|
||||||
|
" \n",
|
||||||
|
" kernel_shift=torch.zeros([output_canal,input_canal,3,3])\n",
|
||||||
|
" \n",
|
||||||
|
" array_0=np.array([[1,0,0],[0,0,0],[0,0,0]],dtype='float32')\n",
|
||||||
|
" array_1=np.array([[0,0,1],[0,0,0],[0,0,0]],dtype='float32')\n",
|
||||||
|
" array_2=np.array([[0,0,0],[0,0,0],[1,0,0]],dtype='float32')\n",
|
||||||
|
" array_3=np.array([[0,0,0],[0,0,0],[0,0,1]],dtype='float32')\n",
|
||||||
|
" \n",
|
||||||
|
" kernel_shift_0=torch.from_numpy(array_0)\n",
|
||||||
|
" kernel_shift_1=torch.from_numpy(array_1)\n",
|
||||||
|
" kernel_shift_2=torch.from_numpy(array_2)\n",
|
||||||
|
" kernel_shift_3=torch.from_numpy(array_3)\n",
|
||||||
|
" \n",
|
||||||
|
" \n",
|
||||||
|
" for i in range(n):\n",
|
||||||
|
" for j in range(m):\n",
|
||||||
|
" if i==0 and j==0:\n",
|
||||||
|
" kernel_shift[0,0,:]=kernel_shift_0\n",
|
||||||
|
" else:\n",
|
||||||
|
" if i%2==0 and j%2==0:\n",
|
||||||
|
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_0\n",
|
||||||
|
" if i%2==0 and j%2==1:\n",
|
||||||
|
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_1\n",
|
||||||
|
" if i%2==1 and j%2==0:\n",
|
||||||
|
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_2\n",
|
||||||
|
" if i%2==1 and j%2==1:\n",
|
||||||
|
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_3\n",
|
||||||
|
" \n",
|
||||||
|
" return torch.nn.Parameter(kernel_shift,requires_grad=False) \n",
|
||||||
|
"\n",
|
||||||
|
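"# Added sketch: shift followed by add on a dummy stack of n*m = 4 correlation\n",
"# maps; each output channel aggregates one 2x2 block of child maps.\n",
"_shift,_add = kernel_shift_ini(2,2), kernel_add_ini(2,2)\n",
"_maps = torch.rand(1,4,9,9)\n",
"assert F.conv2d(F.conv2d(_maps,_shift,padding=1),_add).shape == (1, 1, 9, 9)\n",
"\n",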
"# Trouvez le petit patch(4 * 4) dans la n ème ligne et la m ème colonne du patch d'entrée\n",
|
||||||
|
"# Ceci est utilisé pour calculer la convolution et obtenir la carte de corrélation\n",
|
||||||
|
"def get_patch(fragment,psize,n,m):\n",
|
||||||
|
" return fragment[:,:,n*psize:(n+1)*psize,m*psize:(m+1)*psize]\n",
|
||||||
|
"\n",
|
||||||
|
"###################################################################################################################\n",
|
||||||
|
"class Net(nn.Module):\n",
|
||||||
|
" def __init__(self,frag_size,psize):\n",
|
||||||
|
" super(Net, self).__init__()\n",
|
||||||
|
" \n",
|
||||||
|
" h_fr=frag_size\n",
|
||||||
|
" w_fr=frag_size\n",
|
||||||
|
" \n",
|
||||||
|
" n=int(h_fr/psize) # n*m patches dans le patch d'entrée\n",
|
||||||
|
" m=int(w_fr/psize)\n",
|
||||||
|
" \n",
|
||||||
|
" self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)\n",
|
||||||
|
" # Si vous souhaitez initialiser Conv1 avec les poids de DeepMatch, exécutez la ligne suivante\n",
|
||||||
|
" # self.conv1.weight=ini()\n",
|
||||||
|
" self.Relu = nn.ReLU(inplace=True)\n",
|
||||||
|
" self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1, return_indices=True)\n",
|
||||||
|
" \n",
|
||||||
|
" self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
|
||||||
|
" self.shift1.weight=kernel_shift_ini(n,m)\n",
|
||||||
|
" self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
|
||||||
|
" self.add1.weight=kernel_add_ini(n,m)\n",
|
||||||
|
" \n",
|
||||||
|
" n=int(n/2)\n",
|
||||||
|
" m=int(m/2)\n",
|
||||||
|
" if n>=2 and m>=2:# Si n=m=1,Notre réseau n'a plus besoin de plus de couches pour agréger les cartes de corrélation\n",
|
||||||
|
" self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
|
||||||
|
" self.shift2.weight=kernel_shift_ini(n,m)\n",
|
||||||
|
" self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
|
||||||
|
" self.add2.weight=kernel_add_ini(n,m)\n",
|
||||||
|
" \n",
|
||||||
|
" n=int(n/2)\n",
|
||||||
|
" m=int(m/2)\n",
|
||||||
|
" if n>=2 and m>=2:\n",
|
||||||
|
" self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
|
||||||
|
" self.shift3.weight=kernel_shift_ini(n,m)\n",
|
||||||
|
" self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
|
||||||
|
" self.add3.weight=kernel_add_ini(n,m)\n",
|
||||||
|
" \n",
|
||||||
|
" def get_descripteur(self,img,using_cuda):\n",
|
||||||
|
" # Utilisez Conv1 pour calculer le descripteur,\n",
|
||||||
|
" descripteur_img=self.Relu(self.conv1(img))\n",
|
||||||
|
" b,c,h,w=descripteur_img.shape\n",
|
||||||
|
" couche_constante = 0.5 * torch.ones([b, 1, h, w])\n",
|
||||||
|
" if using_cuda:\n",
|
||||||
|
" couche_constante=couche_constante.cuda()\n",
|
||||||
|
" # Ajouter une couche constante pour éviter la division par 0 lors de la normalisation\n",
|
||||||
|
" descripteur_img = torch.cat((descripteur_img,couche_constante),1)\n",
|
||||||
|
" # la normalisation\n",
|
||||||
|
" descripteur_img_norm = F.normalize(descripteur_img) #/torch.norm(descripteur_img,dim=1, keepdim = True)\n",
|
||||||
|
" return descripteur_img_norm\n",
|
||||||
|
" \n",
|
||||||
|
" def forward(self,img,frag,using_cuda):\n",
|
||||||
|
" psize=4\n",
|
||||||
|
" # Utilisez Conv1 pour calculer le descripteur,\n",
|
||||||
|
" descripteur_input2=self.get_descripteur(frag,using_cuda)\n",
|
||||||
|
" descripteur_input1=self.get_descripteur(img,using_cuda)\n",
|
||||||
|
" \n",
|
||||||
|
" b,c,h,w=frag.shape\n",
|
||||||
|
" n=int(h/psize)\n",
|
||||||
|
" m=int(w/psize)\n",
|
||||||
|
" \n",
|
||||||
|
" db,dc,dh,dw = descripteur_input1.shape\n",
|
||||||
|
" \n",
|
||||||
|
" #######################################\n",
|
||||||
|
" # Calculer la carte de corrélation par convolution pour les n*m patchs plus petit.\n",
|
||||||
|
" for i in range(n):\n",
|
||||||
|
" for j in range(m):\n",
|
||||||
|
" if i==0 and j==0:\n",
|
||||||
|
" map_corre=F.conv2d(descripteur_input1.view(1,db*dc,dh,dw),get_patch(descripteur_input2,psize,i,j),padding=2,groups=db)\n",
|
||||||
|
"\n",
|
||||||
|
" map_corre=map_corre.view(db,1,map_corre.size(2),map_corre.size(3))\n",
|
||||||
|
" else:\n",
|
||||||
|
" a=F.conv2d(descripteur_input1.view(1,db*dc,dh,dw),get_patch(descripteur_input2,psize,i,j),padding=2, groups=db)\n",
|
||||||
|
" a=a.view(db,1,a.size(2),a.size(3))\n",
|
||||||
|
" map_corre=torch.cat((map_corre,a),1)\n",
|
||||||
|
" \n",
|
||||||
|
" ########################################\n",
|
||||||
|
" # Étape de polymérisation\n",
|
||||||
|
" map_corre,idx1=self.maxpooling(map_corre)\n",
|
||||||
|
" map_corre=self.shift1(map_corre)\n",
|
||||||
|
" map_corre=self.add1(map_corre)\n",
|
||||||
|
" \n",
|
||||||
|
" #########################################\n",
|
||||||
|
" # Répétez l'étape d'agrégation jusqu'à obtenir le graphique de corrélation du patch d'entrée\n",
|
||||||
|
" n=int(n/2)\n",
|
||||||
|
" m=int(m/2)\n",
|
||||||
|
" if n>=2 and m>=2:\n",
|
||||||
|
" map_corre,idx2=self.maxpooling(map_corre)\n",
|
||||||
|
" map_corre=self.shift2(map_corre)\n",
|
||||||
|
" map_corre=self.add2(map_corre)\n",
|
||||||
|
" \n",
|
||||||
|
" \n",
|
||||||
|
" n=int(n/2)\n",
|
||||||
|
" m=int(m/2)\n",
|
||||||
|
" if n>=2 and m>=2:\n",
|
||||||
|
" map_corre,idx3=self.maxpooling(map_corre)\n",
|
||||||
|
" map_corre=self.shift3(map_corre)\n",
|
||||||
|
" map_corre=self.add3(map_corre)\n",
|
||||||
|
" else:\n",
|
||||||
|
" idx3=idx2\n",
|
||||||
|
"\n",
|
||||||
|
" #b,c,h,w=map_corre.shape\n",
|
||||||
|
" # Normalisation de la division par maximum\n",
|
||||||
|
" map_corre=map_corre/map_corre.max()\n",
|
||||||
|
" # Normalisation SoftMax\n",
|
||||||
|
" #map_corre=(F.softmax(map_corre.reshape(b,1,h*w,1),dim=2)).reshape(b,c,h,w)\n",
|
||||||
|
" return map_corre, idx1, idx2, idx3"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
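{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sketch (not part of the original benchmark): shape-check the network on\n",
"# dummy CPU data, assuming a 16x16 fragment and a 64x64 fresco. forward() returns\n",
"# the aggregated correlation map plus the max-pooling indices used for backtracking.\n",
"_net = Net(16,4)\n",
"_map,_idx1,_idx2,_idx3 = _net.forward(torch.rand(1,3,64,64),torch.rand(1,3,16,16),False)\n",
"print(_map.shape)  # one correlation value per candidate position in the fresco\n"
]
},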
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"# The functions in this block apply the network to fragments (not to square patches)\n",
"\n",
"\n",
"# This function selects a set of square patches from a fragment\n",
"# The parameter \"frag_size\" is the size of the square input patch (16 * 16)\n",
"# The parameter \"seuillage\" sets the minimum proportion of non-black pixels required in each patch\n",
"# The parameter \"limite\" caps the number of patches kept per fragment (0 = no cap)\n",
"def get_patch_list(frag,frag_size,limite,seuillage):\n",
"    n=0\n",
"    m=0\n",
"    h,w,c=frag.shape\n",
"    patch_list=[]\n",
"    position_list=[]\n",
"    # Try a 4x4 grid of 4-pixel offsets so the patches need not align with one fixed grid\n",
"    for i in range(4):\n",
"        if len(patch_list)>limite and limite!=0:\n",
"            break\n",
"        for j in range(4):\n",
"            if len(patch_list)>limite and limite!=0:\n",
"                break\n",
"            n_offset=i*4 # n offset\n",
"            m_offset=j*4 # m offset\n",
"            n=0\n",
"            while n+frag_size+n_offset<h:\n",
"                m=0\n",
"                while m+frag_size+m_offset<w:\n",
"                    patch=frag[n+n_offset:n+frag_size+n_offset,m+m_offset:m+frag_size+m_offset,:]\n",
"                    if test_fragment32_32(patch,seuillage):\n",
"                        patch_list.append(patch)\n",
"                        position_list.append([int((n+frag_size/2)+n_offset),int((m+frag_size/2)+m_offset)])\n",
"                    m=m+frag_size\n",
"                n=n+frag_size\n",
"    return patch_list,position_list\n",
"\n",
"# Take a fragment and the fresco as input, and run the network\n",
"def run_net_v3(net,img,frag,frag_size,limite,seuillage,using_cuda,rotation):\n",
"    #Img=Image.fromarray(frag)\n",
"    transf = transforms.ToTensor()\n",
"    frag=np.array(frag.rotate(rotation))\n",
"    img_tensor=transf(img).unsqueeze(0)\n",
"    \n",
"    # The collection of square patches from the fragment is \"frag_list[]\"\n",
"    # The positions of their centers within the fragment are in \"position_frag[]\"\n",
"    frag_list,position_frag=get_patch_list(frag,frag_size,limite,seuillage)\n",
"    if using_cuda:\n",
"        img_tensor=img_tensor.cuda()\n",
"    \n",
"    score_list=[]\n",
"    coordonnee_list=[]\n",
"    \n",
"    # For each square patch in the collection, compute its position with the network\n",
"    # The results go into \"coordonnee_list[]\"\n",
"    # \"score_list[]\" is not used in this program\n",
"    for i in range(len(frag_list)):\n",
"        frag_tensor=transf(frag_list[i]).unsqueeze(0)\n",
"        if using_cuda:\n",
"            frag_tensor=frag_tensor.cuda()\n",
"        res,idx1,idx2,idx3=net.forward(img_tensor,frag_tensor,using_cuda)\n",
"        if using_cuda:\n",
"            res=res.cpu()\n",
"            idx1=idx1.cpu()\n",
"            idx2=idx2.cpu()\n",
"            idx3=idx3.cpu()\n",
"        score,po_h,po_w=show_coordonnee(res)\n",
"        # Switch to the numpy context\n",
"        idx3=idx3.squeeze().detach().numpy()\n",
"        idx2=idx2.squeeze().detach().numpy()\n",
"        idx1=idx1.squeeze().detach().numpy()\n",
"        # Backtracking: if the fragment size is 32, one extra step is needed\n",
"        # Average the 4 correlation maps before the pooling step to find the best index\n",
"        if (frag_size==32):\n",
"            c,h,w=idx2.shape\n",
"            po_h2=np.mean(idx3[:,round(po_h),round(po_w)]//h)\n",
"            po_w2=np.mean(idx3[:,round(po_h),round(po_w)]%w)\n",
"        else:\n",
"            po_h2=po_h\n",
"            po_w2=po_w\n",
"        c,h,w=idx1.shape\n",
"        po_h1=np.mean(idx2[:,round(po_h2),round(po_w2)]//h)\n",
"        po_w1=np.mean(idx2[:,round(po_h2),round(po_w2)]%w)\n",
"        _,_,h,w=img_tensor.shape\n",
"        po_h=np.mean(idx1[:,round(po_h1),round(po_w1)]//(h+1))\n",
"        po_w=np.mean(idx1[:,round(po_h1),round(po_w1)]%(w+1))\n",
"        coordonnee_list.append([po_h/h,po_w/w])\n",
"        score_list.append(score)\n",
"    b,c,h_img,w_img=img_tensor.shape\n",
"    position=[]\n",
"    \n",
"    # Put the matching pairs into \"position[]\"\n",
"    # [x,y,x',y']\n",
"    # Position (x,y) in the fragment corresponds to position (x',y') in the fresco\n",
"    for i in range(len(coordonnee_list)):\n",
"        x0=position_frag[i][0]\n",
"        y0=position_frag[i][1]\n",
"        x1=int(round(h_img*coordonnee_list[i][0]))\n",
"        y1=int(round(w_img*coordonnee_list[i][1]))\n",
"        position.append([x0,y0,x1,y1])\n",
"    return score_list,position"
]
},
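{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sketch: how the backtracking through MaxPool2d indices works. With\n",
"# return_indices=True, each index is a flat position in the pre-pooling map,\n",
"# so row = idx // width and col = idx % width recover the source coordinates.\n",
"_pool = nn.MaxPool2d(3,stride=2,padding=1,return_indices=True)\n",
"_x = torch.rand(1,1,9,9)\n",
"_y,_idx = _pool(_x)\n",
"_r,_c = _idx[0,0,2,2]//9, _idx[0,0,2,2]%9\n",
"assert _x[0,0,_r,_c] == _y[0,0,2,2]\n"
]
},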
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"# This part of the code implements the improved RANSAC algorithm\n",
"\n",
"# Write a point in the form [x,y,1]^T,\n",
"# used to build the equation for the transformation matrix\n",
"def creer_point(x,y):\n",
"    p=np.zeros((3,1))\n",
"    p[0][0]=x\n",
"    p[1][0]=y\n",
"    p[2][0]=1\n",
"    return p\n",
"\n",
"# Randomly select n points out of M points, without duplicates\n",
"def selectionner_points(n,M):\n",
"    table=list(range(M))\n",
"    result=[]\n",
"    for i in range(n):\n",
"        index=random.randint(0,M-i-1)\n",
"        result.append(table[index])\n",
"        table[index]=table[M-1-i]\n",
"    return result\n",
"\n",
"# From the affine transformation matrix, compute the transformed center position and the rotation angle\n",
"def position_rotation(h,centre_frag):\n",
"    centre=h@centre_frag\n",
"    cos_rot=(h[0][0]+h[1][1])/2\n",
"    sin_rot=(h[1][0]-h[0][1])/2\n",
"    tan_rot=sin_rot/(cos_rot+0.0000001)\n",
"    if cos_rot>0:\n",
"        rot_frag=math.atan(tan_rot)*(180/math.pi)\n",
"    else:\n",
"        rot_frag=math.atan(tan_rot)*(180/math.pi)+180\n",
"    rot_frag=-rot_frag\n",
"    if rot_frag>0:\n",
"        rot_frag-=360\n",
"    return centre[0][0],centre[1][0],rot_frag\n",
"\n",
"# Check the RANSAC result by looking at how Euclidean distances change\n",
"def test_frag(inline,frag,fres):\n",
"    itera=10\n",
"    frag_inline=[]\n",
"    fres_inline=[]\n",
"    # Put the coordinates of the inlier points into \"frag_inline[]\" and \"fres_inline[]\"\n",
"    for i in range(np.size(inline,0)):\n",
"        if inline[i]==1:\n",
"            frag_inline.append([frag[i][0],frag[i][1]])\n",
"            fres_inline.append([fres[i][0],fres[i][1]])\n",
"    p=[]\n",
"    \n",
"    # Loop ten times; each time select two inlier correspondences\n",
"    # and compute the change in their Euclidean distance\n",
"    for i in range(itera):\n",
"        point_test=selectionner_points(2,np.size(frag_inline,0))\n",
"        diff_x_frag=frag_inline[point_test[1]][0]-frag_inline[point_test[0]][0]\n",
"        diff_y_frag=frag_inline[point_test[1]][1]-frag_inline[point_test[0]][1]\n",
"        diff_frag=math.sqrt(math.pow(diff_x_frag,2)+math.pow(diff_y_frag,2))\n",
"        \n",
"        diff_x_fres=fres_inline[point_test[1]][0]-fres_inline[point_test[0]][0]\n",
"        diff_y_fres=fres_inline[point_test[1]][1]-fres_inline[point_test[0]][1]\n",
"        diff_fres=math.sqrt(math.pow(diff_x_fres,2)+math.pow(diff_y_fres,2))\n",
"        if diff_frag !=0:\n",
"            fsf=diff_fres/diff_frag\n",
"            p.append([fsf])\n",
"    result=np.mean(p)\n",
"    return result\n",
"\n",
"def frag_match(frag,img,position):\n",
"    \n",
"    frag_size=frag.size\n",
"    centre_frag=creer_point(frag_size[1]/2,frag_size[0]/2)\n",
"    \n",
"    retained_matches = []\n",
"    frag=[]\n",
"    fres=[]\n",
"    \n",
"    for i in range(len(position)):\n",
"        frag.append([float(position[i][0]),float(position[i][1])])\n",
"        fres.append([float(position[i][2]),float(position[i][3])])\n",
"    \n",
"    if np.size(frag)>0:\n",
"        # Compute the affine transformation matrix with the RANSAC method\n",
"        h,inline=cv2.estimateAffinePartial2D(np.array(frag),np.array(fres))\n",
"        # If \"h\" is not a 2 * 3 matrix, no affine transformation matrix was found\n",
"        if np.size(h)!=6:\n",
"            return ([-1])\n",
"        else:\n",
"            x,y,rot=position_rotation(h,centre_frag)\n",
"            pourcentage=sum(inline)/np.size(frag,0)\n",
"            # The number of inlier points must exceed a minimum\n",
"            if sum(inline)>3:\n",
"                p=test_frag(inline,frag,fres)\n",
"                # The Euclidean distance between corresponding points must not change too much,\n",
"                # otherwise the RANSAC result is wrong\n",
"                # Here, the allowed change in Euclidean distance is between 0.7 and 1.3\n",
"                if abs(p-1)<0.3:\n",
"                    # Only then does RANSAC return a correct result\n",
"                    return([round(x),round(y),round(rot,3)])\n",
"                else:\n",
"                    return ([-2])\n",
"            else:\n",
"                return ([-3])\n",
"    else:\n",
"        return ([-4])"
]
},
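{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sketch: cv2.estimateAffinePartial2D on synthetic matches. Points rotated\n",
"# by 30 degrees and shifted by (5,7) should be recovered as a 2x3 partial-affine\n",
"# matrix; position_rotation() then reads the rotation angle back from it.\n",
"_src = np.array([[0,0],[10,0],[0,10],[10,10],[5,2]],dtype=np.float32)\n",
"_theta = math.radians(30)\n",
"_R = np.array([[math.cos(_theta),-math.sin(_theta)],[math.sin(_theta),math.cos(_theta)]])\n",
"_dst = (_src@_R.T + np.array([5,7])).astype(np.float32)\n",
"_h,_inl = cv2.estimateAffinePartial2D(_src,_dst)\n",
"print(position_rotation(_h,creer_point(5,5)))\n"
]
},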
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Fresque 1, fragment 10/200 (5.0%)\n",
"Temps par fragment: 5.46. ETA = 1.04e+03s\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-17-8e428bf4f6f7>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0;31m#rotation=0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;31m#rotation_base=0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 53\u001b[0;31m \u001b[0mscore_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mpositions_patchs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrun_net_v3\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnet\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfrag\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfrag_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m60\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0.7\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0musing_cuda\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrotation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 54\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0mfrag_position\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfrag_match\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfrag\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mpositions_patchs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-15-d10e3e9ca7e4>\u001b[0m in \u001b[0;36mrun_net_v3\u001b[0;34m(net, img, frag, frag_size, limite, seuillage, using_cuda, rotation)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mres\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0midx1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0midx2\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0midx3\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnet\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg_tensor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfrag_tensor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0musing_cuda\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0musing_cuda\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0mres\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mres\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0midx1\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0midx1\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0midx2\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0midx2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"if __name__==\"__main__\":\n",
"    \n",
"    # Network variables\n",
"    frag_size=16\n",
"    using_cuda=True\n",
"    for fresque_id in [1]:\n",
"        #fresque_id = 2\n",
"\n",
"        # Data variables\n",
"        base_dir = './training_data_small/'\n",
"        fresque_filename = base_dir+'fresque_small{}.ppm'\n",
"        fresque_filename_wild = base_dir+'fresque_small*.ppm'\n",
"        fragment_filename = base_dir+'fragments/fresque{}/frag_bench_{:05}.ppm'\n",
"        fragments_filename_wild = base_dir+'fragments/fresque{}/frag_bench_*.ppm'\n",
"        vt_filename = base_dir+'fragments/fresque{}/vt/frag_bench_{:05}_vt.txt'\n",
"        net_filename = \"./trained_net/net_trainned_with-cross-entropy_02-10_07-30_0115\"\n",
"        \n",
"        expe_id = int(net_filename.split(\"_\")[-1]) # Experiment ID, appended to every file written, so the results of one experiment can be identified.\n",
"        date = datetime.now().strftime(\"%m-%d_%H-%M\")\n",
"        results_filename = './results_bench/results_bench_f{}_{}_{:04}'.format(fresque_id,date,expe_id)\n",
"\n",
"        # Load the network\n",
"        net=load_net(net_filename)\n",
"        net.maxpooling=nn.MaxPool2d(3,stride=2, padding=1, return_indices=True)\n",
"\n",
"        # Load the fresco into memory\n",
"        img=Image.open(fresque_filename.format(fresque_id))\n",
"        \n",
"        #N_fragments = 20\n",
"        N_fragments = len(glob(fragments_filename_wild.format(fresque_id)))\n",
"        print(fragments_filename_wild.format(fresque_id))\n",
"        print(N_fragments)\n",
"\n",
"        # Create the result tables\n",
"        distances, matched, positions, verite_terrain = [],[],[],[]\n",
"        tailles = []\n",
"\n",
"        time_old = time()\n",
"        # Iterate over all the benchmark fragments of this fresco\n",
"        for fragment_id in range(N_fragments):\n",
"            clear_output(wait=True)\n",
"            print(\"Fresque {}, fragment {}/{} ({:.3}%)\".format(fresque_id,fragment_id,N_fragments,(fragment_id/N_fragments*100)))\n",
"            delta = time()-time_old\n",
"            print(\"Temps par fragment: {:.3}. ETA = {:.3}s\".format(delta,(N_fragments-fragment_id)*delta))\n",
"            time_old = time()\n",
"            frag = Image.open(fragment_filename.format(fresque_id,fragment_id))\n",
"\n",
"            # Rotate the fragment by 20 degrees at a time and try to match it; repeat 18 times\n",
"            for i in range(18):\n",
"                rotation=20*i\n",
"                #rotation=0\n",
"                #rotation_base=0\n",
"                score_list,positions_patchs=run_net_v3(net,img,frag,frag_size,60,0.7,using_cuda,rotation)\n",
"                \n",
"                frag_position=frag_match(frag,img,positions_patchs)\n",
"                # When RANSAC finds the right result, exit the loop\n",
"                if len(frag_position)==3:\n",
"                    rotation_base=i*20\n",
"                    break\n",
"            # If RANSAC found a solution, the returned value is a list of two positions and one rotation\n",
"            if len(frag_position)==3:\n",
"                \n",
"                # MATCHED\n",
"                matched.append(1)\n",
"\n",
"                # POSITION\n",
"                frag_position[2]=rotation_base-360-frag_position[2]\n",
"                if frag_position[2]>0:\n",
"                    frag_position[2]=frag_position[2]-360\n",
"                positions.append([frag_position[0],frag_position[1],round(frag_position[2],3)])\n",
"\n",
"                # GROUND TRUTH\n",
"                with open(vt_filename.format(fresque_id,fragment_id), 'r') as f:\n",
"                    data_vt = f.read().splitlines()\n",
"                verite_terrain.append([int(data_vt[2]),int(data_vt[3]),frag.size[0],frag.size[1]])\n",
"\n",
"                # DISTANCE\n",
"                distances.append(np.linalg.norm([float(data_vt[3])-float(frag_position[0]),float(data_vt[2])-float(frag_position[1])]))\n",
"            else:\n",
"                matched.append(0)\n",
"                distances.append(-1)\n",
"                positions.append([])\n",
"                verite_terrain.append([])\n",
"\n",
"            del frag\n",
"\n",
"        meta = {'date':date,'base_dir':base_dir,'fresque_id':fresque_id,'fresque_taille':img.size,'N_fragments': N_fragments,'expe_id': expe_id}\n",
"        res = {'meta':meta, 'matched':matched,'distances':distances,'positions':positions,'vt':verite_terrain}\n",
"\n",
"        with open(results_filename,'w') as f:\n",
"            f.write(json.dumps(res))\n",
"\n",
"        print(\"Sauvegardé dans {}\".format(results_filename))"
]
},
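{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sketch: reload a results file and summarize it. Assumes the layout\n",
"# written above ('matched' flags, 'distances' in pixels, -1 when unmatched).\n",
"with open(results_filename) as f:\n",
"    _res = json.load(f)\n",
"_d = [d for d in _res['distances'] if d >= 0]\n",
"print('matched: {}/{}'.format(sum(_res['matched']),_res['meta']['N_fragments']))\n",
"print('mean distance on matches: {:.2f}'.format(np.mean(_d)))\n"
]
},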
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sauvegardé dans ./results_bench/results_bench_f1_03-12_12-41_0115\n"
]
}
],
"source": [
"# Save the results again (same as at the end of the benchmark loop)\n",
"meta = {'date':date,'base_dir':base_dir,'fresque_id':fresque_id,'fresque_taille':img.size,'N_fragments': N_fragments,'expe_id': expe_id}\n",
"res = {'meta':meta, 'matched':matched,'distances':distances,'positions':positions,'vt':verite_terrain}\n",
"\n",
"with open(results_filename,'w') as f:\n",
"    f.write(json.dumps(res))\n",
"\n",
"print(\"Sauvegardé dans {}\".format(results_filename))"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"20"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Clear GPU memory\n",
"import gc\n",
"torch.cuda.empty_cache()\n",
"gc.collect()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}