diff --git a/Apprentissage_MSELoss_avec_GPU.ipynb b/Apprentissage_MSELoss_avec_GPU.ipynb
new file mode 100755
index 0000000..92e3073
--- /dev/null
+++ b/Apprentissage_MSELoss_avec_GPU.ipynb
@@ -0,0 +1,505 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# All of the code assumes the following environment\n",
+    "# python 3.7\n",
+    "# opencv 3.1.0\n",
+    "# pytorch 1.4.0\n",
+    "\n",
+    "import torch\n",
+    "from torch.autograd import Variable\n",
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "import cv2\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import random\n",
+    "import math\n",
+    "import pickle\n",
+    "from PIL import Image\n",
+    "import sys"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The functions in this block are not used by the network itself; they are utility functions\n",
+    "\n",
+    "\n",
+    "def tensor_imshow(im_tensor,cannel):\n",
+    "    b,c,h,w=im_tensor.shape\n",
+    "    if c==1:\n",
+    "        plt.imshow(im_tensor.squeeze().detach().numpy())\n",
+    "    else:\n",
+    "        plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:])\n",
+    "\n",
+    "# Get training data\n",
+    "# frag,vt=get_training_fragment(frag_size,image)\n",
+    "# frag is a square patch of size (frag_size*frag_size) taken from the image (its location is random)\n",
+    "# vt is the ground truth in Dirac form: a single 1 at the patch position, 0 everywhere else.\n",
+    "def get_training_fragment(frag_size,im):\n",
+    "    h,w,c=im.shape\n",
+    "    n=random.randint(0,int(h/frag_size)-1)\n",
+    "    m=random.randint(0,int(w/frag_size)-1)\n",
+    "    shape=frag_size/4\n",
+    "    vt_h=math.ceil((h+1)/shape)\n",
+    "    vt_w=math.ceil((w+1)/shape)\n",
+    "    vt=np.zeros([vt_h,vt_w])\n",
+    "    vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)\n",
+    "    vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)\n",
+    "    vt[vt_h_po,vt_w_po]=1\n",
+    "    vt = np.float32(vt)\n",
+    "    vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))\n",
+    "    \n",
+    "    return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt\n",
+    "\n",
+    "# This function converts an image into a Tensor variable.\n",
+    "# All data used in the network's computations is of type Tensor\n",
+    "# Img.shape=[Height,Width,Channel]\n",
+    "# Tensor.shape=[Batch,Channel,Height,Width]\n",
+    "def img2tensor(im):\n",
+    "    im=np.array(im,dtype=\"float32\")\n",
+    "    tensor_cv = torch.from_numpy(np.transpose(im, (2, 0, 1)))\n",
+    "    im_tensor=tensor_cv.unsqueeze(0)\n",
+    "    return im_tensor\n",
+    "\n",
+    "# Find the coordinates of the maximum value in a correlation map\n",
+    "# x,y=show_coordonnee(correlation map)\n",
+    "def show_coordonnee(position_pred):\n",
+    "    map_corre=position_pred.squeeze().detach().numpy()\n",
+    "    h,w=map_corre.shape\n",
+    "    max_value=map_corre.max()\n",
+    "    coordonnee=np.where(map_corre==max_value)\n",
+    "    return coordonnee[0].mean()/h,coordonnee[1].mean()/w\n",
+    "\n",
+    "# Filter patches according to the number of black pixels they contain\n",
+    "# Return True if the proportion of non-black pixels is above the threshold (seuillage), otherwise False\n",
+    "def test_fragment32_32(frag,seuillage):\n",
+    "    a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]\n",
+    "    mask = (a == 0)\n",
+    "    arr_new = a[mask]\n",
+    "    if arr_new.size/a.size<=(1-seuillage):\n",
+    "        return True\n",
+    "    else:\n",
+    "        return False\n",
+    "    \n",
+    "# These two functions save the network to a file\n",
+    "# or load a stored network back from a file\n",
+    "def save_net(file_path,net):\n",
+    "    pkl_file = open(file_path, 'wb')\n",
+    "    pickle.dump(net,pkl_file)\n",
+    "    pkl_file.close()\n",
+    "def load_net(file_path): \n",
+    "    pkl_file = open(file_path, 'rb')\n",
+    "    net= pickle.load(pkl_file)\n",
+    "    pkl_file.close()\n",
+    "    return net"
+   ]
+  },
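+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sanity check of the helpers above (not part of the original\n",
+    "# pipeline): build a synthetic random image, draw one training pair and print\n",
+    "# the shapes. Only get_training_fragment and img2tensor from this notebook\n",
+    "# are used; all data is synthetic.\n",
+    "im_demo=np.uint8(np.random.randint(0,255,(128,160,3)))\n",
+    "frag_demo,vt_demo=get_training_fragment(16,im_demo)\n",
+    "print(frag_demo.shape)       # (16, 16, 3): one square training patch\n",
+    "print(vt_demo.shape)         # torch.Size([1, 1, 33, 41]): Dirac ground-truth map\n",
+    "print(float(vt_demo.sum()))  # 1.0: a single peak marks the patch position\n",
+    "print(img2tensor(frag_demo).shape) # torch.Size([1, 3, 16, 16])"
+   ]
+  },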
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The functions in this block are used to build the network\n",
+    "\n",
+    "# Create a DeepMatching-style weight as the initial value of Conv1 (optional)\n",
+    "def ini():\n",
+    "    kernel=torch.zeros([8,3,3,3])\n",
+    "    array_0=np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype='float32')\n",
+    "    array_1=np.array([[2,1,0],[1,0,-1],[0,-1,-2]],dtype='float32')\n",
+    "    array_2=np.array([[1,0,-1],[2,0,-2],[1,0,-1]],dtype='float32')\n",
+    "    array_3=np.array([[0,-1,-2],[1,0,-1],[2,1,0]],dtype='float32')\n",
+    "    array_4=np.array([[-1,-2,-1],[0,0,0],[1,2,1]],dtype='float32')\n",
+    "    array_5=np.array([[-2,-1,0],[-1,0,1],[0,1,2]],dtype='float32')\n",
+    "    array_6=np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype='float32')\n",
+    "    array_7=np.array([[0,1,2],[-1,0,1],[-2,-1,0]],dtype='float32')\n",
+    "    for i in range(3):\n",
+    "        kernel[0,i,:]=torch.from_numpy(array_0)\n",
+    "        kernel[1,i,:]=torch.from_numpy(array_1)\n",
+    "        kernel[2,i,:]=torch.from_numpy(array_2)\n",
+    "        kernel[3,i,:]=torch.from_numpy(array_3)\n",
+    "        kernel[4,i,:]=torch.from_numpy(array_4)\n",
+    "        kernel[5,i,:]=torch.from_numpy(array_5)\n",
+    "        kernel[6,i,:]=torch.from_numpy(array_6)\n",
+    "        kernel[7,i,:]=torch.from_numpy(array_7)\n",
+    "    return torch.nn.Parameter(kernel,requires_grad=True) \n",
+    "\n",
+    "# Compute the initial weight of the \"add\" convolution layer\n",
+    "# n, m mean that there are n * m sub-patches in the input patch\n",
+    "# For example, if the input patch is 16 * 16: for the 4 * 4 patches of the first layer, n = 4, m = 4;\n",
+    "# for the 8 * 8 patches of the second layer, n = 2, m = 2\n",
+    "def kernel_add_ini(n,m):\n",
+    "    input_canal=int(n*m)\n",
+    "    output_canal=int(n/2)*int(m/2)\n",
+    "    for i in range(int(n/2)):\n",
+    "        for j in range(int(m/2)):\n",
+    "            kernel_add=np.zeros([1,input_canal],dtype='float32')\n",
+    "            kernel_add[0,i*2*m+j*2]=1\n",
+    "            kernel_add[0,i*2*m+j*2+1]=1\n",
+    "            kernel_add[0,(i*2+1)*m+j*2]=1\n",
+    "            kernel_add[0,(i*2+1)*m+j*2+1]=1\n",
+    "            if i==0 and j==0:\n",
+    "                add=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
+    "            else:\n",
+    "                add_=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
+    "                add=torch.cat((add,add_),0)\n",
+    "    return torch.nn.Parameter(add,requires_grad=False) \n",
\n", + "# Pour des étapes plus détaillées, veuillez consulter mon rapport de stage\n", + "def kernel_shift_ini(n,m):\n", + " input_canal=int(n*m)\n", + " output_canal=int(n*m)\n", + " \n", + " kernel_shift=torch.zeros([output_canal,input_canal,3,3])\n", + " \n", + " array_0=np.array([[1,0,0],[0,0,0],[0,0,0]],dtype='float32')\n", + " array_1=np.array([[0,0,1],[0,0,0],[0,0,0]],dtype='float32')\n", + " array_2=np.array([[0,0,0],[0,0,0],[1,0,0]],dtype='float32')\n", + " array_3=np.array([[0,0,0],[0,0,0],[0,0,1]],dtype='float32')\n", + " \n", + " kernel_shift_0=torch.from_numpy(array_0)\n", + " kernel_shift_1=torch.from_numpy(array_1)\n", + " kernel_shift_2=torch.from_numpy(array_2)\n", + " kernel_shift_3=torch.from_numpy(array_3)\n", + " \n", + " \n", + " for i in range(n):\n", + " for j in range(m):\n", + " if i==0 and j==0:\n", + " kernel_shift[0,0,:]=kernel_shift_0\n", + " else:\n", + " if i%2==0 and j%2==0:\n", + " kernel_shift[i*m+j,i*m+j,:]=kernel_shift_0\n", + " if i%2==0 and j%2==1:\n", + " kernel_shift[i*m+j,i*m+j,:]=kernel_shift_1\n", + " if i%2==1 and j%2==0:\n", + " kernel_shift[i*m+j,i*m+j,:]=kernel_shift_2\n", + " if i%2==1 and j%2==1:\n", + " kernel_shift[i*m+j,i*m+j,:]=kernel_shift_3\n", + " \n", + " return torch.nn.Parameter(kernel_shift,requires_grad=False) \n", + "\n", + "# Trouvez le petit patch(4 * 4) dans la n ème ligne et la m ème colonne du patch d'entrée\n", + "# Ceci est utilisé pour calculer la convolution et obtenir la carte de corrélation\n", + "def get_patch(fragment,psize,n,m):\n", + " return fragment[:,:,n*psize:(n+1)*psize,m*psize:(m+1)*psize]\n", + "###################################################################################################################\n", + "class Net(nn.Module):\n", + " def __init__(self,frag_size,psize):\n", + " super(Net, self).__init__()\n", + " \n", + " h_fr=frag_size\n", + " w_fr=frag_size\n", + " \n", + " n=int(h_fr/psize) # n*m patches dans le patch d'entrée\n", + " m=int(w_fr/psize)\n", + " \n", + " self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)\n", + " # Si vous souhaitez initialiser Conv1 avec les poids de DeepMatch, exécutez la ligne suivante\n", + " # self.conv1.weight=ini()\n", + " self.Relu = nn.ReLU(inplace=True)\n", + " self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)\n", + " \n", + " self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n", + " self.shift1.weight=kernel_shift_ini(n,m)\n", + " self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n", + " self.add1.weight=kernel_add_ini(n,m)\n", + " \n", + " n=int(n/2)\n", + " m=int(m/2)\n", + " if n>=2 and m>=2:# Si n=m=1,Notre réseau n'a plus besoin de plus de couches pour agréger les cartes de corrélation\n", + " self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n", + " self.shift2.weight=kernel_shift_ini(n,m)\n", + " self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n", + " self.add2.weight=kernel_add_ini(n,m)\n", + " \n", + " n=int(n/2)\n", + " m=int(m/2)\n", + " if n>=2 and m>=2:\n", + " self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n", + " self.shift3.weight=kernel_shift_ini(n,m)\n", + " self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n", + " self.add3.weight=kernel_add_ini(n,m)\n", + " \n", + " def get_descripteur(self,img,using_cuda):\n", + " # Utilisez Conv1 pour calculer le descripteur,\n", + " descripteur_img=self.Relu(self.conv1(img))\n", + " b,c,h,w=descripteur_img.shape\n", + " 
+    "###################################################################################################################\n",
+    "class Net(nn.Module):\n",
+    "    def __init__(self,frag_size,psize):\n",
+    "        super(Net, self).__init__()\n",
+    "        \n",
+    "        h_fr=frag_size\n",
+    "        w_fr=frag_size\n",
+    "        \n",
+    "        n=int(h_fr/psize) # there are n*m sub-patches in the input patch\n",
+    "        m=int(w_fr/psize)\n",
+    "        \n",
+    "        self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)\n",
+    "        # To initialise Conv1 with the DeepMatching-style weights, uncomment the next line\n",
+    "        # self.conv1.weight=ini()\n",
+    "        self.Relu = nn.ReLU(inplace=True)\n",
+    "        self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)\n",
+    "        \n",
+    "        self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
+    "        self.shift1.weight=kernel_shift_ini(n,m)\n",
+    "        self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
+    "        self.add1.weight=kernel_add_ini(n,m)\n",
+    "        \n",
+    "        n=int(n/2)\n",
+    "        m=int(m/2)\n",
+    "        if n>=2 and m>=2: # if n=m=1, the network needs no further layers to aggregate the correlation maps\n",
+    "            self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
+    "            self.shift2.weight=kernel_shift_ini(n,m)\n",
+    "            self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
+    "            self.add2.weight=kernel_add_ini(n,m)\n",
+    "        \n",
+    "        n=int(n/2)\n",
+    "        m=int(m/2)\n",
+    "        if n>=2 and m>=2:\n",
+    "            self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
+    "            self.shift3.weight=kernel_shift_ini(n,m)\n",
+    "            self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
+    "            self.add3.weight=kernel_add_ini(n,m)\n",
+    "    \n",
+    "    def get_descripteur(self,img,using_cuda):\n",
+    "        # Use Conv1 to compute the descriptor\n",
+    "        descripteur_img=self.Relu(self.conv1(img))\n",
+    "        b,c,h,w=descripteur_img.shape\n",
+    "        couche_constante=0.5*torch.ones([1,1,h,w])\n",
+    "        if using_cuda:\n",
+    "            couche_constante=couche_constante.cuda()\n",
+    "        # Append a constant layer so the normalisation below can never divide by 0\n",
+    "        descripteur_img=torch.cat((descripteur_img,couche_constante),1)\n",
+    "        # normalisation (relies on batch size 1: the [b,h,w] norm broadcasts over the channel dim)\n",
+    "        descripteur_img_norm=descripteur_img/torch.norm(descripteur_img,dim=1)\n",
+    "        return descripteur_img_norm\n",
+    "    \n",
+    "    def forward(self,img,frag,using_cuda):\n",
+    "        psize=4\n",
+    "        # Use Conv1 to compute the descriptors of both inputs\n",
+    "        descripteur_input1=self.get_descripteur(img,using_cuda)\n",
+    "        descripteur_input2=self.get_descripteur(frag,using_cuda)\n",
+    "        \n",
+    "        b,c,h,w=frag.shape\n",
+    "        n=int(h/psize)\n",
+    "        m=int(w/psize)\n",
+    "        \n",
+    "        #######################################\n",
+    "        # Compute the correlation map of each of the n*m small patches by convolution.\n",
+    "        for i in range(n):\n",
+    "            for j in range(m):\n",
+    "                if i==0 and j==0:\n",
+    "                    map_corre=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
+    "                else:\n",
+    "                    a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
+    "                    map_corre=torch.cat((map_corre,a),1)\n",
+    "        ########################################\n",
+    "        # Aggregation step\n",
+    "        map_corre=self.maxpooling(map_corre)\n",
+    "        map_corre=self.shift1(map_corre)\n",
+    "        map_corre=self.add1(map_corre)\n",
+    "        \n",
+    "        #########################################\n",
+    "        # Repeat the aggregation step until the correlation map of the whole input patch is obtained\n",
+    "        n=int(n/2)\n",
+    "        m=int(m/2)\n",
+    "        if n>=2 and m>=2:\n",
+    "            map_corre=self.maxpooling(map_corre)\n",
+    "            map_corre=self.shift2(map_corre)\n",
+    "            map_corre=self.add2(map_corre)\n",
+    "        \n",
+    "        \n",
+    "        n=int(n/2)\n",
+    "        m=int(m/2)\n",
+    "        if n>=2 and m>=2:\n",
+    "            map_corre=self.maxpooling(map_corre)\n",
+    "            map_corre=self.shift3(map_corre)\n",
+    "            map_corre=self.add3(map_corre)\n",
+    "        \n",
+    "        \n",
+    "        b,c,h,w=map_corre.shape\n",
+    "        # Normalisation by division by the maximum\n",
+    "        map_corre=map_corre/(map_corre.max())\n",
+    "        # SoftMax normalisation\n",
+    "        #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)\n",
+    "        return map_corre"
+   ]
+  },
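+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch (not part of the original training flow): run an\n",
+    "# untrained Net on random tensors, on CPU, to check the shapes. With a\n",
+    "# 64*64 fresco and a 16*16 patch, two aggregation stages reduce the 16\n",
+    "# small correlation maps to a single map for the whole patch.\n",
+    "net_demo=Net(16,4)\n",
+    "img_demo=torch.rand(1,3,64,64)\n",
+    "frag_demo=torch.rand(1,3,16,16)\n",
+    "map_demo=net_demo.forward(img_demo,frag_demo,False)\n",
+    "print(map_demo.shape)        # torch.Size([1, 1, 17, 17])\n",
+    "print(float(map_demo.max())) # 1.0 after the division-by-max normalisation"
+   ]
+  },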
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def run_net(net,img,frag,frag_size,using_cuda):\n",
+    "    h,w,c=frag.shape\n",
+    "    n=int(h/frag_size)\n",
+    "    m=int(w/frag_size)\n",
+    "    frag_list=[]\n",
+    "    #####################################\n",
+    "    # Cut square patches out of the fragment and put them in frag_list\n",
+    "    for i in range(n):\n",
+    "        for j in range(m):\n",
+    "            frag_32=frag[i*frag_size:(i+1)*frag_size,j*frag_size:(j+1)*frag_size]\n",
+    "            if test_fragment32_32(frag_32,0.6):\n",
+    "                frag_list.append(frag_32)\n",
+    "    img_tensor=img2tensor(img)\n",
+    "    ######################################\n",
+    "    if using_cuda:\n",
+    "        img_tensor=img_tensor.cuda()\n",
+    "    \n",
+    "    coordonnee_list=[]\n",
+    "    #######################################\n",
+    "    # Use the network to compute the position of every patch in frag_list[]\n",
+    "    # and store the results in coordonnee_list[]\n",
+    "    for i in range(len(frag_list)):\n",
+    "        frag_tensor=img2tensor(frag_list[i])\n",
+    "        if using_cuda:\n",
+    "            frag_tensor=frag_tensor.cuda()\n",
+    "        res=net.forward(img_tensor,frag_tensor,using_cuda)\n",
+    "        if using_cuda:\n",
+    "            res=res.cpu()\n",
+    "        po_h,po_w=show_coordonnee(res)\n",
+    "        coordonnee_list.append([po_h,po_w])\n",
+    "    h_img,w_img,c=img.shape\n",
+    "    position=[]\n",
+    "    for i in range(len(coordonnee_list)):\n",
+    "        x=int(round(h_img*coordonnee_list[i][0]))\n",
+    "        y=int(round(w_img*coordonnee_list[i][1]))\n",
+    "        position.append([x,y])\n",
+    "    return position"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if __name__=='__main__':\n",
+    "    \n",
+    "    # The input patch size is 16*16\n",
+    "    frag_size=16\n",
+    "    # The size of the smallest patch in the network is fixed at 4*4\n",
+    "    psize=4\n",
+    "    using_cuda=True\n",
+    "    \n",
+    "    \n",
+    "    net=Net(frag_size,psize)\n",
+    "    \n",
+    "    # For each fresco, the number of iterations is 1000\n",
+    "    itera=1000\n",
+    "    \n",
+    "    if using_cuda:\n",
+    "        net=net.cuda()\n",
+    "    \n",
+    "    # Choose the optimizer and the loss function\n",
+    "    optimizer = torch.optim.Adam(net.parameters())\n",
+    "    loss_func = torch.nn.MSELoss()\n",
+    "    \n",
+    "    # During training, the evolution of the error is stored in loss_value=[]\n",
+    "    # and the evolution of the Conv1 weights is stored in para_value=[]\n",
+    "    loss_value=[]\n",
+    "    para_value=[]\n",
+    "    ####################################################training_net\n",
+    "    \n",
+    "    # The training data are 6 frescoes\n",
+    "    for n in range(6):\n",
+    "        im_path=\"./fresque\"+str(n)+\".ppm\"\n",
+    "        img_training=cv2.imread(im_path)\n",
+    "        h,w,c=img_training.shape\n",
+    "        \n",
+    "        # If the fresco is too large, downsample it\n",
+    "        # (note: cv2.resize expects (width, height), hence (w/2, h/2))\n",
+    "        while h*w>(1240*900):\n",
+    "            img_training=cv2.resize(img_training,(int(w/2),int(h/2)),interpolation=cv2.INTER_CUBIC)\n",
+    "            h,w,c=img_training.shape\n",
+    "        im_tensor=img2tensor(img_training)\n",
+    "        \n",
+    "        if using_cuda:\n",
+    "            im_tensor=im_tensor.cuda()\n",
+    "        for i in range(itera):\n",
+    "            # Every 100 iterations, record the Conv1 weights\n",
+    "            if i%100==0:\n",
+    "                para=net.conv1.weight\n",
+    "                para=para.detach().cpu()\n",
+    "                para_value.append(para)\n",
+    "            frag,vt=get_training_fragment(frag_size,img_training)\n",
+    "            frag_tensor=img2tensor(frag)\n",
+    "            if using_cuda:\n",
+    "                vt=vt.cuda()\n",
+    "                frag_tensor=frag_tensor.cuda()\n",
+    "            # Run the network on a training patch and its fresco\n",
+    "            frag_pred=net.forward(im_tensor,frag_tensor,using_cuda)\n",
+    "            b,c,h,w=vt.shape\n",
+    "            # Compute the error with the loss function\n",
+    "            err_=loss_func(frag_pred,vt)\n",
+    "            # Let the optimizer adjust the Conv1 weights\n",
+    "            optimizer.zero_grad()\n",
+    "            err_.backward(retain_graph=True)\n",
+    "            optimizer.step()\n",
+    "            \n",
+    "            loss_value.append(err_.tolist())\n",
+    "            \n",
+    "            del frag_tensor,frag_pred,err_,vt\n",
+    "            torch.cuda.empty_cache()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "6000"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "len(loss_value)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[]"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAD4CAYAAAD8Zh1EAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3deXxU1fk/8M+TBAhbQCAsQiCArCKbYXFBXIqC9FvcFat1Q0q/ot8Wa4uK1qL8RKxbKxYRrFoEqlVEDYuyo4AQAoEAiYQlJGFJSEhICNnP74+5mcxyZ+ZmMtudfN6vFy9m7j1z55wQnjlz7jnPEaUUiIjI/CKCXQEiIvINBnQiojDBgE5EFCYY0ImIwgQDOhFRmIgK1ht36NBBxcfHB+vtiYhMaffu3WeVUrF654IW0OPj45GUlBSstyciMiURyXR1jkMuRERhggGdiChMMKATEYUJBnQiojBhKKCLyHgRSReRDBGZqXP+ehEpEpG92p8XfV9VIiJyx+MsFxGJBDAfwDgA2QB2icjXSqmDDkW3KqV+6Yc6EhGRAUZ66CMBZCiljiqlKgAsBzDJv9UiIqL6MhLQuwLIsnmerR1zdJWIpIjIahG53Ce105F+uhhvfJeOsyXl/noLIiJTMhLQReeYYxL1ZAA9lFJDAPwDwFe6FxKZKiJJIpKUl5dXv5pqMnJL8I8NGSi4UOHV64mIwpWRgJ4NIM7meTcAJ20LKKXOK6VKtMerADQRkQ6OF1JKLVRKJSilEmJjdVeueiTax0sNN+YgIrJjJKDvAtBHRHqKSFMA9wH42raAiHQWsYRaERmpXTff15UFgAgtoDOeExHZ8zjLRSlVJSLTAawFEAngQ6XUARGZpp1fAOAuAL8TkSoAFwHcp/y2t50lorOHTkRkz1ByLm0YZZXDsQU2j98F8K5vq6aPPXQiIn2mWymqjewwoBMROTBdQLf20J0m2hARNW6mC+h1s1yCWw8iolBjvoCO2iEXRnQiIlvmC+jWIRciIrJluoBeu0J09f5TQa4JEVFoMV1A//lMCQDg420ut9UjImqUTBfQI7j0n4hIl+kCeqQW0asZ0ImI7JguoNfSSwFJRNSYmS6g13bMa1eMEhGRhfkCOicsEhHpMl1AbxYVCQBoGmm6qhMR+ZXpouIDo3sAAC5WVge5JkREocV0Ab1ZlOmqTEQUEKaLjrwXSkSkz3QBPYIRnYhIl+kCOhER6TNdQGcPnYhIn+kCOuM5EZE+0wV09tCJiPSZLqAznBMR6TNfQGdEJyLSZcKAzohORKTHdAGdiIj0MaATEYUJBnQiojDBgE5EFCYY0ImIwgQDOhFRmGBAJyIKEwzoRERhwlBAF5HxIpIuIhkiMtNNuREiUi0id/muiq4pxQ2jiYhqeQzoIhIJYD6ACQAGApgsIgNdlHsNwFpfV5KIiDwz0kMfCSBDKXVUKVUBYDmASTrlngTwBYBcH9aPiIgMMhLQuwLIsnmerR2zEpGuAG4HsMDdhURkqogkiUhSXl5efevq5IeMsw2+BhFRuDAS0PWyYTkOXr8N4M9KqWp3F1JKLVRKJSilEmJjY43W0aUTBaUNvgYRUbiIMlAmG0CczfNuAE46lEkAsFzLhNgBwK0iUqWU+sontXShsqrGn5cnIjIVIwF9F4A+ItITQA6A+wDcb1tAKdWz9rGIfATgW38HcyIisucxoCulqkRkOiyzVyIBfKiUOiAi07TzbsfN/Ym50YmI6hjpoUMptQrAKodjuoFcKfVww6tlDOehExHV4UpRIqIwYeqAzv45EVEdUwd0IiKqw4BORBQmTB3QeU+UiKiOqQM6ERHVYUAnIgoTDOhERGHC1AGdQ+hERHVMHdCJiKgOAzoRUZgwdUBnLhciojqmDuiBsGr/KWxIOxPsahAReWQo22Jj9r+fJgMAjs+dGOSaEBG5Z+oe+urU08GuAhFRyDB1QN+deS7YVSAiChmmDuhG5BReRHmV272riYjCgukDek2Nwsb0XN0ZLxVVNbhm7gbM+CwlCDVrmKrqGoyZtwGr9p8KdlWIyCRMH9A/2X4cj/xrF77Z5xz4qmpqAAAbDuUGuFb1U12j8MZ36SgsrbAeKy6rQlbBRTy3Yn8Qa0ZEZmL6gJ517iIA4ExRmVevzym8iPNllb6sUr1tSMvFPzZk4C9fHwhqPYjI3Ewf0KuqLb3wzIILXr3+mrkbMOHtrb6sUr1s/jkP3x2wzNYpr6wJWj2IyPxMH9A/3p4JAFiy40S9XnexohqV2odBTuFFn9fLqIc+3InPd2cH7f2JKHyE7cKid9YdRu+OLQEASicv44AX12BIXNtAV6vemN2AiIwKq4CeVVCKuHYtAABvrfvZY/mUrEJ/V8mtrIJSu+d6HzxEREaZfsjF1oWKKkPlQmVe+pPL9ngsIxKAihBRWAirgG50eKLfrDX+rYhBNQ4VFjB6E5H3wiqgExE1ZmEZ0DemhfZCIlfWHDiNNan2C6R4U5SIjArLgH6yKHjTEBvq7+szoJRCbnF5sKtCRCYTlgG9odaknsY76w7bHTt3ocJFad9b/MMx3PL2FgC8KUpExoVVQJ/wzlYczStxWnGppXTB/I0ZiJ+Z6PE605bsdpr2+F8Xi3+UUqiu8d24iAKw9fBZn12PiBoPQwFdRMaLSLqIZIjITJ3zk0Rkn4jsFZEkEbnW91U15sY3NmP2twftjlVU12B3ZgFeX5vu8/db/MMx9H5ulVc9eL3x8YzcYh/UiogaI48BXUQiAcwHMAHAQACTRWSgQ7H1AIYopYYCeBTAIl9XtKG2ZeT75bqfJ1l67meKvUsO5qiy2j7K86YoERllpIc+EkCGUuqoUqoCwHIAk2wLKKVKVF1C8pZA+C15fHdjRrCrgPiZiXh9bVqwq0FEIcpIQO8KIMvmebZ2zI6I3C4iaQASYemlOxGRqdqQTFJeXp439Q2aoovOKXZ3Z55D+hnvh0i8ueE5f+MRr9+PiMKbkYCuF3aceuBKqRVKqf4AbgPwst6FlFILlVIJSqmE2NjY+tU0ROQVl1tzsNz5z20NupaR4RS9oP/epgw8sTS5Qe9NROHHSEDPBhBn87wbgJOuCiultgDoLSIdGli3gHrso11YttNzCt4Rc9ZhzLyNfq3LtiN1s1yqq52j/rw16UjU2aGJiBo3I9kWdwHoIyI9AeQAuA/A/bYFROQyAEeUUkpEhgNoCsA/dyG9lO9hFsr6tFysT8vFG995ztIIADuO+q95tjdGi8uNJRwjIvLYQ1dKVQGYDmAtgEMAPlNKHRCRaSIyTSt2J4BUEdkLy4yYe5Xers1B9NG244bKnS0xtkJz8gc7dI/PW5NmaK57fRzNK/Hp9YgoPBnKh66UWgVglcOxBTaPXwPwmm+rFtpcfVy9t8n3Ny335xShV2wrnTooCJeSEpHGlCtFIyMaXxCr0VmN6sMFqkQUBkwZ0GdNHBDsKnhld2YB4mcm4kR+qefCDtztwDTjs714b1Pw58kTUXCZMqBHmbSHXruq9Mcj9c/VsmJPjstzXybnYN
4a36c1ICJzMWVAD5aThcbT8rq7J+zN7eLsc+ZNCUxEgcGA7kZZpf3eo9OW7G7Q9fxx/9KXmR6JyNxMGdADFcL6v2C/9+i+7CKXZauqlV2eFXe98Iqqahw8ed5wPdzNZPkmxeUaLyJqZAxNWww1oTXD3WLV/lMepyx+tccSfF/6xpLet1NMswa/b2lFtedCAG5/70ecLSnHa3cORtLxc5g2tjdeW5OGHUfzkfjUmAbXg4iCz6QBPfQiekVVjdvzu44X4KLDEE5xmbFVoG7H4w1+X9lzohAAcP8HPwEAYqKjsPiHY4ZeS0TmYMohl1DkOJTtGGZzz/t+j1BPH2xH80rwzOcpqKp2/rAp9/ABREB+STmuenU90k4bHx4jCiYGdB/58Efn3m5ZZTXiZya63L7O6D3S5MxzusdPFpbh+RWp1ucXbYZflFK48Y3N+Hx3Ng54GK/39O2isdqYnodTRWVYuOVosKtCZIgpA3roDbg425CWiwItIdgb3zVsjvjH2zN1j1/3un3WxwEv1t3ETdxfl43x4CnngG57n7XvrNVe1WvbkbOIn5mII8w1QxQSzBnQTRDRn/jUPl95oFOu5JfUZZdctd851W766YYH4ZXaTd6dxwoafK1Q9NyK/cGuAlG9mDKgX9bROVFVqKmorsHVczcAcP0B5I/PpSU7MjHl4yS7Y3q7LX2RbD8MVFha/02uq7QbB+GaW6d2KOrLZP1Vukt/OoEvXAynEQWDKQP6dX3NtdvR6fNlAevFzvoqFesOnbG7Yepu/nytm9/aUu/3qtHew6ypGBrquRX78fTnKcGuBpGVKQO6GenlYzc6hzwQcovrPwunQps9k3xC/6YtEQUWA3qY8mY4J9/g5h61arfBW7LD89Z9ROR/DOhkdeUr64JdBSJqAAZ0IhPIzL+A4jLnm9tEthjQw1QgpnaOjG/n/zcJklDbx3Xs65twx3vbgl0NCnGmDeiXXxqje3zF/14d4Jo0PtU1CsfOXjC+1NVkvt13Eje+sTnY1XByODe0PmQo9JgyORcAJD41BvEzE52O9zbBHPVAmP3tQb9ct7K6Bn2et6wsDdfpip5SJRCFKtP20F2JCPSSzEbm2311+deruLkGUUgJq4AuAoRppzFoikorsWjrUZzXbshVVjOIE4WqsAroe1+8GRKuA7tBMuOzvXgl8RAGv/RdsKtCRB6YOqB3u6S53fM2zZsEPAlWuImfmYhZX9UlpTpTXGZ3nj9eotBl6oBuOzUvrl1z1wWpXmpXfsbPTERqDm8QEpmFyQN6XUT/6JGRAMJ35oVZlVZUoVJnx6RQVljKBTxkTqYO6LcM6ux0LCoyAosfSghCbcKf3lZ27ry+Ng0DX1yLRz/a5aca+ceFcmN7vRKFGlMH9FkTByIm2jKV3rZf3ikm2q7c1Ot64W93Dwlgzcwv+1yp7nGpx02K+RuPAAC2Hj7rdT0ulFt6+OeDvOzd3YdZ7vkyl+eIAsnUAT0yQtCuZVOn4wO7xODxMT2tz5+7dQDuurIbrurVPpDVM7VFW533SPU0YfFsPbI1Ju475TFH/MWKalz+l7Xo8/xqDH7pO5RXBS/d8PSle+yel1XW1SWYHzavr03DDX/bFLT3p9Bi6oBuy7bnGBEheH7iwCDWxvz08re7k3ziHBJeWYcXV6Z6LgzgiaXJuOf97fh230ks26mfftdxtWsw58CvOXDa+vjAySL0f6Fu/9ZDp4qDUSUAlm9Bx85eCNr7U2gxFNBFZLyIpItIhojM1Dn/axHZp/3ZJiIBG994bEwvAEBs62Yey0ZF8oZpQ7n6CR7Ulst/sj0T8TMT7Xqw7kxfugfPfrkfS3ZkYtHWo3bnXAX6YHPcAerJZXuwWmffVqJA8xjQRSQSwHwAEwAMBDBZRBy7v8cAjFVKDQbwMoCFvq6oKw+O7oHjcyeiVTPPaWk6trYfW//1qO7+qlZYysx33RN0nMlyqqh+48qzvkrFK4mHvKpXKGDiLAoFRnroIwFkKKWOKqUqACwHMMm2gFJqm1Kqdh+yHQC6+baavqFsRoEnXtEFT97YJ4i1MZ9fvLkFX+3V3zDZyL6l4SLn3MVgV4FIl5Fsi10BZNk8zwYwyk35xwCs1jshIlMBTAWA7t393zt+7c4rsD9HP9D06eQ6K2Ns62bI82KPzcYgJatQ9/iKPfqB3tGXydlYn5br1XurQCR5N+DdjRnBrgKRLiM9dL1hU93/WSJyAywB/c9655VSC5VSCUqphNjYWOO19NK9I7rjlduuqKufTVNG92qPVtH6n2d/+EVfv9fNrBoaUmd8lmLdi5SIfMtIQM8GEGfzvBuAk46FRGQwgEUAJiml8n1TPd/q0NoyxfEfk4dZAnqzKKT85Wa7Mj/OvBGTR8Yhocclwahi6DMY0dNPNyxlQLXJUvNyMRKFAiNDLrsA9BGRngByANwH4H7bAiLSHcCXAB5USv3s81r6yIxxfXFZbCv8cnAX67E2zZvYlWnVNAoi0uCeaLgqNhi4Vu51+syvl+Evf+90TG9RU0l5FS5WVBua5WSUNwneThToL8QiCiSPAV0pVSUi0wGsBRAJ4EOl1AERmaadXwDgRQDtAbyn/aerUkqF3Pr7ZlGRuDshzm2Z2hun3LyhYVannvZcyI2ii86LdfTG0G95awtyCi/i+NyJDXo/W6cKufKTzMnQPHSl1CqlVF+lVG+l1Bzt2AItmEMpNUUpdYlSaqj2J+SCuRFP3niZtcdeXWOZhjf9hsuw6qkx1jKje4XvxsihzjGcX6yoRk6h+xknSilsTMut1xDOzuPuV7AShaqwWSnqC0/f3M/6tX5AZ8sm1PeOiMNAmw2pl0+9Cpdx39J6e3tdw0fiHDvoU/+d5Lb8+kNnsCk9D498tAtvfe/8/kUXK+uVh+WpZXuw0sW0TcdhmjWpp3Tf01988fMl8zPtJtG+NO/OwegV29Lu2Mu3DcLkUd0R166FU/l1M8aisLQCQ2c7j/OSvrfXHcbvGzh7aM+Jc7i+X0cAwMa0XLdJv7YdOYvHPq4L+IdO2d+kXZN6CtOWJAOA4eGar1NO4usU/XsDjjtl1V77D+Oc23y+rBKqBmjToonTOW+9ve4w7h0Rhy5tuC9AY8YeOoB7RsQhId5+KCW6SSSGd6+b6fLbsb3szrdtUZcUrEOrusfcMcl/Hv5XXRrelGz9+fC1Dp+xX7npOOBSG3CDYfBL32HIbN9v6VdWaa688+R7DOgGPTthgJuenNg94pCM/5VWuM8VE/BFSPwgpxDAgO4DtpskNY2KwLoZY3G/TZ6Y5VNHB6FW5rQh7Yzb8ws2H0H8zEQs3HLU6dwr3x7EO+sO674uVFaZ2mKWRPI1BnQfuH1YV+vjqAjnH+lo5mE37ICHPUzd3fxb9MMxvKWdr89GHL7gzbvd8LdNKPZhLvWaEPzQosBiQPeBP4/vj9S/3oIHRnfHp1P009yM6dMhwLUyp6TMc27Pu5p9eCLffmGPY488VENdfce9n/1yn8tz/96e2dDqkMkxoPtARISgVbMovHLbFRgS11a3TG186dqWsxDcyS0ud
7+M3kVkvu71jdbHP59x3nAiXDqvy3ZmuTyXxdWqjR4Dup90u8QSuAd3a2N3fPaky/GfqaPR1odT1szC0yIgwDK98Ffv/uDyvDLQ1775rS0ue/KenCpialwyLwb0Blj/9FjsePYm3XNNtLH0Edp0yNpA1CwqEqN6tcevhlwamEqGkGvmbjBU7kieu400jEXqRIcdhDy9atnOE8gqKMUHW5z3UjXC2zF7Iy9bd9D9jWKiWgzoDdA7thU6t4nWPXf/qO6468puePLGywAA08b2BgAM6mpZdfqX/7kcTaP44/cXxy3wbMfUN6bnOpV99sv9uHvBdq/fzzYuZ58zPvRhZChoyifuV8Ra68Cpk40eI4qftGwWhb/dPcS6AGlMn1gcnzvR+jwyQjD7V5fbveaGfv7PEd9YbT18FodOncfRvBI8YrNAyda50gqvr/91yknM1za+2HnMu1wwiftOoc/zqzzux1peVY2dxwpQWmF/ryFc7hOQ9xjQg8i2R3Vpm2h88BtT5jQzjQnvbMX/Ld/r8nx5VcNWWr6+Nh1rGpBlct7aNFRWK4/7sc5akYp73t+OZ/5rP+Ml/0IF1qRy85DGjAE9RGx65gZERfKfw1dcDT+42pLQV6Yt2Y3MfO9mm0RqlbbNDPmH/zh/AH2+OxsAkOSQFXJvViGmLUlGwQXvv2mQuTGCBFGv2LoUAbU3TR++Oj5ItQkvqR4WKPmTpyETW7YfPBHakuPFPxyzDtu426v13AX9RUlV1czp0lgxoAfRCJuEYLXjny85jKuT/yWfcL+YKVBqe+jLdp7APe97f4OWGi8G9CBr37Kp50L1dPPATj6/Zjg7dMp5IVJD5BaXe/U6zlKhhmJAD7LPp12FWRMHILpJpO75lk3rjndpE40OrTzvnfnotT19Vr/GwNeJu9wNk7iyO7OAybqowRjQg6xXbCtMGdNL91zrZvb7j/z7sVGc2ugHobB/7J3/3F7vWTZGVs1S48KAHoIWadMXm0ZF2K1AvKxjK0Nfy2v3RSVj5q5OC+r7O84nJ/IWA3oIulbLzPiHcX2tKxAXP2R8jvqALjGeC5GuQI9jC4DCUt+l0KXGjQE9BEU3icTxuRPxwOge1jXljlvk+VN3nX1UGwtf95Y9LTRqyKAJV4aSIwZ0k7k7Ic7v7/HanYP9/h6hyl16Wm98sNV5ZyW/0zoBP58pxosrU1HJeemNBgN6iKvNn167zZ3t3PXW0VF6L3HSsbXnmTG2rurdnonDfGR35jnkFZdjTuJB3dS8Gbkl+Gjbcb+899vrfsYn2zNx4GTwFllRYBmLCBQ0nzw2EjuPFaB1tPONzm+mX4ukzHP44+cpAIBrL+uAqEj7QeBxAzvh/QeuxIGT57Eq9RT+uemI3fkXfzkQ1TUKc1YdAgDrBh3Nm0SiooG5TchixJx1AIADJ89j6eP2+8vet3CH3943p9CSE4Zb0zUe7IaFuI6to/HLwfq50+M7tMRdV3azPl8yZRQ+emQkgLoe/Qe/SUBEhOCKbm3w5/H9na4RGSG4aUBHAMDSx0dh5RPXALDPJ0K+4eufqaurLdx8FPEzE1FdEz4fyKeLyjDuzc3cgMQD9tDDwJzbByH3vP3qxMSnxmDr4TyPrxWxzIU/Pnei3fEBXVpj1/HQWBIfLuo7g8bbLeUW/WDZpONihSWnTDh00JfvOoHDuSVYtjMLM8b1DXZ1QhZ76GHg16N64A8Ov+QDusRg6nW9ncrWbrjhyaKHRvikbuS9MfM2ei7khrudnyg8MaA3MvHtW9o9d9Vp5OIk3xOXP23vGE9ZELgu+tmScr8M14XDt4xAYEBvZMqq6lK79mjfAuMHdQlibRoXEe+HUfyhqLQS//rxmNsPhtnfHMQTnyYbul5+STkSXlmHF1am2h2Pn5mIa1/bgK/qkeOmrLIab33/M8qrjKciJo6hNzpREXW9xM3P3BDEmjROt76zNdhVsPrTFylYe+AM9pwoxPX9YnH7sK5Om11/+KNlPH6+9vzY2Qto27wJLrHJEvpNyknszynCPQmWG/RLfzqB6mqFKWN6WqdkZp+7iN//Zy9uG9bVUN0W/3AM76w/jHfWH8YW/p4aZqiHLiLjRSRdRDJEZKbO+f4isl1EykXkj76vJtlq0TQSfTu18lxQx53Du3kuRH4hAhSX+24lqtGRDVcd8DPajfSvU05ixmcp2H4k33rubEk5CnX2WL3hb5tw4xub7I49uWwPFm45isrqujf6T1IWxr21BZ/+dMJt3Y6dvYDEfXXb5v10NB/7sgvtNgmZtmQ33ll/2O11yMJjD11EImH5gB4HIBvALhH5Wil10KZYAYCnANzml1qSnYOzx3v9Wm+3uRvdqx12HK3b8uyay9rjx4x8LHlsFB5Y/JPX9aHQYfthk/DKOrtzySfOWdclnHORe8abtAk3vbEJNQqYONgyy+pebV6+7c17zqM3zsj/7pEAMpRSR5VSFQCWA5hkW0AplauU2gWAWYbC1CePjtI9zhSuxv2Yke+5kB/U9uQXbjmCE272O11/6AyW7MjUPXfHe9v8sgiKyx18y0hA7wrANsFFtnas3kRkqogkiUhSXp7nOdLkH/PvH47bDYxl2o5dNo2KQP/Ora3PbWdstNLytt96RWd88burfVhT8oV73t+OM+fL8P9WpeH+Ra6D8mdJ2Zj1lfe5X9iRDj4jAV1vrpVX/3RKqYVKqQSlVEJsLDdqCJaJg7vgrXuHeizXvb1+1sV37qt7rVLApKGWlaxv3jMUV/a4BADQq0NL3ddScNy9wLJHaWmF51kjpeWey+SeL2twncj3jMxyyQZgm+KvG4CT/qkOhZonbuiNCG3mw/X9OiLtdDFG92qP/+7OtpaZPWkQ/nRLf+s2eiv+92r0aN8Sw1/+Pih1JmcnHKZLVteoBs0XP1FQivNlVdifU2g9xj1Rg89IQN8FoI+I9ASQA+A+APf7tVYUMp65pb/N43546Ooe6BQTbT2mYMkH06ZF3UKkYd0tvfSPHx2J6KgI640uCh13LdiG/TlFDbrGL97c7KPaOONng3c8BnSlVJWITAewFkAkgA+VUgdEZJp2foGIdAaQBCAGQI2I/B7AQKUU83aGkcgIQZc2lnS+j17TE1sPn8Xll7reHWlsXw6rhao9Jwo9F3JDrzfOMfTgM7SwSCm1CsAqh2MLbB6fhmUohhqJG/p3dEroRY1HQ4N3VkGpofF8qh+uFCW/W/jglejSpjkW/XAUK/fy9gsZSDzGAXmvMJcL+d3Nl3fGFd3a4P6R3d2W+9vdQwJUIwoHRaWVqOFEdjsM6BQw8R6mMt51ZTdOdzSJYHegz5aUY8js7/D2up/tjr+3KQMpWYUoraiySynQWDCgU8B0ionG4TkTcP8oS0/9jmFdrQmdam344/U4Pnci9rwwDl3aROtdhghnSyx5aP6+IcPu+Lw16Zg0/0fMWpGKJ5YmY392w2bymA0DOgVUk8gIxGsLlsYP6ozOMfpB+5KWTbH+6bH47LdXWY/NnNAfg7q6nlVDoe/pz1Jw4KT/g2z2OctWdRe8yC9jZrwpSgH3
2LW90K9zDK7r0wGpbnakb9E0CiN7trM+nza2N6aN7Y34mYmBqCa54e1uSF8kZ+OnY8HJadMYsIdOARcZIRjbNxYigpv6WzaojneRZgCwbKf33K3OG1xT/RVcqEBecbnngh786b/7vH5tbe/ZFjey8A320CmohsS1xc7nbkI7mw0THK3+vzF2z/t3bo2008X+rlrYCsSQR331m7UG+1662VDZjWm5mPrvJD/XyJzYQ6eg6xgTXa887T1c9Oaba7lkyL1Qneh37kLdhhruJtE8/XmK3WYa7qxINr7tXThgQCfTefWOwZgxri9+Pao7Wmupe6de1yvItaKGSrf51nXGR9kc/5OU5blQGGFAJ9Np17IpnrqpD+bcfgVev3swAODuKz1nnnAcummsPM3Pvnfhdq+u+0UDe8NbDtftkZBTWDfO7rjPqS9UVdfgD//Zi4zcEp9fO5wxdQ0AAA1CSURBVJgY0MnUxg/qgow5E9CnU2vM//UwjIxv57LsgC4xWPSbBDw+pqf12OxJlweimiHFNvWxHm/vTyzb6X7/UE9s88OUV9ZtsnHoVP1z/O08Xrdd4vubjzjlb9+fU4QVe3Lw9Ocp1mPnyyrxl5WpyC3W/3Yw8e9b8YWHn12wMaCT6dWOv9/YvxM+m3aV0/l/PzYS62aMBQD8YmAnTL+hDwAgJjoKk4Z4tfkW+YFtR9w2IHtiu6G0nldXp2H6sj0er/POusP4eHsmRs5ZbzeeX+vAyfN2HwChiAGdws7GP16PB0bX5Y0Z0ycWl3VsZX3erInl1/7yS9sw8XYIWbLDux7+9a9vQtFF99sZl5R5XmBUZbP1Xr5DQFcmyQ3MaYsUdnp2aIlXbrsCdwzvhpho51/x6CaR+OJ3V6NPp1Zo3SwKT93UBzf174hJ8390ec07hnVFTPMmmDi4i3U7Nwq8v68/7HTs9PkyTF+ajAmDuhi6hlIK5VXu9011HLZ3lwPsp6P5qFYKV/fuYOj9/YkBncLWcG3nJD21e58CwIxxfQEA6a+MR9PICPR8dpVT+TcN7MFKwbMvuwhbD581VHbJjky8sPKA2zKOHXJ32/XV7sgVCvsDcMiFSNMsKhIigsgI++7Zn8cbW6X698nD/FEtMqDG4JDIrK/2ewzmDbl+sDGgEzn44c83YOUT11if12aH1NOzQ0v079waANC9nev0BRR8Fyuq3Y7Tu5se6U1AL7hQoXtz1Z845ELkoEub5ta9UwGgTfMmuuXSXh6PCBEcPVuC19ekY2AXZoIMZQNeXOP1a90Nuej5YMtRzFl1CABweM4EXCivQtsWrtNb+Ap76EReim4SiaZREejfOQaLHx6BplH2/51evm0QerRvoZujpE/HVpg09FK3m2xTYLmbyVJjcw/1423H3V6noqrGGswB4MmlezB09vcNrZ4h7KETubB86mik5jgnsrqubyy6ttXP475t5o1oGhWBDq2aAQAeHN3D7vzztw5A5zbR+J8hlwKwJMp6cPFOFAT4q3m4KfYwLfGgF4uTbFXbBPu/fH0AD10d77Ks4/DMmgOnAVg25aj9vfAX9tCJXBjdqz2mjHHOEfPJoyPx6h2DdV9zadvmbv/TThnT0xrMActc+OQXxjGxWJCkZBXiV+/+gBP5pQ5j6PZB2dUYen2GYhJeWedNFeuFPXSiAHJ1423zM9cjt7gcTSIjkFdcjgcW/2R3/sHRPfDvHZkALNPjfr98D77ae9J6/pbLO2H2pEHIKbyIO97b5r8GhKF92UW47vWNdscc43SVQ3bHsspqRDeJxBOfJluPFV2sxNOf7cW6Q7lu32/Kx0m4+fJOuCchrmEV18EeOlEA/PVXl+ORa+Jdnu8YE41BXdugX+fWuLZPB7x17xB8M/1a6/khcW3dXr97uxboFBONYXFt8dqdVyCuXXO35cm9zPxS65j6oq1HMfrV9Xbn+7+wBp8lZVmHUwBg/NtbPAbzpOMF2PxzLo6d9W7HJ0/YQycKAHdjrnpuH2bJHnl87kRkFZSi2yXNkX76PD7Sbsg5ftGvzWcjIrh3RHfszSrEsp2NK3WsLz3+SRLm3TkY94yIwyuJh3TLOO7adKrIc8rfr1NOorJaIcJPKScY0IlCXJw2v/35iQPx/MSBAICYaMtUynl3Dcbmn/Mw1WGs3yTrYELan77Yh6xzpT695ifbLcNmEX5ICQwwoBOZ0swJ/dGjfQvcNbyb7lhsNG+y+sQ/NmT45br+ygnHMXQiE2rZLApTxvRChIvv7k/f3BcdWzvPtrFdAfu0lsOGAs8fm3YA7KEThaXW0U3w/Yyx+M2HOzFr4gDsPVGI8qpqDO7WBhueHouK6hrEXdIC247kY/vRfLvXDugSg9uGXopXV6e5vP7YvrHIv1CO1Bz387uHxrXF3qxC3XMtm0biQoX7XObhyldb7DliD50oTLVp3gQrn7gGI+Lb4fHremH6jX0gIugV2wr9O8egZbMoLJs62u41Cx4YjqVTRuG3Y3vj+VsHYPLIOGTMmYCX/megtUxUhODjR0di4hWXOr4l5tw+CMdevRXP3NIPAHDtZR1waPZ4uzJd2zbH42N64k9a0rN+nVpbz/3u+t5u29S1bXjM3lm+yz83rNlDJ2rkOrRqhuv7xWLenYPthnAet9l4++FreuKuhDik5hShj7ZZiN5im1+PsqyMffjqeGQVlOK3Y3uhedO68fx/PTICw+Laom2LpigqrcSq/afw5r1DUVxWiUOnzuP2Yd1w/8juGDNvIy5tE42TNjNH1vx+DFo1i8KLKw/gpgEdcU9CHJIzz2FY90uw9XAeHvs4yec/G7MRIztxiMh4AO8AiASwSCk11+G8aOdvBVAK4GGlVLLThWwkJCSopCT+AxCZ1bkLFXhq+R68ec9QxDSPQklZFdq7WCU7f2MGYpo3cUqF4E72uVL8bkkyFj+cgI6t9VMt2NqbVYjW0VHo0KoZ2jRvgg1pZ5B+ugT3johDXnE5+nVujb9+cwD/+vE4AGDZ46Mx+YMddtcY06cDXrtzMK6euwEAMHlkd+zOLMDU63rjj5+n4I7hXfFlcg4iBHj/wQSsP3QGy3dlYVTPdvjpmPO2eY9e0xMf/ngMHVo1w9mScuvxPS+MwyUtvUvWJSK7lVIJuuc8BXQRiQTwM4BxALIB7AIwWSl10KbMrQCehCWgjwLwjlJqlLvrMqATUaCVlFfhH+sPY8bNfdEsKhJZBaXIv1CBoXFtUVOjINKwG5ZZBaUouFChuxBMKYWFW47i3hFxDcq82NCAfhWAl5RSt2jPn9Uq96pNmfcBbFJKLdOepwO4Xil1ytV1GdCJiOrPXUA3clO0KwDbEfxs7Vh9y0BEpopIkogk5eXlGXhrIiIyykhA1/v+4ditN1IGSqmFSqkEpVRCbGyskfoREZFBRgJ6NgDbpWjdAJz0ogwREfmRkYC+C0AfEekpIk0B3Afga4cyXwP4jViMBlDkbvyciIh8z+M8dKVUlYhMB7AWlmmLHyqlDojINO38AgCrYJnhkgHLtMVH/FdlIiLSY2hhkVJqFSxB2/bYApvHCsATvq0aERH
VB5f+ExGFCQZ0IqIwYWjpv1/eWCQPQKaXL+8A4KwPqxNMbEtoCpe2hEs7ALalVg+llO6876AF9IYQkSRXK6XMhm0JTeHSlnBpB8C2GMEhFyKiMMGATkQUJswa0BcGuwI+xLaEpnBpS7i0A2BbPDLlGDoRETkzaw+diIgcMKATEYUJ0wV0ERkvIukikiEiM4NdHz0i8qGI5IpIqs2xdiLyvYgc1v6+xObcs1p70kXkFpvjV4rIfu3c36UhW6l41444EdkoIodE5ICI/J+J2xItIjtFJEVry1/N2hatDpEiskdEvjV5O45rddgrIkkmb0tbEfmviKRp/2euCnhblFKm+QNLcrAjAHoBaAogBcDAYNdLp57XARgOINXm2DwAM7XHMwG8pj0eqLWjGYCeWvsitXM7AVwFS7751QAmBLgdXQAM1x63hmUrwoEmbYsAaKU9bgLgJwCjzdgWrQ4zACwF8K1Zf7+0OhwH0MHhmFnb8jGAKdrjpgDaBrotAW2wD35gVwFYa/P8WQDPBrteLuoaD/uAng6gi/a4C4B0vTbAktXyKq1Mms3xyQDeD3KbVsKyt6yp2wKgBYBkWPa/NV1bYNlvYD2AG1EX0E3XDu19j8M5oJuuLQBiAByDNtEkWG0x25CLoa3uQlQnpeWI1/7uqB131aau2mPH40EhIvEAhsHSszVlW7Rhir0AcgF8r5Qya1veBvAnADU2x8zYDsCys9l3IrJbRKZqx8zYll4A8gD8SxsKWyQiLRHgtpgtoBva6s5kXLUpZNoqIq0AfAHg90qp8+6K6hwLmbYopaqVUkNh6eGOFJFBboqHZFtE5JcAcpVSu42+ROdY0Nth4xql1HAAEwA8ISLXuSkbym2JgmWY9Z9KqWEALsAyxOKKX9pitoBu5q3uzohIFwDQ/s7VjrtqU7b22PF4QIlIE1iC+adKqS+1w6ZsSy2lVCGATQDGw3xtuQbAr0TkOIDlAG4UkSUwXzsAAEqpk9rfuQBWABgJc7YlG0C29q0PAP4LS4APaFvMFtCNbIcXqr4G8JD2+CFYxqNrj98nIs1EpCeAPgB2al/PikVktHaX+zc2rwkI7X0XAziklHrT5pQZ2xIrIm21x80B/AJAGkzWFqXUs0qpbkqpeFh+/zcopR4wWzsAQERaikjr2scAbgaQChO2RSl1GkCWiPTTDt0E4CAC3ZZA3wTxwc2HW2GZbXEEwPPBro+LOi4DcApAJSyfuI8BaA/LjazD2t/tbMo/r7UnHTZ3tAEkwPILfgTAu3C44RKAdlwLy9e9fQD2an9uNWlbBgPYo7UlFcCL2nHTtcWmHtej7qao6doBy7hzivbnQO3/ZzO2RavDUABJ2u/YVwAuCXRbuPSfiChMmG3IhYiIXGBAJyIKEwzoRERhggGdiChMMKATEYUJBnQiojDBgE5EFCb+Pz0X/pj2x/PZAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plt.plot(loss_value)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "file_path=\"./net_trainned6000\"\n", + "save_net(file_path,net)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Apprentissage_MSELoss_avec_GPU.py b/Apprentissage_MSELoss_avec_GPU.py new file mode 100644 index 0000000..84ab333 --- /dev/null +++ b/Apprentissage_MSELoss_avec_GPU.py @@ -0,0 +1,424 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +#Tous les codes sont basés sur l'environnement suivant +#python 3.7 +#opencv 3.1.0 +#pytorch 1.4.0 + +import torch +from torch.autograd import Variable +import torch.nn as nn +import torch.nn.functional as F +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import random +import math +import pickle +import random +from PIL import Image +import sys + + +# In[3]: + + +#Les fonctions dans ce bloc ne sont pas utilisées par le réseau, mais certaines fonctions d'outils + + +def tensor_imshow(im_tensor,cannel): + b,c,h,w=im_tensor.shape + if c==1: + plt.imshow(im_tensor.squeeze().detach().numpy()) + else: + plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:]) + +# Obtenez des données d'entraînement +# frag,vt=get_training_fragment(frag_size,image) +# frag est un patch carrée de taille (frag_size*frag_size) a partir du image(Son emplacement est aléatoire) +# vt est la vérité terrain de la forme Dirac. +def get_training_fragment(frag_size,im): + h,w,c=im.shape + n=random.randint(0,int(h/frag_size)-1) + m=random.randint(0,int(w/frag_size)-1) + shape=frag_size/4 + vt_h=math.ceil((h+1)/shape) + vt_w=math.ceil((w+1)/shape) + vt=np.zeros([vt_h,vt_w]) + vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2) + vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2) + vt[vt_h_po,vt_w_po]=1 + vt = np.float32(vt) + vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w)) + + return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt + +# Cette fonction convertit l'image en variable de type Tensor. 
+
+# This function converts an image into a Tensor variable.
+# All data used in the network's computations is of type Tensor
+# Img.shape=[Height,Width,Channel]
+# Tensor.shape=[Batch,Channel,Height,Width]
+def img2tensor(im):
+    im=np.array(im,dtype="float32")
+    tensor_cv = torch.from_numpy(np.transpose(im, (2, 0, 1)))
+    im_tensor=tensor_cv.unsqueeze(0)
+    return im_tensor
+
+# Find the coordinates of the maximum value in a correlation map
+# x,y=show_coordonnee(correlation map)
+def show_coordonnee(position_pred):
+    map_corre=position_pred.squeeze().detach().numpy()
+    h,w=map_corre.shape
+    max_value=map_corre.max()
+    coordonnee=np.where(map_corre==max_value)
+    return coordonnee[0].mean()/h,coordonnee[1].mean()/w
+
+# Filter patches according to the number of black pixels they contain
+# Return True if the proportion of non-black pixels is above the threshold (seuillage), otherwise False
+def test_fragment32_32(frag,seuillage):
+    a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]
+    mask = (a == 0)
+    arr_new = a[mask]
+    if arr_new.size/a.size<=(1-seuillage):
+        return True
+    else:
+        return False
+
+# These two functions save the network to a file
+# or load a stored network back from a file
+def save_net(file_path,net):
+    pkl_file = open(file_path, 'wb')
+    pickle.dump(net,pkl_file)
+    pkl_file.close()
+def load_net(file_path):
+    pkl_file = open(file_path, 'rb')
+    net= pickle.load(pkl_file)
+    pkl_file.close()
+    return net
+
+
+# In[4]:
+
+
+# The functions in this block are used to build the network
+
+# Create a DeepMatching-style weight as the initial value of Conv1 (optional)
+def ini():
+    kernel=torch.zeros([8,3,3,3])
+    array_0=np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype='float32')
+    array_1=np.array([[2,1,0],[1,0,-1],[0,-1,-2]],dtype='float32')
+    array_2=np.array([[1,0,-1],[2,0,-2],[1,0,-1]],dtype='float32')
+    array_3=np.array([[0,-1,-2],[1,0,-1],[2,1,0]],dtype='float32')
+    array_4=np.array([[-1,-2,-1],[0,0,0],[1,2,1]],dtype='float32')
+    array_5=np.array([[-2,-1,0],[-1,0,1],[0,1,2]],dtype='float32')
+    array_6=np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype='float32')
+    array_7=np.array([[0,1,2],[-1,0,1],[-2,-1,0]],dtype='float32')
+    for i in range(3):
+        kernel[0,i,:]=torch.from_numpy(array_0)
+        kernel[1,i,:]=torch.from_numpy(array_1)
+        kernel[2,i,:]=torch.from_numpy(array_2)
+        kernel[3,i,:]=torch.from_numpy(array_3)
+        kernel[4,i,:]=torch.from_numpy(array_4)
+        kernel[5,i,:]=torch.from_numpy(array_5)
+        kernel[6,i,:]=torch.from_numpy(array_6)
+        kernel[7,i,:]=torch.from_numpy(array_7)
+    return torch.nn.Parameter(kernel,requires_grad=True)
+
+# Compute the initial weight of the "add" convolution layer
+# n, m mean that there are n * m sub-patches in the input patch
+# For example, if the input patch is 16 * 16: for the 4 * 4 patches of the first layer, n = 4, m = 4;
+# for the 8 * 8 patches of the second layer, n = 2, m = 2
+def kernel_add_ini(n,m):
+    input_canal=int(n*m)
+    output_canal=int(n/2)*int(m/2)
+    for i in range(int(n/2)):
+        for j in range(int(m/2)):
+            kernel_add=np.zeros([1,input_canal],dtype='float32')
+            kernel_add[0,i*2*m+j*2]=1
+            kernel_add[0,i*2*m+j*2+1]=1
+            kernel_add[0,(i*2+1)*m+j*2]=1
+            kernel_add[0,(i*2+1)*m+j*2+1]=1
+            if i==0 and j==0:
+                add=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))
+            else:
+                add_=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))
+                add=torch.cat((add,add_),0)
+    return torch.nn.Parameter(add,requires_grad=False)
+
+# Compute the initial weight of the "shift" convolution layer
+# A shift followed by an add implements one aggregation step
+# See above for the parameters n and m.
+# For the detailed derivation, see my internship report
+def kernel_shift_ini(n,m):
+    input_canal=int(n*m)
+    output_canal=int(n*m)
+
+    kernel_shift=torch.zeros([output_canal,input_canal,3,3])
+
+    array_0=np.array([[1,0,0],[0,0,0],[0,0,0]],dtype='float32')
+    array_1=np.array([[0,0,1],[0,0,0],[0,0,0]],dtype='float32')
+    array_2=np.array([[0,0,0],[0,0,0],[1,0,0]],dtype='float32')
+    array_3=np.array([[0,0,0],[0,0,0],[0,0,1]],dtype='float32')
+
+    kernel_shift_0=torch.from_numpy(array_0)
+    kernel_shift_1=torch.from_numpy(array_1)
+    kernel_shift_2=torch.from_numpy(array_2)
+    kernel_shift_3=torch.from_numpy(array_3)
+
+
+    for i in range(n):
+        for j in range(m):
+            if i==0 and j==0:
+                kernel_shift[0,0,:]=kernel_shift_0
+            else:
+                if i%2==0 and j%2==0:
+                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_0
+                if i%2==0 and j%2==1:
+                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_1
+                if i%2==1 and j%2==0:
+                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_2
+                if i%2==1 and j%2==1:
+                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_3
+
+    return torch.nn.Parameter(kernel_shift,requires_grad=False)
+
+# Extract the small patch (4 * 4) at row n, column m of the input patch
+# It is used to compute the convolution that yields a correlation map
+def get_patch(fragment,psize,n,m):
+    return fragment[:,:,n*psize:(n+1)*psize,m*psize:(m+1)*psize]
+###################################################################################################################
+class Net(nn.Module):
+    def __init__(self,frag_size,psize):
+        super(Net, self).__init__()
+
+        h_fr=frag_size
+        w_fr=frag_size
+
+        n=int(h_fr/psize) # there are n*m sub-patches in the input patch
+        m=int(w_fr/psize)
+
+        self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)
+        # To initialise Conv1 with the DeepMatching-style weights, uncomment the next line
+        # self.conv1.weight=ini()
+        self.Relu = nn.ReLU(inplace=True)
+        self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)
+
+        self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
+        self.shift1.weight=kernel_shift_ini(n,m)
+        self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
+        self.add1.weight=kernel_add_ini(n,m)
+
+        n=int(n/2)
+        m=int(m/2)
+        if n>=2 and m>=2: # if n=m=1, the network needs no further layers to aggregate the correlation maps
+            self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
+            self.shift2.weight=kernel_shift_ini(n,m)
+            self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
+            self.add2.weight=kernel_add_ini(n,m)
+
+        n=int(n/2)
+        m=int(m/2)
+        if n>=2 and m>=2:
+            self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
+            self.shift3.weight=kernel_shift_ini(n,m)
+            self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
+            self.add3.weight=kernel_add_ini(n,m)
+
+    def get_descripteur(self,img,using_cuda):
+        # Use Conv1 to compute the descriptor
+        descripteur_img=self.Relu(self.conv1(img))
+        b,c,h,w=descripteur_img.shape
+        couche_constante=0.5*torch.ones([1,1,h,w])
+        if using_cuda:
+            couche_constante=couche_constante.cuda()
+        # Append a constant layer so the normalisation below can never divide by 0
+        descripteur_img=torch.cat((descripteur_img,couche_constante),1)
+        # normalisation (relies on batch size 1: the [b,h,w] norm broadcasts over the channel dim)
+        descripteur_img_norm=descripteur_img/torch.norm(descripteur_img,dim=1)
+        return descripteur_img_norm
+
+    def forward(self,img,frag,using_cuda):
+        psize=4
+        # Use Conv1 to compute the descriptors of both inputs
+        descripteur_input1=self.get_descripteur(img,using_cuda)
+        descripteur_input2=self.get_descripteur(frag,using_cuda)
+
+        b,c,h,w=frag.shape
+        n=int(h/psize)
+        m=int(w/psize)
+
+        #######################################
+        # Compute the correlation map of each of the n*m small patches by convolution.
+        for i in range(n):
+            for j in range(m):
+                if i==0 and j==0:
+                    map_corre=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)
+                else:
+                    a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)
+                    map_corre=torch.cat((map_corre,a),1)
+        ########################################
+        # Aggregation step
+        map_corre=self.maxpooling(map_corre)
+        map_corre=self.shift1(map_corre)
+        map_corre=self.add1(map_corre)
+
+        #########################################
+        # Repeat the aggregation step until the correlation map of the whole input patch is obtained
+        n=int(n/2)
+        m=int(m/2)
+        if n>=2 and m>=2:
+            map_corre=self.maxpooling(map_corre)
+            map_corre=self.shift2(map_corre)
+            map_corre=self.add2(map_corre)
+
+
+        n=int(n/2)
+        m=int(m/2)
+        if n>=2 and m>=2:
+            map_corre=self.maxpooling(map_corre)
+            map_corre=self.shift3(map_corre)
+            map_corre=self.add3(map_corre)
+
+
+        b,c,h,w=map_corre.shape
+        # Normalisation by division by the maximum
+        map_corre=map_corre/(map_corre.max())
+        # SoftMax normalisation
+        #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)
+        return map_corre
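+
+# Minimal check of the constant-layer trick above (illustrative, synthetic
+# tensors only): with an all-zero descriptor the channel-wise norm would be 0
+# and the normalisation would divide by zero; the 0.5 constant layer keeps
+# the division safe.
+zero_desc=torch.zeros(1,8,4,4)
+couche_cst=0.5*torch.ones(1,1,4,4)
+safe=torch.cat((zero_desc,couche_cst),1)
+print(float(torch.norm(safe,dim=1).min())) # 0.5, never zero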
+
+
+# In[5]:
+
+
+def run_net(net,img,frag,frag_size,using_cuda):
+    h,w,c=frag.shape
+    n=int(h/frag_size)
+    m=int(w/frag_size)
+    frag_list=[]
+    #####################################
+    # Cut square patches out of the fragment and put them in frag_list
+    for i in range(n):
+        for j in range(m):
+            frag_32=frag[i*frag_size:(i+1)*frag_size,j*frag_size:(j+1)*frag_size]
+            if test_fragment32_32(frag_32,0.6):
+                frag_list.append(frag_32)
+    img_tensor=img2tensor(img)
+    ######################################
+    if using_cuda:
+        img_tensor=img_tensor.cuda()
+
+    coordonnee_list=[]
+    #######################################
+    # Use the network to compute the position of every patch in frag_list[]
+    # and store the results in coordonnee_list[]
+    for i in range(len(frag_list)):
+        frag_tensor=img2tensor(frag_list[i])
+        if using_cuda:
+            frag_tensor=frag_tensor.cuda()
+        res=net.forward(img_tensor,frag_tensor,using_cuda)
+        if using_cuda:
+            res=res.cpu()
+        po_h,po_w=show_coordonnee(res)
+        coordonnee_list.append([po_h,po_w])
+    h_img,w_img,c=img.shape
+    position=[]
+    for i in range(len(coordonnee_list)):
+        x=int(round(h_img*coordonnee_list[i][0]))
+        y=int(round(w_img*coordonnee_list[i][1]))
+        position.append([x,y])
+    return position
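+
+
+# Illustrative wrapper (not in the original script): how the pieces above fit
+# together at inference time. The helper name and the paths passed to it are
+# hypothetical placeholders.
+def localiser_fragment(net_path,fresque_path,frag_path,frag_size=16,using_cuda=False):
+    net=load_net(net_path)
+    img=cv2.imread(fresque_path)
+    frag=cv2.imread(frag_path)
+    # returns one [row, col] pixel position per square patch kept in the fragment
+    return run_net(net,img,frag,frag_size,using_cuda)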
+
+
+# In[10]:
+
+
+if __name__=='__main__':
+
+    # The input patch size is 16*16
+    frag_size=16
+    # The size of the smallest patch in the network is fixed at 4*4
+    psize=4
+    using_cuda=True
+
+
+    net=Net(frag_size,psize)
+
+    # For each fresco, the number of iterations is 1000
+    itera=1000
+
+    if using_cuda:
+        net=net.cuda()
+
+    # Choose the optimizer and the loss function
+    optimizer = torch.optim.Adam(net.parameters())
+    loss_func = torch.nn.MSELoss()
+
+    # During training, the evolution of the error is stored in loss_value=[]
+    # and the evolution of the Conv1 weights is stored in para_value=[]
+    loss_value=[]
+    para_value=[]
+    ####################################################training_net
+
+    # The training data are 6 frescoes
+    for n in range(6):
+        im_path="./fresque"+str(n)+".ppm"
+        img_training=cv2.imread(im_path)
+        h,w,c=img_training.shape
+
+        # If the fresco is too large, downsample it
+        # (note: cv2.resize expects (width, height), hence (w/2, h/2))
+        while h*w>(1240*900):
+            img_training=cv2.resize(img_training,(int(w/2),int(h/2)),interpolation=cv2.INTER_CUBIC)
+            h,w,c=img_training.shape
+        im_tensor=img2tensor(img_training)
+
+        if using_cuda:
+            im_tensor=im_tensor.cuda()
+        for i in range(itera):
+            # Every 100 iterations, record the Conv1 weights
+            if i%100==0:
+                para=net.conv1.weight
+                para=para.detach().cpu()
+                para_value.append(para)
+            frag,vt=get_training_fragment(frag_size,img_training)
+            frag_tensor=img2tensor(frag)
+            if using_cuda:
+                vt=vt.cuda()
+                frag_tensor=frag_tensor.cuda()
+            # Run the network on a training patch and its fresco
+            frag_pred=net.forward(im_tensor,frag_tensor,using_cuda)
+            b,c,h,w=vt.shape
+            # Compute the error with the loss function
+            err_=loss_func(frag_pred,vt)
+            # Let the optimizer adjust the Conv1 weights
+            optimizer.zero_grad()
+            err_.backward(retain_graph=True)
+            optimizer.step()
+
+            loss_value.append(err_.tolist())
+
+            del frag_tensor,frag_pred,err_,vt
+            torch.cuda.empty_cache()
+
+
+# In[7]:
+
+
+len(loss_value)
+
+
+# In[11]:
+
+
+plt.plot(loss_value)
+
+
+# In[12]:
+
+
+file_path="./net_trainned6000"
+save_net(file_path,net)
+
diff --git a/Frag_Match_avec_rotation.ipynb b/Frag_Match_avec_rotation.ipynb
new file mode 100755
index 0000000..df1944c
--- /dev/null
+++ b/Frag_Match_avec_rotation.ipynb
@@ -0,0 +1,871 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# All of the code assumes the following environment\n",
+    "# python 3.7\n",
+    "# opencv 3.1.0\n",
+    "# pytorch 1.4.0\n",
+    "\n",
+    "import torch\n",
+    "from torch.autograd import Variable\n",
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "import cv2\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import random\n",
+    "import math\n",
+    "import pickle\n",
+    "from PIL import Image\n",
+    "import sys"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The functions in this block are not used by the network itself; they are utility functions\n",
+    "\n",
+    "# The functions of this block are defined in the training program\n",
+    "# “Apprentissage_MSELoss_avec_GPU“\n",
+    "# and the detailed comments can be found there\n",
+    "\n",
+    "def tensor_imshow(im_tensor,cannel):\n",
+    "    b,c,h,w=im_tensor.shape\n",
+    "    if c==1:\n",
+    "        plt.imshow(im_tensor.squeeze().detach().numpy())\n",
+    "    else:\n",
+    "        plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:])\n",
+    "    \n",
+    "def get_training_fragment(frag_size,im):\n",
+    "    h,w,c=im.shape\n",
+    "    n=random.randint(0,int(h/frag_size)-1)\n",
+    "    m=random.randint(0,int(w/frag_size)-1)\n",
+    "    \n",
+    "    shape=frag_size/4\n",
+    "    vt_h=math.ceil((h+1)/shape)\n",
+    "    vt_w=math.ceil((w+1)/shape)\n",
+    "    vt=np.zeros([vt_h,vt_w])\n",
+    "    vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)\n",
+    "    vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)\n",
+    "    vt[vt_h_po,vt_w_po]=1\n",
+    "    vt = np.float32(vt)\n",
+    "    vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))\n",
+    "    \n",
+    "    return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt\n",
+    "\n",
+    "def write_result_in_file(result,file_name):\n",
+    "    n=0\n",
+    "    with open(file_name,'w') as file:\n",
+    "        for i in range(len(result)):\n",
+    "            while n<
+    "\n",
+    "def img2tensor(im):\n",
+    "    im=np.array(im,dtype=\"float32\")\n",
+    "    tensor_cv = torch.from_numpy(np.transpose(im, (2, 0, 1)))\n",
+    "    im_tensor=tensor_cv.unsqueeze(0)\n",
+    "    return im_tensor\n",
+    "\n",
+    "def show_coordonnee(position_pred):\n",
+    "    map_corre=position_pred.squeeze().detach().numpy()\n",
+    "    h,w=map_corre.shape\n",
+    "    max_value=map_corre.max()\n",
+    "    coordonnee=np.where(map_corre==max_value)\n",
+    "    return coordonnee[0].mean()/h,coordonnee[1].mean()/w\n",
+    "\n",
+    "def test_fragment32_32(frag,seuillage):\n",
+    "    a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]\n",
+    "    mask = (a == 0)\n",
+    "    arr_new = a[mask]\n",
+    "    if arr_new.size/a.size<=(1-seuillage):\n",
+    "        return True\n",
+    "    else:\n",
+    "        return False\n",
+    "\n",
+    "def save_net(file_path,net):\n",
+    "    pkl_file = open(file_path, 'wb')\n",
+    "    pickle.dump(net,pkl_file)\n",
+    "    pkl_file.close()\n",
+    "def load_net(file_path): \n",
+    "    pkl_file = open(file_path, 'rb')\n",
+    "    net= pickle.load(pkl_file)\n",
+    "    pkl_file.close()\n",
+    "    return net"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Network-building functions; the detailed comments are in the training program\n",
+    "def ini():\n",
+    "    kernel=torch.zeros([8,3,3,3])\n",
+    "    array_0=np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype='float32')\n",
+    "    array_1=np.array([[2,1,0],[1,0,-1],[0,-1,-2]],dtype='float32')\n",
+    "    array_2=np.array([[1,0,-1],[2,0,-2],[1,0,-1]],dtype='float32')\n",
+    "    array_3=np.array([[0,-1,-2],[1,0,-1],[2,1,0]],dtype='float32')\n",
+    "    array_4=np.array([[-1,-2,-1],[0,0,0],[1,2,1]],dtype='float32')\n",
+    "    array_5=np.array([[-2,-1,0],[-1,0,1],[0,1,2]],dtype='float32')\n",
+    "    array_6=np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype='float32')\n",
+    "    array_7=np.array([[0,1,2],[-1,0,1],[-2,-1,0]],dtype='float32')\n",
+    "    for i in range(3):\n",
+    "        kernel[0,i,:]=torch.from_numpy(array_0)\n",
+    "        kernel[1,i,:]=torch.from_numpy(array_1)\n",
+    "        kernel[2,i,:]=torch.from_numpy(array_2)\n",
+    "        kernel[3,i,:]=torch.from_numpy(array_3)\n",
+    "        kernel[4,i,:]=torch.from_numpy(array_4)\n",
+    "        kernel[5,i,:]=torch.from_numpy(array_5)\n",
+    "        kernel[6,i,:]=torch.from_numpy(array_6)\n",
+    "        kernel[7,i,:]=torch.from_numpy(array_7)\n",
+    "    return torch.nn.Parameter(kernel,requires_grad=True) \n",
+    "\n",
+    "def kernel_add_ini(n,m):\n",
+    "    input_canal=int(n*m)\n",
+    "    output_canal=int(n/2)*int(m/2)\n",
+    "    for i in range(int(n/2)):\n",
+    "        for j in range(int(m/2)):\n",
+    "            kernel_add=np.zeros([1,input_canal],dtype='float32')\n",
+    "            kernel_add[0,i*2*m+j*2]=1\n",
+    "            kernel_add[0,i*2*m+j*2+1]=1\n",
+    "            kernel_add[0,(i*2+1)*m+j*2]=1\n",
+    "            kernel_add[0,(i*2+1)*m+j*2+1]=1\n",
+    "            if i==0 and j==0:\n",
+    "                add=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
+    "            else:\n",
+    "                add_=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
+    "                add=torch.cat((add,add_),0)\n",
+    "    return torch.nn.Parameter(add,requires_grad=False) \n",
+    "\n",
+    "def kernel_shift_ini(n,m):\n",
+    "    input_canal=int(n*m)\n",
+    "    output_canal=int(n*m)\n",
+    "    \n",
+    "    kernel_shift=torch.zeros([output_canal,input_canal,3,3])\n",
+    "    \n",
+    "    array_0=np.array([[1,0,0],[0,0,0],[0,0,0]],dtype='float32')\n",
+    "    array_1=np.array([[0,0,1],[0,0,0],[0,0,0]],dtype='float32')\n",
+    "    array_2=np.array([[0,0,0],[0,0,0],[1,0,0]],dtype='float32')\n",
+    "    array_3=np.array([[0,0,0],[0,0,0],[0,0,1]],dtype='float32')\n",
+    "    \n",
+    "    kernel_shift_0=torch.from_numpy(array_0)\n",
+    "    kernel_shift_1=torch.from_numpy(array_1)\n",
+    "    kernel_shift_2=torch.from_numpy(array_2)\n",
+    "    kernel_shift_3=torch.from_numpy(array_3)\n",
+    "    \n",
+    "    for i in range(n):\n",
+    "        for j in range(m):\n",
+    "            if i==0 and j==0:\n",
+    "                kernel_shift[0,0,:]=kernel_shift_0\n",
+    "            else:\n",
+    "                if i%2==0 and j%2==0:\n",
+    "                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_0\n",
+    "                if i%2==0 and j%2==1:\n",
+    "                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_1\n",
+    "                if i%2==1 and j%2==0:\n",
+    "                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_2\n",
+    "                if i%2==1 and j%2==1:\n",
+    "                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_3\n",
+    "    \n",
+    "    return torch.nn.Parameter(kernel_shift,requires_grad=False) \n",
+    "\n",
+    "def get_patch(fragment,psize,n,m):\n",
+    "    return fragment[:,:,n*psize:(n+1)*psize,m*psize:(m+1)*psize]\n",
+    "\n",
+    "class Net(nn.Module):\n",
+    "    def __init__(self,frag_size,psize):\n",
+    "        super(Net, self).__init__()\n",
+    "        \n",
+    "        h_fr=frag_size\n",
+    "        w_fr=frag_size\n",
+    "        \n",
+    "        n=int(h_fr/psize)\n",
+    "        m=int(w_fr/psize)\n",
+    "        \n",
+    "        self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)\n",
+    "        self.Relu = nn.ReLU(inplace=True)\n",
+    "        self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)\n",
+    "        \n",
+    "        self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
+    "        self.shift1.weight=kernel_shift_ini(n,m)\n",
+    "        self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
+    "        self.add1.weight=kernel_add_ini(n,m)\n",
+    "        \n",
+    "        n=int(n/2)\n",
+    "        m=int(m/2)\n",
+    "        if n>=2 and m>=2:\n",
+    "            self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
+    "            self.shift2.weight=kernel_shift_ini(n,m)\n",
+    "            self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
+    "            self.add2.weight=kernel_add_ini(n,m)\n",
+    "        \n",
+    "        n=int(n/2)\n",
+    "        m=int(m/2)\n",
+    "        if n>=2 and m>=2:\n",
+    "            self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
+    "            self.shift3.weight=kernel_shift_ini(n,m)\n",
+    "            self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
+    "            self.add3.weight=kernel_add_ini(n,m)\n",
+    "    \n",
+    "    \n",
+    "    def get_descripteur(self,img,using_cuda):\n",
+    "        descripteur_img=self.Relu(self.conv1(img))\n",
+    "        b,c,h,w=descripteur_img.shape\n",
+    "        couche_constante=0.5*torch.ones([1,1,h,w])\n",
+    "        if using_cuda:\n",
+    "            couche_constante=couche_constante.cuda()\n",
+    "        descripteur_img=torch.cat((descripteur_img,couche_constante),1)\n",
+    "        descripteur_img_norm=descripteur_img/torch.norm(descripteur_img,dim=1)\n",
+    "        return descripteur_img_norm\n",
+    "    \n",
+    "    def forward(self,img,frag,using_cuda):\n",
+    "        psize=4\n",
+    "        \n",
+    "        descripteur_input1=self.get_descripteur(img,using_cuda)\n",
+    "        descripteur_input2=self.get_descripteur(frag,using_cuda)\n",
+    "        \n",
+    "        b,c,h,w=frag.shape\n",
+    "        n=int(h/psize)\n",
+    "        m=int(w/psize)\n",
+    "        \n",
+    "        for i in range(n):\n",
+    "            for j in range(m):\n",
+    "                if i==0 and j==0:\n",
+    "                    map_corre=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
+    "                else:\n",
+    "                    a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
+    "                    map_corre=torch.cat((map_corre,a),1)\n",
+    "        #shift\n",
+    "        map_corre=self.maxpooling(map_corre)\n",
+    "        map_corre=self.shift1(map_corre)\n",
+    "        map_corre=self.add1(map_corre)\n",
+    "        \n",
+    "        \n",
+    "        n=int(n/2)\n",
+    "        m=int(m/2)\n",
+    "        if n>=2 and m>=2:\n",
+    "            map_corre=self.maxpooling(map_corre)\n",
+    "            map_corre=self.shift2(map_corre)\n",
+    "            map_corre=self.add2(map_corre)\n",
+    "        \n",
+    "        \n",
+    "        n=int(n/2)\n",
+    "        m=int(m/2)\n",
+    "        if n>=2 and m>=2:\n",
+    "            map_corre=self.maxpooling(map_corre)\n",
+    "            map_corre=self.shift3(map_corre)\n",
+    "            map_corre=self.add3(map_corre)\n",
+    "        \n",
+    "        \n",
+    "        b,c,h,w=map_corre.shape\n",
+    "        map_corre=map_corre/(map_corre.max())\n",
+    "        #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)\n",
+    "        return map_corre"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The functions in this block apply the network to whole fragments (not to square patches)\n",
+    "\n",
+    "\n",
+    "# This function selects a set of square patches from a fragment\n",
+    "# The parameter frag_size is the size of the square input patch (16 * 16)\n",
+    "# The parameter seuillage bounds the proportion of non-black pixels in each patch\n",
+    "# The parameter limite caps the number of patches taken from each fragment\n",
+    "def get_patch_list(frag,frag_size,limite,seuillage):\n",
+    "    n=0\n",
+    "    m=0\n",
+    "    h,w,c=frag.shape\n",
+    "    patch_list=[]\n",
+    "    position_list=[]\n",
+    "    for i in range(4):\n",
+    "        if len(patch_list)>limite and limite!=0:\n",
+    "            break\n",
+    "        for j in range(4):\n",
+    "            if len(patch_list)>limite and limite!=0:\n",
+    "                break\n",
+    "            n_offset=i*4 # n offset\n",
+    "            m_offset=j*4 # m offset\n",
" n=0\n", + " while n+frag_size+n_offset0:\n", + " rot_frag=math.atan(tan_rot)*(180/pi)\n", + " else:\n", + " rot_frag=math.atan(tan_rot)*(180/pi)+180\n", + " rot_frag=-rot_frag\n", + " if rot_frag>0:\n", + " rot_frag-=360\n", + " return centre[0][0],centre[1][0],rot_frag\n", + "\n", + "# Vérifiez les résultats de Ransac en avec des changements de distance euclidienne\n", + "def test_frag(inline,frag,fres):\n", + " itera=10\n", + " frag_inline=[]\n", + " fres_inline=[]\n", + " # Metter les coordonnées du point inline dans \"frag_inline[]\",et \"fres_inline[]\"\n", + " for i in range(np.size(inline,0)):\n", + " if inline[i]==1:\n", + " frag_inline.append([frag[i][0],frag[i][1]])\n", + " fres_inline.append([fres[i][0],fres[i][1]])\n", + " p=[]\n", + " \n", + " # Faites une boucle dix fois, \n", + " # sélectionnez à chaque fois deux paires correspondantes inline \n", + " # calculer le changement de leur distance euclidienne\n", + " for i in range(itera):\n", + " point_test=selectionner_points(2,np.size(frag_inline,0))\n", + " diff_x_frag=frag_inline[point_test[1]][0]-frag_inline[point_test[0]][0]\n", + " diff_y_frag=frag_inline[point_test[1]][1]-frag_inline[point_test[0]][1]\n", + " diff_frag=sqrt(pow(diff_x_frag,2)+pow(diff_y_frag,2))\n", + " \n", + " diff_x_fres=fres_inline[point_test[1]][0]-fres_inline[point_test[0]][0]\n", + " diff_y_fres=fres_inline[point_test[1]][1]-fres_inline[point_test[0]][1]\n", + " diff_fres=sqrt(pow(diff_x_fres,2)+pow(diff_y_fres,2))\n", + " if diff_frag !=0:\n", + " fsf=diff_fres/diff_frag\n", + " p.append([fsf])\n", + " result=np.mean(p)\n", + " return result\n", + "\n", + "def frag_match(frag,img,position):\n", + " \n", + " frag_size=frag.shape\n", + " centre_frag=creer_point(frag_size[0]/2,frag_size[1]/2)\n", + " \n", + " retained_matches = []\n", + " frag=[]\n", + " fres=[]\n", + " \n", + " for i in range(len(position)):\n", + " frag.append([float(position[i][0]),float(position[i][1])])\n", + " fres.append([float(position[i][2]),float(position[i][3])])\n", + " \n", + " if np.size(frag)>0:\n", + " # Calculer la matrice de transformation affine à l'aide de la méthode Ransac\n", + " h,inline=cv2.estimateAffinePartial2D(np.array(frag),np.array(fres))\n", + " # Si “h” n'est pas sous la forme de matrice 2 * 3, la matrice de transformation affine n'est pas trouvée\n", + " if np.size(h)!=6:\n", + " return ([-1])\n", + " else:\n", + " x,y,rot=position_rotation(h,centre_frag)\n", + " pourcenttage=sum(inline)/np.size(frag,0)\n", + " # Le nombre de points inline doit être supérieur à un certain nombre\n", + " if sum(inline)>3:\n", + " p=test_frag(inline,frag,fres)\n", + " # La distance euclidienne entre les points correspondants ne doit pas trop changer, \n", + " # sinon cela prouve que le résultat de Ransac est incorrect\n", + " # ici,le changement de la distance euclidienne sont entre 0.7 et 1.3\n", + " if abs(p-1)<0.3:\n", + " # Ce n'est qu'alors que Ransac renvoie le résultat correct\n", + " return([round(y),round(x),round(rot,3)])\n", + " else:\n", + " return ([-2])\n", + " else:\n", + " return ([-3])\n", + " else:\n", + " return ([-4]) " + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "if __name__==\"__main__\":\n", + " \n", + " frag_size=16\n", + " using_cuda=True\n", + " net=load_net(\"./net_trainned6000\")\n", + " img_test=cv2.imread(\"./fresque0.ppm\")\n", + " \n", + " result=[]\n", + " for n in range(315):\n", + " if n<10:\n", + " 
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if __name__==\"__main__\":\n",
+    "    \n",
+    "    frag_size=16\n",
+    "    using_cuda=True\n",
+    "    net=load_net(\"./net_trainned6000\")\n",
+    "    img_test=cv2.imread(\"./fresque0.ppm\")\n",
+    "    \n",
+    "    result=[]\n",
+    "    for n in range(315):\n",
+    "        if n<10:\n",
+    "            frag_test=cv2.imread(\"./frag_eroded0/frag_eroded_000\"+str(n)+\".ppm\")\n",
+    "        elif n<100:\n",
+    "            frag_test=cv2.imread(\"./frag_eroded0/frag_eroded_00\"+str(n)+\".ppm\")\n",
+    "        else:\n",
+    "            frag_test=cv2.imread(\"./frag_eroded0/frag_eroded_0\"+str(n)+\".ppm\")\n",
+    "        \n",
+    "        # Rotate the piece by 20 degrees at a time and try to match it; repeat 18 times\n",
+    "        for i in range(18):\n",
+    "            rotation=20*i\n",
+    "            score_list,position=run_net_v3(net,img_test,frag_test,frag_size,60,0.7,using_cuda,rotation)\n",
+    "            frag_position=frag_match(frag_test,img_test,position)\n",
+    "            # As soon as RANSAC returns a correct result, leave the loop\n",
+    "            if len(frag_position)==3:\n",
+    "                rotation_base=i*20\n",
+    "                break\n",
+    "        # Store the correctly located fragments in result[]\n",
+    "        if len(frag_position)==3:\n",
+    "            frag_position[2]=rotation_base-360-frag_position[2]\n",
+    "            if frag_position[2]>0:\n",
+    "                frag_position[2]=frag_position[2]-360\n",
+    "            result.append([n,frag_position[0],frag_position[1],round(frag_position[2],3)])\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[[0, 520.0, 575.0, -356.388],\n",
+       " [1, 535.0, 460.0, -113.454],\n",
+       " [2, 971.0, 270.0, -40.966],\n",
+       " [3, 1641.0, 650.0, -119.543],\n",
+       " [4, 1349.0, 68.0, -336.356],\n",
+       " [5, 1509.0, 192.0, -298.759],\n",
+       " [6, 107.0, 521.0, -74.179],\n",
+       " [7, 420.0, 440.0, -174.266],\n",
+       " [8, 287.0, 533.0, -299.677],\n",
+       " [9, 1518.0, 167.0, -290.164],\n",
+       " [10, 231.0, 429.0, -180.983],\n",
+       " [11, 666.0, 483.0, -230.948],\n",
+       " [12, 855.0, 104.0, -346.884],\n",
+       " [13, 1267.0, 87.0, -305.562],\n",
+       " [14, 16.0, 705.0, -30.087],\n",
+       " [15, 924.0, 120.0, -146.41],\n",
+       " [16, 657.0, 372.0, -175.323],\n",
+       " [17, 1409.0, 528.0, -329.829],\n",
+       " [18, 618.0, 427.0, -350.062],\n",
+       " [19, 631.0, 269.0, -87.332],\n",
+       " [20, 1345.0, 579.0, -320.597],\n",
+       " [21, 1670.0, 139.0, -282.108],\n",
+       " [22, 1310.0, 4.0, -180.0],\n",
+       " [23, 1418.0, 29.0, -112.925],\n",
+       " [24, 874.0, 496.0, -312.046],\n",
+       " [25, 812.0, 537.0, -4.393],\n",
+       " [26, 47.0, 728.0, -82.997],\n",
+       " [27, 1411.0, 200.0, -324.46],\n",
+       " [28, 767.0, 595.0, -339.734],\n",
+       " [29, 361.0, 434.0, -349.088],\n",
+       " [30, 1264.0, 732.0, -211.149],\n",
+       " [31, 958.0, 738.0, -356.008],\n",
+       " [32, 1307.0, 679.0, -145.032],\n",
+       " [33, 704.0, 553.0, -197.736],\n",
+       " [34, 867.0, 344.0, -355.599],\n",
+       " [35, 1702.0, 164.0, -315.301],\n",
+       " [36, 1483.0, 307.0, -330.954],\n",
+       " [37, 1365.0, 661.0, -158.589],\n",
+       " [38, 35.0, 623.0, -301.166],\n",
+       " [39, 968.0, 40.0, -355.307],\n",
+       " [40, 137.0, 650.0, -127.38],\n",
+       " [41, 1527.0, 239.0, -113.919],\n",
+       " [42, 1176.0, 736.0, -218.247],\n",
+       " [43, 466.0, 676.0, -139.007],\n",
+       " [44, 297.0, 659.0, -22.509],\n",
+       " [45, 1075.0, 363.0, -1.866],\n",
+       " [46, 973.0, 658.0, -118.442],\n",
+       " [47, 658.0, 589.0, -134.967],\n",
+       " [48, 1438.0, 245.0, -63.213],\n",
+       " [49, 1019.0, 381.0, -4.052],\n",
+       " [50, 898.0, 586.0, -320.709],\n",
+       " [51, 738.0, 258.0, -298.33],\n",
+       " [52, 1668.0, 167.0, -257.834],\n",
+       " [53, 306.0, 201.0, -304.816],\n",
+       " [54, 129.0, 353.0, -123.722],\n",
+       " [55, 1612.0, 527.0, -201.46],\n",
+       " [56, 1406.0, 400.0, -132.928],\n",
+       " [57, 1223.0, 629.0, -243.388],\n",
+       " [58, 1603.0, 770.0, -223.688],\n",
+       " [59, 1451.0, 323.0, -4.008],\n",
+       " [60, 1262.0, -8.0, -143.496],\n",
+       " [61, 1409.0, 358.0, -244.745],\n",
+       " [62, 426.0, 
567.0, -107.651],\n", + " [63, 1093.0, 536.0, -11.543],\n", + " [64, 1570.0, 763.0, -340.0],\n", + " [65, 599.0, 29.0, -352.066],\n", + " [66, 38.0, 522.0, -237.017],\n", + " [67, 1076.0, 29.0, -152.794],\n", + " [68, 1629.0, 79.0, -70.396],\n", + " [69, 1464.0, 311.0, -306.565],\n", + " [70, 1595.0, 260.0, -53.364],\n", + " [71, 1343.0, 273.0, -256.237],\n", + " [72, 1074.0, 236.0, -1.801],\n", + " [73, 132.0, 35.0, -160.03],\n", + " [74, 1627.0, 762.0, -235.274],\n", + " [75, 713.0, 361.0, -136.338],\n", + " [76, 1485.0, 85.0, -90.974],\n", + " [77, 1243.0, 153.0, -268.034],\n", + " [78, 1026.0, 511.0, -118.767],\n", + " [79, 240.0, 181.0, -51.205],\n", + " [80, 1085.0, 680.0, -53.483],\n", + " [81, 73.0, 212.0, -299.054],\n", + " [82, 1237.0, 389.0, -306.557],\n", + " [83, 1269.0, 87.0, -330.538],\n", + " [84, 29.0, 175.0, -298.41],\n", + " [85, 982.0, 97.0, -80.0],\n", + " [86, 1487.0, 276.0, -211.009],\n", + " [87, 1226.0, 114.0, -321.565],\n", + " [88, 60.0, 317.0, -127.895],\n", + " [89, 1351.0, 285.0, -322.595],\n", + " [90, 233.0, 33.0, -144.993],\n", + " [91, 807.0, 211.0, -1.747],\n", + " [92, 997.0, 558.0, -326.669],\n", + " [93, 1682.0, 283.0, -312.023],\n", + " [94, 1178.0, 691.0, -242.927],\n", + " [95, 1116.0, 487.0, -288.69],\n", + " [96, 952.0, 14.0, -275.225],\n", + " [97, 140.0, 765.0, -206.9],\n", + " [98, 772.0, 81.0, -112.039],\n", + " [99, 640.0, 682.0, -57.917],\n", + " [100, 121.0, 757.0, -231.821],\n", + " [101, 1484.0, 218.0, -15.255],\n", + " [102, 1304.0, 164.0, -273.435],\n", + " [103, 862.0, 192.0, -303.177],\n", + " [104, 258.0, 766.0, -257.769],\n", + " [105, 540.0, 714.0, -183.176],\n", + " [106, 1283.0, 264.0, -330.905],\n", + " [107, 507.0, 196.0, -333.629],\n", + " [108, 1428.0, 438.0, -209.806],\n", + " [109, 391.0, 739.0, -243.391],\n", + " [110, 250.0, 640.0, -287.596],\n", + " [111, 704.0, 102.0, -78.376],\n", + " [112, 1060.0, 227.0, -300.726],\n", + " [113, 1509.0, 302.0, -318.749],\n", + " [114, 895.0, 662.0, -199.664],\n", + " [115, 1453.0, 254.0, -260.0],\n", + " [116, 1058.0, 199.0, -218.275],\n", + " [117, 1416.0, 779.0, 0.0],\n", + " [118, 1054.0, 740.0, -197.173],\n", + " [119, 1352.0, 393.0, -309.992],\n", + " [120, 1273.0, 585.0, -334.957],\n", + " [121, 952.0, 566.0, -193.436],\n", + " [122, 1132.0, 456.0, -25.393],\n", + " [123, 598.0, 179.0, -308.525],\n", + " [124, 1270.0, 15.0, -260.0],\n", + " [125, 763.0, 462.0, -241.979],\n", + " [126, 1515.0, 307.0, -284.007],\n", + " [127, 1582.0, 608.0, -175.422],\n", + " [128, 388.0, 536.0, -166.952],\n", + " [129, 426.0, 667.0, -84.451],\n", + " [130, 1505.0, 18.0, -192.572],\n", + " [131, 976.0, 440.0, -260.194],\n", + " [132, 1467.0, 655.0, -345.072],\n", + " [133, 1085.0, 388.0, -323.091],\n", + " [134, 1189.0, 514.0, -99.219],\n", + " [135, 232.0, 751.0, -360.0],\n", + " [136, 1336.0, 91.0, -132.694],\n", + " [137, 765.0, 173.0, -54.653],\n", + " [138, 1451.0, 421.0, -291.464],\n", + " [139, 920.0, 370.0, -271.035],\n", + " [140, 314.0, 739.0, -302.917],\n", + " [141, 1225.0, 213.0, -281.565],\n", + " [143, 160.0, 716.0, -238.299],\n", + " [144, 503.0, 31.0, -348.257],\n", + " [145, 1159.0, 423.0, -94.766],\n", + " [146, 131.0, 716.0, -27.957],\n", + " [147, 20.0, 683.0, -230.611],\n", + " [148, 237.0, 756.0, -164.897],\n", + " [149, 1596.0, 19.0, -285.437],\n", + " [150, 238.0, 737.0, -17.033],\n", + " [151, 354.0, 151.0, -100.638],\n", + " [152, 887.0, 257.0, -318.435],\n", + " [153, 541.0, 142.0, -151.453],\n", + " [154, 1177.0, 82.0, -275.589],\n", + " [155, 1526.0, 594.0, 
-340.0],\n", + " [156, 1454.0, 307.0, -305.964],\n", + " [157, 327.0, 150.0, -161.526],\n", + " [158, 1288.0, 199.0, -314.289],\n", + " [159, 649.0, 179.0, -333.417],\n", + " [160, 1413.0, 255.0, -283.537],\n", + " [161, 1543.0, 61.0, -307.416],\n", + " [162, 1190.0, 144.0, -226.857],\n", + " [163, 587.0, 104.0, -151.213],\n", + " [164, 1320.0, 602.0, -315.852],\n", + " [165, 627.0, 104.0, -320.252],\n", + " [166, 1474.0, 413.0, -233.361],\n", + " [167, 699.0, 176.0, -178.052],\n", + " [168, 1024.0, 341.0, -259.2],\n", + " [169, 1701.0, 755.0, -304.039],\n", + " [170, 1684.0, 210.0, -244.828],\n", + " [171, 1330.0, 290.0, -21.864],\n", + " [172, 1053.0, 139.0, -324.51],\n", + " [173, 1643.0, 428.0, -331.469],\n", + " [174, 319.0, 542.0, -205.964],\n", + " [175, 1045.0, 91.0, -3.533],\n", + " [176, 828.0, 695.0, -295.602],\n", + " [177, 1631.0, 287.0, -132.225],\n", + " [178, 361.0, 677.0, -142.992],\n", + " [179, 1674.0, 350.0, -227.628],\n", + " [180, 1412.0, 764.0, -350.194],\n", + " [181, 845.0, 19.0, -27.206],\n", + " [183, 1239.0, 192.0, -317.673],\n", + " [184, 320.0, 39.0, -157.845],\n", + " [185, 1387.0, 749.0, -46.487],\n", + " [186, 166.0, 589.0, -211.473],\n", + " [187, 1339.0, 122.0, -258.435],\n", + " [188, 1216.0, 235.0, -163.194],\n", + " [189, 1677.0, 696.0, -275.899],\n", + " [190, 158.0, 762.0, -327.275],\n", + " [191, 1078.0, 254.0, -226.278],\n", + " [192, 468.0, 761.0, -280.0],\n", + " [193, 439.0, 44.0, -182.45],\n", + " [194, 161.0, 541.0, -354.612],\n", + " [195, 972.0, 502.0, -335.421],\n", + " [196, 427.0, 192.0, -300.872],\n", + " [197, 17.0, 358.0, -137.835],\n", + " [198, 210.0, 580.0, -7.095],\n", + " [199, 259.0, 601.0, -228.13],\n", + " [200, 583.0, 736.0, -154.988],\n", + " [201, 216.0, 751.0, -302.426],\n", + " [202, 826.0, 46.0, -162.036],\n", + " [203, 1592.0, 382.0, -344.745],\n", + " [204, 1533.0, 110.0, -332.592],\n", + " [205, 159.0, 761.0, -21.87],\n", + " [206, 1219.0, 473.0, -297.199],\n", + " [207, 1697.0, 711.0, -103.887],\n", + " [208, 1449.0, 289.0, -247.773],\n", + " [209, 1694.0, 749.0, -48.106],\n", + " [210, 842.0, 613.0, -235.221],\n", + " [211, 1046.0, 294.0, -134.05],\n", + " [212, 1696.0, 763.0, -258.435],\n", + " [213, 350.0, 83.0, -189.227],\n", + " [214, 1262.0, 482.0, -347.386],\n", + " [215, 310.0, 321.0, -211.399],\n", + " [216, 744.0, 22.0, -268.647],\n", + " [218, 1353.0, 732.0, -72.057],\n", + " [219, 1418.0, 770.0, -340.0],\n", + " [220, 496.0, 718.0, -20.0],\n", + " [221, 477.0, 149.0, -277.479],\n", + " [222, 1547.0, 87.0, -325.069],\n", + " [223, 1087.0, 345.0, -3.805],\n", + " [224, 259.0, 78.0, -19.36],\n", + " [225, 1530.0, 512.0, -276.062],\n", + " [226, 132.0, 730.0, -360.0],\n", + " [227, 944.0, 471.0, -170.0],\n", + " [228, 1245.0, 256.0, -155.672],\n", + " [229, 1615.0, 354.0, -81.565],\n", + " [230, 177.0, 72.0, -198.674],\n", + " [231, 1637.0, 470.0, -350.62],\n", + " [232, 540.0, 644.0, -4.911],\n", + " [233, 624.0, 625.0, -127.127],\n", + " [234, 420.0, 15.0, -187.058],\n", + " [235, 884.0, 751.0, -288.649],\n", + " [236, 267.0, 776.0, -339.146],\n", + " [237, 1129.0, 87.0, -10.008],\n", + " [238, 1700.0, 713.0, -240.764],\n", + " [239, 286.0, 283.0, -148.653],\n", + " [240, 679.0, 730.0, -336.762],\n", + " [241, 1129.0, 106.0, -326.244],\n", + " [242, 1292.0, 200.0, -203.69],\n", + " [243, 1693.0, 570.0, -140.0],\n", + " [244, 1529.0, 114.0, -341.565],\n", + " [245, 158.0, 759.0, -356.87],\n", + " [246, 1116.0, 563.0, -242.203],\n", + " [247, 283.0, 129.0, -309.725],\n", + " [248, 1597.0, 294.0, -312.875],\n", 
+ " [249, 1709.0, 762.0, -273.435],\n", + " [250, 1377.0, 302.0, -314.179],\n", + " [251, 323.0, 364.0, -6.183],\n", + " [252, 1316.0, 485.0, -280.0],\n", + " [253, 962.0, 16.0, -280.0],\n", + " [254, 1198.0, 573.0, -11.792],\n", + " [255, 276.0, 384.0, -279.393],\n", + " [256, 1684.0, 734.0, -104.219],\n", + " [257, 1447.0, 333.0, -280.5],\n", + " [258, 999.0, 256.0, -227.883],\n", + " [259, 1014.0, 413.0, -325.59],\n", + " [260, 141.0, 724.0, -216.565],\n", + " [261, 1552.0, 545.0, -255.848],\n", + " [262, 292.0, 13.0, -191.87],\n", + " [263, 573.0, 681.0, -79.779],\n", + " [264, 1433.0, 740.0, -149.733],\n", + " [265, 138.0, 726.0, -311.027],\n", + " [266, 1617.0, 762.0, -327.995],\n", + " [267, 170.0, 141.0, -335.639],\n", + " [268, 1282.0, 441.0, -324.876],\n", + " [269, 322.0, 255.0, -143.393],\n", + " [270, 100.0, 762.0, -56.809],\n", + " [271, 1249.0, 533.0, -349.403],\n", + " [272, 1097.0, 584.0, -213.29],\n", + " [274, 1697.0, 712.0, -360.0],\n", + " [275, 399.0, 148.0, -176.849],\n", + " [276, 56.0, 452.0, -155.208],\n", + " [277, 166.0, 16.0, -336.461],\n", + " [278, 58.0, 406.0, -233.24],\n", + " [279, 1552.0, 79.0, -69.146],\n", + " [280, 746.0, 134.0, -259.441],\n", + " [281, 123.0, 106.0, -103.672],\n", + " [282, 160.0, 758.0, -64.289],\n", + " [283, 1454.0, 323.0, -100.71],\n", + " [284, 220.0, 98.0, -135.362],\n", + " [285, 1173.0, 234.0, -105.515],\n", + " [286, 1238.0, 576.0, -360.0],\n", + " [287, 383.0, 194.0, -259.057],\n", + " [288, 1693.0, 147.0, -322.49],\n", + " [289, 286.0, 351.0, -360.0],\n", + " [290, 648.0, 734.0, -27.933],\n", + " [292, 160.0, 492.0, -111.87],\n", + " [293, 1105.0, 621.0, -115.047],\n", + " [294, 1108.0, 241.0, -227.684],\n", + " [295, 1212.0, 278.0, -340.0],\n", + " [296, 844.0, 756.0, -338.456],\n", + " [298, 1693.0, 111.0, -278.435],\n", + " [299, 1530.0, 563.0, -335.376],\n", + " [301, 364.0, 316.0, -360.0],\n", + " [303, 1690.0, 15.0, -3.881],\n", + " [304, 1170.0, 179.0, -151.87],\n", + " [305, 1408.0, 331.0, -11.189],\n", + " [307, 1654.0, 12.0, -280.0],\n", + " [308, 1180.0, 207.0, -80.017],\n", + " [309, 86.0, 403.0, -185.0],\n", + " [313, 281.0, 317.0, -120.0]]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py37", + "language": "python", + "name": "py37" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Frag_Match_avec_rotation.py b/Frag_Match_avec_rotation.py new file mode 100644 index 0000000..289004b --- /dev/null +++ b/Frag_Match_avec_rotation.py @@ -0,0 +1,515 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +#Tous les codes sont basés sur l'environnement suivant +#python 3.7 +#opencv 3.1.0 +#pytorch 1.4.0 + +import torch +from torch.autograd import Variable +import torch.nn as nn +import torch.nn.functional as F +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import random +import math +import pickle +import random +from PIL import Image +import sys + + +# In[2]: + + +# Les fonctions dans ce bloc ne sont pas utilisées par le réseau, mais certaines fonctions d'outils + +# Les fonctions de ce bloc se trouvent dans le programme d'apprentissage +# “Apprentissage_MSELoss_avec_GPU“ +# et les 
+
+def tensor_imshow(im_tensor,cannel):
+    b,c,h,w=im_tensor.shape
+    if c==1:
+        plt.imshow(im_tensor.squeeze().detach().numpy())
+    else:
+        plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:])
+
+def get_training_fragment(frag_size,im):
+    h,w,c=im.shape
+    n=random.randint(0,int(h/frag_size)-1)
+    m=random.randint(0,int(w/frag_size)-1)
+    
+    shape=frag_size/4
+    vt_h=math.ceil((h+1)/shape)
+    vt_w=math.ceil((w+1)/shape)
+    vt=np.zeros([vt_h,vt_w])
+    vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)
+    vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)
+    vt[vt_h_po,vt_w_po]=1
+    vt = np.float32(vt)
+    vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))
+    
+    return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt
+
+def write_result_in_file(result,file_name):
+    n=0
+    with open(file_name,'w') as file:
+        for i in range(len(result)):
+            while n=2 and m>=2:
+            self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
+            self.shift2.weight=kernel_shift_ini(n,m)
+            self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
+            self.add2.weight=kernel_add_ini(n,m)
+
+        n=int(n/2)
+        m=int(m/2)
+        if n>=2 and m>=2:
+            self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
+            self.shift3.weight=kernel_shift_ini(n,m)
+            self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
+            self.add3.weight=kernel_add_ini(n,m)
+
+    def get_descripteur(self,img,using_cuda):
+        descripteur_img=self.Relu(self.conv1(img))
+        b,c,h,w=descripteur_img.shape
+        couche_constante=0.5*torch.ones([1,1,h,w])
+        if using_cuda:
+            couche_constante=couche_constante.cuda()
+        descripteur_img=torch.cat((descripteur_img,couche_constante),1)
+        descripteur_img_norm=descripteur_img/torch.norm(descripteur_img,dim=1)
+        return descripteur_img_norm
+
+    def forward(self,img,frag,using_cuda):
+        psize=4
+
+        descripteur_input1=self.get_descripteur(img,using_cuda)
+        descripteur_input2=self.get_descripteur(frag,using_cuda)
+
+        b,c,h,w=frag.shape
+        n=int(h/psize)
+        m=int(w/psize)
+
+        for i in range(n):
+            for j in range(m):
+                if i==0 and j==0:
+                    map_corre=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)
+                else:
+                    a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)
+                    map_corre=torch.cat((map_corre,a),1)
+        #shift
+        map_corre=self.maxpooling(map_corre)
+        map_corre=self.shift1(map_corre)
+        map_corre=self.add1(map_corre)
+
+        n=int(n/2)
+        m=int(m/2)
+        if n>=2 and m>=2:
+            map_corre=self.maxpooling(map_corre)
+            map_corre=self.shift2(map_corre)
+            map_corre=self.add2(map_corre)
+
+        n=int(n/2)
+        m=int(m/2)
+        if n>=2 and m>=2:
+            map_corre=self.maxpooling(map_corre)
+            map_corre=self.shift3(map_corre)
+            map_corre=self.add3(map_corre)
+
+        b,c,h,w=map_corre.shape
+        map_corre=map_corre/(map_corre.max())
+        #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)
+        return map_corre
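+
+
+# A minimal sketch of the correlation step used in forward() above; the shapes
+# here are assumptions chosen for illustration only. One 4x4 sub-patch of the
+# fragment descriptor is applied as a convolution kernel over the whole image
+# descriptor, producing one channel of the correlation map.
+def correlation_step_demo():
+    desc_img=torch.randn(1,9,64,64)    # stand-in image descriptor (8 filters + constant layer)
+    desc_frag=torch.randn(1,9,16,16)   # stand-in fragment descriptor
+    patch=desc_frag[:,:,0:4,0:4]       # one 4x4 sub-patch, as get_patch(...) would return
+    corr=F.conv2d(desc_img,patch,padding=2)  # one correlation map, shape [1,1,65,65]
+    return corr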
+
+
+# In[4]:
+
+
+# The functions in this block apply the network to whole fragments (not only to square patches)
+
+
+# This function selects a set of square patches from a fragment
+# The parameter "frag_size" refers to the size of the square input patch (16*16)
+# The parameter "seuillage" thresholds the proportion of non-black pixels required in each patch
+# The parameter "limite" caps the number of patches kept per fragment
+def get_patch_list(frag,frag_size,limite,seuillage):
+    n=0
+    m=0
+    h,w,c=frag.shape
+    patch_list=[]
+    position_list=[]
+    for i in range(4):
+        if len(patch_list)>limite and limite!=0:
+            break
+        for j in range(4):
+            if len(patch_list)>limite and limite!=0:
+                break
+            n_offset=i*4 # n offset
+            m_offset=j*4 # m offset
+            n=0
+            while n+frag_size+n_offset0:
+        rot_frag=math.atan(tan_rot)*(180/pi)
+    else:
+        rot_frag=math.atan(tan_rot)*(180/pi)+180
+    rot_frag=-rot_frag
+    if rot_frag>0:
+        rot_frag-=360
+    return centre[0][0],centre[1][0],rot_frag
+
+# Check the RANSAC result by looking at how the Euclidean distances change
+def test_frag(inline,frag,fres):
+    itera=10
+    frag_inline=[]
+    fres_inline=[]
+    # Put the coordinates of the inlier points into "frag_inline[]" and "fres_inline[]"
+    for i in range(np.size(inline,0)):
+        if inline[i]==1:
+            frag_inline.append([frag[i][0],frag[i][1]])
+            fres_inline.append([fres[i][0],fres[i][1]])
+    p=[]
+
+    # Loop ten times:
+    # each time, select two matching pairs of inliers
+    # and compute the ratio of their Euclidean distances
+    for i in range(itera):
+        point_test=selectionner_points(2,np.size(frag_inline,0))
+        diff_x_frag=frag_inline[point_test[1]][0]-frag_inline[point_test[0]][0]
+        diff_y_frag=frag_inline[point_test[1]][1]-frag_inline[point_test[0]][1]
+        diff_frag=sqrt(pow(diff_x_frag,2)+pow(diff_y_frag,2))
+
+        diff_x_fres=fres_inline[point_test[1]][0]-fres_inline[point_test[0]][0]
+        diff_y_fres=fres_inline[point_test[1]][1]-fres_inline[point_test[0]][1]
+        diff_fres=sqrt(pow(diff_x_fres,2)+pow(diff_y_fres,2))
+        if diff_frag !=0:
+            fsf=diff_fres/diff_frag
+            p.append([fsf])
+    result=np.mean(p)
+    return result
+
+def frag_match(frag,img,position):
+
+    frag_size=frag.shape
+    centre_frag=creer_point(frag_size[0]/2,frag_size[1]/2)
+
+    retained_matches = []
+    frag=[]
+    fres=[]
+
+    for i in range(len(position)):
+        frag.append([float(position[i][0]),float(position[i][1])])
+        fres.append([float(position[i][2]),float(position[i][3])])
+
+    if np.size(frag)>0:
+        # Estimate the affine transformation matrix with the RANSAC method
+        h,inline=cv2.estimateAffinePartial2D(np.array(frag),np.array(fres))
+        # If "h" is not a 2*3 matrix, no affine transformation matrix was found
+        if np.size(h)!=6:
+            return ([-1])
+        else:
+            x,y,rot=position_rotation(h,centre_frag)
+            pourcenttage=sum(inline)/np.size(frag,0)
+            # The number of inlier points must be above a given threshold
+            if sum(inline)>3:
+                p=test_frag(inline,frag,fres)
+                # The Euclidean distance between matching points must not change too much,
+                # otherwise the RANSAC result is wrong;
+                # here the allowed distance ratio lies between 0.7 and 1.3
+                if abs(p-1)<0.3:
+                    # Only then does RANSAC return a correct result
+                    return([round(y),round(x),round(rot,3)])
+                else:
+                    return ([-2])
+            else:
+                return ([-3])
+    else:
+        return ([-4])
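+
+
+# An illustrative helper, not called by the code above; it assumes the
+# standard form of the matrix returned by cv2.estimateAffinePartial2D, a 2x3
+# similarity matrix h=[[s*cos(t),-s*sin(t),tx],[s*sin(t),s*cos(t),ty]], from
+# which the rotation angle and the scale can also be read off directly:
+def rotation_from_affine(h):
+    angle=math.degrees(math.atan2(h[1,0],h[0,0]))  # rotation t, in degrees
+    scale=math.hypot(h[0,0],h[1,0])                # isotropic scale s
+    return angle,scale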
+
+
+# In[14]:
+
+
+if __name__=="__main__":
+
+    frag_size=16
+    using_cuda=True
+    net=load_net("./net_trainned6000")
+    img_test=cv2.imread("./fresque0.ppm")
+
+    result=[]
+    for n in range(315):
+        # The fragment index is zero-padded to four digits in the file name
+        if n<10:
+            frag_test=cv2.imread("./frag_eroded0/frag_eroded_000"+str(n)+".ppm")
+        elif n<100:
+            frag_test=cv2.imread("./frag_eroded0/frag_eroded_00"+str(n)+".ppm")
+        else:
+            frag_test=cv2.imread("./frag_eroded0/frag_eroded_0"+str(n)+".ppm")
+
+        # Rotate the fragment by 20 degrees per attempt to match it; repeat 18 times
+        for i in range(18):
+            rotation=20*i
+            score_list,position=run_net_v3(net,img_test,frag_test,frag_size,60,0.7,using_cuda,rotation)
+            frag_position=frag_match(frag_test,img_test,position)
+            # When RANSAC returns a valid result, leave the loop
+            if len(frag_position)==3:
+                rotation_base=i*20
+                break
+        # Store the correctly located fragments in "result[]"
+        if len(frag_position)==3:
+            frag_position[2]=rotation_base-360-frag_position[2]
+            if frag_position[2]>0:
+                frag_position[2]=frag_position[2]-360
+            result.append([n,frag_position[0],frag_position[1],round(frag_position[2],3)])
+
+
+# In[15]:
+
+
+result
+
diff --git a/readme.txt b/readme.txt
new file mode 100644
index 0000000..7a0d059
--- /dev/null
+++ b/readme.txt
@@ -0,0 +1,25 @@
+Our code consists of two parts.
+
+1. Training the neural network:
+"Apprentissage_MSELoss_avec_GPU.ipynb"
+
+2. Running the neural network:
+"Frag_Match_avec_rotation.ipynb"
+***************************************************************************
+The .ipynb programs can be run with jupyter-notebook.
+
+1. Press Ctrl+Alt+T to open a terminal.
+2. Type "jupyter-notebook" to open the interface.
+3. Click on a program to open it.
+
+We recommend the jupyter-notebook versions: they can be run block by block, and the environment is already fully configured for them.
+***************************************************************************
+If you want to use the .py programs
+"Apprentissage_MSELoss_avec_GPU.py"
+and
+"Frag_Match_avec_rotation.py"
+
+1. Press Ctrl+Alt+T to open a terminal; you will see "(base)" at the start of the command line.
+2. Type "conda activate py37"; "(base)" then becomes "(py37)".
+3. The py37 environment is set up so that the programs run directly inside it.