Final Commit

Arthur 2021-03-12 12:12:26 +01:00
parent ba98a7fd4b
commit 200a55eed2
16 changed files with 4837 additions and 291902 deletions

.gitignore
View file

@@ -1,2 +1,8 @@
 # exclude images
 *.ppm
+.ipynb_checkpoints/
+#exclude data:
+training*/
+results_bench/
+trained_net/

File diff suppressed because one or more lines are too long

View file

@ -1,424 +0,0 @@
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# All of the code assumes the following environment
# python 3.7
# opencv 3.1.0
# pytorch 1.4.0
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
import numpy as np
import random
import math
import pickle
from PIL import Image
import sys
# In[3]:
# The functions in this block are not used by the network itself; they are utility helpers
def tensor_imshow(im_tensor,cannel):
    b,c,h,w=im_tensor.shape
    if c==1:
        plt.imshow(im_tensor.squeeze().detach().numpy())
    else:
        plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:])
# Get training data:
# frag,vt=get_training_fragment(frag_size,image)
# frag is a square patch of size (frag_size*frag_size) taken from the image (its location is random)
# vt is the ground truth, a Dirac map.
def get_training_fragment(frag_size,im):
    h,w,c=im.shape
    n=random.randint(0,int(h/frag_size)-1)
    m=random.randint(0,int(w/frag_size)-1)
    shape=frag_size/4
    vt_h=math.ceil((h+1)/shape)
    vt_w=math.ceil((w+1)/shape)
    vt=np.zeros([vt_h,vt_w])
    vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)
    vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)
    vt[vt_h_po,vt_w_po]=1
    vt = np.float32(vt)
    vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))
    return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt
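# Example (added, illustrative): with frag_size=16 on a 1000x800 image, shape=4, so vt has
# dimensions ceil(1001/4) x ceil(801/4) = 251 x 201, with a single 1 at the patch centre.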
# This function converts an image into a Tensor variable.
# All data flowing through the network is of type Tensor
# Img.shape=[Height,Width,Channel]
# Tensor.shape=[Batch,Channel,Height,Width]
def img2tensor(im):
    im=np.array(im,dtype="float32")
    tensor_cv = torch.from_numpy(np.transpose(im, (2, 0, 1)))
    im_tensor=tensor_cv.unsqueeze(0)
    return im_tensor
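# Example (added, illustrative): an RGB image of shape (64,64,3) becomes a tensor of shape (1,3,64,64).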
# Find the coordinates of the maximum value in a correlation map
# x,y=show_coordonnee(correlation map)
def show_coordonnee(position_pred):
    map_corre=position_pred.squeeze().detach().numpy()
    h,w=map_corre.shape
    max_value=map_corre.max()
    coordonnee=np.where(map_corre==max_value)
    return coordonnee[0].mean()/h,coordonnee[1].mean()/w
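# Note (added): the returned coordinates are normalized to [0,1]; multiply by the image
# height/width (as run_net does below) to recover pixel positions.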
# Filter patches according to the number of black pixels they contain
# Return True if the proportion of non-black pixels exceeds a given threshold (seuillage), otherwise False
def test_fragment32_32(frag,seuillage):
    a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]
    mask = (a == 0)
    arr_new = a[mask]
    if arr_new.size/a.size<=(1-seuillage):
        return True
    else:
        return False
# These two functions save the network to a file
# and load a stored network back from a file
def save_net(file_path,net):
    pkl_file = open(file_path, 'wb')
    pickle.dump(net,pkl_file)
    pkl_file.close()
def load_net(file_path):
    pkl_file = open(file_path, 'rb')
    net= pickle.load(pkl_file)
    pkl_file.close()
    return net
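# Caution (added note): pickle can execute arbitrary code when deserializing,
# so only load network files that come from a trusted source.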
# In[4]:
# The functions in this block are used to build the network
# Create DeepMatch-style weights as the initial value of Conv1 (optional)
def ini():
    kernel=torch.zeros([8,3,3,3])
    array_0=np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype='float32')
    array_1=np.array([[2,1,0],[1,0,-1],[0,-1,-2]],dtype='float32')
    array_2=np.array([[1,0,-1],[2,0,-2],[1,0,-1]],dtype='float32')
    array_3=np.array([[0,-1,-2],[1,0,-1],[2,1,0]],dtype='float32')
    array_4=np.array([[-1,-2,-1],[0,0,0],[1,2,1]],dtype='float32')
    array_5=np.array([[-2,-1,0],[-1,0,1],[0,1,2]],dtype='float32')
    array_6=np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype='float32')
    array_7=np.array([[0,1,2],[-1,0,1],[-2,-1,0]],dtype='float32')
    for i in range(3):
        kernel[0,i,:]=torch.from_numpy(array_0)
        kernel[1,i,:]=torch.from_numpy(array_1)
        kernel[2,i,:]=torch.from_numpy(array_2)
        kernel[3,i,:]=torch.from_numpy(array_3)
        kernel[4,i,:]=torch.from_numpy(array_4)
        kernel[5,i,:]=torch.from_numpy(array_5)
        kernel[6,i,:]=torch.from_numpy(array_6)
        kernel[7,i,:]=torch.from_numpy(array_7)
    return torch.nn.Parameter(kernel,requires_grad=True)
# Compute the initial weights of the "add" convolution layer
# n, m mean that there are n * m sub-patches in the input patch
# For example, if the input patch is 16 * 16: for the 4 * 4 patches of the first layer, n = 4, m = 4;
# for the 8 * 8 patches of the second layer, n = 2, m = 2
def kernel_add_ini(n,m):
    input_canal=int(n*m)
    output_canal=int(n/2)*int(m/2)
    for i in range(int(n/2)):
        for j in range(int(m/2)):
            kernel_add=np.zeros([1,input_canal],dtype='float32')
            kernel_add[0,i*2*m+j*2]=1
            kernel_add[0,i*2*m+j*2+1]=1
            kernel_add[0,(i*2+1)*m+j*2]=1
            kernel_add[0,(i*2+1)*m+j*2+1]=1
            if i==0 and j==0:
                add=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))
            else:
                add_=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))
                add=torch.cat((add,add_),0)
    return torch.nn.Parameter(add,requires_grad=False)
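# Worked example (added, illustrative): for n=m=4 the "add" layer is a 1x1 convolution with
# 16 input channels and 4 output channels; each output channel sums a 2x2 block of
# neighbouring correlation maps (output 0 adds input channels 0, 1, 4 and 5).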
# Compute the initial weights of the "shift" convolution layer
# shift+add together implement the aggregation step
# See above for the parameters n and m.
# For more detailed steps, please see my internship report
def kernel_shift_ini(n,m):
    input_canal=int(n*m)
    output_canal=int(n*m)
    kernel_shift=torch.zeros([output_canal,input_canal,3,3])
    array_0=np.array([[1,0,0],[0,0,0],[0,0,0]],dtype='float32')
    array_1=np.array([[0,0,1],[0,0,0],[0,0,0]],dtype='float32')
    array_2=np.array([[0,0,0],[0,0,0],[1,0,0]],dtype='float32')
    array_3=np.array([[0,0,0],[0,0,0],[0,0,1]],dtype='float32')
    kernel_shift_0=torch.from_numpy(array_0)
    kernel_shift_1=torch.from_numpy(array_1)
    kernel_shift_2=torch.from_numpy(array_2)
    kernel_shift_3=torch.from_numpy(array_3)
    for i in range(n):
        for j in range(m):
            if i==0 and j==0:
                kernel_shift[0,0,:]=kernel_shift_0
            else:
                if i%2==0 and j%2==0:
                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_0
                if i%2==0 and j%2==1:
                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_1
                if i%2==1 and j%2==0:
                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_2
                if i%2==1 and j%2==1:
                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_3
    return torch.nn.Parameter(kernel_shift,requires_grad=False)
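# Worked example (added, illustrative): each 3x3 shift kernel contains a single 1 in one
# corner, so it translates a correlation map by one pixel diagonally; the corner depends
# on the parity of (i,j), so the four maps of a 2x2 block are pulled toward a common
# centre before being summed by the "add" layer.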
# Find the small patch (4 * 4) in the n-th row and m-th column of the input patch
# This is used to compute the convolution and obtain the correlation map
def get_patch(fragment,psize,n,m):
    return fragment[:,:,n*psize:(n+1)*psize,m*psize:(m+1)*psize]
###################################################################################################################
class Net(nn.Module):
    def __init__(self,frag_size,psize):
        super(Net, self).__init__()
        h_fr=frag_size
        w_fr=frag_size
        n=int(h_fr/psize) # n*m sub-patches in the input patch
        m=int(w_fr/psize)
        self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)
        # If you want to initialize Conv1 with the DeepMatch weights, run the following line
        # self.conv1.weight=ini()
        self.Relu = nn.ReLU(inplace=True)
        self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)
        self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
        self.shift1.weight=kernel_shift_ini(n,m)
        self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
        self.add1.weight=kernel_add_ini(n,m)
        n=int(n/2)
        m=int(m/2)
        if n>=2 and m>=2: # if n=m=1, the network needs no further layers to aggregate the correlation maps
            self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
            self.shift2.weight=kernel_shift_ini(n,m)
            self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
            self.add2.weight=kernel_add_ini(n,m)
            n=int(n/2)
            m=int(m/2)
            if n>=2 and m>=2:
                self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
                self.shift3.weight=kernel_shift_ini(n,m)
                self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
                self.add3.weight=kernel_add_ini(n,m)
    def get_descripteur(self,img,using_cuda):
        # Use Conv1 to compute the descriptor
        descripteur_img=self.Relu(self.conv1(img))
        b,c,h,w=descripteur_img.shape
        couche_constante=0.5*torch.ones([1,1,h,w])
        if using_cuda:
            couche_constante=couche_constante.cuda()
        # Add a constant channel to avoid division by 0 during normalization
        descripteur_img=torch.cat((descripteur_img,couche_constante),1)
        # normalization
        descripteur_img_norm=descripteur_img/torch.norm(descripteur_img,dim=1)
        return descripteur_img_norm
    def forward(self,img,frag,using_cuda):
        psize=4
        # Use Conv1 to compute the descriptors
        descripteur_input1=self.get_descripteur(img,using_cuda)
        descripteur_input2=self.get_descripteur(frag,using_cuda)
        b,c,h,w=frag.shape
        n=int(h/psize)
        m=int(w/psize)
        #######################################
        # Compute the correlation maps by convolution for the n*m smaller patches.
        for i in range(n):
            for j in range(m):
                if i==0 and j==0:
                    map_corre=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)
                else:
                    a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)
                    map_corre=torch.cat((map_corre,a),1)
        ########################################
        # Aggregation step
        map_corre=self.maxpooling(map_corre)
        map_corre=self.shift1(map_corre)
        map_corre=self.add1(map_corre)
        #########################################
        # Repeat the aggregation step until the correlation map of the whole input patch is obtained
        n=int(n/2)
        m=int(m/2)
        if n>=2 and m>=2:
            map_corre=self.maxpooling(map_corre)
            map_corre=self.shift2(map_corre)
            map_corre=self.add2(map_corre)
            n=int(n/2)
            m=int(m/2)
            if n>=2 and m>=2:
                map_corre=self.maxpooling(map_corre)
                map_corre=self.shift3(map_corre)
                map_corre=self.add3(map_corre)
        b,c,h,w=map_corre.shape
        # Normalization by dividing by the maximum
        map_corre=map_corre/(map_corre.max())
        # SoftMax normalization
        #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)
        return map_corre
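# Minimal usage sketch (added, illustrative):
#   net = Net(16, 4)   # 16x16 input patches, 4x4 base patches
#   corr = net.forward(img2tensor(img), img2tensor(frag), using_cuda=False)
# corr is then a (1,1,H',W') correlation map over the whole image.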
# In[5]:
def run_net(net,img,frag,frag_size,using_cuda):
    h,w,c=frag.shape
    n=int(h/frag_size)
    m=int(w/frag_size)
    frag_list=[]
    #####################################
    # Get square patches from the fragment and put them in frag_list
    for i in range(n):
        for j in range(m):
            frag_32=frag[i*frag_size:(i+1)*frag_size,j*frag_size:(j+1)*frag_size]
            if test_fragment32_32(frag_32,0.6):
                frag_list.append(frag_32)
    img_tensor=img2tensor(img)
    ######################################
    if using_cuda:
        img_tensor=img_tensor.cuda()
    coordonnee_list=[]
    #######################################
    # Use the network to compute the positions of all patches in frag_list[]
    # Put the results in coordonnee_list[]
    for i in range(len(frag_list)):
        frag_tensor=img2tensor(frag_list[i])
        if using_cuda:
            frag_tensor=frag_tensor.cuda()
        res=net.forward(img_tensor,frag_tensor,using_cuda)
        if using_cuda:
            res=res.cpu()
        po_h,po_w=show_coordonnee(res)
        coordonnee_list.append([po_h,po_w])
    h_img,w_img,c=img.shape
    position=[]
    for i in range(len(coordonnee_list)):
        x=int(round(h_img*coordonnee_list[i][0]))
        y=int(round(w_img*coordonnee_list[i][1]))
        position.append([x,y])
    return position
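# Example (added, illustrative): run_net returns one [x,y] pixel position per retained
# patch, e.g. [[412, 289], [407, 301], ...].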
# In[10]:
if __name__=='__main__':
    # The input patch size is 16*16
    frag_size=16
    # The smallest patch size in the network is fixed at 4*4
    psize=4
    using_cuda=True
    net=Net(frag_size,psize)
    # For each fresco, the number of iterations is 1000
    itera=1000
    if using_cuda:
        net=net.cuda()
    # Choose the optimizer and the loss function
    optimizer = torch.optim.Adam(net.parameters())
    loss_func = torch.nn.MSELoss()
    # During training, the evolution of the error is stored in loss_value=[]
    # and the evolution of the Conv1 weights is stored in para_value[]
    loss_value=[]
    para_value=[]
    ####################################################training_net
    # The training data are 6 frescoes
    for n in range(6):
        im_path="./fresque"+str(n)+".ppm"
        img_training=cv2.imread(im_path)
        h,w,c=img_training.shape
        # If the fresco is too large, downsample it
        # (cv2.resize expects the target size as (width, height))
        while h*w>(1240*900):
            img_training=cv2.resize(img_training,(int(w/2),int(h/2)),interpolation=cv2.INTER_CUBIC)
            h,w,c=img_training.shape
        im_tensor=img2tensor(img_training)
        if using_cuda:
            im_tensor=im_tensor.cuda()
        for i in range(itera):
            # Every 100 iterations, record the evolution of the weights
            if i%100==0:
                para=net.conv1.weight
                para=para.detach().cpu()
                para_value.append(para)
            frag,vt=get_training_fragment(frag_size,img_training)
            frag_tensor=img2tensor(frag)
            if using_cuda:
                vt=vt.cuda()
                frag_tensor=frag_tensor.cuda()
            # Run the network on training patches and frescoes
            frag_pred=net.forward(im_tensor,frag_tensor,using_cuda)
            b,c,h,w=vt.shape
            # Use the loss function to compute the error
            err_=loss_func(vt,frag_pred)
            # Use the optimizer to adjust the Conv1 weights
            optimizer.zero_grad()
            err_.backward(retain_graph=True)
            optimizer.step()
            loss_value.append(err_.tolist())
            del frag_tensor,frag_pred,err_,vt
            torch.cuda.empty_cache()
# In[7]:
len(loss_value)
# In[11]:
plt.plot(loss_value)
# In[12]:
file_path="./net_trainned6000"
save_net(file_path,net)

View file

@@ -0,0 +1,518 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Imports\n",
"The required packages should already be installed on the machine provided; if not, they must be installed.\n",
"Careful when installing pytorch: pick the version matching the CUDA version supported by the graphics card."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch.autograd import Variable\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import cv2\n",
"import matplotlib\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import random\n",
"import math\n",
"import pickle\n",
"from PIL import Image\n",
"import sys\n",
"from glob import glob\n",
"from IPython.display import clear_output\n",
"from datetime import datetime\n",
"from time import time\n",
"import json\n",
"import os"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Utility functions\n",
"Mainly import/export helpers, object conversion and data generation functions."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# The functions in this block are not used by the network itself; they are utility helpers\n",
"\n",
"\n",
"def tensor_imshow(im_tensor,cannel):\n",
" b,c,h,w=im_tensor.shape\n",
" if c==1:\n",
" plt.imshow(im_tensor.squeeze().detach().numpy())\n",
" else:\n",
" plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:])\n",
"\n",
"# Get training data\n",
"# frag,vt=get_training_fragment(frag_size,image)\n",
"# frag is a square patch of size (frag_size*frag_size) taken from the image (its location is random)\n",
"# vt is the ground truth, a Dirac map.\n",
"def get_training_fragment(frag_size,im):\n",
" \"\"\"Permet de générer des fragments et leur vérité terrain. \n",
" Dépréciée au profit d'un entrainement à partir de fragments déjà générés (voir load_training_fragment et le fichier gen_frags)\n",
" \"\"\"\n",
" h,w,c=im.shape\n",
" n=random.randint(0,int(h/frag_size)-1)\n",
" m=random.randint(0,int(w/frag_size)-1) \n",
" shape=frag_size/4\n",
" vt_h=math.ceil((h+1)/shape)\n",
" vt_w=math.ceil((w+1)/shape)\n",
" vt=np.zeros([vt_h,vt_w])\n",
" vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)\n",
" vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)\n",
" vt[vt_h_po,vt_w_po]=1\n",
" vt = np.float32(vt)\n",
" vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))\n",
" \n",
" return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt\n",
"\n",
"def load_training_fragment(fragment_path,vt_path):\n",
" \"\"\"Charge un fragment de la base de test et génère la tableau de la vérité terrain.\n",
" \"\"\"\n",
" # Load fragment\n",
" frag = cv2.imread(fragment_path)\n",
" \n",
" # Load vt data\n",
" with open(vt_path,'r') as f:\n",
" data_vt_raw = f.readlines()\n",
" data_vt = [int(d.rstrip('\\r\\n')) for d in data_vt_raw]\n",
" \n",
" #size factor between the fresco and the vt/correlation map.\n",
" # If training is done on 32x32 fragments, the final correlation map\n",
" # will be 126x126 (for an initially 1000x1000 fresco). The division\n",
" # factor is then 8.\n",
" # If instead training is done on 16x16 fragments, there is one fewer\n",
" # division step (maxpooling), so the final map will be 256x256 and\n",
" # the division factor is therefore 4\n",
" div = 4 \n",
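" # Example (added, illustrative): data_vt = [1000, 800, 512, 300] gives a vt of\n",
" # shape (251, 201) with the single 1 placed at (128, 75).\n",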
" \n",
" # Construct vt\n",
" vt = np.zeros((int(data_vt[0]/div)+1,int(data_vt[1]/div)+1))\n",
" vt[int(round(data_vt[2]/div,0)),int(round(data_vt[3]/div,0))] = 1\n",
" vt = np.float32(vt)\n",
" vt = torch.from_numpy(vt.reshape(1,1,int(data_vt[0]/div)+1,int(data_vt[1]/div)+1))\n",
" \n",
" return(frag,vt)\n",
"\n",
"def img2tensor(im):\n",
" \"\"\"Conversion d'une image en tenseur pytorch.\n",
" Pour rappel: Tensor.shape=[Batch,Channel,Height,Width]\"\"\"\n",
" im=np.array(im,dtype=\"float32\")\n",
" tensor_cv = torch.from_numpy(np.transpose(im, (2, 0, 1)))\n",
" im_tensor=tensor_cv.unsqueeze(0)\n",
" return im_tensor\n",
"\n",
"\n",
"def show_coordonnee(position_pred):\n",
" \"\"\"Trouve les coordonnées du maximum dans la carte de correlation.\"\"\"\n",
" map_corre=position_pred.squeeze().detach().cpu().numpy()\n",
" h,w=map_corre.shape\n",
" max_value=map_corre.max()\n",
" coordonnee=np.where(map_corre==max_value)\n",
" return coordonnee[0].mean(),coordonnee[1].mean()\n",
"\n",
"def test_fragment32_32(frag,seuillage):\n",
" \"\"\"Vérifie que le fragment ne contient pas trop de pixels noir (fond).\"\"\"\n",
" a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]\n",
" mask = (a == 0)\n",
" arr_new = a[mask]\n",
" if arr_new.size/a.size<=(1-seuillage):\n",
" return True\n",
" else:\n",
" return False\n",
" \n",
"def save_net(file_path,net):\n",
" \"\"\"Sauvegarde le modèle à l'aide de Pickle (fichier binaire).\"\"\"\n",
" pkl_file = open(file_path, 'wb')\n",
" pickle.dump(net,pkl_file)\n",
" pkl_file.close()\n",
"def load_net(file_path):\n",
" \"\"\"Charge le modèle sauvegardé à l'aide de Pickle.\"\"\"\n",
" pkl_file = open(file_path, 'rb')\n",
" net= pickle.load(pkl_file)\n",
" pkl_file.close()\n",
" return net"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Network parameters\n",
"These functions describe and initialize the network and its layers"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def ini():\n",
" \"\"\"Create DeepMatching-style weights as the initial value of Conv1 (optional)\"\"\"\n",
" kernel=torch.zeros([8,3,3,3])\n",
" array_0=np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype='float32')\n",
" array_1=np.array([[2,1,0],[1,0,-1],[0,-1,-2]],dtype='float32')\n",
" array_2=np.array([[1,0,-1],[2,0,-2],[1,0,-1]],dtype='float32')\n",
" array_3=np.array([[0,-1,-2],[1,0,-1],[2,1,0]],dtype='float32')\n",
" array_4=np.array([[-1,-2,-1],[0,0,0],[1,2,1]],dtype='float32')\n",
" array_5=np.array([[-2,-1,0],[-1,0,1],[0,1,2]],dtype='float32')\n",
" array_6=np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype='float32')\n",
" array_7=np.array([[0,1,2],[-1,0,1],[-2,-1,0]],dtype='float32')\n",
" for i in range(3):\n",
" kernel[0,i,:]=torch.from_numpy(array_0)\n",
" kernel[1,i,:]=torch.from_numpy(array_1)\n",
" kernel[2,i,:]=torch.from_numpy(array_2)\n",
" kernel[3,i,:]=torch.from_numpy(array_3)\n",
" kernel[4,i,:]=torch.from_numpy(array_4)\n",
" kernel[5,i,:]=torch.from_numpy(array_5)\n",
" kernel[6,i,:]=torch.from_numpy(array_6)\n",
" kernel[7,i,:]=torch.from_numpy(array_7)\n",
" return torch.nn.Parameter(kernel,requires_grad=True) \n",
"\n",
"\n",
"def kernel_add_ini(n,m):\n",
" \"\"\"Calculer le poids initial de la couche convolutive add\n",
" n, m signifie qu'il y a n * m sous-patches dans le patch d'entrée\n",
" Par exemple, le patch d'entrée est 16 * 16, pour les patchs 4 * 4 de la première couche, n = 4, m = 4\n",
" pour les patchs 8 * 8 de la deuxième couche, n = 2, m = 2\"\"\"\n",
" input_canal=int(n*m)\n",
" output_canal=int(n/2)*int(m/2)\n",
" for i in range(int(n/2)):\n",
" for j in range(int(m/2)):\n",
" kernel_add=np.zeros([1,input_canal],dtype='float32')\n",
" kernel_add[0,i*2*m+j*2]=1\n",
" kernel_add[0,i*2*m+j*2+1]=1\n",
" kernel_add[0,(i*2+1)*m+j*2]=1\n",
" kernel_add[0,(i*2+1)*m+j*2+1]=1\n",
" if i==0 and j==0:\n",
" add=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
" else:\n",
" add_=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
" add=torch.cat((add,add_),0)\n",
" return torch.nn.Parameter(add,requires_grad=False) \n",
"\n",
"\n",
"def kernel_shift_ini(n,m):\n",
" \"\"\"Calculer le poids initial de la couche convolutive shift\n",
" shift+add Peut réaliser l'étape de l'agrégation\n",
" Voir ci-dessus pour les paramètres n et m. \n",
" Pour des étapes plus détaillées, veuillez consulter le rapport de stage de Boyang\"\"\"\n",
" input_canal=int(n*m)\n",
" output_canal=int(n*m)\n",
" \n",
" kernel_shift=torch.zeros([output_canal,input_canal,3,3])\n",
" \n",
" array_0=np.array([[1,0,0],[0,0,0],[0,0,0]],dtype='float32')\n",
" array_1=np.array([[0,0,1],[0,0,0],[0,0,0]],dtype='float32')\n",
" array_2=np.array([[0,0,0],[0,0,0],[1,0,0]],dtype='float32')\n",
" array_3=np.array([[0,0,0],[0,0,0],[0,0,1]],dtype='float32')\n",
" \n",
" kernel_shift_0=torch.from_numpy(array_0)\n",
" kernel_shift_1=torch.from_numpy(array_1)\n",
" kernel_shift_2=torch.from_numpy(array_2)\n",
" kernel_shift_3=torch.from_numpy(array_3)\n",
" \n",
" \n",
" for i in range(n):\n",
" for j in range(m):\n",
" if i==0 and j==0:\n",
" kernel_shift[0,0,:]=kernel_shift_0\n",
" else:\n",
" if i%2==0 and j%2==0:\n",
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_0\n",
" if i%2==0 and j%2==1:\n",
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_1\n",
" if i%2==1 and j%2==0:\n",
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_2\n",
" if i%2==1 and j%2==1:\n",
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_3\n",
" \n",
" return torch.nn.Parameter(kernel_shift,requires_grad=False) \n",
"\n",
"# Find the small patch (4 * 4) in the n-th row and m-th column of the input patch\n",
"# This is used to compute the convolution and obtain the correlation map\n",
"def get_patch(fragment,psize,n,m):\n",
" return fragment[:,:,n*psize:(n+1)*psize,m*psize:(m+1)*psize]\n",
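"# Example (added, illustrative): get_patch(frag_tensor, 4, 1, 2) returns the 4x4 sub-patch\n",
"# in row 1, column 2, i.e. frag_tensor[:,:,4:8,8:12].\n",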
"###################################################################################################################\n",
"class Net(nn.Module):\n",
" def __init__(self,frag_size,psize):\n",
" super(Net, self).__init__()\n",
" \n",
" h_fr=frag_size\n",
" w_fr=frag_size\n",
" \n",
" n=int(h_fr/psize) # n*m sub-patches in the input patch\n",
" m=int(w_fr/psize)\n",
" \n",
" # The number of descriptor layers (self.conv1) can be changed\n",
" self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)\n",
" # If you want to initialize Conv1 with the DeepMatch weights, run the following line\n",
" #self.conv1.weight=ini()\n",
" self.Relu = nn.ReLU(inplace=True)\n",
" self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)\n",
" \n",
" self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
" self.shift1.weight=kernel_shift_ini(n,m)\n",
" self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
" self.add1.weight=kernel_add_ini(n,m)\n",
" \n",
" n=int(n/2)\n",
" m=int(m/2)\n",
" if n>=2 and m>=2: # if n=m=1, the network needs no further layers to aggregate the correlation maps\n",
" self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
" self.shift2.weight=kernel_shift_ini(n,m)\n",
" self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
" self.add2.weight=kernel_add_ini(n,m)\n",
" \n",
" n=int(n/2)\n",
" m=int(m/2)\n",
" if n>=2 and m>=2:\n",
" self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
" self.shift3.weight=kernel_shift_ini(n,m)\n",
" self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
" self.add3.weight=kernel_add_ini(n,m)\n",
" \n",
" def get_descripteur(self,img,using_cuda):\n",
" # Use Conv1 to compute the descriptor\n",
" descripteur_img=self.Relu(self.conv1(img))\n",
" b,c,h,w=descripteur_img.shape\n",
" couche_constante=0.5*torch.ones([1,1,h,w])\n",
" if using_cuda:\n",
" couche_constante=couche_constante.cuda()\n",
" # Add a constant channel to avoid division by 0 during normalization\n",
" descripteur_img=torch.cat((descripteur_img,couche_constante),1)\n",
" # normalization\n",
" descripteur_img_norm=descripteur_img/torch.norm(descripteur_img,dim=1)\n",
" return descripteur_img_norm\n",
" \n",
" def forward(self,img,frag,using_cuda):\n",
" psize=4\n",
" # Use Conv1 to compute the descriptors\n",
" descripteur_input1=self.get_descripteur(img,using_cuda)\n",
" descripteur_input2=self.get_descripteur(frag,using_cuda)\n",
" \n",
" b,c,h,w=frag.shape\n",
" n=int(h/psize)\n",
" m=int(w/psize)\n",
" \n",
" #######################################\n",
" # Compute the correlation maps by convolution for the n*m smaller patches.\n",
" for i in range(n):\n",
" for j in range(m):\n",
" if i==0 and j==0:\n",
" map_corre=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
" else:\n",
" a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
" map_corre=torch.cat((map_corre,a),1)\n",
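" # Note (added): after this loop map_corre has n*m channels, one correlation map\n",
" # per 4x4 sub-patch of the fragment.\n",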
" ########################################\n",
" # Aggregation step\n",
" map_corre=self.maxpooling(map_corre)\n",
" map_corre=self.shift1(map_corre)\n",
" map_corre=self.add1(map_corre)\n",
" \n",
" #########################################\n",
" # Repeat the aggregation step until the correlation map of the whole input patch is obtained\n",
" n=int(n/2)\n",
" m=int(m/2)\n",
" if n>=2 and m>=2:\n",
" map_corre=self.maxpooling(map_corre)\n",
" map_corre=self.shift2(map_corre)\n",
" map_corre=self.add2(map_corre)\n",
"\n",
" n=int(n/2)\n",
" m=int(m/2)\n",
" if n>=2 and m>=2:\n",
" map_corre=self.maxpooling(map_corre)\n",
" map_corre=self.shift3(map_corre)\n",
" map_corre=self.add3(map_corre)\n",
" \n",
" \n",
" \n",
" \n",
" b,c,h,w=map_corre.shape\n",
" # Normalization by dividing by the maximum\n",
" map_corre=map_corre/(map_corre.max())\n",
" # SoftMax normalization\n",
" #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)\n",
" return map_corre"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 2, Fresque 0, fragment 117/1500 (7.8%)\n",
"Temps par fragment: 0.76\n"
]
}
],
"source": [
"if __name__=='__main__':\n",
" \n",
" \n",
" # Data variables\n",
" base_dir = './training_data_shift_color_16/'\n",
" fresque_filename = base_dir+'fresque{}.ppm'\n",
" fresque_filename_wild = base_dir+'fresque*.ppm'\n",
" fragment_filename = base_dir+'fragments/fresque{}/frag_dev_{:05}.ppm'\n",
" fragments_filename_wild = base_dir+'fragments/fresque{}/frag_dev_*.ppm'\n",
" vt_filename = base_dir+'fragments/fresque{}/vt/frag_dev_{:05}_vt.txt'\n",
" save_dir = './trained_net/'\n",
" \n",
" # To resume training from an already trained model, give a \n",
" # file path to load. \n",
" # Otherwise leave None\n",
" net_filename = None\n",
" # Smallest patch size (normally leave it at 4)\n",
" psize=4\n",
" \n",
" # Use the graphics card (mind the pytorch version)\n",
" # To check whether the graphics card is usable, run:\n",
" # import torch\n",
" # torch.cuda.is_available()\n",
" # in a python3 terminal\n",
" using_cuda=True\n",
" \n",
" # Load the first fragment to get the training size\n",
" dummy_frag = cv2.imread(fragment_filename.format(0,0))\n",
" frag_size = dummy_frag.shape[0]\n",
" del dummy_frag\n",
" \n",
" # If resuming training, load the model and set the epochs.\n",
" # Careful: the loaded model must have the same architecture as the model\n",
" # described in the functions above.\n",
" if net_filename:\n",
" net = load_net(net_filename)\n",
" epochs = [4,10]\n",
" expe_id = int(net_filename.split(\"_\")[-1])\n",
" else:\n",
" net=Net(frag_size,psize)\n",
" expe_id = 120\n",
" epochs = [0,10]\n",
" \n",
" # Whether to save the model at the end of training\n",
" save = True\n",
" \n",
" if using_cuda:\n",
" net=net.cuda()\n",
" \n",
" # Choose the optimizer and the loss function\n",
" optimizer = torch.optim.Adam(net.parameters())\n",
" #loss_func = torch.nn.MSELoss()\n",
" loss_func = torch.nn.SmoothL1Loss(size_average=None, reduce=None, reduction='mean', beta=20.0)\n",
" \n",
" # During training, the evolution of the error is stored in loss_value=[] \n",
" loss_value=[]\n",
" \n",
" # Find the fresco images\n",
" fresques_paths = glob(fresque_filename_wild) \n",
" N_fresque = len(fresques_paths)\n",
" \n",
" time_old = time()\n",
" \n",
" for epoch in range(epochs[0],epochs[1]):\n",
" # Iterate over the frescoes found\n",
" for fresque_id,fresque_path in enumerate(fresques_paths):\n",
" # Load the fresco\n",
" fresque=cv2.imread(fresque_path)\n",
" h,w,c=fresque.shape\n",
" fresque_tensor=img2tensor(fresque)\n",
"\n",
" # If GPU, move the fresco to it\n",
" if using_cuda:\n",
" fresque_tensor=fresque_tensor.cuda()\n",
"\n",
" # Find the training fragments\n",
" fragments_paths = glob(fragments_filename_wild.format(fresque_id))\n",
" random.shuffle(fragments_paths)\n",
" fragments_paths = fragments_paths[:1500]\n",
" N_fragments = len(fragments_paths)\n",
" # Iterate over the fragments found\n",
" for fragment_id,fragment_path in enumerate(fragments_paths):\n",
" clear_output(wait=True)\n",
" print(\"Epoch {}, Fresque {}, fragment {}/{} ({:.3}%)\".format(epoch,fresque_id,fragment_id,N_fragments,(fragment_id/N_fragments)*100))\n",
" print(\"Temps par fragment: {:.3}\".format(time()-time_old))\n",
" time_old = time()\n",
"\n",
" # Load the fragment and its ground truth\n",
" frag,vt=load_training_fragment(fragment_path,vt_filename.format(fresque_id,fragment_id))\n",
"\n",
" # If GPU, move the objects to it\n",
" frag_tensor=img2tensor(frag)\n",
" if using_cuda:\n",
" vt=vt.cuda()\n",
" frag_tensor=frag_tensor.cuda()\n",
"\n",
" frag_pred=net.forward(fresque_tensor,frag_tensor,using_cuda)\n",
" \n",
" # Use the loss function to compute the error\n",
" err_=loss_func(vt,frag_pred)\n",
" optimizer.zero_grad()\n",
" err_.backward(retain_graph=True)\n",
" optimizer.step()\n",
" print(err_.tolist())\n",
"\n",
" loss_value.append(err_.tolist())\n",
"\n",
" del frag_tensor,frag_pred,err_,vt\n",
" torch.cuda.empty_cache()\n",
"\n",
" # Save the network\n",
" if save:\n",
" net_filename = save_dir + \"net_trainned_SLLShift_E{}_{}_{:04}\".format(epoch,datetime.now().strftime(\"%m-%d_%H-%M\"),expe_id)\n",
" save_net(net_filename,net)\n",
" \n",
" with open(save_dir + \"loss_values_SLLShift_E{}_{}_{}\".format(epoch,datetime.now().strftime(\"%m-%d_%H-%M\"),expe_id),'w') as f:\n",
" f.write(json.dumps(loss_value))\n",
" \n",
" print(\"Net sauvegardés dans {}\".format(net_filename))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

File diff suppressed because one or more lines are too long

View file

@@ -28,7 +28,8 @@
 "from IPython.display import clear_output\n",
 "from datetime import datetime\n",
 "import json\n",
-"from time import time"
+"from time import time\n",
+"from random import randint"
 ]
 },
 {
@@ -97,6 +98,55 @@
 " coordonnee=np.where(map_corre==max_value)\n",
 " return score,coordonnee[0].mean()/h,coordonnee[1].mean()/w\n",
 "\n",
+"#========================================================================================================================================================================\n",
+"def get_initial_pos(carte,indices,max_cartes):\n",
+" disp = False\n",
+" carte = carte.squeeze().detach().cpu().numpy()\n",
+" results = np.where(carte == np.amax(carte))\n",
+" posf = [results[0][0],results[1][0]]\n",
+" print(\"[BACKTRACK] Position finale: {}...\".format(posf))\n",
+" if disp:\n",
+" \n",
+"\n",
+" print(\"ÉTAPE 1:\")\n",
+" print(\"Taille indices: {}\".format(indices[2].shape))\n",
+" print(\"Taille max_carte: {}\".format(max_cartes[2].shape))\n",
+" indices_layer_selected = max_cartes[2][posf[0],posf[1]]\n",
+" index = indices[2][0,indices_layer_selected,posf[0],posf[1]]\n",
+" posi = [index//251,index%251]\n",
+" if disp:\n",
+" print(\"Position étage 251: {}\".format(posi))\n",
+"\n",
+" print(\"ÉTAPE 2:\")\n",
+" print(\"Taille indices: {}\".format(indices[1].shape))\n",
+" print(\"Taille max_carte: {}\".format(max_cartes[1].shape))\n",
+" indices_layer_selected = max_cartes[1][posi[0],posi[1]]\n",
+" index = indices[1][0,indices_layer_selected,posi[0],posi[1]]\n",
+" posi = [index//501,index%501]\n",
+" if disp:\n",
+" print(\"Position étage 501: {}\".format(posi))\n",
+"\n",
+" print(\"ÉTAPE 3:\")\n",
+" print(\"Taille indices: {}\".format(indices[0].shape))\n",
+" print(\"Taille max_carte: {}\".format(max_cartes[0].shape))\n",
+" indices_layer_selected = max_cartes[0][posi[0],posi[1]]\n",
+" index = indices[0][0,indices_layer_selected,posi[0],posi[1]]\n",
+" posi = [index//1001,index%1001]\n",
+" print(\"[BACKTRACK] Position initiale: {}\".format(posi))\n",
+" \n",
+" return(posi)\n",
+" \n",
+"\n",
+"def max_carte(map_corre):\n",
+" map_corre = map_corre.detach().cpu().numpy()\n",
+" H,W = map_corre.shape[2],map_corre.shape[3]\n",
+" carte = np.zeros((H,W),dtype=int)\n",
+" #print(\"Génération de la carte ({})...\".format((H,W)))\n",
+" for i in range(H):\n",
+" for j in range(W):\n",
+" carte[i,j] = np.where(map_corre[0,:,i,j] == np.amax(map_corre[0,:,i,j]))[0][0]\n",
+" return(carte)\n",
+"\n",
 "def test_fragment32_32(frag,seuillage):\n",
 " a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]\n",
 " mask = (a == 0)\n",
@@ -227,7 +277,8 @@
 " self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)\n",
 " #self.conv1.weight=ini()\n",
 " self.Relu = nn.ReLU(inplace=True)\n",
-" self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)\n",
+" self.maxpooling = nn.MaxPool2d(3,stride=2, padding=1)\n",
+" self.maxunpooling = nn.MaxUnpool2d(kernel_size=3,stride=1,padding=1)\n",
 " \n",
 " self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
 " self.shift1.weight=kernel_shift_ini(n,m)\n",
@@ -250,7 +301,6 @@
 " self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
 " self.add3.weight=kernel_add_ini(n,m)\n",
 " \n",
-" \n",
 " def get_descripteur(self,img,using_cuda):\n",
 " descripteur_img=self.Relu(self.conv1(img))\n",
 " b,c,h,w=descripteur_img.shape\n",
@@ -263,10 +313,14 @@
 " \n",
 " def forward(self,img,frag,using_cuda):\n",
 " psize=4\n",
+" #print(\"[NET][FORWARD] Correlation commencée.\")\n",
 " \n",
 " descripteur_input1=self.get_descripteur(img,using_cuda)\n",
 " descripteur_input2=self.get_descripteur(frag,using_cuda)\n",
 " \n",
+" max_cartes = []\n",
+" indices = []\n",
+" \n",
 " b,c,h,w=frag.shape\n",
 " n=int(h/psize)\n",
 " m=int(w/psize)\n",
@@ -279,30 +333,40 @@
 " a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
 " map_corre=torch.cat((map_corre,a),1)\n",
 " #shift\n",
-" map_corre=self.maxpooling(map_corre)\n",
+" map_corre= self.maxpooling(map_corre)\n",
+" #indices.append(ri.detach().cpu().numpy())\n",
+" #max_cartes.append(max_carte(map_corre))\n",
 " map_corre=self.shift1(map_corre)\n",
 " map_corre=self.add1(map_corre)\n",
+" #print(\"[NET][FORWARD] Agreg 1 finie.\")\n",
 " \n",
 " \n",
 " n=int(n/2)\n",
 " m=int(m/2)\n",
 " if n>=2 and m>=2:\n",
 " map_corre=self.maxpooling(map_corre)\n",
+" #indices.append(ri.detach().cpu().numpy())\n",
+" #max_cartes.append(max_carte(map_corre))\n",
 " map_corre=self.shift2(map_corre)\n",
 " map_corre=self.add2(map_corre)\n",
+" #print(\"[NET][FORWARD] Agreg 2 finie.\")\n",
 " \n",
 " \n",
 " n=int(n/2)\n",
 " m=int(m/2)\n",
 " if n>=2 and m>=2:\n",
 " map_corre=self.maxpooling(map_corre)\n",
+" #indices.append(ri.detach().cpu().numpy())\n",
+" #max_cartes.append(max_carte(map_corre))\n",
 " map_corre=self.shift3(map_corre)\n",
 " map_corre=self.add3(map_corre)\n",
+" #print(\"[NET][FORWARD] Agreg 3 finie.\")\n",
 " \n",
 " \n",
 " b,c,h,w=map_corre.shape\n",
 " map_corre=map_corre/(map_corre.max())\n",
 " #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)\n",
+" #print(\"[NET][FORWARD] Correlation finie.\")\n",
 " return map_corre"
 ]
 },
@@ -319,7 +383,7 @@
 "# The “frag_size” parameter here refers to the size of the square input patch (16 * 16)\n",
 "# The “seuillage” parameter limits the proportion of non-black pixels in each patch\n",
 "# The “limite” parameter can limit the number of patches found in each fragment\n",
-"def get_patch_list(frag,frag_size,limite,seuillage):\n",
+"def get_patch_list_old(frag,frag_size,limite,seuillage):\n",
 " n=0\n",
 " m=0\n",
 " h,w,c=frag.shape\n",
@@ -338,14 +402,28 @@
 " m=0\n",
 " while m+frag_size+m_offset<w:\n",
 " patch=frag[n+n_offset:n+frag_size+n_offset,m+m_offset:m+frag_size+m_offset,:]\n",
-" if test_fragment32_32(patch,seuillage):\n",
+" #if test_fragment32_32(patch,seuillage):\n",
 " patch_list.append(patch)\n",
 " position_list.append([int((n+frag_size/2)+n_offset),int((m+frag_size/2)+m_offset)])\n",
 " m=m+frag_size\n",
 " n=n+frag_size\n",
+" if d:\n",
+" for i in range(len(position_list)):\n",
+" print(\"{}\".format(position_list[i]))\n",
 " return patch_list,position_list\n",
 "\n",
-"# Input the fragment and the fresco, then run the network\n",
+"def get_patch_list(frag,frag_size,limite,seuillage):\n",
+" frag_taille = frag.shape\n",
+" print(frag.shape)\n",
+" patchs, positions = [], []\n",
+" for i in range(limite):\n",
+" h,w = randint(0,frag_taille[0]-frag_size), randint(0,frag_taille[1]-frag_size)\n",
+" patchs.append(frag[h:h+frag_size,w:w+frag_size,:])\n",
+" positions.append([int(h+frag_size/2),int(w+frag_size/2)])\n",
+" return(patchs,positions)\n",
+" \n",
+"\n",
+"# Input the fragment and the fresco, then run the network\n",
 "def run_net_v3(net,img,frag,frag_size,limite,seuillage,using_cuda,rotation):\n",
 " Img=Image.fromarray(frag)\n",
 " frag=np.array(Img.rotate(rotation))\n",
@@ -353,7 +431,9 @@
 " \n",
 " # the collection of square patches in the fragment is \"frag_list[]\"\n",
 " # the positions of their centres in the fragment are \"position_frag[]\"\n",
-" frag_list,position_frag=get_patch_list(frag,frag_size,limite,seuillage)\n",
+" patchs,positions_patchs=get_patch_list(frag,frag_size,limite,seuillage)\n",
+" #patchs,positions_patchs = get_patch_list_old(frag,frag_size,limite,seuillage)\n",
+" #print(\"Calcule des positions des {} patchs:\".format(len(patchs)))\n",
 " if using_cuda:\n",
 " img_tensor=img_tensor.cuda()\n",
 " \n",
@@ -363,16 +443,22 @@
 " \n",
 " # For each square patch in the collection, compute its position with the network\n",
 " # The result is placed in \"coordonnee_list[]\"\n",
 " # \"score_list[]\" is not useful in our program\n",
+" print(\"Estimation de {} patchs\".format(len(patchs)))\n",
-" for i in range(len(frag_list)):\n",
-" frag_tensor=img2tensor(frag_list[i])\n",
+" for i in range(len(patchs)):\n",
+" #print(\"[RUNNETv3] Patch {}/{}\".format(i,len(patchs)))\n",
+" patch_tensor=img2tensor(patchs[i])\n",
 " if using_cuda:\n",
-" frag_tensor=frag_tensor.cuda()\n",
-" res=net.forward(img_tensor,frag_tensor,using_cuda)\n",
+" patch_tensor=patch_tensor.cuda()\n",
+" res=net.forward(img_tensor,patch_tensor,using_cuda)\n",
+" \n",
+" #pos_init = get_initial_pos(res,indices,max_cartes)\n",
+" \n",
 " if using_cuda:\n",
 " res=res.cpu()\n",
+" \n",
 " score,po_h,po_w=show_coordonnee(res)\n",
 " coordonnee_list.append([po_h,po_w])\n",
-" score_list.append(score)\n",
+" #score_list.append(score)\n",
 " h_img,w_img,c=img.shape\n",
 " position=[]\n",
 " \n",
@@ -380,10 +466,12 @@
 " # [x,y,x',y']\n",
 " # The position (x,y) in the fragment corresponds to the position (x',y') in the fresco\n",
 " for i in range(len(coordonnee_list)):\n",
-" x0=position_frag[i][0]\n",
-" y0=position_frag[i][1]\n",
+" x0=positions_patchs[i][0]\n",
+" y0=positions_patchs[i][1]\n",
 " x1=int(round(h_img*coordonnee_list[i][0]))\n",
 " y1=int(round(w_img*coordonnee_list[i][1]))\n",
+" #x1 = coordonnee_list[i][0]\n",
+" #y1 = coordonnee_list[i][1]\n",
 " position.append([x0,y0,x1,y1])\n",
 " return score_list,position"
 ]
@@ -510,15 +598,31 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Fresque 5, fragment 255/256 (99.6%)\n",
-"Temps par fragment: 11.6. ETA = 11.6s\n",
-"Sauvegardé dans ./results_bench/results_bench_f5_02-04_17-53_0003\n"
+"[MAIN]Fresque 5, fragment 99/100 (99.0%)\n",
+"[MAIN]Temps par fragment: 5.71. ETA = 5.71s\n",
+"Fragment de taille (82, 39, 3)\n",
+"(82, 39, 3)\n",
+"Estimation de 30 patchs\n",
+"Nombre de positions avant match: 30\n",
+"[!Retour MATCH] code [-2]\n",
+"Sauvegardé dans ./results_bench/results_SLLShift_8_f5_03-11_18-36_E3_0120\n"
 ]
 }
 ],
 "source": [
 "if __name__==\"__main__\":\n",
 " \n",
+" d=False # whether to display info\n",
+" \n",
+" try:\n",
+" del frag\n",
+" del img\n",
+" net.zero_grad()\n",
+" torch.cuda.empty_cache()\n",
+" print(\"Emptied\")\n",
+" except:\n",
+" pass\n",
+" \n",
 " # Network variables\n",
 " frag_size=16\n",
 " using_cuda=True\n",
@@ -526,26 +630,32 @@
 " #fresque_id = 2\n",
 "\n",
 " # Data variables\n",
-" base_dir = './fragments_complets/'\n",
+" base_dir = './training_data_32/'\n",
 " fresque_filename = base_dir+'fresque_small{}.ppm'\n",
 " fresque_filename_wild = base_dir+'fresque_small*.ppm'\n",
 " fragment_filename = base_dir+'fragments/fresque{}/frag_bench_{:05}.ppm'\n",
 " fragments_filename_wild = base_dir+'fragments/fresque{}/frag_bench_*.ppm'\n",
 " vt_filename = base_dir+'fragments/fresque{}/vt/frag_bench_{:05}_vt.txt'\n",
-" net_filename = \"./trained_net/net_trainned_02-03_01-33_0002\"\n",
+" net_filename = \"./trained_net/net_trainned_SLLShift_E3_03-11_17-34_0120\"\n",
 " \n",
-" #expe_id = int(net_filename.split(\"_\")[-1]) # experiment ID, appended to every file written so that results from the same experiment can be identified.\n",
-" expe_id = 3\n",
+" expe_id = int(net_filename.split(\"_\")[-1]) # experiment ID, appended to every file written so that results from the same experiment can be identified.\n",
+" #expe_id = 999\n",
 " date = datetime.now().strftime(\"%m-%d_%H-%M\")\n",
-" results_filename = './results_bench/results_bench_f{}_{}_{:04}'.format(fresque_id,date,expe_id)\n",
+" results_filename = './results_bench/results_SLLShift_8_f{}_{}_E3_{:04}'.format(fresque_id,date,expe_id)\n",
+" \n",
+" #placements = {\"frag_filename\":[], \"positions\" : [], \"scores\":[], \"fresque_filename\": []}\n",
+" #placements_filename = \"./results_bench/placements_{}_{}\".format(date,expe_id)\n",
 "\n",
 " # Load the network\n",
 " net=load_net(net_filename)\n",
+" if using_cuda:\n",
+" net.cuda()\n",
 "\n",
 " # Load the fresco into memory\n",
 " img=cv2.imread(fresque_filename.format(fresque_id))\n",
 "\n",
 " N_fragments = len(glob(fragments_filename_wild.format(fresque_id)))\n",
+" N_fragments = 100\n",
 " print(fragments_filename_wild.format(fresque_id))\n",
 " print(N_fragments)\n",
 "\n",
@@ -554,46 +664,60 @@
 " tailles = []\n",
 "\n",
 " time_old = time()\n",
-" # Parcour tout les fragments de bench de cette fresque\n",
+" # Parcourt tout les fragments de bench de cette fresque\n",
 " for fragment_id in range(N_fragments):\n",
 " clear_output(wait=True)\n",
-" print(\"Fresque {}, fragment {}/{} ({:.3}%)\".format(fresque_id,fragment_id,N_fragments,(fragment_id/N_fragments*100)))\n",
+" print(\"[MAIN]Fresque {}, fragment {}/{} ({:.3}%)\".format(fresque_id,fragment_id,N_fragments,(fragment_id/N_fragments*100)))\n",
 " delta = time()-time_old\n",
-" print(\"Temps par fragment: {:.3}. ETA = {:.3}s\".format(delta,(N_fragments-fragment_id)*delta))\n",
+" print(\"[MAIN]Temps par fragment: {:.3}. ETA = {:.3}s\".format(delta,(N_fragments-fragment_id)*delta))\n",
 " time_old = time()\n",
 " frag = cv2.imread(fragment_filename.format(fresque_id,fragment_id))\n",
+" \n",
+" print(\"Fragment de taille {}\".format(frag.shape))\n",
 "\n",
-" # Rotate the piece by 20 degrees at each attempt to match it, repeat 18 times\n",
-" for i in [0,17]:\n",
-" rotation=20*i\n",
-" #rotation=0\n",
-" #rotation_base=0\n",
-" score_list,positions_patchs=run_net_v3(net,img,frag,frag_size,60,0.7,using_cuda,rotation)\n",
+" score_list,positions_patchs=run_net_v3(net,img,frag,frag_size,30,0.7,using_cuda,0)\n",
+" \n",
+" #placements[\"frag_filename\"].append(fragment_filename.format(fresque_id,fragment_id))\n",
+" #placements[\"positions\"].append(positions_patchs)\n",
+" #placements[\"scores\"].append(score_list)\n",
+" #placements[\"fresque_filename\"].append(fresque_filename.format(fresque_id))\n",
+" \n",
+" #with open(placements_filename,'w') as f:\n",
+" # f.write(json.dumps(placements))\n",
+" \n",
+" \n",
+" \n",
+" print(\"Nombre de positions avant match: {}\".format(len(positions_patchs)))\n",
 " frag_position=frag_match(frag,img,positions_patchs)\n",
-" # When Ransac gets the right result, break out of the loop\n",
-" if len(frag_position)==3:\n",
-" rotation_base=i*20\n",
-" break\n",
+"\n",
 " # If Ransac finds a solution, the returned variable is a list of two positions and a rotation\n",
 " if len(frag_position)==3:\n",
+" print(\"MATCH !\")\n",
 "\n",
 " # MATCHED\n",
 " matched.append(1)\n",
 "\n",
 " # POSITION\n",
-" frag_position[2]=rotation_base-360-frag_position[2]\n",
+" frag_position[2]=0\n",
 " if frag_position[2]>0:\n",
 " frag_position[2]=frag_position[2]-360\n",
 " positions.append([frag_position[0],frag_position[1],round(frag_position[2],3)])\n",
 "\n",
 " # GROUND TRUTH\n",
 " with open(vt_filename.format(fresque_id,fragment_id), 'r') as f:\n",
-" data_vt = f.read().splitlines()\n",
-" verite_terrain.append([int(data_vt[2]),int(data_vt[3]),frag.shape[0],frag.shape[1]])\n",
+" vt = f.read().splitlines()\n",
+" vt = [int(i) for i in vt]\n",
+" verite_terrain.append([vt[2],vt[3],frag.shape[0],frag.shape[1]])\n",
 "\n",
 " # DISTANCE\n",
-" distances.append(np.linalg.norm([float(data_vt[3])-float(frag_position[0]),float(data_vt[2])-float(frag_position[1])]))\n",
+" distances.append(np.linalg.norm([vt[2]-frag_position[0],vt[3]-float(frag_position[1])]))\n",
+" \n",
+" if d:\n",
+" print(\"Position: {}\".format(positions[-1]))\n",
+" print(\"Verite terrain: {}\".format(vt))\n",
+" print(\"Distance: {}\\n\".format(distances[-1]))\n",
 " else:\n",
+" print(\"[!Retour MATCH] code {}\".format(frag_position))\n",
 " matched.append(0)\n",
 " distances.append(-1)\n",
 " positions.append([])\n",

View file

@@ -489,7 +489,7 @@
 "def frag_match(frag,img,position):\n",
 " \n",
 " frag_size=frag.size\n",
-" centre_frag=creer_point(frag_size[0]/2,frag_size[1]/2)\n",
+" centre_frag=creer_point(frag_size[1]/2,frag_size[0]/2)\n",
 " \n",
 " retained_matches = []\n",
 " frag=[]\n",
@@ -527,15 +527,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 7,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Fresque 1, fragment 97/100 (97.0%)\n",
-"Temps par fragment: 12.0. ETA = 35.9s\n"
+"Fresque 1, fragment 199/200 (99.5%)\n",
+"Temps par fragment: 10.2. ETA = 10.2s\n",
+"Sauvegardé dans ./results_bench/results_bench_f1_02-18_18-23_0118\n"
 ]
 }
 ],
@@ -545,17 +546,17 @@
 " # Network variables\n",
 " frag_size=16\n",
 " using_cuda=True\n",
-" for fresque_id in range(6):\n",
+" for fresque_id in [1]:\n",
 " #fresque_id = 2\n",
 "\n",
 " # Data variables\n",
-" base_dir = './training_data_random_shift_color/'\n",
-" fresque_filename = base_dir+'fresque{}.ppm'\n",
-" fresque_filename_wild = base_dir+'fresque*.ppm'\n",
+" base_dir = './training_data_small/'\n",
+" fresque_filename = base_dir+'fresque_small{}.ppm'\n",
+" fresque_filename_wild = base_dir+'fresque_small*.ppm'\n",
 " fragment_filename = base_dir+'fragments/fresque{}/frag_bench_{:05}.ppm'\n",
 " fragments_filename_wild = base_dir+'fragments/fresque{}/frag_bench_*.ppm'\n",
 " vt_filename = base_dir+'fragments/fresque{}/vt/frag_bench_{:05}_vt.txt'\n",
-" net_filename = \"./trained_net/net_trainned_MB4_02-10_20-49_0003\"\n",
+" net_filename = \"./trained_net/net_trainned_with-BCE_02-18_08-20_0118\"\n",
 " \n",
 " expe_id = int(net_filename.split(\"_\")[-1]) # experiment ID, appended to every file written so that results from the same experiment can be identified.\n",
 " date = datetime.now().strftime(\"%m-%d_%H-%M\")\n",
@@ -569,8 +570,7 @@
 " \n",
 " #N_fragments = 20\n",
 " N_fragments = len(glob(fragments_filename_wild.format(fresque_id)))\n",
-" N_fragments = 100\n",
-" #print(fragments_filename_wild.format(fresque_id))\n",
+" print(fragments_filename_wild.format(fresque_id))\n",
 " print(N_fragments)\n",
 "\n",
 " # Create the result arrays\n",
@@ -613,6 +613,7 @@
 " # GROUND TRUTH\n",
 " with open(vt_filename.format(fresque_id,fragment_id), 'r') as f:\n",
 " data_vt = f.read().splitlines()\n",
+" print(data_vt)\n",
 " verite_terrain.append([int(data_vt[2]),int(data_vt[3]),frag.size[0],frag.size[1]])\n",
 "\n",
 " # DISTANCE\n",
@@ -636,17 +637,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Sauvegarder dans results_f0_02-08_23-15\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "date = datetime.now().strftime(\"%m-%d_%H-%M\")\n",
 "meta = {'date':date,'base_dir':base_dir,'fresque_id':fresque_id,'fresque_taille':img.size,'N_fragments': N_fragments}\n",
@ -660,33 +653,35 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 12, "execution_count": 8,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"[1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0]\n" "[0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0]\n",
"0.39\n"
] ]
} }
], ],
"source": [ "source": [
"print(matched)" "print(matched)\n",
"print(np.average(matched))"
] ]
}, },
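(matched is a list of 0/1 flags, so its average is the fraction of correctly placed fragments: the 0.39 printed above means roughly 39% of this fresco's bench fragments were localized.)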
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 11, "execution_count": 13,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"80" "98"
] ]
}, },
"execution_count": 11, "execution_count": 13,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -697,6 +692,13 @@
"torch.cuda.empty_cache()\n", "torch.cuda.empty_cache()\n",
"gc.collect()" "gc.collect()"
] ]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
} }
], ],
"metadata": { "metadata": {

View file

@ -1,871 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"#Tous les codes sont basés sur l'environnement suivant\n",
"#python 3.7\n",
"#opencv 3.1.0\n",
"#pytorch 1.4.0\n",
"\n",
"import torch\n",
"from torch.autograd import Variable\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import cv2\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import random\n",
"import math\n",
"import pickle\n",
"import random\n",
"from PIL import Image\n",
"import sys"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Les fonctions dans ce bloc ne sont pas utilisées par le réseau, mais certaines fonctions d'outils\n",
"\n",
"# Les fonctions de ce bloc se trouvent dans le programme d'apprentissage \n",
"# “Apprentissage_MSELoss_avec_GPU“\n",
"# et les commentaires détaillés se trouvent dans le programme d'apprentissage\n",
"\n",
"def tensor_imshow(im_tensor,cannel):\n",
" b,c,h,w=im_tensor.shape\n",
" if c==1:\n",
" plt.imshow(im_tensor.squeeze().detach().numpy())\n",
" else:\n",
" plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:])\n",
" \n",
"def get_training_fragment(frag_size,im):\n",
" h,w,c=im.shape\n",
" n=random.randint(0,int(h/frag_size)-1)\n",
" m=random.randint(0,int(w/frag_size)-1)\n",
" \n",
" shape=frag_size/4\n",
" vt_h=math.ceil((h+1)/shape)\n",
" vt_w=math.ceil((w+1)/shape)\n",
" vt=np.zeros([vt_h,vt_w])\n",
" vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)\n",
" vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)\n",
" vt[vt_h_po,vt_w_po]=1\n",
" vt = np.float32(vt)\n",
" vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))\n",
" \n",
" return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt\n",
"\n",
"def write_result_in_file(result,file_name):\n",
" n=0\n",
" with open(file_name,'w') as file:\n",
" for i in range(len(result)):\n",
" while n<result[i][0]:\n",
" s=str(n)\n",
" n=n+1\n",
" s=s+\"\\n\"\n",
" file.write(s)\n",
" s=str(result[i][0])+\" \"+str(result[i][1])+\" \"+str(result[i][2])+\" \"+str(result[i][3])\n",
" s=s+\"\\n\"\n",
" n=n+1\n",
" file.write(s)\n",
" file.close()\n",
" \n",
" \n",
"def img2tensor(im):\n",
" im=np.array(im,dtype=\"float32\")\n",
" tensor_cv = torch.from_numpy(np.transpose(im, (2, 0, 1)))\n",
" im_tensor=tensor_cv.unsqueeze(0)\n",
" return im_tensor\n",
"\n",
"def show_coordonnee(position_pred):\n",
" map_corre=position_pred.squeeze().detach().numpy()\n",
" score=sum(sum(map_corre))\n",
" h,w=map_corre.shape\n",
" max_value=map_corre.max()\n",
" coordonnee=np.where(map_corre==max_value)\n",
" return score,coordonnee[0].mean()/h,coordonnee[1].mean()/w\n",
"\n",
"def test_fragment32_32(frag,seuillage):\n",
" a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]\n",
" mask = (a == 0)\n",
" arr_new = a[mask]\n",
" if arr_new.size/a.size<=(1-seuillage):\n",
" return True\n",
" else:\n",
" return False\n",
"\n",
"def save_net(file_path,net):\n",
" pkl_file = open(file_path, 'wb')\n",
" pickle.dump(net,pkl_file)\n",
" pkl_file.close()\n",
" \n",
"def load_net(file_path): \n",
" pkl_file = open(file_path, 'rb')\n",
" net= pickle.load(pkl_file)\n",
" pkl_file.close()\n",
" return net"
]
},
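Side note: save_net/load_net pickle the whole network object, which ties the saved file to this exact class definition and pickle protocol. A sketch of the more portable state_dict pattern (assuming the Net(frag_size, psize) constructor from the network cell below):

```python
import torch

# Sketch only: store and restore the weights instead of pickling the module itself.
def save_weights(file_path, net):
    torch.save(net.state_dict(), file_path)

def load_weights(file_path, frag_size=16, psize=4):
    net = Net(frag_size, psize)                 # rebuild the architecture first
    net.load_state_dict(torch.load(file_path))  # then restore the trained weights
    return net
```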
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Les fonctions de ce bloc sont utilisées pour construire le réseau\n",
"\n",
"# Les fonctions de ce bloc se trouvent dans le programme d'apprentissage \n",
"# “Apprentissage_MSELoss_avec_GPU“\n",
"# et les commentaires détaillés se trouvent dans le programme d'apprentissage\n",
"\n",
"def ini():\n",
" kernel=torch.zeros([8,3,3,3])\n",
" array_0=np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype='float32')\n",
" array_1=np.array([[2,1,0],[1,0,-1],[0,-1,-2]],dtype='float32')\n",
" array_2=np.array([[1,0,-1],[2,0,-2],[1,0,-1]],dtype='float32')\n",
" array_3=np.array([[0,-1,-2],[1,0,-1],[2,1,0]],dtype='float32')\n",
" array_4=np.array([[-1,-2,-1],[0,0,0],[1,2,1]],dtype='float32')\n",
" array_5=np.array([[-2,-1,0],[-1,0,1],[0,1,2]],dtype='float32')\n",
" array_6=np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype='float32')\n",
" array_7=np.array([[0,1,2],[-1,0,1],[-2,-1,0]],dtype='float32')\n",
" for i in range(3):\n",
" kernel[0,i,:]=torch.from_numpy(array_0)\n",
" kernel[1,i,:]=torch.from_numpy(array_1)\n",
" kernel[2,i,:]=torch.from_numpy(array_2)\n",
" kernel[3,i,:]=torch.from_numpy(array_3)\n",
" kernel[4,i,:]=torch.from_numpy(array_4)\n",
" kernel[5,i,:]=torch.from_numpy(array_5)\n",
" kernel[6,i,:]=torch.from_numpy(array_6)\n",
" kernel[7,i,:]=torch.from_numpy(array_7)\n",
" return torch.nn.Parameter(kernel,requires_grad=True) \n",
"\n",
"def kernel_add_ini(n,m):\n",
" input_canal=int(n*m)\n",
" output_canal=int(n/2)*int(m/2)\n",
" for i in range(int(n/2)):\n",
" for j in range(int(m/2)):\n",
" kernel_add=np.zeros([1,input_canal],dtype='float32')\n",
" kernel_add[0,i*2*m+j*2]=1\n",
" kernel_add[0,i*2*m+j*2+1]=1\n",
" kernel_add[0,(i*2+1)*m+j*2]=1\n",
" kernel_add[0,(i*2+1)*m+j*2+1]=1\n",
" if i==0 and j==0:\n",
" add=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
" else:\n",
" add_=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))\n",
" add=torch.cat((add,add_),0)\n",
" return torch.nn.Parameter(add,requires_grad=False) \n",
"\n",
"def kernel_shift_ini(n,m):\n",
" input_canal=int(n*m)\n",
" output_canal=int(n*m)\n",
" \n",
" kernel_shift=torch.zeros([output_canal,input_canal,3,3])\n",
" \n",
" array_0=np.array([[1,0,0],[0,0,0],[0,0,0]],dtype='float32')\n",
" array_1=np.array([[0,0,1],[0,0,0],[0,0,0]],dtype='float32')\n",
" array_2=np.array([[0,0,0],[0,0,0],[1,0,0]],dtype='float32')\n",
" array_3=np.array([[0,0,0],[0,0,0],[0,0,1]],dtype='float32')\n",
" \n",
" kernel_shift_0=torch.from_numpy(array_0)\n",
" kernel_shift_1=torch.from_numpy(array_1)\n",
" kernel_shift_2=torch.from_numpy(array_2)\n",
" kernel_shift_3=torch.from_numpy(array_3)\n",
" \n",
" \n",
" for i in range(n):\n",
" for j in range(m):\n",
" if i==0 and j==0:\n",
" kernel_shift[0,0,:]=kernel_shift_0\n",
" else:\n",
" if i%2==0 and j%2==0:\n",
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_0\n",
" if i%2==0 and j%2==1:\n",
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_1\n",
" if i%2==1 and j%2==0:\n",
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_2\n",
" if i%2==1 and j%2==1:\n",
" kernel_shift[i*m+j,i*m+j,:]=kernel_shift_3\n",
" \n",
" return torch.nn.Parameter(kernel_shift,requires_grad=False) \n",
"\n",
"def get_patch(fragment,psize,n,m):\n",
" return fragment[:,:,n*psize:(n+1)*psize,m*psize:(m+1)*psize]\n",
"\n",
"class Net(nn.Module):\n",
" def __init__(self,frag_size,psize):\n",
" super(Net, self).__init__()\n",
" \n",
" h_fr=frag_size\n",
" w_fr=frag_size\n",
" \n",
" n=int(h_fr/psize) #n*m patches\n",
" m=int(w_fr/psize)\n",
" \n",
" self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)\n",
" #self.conv1.weight=ini()\n",
" self.Relu = nn.ReLU(inplace=True)\n",
" self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)\n",
" \n",
" self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
" self.shift1.weight=kernel_shift_ini(n,m)\n",
" self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
" self.add1.weight=kernel_add_ini(n,m)\n",
" \n",
" n=int(n/2)\n",
" m=int(m/2)\n",
" if n>=2 and m>=2:\n",
" self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
" self.shift2.weight=kernel_shift_ini(n,m)\n",
" self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
" self.add2.weight=kernel_add_ini(n,m)\n",
" \n",
" n=int(n/2)\n",
" m=int(m/2)\n",
" if n>=2 and m>=2:\n",
" self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)\n",
" self.shift3.weight=kernel_shift_ini(n,m)\n",
" self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)\n",
" self.add3.weight=kernel_add_ini(n,m)\n",
" \n",
" \n",
" def get_descripteur(self,img,using_cuda):\n",
" descripteur_img=self.Relu(self.conv1(img))\n",
" b,c,h,w=descripteur_img.shape\n",
" couche_constante=0.5*torch.ones([1,1,h,w])\n",
" if using_cuda:\n",
" couche_constante=couche_constante.cuda()\n",
" descripteur_img=torch.cat((descripteur_img,couche_constante),1)\n",
" descripteur_img_norm=descripteur_img/torch.norm(descripteur_img,dim=1)\n",
" return descripteur_img_norm\n",
" \n",
" def forward(self,img,frag,using_cuda):\n",
" psize=4\n",
" \n",
" descripteur_input1=self.get_descripteur(img,using_cuda)\n",
" descripteur_input2=self.get_descripteur(frag,using_cuda)\n",
" \n",
" b,c,h,w=frag.shape\n",
" n=int(h/psize)\n",
" m=int(w/psize)\n",
" \n",
" for i in range(n):\n",
" for j in range(m):\n",
" if i==0 and j==0:\n",
" map_corre=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
" else:\n",
" a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)\n",
" map_corre=torch.cat((map_corre,a),1)\n",
" #shift\n",
" map_corre=self.maxpooling(map_corre)\n",
" map_corre=self.shift1(map_corre)\n",
" map_corre=self.add1(map_corre)\n",
" \n",
" \n",
" n=int(n/2)\n",
" m=int(m/2)\n",
" if n>=2 and m>=2:\n",
" map_corre=self.maxpooling(map_corre)\n",
" map_corre=self.shift2(map_corre)\n",
" map_corre=self.add2(map_corre)\n",
" \n",
" \n",
" n=int(n/2)\n",
" m=int(m/2)\n",
" if n>=2 and m>=2:\n",
" map_corre=self.maxpooling(map_corre)\n",
" map_corre=self.shift3(map_corre)\n",
" map_corre=self.add3(map_corre)\n",
" \n",
" \n",
" b,c,h,w=map_corre.shape\n",
" map_corre=map_corre/(map_corre.max())\n",
" #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)\n",
" return map_corre"
]
},
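The heart of forward() is the per-patch correlation: each 4x4 patch of the fragment descriptor is used as a convolution kernel over the fresco descriptor, giving one correlation map per patch, which the maxpooling/shift/add pyramid then aggregates. A self-contained toy of that single step (random tensors, not the project's data):

```python
import torch
import torch.nn.functional as F

desc_img  = torch.randn(1, 9, 64, 64)  # fresco descriptor: 8 conv channels + 1 constant layer
desc_frag = torch.randn(1, 9, 16, 16)  # descriptor of a 16x16 fragment
patch = desc_frag[:, :, 0:4, 0:4]      # one 4x4 patch (psize=4), shape (1, 9, 4, 4)
corr = F.conv2d(desc_img, patch, padding=2)  # correlation map for that patch
print(corr.shape)                      # torch.Size([1, 1, 65, 65])
```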
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Les fonctions de ce bloc sont utilisées pour appliquer le réseau à des fragments (pas à des patchs carrés)\n",
"\n",
"\n",
"# Cette fonction permet de sélectionner un ensemble de patchs carrés à partir d'un fragment\n",
"# Le paramètre “frag_size” fait ici référence à la taille du patch d'entrée carré (16 * 16)\n",
"# Le paramètre “seuillage” limite la proportion de pixels non noirs dans chaque patch\n",
"# Le paramètre “limite” peut limiter le nombre de correctifs trouvés dans chaque fragment\n",
"def get_patch_list(frag,frag_size,limite,seuillage):\n",
" n=0\n",
" m=0\n",
" h,w,c=frag.shape\n",
" patch_list=[]\n",
" position_list=[]\n",
" for i in range(4):\n",
" if len(patch_list)>limite and limite!=0:\n",
" break\n",
" for j in range(4):\n",
" if len(patch_list)>limite and limite!=0:\n",
" break\n",
" n_offset=i*4 # n offset\n",
" m_offset=j*4 # m offset\n",
" n=0\n",
" while n+frag_size+n_offset<h:\n",
" m=0\n",
" while m+frag_size+m_offset<w:\n",
" patch=frag[n+n_offset:n+frag_size+n_offset,m+m_offset:m+frag_size+m_offset,:]\n",
" if test_fragment32_32(patch,seuillage):\n",
" patch_list.append(patch)\n",
" position_list.append([int((n+frag_size/2)+n_offset),int((m+frag_size/2)+m_offset)])\n",
" m=m+frag_size\n",
" n=n+frag_size\n",
" return patch_list,position_list\n",
"\n",
"# Entrez du fragment et de la fresque, exécutez le réseau\n",
"def run_net_v3(net,img,frag,frag_size,limite,seuillage,using_cuda,rotation):\n",
" Img=Image.fromarray(frag)\n",
" frag=np.array(Img.rotate(rotation))\n",
" img_tensor=img2tensor(img)\n",
" \n",
" # la collection de patchs carrée dans le fragement \"sont frag_list[]\"\n",
" # La position de leur centre dans la fragment sont \"position_frag[]\"\n",
" frag_list,position_frag=get_patch_list(frag,frag_size,limite,seuillage)\n",
" if using_cuda:\n",
" img_tensor=img_tensor.cuda()\n",
" \n",
" score_list=[]\n",
" coordonnee_list=[]\n",
" \n",
" # Pour chaque patch carré dans la collection, effectuez un calcul en réseau de leur position\n",
" # Le résultat est placé en \"coordonnee_list[]\"\n",
" # \"score_list[]\" pas utile dans notre programme\n",
" for i in range(len(frag_list)):\n",
" frag_tensor=img2tensor(frag_list[i])\n",
" if using_cuda:\n",
" frag_tensor=frag_tensor.cuda()\n",
" res=net.forward(img_tensor,frag_tensor,using_cuda)\n",
" if using_cuda:\n",
" res=res.cpu()\n",
" score,po_h,po_w=show_coordonnee(res)\n",
" coordonnee_list.append([po_h,po_w])\n",
" score_list.append(score)\n",
" h_img,w_img,c=img.shape\n",
" position=[]\n",
" \n",
" # Mettez les paires correspondante en \"position[]\"\n",
" # [x,y,x',y']\n",
" # La position (x,y) dans le fragment correspond à la position (x',y') dans la fresque\n",
" for i in range(len(coordonnee_list)):\n",
" x0=position_frag[i][0]\n",
" y0=position_frag[i][1]\n",
" x1=int(round(h_img*coordonnee_list[i][0]))\n",
" y1=int(round(w_img*coordonnee_list[i][1]))\n",
" position.append([x0,y0,x1,y1])\n",
" return score_list,position"
]
},
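A typical call, mirroring the parameters of the __main__ cell below (at most 60 patches per fragment, a 0.7 non-black threshold, no rotation); each returned entry is a fragment-to-fresco correspondence:

```python
# Sketch, assuming net, img_test and frag_test are set up as in the __main__ cell below.
score_list, position = run_net_v3(net, img_test, frag_test, 16, 60, 0.7, True, 0)
# position[i] == [x_frag, y_frag, x_fresque, y_fresque]
```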
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"# Cette partie du code consiste à implémenter l'algorithme RANSAC amélioré\n",
"\n",
"# Ecrire le point sous forme [x,y,1]T,\n",
"# Utilisé pour construire l'équation de la matrice de transformation\n",
"def creer_point(x,y):\n",
" p=np.zeros((3,1))\n",
" p[0][0]=x\n",
" p[1][0]=y\n",
" p[2][0]=1\n",
" return p\n",
"\n",
"# Sélectionnez aléatoirement n points sans duplication à partir de M points\n",
"def selectionner_points(n,M):\n",
" table=[]\n",
" for i in range(M):\n",
" table.append(i)\n",
" result=[]\n",
" for i in range(n):\n",
" index=random.randint(0,M-i-1)\n",
" result.append(table[index])\n",
" table[index]=table[M-1-i]\n",
" return result\n",
"\n",
"# Selon la matrice de transformation affine, calculer la position centrale transformée et l'angle de rotation\n",
"def position_rotation(h,centre_frag):\n",
" centre=h@centre_frag\n",
" cos_rot=(h[0][0]+h[1][1])/2\n",
" sin_rot=(h[1][0]-h[0][1])/2\n",
" tan_rot=sin_rot/(cos_rot+0.0000001)\n",
" if cos_rot>0:\n",
" rot_frag=math.atan(tan_rot)*(180/pi)\n",
" else:\n",
" rot_frag=math.atan(tan_rot)*(180/pi)+180\n",
" rot_frag=-rot_frag\n",
" if rot_frag>0:\n",
" rot_frag-=360\n",
" return centre[0][0],centre[1][0],rot_frag\n",
"\n",
"# Vérifiez les résultats de Ransac en avec des changements de distance euclidienne\n",
"def test_frag(inline,frag,fres):\n",
" itera=10\n",
" frag_inline=[]\n",
" fres_inline=[]\n",
" # Metter les coordonnées du point inline dans \"frag_inline[]\",et \"fres_inline[]\"\n",
" for i in range(np.size(inline,0)):\n",
" if inline[i]==1:\n",
" frag_inline.append([frag[i][0],frag[i][1]])\n",
" fres_inline.append([fres[i][0],fres[i][1]])\n",
" p=[]\n",
" \n",
" # Faites une boucle dix fois, \n",
" # sélectionnez à chaque fois deux paires correspondantes inline \n",
" # calculer le changement de leur distance euclidienne\n",
" for i in range(itera):\n",
" point_test=selectionner_points(2,np.size(frag_inline,0))\n",
" diff_x_frag=frag_inline[point_test[1]][0]-frag_inline[point_test[0]][0]\n",
" diff_y_frag=frag_inline[point_test[1]][1]-frag_inline[point_test[0]][1]\n",
" diff_frag=sqrt(pow(diff_x_frag,2)+pow(diff_y_frag,2))\n",
" \n",
" diff_x_fres=fres_inline[point_test[1]][0]-fres_inline[point_test[0]][0]\n",
" diff_y_fres=fres_inline[point_test[1]][1]-fres_inline[point_test[0]][1]\n",
" diff_fres=sqrt(pow(diff_x_fres,2)+pow(diff_y_fres,2))\n",
" if diff_frag !=0:\n",
" fsf=diff_fres/diff_frag\n",
" p.append([fsf])\n",
" result=np.mean(p)\n",
" return result\n",
"\n",
"def frag_match(frag,img,position):\n",
" \n",
" frag_size=frag.shape\n",
" centre_frag=creer_point(frag_size[0]/2,frag_size[1]/2)\n",
" \n",
" retained_matches = []\n",
" frag=[]\n",
" fres=[]\n",
" \n",
" for i in range(len(position)):\n",
" frag.append([float(position[i][0]),float(position[i][1])])\n",
" fres.append([float(position[i][2]),float(position[i][3])])\n",
" \n",
" if np.size(frag)>0:\n",
" # Calculer la matrice de transformation affine à l'aide de la méthode Ransac\n",
" h,inline=cv2.estimateAffinePartial2D(np.array(frag),np.array(fres))\n",
" # Si “h” n'est pas sous la forme de matrice 2 * 3, la matrice de transformation affine n'est pas trouvée\n",
" if np.size(h)!=6:\n",
" return ([-1])\n",
" else:\n",
" x,y,rot=position_rotation(h,centre_frag)\n",
" pourcenttage=sum(inline)/np.size(frag,0)\n",
" # Le nombre de points inline doit être supérieur à un certain nombre\n",
" if sum(inline)>3:\n",
" p=test_frag(inline,frag,fres)\n",
" # La distance euclidienne entre les points correspondants ne doit pas trop changer, \n",
" # sinon cela prouve que le résultat de Ransac est incorrect\n",
" # ici,le changement de la distance euclidienne sont entre 0.7 et 1.3\n",
" if abs(p-1)<0.3:\n",
" # Ce n'est qu'alors que Ransac renvoie le résultat correct\n",
" return([round(y),round(x),round(rot,3)])\n",
" else:\n",
" return ([-2])\n",
" else:\n",
" return ([-3])\n",
" else:\n",
" return ([-4]) "
]
},
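For reference, the angle extraction in position_rotation follows from the shape of the matrix returned by cv2.estimateAffinePartial2D, a scaled rotation plus translation:

$$h = \begin{pmatrix} s\cos\theta & -s\sin\theta & t_x \\ s\sin\theta & s\cos\theta & t_y \end{pmatrix}$$

so $(h_{00}+h_{11})/2 = s\cos\theta$ and $(h_{10}-h_{01})/2 = s\sin\theta$. The scale $s$ cancels in the ratio $\tan\theta = \sin\theta/\cos\theta$, which is why the code can divide the two averages directly; the small $10^{-7}$ added to the denominator only guards against division by zero.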
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"if __name__==\"__main__\":\n",
" \n",
" frag_size=16\n",
" using_cuda=True\n",
" net=load_net(\"./net_trainned6000\")\n",
" img_test=cv2.imread(\"./fresque0.ppm\")\n",
" \n",
" result=[]\n",
" for n in range(315):\n",
" if n<10:\n",
" frag_test=cv2.imread(\"./frag_eroded0/frag_eroded_000\"+str(n)+\".ppm\")\n",
" elif n<100:\n",
" frag_test=cv2.imread(\"./frag_eroded0/frag_eroded_00\"+str(n)+\".ppm\")\n",
" else:\n",
" frag_test=cv2.imread(\"./frag_eroded0/frag_eroded_0\"+str(n)+\".ppm\")\n",
" \n",
" # Faites pivoter les pièces de 20 degrés à chaque fois pour correspondre, répétez 18 fois\n",
" for i in range(18):\n",
" rotation=20*i\n",
" score_list,position=run_net_v3(net,img_test,frag_test,frag_size,60,0.7,using_cuda,rotation)\n",
" frag_position=frag_match(frag_test,img_test,position)\n",
" # Lorsque Ransac obtient le bon résultat, sortez de la boucle\n",
" if len(frag_position)==3:\n",
" rotation_base=i*20\n",
" break\n",
" # Enregistrez les fragments correctement localisés dans \"result[]\"\n",
" if len(frag_position)==3:\n",
" frag_position[2]=rotation_base-360-frag_position[2]\n",
" if frag_position[2]>0:\n",
" frag_position[2]=frag_position[2]-360\n",
" result.append([n,frag_position[0],frag_position[1],round(frag_position[2],3)])\n"
]
},
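Incidentally, the three-way branch on n in the cell above can be collapsed into a single zero-padded format specifier; an equivalent one-liner:

```python
# {:04d} pads n to four digits, reproducing frag_eroded_0000.ppm ... frag_eroded_0314.ppm
frag_test = cv2.imread("./frag_eroded0/frag_eroded_{:04d}.ppm".format(n))
```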
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[[0, 520.0, 575.0, -356.388],\n",
" [1, 535.0, 460.0, -113.454],\n",
" [2, 971.0, 270.0, -40.966],\n",
" [3, 1641.0, 650.0, -119.543],\n",
" [4, 1349.0, 68.0, -336.356],\n",
" [5, 1509.0, 192.0, -298.759],\n",
" [6, 107.0, 521.0, -74.179],\n",
" [7, 420.0, 440.0, -174.266],\n",
" [8, 287.0, 533.0, -299.677],\n",
" [9, 1518.0, 167.0, -290.164],\n",
" [10, 231.0, 429.0, -180.983],\n",
" [11, 666.0, 483.0, -230.948],\n",
" [12, 855.0, 104.0, -346.884],\n",
" [13, 1267.0, 87.0, -305.562],\n",
" [14, 16.0, 705.0, -30.087],\n",
" [15, 924.0, 120.0, -146.41],\n",
" [16, 657.0, 372.0, -175.323],\n",
" [17, 1409.0, 528.0, -329.829],\n",
" [18, 618.0, 427.0, -350.062],\n",
" [19, 631.0, 269.0, -87.332],\n",
" [20, 1345.0, 579.0, -320.597],\n",
" [21, 1670.0, 139.0, -282.108],\n",
" [22, 1310.0, 4.0, -180.0],\n",
" [23, 1418.0, 29.0, -112.925],\n",
" [24, 874.0, 496.0, -312.046],\n",
" [25, 812.0, 537.0, -4.393],\n",
" [26, 47.0, 728.0, -82.997],\n",
" [27, 1411.0, 200.0, -324.46],\n",
" [28, 767.0, 595.0, -339.734],\n",
" [29, 361.0, 434.0, -349.088],\n",
" [30, 1264.0, 732.0, -211.149],\n",
" [31, 958.0, 738.0, -356.008],\n",
" [32, 1307.0, 679.0, -145.032],\n",
" [33, 704.0, 553.0, -197.736],\n",
" [34, 867.0, 344.0, -355.599],\n",
" [35, 1702.0, 164.0, -315.301],\n",
" [36, 1483.0, 307.0, -330.954],\n",
" [37, 1365.0, 661.0, -158.589],\n",
" [38, 35.0, 623.0, -301.166],\n",
" [39, 968.0, 40.0, -355.307],\n",
" [40, 137.0, 650.0, -127.38],\n",
" [41, 1527.0, 239.0, -113.919],\n",
" [42, 1176.0, 736.0, -218.247],\n",
" [43, 466.0, 676.0, -139.007],\n",
" [44, 297.0, 659.0, -22.509],\n",
" [45, 1075.0, 363.0, -1.866],\n",
" [46, 973.0, 658.0, -118.442],\n",
" [47, 658.0, 589.0, -134.967],\n",
" [48, 1438.0, 245.0, -63.213],\n",
" [49, 1019.0, 381.0, -4.052],\n",
" [50, 898.0, 586.0, -320.709],\n",
" [51, 738.0, 258.0, -298.33],\n",
" [52, 1668.0, 167.0, -257.834],\n",
" [53, 306.0, 201.0, -304.816],\n",
" [54, 129.0, 353.0, -123.722],\n",
" [55, 1612.0, 527.0, -201.46],\n",
" [56, 1406.0, 400.0, -132.928],\n",
" [57, 1223.0, 629.0, -243.388],\n",
" [58, 1603.0, 770.0, -223.688],\n",
" [59, 1451.0, 323.0, -4.008],\n",
" [60, 1262.0, -8.0, -143.496],\n",
" [61, 1409.0, 358.0, -244.745],\n",
" [62, 426.0, 567.0, -107.651],\n",
" [63, 1093.0, 536.0, -11.543],\n",
" [64, 1570.0, 763.0, -340.0],\n",
" [65, 599.0, 29.0, -352.066],\n",
" [66, 38.0, 522.0, -237.017],\n",
" [67, 1076.0, 29.0, -152.794],\n",
" [68, 1629.0, 79.0, -70.396],\n",
" [69, 1464.0, 311.0, -306.565],\n",
" [70, 1595.0, 260.0, -53.364],\n",
" [71, 1343.0, 273.0, -256.237],\n",
" [72, 1074.0, 236.0, -1.801],\n",
" [73, 132.0, 35.0, -160.03],\n",
" [74, 1627.0, 762.0, -235.274],\n",
" [75, 713.0, 361.0, -136.338],\n",
" [76, 1485.0, 85.0, -90.974],\n",
" [77, 1243.0, 153.0, -268.034],\n",
" [78, 1026.0, 511.0, -118.767],\n",
" [79, 240.0, 181.0, -51.205],\n",
" [80, 1085.0, 680.0, -53.483],\n",
" [81, 73.0, 212.0, -299.054],\n",
" [82, 1237.0, 389.0, -306.557],\n",
" [83, 1269.0, 87.0, -330.538],\n",
" [84, 29.0, 175.0, -298.41],\n",
" [85, 982.0, 97.0, -80.0],\n",
" [86, 1487.0, 276.0, -211.009],\n",
" [87, 1226.0, 114.0, -321.565],\n",
" [88, 60.0, 317.0, -127.895],\n",
" [89, 1351.0, 285.0, -322.595],\n",
" [90, 233.0, 33.0, -144.993],\n",
" [91, 807.0, 211.0, -1.747],\n",
" [92, 997.0, 558.0, -326.669],\n",
" [93, 1682.0, 283.0, -312.023],\n",
" [94, 1178.0, 691.0, -242.927],\n",
" [95, 1116.0, 487.0, -288.69],\n",
" [96, 952.0, 14.0, -275.225],\n",
" [97, 140.0, 765.0, -206.9],\n",
" [98, 772.0, 81.0, -112.039],\n",
" [99, 640.0, 682.0, -57.917],\n",
" [100, 121.0, 757.0, -231.821],\n",
" [101, 1484.0, 218.0, -15.255],\n",
" [102, 1304.0, 164.0, -273.435],\n",
" [103, 862.0, 192.0, -303.177],\n",
" [104, 258.0, 766.0, -257.769],\n",
" [105, 540.0, 714.0, -183.176],\n",
" [106, 1283.0, 264.0, -330.905],\n",
" [107, 507.0, 196.0, -333.629],\n",
" [108, 1428.0, 438.0, -209.806],\n",
" [109, 391.0, 739.0, -243.391],\n",
" [110, 250.0, 640.0, -287.596],\n",
" [111, 704.0, 102.0, -78.376],\n",
" [112, 1060.0, 227.0, -300.726],\n",
" [113, 1509.0, 302.0, -318.749],\n",
" [114, 895.0, 662.0, -199.664],\n",
" [115, 1453.0, 254.0, -260.0],\n",
" [116, 1058.0, 199.0, -218.275],\n",
" [117, 1416.0, 779.0, 0.0],\n",
" [118, 1054.0, 740.0, -197.173],\n",
" [119, 1352.0, 393.0, -309.992],\n",
" [120, 1273.0, 585.0, -334.957],\n",
" [121, 952.0, 566.0, -193.436],\n",
" [122, 1132.0, 456.0, -25.393],\n",
" [123, 598.0, 179.0, -308.525],\n",
" [124, 1270.0, 15.0, -260.0],\n",
" [125, 763.0, 462.0, -241.979],\n",
" [126, 1515.0, 307.0, -284.007],\n",
" [127, 1582.0, 608.0, -175.422],\n",
" [128, 388.0, 536.0, -166.952],\n",
" [129, 426.0, 667.0, -84.451],\n",
" [130, 1505.0, 18.0, -192.572],\n",
" [131, 976.0, 440.0, -260.194],\n",
" [132, 1467.0, 655.0, -345.072],\n",
" [133, 1085.0, 388.0, -323.091],\n",
" [134, 1189.0, 514.0, -99.219],\n",
" [135, 232.0, 751.0, -360.0],\n",
" [136, 1336.0, 91.0, -132.694],\n",
" [137, 765.0, 173.0, -54.653],\n",
" [138, 1451.0, 421.0, -291.464],\n",
" [139, 920.0, 370.0, -271.035],\n",
" [140, 314.0, 739.0, -302.917],\n",
" [141, 1225.0, 213.0, -281.565],\n",
" [143, 160.0, 716.0, -238.299],\n",
" [144, 503.0, 31.0, -348.257],\n",
" [145, 1159.0, 423.0, -94.766],\n",
" [146, 131.0, 716.0, -27.957],\n",
" [147, 20.0, 683.0, -230.611],\n",
" [148, 237.0, 756.0, -164.897],\n",
" [149, 1596.0, 19.0, -285.437],\n",
" [150, 238.0, 737.0, -17.033],\n",
" [151, 354.0, 151.0, -100.638],\n",
" [152, 887.0, 257.0, -318.435],\n",
" [153, 541.0, 142.0, -151.453],\n",
" [154, 1177.0, 82.0, -275.589],\n",
" [155, 1526.0, 594.0, -340.0],\n",
" [156, 1454.0, 307.0, -305.964],\n",
" [157, 327.0, 150.0, -161.526],\n",
" [158, 1288.0, 199.0, -314.289],\n",
" [159, 649.0, 179.0, -333.417],\n",
" [160, 1413.0, 255.0, -283.537],\n",
" [161, 1543.0, 61.0, -307.416],\n",
" [162, 1190.0, 144.0, -226.857],\n",
" [163, 587.0, 104.0, -151.213],\n",
" [164, 1320.0, 602.0, -315.852],\n",
" [165, 627.0, 104.0, -320.252],\n",
" [166, 1474.0, 413.0, -233.361],\n",
" [167, 699.0, 176.0, -178.052],\n",
" [168, 1024.0, 341.0, -259.2],\n",
" [169, 1701.0, 755.0, -304.039],\n",
" [170, 1684.0, 210.0, -244.828],\n",
" [171, 1330.0, 290.0, -21.864],\n",
" [172, 1053.0, 139.0, -324.51],\n",
" [173, 1643.0, 428.0, -331.469],\n",
" [174, 319.0, 542.0, -205.964],\n",
" [175, 1045.0, 91.0, -3.533],\n",
" [176, 828.0, 695.0, -295.602],\n",
" [177, 1631.0, 287.0, -132.225],\n",
" [178, 361.0, 677.0, -142.992],\n",
" [179, 1674.0, 350.0, -227.628],\n",
" [180, 1412.0, 764.0, -350.194],\n",
" [181, 845.0, 19.0, -27.206],\n",
" [183, 1239.0, 192.0, -317.673],\n",
" [184, 320.0, 39.0, -157.845],\n",
" [185, 1387.0, 749.0, -46.487],\n",
" [186, 166.0, 589.0, -211.473],\n",
" [187, 1339.0, 122.0, -258.435],\n",
" [188, 1216.0, 235.0, -163.194],\n",
" [189, 1677.0, 696.0, -275.899],\n",
" [190, 158.0, 762.0, -327.275],\n",
" [191, 1078.0, 254.0, -226.278],\n",
" [192, 468.0, 761.0, -280.0],\n",
" [193, 439.0, 44.0, -182.45],\n",
" [194, 161.0, 541.0, -354.612],\n",
" [195, 972.0, 502.0, -335.421],\n",
" [196, 427.0, 192.0, -300.872],\n",
" [197, 17.0, 358.0, -137.835],\n",
" [198, 210.0, 580.0, -7.095],\n",
" [199, 259.0, 601.0, -228.13],\n",
" [200, 583.0, 736.0, -154.988],\n",
" [201, 216.0, 751.0, -302.426],\n",
" [202, 826.0, 46.0, -162.036],\n",
" [203, 1592.0, 382.0, -344.745],\n",
" [204, 1533.0, 110.0, -332.592],\n",
" [205, 159.0, 761.0, -21.87],\n",
" [206, 1219.0, 473.0, -297.199],\n",
" [207, 1697.0, 711.0, -103.887],\n",
" [208, 1449.0, 289.0, -247.773],\n",
" [209, 1694.0, 749.0, -48.106],\n",
" [210, 842.0, 613.0, -235.221],\n",
" [211, 1046.0, 294.0, -134.05],\n",
" [212, 1696.0, 763.0, -258.435],\n",
" [213, 350.0, 83.0, -189.227],\n",
" [214, 1262.0, 482.0, -347.386],\n",
" [215, 310.0, 321.0, -211.399],\n",
" [216, 744.0, 22.0, -268.647],\n",
" [218, 1353.0, 732.0, -72.057],\n",
" [219, 1418.0, 770.0, -340.0],\n",
" [220, 496.0, 718.0, -20.0],\n",
" [221, 477.0, 149.0, -277.479],\n",
" [222, 1547.0, 87.0, -325.069],\n",
" [223, 1087.0, 345.0, -3.805],\n",
" [224, 259.0, 78.0, -19.36],\n",
" [225, 1530.0, 512.0, -276.062],\n",
" [226, 132.0, 730.0, -360.0],\n",
" [227, 944.0, 471.0, -170.0],\n",
" [228, 1245.0, 256.0, -155.672],\n",
" [229, 1615.0, 354.0, -81.565],\n",
" [230, 177.0, 72.0, -198.674],\n",
" [231, 1637.0, 470.0, -350.62],\n",
" [232, 540.0, 644.0, -4.911],\n",
" [233, 624.0, 625.0, -127.127],\n",
" [234, 420.0, 15.0, -187.058],\n",
" [235, 884.0, 751.0, -288.649],\n",
" [236, 267.0, 776.0, -339.146],\n",
" [237, 1129.0, 87.0, -10.008],\n",
" [238, 1700.0, 713.0, -240.764],\n",
" [239, 286.0, 283.0, -148.653],\n",
" [240, 679.0, 730.0, -336.762],\n",
" [241, 1129.0, 106.0, -326.244],\n",
" [242, 1292.0, 200.0, -203.69],\n",
" [243, 1693.0, 570.0, -140.0],\n",
" [244, 1529.0, 114.0, -341.565],\n",
" [245, 158.0, 759.0, -356.87],\n",
" [246, 1116.0, 563.0, -242.203],\n",
" [247, 283.0, 129.0, -309.725],\n",
" [248, 1597.0, 294.0, -312.875],\n",
" [249, 1709.0, 762.0, -273.435],\n",
" [250, 1377.0, 302.0, -314.179],\n",
" [251, 323.0, 364.0, -6.183],\n",
" [252, 1316.0, 485.0, -280.0],\n",
" [253, 962.0, 16.0, -280.0],\n",
" [254, 1198.0, 573.0, -11.792],\n",
" [255, 276.0, 384.0, -279.393],\n",
" [256, 1684.0, 734.0, -104.219],\n",
" [257, 1447.0, 333.0, -280.5],\n",
" [258, 999.0, 256.0, -227.883],\n",
" [259, 1014.0, 413.0, -325.59],\n",
" [260, 141.0, 724.0, -216.565],\n",
" [261, 1552.0, 545.0, -255.848],\n",
" [262, 292.0, 13.0, -191.87],\n",
" [263, 573.0, 681.0, -79.779],\n",
" [264, 1433.0, 740.0, -149.733],\n",
" [265, 138.0, 726.0, -311.027],\n",
" [266, 1617.0, 762.0, -327.995],\n",
" [267, 170.0, 141.0, -335.639],\n",
" [268, 1282.0, 441.0, -324.876],\n",
" [269, 322.0, 255.0, -143.393],\n",
" [270, 100.0, 762.0, -56.809],\n",
" [271, 1249.0, 533.0, -349.403],\n",
" [272, 1097.0, 584.0, -213.29],\n",
" [274, 1697.0, 712.0, -360.0],\n",
" [275, 399.0, 148.0, -176.849],\n",
" [276, 56.0, 452.0, -155.208],\n",
" [277, 166.0, 16.0, -336.461],\n",
" [278, 58.0, 406.0, -233.24],\n",
" [279, 1552.0, 79.0, -69.146],\n",
" [280, 746.0, 134.0, -259.441],\n",
" [281, 123.0, 106.0, -103.672],\n",
" [282, 160.0, 758.0, -64.289],\n",
" [283, 1454.0, 323.0, -100.71],\n",
" [284, 220.0, 98.0, -135.362],\n",
" [285, 1173.0, 234.0, -105.515],\n",
" [286, 1238.0, 576.0, -360.0],\n",
" [287, 383.0, 194.0, -259.057],\n",
" [288, 1693.0, 147.0, -322.49],\n",
" [289, 286.0, 351.0, -360.0],\n",
" [290, 648.0, 734.0, -27.933],\n",
" [292, 160.0, 492.0, -111.87],\n",
" [293, 1105.0, 621.0, -115.047],\n",
" [294, 1108.0, 241.0, -227.684],\n",
" [295, 1212.0, 278.0, -340.0],\n",
" [296, 844.0, 756.0, -338.456],\n",
" [298, 1693.0, 111.0, -278.435],\n",
" [299, 1530.0, 563.0, -335.376],\n",
" [301, 364.0, 316.0, -360.0],\n",
" [303, 1690.0, 15.0, -3.881],\n",
" [304, 1170.0, 179.0, -151.87],\n",
" [305, 1408.0, 331.0, -11.189],\n",
" [307, 1654.0, 12.0, -280.0],\n",
" [308, 1180.0, 207.0, -80.017],\n",
" [309, 86.0, 403.0, -185.0],\n",
" [313, 281.0, 317.0, -120.0]]"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"result"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "py37",
"language": "python",
"name": "py37"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View file

@ -1,515 +0,0 @@
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# All the code is based on the following environment
#python 3.7
#opencv 3.1.0
#pytorch 1.4.0
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
import numpy as np
import random
import math
import pickle
import random
from PIL import Image
import sys
# In[2]:
# The functions in this block are not used by the network; they are utility functions
# The functions in this block come from the training program
# “Apprentissage_MSELoss_avec_GPU“
# and the detailed comments can be found in that training program
def tensor_imshow(im_tensor,cannel):
    b,c,h,w=im_tensor.shape
    if c==1:
        plt.imshow(im_tensor.squeeze().detach().numpy())
    else:
        plt.imshow(im_tensor.squeeze().detach().numpy()[cannel,:])

def get_training_fragment(frag_size,im):
    h,w,c=im.shape
    n=random.randint(0,int(h/frag_size)-1)
    m=random.randint(0,int(w/frag_size)-1)
    shape=frag_size/4
    vt_h=math.ceil((h+1)/shape)
    vt_w=math.ceil((w+1)/shape)
    vt=np.zeros([vt_h,vt_w])
    vt_h_po=round((vt_h-1)*(n*frag_size/(h-1)+(n+1)*frag_size/(h-1))/2)
    vt_w_po=round((vt_w-1)*(m*frag_size/(w-1)+(m+1)*frag_size/(w-1))/2)
    vt[vt_h_po,vt_w_po]=1
    vt = np.float32(vt)
    vt=torch.from_numpy(vt.reshape(1,1,vt_h,vt_w))
    return im[n*frag_size:(n+1)*frag_size,m*frag_size:(m+1)*frag_size,:],vt

def write_result_in_file(result,file_name):
    n=0
    with open(file_name,'w') as file:
        for i in range(len(result)):
            while n<result[i][0]:
                s=str(n)
                n=n+1
                s=s+"\n"
                file.write(s)
            s=str(result[i][0])+" "+str(result[i][1])+" "+str(result[i][2])+" "+str(result[i][3])
            s=s+"\n"
            n=n+1
            file.write(s)
    file.close()

def img2tensor(im):
    im=np.array(im,dtype="float32")
    tensor_cv = torch.from_numpy(np.transpose(im, (2, 0, 1)))
    im_tensor=tensor_cv.unsqueeze(0)
    return im_tensor

def show_coordonnee(position_pred):
    map_corre=position_pred.squeeze().detach().numpy()
    score=sum(sum(map_corre))
    h,w=map_corre.shape
    max_value=map_corre.max()
    coordonnee=np.where(map_corre==max_value)
    return score,coordonnee[0].mean()/h,coordonnee[1].mean()/w

def test_fragment32_32(frag,seuillage):
    a=frag[:,:,0]+frag[:,:,1]+frag[:,:,2]
    mask = (a == 0)
    arr_new = a[mask]
    if arr_new.size/a.size<=(1-seuillage):
        return True
    else:
        return False

def save_net(file_path,net):
    pkl_file = open(file_path, 'wb')
    pickle.dump(net,pkl_file)
    pkl_file.close()

def load_net(file_path):
    pkl_file = open(file_path, 'rb')
    net= pickle.load(pkl_file)
    pkl_file.close()
    return net
# In[3]:
# The functions in this block are used to build the network
# The functions in this block come from the training program
# “Apprentissage_MSELoss_avec_GPU“
# and the detailed comments can be found in that training program
def ini():
    kernel=torch.zeros([8,3,3,3])
    array_0=np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype='float32')
    array_1=np.array([[2,1,0],[1,0,-1],[0,-1,-2]],dtype='float32')
    array_2=np.array([[1,0,-1],[2,0,-2],[1,0,-1]],dtype='float32')
    array_3=np.array([[0,-1,-2],[1,0,-1],[2,1,0]],dtype='float32')
    array_4=np.array([[-1,-2,-1],[0,0,0],[1,2,1]],dtype='float32')
    array_5=np.array([[-2,-1,0],[-1,0,1],[0,1,2]],dtype='float32')
    array_6=np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype='float32')
    array_7=np.array([[0,1,2],[-1,0,1],[-2,-1,0]],dtype='float32')
    for i in range(3):
        kernel[0,i,:]=torch.from_numpy(array_0)
        kernel[1,i,:]=torch.from_numpy(array_1)
        kernel[2,i,:]=torch.from_numpy(array_2)
        kernel[3,i,:]=torch.from_numpy(array_3)
        kernel[4,i,:]=torch.from_numpy(array_4)
        kernel[5,i,:]=torch.from_numpy(array_5)
        kernel[6,i,:]=torch.from_numpy(array_6)
        kernel[7,i,:]=torch.from_numpy(array_7)
    return torch.nn.Parameter(kernel,requires_grad=True)

def kernel_add_ini(n,m):
    input_canal=int(n*m)
    output_canal=int(n/2)*int(m/2)
    for i in range(int(n/2)):
        for j in range(int(m/2)):
            kernel_add=np.zeros([1,input_canal],dtype='float32')
            kernel_add[0,i*2*m+j*2]=1
            kernel_add[0,i*2*m+j*2+1]=1
            kernel_add[0,(i*2+1)*m+j*2]=1
            kernel_add[0,(i*2+1)*m+j*2+1]=1
            if i==0 and j==0:
                add=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))
            else:
                add_=torch.from_numpy(kernel_add.reshape(1,input_canal,1,1))
                add=torch.cat((add,add_),0)
    return torch.nn.Parameter(add,requires_grad=False)

def kernel_shift_ini(n,m):
    input_canal=int(n*m)
    output_canal=int(n*m)
    kernel_shift=torch.zeros([output_canal,input_canal,3,3])
    array_0=np.array([[1,0,0],[0,0,0],[0,0,0]],dtype='float32')
    array_1=np.array([[0,0,1],[0,0,0],[0,0,0]],dtype='float32')
    array_2=np.array([[0,0,0],[0,0,0],[1,0,0]],dtype='float32')
    array_3=np.array([[0,0,0],[0,0,0],[0,0,1]],dtype='float32')
    kernel_shift_0=torch.from_numpy(array_0)
    kernel_shift_1=torch.from_numpy(array_1)
    kernel_shift_2=torch.from_numpy(array_2)
    kernel_shift_3=torch.from_numpy(array_3)
    for i in range(n):
        for j in range(m):
            if i==0 and j==0:
                kernel_shift[0,0,:]=kernel_shift_0
            else:
                if i%2==0 and j%2==0:
                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_0
                if i%2==0 and j%2==1:
                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_1
                if i%2==1 and j%2==0:
                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_2
                if i%2==1 and j%2==1:
                    kernel_shift[i*m+j,i*m+j,:]=kernel_shift_3
    return torch.nn.Parameter(kernel_shift,requires_grad=False)

def get_patch(fragment,psize,n,m):
    return fragment[:,:,n*psize:(n+1)*psize,m*psize:(m+1)*psize]

class Net(nn.Module):
    def __init__(self,frag_size,psize):
        super(Net, self).__init__()
        h_fr=frag_size
        w_fr=frag_size
        n=int(h_fr/psize) #n*m patches
        m=int(w_fr/psize)
        self.conv1 = nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1)
        #self.conv1.weight=ini()
        self.Relu = nn.ReLU(inplace=True)
        self.maxpooling=nn.MaxPool2d(3,stride=2, padding=1)
        self.shift1=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
        self.shift1.weight=kernel_shift_ini(n,m)
        self.add1 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
        self.add1.weight=kernel_add_ini(n,m)
        n=int(n/2)
        m=int(m/2)
        if n>=2 and m>=2:
            self.shift2=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
            self.shift2.weight=kernel_shift_ini(n,m)
            self.add2 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
            self.add2.weight=kernel_add_ini(n,m)
        n=int(n/2)
        m=int(m/2)
        if n>=2 and m>=2:
            self.shift3=nn.Conv2d(n*m,n*m,kernel_size=3,stride=1,padding=1)
            self.shift3.weight=kernel_shift_ini(n,m)
            self.add3 = nn.Conv2d(n*m,int(n/2)*int(m/2),kernel_size=1,stride=1,padding=0)
            self.add3.weight=kernel_add_ini(n,m)

    def get_descripteur(self,img,using_cuda):
        descripteur_img=self.Relu(self.conv1(img))
        b,c,h,w=descripteur_img.shape
        couche_constante=0.5*torch.ones([1,1,h,w])
        if using_cuda:
            couche_constante=couche_constante.cuda()
        descripteur_img=torch.cat((descripteur_img,couche_constante),1)
        descripteur_img_norm=descripteur_img/torch.norm(descripteur_img,dim=1)
        return descripteur_img_norm

    def forward(self,img,frag,using_cuda):
        psize=4
        descripteur_input1=self.get_descripteur(img,using_cuda)
        descripteur_input2=self.get_descripteur(frag,using_cuda)
        b,c,h,w=frag.shape
        n=int(h/psize)
        m=int(w/psize)
        for i in range(n):
            for j in range(m):
                if i==0 and j==0:
                    map_corre=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)
                else:
                    a=F.conv2d(descripteur_input1,get_patch(descripteur_input2,psize,i,j),padding=2)
                    map_corre=torch.cat((map_corre,a),1)
        #shift
        map_corre=self.maxpooling(map_corre)
        map_corre=self.shift1(map_corre)
        map_corre=self.add1(map_corre)
        n=int(n/2)
        m=int(m/2)
        if n>=2 and m>=2:
            map_corre=self.maxpooling(map_corre)
            map_corre=self.shift2(map_corre)
            map_corre=self.add2(map_corre)
        n=int(n/2)
        m=int(m/2)
        if n>=2 and m>=2:
            map_corre=self.maxpooling(map_corre)
            map_corre=self.shift3(map_corre)
            map_corre=self.add3(map_corre)
        b,c,h,w=map_corre.shape
        map_corre=map_corre/(map_corre.max())
        #map_corre=(F.softmax(map_corre.reshape(1,1,h*w,1),dim=2)).reshape(b,c,h,w)
        return map_corre
# In[4]:
# The functions in this block apply the network to fragments (not to square patches)
# This function selects a set of square patches from a fragment
# The “frag_size” parameter refers here to the size of the square input patch (16 * 16)
# The “seuillage” parameter thresholds the proportion of non-black pixels in each patch
# The “limite” parameter caps the number of patches found in each fragment
def get_patch_list(frag,frag_size,limite,seuillage):
    n=0
    m=0
    h,w,c=frag.shape
    patch_list=[]
    position_list=[]
    for i in range(4):
        if len(patch_list)>limite and limite!=0:
            break
        for j in range(4):
            if len(patch_list)>limite and limite!=0:
                break
            n_offset=i*4 # n offset
            m_offset=j*4 # m offset
            n=0
            while n+frag_size+n_offset<h:
                m=0
                while m+frag_size+m_offset<w:
                    patch=frag[n+n_offset:n+frag_size+n_offset,m+m_offset:m+frag_size+m_offset,:]
                    if test_fragment32_32(patch,seuillage):
                        patch_list.append(patch)
                        position_list.append([int((n+frag_size/2)+n_offset),int((m+frag_size/2)+m_offset)])
                    m=m+frag_size
                n=n+frag_size
    return patch_list,position_list

# Given a fragment and the fresco, run the network
def run_net_v3(net,img,frag,frag_size,limite,seuillage,using_cuda,rotation):
    Img=Image.fromarray(frag)
    frag=np.array(Img.rotate(rotation))
    img_tensor=img2tensor(img)
    # The collection of square patches from the fragment is "frag_list[]"
    # The positions of their centres within the fragment are in "position_frag[]"
    frag_list,position_frag=get_patch_list(frag,frag_size,limite,seuillage)
    if using_cuda:
        img_tensor=img_tensor.cuda()
    score_list=[]
    coordonnee_list=[]
    # For each square patch in the collection, compute its position with the network
    # The result goes into "coordonnee_list[]"
    # "score_list[]" is not used in our program
    for i in range(len(frag_list)):
        frag_tensor=img2tensor(frag_list[i])
        if using_cuda:
            frag_tensor=frag_tensor.cuda()
        res=net.forward(img_tensor,frag_tensor,using_cuda)
        if using_cuda:
            res=res.cpu()
        score,po_h,po_w=show_coordonnee(res)
        coordonnee_list.append([po_h,po_w])
        score_list.append(score)
    h_img,w_img,c=img.shape
    position=[]
    # Put the matched pairs into "position[]"
    # [x,y,x',y']
    # Position (x,y) in the fragment corresponds to position (x',y') in the fresco
    for i in range(len(coordonnee_list)):
        x0=position_frag[i][0]
        y0=position_frag[i][1]
        x1=int(round(h_img*coordonnee_list[i][0]))
        y1=int(round(w_img*coordonnee_list[i][1]))
        position.append([x0,y0,x1,y1])
    return score_list,position
# In[12]:
# This part of the code implements the improved RANSAC algorithm
# Write a point in the form [x,y,1]^T,
# used to build the transformation-matrix equation
def creer_point(x,y):
    p=np.zeros((3,1))
    p[0][0]=x
    p[1][0]=y
    p[2][0]=1
    return p

# Randomly select n points from M points, without duplicates
def selectionner_points(n,M):
    table=[]
    for i in range(M):
        table.append(i)
    result=[]
    for i in range(n):
        index=random.randint(0,M-i-1)
        result.append(table[index])
        table[index]=table[M-1-i]
    return result

# From the affine transformation matrix, compute the transformed centre position and the rotation angle
def position_rotation(h,centre_frag):
    centre=h@centre_frag
    cos_rot=(h[0][0]+h[1][1])/2
    sin_rot=(h[1][0]-h[0][1])/2
    tan_rot=sin_rot/(cos_rot+0.0000001)
    if cos_rot>0:
        rot_frag=math.atan(tan_rot)*(180/math.pi)
    else:
        rot_frag=math.atan(tan_rot)*(180/math.pi)+180
    rot_frag=-rot_frag
    if rot_frag>0:
        rot_frag-=360
    return centre[0][0],centre[1][0],rot_frag

# Check the RANSAC results using changes in Euclidean distance
def test_frag(inline,frag,fres):
    itera=10
    frag_inline=[]
    fres_inline=[]
    # Put the coordinates of the inlier points into "frag_inline[]" and "fres_inline[]"
    for i in range(np.size(inline,0)):
        if inline[i]==1:
            frag_inline.append([frag[i][0],frag[i][1]])
            fres_inline.append([fres[i][0],fres[i][1]])
    p=[]
    # Loop ten times;
    # each time, select two inlier matching pairs
    # and compute the change in their Euclidean distance
    for i in range(itera):
        point_test=selectionner_points(2,np.size(frag_inline,0))
        diff_x_frag=frag_inline[point_test[1]][0]-frag_inline[point_test[0]][0]
        diff_y_frag=frag_inline[point_test[1]][1]-frag_inline[point_test[0]][1]
        diff_frag=math.sqrt(pow(diff_x_frag,2)+pow(diff_y_frag,2))
        diff_x_fres=fres_inline[point_test[1]][0]-fres_inline[point_test[0]][0]
        diff_y_fres=fres_inline[point_test[1]][1]-fres_inline[point_test[0]][1]
        diff_fres=math.sqrt(pow(diff_x_fres,2)+pow(diff_y_fres,2))
        if diff_frag !=0:
            fsf=diff_fres/diff_frag
            p.append([fsf])
    result=np.mean(p)
    return result

def frag_match(frag,img,position):
    frag_size=frag.shape
    centre_frag=creer_point(frag_size[0]/2,frag_size[1]/2)
    retained_matches = []
    frag=[]
    fres=[]
    for i in range(len(position)):
        frag.append([float(position[i][0]),float(position[i][1])])
        fres.append([float(position[i][2]),float(position[i][3])])
    if np.size(frag)>0:
        # Compute the affine transformation matrix with the RANSAC method
        h,inline=cv2.estimateAffinePartial2D(np.array(frag),np.array(fres))
        # If “h” is not a 2 * 3 matrix, no affine transformation matrix was found
        if np.size(h)!=6:
            return ([-1])
        else:
            x,y,rot=position_rotation(h,centre_frag)
            pourcenttage=sum(inline)/np.size(frag,0)
            # The number of inlier points must be above a minimum count
            if sum(inline)>3:
                p=test_frag(inline,frag,fres)
                # The Euclidean distance between matching points must not change too much,
                # otherwise the RANSAC result is incorrect
                # here, the allowed change in Euclidean distance is between 0.7 and 1.3
                if abs(p-1)<0.3:
                    # Only then does RANSAC return the correct result
                    return([round(y),round(x),round(rot,3)])
                else:
                    return ([-2])
            else:
                return ([-3])
    else:
        return ([-4])
# In[14]:
if __name__=="__main__":
frag_size=16
using_cuda=True
net=load_net("./net_trainned6000")
img_test=cv2.imread("./fresque0.ppm")
result=[]
for n in range(315):
if n<10:
frag_test=cv2.imread("./frag_eroded0/frag_eroded_000"+str(n)+".ppm")
elif n<100:
frag_test=cv2.imread("./frag_eroded0/frag_eroded_00"+str(n)+".ppm")
else:
frag_test=cv2.imread("./frag_eroded0/frag_eroded_0"+str(n)+".ppm")
# Faites pivoter les pièces de 20 degrés à chaque fois pour correspondre, répétez 18 fois
for i in range(18):
rotation=20*i
score_list,position=run_net_v3(net,img_test,frag_test,frag_size,60,0.7,using_cuda,rotation)
frag_position=frag_match(frag_test,img_test,position)
# Lorsque Ransac obtient le bon résultat, sortez de la boucle
if len(frag_position)==3:
rotation_base=i*20
break
# Enregistrez les fragments correctement localisés dans "result[]"
if len(frag_position)==3:
frag_position[2]=rotation_base-360-frag_position[2]
if frag_position[2]>0:
frag_position[2]=frag_position[2]-360
result.append([n,frag_position[0],frag_position[1],round(frag_position[2],3)])
# In[15]:
result

View file

@ -44,3 +44,20 @@ et
2. Enter the command “conda activate py37”; (base) then becomes (py37). 2. Enter the command “conda activate py37”; (base) then becomes (py37).
3. I configured the py37 environment so that the code runs directly in it. 3. I configured the py37 environment so that the code runs directly in it.
## Files and their usage
* Apprentissage_initial_2021.ipynb: Training code. The functions are documented. The code expects a dataset generated with gen_frags.ipynb, trains a network, then saves it.
* gen_frags.ipynb: Fragment-dataset generation code. Several types of datasets can be generated with different parameters. The code is documented.
* Benchmark.ipynb: Evaluation code. The code expects a model and an evaluation dataset generated with gen_frags.ipynb (gen_frags.ipynb generates both the training and the test fragments).
It returns a JSON file containing all the evaluation results; interpret this file with display_bench.ipynb (see the sketch after this list).
* display_bench.ipynb: Displays the benchmark results. The code lets you visualize the results and also apply a rectification (see the report to understand the correlation between
distance and placement error).
* view_weight.ipynb: visualizes the weights of a model.
* view_cartes.ipynb: visualizes the correlation maps and the feature maps of a model.
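A minimal sketch of reading one of these result files, assuming (to be checked against the actual file) that it is a JSON dump carrying a 'meta' entry like the dict built in Benchmark.ipynb:

```python
import json

# Hypothetical reader; the key names ('meta', etc.) are an assumption.
with open('./results_bench/results_bench_f1_02-18_18-23_0118') as f:
    results = json.load(f)
print(results['meta'])  # date, base_dir, fresque_id, fresque_taille, N_fragments
```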
## Contact:
For any question about the data or the code, you can contact Arthur Grisel-Davy <gd@crans.org>.
Good luck!

File diff suppressed because one or more lines are too long

View file

@ -9,7 +9,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 7, "execution_count": 1,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -34,7 +34,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 124, "execution_count": 2,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -91,9 +91,11 @@
"\n", "\n",
" # Selection de la position\n", " # Selection de la position\n",
" pos = select_pos(taille)\n", " pos = select_pos(taille)\n",
" if v:print(\"position: {}\".format(pos))\n",
"\n", "\n",
" # Selection de la taille\n", " # Selection de la taille\n",
" height,width = frag_size,frag_size\n", " height,width = frag_size,frag_size\n",
" if v:print(\"taille: {}\".format((height,width)))\n",
"\n", "\n",
" # Selectionne le fragment\n", " # Selectionne le fragment\n",
" frag = fresque[pos[0]:pos[0]+height,pos[1]:pos[1]+width]\n", " frag = fresque[pos[0]:pos[0]+height,pos[1]:pos[1]+width]\n",
@ -102,15 +104,23 @@
" frag = modifier(frag)\n", " frag = modifier(frag)\n",
"\n", "\n",
" # Ecriture du frag\n", " # Ecriture du frag\n",
" if commit:\n",
" cv2.imwrite(fragment_filename.format(fresque_id,label,i),frag)\n", " cv2.imwrite(fragment_filename.format(fresque_id,label,i),frag)\n",
" \n", "\n",
" # Ecriture de la verité terrain\n", " # Ecriture de la verité terrain\n",
" with open(vt_filename.format(fresque_id,label,i),'w+') as f:\n", " with open(vt_filename.format(fresque_id,label,i),'w+') as f:\n",
" f.write(\"{}\\n{}\\n{}\\n{}\\n\".format(int(taille[0]),\n", " f.write(\"{}\\n{}\\n{}\\n{}\\n\".format(int(taille[0]),\n",
" int(taille[1]),\n", " int(taille[1]),\n",
" int((pos[0]+height/2)),\n", " int((pos[0]+height/2)),\n",
" int((pos[1]+width/2))))\n", " int((pos[1]+width/2))))\n",
"\n",
" f.close()\n", " f.close()\n",
"\n",
" if v:\n",
" print(\"{}\\n{}\\n{}\\n{}\\n\".format(int(taille[0]),\n",
" int(taille[1]),\n",
" int((pos[0]+height/2)),\n",
" int((pos[1]+width/2))))\n",
" \n", " \n",
" ## GENERATING BENCH\n", " ## GENERATING BENCH\n",
" label = 'bench'\n", " label = 'bench'\n",
@ -120,10 +130,12 @@
" \n", " \n",
" # Selection de la position\n", " # Selection de la position\n",
" pos = select_pos(taille)\n", " pos = select_pos(taille)\n",
" if v:print(\"position: {}\".format(pos))\n",
" \n", " \n",
" # Selection de la taille\n", " # Selection de la taille\n",
" height = randint(min_height,max_height)\n", " height = randint(min_height,max_height)\n",
" width = randint(min_width,max_width)\n", " width = randint(min_width,max_width)\n",
" if v:print(\"taille: {}\".format((height,width)))\n",
" \n", " \n",
" # Selectionne le fragment\n", " # Selectionne le fragment\n",
" frag = fresque[pos[0]:pos[0]+height,pos[1]:pos[1]+width]\n", " frag = fresque[pos[0]:pos[0]+height,pos[1]:pos[1]+width]\n",
@ -131,9 +143,10 @@
" if modifier != None:\n", " if modifier != None:\n",
" frag = modifier(frag)\n", " frag = modifier(frag)\n",
"\n", "\n",
" if commit:\n",
" # Ecriture du frag\n", " # Ecriture du frag\n",
" cv2.imwrite(fragment_filename.format(fresque_id,label,i),frag)\n", " cv2.imwrite(fragment_filename.format(fresque_id,label,i),frag)\n",
" \n", "\n",
" # Ecriture de la verité terrain\n", " # Ecriture de la verité terrain\n",
" with open(vt_filename.format(fresque_id,label,i),'w+') as f:\n", " with open(vt_filename.format(fresque_id,label,i),'w+') as f:\n",
" f.write(\"{}\\n{}\\n{}\\n{}\\n\".format(int(taille[0]),\n", " f.write(\"{}\\n{}\\n{}\\n{}\\n\".format(int(taille[0]),\n",
@ -142,6 +155,12 @@
" int((pos[1]+width/2))))\n", " int((pos[1]+width/2))))\n",
" f.close()\n", " f.close()\n",
"\n", "\n",
" if v:\n",
" print(\"{}\\n{}\\n{}\\n{}\\n\".format(int(taille[0]),\n",
" int(taille[1]),\n",
" int((pos[0]+height/2)),\n",
" int((pos[1]+width/2))))\n",
"\n",
" \n", " \n",
"def gen_frags_complet(modifier):\n", "def gen_frags_complet(modifier):\n",
" \"\"\" Function for generating the fragments to be a complet pavement of the fresque.\n", " \"\"\" Function for generating the fragments to be a complet pavement of the fresque.\n",
@ -192,17 +211,20 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 118, "execution_count": 4,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"base_dir = './training_data_random_shift_color/'\n", "base_dir = './training_data_shift_color_16/'\n",
"fresque_filename = base_dir+'fresque_small{}.ppm'\n", "fresque_filename = base_dir+'fresque{}.ppm'\n",
"fresque_filename_wild = base_dir+'fresque_small*.ppm'\n", "fresque_filename_wild = base_dir+'fresque*.ppm'\n",
"fragment_filename = base_dir+'fragments/fresque{}/frag_{}_{:05}.ppm'\n", "fragment_filename = base_dir+'fragments/fresque{}/frag_{}_{:05}.ppm'\n",
"fragments_filename_wild = base_dir+'fragments/fresque{}/frag_{}_*.ppm'\n", "fragments_filename_wild = base_dir+'fragments/fresque{}/frag_{}_*.ppm'\n",
"vt_filename = base_dir+'fragments/fresque{}/vt/frag_{}_{:05}_vt.txt'\n", "vt_filename = base_dir+'fragments/fresque{}/vt/frag_{}_{:05}_vt.txt'\n",
"deep_folder_filename = base_dir+'fragments/fresque{}/vt/'\n", "deep_folder_filename = base_dir+'fragments/fresque{}/vt/'\n",
"\n",
"v = True # Verbose\n",
"commit = True\n",
" \n", " \n",
"# Nombre de fresques\n", "# Nombre de fresques\n",
"N_fresques = 6\n", "N_fresques = 6\n",
@ -215,7 +237,7 @@
"frag_size = 16\n", "frag_size = 16\n",
"\n", "\n",
"# Nombre de fragments\n", "# Nombre de fragments\n",
"N_bench = 300\n", "N_bench = 200\n",
"N_dev = 3000\n", "N_dev = 3000\n",
"\n", "\n",
"alpha = 30 # Range pour le shift de couleur" "alpha = 30 # Range pour le shift de couleur"
@ -230,29 +252,20 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 125, "execution_count": 5,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"Fresque 5, fragment 2289/3000, 76.3%\n" "Fresque 5, fragment 199/200, 99.5%\n",
] "position: [573, 227]\n",
}, "taille: (91, 90)\n",
{ "1000\n",
"name": "stderr", "1000\n",
"output_type": "stream", "618\n",
"text": [ "272\n",
"IOPub message rate exceeded.\n",
"The notebook server will temporarily stop sending output\n",
"to the client in order to avoid crashing it.\n",
"To change this limit, set the config variable\n",
"`--NotebookApp.iopub_msg_rate_limit`.\n",
"\n",
"Current values:\n",
"NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n",
"NotebookApp.rate_limit_window=3.0 (secs)\n",
"\n" "\n"
] ]
} }

2829
view_cartes.ipynb Normal file

File diff suppressed because one or more lines are too long

868
view_loss.ipynb Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long