diff --git a/Check_Results/draw_points.py b/Check_Results/draw_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef659134465cf8d6ecd7c2659e5aa2f60c4b739e
--- /dev/null
+++ b/Check_Results/draw_points.py
@@ -0,0 +1,201 @@
+
+import sys
+import math
+import numpy
+import matplotlib.pyplot as plt
+import theano
+import lasagne
+import theano.tensor as T
+import pandas as pd
+import glob
+import cv2
+from predict_point_first_part import load_grasped_image, get_first_grasping_point, dibuixar_punt, get_ground_truth_two_points, obtain_vector_points
+
+#sys.path.insert(0,'2nd_point/')
+from net import networks
+
+THRESHOLD = 0.04 # in meters
+size = '0975'
+#size = '1025'
+half_size_point = 2
+
+CONFIDENCE = 0.8
+
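+# Interactive check of the two-point grasping pipeline:
+#   step 0 - rotate the camera until net1 sees a first grasping point,
+#   step 1 - after regrasping, rotate until net2 sees the second point,
+#   step 2 - show the render of the garment held at both points,
+#   step 3 - re-check the second point on the resulting pose.
+# Pairs of grasped point ids are collected in points_grasped and saved.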
+def main():
+
+    points_grasped = []
+
+    nets = networks()
+
+    plt.ion()
+
+    fig = plt.figure(0)
+    ax = fig.add_subplot(1,1,1)
+
+    fig2 = plt.figure(1)
+    ax2 = fig2.add_subplot(1,1,1)
+
+    fig3 = plt.figure(2)
+    ax3 = fig3.add_subplot(1,1,1)
+
+    csv = pd.read_csv(glob.glob('../../../Images/manipulacio/Jeans_with_info/Jeans_'+ size +'/*.csv')[0], header=None)
+
+    all_poses = csv.iloc[:,0].unique()
+    numpy.random.shuffle(all_poses)
+
+    for pose in all_poses:
+        camera = 1
+        step = 0
+
+        while step < 4 and camera < 37:  # steps 0-3; step 4 or Inf ends this pose
+            if step == 0:
+                # Step 0: rotate the camera until net1 sees a first grasping point.
+                ax.clear()
+
+                if len(glob.glob('../../../Images/manipulacio/Jeans_with_info/Jeans_'+ size +'/*.csv')) > 0:
+
+                    image = cv2.imread(glob.glob('../../../Images/manipulacio/Jeans_with_info/Jeans_'+ size +'/*_'+str(pose)+'_'+str(camera)+'.tif')[0], -1)
+                    image = image[:,80:240,0]
+                    # Map the raw 16-bit depth render onto metric depth in [1.2, 1.8] m.
+                    image = (image>0)*(1.2+0.6*(1-numpy.float32(image)/65279))
+
+                    seen1, seen2 = nets.pred_1_class(image)
+
+                    if seen1 == 1 or seen2 == 1:
+
+                        point1, point2 = nets.pred_1st_point(image)
+
+                        # Select the csv rows belonging to this pose.
+                        join_csv_with_img = (csv.iloc[:,0] == int(pose))*1
+                        coincidences = join_csv_with_img.sum()
+                        line_in_csv = join_csv_with_img.argmax()
+                        mat = csv.iloc[line_in_csv:line_in_csv+coincidences,:].as_matrix()
+
+                        next_step, image2, pose2, csv2, gtruth, first_grasped_point = get_first_grasping_point(image, point1, point2, mat, size, camera, ax)
+
+                        gtruth1, gtruth2 = get_ground_truth_two_points(mat, camera)
+
+                        # dibuixar_punt ("draw point") overlays a point on the image.
+                        dibuixar_punt(gtruth1[0], gtruth1[1], gtruth1[2], image, ax=ax)
+                        dibuixar_punt(gtruth2[0], gtruth2[1], gtruth2[2], image, ax=ax)
+
+                        plt.figure(0)
+                        plt.imshow(image)
+                        plt.show(block=False)
+
+                        step += next_step
+
+                        # Abort this pose if the depth image is nearly empty.
+                        if sum(sum(image>0)) < 500:
+                            step = numpy.Inf
+                        elif next_step == 1:
+                            camera = 1
+                            plt.pause(0.2)
+                        else:
+                            camera += 1
+                            plt.pause(0.2)
+                    else:
+                        camera += 1
+                    print(camera)
+
+            elif step == 1:
+                # Step 1: the garment hangs from the first point; look for the second.
+                ax2.clear()
+
+                seen = nets.pred_2_class(image2, 0.75)
+
+                plt.figure(1)
+                plt.imshow(image2)
+                plt.show(block=False)
+
+                dibuixar_punt(gtruth[0], gtruth[1], gtruth[2], image2, ax=ax2)
+
+                if seen != 1:
+                    camera += 1
+                    plt.pause(0.5)
+                    image2, gtruth = load_grasped_image(size, pose2, camera, csv2)
+                else:
+                    point = nets.pred_2nd_point(image2)
+                    join_csv_with_img = (csv2.iloc[:,0] == int(pose2))*1
+                    coincidences = join_csv_with_img.sum()
+                    line_in_csv = join_csv_with_img.argmax()
+                    mat = csv2.iloc[line_in_csv:line_in_csv+coincidences,:].as_matrix()
+
+                    vect_points = obtain_vector_points(mat, camera)
+
+                    punt, punt_proxim = dibuixar_punt(point[0], point[1], point[2], image2, vect_points, mat, ax2)
+
+                    if punt == 0:
+                        plt.pause(1)
+                        step += 1
+                        camera = 1
+                        points_grasped.append([first_grasped_point, punt_proxim[2]])
+                    else:
+                        plt.pause(0.5)
+                        camera += 1
+                        image2, gtruth = load_grasped_image(size, pose2, camera, csv2)
+
+            elif step == 2:
+                # Step 2: show the render of the garment grasped at both points.
+                ax3.clear()
+                im = glob.glob('/home/ecorona/CNN/Images/manipulacio/Jeans_two_points/Jeans_' + str(int(first_grasped_point)) + '_' + str(int(punt_proxim[2])) + '_10.tif')
+                print(im)
+                print('/home/ecorona/CNN/Images/manipulacio/Jeans_two_points/Jeans_' + str(int(first_grasped_point)) + '_' + str(int(punt_proxim[2])) + '_10.tif')
+                # The two grasp indices may be stored in either order.
+                if len(im) == 0:
+                    im = glob.glob('/home/ecorona/CNN/Images/manipulacio/Jeans_two_points/Jeans_' + str(int(punt_proxim[2])) + '_' + str(int(first_grasped_point)) + '_10.tif')
+                final_pose = cv2.imread(im[0], -1)
+                x_crop = numpy.min(numpy.argmax(final_pose[0:100,:,0] != 0, axis=1))-40
+                final_pose = final_pose[:,x_crop:x_crop+160,0]
+                image = numpy.float32((final_pose>0)*(1.2+0.6*(1-numpy.float32(final_pose)/65279)))
+
+                plt.figure(2)
+                plt.imshow(image)
+                plt.show(block=False)
+                plt.pause(1)
+                step = 3
+
+            elif step == 3:
+                # Step 3: re-check the second point on the resulting pose.
+                ax3.clear()
+
+                new_pose = int(csv2.iloc[numpy.argmax(csv2.iloc[:,1]==punt_proxim[2]), 0])
+                image3, gtruth = load_grasped_image(size, new_pose, camera, csv2)
+
+                plt.figure(2)
+                plt.imshow(image3)
+                plt.show(block=False)
+
+                seen = nets.pred_2_class(image3, 0.75)
+
+                point = nets.pred_2nd_point(image3)
+
+                if seen != 1:
+                    camera += 1
+                    plt.pause(0.5)
+                    image2, gtruth = load_grasped_image(size, pose2, camera, csv2)
+                else:
+                    join_csv_with_img = (csv2.iloc[:,0] == int(pose2))*1
+                    coincidences = join_csv_with_img.sum()
+                    line_in_csv = join_csv_with_img.argmax()
+                    mat = csv2.iloc[line_in_csv:line_in_csv+coincidences,:].as_matrix()
+
+                    vect_points = obtain_vector_points(mat, camera)
+
+                    punt, punt_proxim2 = dibuixar_punt(point[0], point[1], point[2], image3, vect_points, mat, ax3)
+
+                    dibuixar_punt(gtruth[0], gtruth[1], gtruth[2], image3, ax=ax3)
+
+                    if punt == 0:
+                        plt.pause(1)
+                        step += 1
+                        camera = 1
+                    else:
+                        plt.pause(0.5)
+                        camera += 1
+
+    numpy.save('points_grasped.npy', numpy.array(points_grasped))
+
+
+if "__main__" == __name__:
+	main()
+
+
diff --git a/Check_Results/net.py b/Check_Results/net.py
new file mode 100644
index 0000000000000000000000000000000000000000..343149d735025556a5c985f3f44c3a510bc4b58a
--- /dev/null
+++ b/Check_Results/net.py
@@ -0,0 +1,196 @@
+
+from __future__ import print_function
+import scipy.io
+import sys
+import os
+import time
+import numpy
+import theano
+import theano.tensor as T
+import lasagne
+import matplotlib.pyplot as plt
+
+class networks(object):
+    """Wraps the two Lasagne models: net1 predicts the first grasping
+    point(s), net2 the second one. Both share the same architecture, so the
+    weights are swapped in lazily before each prediction."""
+
+    def __init__(self, real=False):
+        input_var = T.tensor4('inputs')
+
+        self.net1, self.net2 = self.build_network(input_var)
+
+        if real == False:
+            self.params1 = numpy.load('/home/ecorona/CNN/manipulation/Jeans/Simulate_WAMs/Results_1st/parameters_best_val_loss.npy')
+            self.params2 = numpy.load('/home/ecorona/CNN/manipulation/Jeans/Simulate_WAMs/Results_2nd/parameters_best_val_loss.npy')
+        else:
+            self.params1 = numpy.load('Results_1st_with_noise/parameters_best_val_loss.npy')
+            self.params2 = numpy.load('Results_2nd_with_noise/parameters_best_val_loss.npy')
+            print("Parameters for real")
+
+        test_prediction = lasagne.layers.get_output(self.net1, deterministic=True)
+        self.predict_1st = theano.function([input_var], test_prediction)
+
+        test_prediction = lasagne.layers.get_output(self.net2, deterministic=True)
+        self.predict_2nd = theano.function([input_var], test_prediction)
+        self.params_loaded = 0
+
+    def pred_1_class(self, image, CONFIDENCE_CLASSIFIER=0.5):
+        if self.params_loaded != 1:
+            lasagne.layers.set_all_param_values(self.net1, self.params1)
+            self.params_loaded = 1
+
+        prediction = self.predict_1st(image.reshape([1,1,240,160]))
+
+        print('\t\t\t\t\t' + str(prediction[0,4]))
+        print('\t\t\t\t\t' + str(prediction[0,9]))
+
+        # net1 output layout: [x1,y1,z1, class1(2), x2,y2,z2, class2(2)];
+        # indices 4 and 9 are the "point visible" softmax scores.
+        prediccio1 = numpy.argmax(prediction[0,[3,4]] > CONFIDENCE_CLASSIFIER)
+        prediccio2 = numpy.argmax(prediction[0,[8,9]] > CONFIDENCE_CLASSIFIER)
+
+        return prediccio1, prediccio2
+
+    def pred_2_class(self, image, CONFIDENCE_CLASSIFIER=0.5):
+        if self.params_loaded != 2:
+            lasagne.layers.set_all_param_values(self.net2, self.params2)
+            self.params_loaded = 2
+
+        prediction = self.predict_2nd(image.reshape([1,1,240,160]))
+
+        prediccio = numpy.argmax(prediction[0,[3,4]] > CONFIDENCE_CLASSIFIER)
+
+        print('\t\t\t\t\t' + str(prediction[0,4]))
+
+        return prediccio
+
+    def pred_2nd_point(self, image):
+        if self.params_loaded != 2:
+            lasagne.layers.set_all_param_values(self.net2, self.params2)
+            self.params_loaded = 2
+
+        prediction = self.predict_2nd(image.reshape([1,1,240,160]))
+
+        return prediction[0,[0,1,2]]
+
+    def pred_1st_point(self, image):
+        if self.params_loaded != 1:
+            lasagne.layers.set_all_param_values(self.net1, self.params1)
+            self.params_loaded = 1
+
+        pred = self.predict_1st(image.reshape([1,1,240,160]))
+
+        point1 = pred[0,[0,1,2]]
+        point2 = pred[0,[5,6,7]]
+
+        return point1, point2
+
+    def accuracy_1st(self, images, gtruth, seen):
+        if self.params_loaded != 1:
+            lasagne.layers.set_all_param_values(self.net1, self.params1)
+            self.params_loaded = 1
+
+        pred = self.predict_1st(images.reshape([-1,1,240,160]))
+
+        # The two predicted points are unordered, so score both assignments
+        # against the ground truth and keep the better one per batch.
+        difference = lasagne.objectives.squared_error(gtruth, pred[:,[0,1,2,5,6,7]])
+        difference2 = lasagne.objectives.squared_error(gtruth, pred[:,[5,6,7,0,1,2]])
+        order = numpy.argmin((numpy.sum(difference,axis=1), numpy.sum(difference2,axis=1)), axis=0)
+
+        medium_distance = numpy.sqrt(difference)
+        medium_distance2 = numpy.sqrt(difference2)
+
+        medium_distance1 = numpy.mean(numpy.sqrt(numpy.sum(medium_distance[:,[0,1,2]]**2,axis=1)))*(order==0) + (order==1)*numpy.mean(numpy.sqrt(numpy.sum(medium_distance2[:,[0,1,2]]**2,axis=1)))
+        medium_distance2 = numpy.mean(numpy.sqrt(numpy.sum(medium_distance[:,[3,4,5]]**2,axis=1)))*(order==0) + (order==1)*numpy.mean(numpy.sqrt(numpy.sum(medium_distance2[:,[3,4,5]]**2,axis=1)))
+
+        medium_distance = (medium_distance1 + medium_distance2)/2
+
+        # Per-output squared error, averaged only over samples where the
+        # point is actually visible.
+        res = numpy.sum(difference*seen,axis=0)/numpy.sum(seen,axis=0)
+        return res, medium_distance
+
+    def accuracy_2nd(self, images, gtruth, seen):
+        if self.params_loaded != 2:
+            lasagne.layers.set_all_param_values(self.net2, self.params2)
+            self.params_loaded = 2
+
+        pred = self.predict_2nd(images.reshape([-1,1,240,160]))
+
+        difference = lasagne.objectives.squared_error(gtruth, pred[:,[0,1,2]])
+
+        medium_distance = numpy.sqrt(difference)
+
+        # Mean Euclidean distance over the samples where the point is visible.
+        medium_distance = numpy.sum(numpy.sqrt(numpy.sum(medium_distance[:,[0,1,2]]**2,axis=1))*seen[:,0])/numpy.sum(seen[:,0])
+
+        res = numpy.sum(difference*seen,axis=0)/numpy.sum(seen,axis=0)
+
+        return res, medium_distance
+
+    def build_network(self, input_var):
+        # AlexNet-style trunk split into two parallel columns, shared by both
+        # heads; only the final dense layers differ between net1 and net2.
+        nonlinearity = lasagne.nonlinearities.rectify
+
+        network = lasagne.layers.InputLayer(shape=(None,1,240,160), input_var=input_var)
+
+        network = lasagne.layers.BatchNormLayer(network)
+
+        network = lasagne.layers.Conv2DLayer(network, num_filters=96, stride=(4,4), pad='full', filter_size=(11,11), nonlinearity=nonlinearity)
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+        network1 = lasagne.layers.SliceLayer(network, indices=slice(0,48), axis=1)
+        network1 = lasagne.layers.Conv2DLayer(network1, num_filters=128, stride=(1,1), pad='full', filter_size=(5,5), nonlinearity=nonlinearity)
+
+        network2 = lasagne.layers.SliceLayer(network, indices=slice(48,96), axis=1)
+        network2 = lasagne.layers.Conv2DLayer(network2, num_filters=128, stride=(1,1), pad='full', filter_size=(5,5), nonlinearity=nonlinearity)
+
+        network = lasagne.layers.ConcatLayer((network1,network2), axis=1)
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+        network = lasagne.layers.Conv2DLayer(network, num_filters=384, stride=(1,1), pad='full', filter_size=(3,3), nonlinearity=nonlinearity)
+
+        network1 = lasagne.layers.SliceLayer(network, indices=slice(0,192), axis=1)
+        network1 = lasagne.layers.Conv2DLayer(network1, num_filters=192, stride=(1,1), pad='full', filter_size=(3,3), nonlinearity=nonlinearity)
+        network1 = lasagne.layers.Conv2DLayer(network1, num_filters=128, stride=(1,1), pad='full', filter_size=(3,3), nonlinearity=nonlinearity)
+
+        network2 = lasagne.layers.SliceLayer(network, indices=slice(192,384), axis=1)
+        network2 = lasagne.layers.Conv2DLayer(network2, num_filters=192, stride=(1,1), pad='full', filter_size=(3,3), nonlinearity=nonlinearity)
+        network2 = lasagne.layers.Conv2DLayer(network2, num_filters=128, stride=(1,1), pad='full', filter_size=(3,3), nonlinearity=nonlinearity)
+
+        network = lasagne.layers.ConcatLayer((network1,network2), axis=1)
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+        network = lasagne.layers.DenseLayer(network, num_units=4096, nonlinearity=nonlinearity)
+        network = lasagne.layers.DenseLayer(network, num_units=4096, nonlinearity=nonlinearity)
+
+        # FIRST NETWORK: two 3D points, each with a 2-way visibility softmax.
+        point1_pred = lasagne.layers.DenseLayer(network, num_units=3, nonlinearity=None, W=lasagne.init.GlorotUniform())
+        point2_pred = lasagne.layers.DenseLayer(network, num_units=3, nonlinearity=None, W=lasagne.init.GlorotUniform())
+        point1_class = lasagne.layers.DenseLayer(network, num_units=2, nonlinearity=lasagne.nonlinearities.softmax, W=lasagne.init.GlorotUniform())
+        point2_class = lasagne.layers.DenseLayer(network, num_units=2, nonlinearity=lasagne.nonlinearities.softmax, W=lasagne.init.GlorotUniform())
+
+        network_1st_point = lasagne.layers.ConcatLayer((point1_pred,point1_class,point2_pred,point2_class), axis=1)
+
+        # SECOND NETWORK: one 3D point plus its visibility softmax.
+        network_pred = lasagne.layers.DenseLayer(network, num_units=3, nonlinearity=None, W=lasagne.init.GlorotUniform())
+        network_class = lasagne.layers.DenseLayer(network, num_units=2, nonlinearity=lasagne.nonlinearities.softmax, W=lasagne.init.GlorotUniform())
+
+        network_2nd_point = lasagne.layers.ConcatLayer([network_pred,network_class], axis=1)
+
+        return network_1st_point, network_2nd_point
+		
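+# Minimal usage sketch (assumes the parameter .npy files listed above exist):
+#
+#     nets = networks(real=True)
+#     seen = nets.pred_2_class(depth_image, 0.75)  # depth_image: 240x160 depth map
+#     x, y, z = nets.pred_2nd_point(depth_image)   # predicted 3D point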
+
diff --git a/Check_Results/predict_loss.py b/Check_Results/predict_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..4838cbf964b239e1a1932be4da3edc1f15fb7cbb
--- /dev/null
+++ b/Check_Results/predict_loss.py
@@ -0,0 +1,84 @@
+
+from __future__ import print_function
+import scipy.io
+import sys
+import os
+import time
+import numpy
+import theano
+import theano.tensor as T
+import lasagne
+import matplotlib.pyplot as plt
+
+from net import networks
+
+# sklearn's preprocessing module offers a scale function that sets the mean
+# to 0 and the standard deviation to 1.
+
+# Garment-class labels (last version):
+# 0 -> Jeans
+# 1 -> Jumper
+# 2 -> T_Shirt
+# 3 -> Towel
+ 
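+# A minimal sketch of such a standardisation helper (not applied here: the
+# .npy datasets loaded below are assumed to be stored pre-scaled):
+def scale(X):
+    # Zero-mean, unit-variance over the whole dataset X.
+    return (X - X.mean()) / X.std()
+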
+def iterate_minibatches(inputs, targets, points, batchsize, shuffle=False):
+    assert len(inputs) == len(targets)
+    if shuffle:
+        indices = numpy.arange(len(inputs))
+        numpy.random.shuffle(indices)
+    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
+        if shuffle:
+            excerpt = indices[start_idx:start_idx + batchsize]
+        else:
+            excerpt = slice(start_idx, start_idx + batchsize)
+        yield inputs[excerpt], targets[excerpt], points[excerpt]
+
+
+def load_dataset():
+    X_valid = numpy.load('Datasets2/X_valid.npy')  # or X_testing.npy
+    Ponderate_valid = numpy.repeat(numpy.load('Datasets2/Points_seen_valid.npy').reshape([-1,1]), 3, axis=1)
+
+    Y_valid = numpy.load('Datasets2/Y_valid.npy')
+
+    X_valid = X_valid.reshape([-1,1,240,160]).astype(numpy.float32)
+    Y_valid = numpy.float32(Y_valid)
+
+    print("X_valid shape is: " + str(X_valid.shape))
+
+    return X_valid, Y_valid, Ponderate_valid
+
+def main():
+    print("Loading data...")
+    X_valid, Y_valid, punts_valid = load_dataset()
+
+    net = networks(real=True)
+
+    # Split the validation set into 100 batches.
+    number_of_batches = 100
+    length_of_batches = numpy.shape(Y_valid)[0]/number_of_batches
+    length_of_batches_valid = length_of_batches
+
+    # Accumulate the per-output squared error and the mean 3D distance.
+    error_per_val = numpy.zeros(numpy.shape(Y_valid)[1])
+    val_batches = 0
+    err_distance = 0
+
+    for batch in iterate_minibatches(X_valid, Y_valid, punts_valid, length_of_batches_valid, shuffle=False):
+        inputs, targets, punts = batch
+        error, medium_distance = net.accuracy_2nd(inputs, targets, punts)
+        val_batches += 1
+        error_per_val += error
+        err_distance += medium_distance
+
+    print("      Error valid distribution: " + str((error_per_val/val_batches).tolist()))
+    print("      Mean error distance: " + str(err_distance/val_batches))
+
+    print("")
+
+    print("Finished")
+
+if __name__ == "__main__":
+    main()
+
diff --git a/Check_Results/predict_loss_two_points.py b/Check_Results/predict_loss_two_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..307c68386857da93b4a6c3fa3577830b91633a6b
--- /dev/null
+++ b/Check_Results/predict_loss_two_points.py
@@ -0,0 +1,84 @@
+
+from __future__ import print_function
+import scipy.io
+import sys
+import os
+import time
+import numpy
+import theano
+import theano.tensor as T
+import lasagne
+import matplotlib.pyplot as plt
+
+from net import networks
+
+# sklearn's preprocessing module offers a scale function that sets the mean
+# to 0 and the standard deviation to 1; the datasets below are assumed to be
+# stored pre-scaled.
+
+# Garment-class labels (last version):
+# 0 -> Jeans
+# 1 -> Jumper
+# 2 -> T_Shirt
+# 3 -> Towel
+ 
+def iterate_minibatches(inputs, targets, points, batchsize, shuffle=False):
+    assert len(inputs) == len(targets)
+    if shuffle:
+        indices = numpy.arange(len(inputs))
+        numpy.random.shuffle(indices)
+    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
+        if shuffle:
+            excerpt = indices[start_idx:start_idx + batchsize]
+        else:
+            excerpt = slice(start_idx, start_idx + batchsize)
+        yield inputs[excerpt], targets[excerpt], points[excerpt]
+
+
+def load_dataset():
+    X_valid = numpy.load('Datasets1/X_valid.npy')  # or X_testing.npy
+    Ponderate_valid = numpy.repeat(numpy.load('Datasets1/Points_seen_valid.npy'), 3, axis=1)
+
+    Y_valid = numpy.load('Datasets1/Y_valid.npy')
+
+    X_valid = X_valid.reshape([-1,1,240,160]).astype(numpy.float32)
+    Y_valid = numpy.float32(Y_valid)
+
+    print("X_valid shape is: " + str(X_valid.shape))
+
+    return X_valid, Y_valid, Ponderate_valid
+
+def main():
+    print("Loading data...")
+    X_valid, Y_valid, punts_valid = load_dataset()
+
+    net = networks(real=True)
+
+    # Split the validation set into 100 batches.
+    number_of_batches = 100
+    length_of_batches = numpy.shape(Y_valid)[0]/number_of_batches
+    length_of_batches_valid = length_of_batches
+
+    # Accumulate the per-output squared error and the mean 3D distance.
+    error_per_val = numpy.zeros(numpy.shape(Y_valid)[1])
+    val_batches = 0
+    err_distance = 0
+
+    # Only the first 1000 validation samples are evaluated here.
+    for batch in iterate_minibatches(X_valid[0:1000], Y_valid[0:1000], punts_valid[0:1000], length_of_batches_valid, shuffle=False):
+        inputs, targets, punts = batch
+        error, medium_distance = net.accuracy_1st(inputs, targets, punts)
+        val_batches += 1
+        error_per_val += error
+        err_distance += medium_distance
+
+    print("      Error valid distribution: " + str((error_per_val/val_batches).tolist()))
+    print("      Mean error distance: " + str(err_distance/val_batches))
+
+    print("")
+
+    print("Finished")
+
+if __name__ == "__main__":
+    main()
+
diff --git a/Check_Results/show_ground_truth.py b/Check_Results/show_ground_truth.py
new file mode 100644
index 0000000000000000000000000000000000000000..37f428c3fc83c77df8d87b44f970549477a0314b
--- /dev/null
+++ b/Check_Results/show_ground_truth.py
@@ -0,0 +1,59 @@
+import math
+import numpy
+import matplotlib.pyplot as plt
+
+X = numpy.load('Datasets1/X_valid.npy')
+Y = numpy.load('Datasets1/Y_valid.npy')
+
+indexs = numpy.arange(len(X))
+numpy.random.shuffle(indexs)
+
+X = X[indexs]
+Y = Y[indexs]
+
+THRESHOLD = 0.025
+
+plt.ion()
+
+half_size_point = 1  
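+
+# Hedged helper mirroring the projection used in the loop below: the virtual
+# camera sits 150 cm from the origin and the 160x240 image is assumed to span
+# a 57 x 42.75 degree field of view (matching the Maya capture setup).
+def world_to_pixel(x, y, z):
+    # Perspective projection from label coordinates (cm) to pixel coordinates.
+    u = 80 - x / ((150 + z) * math.tan(57.0/2 * math.pi/180) / 160)
+    v = 119 + (-60 - y) / ((150 + z) * math.tan(42.75/2 * math.pi/180) / 120)
+    return u, v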
+
+for k in range(Y.shape[0]):
+
+    # Point 1
+    x = Y[k,0] # cm
+    y = Y[k,1] # cm
+    z = Y[k,2] # cm
+
+    # The capture cameras use a 57 degree horizontal field of view (the Xtion
+    # is 58 (horizontal) x 45 (vertical) degrees, the Kinect 57 x 43).
+    x_point1 = 80 - x/((150+z)*math.tan(57.0/2 * math.pi/180)/160)
+    y_point1 = 119 + (-60-y)/((150+z)*math.tan(42.75/2 * math.pi/180)/120)
+
+    if(x_point1-half_size_point>0 and y_point1-half_size_point>0 and x_point1+half_size_point<160 and y_point1+half_size_point<240):
+        index_y = int(round(y_point1))
+        index_x = int(round(x_point1))
+
+        # Mark the point only where the measured depth matches the label depth.
+        if((abs(X[k,index_y-half_size_point:index_y+half_size_point+1, index_x-half_size_point:index_x+half_size_point+1] - (1.5+z/100)) < THRESHOLD).any()):
+            X[k,index_y-half_size_point:index_y+half_size_point+1, index_x-half_size_point:index_x+half_size_point+1] = 1
+
+    if (Y.shape[1]==6):
+        # Point 2
+        x = Y[k,3] # cm
+        y = Y[k,4] # cm
+        z = Y[k,5] # cm
+
+        x_point2 = 80 - x/((150+z)*math.tan(57.0/2 * math.pi/180)/160)
+        y_point2 = 119 + (-60-y)/((150+z)*math.tan(42.75/2 * math.pi/180)/120)
+
+        if(x_point2-half_size_point>0 and y_point2-half_size_point>0 and x_point2+half_size_point<160 and y_point2+half_size_point<240):
+            index_y = int(round(y_point2))
+            index_x = int(round(x_point2))
+
+            if((abs(X[k,index_y-half_size_point:index_y+half_size_point+1, index_x-half_size_point:index_x+half_size_point+1] - (1.5+z/100)) < THRESHOLD).any()):
+                X[k,index_y-half_size_point:index_y+half_size_point+1, index_x-half_size_point:index_x+half_size_point+1] = 1
+
+    plt.imshow(X[k])
+    plt.show(block=False)
+    plt.pause(1)
+
diff --git a/MEL/Physical_properties_for_each_model_and_vertex_vectors.mel b/MEL/Physical_properties_for_each_model_and_vertex_vectors.mel
new file mode 100644
index 0000000000000000000000000000000000000000..de0153c75babaf6c7ff1e90f6fce45e57c28d554
--- /dev/null
+++ b/MEL/Physical_properties_for_each_model_and_vertex_vectors.mel
@@ -0,0 +1,214 @@
+//--Complete point sets:
+//With 75% reduction:
+//int $vertics_T_Shirt[] = {1927, 1853,1928,729,1852,249,247,589,1988,2062,583,1965,2010,1903,1940,2244,898,900,2123,2148,633,2105,561,1966,2102,913,2203,2190,2116,2078,1921,539,884,2161,2281,673,2219,2153,2057,1913,1980,639,2188,2226,2196,2168,2095,1975,569,2179,677,2285,674,2171,2137,591,1935,888,644,2194,2201,2174,2037,855,987,989,241,73,1846,1849,1,16,183,938,19,939,940,2271,2047,1961,678,907,2265,576,535,892,2184,2183,2185,2081,1896,1893,1879,197,1832,1561,2024,2021,646,606,2145,2253,572,2239,861,526,573,1936,58,219,217,1621,1781,1815,1814,91,920,4,924,926,929,932,1322,1110,290,350,352,1305,1211,1237,1227,728,366,735,1269,1290,1267,1182,1097,1157,1231,1291,399,385,354,1180,760,1299,1280,748,731,1221,390,765,1300,762,370,718,1090,369,757,1263,1346,752,727,706,285,346,1137,360,1202,1188,1024,83,89,1167,1205,1043,1324,743,976,135,492,1515,171,1757,1105,1121,1074,1128,1753,1751,163,1746,1505,443,1481,1550,454,1496,1457,1458,1444,109,79,1478,441,1384,1381,459,788,1452,1435,944,1420,1391,118,1731,1644,1684,483,1706,507,1597,1880,846,848,1698,1708,65,509,1687,1618,1648,843,1701,1591,466,1578,152,1585}; 
+//Original:
+//int $vertics_T_shirt_male_obj[] = {475,480,486,492,1557,1552,1860,1868,830,827,539,169,799,1253,1832,1795,740,738,1816,783,753,1548,2238,1290,1730,1188,1779,2264,1258,1743,682,678,672,1024,810,757,711,867,869,870,872,1907,1906,2100,2083,1064,1062,838,161,1069,31,2102,1888,1890,1940,907,956,1933,1897,1989,143,146,27,1956,1226,857,81,841,137,2121,2131,2175,299,296,263,323,57,865,155,1235,1922,649,293,959,646,1004,381,958,610,585,372,388,611,653,978,995,1013,68,895,14,946,1977,1500,5,147,1100,1104,1494,1932,1884,1170,1442,1161,1404,1399,1373,2020,1709,1717,1724,1990,1993,1147,2009,1460,1650,1467,656,637,398,490,469,1560,1543,1863,1865,820,818,792,794,1841,1838,1815,1830,774,759,213,1039,1291,2061,1741,1740,1782,722,674,714,695,790,931,1965,1304,1788,54,935,1428,1768,349,1321,54,222,1373,1477,1680,1718,1445,1679,1356,1324,1298,1267,1295,219,455,185,91,217,1136,1373,280,52,55,1473};
+
+// - Half point sets: -
+
+
+// ------------  Jeans1 -------------
+//Original: half
+thickness = 0.1
+friction = 0.8
+Stretch Resistance = 1
+Compression Resistance = 0.1
+BendResistance = 0.025
+BendAngle Dropoff = 0
+Shear Resistance = 0
+Restitution Angle = 360
+Restitution Tension = 1000
+Rigidity = 0
+Deform Resistance = 0
+Input Mesh Attract = 0
+Input Motion Drag = 0
+Rest length scale = 1
+Bendangle scale = 1
+mass = 1
+lift = 0.05
+drag = 0.05
+Tangential drag = 0.1
+damp = 0.5
+stretch damp = 0.1
+
+int $vertics_Jeans1[] = {487,71,701,763,477,515,937,948,1024,1019,682,726,713,674,590,592,721,499,723,722,479,474,451,508,452,506,465,599,457,463,749,669,712,620,454,678,692,603,614,555,600,570,540,560,538,622,611,553,644,626,551,579,609,632,629,584,605,585,602,587,660,636,589,557,577,581,574,536,534,568,594,543,625,572,563,582,558,546,617,619,548,566,578,544,596};
+
+// ------------  Jeans2 -------------
+//Original: half
+// Preset heavyDenim
+
+thickness = 0.1
+friction = 0.8
+Stretch Resistance = 50
+Compression Resistance = 20
+BendResistance = 0.4
+BendAngle Dropoff = 0.603
+Shear Resistance = 0
+Restitution Angle = 360
+Restitution Tension = 1000
+Rigidity = 0
+Deform Resistance = 0
+Input Mesh Attract = 0
+Input Motion Drag = 0
+Rest length scale = 1
+Bendangle scale = 1
+mass = 2
+lift = 0.05
+drag = 0.05
+Tangential drag = 0.1
+damp = 0.8
+stretch damp = 0.1
+
+// Without reduction:
+int $vertics_Jeans2[] = {5871,5821,5811,5849,5823,5866,5809,5815,5892,7162,7167,6209,6214,6219,6226,6195,6199,6203,6206,6237,6231,6256,6143,6129,6137,6191,6153,6254,6159,6168,6171,6246,6178,6179,6243,6189,5309,5301,5501,5415,5505,5422,5511,5272,5287,5388,5390,5264,5229,5392,5263,5037,5155,4967,8175,8180,8155,5026,5413,5339,5441,5384,5446,5862,5066,5319,5433,5136,5430,5380,5460,5351,5165,4990,5150,5180,5222,5376,5463,5425,6007,5968,5977,5541,5018,5354,5629,4945,5641,6079,5650,5492,5706,5690,8201,8154,8169,8145,5050};
+
+// With reduction:
+int $vertics_Jeans2[] = {3014,3094,3114,3126,3413,3406,3420,3429,3401,3446,3373,3869,3451,3363,3462,3339,3340,3438,3392,2706,2503,2510,2715,2586,2644,2647,2621,2490,2496,2615,3944,3960,3936,2911,2463,2300,2297,2595,2328,2323,2316,2584,2424,2665,3134,2191,2743,2163,2185};
+
+
+
+// ------------  T_Shirt -------------
+//With 75% reduction, component to component (remove part of the collar if needed): half
+//Preset: tshirt
+
+thickness = 0.1
+friction = 0.3
+Stretch Resistance = 35
+Compression Resistance = 10
+BendResistance = 0.1
+BendAngle Dropoff = 0.4
+Shear Resistance = 0
+Restitution Angle = 360
+Restitution Tension = 1000
+Rigidity = 0
+Deform Resistance = 0
+Input Mesh Attract = 0
+Input Motion Drag = 0
+Rest length scale = 1
+Bendangle scale = 1
+mass = 0.6
+lift = 0.05
+drag = 0.05
+Tangential drag = 0.1
+damp = 0.8 // If it seems insufficiently elastic, lower this value a little
+stretch damp = 0.1
+
+int $vertics_T_Shirt[] = {738,926,290,1110,929,932,1322,729,366,1227,399,1269,385,728,354,735,1180,760,1299,1300,1263,360,1043,1751,861,1757,1753,1105,1074,1202,757,369,762,370,1188,1167,1815,89,1024,1090,83,1515,1481,1550,454,1505,443,718,940,19,938,183,15,1853,1988,2123,2190,673,2188,2171,2137,639,2161,884,539,1966,591,939,788,441,1384,1927,589,1903,16,900,898,2244,1940,1921,2078,2116,2271,644,888,1935,1444,1458,459,1457,1496,1879,1893,1896,197,576,2265,2081,2253,572,2239,606,2145,2185,646,907,2194,674,109,1478,1380,2221,676,330,1295,167,1755,93,1432,77,435,1420};
+
+// ------------  T_shirt_male_obj -------------
+//Original: half
+thickness = 0.1
+friction = 0.3
+Stretch Resistance = 20
+Compression Resistance = 10
+BendResistance = 0.1
+BendAngle Dropoff = 0
+Shear Resistance = 0
+Restitution Angle = 360
+Restitution Tension = 1000
+Rigidity = 0
+Deform Resistance = 0
+Input Mesh Attract = 0
+Input Motion Drag = 0
+Rest length scale = 1
+Bendangle scale = 1
+mass = 1
+lift = 0.05
+drag = 0.05
+Tangential drag = 0.1
+damp = 0.05
+stretch damp = 0.1
+
+int $vertics_T_shirt_male_obj[] = {492,830,799,740,682,872,1064,31,956,155,147,219,5,423,895,57,865,1013,137,27,143,907,146,1069,161,838,1062,870,869,678,672,738,783,169,539,827,486,480,753,81,841,263,323,995,958,293,296,959,610,299,646,649,381,387,371,1004,585,217,185,68,978,653,611,656,590,390,818,476,1024,810,790,711,714,695,931,349,280,91,222,935,229,759,213,792,774,1030,469,476,794,1039,55,52,722,0,455,51,479,356,595,2121,952,1068};
+
+// ------------  T_shirt_long_sleeve -------------
+//With 98% reduction. The sleeves are missing because of a simulation problem: half
+int $vertics_T_shirt_long_sleeve[] = {490,802,1017,1319,1661,1950,2351,2619,2591,2557,2525,2459,2072,1616,623,860,1123,1571,1889,2329,2051,1679,1325,980,797,556,521,513,812,1125,1323,1680,1996,1904,1641,1347,1167,935,859,1101,1285,1711,2131,2236,1984,1630,1429,1005,1204,850,751,626,692,551,659,746,701,520,883,1159,1605,1713,1963,2093,1381,1139,1006,1152,1689,1744,1841,1702,1477,1179,1097,1565,1735,2017,1767,1161,881,778,1074,1532,1678,1994,2460,2052,1825,1547,1211,902,875,1206,1633,1828,2025,844,1046,1329,1620,1785,1971,2092,1698,1235,1316,968,1066,804,672,734,958,1230,1736,2115,2241,1886,1466,1114,705,964,1188,1599,1885,2364,580,667};
+
+// ------------  T_shirt_1e -------------
+
+int $vertics_T_Shirt_1e[] = {13,1059,1064,1080,233,627,912,991,983,56,202,979,909,563,176,508,35,289,10,522,82,186,92,543,1034,483,858,60,939,943,587,47,170,180,188,549,264,39,1031,541,878,504,865,59,137,3,284,304,12,333,481,855};
+
+// ------------  Sock -------------
+//Original Sock: half
+// Preset of: silk
+int $vertics_Sock[] = {3,28,27,41,42,2,13,195,188,71,85,146,156,82,68,192,187,56,51,16,149,150,309,273,272,30,97,63,23,257,143,307,21,48,246,297,9,81,101,306,46,61,92,219,173,258,234,212};
+
+// ------------  Jumper1 -------------
+//With Combine and 97% reduction: half
+int $vertics_Jumper1[] = {2059,399,28,407,452,444,803,4894,3803,4898,3800,486,474,801,458,439,454,500,483,465,466,488,504,493,490,418,479,480,462,415,884,919,917,940,936,939,930,912,911,895,4642,2381,2370,2362,2469,2358,2384,2896,2654,2883,2890,813,2847,2843,2833,2880,969,2935,2078,4585,4607,2028,2043,2072,2076,2061,2003,4192,4155,4134,2809,937,978,2317,2319};
+
+// ------------  Jumper2 -------------
+//With Combine and 94% reduction: half
+
+
+thickness = 0.1
+friction = 0.1
+Stretch Resistance = 16
+Compression Resistance = 1
+BendResistance = 0.1
+BendAngle Dropoff = 0
+Shear Resistance = 0
+Restitution Angle = 360
+Restitution Tension = 1000
+Rigidity = 0
+Deform Resistance = 0
+Input Mesh Attract = 0
+Input Motion Drag = 0
+Rest length scale = 1
+Bendangle scale = 1
+mass = 0.6
+lift = 0.05
+drag = 0.05
+Tangential drag = 0.1
+damp = 0.2
+stretch damp = 0.1
+
+int $vertics_Jumper2[] = {4266,4492,7236,7221,7209,7195,7187,4223,4220,4210,7108,7085,4204,4198,4825,4858,4255,4232,4300,4305,4239,4212,4243,4222,4234,3775,3734,3770,4214,7379,7596,5001,7573,7624,7586,7645,4246,4250,4845,4847,5975,4233,6276,4213,4183,4227,4900,6269,6011,5977,6008,6035,6307,6315,6491,6363,6308,6725,6785,6887,6867,6020,6073,6074,6001,6064,5980,4872,4892,6041,5037,4934,4949,5048,5015,4956,5053,3780,4930,4944,4955,5047,4843,7077,4655};
+
+// ------------  Jumper3 -------------
+//Original
+
+int $vertics_Jumper3[] = {126,123,1144,1145,1149,1155,1165,1170,1169,1172,1142,1153,1351,1289,1180,1285,1304,1296,1133,1164,1174,1175,896,379,516,928,229,1068,488,74,738,346,340,329,597,84,1345,631,769,82,732,88,1270,98,778,748,806,801,818,802,814,812,0,120,78,621,1199,694,105,569,12,7,807,15,69,564,54,52,703,47,600,135,131,717,761,766,756,61,700,575,889,560,730,764,137,4,42,168,1016,190,454,1064,943,855,390,325,281,177,459,1058,939,858,850,316,318,843,398,870,429,1082,994,305,384,268,272,249,442,1125,186,164,31,160,28,1044,435,231,893};
+
+// ------------  Jumper_5e -------------
+//Original
+
+int $vertics_Jumper_5e[] = {721,714,708,664,756,775,819,657,911,798,800,913,880,660,1130,762,802,704,737,731,613,746,742,739,692,771,615,687,611,769,786,826,617,811,831,621,628,1135,1136,1163,1204,1158,903,1213,848,1172,1247,1234,1195,1191,885,873,864,944,898,925,945,985,960,958,919,965,1021,1017,994,990,1053,1047,1058,1025,1013,821,765,768,896,866,887,679,1323};
+// ------------  Carpet1 -------------
+//Original
+//Material: silk
+
+int $vertics_Carpet1[] = {2,2490,2468,2405,2394,2171,2160,2130,2119,2115,2126,2156,2167,2390,2401,2464,2488,2496,2097,2105,2138,2146,2372,2380,2455,2476,2480,2034,2051,2069,2086,2345,2362,2422,2443,2447,2026,2043,2061,2078,2337,2354,2414,2432,2436,1926,1943,1997,2014,2225,2242,2302,2323,2327,1918,1935,1989,2006,2217,2234,2294,2312,2316,1891,1908,1962,1979,2190,2207,2261,2282,2286,1883,1900,1954,1971,2182,2199,2253,2271,2275};
+
+//int $vertics_Carpet1[] = {2,2429,2290,1860,1698,3,1557,1598,1763,2196,2335,2387,2132,2047,1913,1470,1323,1286,414,453,597,871,1136,1236,124,161,316,745,992,1092,1,934,651,224,62,0};
+
+// ------------  Carpet2 -------------
+//Original: polygon plane of 40x0x20 with 25 width subdivisions x 18 height subdivisions
+// Material: silk 
+
+int $vertics_Carpet2[] = {20,502,15,708,1554,718,1559,517,25,728,259,929,254,919,249,1167,1779,1177,1784,1187,493,1388,488,1378,483,1368,1774,909,1549,1430};
+
+// ------------  Carpet3 -------------
+//Original: polygon plane of 40x0x20 with 25 width subdivisions x 18 height subdivisions
+// Material: silk 
+
+int $vertics_Carpet3[] = {117,882,627,372,108,112,367,362,622,617,877,872,1127,1132,1137,122,377,632,887,1142,127,382,637,892,1147,1402,1397,1392,1387,1382,1377,1122,867,612,357,102};
+
+// ------------  Carpet4 -------------
+//Original: polygon plane of 40x0x20 with 25 width subdivisions x 18 height subdivisions
+// Material: silk 
+
+int $vertics_Carpet4[] = {152,146,139,133,128,434,740,1046,1352,1307,1314,1008,1001,695,389,395,702,402,708,964,1320,1172,662};
+
+// -----------------------------------
+
+
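+// Toggle-select every listed grasp vertex on the cloth so the point set can
+// be inspected in the viewport (assumes $cloth is defined in the session):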
+$instruction = "";
+
+for ($i=0;$i<size($vertics_Jeans1);$i++){
+    
+    $instruction = $cloth+".vtx[" +$vertics_Jeans1[$i]+"]";
+    select -tgl $instruction;
+    
+}
diff --git a/MEL/Prepare_environment_to_save_jeans_and_take_depth_images.mel b/MEL/Prepare_environment_to_save_jeans_and_take_depth_images.mel
new file mode 100644
index 0000000000000000000000000000000000000000..12d4533d8cfa8403a4f7b2b19e7aafa6f4bfdc34
--- /dev/null
+++ b/MEL/Prepare_environment_to_save_jeans_and_take_depth_images.mel
@@ -0,0 +1,139 @@
+//Import the jeans:
+
+string $cloth = "Jeans";
+file -import "/Users/ecorona/Downloads/jeans.obj";
+setAttr "defaultRenderGlobals.imageFormat" 4;
+string $cloth2 = $cloth;
+
+// Select the jeans:
+
+select -r $cloth ;
+
+// Convert the jeans to nCloth:
+
+createNCloth 0;
+setAttr "nClothShape1.stretchResistance" 1;
+setAttr "nClothShape1.compressionResistance" 0.1;
+setAttr "nClothShape1.bendResistance" 0.025;
+
+int $k[] = `polyEvaluate -vertex $cloth`;
+
+// Create 36 cameras:
+
+for( $i=0; $i<36; ++$i){
+    camera -centerOfInterest 5 -focalLength 35 -lensSqueezeRatio 1 -cameraScale 1 -horizontalFilmAperture 1.41732 -horizontalFilmOffset 0 -verticalFilmAperture 0.94488 -verticalFilmOffset 0 -filmFit Fill -overscan 1 -motionBlur 0 -shutterAngle 144 -nearClipPlane 0.1 -farClipPlane 10000 -orthographic 0 -orthographicWidth 30 -panZoomEnabled 0 -horizontalPan 0 -verticalPan 0 -zoom 1  -horizontalFieldOfView 43 -verticalFieldOfView 57; objectMoveCommand; cameraMakeNode 1 "";
+    float $x = 150*cos (($i*10)*3.14159/180);
+    float $z = 150*sin (($i*10)*3.14159/180);
+    move -r $x 0 $z;
+    float $rotation = (90 - $i*10);
+    rotate -r -os -fo 0 $rotation 0;
+    string $instruction = "cameraShape"+($i+1) + ".mask";
+    setAttr $instruction 0;
+    string $instruction = "cameraShape"+($i+1) + ".locatorScale";
+    setAttr $instruction 10;
+}
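+
+// The loop above rings 36 cameras around the origin at radius 150, one every
+// 10 degrees, each rotated to face the hanging garment.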
+
+// Create DepthLayer
+
+createRenderLayer -noRecurse -name DepthLayer  -global;
+
+// --------------------------------------------------------------------------------
+
+// Manual step:
+// Channel Box / Layer Editor tab -> right click on DepthLayer -> Attributes -> Presets -> Luminance Depth
+// Then execute the following instead of editing the connections by hand:
+
+    disconnectAttr samplerInfo1.cameraNearClipPlane setRange1.oldMinX;
+    disconnectAttr samplerInfo1.cameraFarClipPlane setRange1.oldMaxX;
+
+    setAttr "setRange1.oldMinX" 120;
+    setAttr "setRange1.oldMaxX" 180;
+    setAttr "setRange1.oldMinY" 0;
+    setAttr "setRange1.oldMaxY" 0;
+    setAttr "setRange1.oldMinZ" 0;
+    setAttr "setRange1.oldMaxZ" 0;
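+
+    // With the clip-plane connections broken, the Luminance Depth preset's
+    // setRange node now maps camera distances of 120-180 scene units onto
+    // the rendered luminance range (an assumption about the preset's wiring).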
+    
+// Or by hand: select the setRange1 tab -> right click on Old Min -> Break connection and type the Old Min value used above; do the same for Old Max.
+// Old Max could be 6500 to get an output in millimeters
+
+
+// --------------------------------------------------------------------------------
+
+// Setting drag:
+
+$aire = 0; // "aire" (air): set to 1 to apply a drag-field gust at the start
+drag -pos 0 0 0 -m 0.05 -att 1 -dx 0 -dy 0 -dz 0 -ud 0  -mxd -1  -vsh none -vex 0 -vof 0 0 0 -vsw 360 -tsr 0.5 ;
+
+// Open the file to write the position and orientation of interesting vertices
+$fileid = fopen("/Users/ecorona/Documents/Database/"+$cloth2+".csv");
+
+// Start the captures:
+
+for( $x = 0; $x < size($vertics_T_Shirt_1e); $x = $x+1){ //Condition: $x<$k[0]
+
+// Move jeans to the middle:
+    currentTime 1;
+    string $instruction = $cloth+".vtx[" + $vertics_T_Shirt_1e[$x] + "]";
+    float $pos[] = `pointPosition $instruction`;
+    select -r $cloth;
+    move -r (-1*$pos[0]) (58-1*$pos[1]) (-1*$pos[2]) ;
+    select -r $instruction; //$cloth.vtx[$vertics_T_Shirt_1e[$x]] ;
+    
+// Transform constraint:
+
+    createNConstraint transform 0;
+
+// Apply the drag-field gust if enabled.
+
+    if($aire == 1){
+        setAttr "dragField1.magnitude" -40;
+    }
+
+// Step the simulation time (it has to be advanced one frame at a time):
+
+    for( $t=2; $t<1000; ++$t){
+        currentTime $t ;
+        if ($aire == 1 && $t == 100){
+            setAttr "dragField1.magnitude" 0;
+        }
+    }
+
+// Iterate cameras to take photos and save information:
+
+    for( $c=1; $c<=36; ++$c){
+        string $cam = "camera" + $c;
+        string $final_name = "/Users/ecorona/Documents/Database/" + $cloth2 + "_" + $x + "_" + $c + ".tif";
+        $direction = `render  $cam`; //render -layer DepthLayer $cam;
+        sysFile -move $final_name $direction;
+
+//Waist vertex 1 is 1018; for the T-shirt it is 1146, for the jumper 2733
+        string $instruction = $cloth + ".vtx[2733]"; 
+        $Vertex1_orientation = `polyNormalPerVertex -q -xyz $instruction`; 
+        string $instruction = $cloth+".vtx[2733]";
+        float $pos1[] = `pointPosition $instruction`;
+        $orientation_x1 = $Vertex1_orientation[0];
+        $orientation_y1 = $Vertex1_orientation[1];
+        $orientation_z1 = $Vertex1_orientation[2];
+        
+        
+//Waist vertex 2 is 979; for the T-shirt it is 62, for the jumper 2731
+        string $instruction = $cloth + ".vtx[2731]"; 
+        $Vertex2_orientation = `polyNormalPerVertex -q -xyz $instruction`; 
+        string $instruction = $cloth+".vtx[2731]"; 
+        float $pos2[] = `pointPosition $instruction`;
+        $orientation_x2 = $Vertex2_orientation[0];
+        $orientation_y2 = $Vertex2_orientation[1];
+        $orientation_z2 = $Vertex2_orientation[2];
+        // Save the position and orientation of the points to the .csv file
+        fprint $fileid($x+","+$c+","+$pos1[0]+","+$pos1[1]+","+$pos1[2]+","+$orientation_x1+","+$orientation_y1+","+$orientation_z1+","+$pos2[0]+","+$pos2[1]+","+$pos2[2]+","+$orientation_x2+","+$orientation_y2+","+$orientation_z2+"\n");
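+        // CSV row layout: pose index, camera, then x,y,z position and
+        // per-vertex normal (nx,ny,nz) for each of the two tracked vertices.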
+
+    }
+    
+// Delete constraint:
+    
+    string $instruction = "dynamicConstraint1";
+    select -r $instruction;
+    doDelete;
+}
+
+fclose $fileid;
diff --git a/MEL/Prepare_environment_to_save_jeans_info.mel b/MEL/Prepare_environment_to_save_jeans_info.mel
new file mode 100644
index 0000000000000000000000000000000000000000..142e940bdb51b9a6035cc3e96993fe513b476b38
--- /dev/null
+++ b/MEL/Prepare_environment_to_save_jeans_info.mel
@@ -0,0 +1,57 @@
+//Import the jeans:
+
+string $cloth = "Jeans";
+file -import "/Users/ecorona/Downloads/jeans.obj";
+setAttr "defaultRenderGlobals.imageFormat" 4;
+string $cloth2 = $cloth;
+
+// Select the jeans:
+
+select -r $cloth ;
+
+// Convert the jeans to nCloth:
+
+createNCloth 0;
+setAttr "nClothShape1.stretchResistance" 1;
+setAttr "nClothShape1.compressionResistance" 0.1;
+setAttr "nClothShape1.bendResistance" 0.025;
+
+int $k[] = `polyEvaluate -vertex $cloth`;
+
+// Create 36 cameras:
+
+for( $i=0; $i<36; ++$i){
+    camera -centerOfInterest 5 -focalLength 35 -lensSqueezeRatio 1 -cameraScale 1 -horizontalFilmAperture 1.41732 -horizontalFilmOffset 0 -verticalFilmAperture 0.94488 -verticalFilmOffset 0 -filmFit Fill -overscan 1 -motionBlur 0 -shutterAngle 144 -nearClipPlane 0.1 -farClipPlane 10000 -orthographic 0 -orthographicWidth 30 -panZoomEnabled 0 -horizontalPan 0 -verticalPan 0 -zoom 1  -horizontalFieldOfView 43 -verticalFieldOfView 57; objectMoveCommand; cameraMakeNode 1 "";
+    float $x = 150*cos (($i*10)*3.14159/180);
+    float $z = 150*sin (($i*10)*3.14159/180);
+    move -r $x 0 $z;
+    float $rotation = (90 - $i*10);
+    rotate -r -os -fo 0 $rotation 0;
+    string $instruction = "cameraShape"+($i+1) + ".mask";
+    setAttr $instruction 0;
+    string $instruction = "cameraShape"+($i+1) + ".locatorScale";
+    setAttr $instruction 10;
+}
+
+// Create DepthLayer
+
+createRenderLayer -noRecurse -name DepthLayer  -global;
+
+// --------------------------------------------------------------------------------
+
+// Manual step:
+// Channel Box / Layer Editor tab -> right click on DepthLayer -> Attributes -> Presets -> Luminance Depth
+// Then execute the following instead of editing the connections by hand:
+
+    disconnectAttr samplerInfo1.cameraNearClipPlane setRange1.oldMinX;
+    disconnectAttr samplerInfo1.cameraFarClipPlane setRange1.oldMaxX;
+
+    setAttr "setRange1.oldMinX" 120;
+    setAttr "setRange1.oldMaxX" 180;
+    setAttr "setRange1.oldMinY" 0;
+    setAttr "setRange1.oldMaxY" 0;
+    setAttr "setRange1.oldMinZ" 0;
+    setAttr "setRange1.oldMaxZ" 0;
+    
+// Or by hand: select the setRange1 tab -> right click on Old Min -> Break connection and type the Old Min value used above; do the same for Old Max.
+// Old Max could be 6500 to get an output in millimeters
diff --git a/MEL/Save_Images_and_point_info_from_jeans.mel b/MEL/Save_Images_and_point_info_from_jeans.mel
new file mode 100644
index 0000000000000000000000000000000000000000..2e2f4b6ada9cfe5434b11ff58cd9d32033fcd175
--- /dev/null
+++ b/MEL/Save_Images_and_point_info_from_jeans.mel
@@ -0,0 +1,103 @@
+// Process to save images and interesting points in Maya, for jeans 1
+// (assumes $cloth, $cloth2 and $vertics_Jeans1 are defined by the setup scripts):
+$point1 = 24;
+$point2 = 487;
+$face1_approach = 918;
+$face2_approach = 886;
+
+// Setting drag:
+
+$aire = 0;
+drag -pos 0 0 0 -m 0.05 -att 1 -dx 0 -dy 0 -dz 0 -ud 0  -mxd -1  -vsh none -vex 0 -vof 0 0 0 -vsw 360 -tsr 0.5 ;
+
+// Open the file to write the position and orientation of interesting vertices
+$fileid = fopen("/Users/ecorona/Documents/Database/"+$cloth2+".csv");
+
+// Start the captures:
+
+for( $x = 0; $x < size($vertics_Jeans1); $x = $x+1){ //Condition: $x<$k[0]
+
+// Move jeans to the middle:
+    currentTime 1;
+    string $instruction = $cloth+".vtx[" + $vertics_Jeans1[$x] + "]";
+    float $pos[] = `pointPosition $instruction`;
+    select -r $cloth;
+    move -r (-1*$pos[0]) (58-1*$pos[1]) (-1*$pos[2]) ;
+    select -r $instruction;
+    
+// Transform constraint:
+
+    createNConstraint transform 0;
+
+// Apply the drag-field gust if enabled.
+
+    if($aire == 1){
+        setAttr "dragField1.magnitude" -40;
+    }
+
+// Step the simulation time (it has to be advanced one frame at a time):
+
+    for( $t=2; $t<500; ++$t){
+        currentTime $t ;
+        if ($aire == 1 && $t == 100){
+            setAttr "dragField1.magnitude" 0;
+        }
+    }
+
+// Iterate cameras to take photos and save information:
+
+    for( $c=1; $c<=36; ++$c){
+        string $cam = "camera" + $c;
+        string $final_name = "/Users/ecorona/Documents/Database/" + $cloth2 + "_" + $x + "_" + $c + ".tif";
+        $direction = `render  $cam`; //render -layer DepthLayer $cam;
+        sysFile -move $final_name $direction;
+
+//First point:
+        // Position x,y,z
+        float $pos1[] = `pointPosition Jeans.vtx[487]`;
+        
+        // The grasp orientation can be found from two vertices, as the position difference from the inner vertex to the outer one.
+        float $pos1_2[] = `pointPosition Jeans.vtx[996]`;
+        $orientation_x1 = $pos1[0]-$pos1_2[0]; 
+        $orientation_y1 = $pos1[1]-$pos1_2[1]; 
+        $orientation_z1 = $pos1[2]-$pos1_2[2];         
+        
+        // Orientation of the face to be grasped, to know how to place the gripper
+        $Face_orientation = `polyInfo -faceNormals Jeans.f[918]`; 
+        string $buffer[];
+        tokenize $Face_orientation[0] " " $buffer;
+        $approach_x1 = float($buffer[2]);
+        $approach_y1 = float($buffer[3]);
+        $approach_z1 = float($buffer[4]);
+        
+//Second point:
+        // Position x,y,z
+        float $pos2[] = `pointPosition Jeans.vtx[24]`;
+        
+        // The grasp orientation can be found from two vertices, as the position difference from the inner vertex to the outer one.
+        float $pos2_2[] = `pointPosition Jeans.vtx[965]`;
+        $orientation_x2 = $pos2[0] - $pos2_2[0]; 
+        $orientation_y2 = $pos2[1] - $pos2_2[1]; 
+        $orientation_z2 = $pos2[2] - $pos2_2[2]; 
+
+        // Orientation of the face to be grasped, to know how to place the gripper
+        $Face_orientation = `polyInfo -faceNormals Jeans.f[886]`;  
+        string $buffer[];
+        tokenize $Face_orientation[0] " " $buffer;
+        $approach_x2 = float($buffer[2]);
+        $approach_y2 = float($buffer[3]);
+        $approach_z2 = float($buffer[4]);
+        
+        
+        // Save everything to the csv file:
+        fprint $fileid($x+","+$c+","+$pos1[0]+","+$pos1[1]+","+$pos1[2]+","+$orientation_x1+","+$orientation_y1+","+$orientation_z1+","+$approach_x1+","+$approach_y1+","+$approach_z1+","+$pos2[0]+","+$pos2[1]+","+$pos2[2]+","+$orientation_x2+","+$orientation_y2+","+$orientation_z2+","+$approach_x2+","+$approach_y2+","+$approach_z2+"\n");
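+        // CSV row layout: pose index, camera, then per grasp point: position
+        // (3), grasp-direction vector (3) and face-normal approach vector (3).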
+
+    }
+    
+// Delete constraint:
+    
+    string $instruction = "dynamicConstraint1";
+    select -r $instruction;
+    doDelete;
+}
+
+fclose $fileid;
diff --git a/MEL/Save_depth_images_from_two_garments_in_the_same_grasping_point.mel b/MEL/Save_depth_images_from_two_garments_in_the_same_grasping_point.mel
new file mode 100644
index 0000000000000000000000000000000000000000..955b5ca7a66e2b42eefe5da08c399c20b9b63ce2
--- /dev/null
+++ b/MEL/Save_depth_images_from_two_garments_in_the_same_grasping_point.mel
@@ -0,0 +1,156 @@
+//Import first clothing:
+
+string $cloth = "Jeans";
+file -import "/Users/ecorona/Downloads/jeans.obj";
+setAttr "defaultRenderGlobals.imageFormat" 32;
+
+// Select first clothing: 
+
+select -r $cloth ;
+
+// Set first clothing to ncloth:
+
+createNCloth 0;
+setAttr "nClothShape1.stretchResistance" 1;
+setAttr "nClothShape1.compressionResistance" 0.1;
+setAttr "nClothShape1.bendResistance" 0.025;
+
+int $k[] = `polyEvaluate -vertex $cloth`;
+
+//Import second clothing:
+
+string $cloth2 = "null_";
+file -import "/Users/ecorona/Documents/Clothes/Coats/Coat1original.obj";
+setAttr "null_.scaleX" 70;
+setAttr "null_.scaleY" 70;
+setAttr "null_.scaleZ" 70;
+
+// Select second clothing and prepare it: 
+
+select -r $cloth2 ;
+polyCleanupArgList 3 { "0","1","1","0","0","0","0","0","0","1e-05","1","1e-05","0","1e-05","0","1","0" };
+hilite  ;
+select -r $cloth2 ;
+polyReduce -ver 1 -trm 0 -p 70 -vct 0 -tct 0 -shp 0 -keepBorder 1 -keepMapBorder 1 -keepColorBorder 1 -keepFaceGroupBorder 1 -keepHardEdge 1 -keepCreaseEdge 1 -keepBorderWeight 0.5 -keepMapBorderWeight 0.5 -keepColorBorderWeight 0.5 -keepFaceGroupBorderWeight 0.5 -keepHardEdgeWeight 0.5 -keepCreaseEdgeWeight 0.5 -useVirtualSymmetry 0 -symmetryTolerance 0.01 -sx 0 -sy 1 -sz 0 -sw 0 -preserveTopology 1 -keepQuadsWeight 1 -vertexMapName "" -replaceOriginal 1 -cachingReduce 1 -ch 1 "null_";
+
+// Set second clothing to ncloth:
+
+createNCloth 0;
+setAttr "nClothShape2.stretchResistance" 3;
+setAttr "nClothShape2.compressionResistance" 1;
+setAttr "nClothShape2.bendResistance" 0.25;
+
+int $k2[] = `polyEvaluate -vertex $cloth2`;
+
+// Create 36 cameras:
+
+for( $i=0; $i<36; ++$i){
+    camera -centerOfInterest 5 -focalLength 35 -lensSqueezeRatio 1 -cameraScale 1 -horizontalFilmAperture 1.41732 -horizontalFilmOffset 0 -verticalFilmAperture 0.94488 -verticalFilmOffset 0 -filmFit Fill -overscan 1 -motionBlur 0 -shutterAngle 144 -nearClipPlane 0.1 -farClipPlane 10000 -orthographic 0 -orthographicWidth 30 -panZoomEnabled 0 -horizontalPan 0 -verticalPan 0 -zoom 1; objectMoveCommand; cameraMakeNode 1 "";
+    float $x = 150*cos (($i*10)*3.14159/180);
+    float $y = 150*sin (($i*10)*3.14159/180);
+    move -r $x 0 $y;
+    float $rotation = (90 - $i*10);
+    rotate -r -os -fo 0 $rotation 0;
+    string $instruction = "cameraShape"+($i+1) + ".mask";
+    setAttr $instruction 0;
+    string $instruction = "cameraShape"+($i+1) + ".locatorScale";
+    setAttr $instruction 10;
+}
+
+// Create DepthLayer
+
+createRenderLayer -noRecurse -name DepthLayer  -global;
+
+// --------------------------------------------------------------------------------
+
+// Manual step:
+// Channel Box / Layer Editor tab -> right click on DepthLayer -> Attributes -> Presets -> Luminance Depth
+// Then execute the following instead of editing the connections by hand:
+
+    disconnectAttr samplerInfo1.cameraNearClipPlane setRange1.oldMinX;
+    disconnectAttr samplerInfo1.cameraFarClipPlane setRange1.oldMaxX;
+
+    setAttr "setRange1.oldMinX" 110;
+    setAttr "setRange1.oldMaxX" 180;
+    setAttr "setRange1.oldMinY" 0;
+    setAttr "setRange1.oldMaxY" 0; 
+    setAttr "setRange1.oldMinZ" 0;
+    setAttr "setRange1.oldMaxZ" 0;
+    
+// Or by hand: select the setRange1 tab -> right click on Old Min -> Break connection, write 110; right click on Old Max -> Break connection, write 180.
+// Old Max could be 6500 to get an output in millimeters
+
+
+// --------------------------------------------------------------------------------
+
+//Setting drag:
+
+$aire = 0;
+drag -pos 0 0 0 -m 0.05 -att 1 -dx 0 -dy 0 -dz 0 -ud 0  -mxd -1  -vsh none -vex 0 -vof 0 0 0 -vsw 360 -tsr 0.5 ;
+
+//Start the captures:
+
+for( $x = 0; $x < 2; ++$x){ //Condition: $x<$k[0]
+
+// Move first cloth to the middle-20:
+
+    string $instruction = $cloth+".vtx["+$x + "]";
+    float $pos[] = `pointPosition $instruction`;
+    select -r $cloth;
+    move -r (-1*$pos[0]) (30-1*$pos[1]) (-20-1*$pos[2]) ;
+    select -r $cloth.vtx[$x];
+
+// Transform constraint:
+
+    createNConstraint transform 0;
+
+// Move second cloth to the middle+20 ($x2, the grasp vertex on the second
+// cloth, must be defined beforehand, e.g. $x2 = 1546;):
+
+    string $instruction = $cloth2+".vtx["+$x2 + "]";
+    float $pos[] = `pointPosition $instruction`;
+    select -r $cloth2;
+    move -r (-1*$pos[0]) (30-1*$pos[1]) (20-1*$pos[2]) ;
+    select -r $cloth2.vtx[$x2] ;
+
+// Transform constraint:
+
+    createNConstraint transform 0;
+
+// Apply the drag-field gust if enabled.
+
+    if($aire == 1){
+        setAttr "dragField1.magnitude" -40;
+    }
+
+// Step the simulation time (it has to be advanced one frame at a time):
+
+    for( $t=0; $t<300; ++$t){ // t<660 would be enough
+        currentTime $t ;
+        if ($t <= 200 && $t%10 == 0){
+            select dynamicConstraint1;
+            move -r 0 0 1;
+            select dynamicConstraint2;
+            move -r 0 0 -1;
+        }
+        if ($aire == 1 && $t == 100){
+            setAttr "dragField1.magnitude" 0;
+        }
+    }
+
+// Iterate cameras to take photos:
+
+    for( $c=1; $c<=36; ++$c){
+        string $cam = "camera" + $c;
+        string $final_name = "/Users/ecorona/Documents/Database/" + $cloth + "_" + $x + "_" + $c + "proba3.tif";
+        render -layer DepthLayer $cam;
+        render -layer ColorLayer $cam; // assumes a ColorLayer render layer also exists
+        sysFile -move $final_name "/Users/ecorona/Documents/maya/projects/default/images/tmp/DepthLayer/untitled.tif";
+    }
+
+// Delete constraints:
+
+    select -r dynamicConstraint2 ;
+    doDelete;
+    select -r dynamicConstraint1 ;
+    doDelete;
+}
diff --git a/MEL/Save_depth_images_from_two_garments_one_over_the_other.mel b/MEL/Save_depth_images_from_two_garments_one_over_the_other.mel
new file mode 100644
index 0000000000000000000000000000000000000000..48bf6d3416cf57641b5de7f5463c846bcda58974
--- /dev/null
+++ b/MEL/Save_depth_images_from_two_garments_one_over_the_other.mel
@@ -0,0 +1,159 @@
+//Import first clothing:
+
+string $cloth = "Jeans";
+file -import "/Users/ecorona/Downloads/jeans.obj";
+setAttr "defaultRenderGlobals.imageFormat" 32;
+
+// Select first clothing: 
+
+select -r $cloth ;
+
+// Set first clothing to ncloth:
+
+createNCloth 0;
+setAttr "nClothShape1.stretchResistance" 1;
+setAttr "nClothShape1.compressionResistance" 0.1;
+setAttr "nClothShape1.bendResistance" 0.025;
+
+int $k[] = `polyEvaluate -vertex $cloth`;
+
+//Import second clothing:
+
+string $cloth2 = "null_";
+file -import "/Users/ecorona/Documents/Clothes/Coats/Coat1original.obj";
+setAttr "null_.scaleX" 70;
+setAttr "null_.scaleY" 70;
+setAttr "null_.scaleZ" 70;
+
+// Select second clothing and prepare it: 
+
+select -r $cloth2 ;
+polyCleanupArgList 3 { "0","1","1","0","0","0","0","0","0","1e-05","1","1e-05","0","1e-05","0","1","0" };
+select -r $cloth2 ;
+polyReduce -ver 1 -trm 0 -p 70 -vct 0 -tct 0 -shp 0 -keepBorder 1 -keepMapBorder 1 -keepColorBorder 1 -keepFaceGroupBorder 1 -keepHardEdge 1 -keepCreaseEdge 1 -keepBorderWeight 0.5 -keepMapBorderWeight 0.5 -keepColorBorderWeight 0.5 -keepFaceGroupBorderWeight 0.5 -keepHardEdgeWeight 0.5 -keepCreaseEdgeWeight 0.5 -useVirtualSymmetry 0 -symmetryTolerance 0.01 -sx 0 -sy 1 -sz 0 -sw 0 -preserveTopology 1 -keepQuadsWeight 1 -vertexMapName "" -replaceOriginal 1 -cachingReduce 1 -ch 1 "null_";
+select -r null_ ;
+sets -e -forceElement lambert2SG;
+
+// Set second clothing to ncloth:
+
+createNCloth 0;
+setAttr "nClothShape2.stretchResistance" 1.5;
+setAttr "nClothShape2.compressionResistance" 0.5;
+setAttr "nClothShape2.bendResistance" 0.1;
+
+int $k2[] = `polyEvaluate -vertex $cloth2`;
+
+// Create 36 cameras:
+
+for( $i=0; $i<36; ++$i){
+    camera -centerOfInterest 5 -focalLength 35 -lensSqueezeRatio 1 -cameraScale 1 -horizontalFilmAperture 1.41732 -horizontalFilmOffset 0 -verticalFilmAperture 0.94488 -verticalFilmOffset 0 -filmFit Fill -overscan 1 -motionBlur 0 -shutterAngle 144 -nearClipPlane 0.1 -farClipPlane 10000 -orthographic 0 -orthographicWidth 30 -panZoomEnabled 0 -horizontalPan 0 -verticalPan 0 -zoom 1; objectMoveCommand; cameraMakeNode 1 "";
+    float $x = 150*cos (($i*10)*3.14159/180);
+    float $y = 150*sin (($i*10)*3.14159/180);
+    move -r $x 0 $y;
+    float $rotation = (90 - $i*10);
+    rotate -r -os -fo 0 $rotation 0;
+    string $instruction = "cameraShape"+($i+1) + ".mask";
+    setAttr $instruction 0;
+    string $instruction = "cameraShape"+($i+1) + ".locatorScale";
+    setAttr $instruction 10;
+}
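+
+// Example: for $i = 9 the camera lands at (150*cos(90deg), 0, 150*sin(90deg)) = (0, 0, 150)
+// with a Y rotation of 90 - 9*10 = 0 degrees, so the 36 cameras sample a full circle
+// of radius 150 around the garment at 10-degree steps.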
+
+// Create DepthLayer
+
+createRenderLayer -noRecurse -name DepthLayer  -global;
+
+// ------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+// Now, by hand:
+// Channel Box / Layer Editor tab -> right-click on DepthLayer -> Attributes -> Presets -> Luminance Depth
+// Then, instead of editing setRange1 by hand, execute:
+
+    disconnectAttr samplerInfo1.cameraNearClipPlane setRange1.oldMinX;
+    disconnectAttr samplerInfo1.cameraFarClipPlane setRange1.oldMaxX;
+
+    setAttr "setRange1.oldMinX" 110;
+    setAttr "setRange1.oldMaxX" 180;
+    setAttr "setRange1.oldMinY" 0;
+    setAttr "setRange1.oldMaxY" 0; 
+    setAttr "setRange1.oldMinZ" 0;
+    setAttr "setRange1.oldMaxZ" 0;
+    
+// Or by hand: select the setRange1 tab -> right-click on Old Min -> Break connection, write 110; right-click on Old Max -> Break connection, write 180.
+// Max could be 6500 to get an output in millimeters.
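+
+// Note on the depth encoding (an interpretation of these constants, not stated in the original):
+// with the Luminance Depth preset remapped to oldMin/oldMax = 110/180, pixel intensity encodes
+// the camera distance over that range. The Python loaders later invert the 16-bit TIFFs with
+// 1.2 + 0.6*(1 - I/65279), mapping intensities 0..65279 back to distances of roughly 1.8 down
+// to 1.2 meters.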
+
+
+// ------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+//Setting drag:
+
+$aire = 0;
+drag -pos 0 0 0 -m 0.05 -att 1 -dx 0 -dy 0 -dz 0 -ud 0  -mxd -1  -vsh none -vex 0 -vof 0 0 0 -vsw 360 -tsr 0.5 ;
+
+//Start the captures:
+int $x2 = 1546;
+
+for( $x = 0; $x < 2; ++$x){ // for the full run, use the condition $x < $k[0]
+
+    currentTime 0 ;
+    string $instruction = $cloth+".vtx["+$x + "]";
+    float $pos[] = `pointPosition $instruction`;
+    select -r $cloth;
+    move -r (-1*$pos[0]) (-30-1*$pos[1]) (-1*$pos[2]) ;
+    select -r $cloth.vtx[$x];
+
+// Transform constraint:
+
+    createNConstraint transform 0;
+
+// Move second cloth to the middle+20:
+
+    string $instruction = $cloth2+".vtx["+$x2 + "]";
+    float $pos[] = `pointPosition $instruction`;
+    select -r $cloth2;
+    move -r (-1*$pos[0]) (20-1*$pos[1]) (-1*$pos[2]) ;
+    select -r $cloth2.vtx[$x2] ;
+
+// Transform constraint:
+
+    createNConstraint transform 0;
+
+// Turn on local wind on the second cloth to start:
+
+    setAttr "nClothShape2.localWindY" 35;
+
+// Set the drag field, if enabled:
+
+    if($aire == 1){
+        setAttr "dragField1.magnitude" -40;
+    }
+
+// Change the time value (it has to be done one frame at a time):
+
+    for( $t=0; $t<300; ++$t){ // t<660 would be enough
+        currentTime $t ;
+        if ($t <= 250 && $t%5 == 0){
+            select dynamicConstraint1;
+            move -r 0 1 0;
+      //      select dynamicConstraint2;
+        //    move -r 0 -1 0;
+        } else if ($t == 251){
+            setAttr "nClothShape2.localWindY" 0;
+        }
+        if ($aire == 1 && $t == 100){
+            setAttr "dragField1.magnitude" 0;
+        }
+    }
+    
+    for( $c=1; $c<=36; ++$c){
+        string $cam = "camera" + $c;
+        string $final_name = "/Users/ecorona/Documents/Database/" + $cloth + "_" + $x + "_" + $c + "proba3.tif";
+        render -layer DepthLayer $cam;
+        render -layer ColorLayer $cam;
+        sysFile -move $final_name "/Users/ecorona/Documents/maya/projects/default/images/tmp/DepthLayer/untitled.tif";
+    }
+    
+    select -r dynamicConstraint2 ;
+    doDelete;
+    select -r dynamicConstraint1 ;
+    doDelete;
+}
\ No newline at end of file
diff --git a/Pretreat_data/conversio_potent_2na_xarxa.py b/Pretreat_data/conversio_potent_2na_xarxa.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d8198b9cf38dd3d013500f494d6fe74581a71ff
--- /dev/null
+++ b/Pretreat_data/conversio_potent_2na_xarxa.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python
+
+import os.path 
+import numpy as np
+import numpy
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import cv2
+import pandas as pd
+import sys
+import math
+import glob
+import math3d
+
+directory = '../Documents/Database/Jumper/Jumper_first_point_grasped/train_and_valid/' #'../../../Images/manipulacio/Jeans_simulation/'
+list_directories = glob.glob(directory+'*')
+
+array_X = []
+array_Y = []
+array_orientations = []
+array_orient_vectors_original = []
+directions_file_opened = []
+
+print(list_directories)
+
+for i in range(np.shape(list_directories)[0]):
+
+	last_length = 0
+	imatges = glob.glob(list_directories[i]+'/*.tif') #"Images/Towel/Towel_cuadrada_58cm/"
+	imatges.sort()
+
+	arxiu_csv = glob.glob(list_directories[i]+'/*.csv')
+
+	arxiu = pd.read_csv(arxiu_csv[0],header=None) # maybe it will be arxiu_csv[0]
+
+        print(arxiu.shape)
+
+	num_cameras = 36
+	num_poses = 64
+	files_read = 0
+	
+	poses = arxiu.iloc[:,0]
+
+	cloth = str.split(str.split(imatges[0],'/')[-1],'_')
+	if len(cloth)==3:
+		cloth = cloth[0]
+	elif len(cloth)==4:
+		cloth = cloth[0]+'_'+cloth[1]
+
+	for j in range(num_poses):
+		join_csv_with_img = (poses==j)*1
+
+		mat = arxiu.iloc[numpy.argmax(join_csv_with_img):numpy.argmax(join_csv_with_img)+join_csv_with_img.sum()]
+		mat = mat.as_matrix()
+
+		# Point 1 is vertex 1514:
+		id_177 = numpy.argmax( mat[:,2] == 1514 )
+		# Point 2 is vertex 683 (note: the code matches vertex 168; the variable names appear historical)
+		id_683 = numpy.argmax( mat[:,2] == 168 )
+
+		# We want the point furthest from the grasping point, located at (0,60,0) in Maya coordinates
+		index_punt = numpy.argmax( (mat[id_177,3]**2 + (mat[id_177,4]-60)**2 + mat[id_177,5]**2, mat[ id_683,3]**2 + (mat[id_683,4]-60)**2 + mat[id_683,5]**2) )
+
+		# Punt 1:
+		X_point1_from_world = [mat[ id_177,3], mat[ id_683,3] ][index_punt]
+		Y_point1_from_world = [mat[ id_177,4], mat[ id_683,4] ][index_punt]
+		Z_point1_from_world = [mat[ id_177,5], mat[ id_683,5] ][index_punt]
+
+		# Translation matrix from world to cameras
+		Homogenious_matrix = numpy.eye(4)
+		translation_vector = [0,-60,0]
+		Homogenious_matrix[0:3,3] = translation_vector
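+		# The rotation block (filled in per camera below) completes a homogeneous
+		# world-to-camera transform; together with the [0,-60,0] translation it
+		# re-centres points on the grasping point at (0,60,0).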
+
+		for z in range(1,num_cameras+1):
+
+			if (os.path.isfile( list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z)+'.tif' )):
+                        	img = cv2.imread( list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z)+'.tif' ,-1)
+
+				image = img[:,80:240,0] 
+
+				if( sum(sum(image>0)) > 500 ): # if there are more than 500 valid pixels, the image should be correct
+		
+					image = (image>0)*(1.2+0.6*(1-np.float32(image)/65279))
+					
+					# Getting transformations for this camera(z): (Normal image)
+					c = math.cos((z-1)*10*2*math.pi/360)
+					s = math.sin((z-1)*10*2*math.pi/360)
+
+					Matrix_rotation = [[-s,0,c], [0,1,0], [-c,0,-s]]
+					
+					Homogenious_matrix[0:3,0:3] = Matrix_rotation
+					
+					# Converting from world in Maya to camera: (Normal image)
+					Pos_point1_from_camera = numpy.dot(Homogenious_matrix,[X_point1_from_world,Y_point1_from_world,Z_point1_from_world,1])
+
+					# Saving images in X: (Normal image)		
+					array_X.append(image)
+
+					# Saving values in Y: (Normal image)
+					array_Y.append([Pos_point1_from_camera[0],Pos_point1_from_camera[1],Pos_point1_from_camera[2]]) 
+
+                                        directions_file_opened.append(list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z))
+
+#				else: # otherwise the image is certainly invalid; report it and skip to the next one:
+#					print('Invalid image: ' + list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z)+'.tif')
+				
+				inform = str((j)*100/np.shape(poses)[0]) + "% Done"
+				sys.stdout.write('\b' * last_length)    # go back
+				sys.stdout.write(' ' * last_length)     # clear the previous output
+				sys.stdout.write('\b' * last_length)    # reposition
+				sys.stdout.write(inform)
+				sys.stdout.flush()
+				last_length = len(inform)
+				files_read += 1
+				
+                        else:
+                                print('Not found: ' + list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z)+'.tif')
+#                                sys.exit()
+
+
+        print('\nDataset created from '+ list_directories[i]+'. Dataset.shape: ' + str(np.shape(array_X))) # It should be [num_vertexs,num_cameras,240,160]
+
+print('Saving...')
+Dataset_X = np.array(array_X)
+Dataset_Y = np.array(array_Y)
+directions_file_opened = np.array(directions_file_opened)
+print(directions_file_opened.shape)
+
+print('Done. Total examples for this object: ' + str(np.shape(Dataset_X)[0]))
+
+'''
+if (Dataset_X.size > 0):
+	indexs = numpy.arange(len(Dataset_X))
+#	numpy.random.shuffle(indexs)
+	np.save('Datasets/X_train.npy',Dataset_X[indexs[0:int(len(Dataset_X)*0.7)]])
+	np.save('Datasets/Y_train.npy',Dataset_Y[indexs[0:int(len(Dataset_X)*0.7)]])
+        np.save('Datasets/X_valid.npy',Dataset_X[indexs[int(len(Dataset_X)*0.7):int(len(Dataset_X)*0.85)]])
+        np.save('Datasets/Y_valid.npy',Dataset_Y[indexs[int(len(Dataset_X)*0.7):int(len(Dataset_X)*0.85)]])
+        np.save('Datasets/X_test.npy',Dataset_X[indexs[int(len(Dataset_X)*0.85):]])
+        np.save('Datasets/Y_test.npy',Dataset_Y[indexs[int(len(Dataset_X)*0.85):]])
+	np.save('Datasets/Directions_train.npy',directions_file_opened[indexs[0:int(len(Dataset_X)*0.7)]])
+        np.save('Datasets/Directions_valid.npy',directions_file_opened[indexs[int(len(Dataset_X)*0.7):int(len(Dataset_X)*0.85)]])
+        np.save('Datasets/Directions_test.npy',directions_file_opened[indexs[int(len(Dataset_X)*0.85):]])
+'''
+
+if (Dataset_X.size > 0):
+        indexs = numpy.arange(len(Dataset_X))
+        numpy.random.shuffle(indexs)
+        np.save('Datasets2/X_train.npy',Dataset_X[indexs[0:int(len(Dataset_X)*0.8)]])
+        np.save('Datasets2/Y_train.npy',Dataset_Y[indexs[0:int(len(Dataset_X)*0.8)]])
+        np.save('Datasets2/X_valid.npy',Dataset_X[indexs[int(len(Dataset_X)*0.8): ]])
+        np.save('Datasets2/Y_valid.npy',Dataset_Y[indexs[int(len(Dataset_X)*0.8): ]])
+        np.save('Datasets2/Directions_train.npy',directions_file_opened[indexs[0:int(len(Dataset_X)*0.8)]])
+        np.save('Datasets2/Directions_valid.npy',directions_file_opened[indexs[int(len(Dataset_X)*0.8): ]])
+
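+# Sanity check (an assumption inferred from the loop above: the camera frame is
+# the grasp-centred rotation, so the grasping point at (0,60,0) in Maya world
+# coordinates should map to the origin for every camera):
+for z in range(1, 37):
+    c = math.cos((z-1)*10*2*math.pi/360)
+    s = math.sin((z-1)*10*2*math.pi/360)
+    H = numpy.eye(4)
+    H[0:3, 3] = [0, -60, 0]
+    H[0:3, 0:3] = [[-s, 0, c], [0, 1, 0], [-c, 0, -s]]
+    assert numpy.allclose(numpy.dot(H, [0, 60, 0, 1])[0:3], 0)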
+
+
+
diff --git a/Pretreat_data/conversio_potent_nomes_punts.py b/Pretreat_data/conversio_potent_nomes_punts.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a1b2b9e8eca57b8e6e132978b05a7cbfe74983e
--- /dev/null
+++ b/Pretreat_data/conversio_potent_nomes_punts.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+
+import os.path 
+import numpy as np
+import numpy
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import cv2
+import pandas as pd
+import sys
+import math
+import glob
+import math3d
+
+directory = '../Documents/Database/Jumper/Jumper_two_points/'
+list_directories = glob.glob(directory+'*')
+
+array_X = []
+array_Y = []
+array_orientations = []
+array_orient_vectors_original = []
+directions_file_opened = []
+
+print(list_directories)
+
+for i in range(np.shape(list_directories)[0]):
+
+	last_length = 0
+	imatges = glob.glob(list_directories[i]+'/*.tif') #"Images/Towel/Towel_cuadrada_58cm/"
+	imatges.sort()
+
+	arxiu_csv = glob.glob(list_directories[i]+'/*.csv')
+
+	print(arxiu_csv)
+	arxiu = pd.read_csv(arxiu_csv[0],header=None) 
+
+	print (arxiu.shape)
+        num_cameras = 36
+	num_poses = arxiu.shape[0]/num_cameras
+	files_read = 0
+	
+	poses = arxiu.iloc[:,0]
+	cameras = arxiu.iloc[:,1]
+
+	cloth = str.split(str.split(imatges[0],'/')[-1],'_')
+	if len(cloth)==3:
+		cloth = cloth[0]
+	elif len(cloth)==4:
+		cloth = cloth[0]+'_'+cloth[1]
+
+	line_in_csv = -36
+	for j in range(num_poses-1):
+                line_in_csv += 36
+
+		for z in range(1,num_cameras+1):
+
+			if (os.path.isfile( list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z)+'.tif' )):
+                        	img = cv2.imread( list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z)+'.tif' ,-1)
+
+				image = img[:,80:240,0] 
+
+				if( sum(sum(image>0)) > 500 ): # if there are more than 500 valid pixels, the image should be correct
+		
+					image = (image>0)*(1.2+0.6*(1-np.float32(image)/65279))
+					row = arxiu.iloc[line_in_csv]
+
+					# Set the values shared by all camera views of this pose:
+
+					if (z==1):
+
+						# Point 1:
+						X_point1_from_world = row[2]
+						Y_point1_from_world = row[3]
+						Z_point1_from_world = row[4]
+
+						# Point 2:
+						X_point2_from_world = row[5]
+						Y_point2_from_world = row[6]
+						Z_point2_from_world = row[7]
+
+						# Translation matrix from world to cameras
+						Homogenious_matrix = numpy.eye(4)
+						translation_vector = [0,-60,0]
+						Homogenious_matrix[0:3,3] = translation_vector
+
+					# Getting transformations for this camera(z): (Normal image)
+					c = math.cos((z-1)*10*2*math.pi/360)
+					s = math.sin((z-1)*10*2*math.pi/360)
+
+					Matrix_rotation = [[-s,0,c], [0,1,0], [-c,0,-s]]
+					
+					Homogenious_matrix[0:3,0:3] = Matrix_rotation
+					
+					# Converting from world in Maya to camera: (Normal image)
+					Pos_point1_from_camera = numpy.dot(Homogenious_matrix,[X_point1_from_world,Y_point1_from_world,Z_point1_from_world,1])
+					Pos_point2_from_camera = numpy.dot(Homogenious_matrix,[X_point2_from_world,Y_point2_from_world,Z_point2_from_world,1])
+
+					# Saving images in X: (Normal image)		
+					array_X.append(image)
+
+					# Saving values in Y: (Normal image)
+					array_Y.append([Pos_point1_from_camera[0],Pos_point1_from_camera[1],Pos_point1_from_camera[2],Pos_point2_from_camera[0],Pos_point2_from_camera[1],Pos_point2_from_camera[2]]) 
+                                        directions_file_opened.append(list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z))
+
+					# Change the sign of the X values (with respect to the camera) (mirrored image)
+
+					# REMEMBER: IN THE MIRRORED IMAGE THE TWO POINTS MUST BE SWAPPED (POINT 1 BECOMES POINT 2 AND VICE VERSA), BESIDES NEGATING Z
+					
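+					# A horizontal flip (numpy.fliplr below) negates the camera X axis,
+					# hence only the first row of the rotation matrix changes sign here.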
+					Matrix_rotation = [[s,0,-c], [0,1,0], [-c,0,-s]]
+					Homogenious_matrix[0:3,0:3] = Matrix_rotation
+					
+					Pos_point1_from_camera = numpy.dot(Homogenious_matrix,[X_point1_from_world,Y_point1_from_world,Z_point1_from_world,1])
+					Pos_point2_from_camera = numpy.dot(Homogenious_matrix,[X_point2_from_world,Y_point2_from_world,Z_point2_from_world,1])
+
+					array_X.append(np.fliplr(image))
+					array_Y.append([Pos_point2_from_camera[0],Pos_point2_from_camera[1],Pos_point2_from_camera[2],Pos_point1_from_camera[0],Pos_point1_from_camera[1],Pos_point1_from_camera[2]])	
+					directions_file_opened.append(list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z))
+
+#				else: # otherwise the image is certainly invalid; report it and skip to the next one:
+#					print('Invalid image: ' + list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z)+'.tif')
+		
+				inform = str((j*36+z)*100/np.shape(poses)[0]) + "% Done"
+				sys.stdout.write('\b' * last_length)    # go back
+				sys.stdout.write(' ' * last_length)     # clear the previous output
+				sys.stdout.write('\b' * last_length)    # reposition
+				sys.stdout.write(inform)
+				sys.stdout.flush()
+				last_length = len(inform)
+				files_read += 1
+                        else:
+                                print('Not found: ' + list_directories[i]+'/'+cloth+'_'+str(j)+'_'+str(z)+'.tif')
+#                                sys.exit()
+
+
+        print('\nDataset created from '+ list_directories[i]+'. Dataset.shape: ' + str(np.shape(array_X))) # It should be [num_vertexs,num_cameras,240,160]
+
+print('Saving...')
+Dataset_X = np.array(array_X)
+Dataset_Y = np.array(array_Y)
+directions_file_opened = np.array(directions_file_opened)
+print(directions_file_opened.shape)
+
+print('Done. Total examples for this object: ' + str(np.shape(Dataset_X)[0]))
+
+if (Dataset_X.size > 0):
+	indexs = numpy.arange(len(Dataset_X))
+	numpy.random.shuffle(indexs)
+	np.save('Datasets1/X_train.npy',Dataset_X[indexs[0:int(len(Dataset_X)*0.75)]])
+	np.save('Datasets1/Y_train.npy',Dataset_Y[indexs[0:int(len(Dataset_X)*0.75)]])
+        np.save('Datasets1/X_valid.npy',Dataset_X[indexs[int(len(Dataset_X)*0.75):]])
+        np.save('Datasets1/Y_valid.npy',Dataset_Y[indexs[int(len(Dataset_X)*0.75):]])
+
+# Once the datasets for the different clothes are saved, join them to obtain [number_of_clothes,num_vertexs,num_cameras,240,160]
+
+
diff --git a/Training_CNNs/pretrained_params/W0_1_65.npy b/Training_CNNs/pretrained_params/W0_1_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..d95015e2ab453e1449ffcfa3758ef64b6a108f20
Binary files /dev/null and b/Training_CNNs/pretrained_params/W0_1_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W0_3_65.npy b/Training_CNNs/pretrained_params/W0_3_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..711d9bbfac54ab968fbfae21bb08aee041107654
Binary files /dev/null and b/Training_CNNs/pretrained_params/W0_3_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W0_4_65.npy b/Training_CNNs/pretrained_params/W0_4_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..5d5777f94c467639b5a47138b2a02b16434ab0f8
Binary files /dev/null and b/Training_CNNs/pretrained_params/W0_4_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W1_1_65.npy b/Training_CNNs/pretrained_params/W1_1_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..00e62edc72ceae653b1bb4392f7f7c70e23d748e
Binary files /dev/null and b/Training_CNNs/pretrained_params/W1_1_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W1_3_65.npy b/Training_CNNs/pretrained_params/W1_3_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..b36cfdcd75aa6ea68d16996f2555ed0fc911675e
Binary files /dev/null and b/Training_CNNs/pretrained_params/W1_3_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W1_4_65.npy b/Training_CNNs/pretrained_params/W1_4_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..8b4dce8113637f798ab3c4ffe3d7e6fc6f40a070
Binary files /dev/null and b/Training_CNNs/pretrained_params/W1_4_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W_0_65.npy b/Training_CNNs/pretrained_params/W_0_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..729f4566d8d9b81d63424335c9591314e874e380
Binary files /dev/null and b/Training_CNNs/pretrained_params/W_0_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W_2_65.npy b/Training_CNNs/pretrained_params/W_2_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..c7afede2f5b8927d7b88beba94e710fa8d8b707c
Binary files /dev/null and b/Training_CNNs/pretrained_params/W_2_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W_5_65.npy b/Training_CNNs/pretrained_params/W_5_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..247024e81823805bb8d5cc1e7b512f7fcbf2771b
Binary files /dev/null and b/Training_CNNs/pretrained_params/W_5_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W_6_65.npy b/Training_CNNs/pretrained_params/W_6_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..a02fc3e72069781da5bd2e59c63e54c3c484d4d6
Binary files /dev/null and b/Training_CNNs/pretrained_params/W_6_65.npy differ
diff --git a/Training_CNNs/pretrained_params/W_7_65.npy b/Training_CNNs/pretrained_params/W_7_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..d1a534dfca3d046eb39d99fb0a43af297a312621
Binary files /dev/null and b/Training_CNNs/pretrained_params/W_7_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b0_1_65.npy b/Training_CNNs/pretrained_params/b0_1_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..6c40bef22bba1cf18ff4af50ebd843042a40c883
Binary files /dev/null and b/Training_CNNs/pretrained_params/b0_1_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b0_3_65.npy b/Training_CNNs/pretrained_params/b0_3_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..54388c714d7838ead88b755e75822ce897ed313f
Binary files /dev/null and b/Training_CNNs/pretrained_params/b0_3_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b0_4_65.npy b/Training_CNNs/pretrained_params/b0_4_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..46a0f48f7e357b5572c456b71e333a1a531c61d9
Binary files /dev/null and b/Training_CNNs/pretrained_params/b0_4_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b1_1_65.npy b/Training_CNNs/pretrained_params/b1_1_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..109a83fac42e0430abce62ace67dfebfc4a63f30
Binary files /dev/null and b/Training_CNNs/pretrained_params/b1_1_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b1_3_65.npy b/Training_CNNs/pretrained_params/b1_3_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..9b340c276d8c78c7a23f384da8952292e15fd33a
Binary files /dev/null and b/Training_CNNs/pretrained_params/b1_3_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b1_4_65.npy b/Training_CNNs/pretrained_params/b1_4_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..3319ac680e20bd943b15fec6920b604377491060
Binary files /dev/null and b/Training_CNNs/pretrained_params/b1_4_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b_0_65.npy b/Training_CNNs/pretrained_params/b_0_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..ca5b12913401c13b37173ce0ec54b222c6de9390
Binary files /dev/null and b/Training_CNNs/pretrained_params/b_0_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b_2_65.npy b/Training_CNNs/pretrained_params/b_2_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..8760830bf23abb5910046d2cfdbaf5cdec1e260a
Binary files /dev/null and b/Training_CNNs/pretrained_params/b_2_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b_5_65.npy b/Training_CNNs/pretrained_params/b_5_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..12bd037720071170b1752bb75e748aa2be9a98dc
Binary files /dev/null and b/Training_CNNs/pretrained_params/b_5_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b_6_65.npy b/Training_CNNs/pretrained_params/b_6_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..b30cccfacdf4c7ac290f3ee89c13040ffbc61e8f
Binary files /dev/null and b/Training_CNNs/pretrained_params/b_6_65.npy differ
diff --git a/Training_CNNs/pretrained_params/b_7_65.npy b/Training_CNNs/pretrained_params/b_7_65.npy
new file mode 100644
index 0000000000000000000000000000000000000000..d82a31a718d307e59a43a2cf431a5070fee1fcec
Binary files /dev/null and b/Training_CNNs/pretrained_params/b_7_65.npy differ
diff --git a/Training_CNNs/training_class_and_predict_with_noise.py b/Training_CNNs/training_class_and_predict_with_noise.py
new file mode 100644
index 0000000000000000000000000000000000000000..68e27d3f6e86f8cef07d3a166689df65cdffd367
--- /dev/null
+++ b/Training_CNNs/training_class_and_predict_with_noise.py
@@ -0,0 +1,405 @@
+
+from __future__ import print_function
+import scipy.io
+import sys
+import os
+import time
+import numpy
+import theano
+import theano.tensor as T
+import lasagne
+import matplotlib.pyplot as plt
+
+sys.path.insert(0,'/home/tgabas/cloth-recognition/synth_real')
+from training import add_noise
+
+def build_network(input_var,target_var,length_of_batches= None):
+
+        drop_cnn = 0
+
+        drop_fully_cn = 0.5
+
+        nonlinearity = lasagne.nonlinearities.rectify
+
+        init = 'relu' # 'relu' (rectify) or 1 (sigmoid) 
+
+        network = lasagne.layers.InputLayer(shape=(None,1,240,160),input_var=input_var) # length_of_batches instead of None
+
+        network = lasagne.layers.BatchNormLayer(network)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network,p=drop_cnn), num_filters=96,stride = (4,4),  pad = 'full', filter_size=(11,11),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network.output_shape)
+
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+        print(network.output_shape)
+
+        network1 = lasagne.layers.SliceLayer(network,indices = slice(0,48),axis=1)
+
+        print(network1.output_shape)
+
+        network1 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network1,p=drop_cnn), num_filters=128,stride = (1,1),  pad = 'full', filter_size=(5,5),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network1.output_shape)
+
+        network2 = lasagne.layers.SliceLayer(network,indices = slice(48,96),axis=1)
+
+        print(network2.output_shape)
+
+        network2 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network2,p=drop_cnn), num_filters=128,stride = (1,1),  pad = 'full', filter_size=(5,5),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network2.output_shape)
+
+        network = lasagne.layers.ConcatLayer((network1,network2),axis=1)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network,p=drop_cnn), num_filters=384,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network.output_shape)
+
+        network1 = lasagne.layers.SliceLayer(network,indices = slice(0,192),axis=1)
+
+        print(network1.output_shape)
+
+        network1 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network1,p=drop_cnn), num_filters=192,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network1.output_shape)
+
+        network1 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network1,p=drop_cnn), num_filters=128,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network1.output_shape)
+
+        network2 = lasagne.layers.SliceLayer(network,indices = slice(192,384),axis=1)
+
+        print(network2.output_shape)
+
+        network2 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network2,p=drop_cnn), num_filters=192,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network2.output_shape)
+
+        network2 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network2,p=drop_cnn), num_filters=128,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network2.output_shape)
+
+        network = lasagne.layers.ConcatLayer((network1,network2),axis=1)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=4096,nonlinearity=nonlinearity)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=4096,nonlinearity=nonlinearity)
+
+        print(network.output_shape)
+
+        network_pred = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=3,nonlinearity=None,W = lasagne.init.GlorotUniform())
+
+        print("PRED: "+ str(network_pred.output_shape))
+
+        network_class = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=2,nonlinearity=lasagne.nonlinearities.softmax, W = lasagne.init.GlorotUniform())
+
+        print("CLASS: " + str(network_class.output_shape))
+
+        network = lasagne.layers.ConcatLayer([network_pred,network_class],axis=1)
+
+        print(network.output_shape)
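+
+        # The concatenated output has 5 columns: the 3 regressed coordinates followed by
+        # the 2-way softmax over not-seen/seen (read back as predict[:,[0,1,2]] and
+        # predict[:,[3,4]] in the loss functions below).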
+
+        regularization = lasagne.regularization.regularize_network_params(network,lasagne.regularization.l2)*0.000005
+
+        p = lasagne.layers.get_all_param_values(network)
+
+        w0 = numpy.load('pretrained_params/W_0_65.npy')[0]
+        b0 = numpy.load('pretrained_params/b_0_65.npy')
+
+        for i in range(0,96):
+                p[4][i,0] = w0[:,:,i]
+                p[5][i] = b0[i]
+
+        w1 = numpy.load('pretrained_params/W1_1_65.npy')
+        b1 = numpy.load('pretrained_params/b1_1_65.npy')
+
+        for i in range(0,128):
+                p[6][i] = w1[:,:,:,i]
+                p[7][i] = b1[i]
+
+        w1 = numpy.load('pretrained_params/W0_1_65.npy')
+        b1 = numpy.load('pretrained_params/b0_1_65.npy')
+
+        for i in range(0,128):
+                p[8][i] = w1[:,:,:,i]
+                p[9][i] = b1[i]
+
+        w2 = numpy.load('pretrained_params/W_2_65.npy')
+        b2 = numpy.load('pretrained_params/b_2_65.npy')
+
+        for i in range(0,384):
+                p[10][i] = w2[:,:,:,i]
+                p[11][i] = b2[i]
+
+        w3 = numpy.load('pretrained_params/W0_3_65.npy') # (192, 3, 3, 192)
+        b3 = numpy.load('pretrained_params/b0_3_65.npy')
+
+
+        for i in range(0,128):
+                p[12][i] = w3[:,:,:,i]
+                p[13][i] = b3[i]
+
+        w4 = numpy.load('pretrained_params/W0_4_65.npy') # (192, 3, 3, 128)
+        b4 = numpy.load('pretrained_params/b0_4_65.npy')
+
+        for i in range(0,128):
+                p[14][i] = w4[:,:,:,i]
+                p[15][i] = b4[i]
+
+        w3 = numpy.load('pretrained_params/W1_3_65.npy') # (192, 3, 3, 192)
+        b3 = numpy.load('pretrained_params/b1_3_65.npy')
+
+        for i in range(0,128):
+                p[16][i] = w3[:,:,:,i]
+                p[17][i] = b3[i]
+
+        w4 = numpy.load('pretrained_params/W1_4_65.npy') # (192, 3, 3, 128)
+        b4 = numpy.load('pretrained_params/b1_4_65.npy')
+
+        for i in range(0,128):
+                p[18][i] = w4[:,:,:,i]
+                p[19][i] = b4[i]
+
+        w5 = numpy.load('pretrained_params/W_5_65.npy') #9216x4096
+        b5 = numpy.load('pretrained_params/b_5_65.npy')
+        p[20][0:9216] = w5
+        p[21][0:9216] = b5
+
+        w6 = numpy.load('pretrained_params/W_6_65.npy') #9216x4096
+        b6 = numpy.load('pretrained_params/b_6_65.npy')
+        p[22] = w6
+        p[23] = b6
+
+
+        # Load the modified values back into the network: get_all_param_values returns
+        # copies, so without this call the assignments to p above would have no effect
+        # (an assumption about the intended behaviour, inferred from the loading code).
+        lasagne.layers.set_all_param_values(network, p)
+
+        return network,regularization
+
+
+
+def iterate_minibatches(inputs, targets, points, batchsize, shuffle=False):
+    assert len(inputs) == len(targets)
+    if shuffle:
+        indices = numpy.arange(len(inputs))
+        numpy.random.shuffle(indices)
+    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
+        if shuffle:
+            excerpt = indices[start_idx:start_idx + batchsize]
+        else:
+            excerpt = slice(start_idx, start_idx + batchsize)
+        yield inputs[excerpt], targets[excerpt], points[excerpt]
+
+
+def load_dataset():
+ 
+	X_training = numpy.load('../../../Data_onepoint/X_train.npy')
+	Y_training = numpy.load('../../../Data_onepoint/Y_train.npy')
+	Ponderate_train = numpy.load('../../../Data_onepoint/Altres/Points_seen_train.npy')
+        Ponderate_valid = numpy.load('../../../Data_onepoint/Altres/Points_seen_valid.npy')
+
+        X_training = add_noise(X_training)
+
+	Ponderate_train = numpy.int16(Ponderate_train)
+        Ponderate_valid = numpy.int16(Ponderate_valid)
+	
+        X_valid = numpy.load('../../../Data_onepoint/X_valid.npy')
+        Y_valid = numpy.load('../../../Data_onepoint/Y_valid.npy')
+
+	X_valid = add_noise(X_valid)
+	
+#	X_training = X_valid
+#	Y_training = Y_valid
+	
+        X_real_val = numpy.load('/home/tgabas/cloth-recognition/Real_data_for_grasping/X_real_val.npy')
+        Y_real_val = numpy.load('/home/tgabas/cloth-recognition/Real_data_for_grasping/Y_real_val.npy')
+        Ponderate_real_val = numpy.int16(numpy.load('/home/tgabas/cloth-recognition/Real_data_for_grasping/Points_seen_real_val.npy'))[:,0]
+
+	X_training = X_training.reshape([-1,1,240,160]).astype(numpy.float32)
+	Y_training = numpy.float32(Y_training)
+	X_valid = X_valid.reshape([-1,1,240,160]).astype(numpy.float32)
+	Y_valid = numpy.float32(Y_valid)
+
+	print(numpy.shape(Ponderate_train))
+        X_real_val = X_real_val.reshape([-1,1,240,160]).astype(numpy.float32)
+        Y_real_val = numpy.float32(Y_real_val[:,[0,1,2]])
+
+	print("X_training shape is: " + str(X_training.shape))
+	print("X_valid shape is: " + str(X_valid.shape))
+
+	return X_training,Y_training,X_valid,Y_valid,Ponderate_train,Ponderate_valid,X_real_val,Y_real_val,Ponderate_real_val
+	
+def main(num_epochs = 500):
+	print("Loading data...")
+	X_training,Y_training,X_valid,Y_valid,punts_train,punts_valid,X_real_val,Y_real_val,punts_real_val = load_dataset()
+	input_var = T.tensor4('inputs')
+	target_var = T.matrix('targets')
+        punts_seen = T.ivector('ponderate_targets')
+#	punts_seen = T.matrix('ponderate_targets')
+
+#        prediction_given = T.tensor4('preds')
+
+	print("Building model...")
+
+	# Define how many batches
+        number_of_batches = 400 
+        length_of_batches = numpy.shape(Y_training)[0]/number_of_batches
+
+	# Build network
+	network,regularization = build_network(input_var,target_var,length_of_batches)
+	
+	# Define loss, learning rate, updates and train/validation functions.
+        predict = lasagne.layers.get_output(network)
+
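+	# Multi-task loss: squared error on the predicted point, weighted 0.2 when the
+	# point is not visible and 0.8 when it is, plus a cross-entropy term on the
+	# seen/not-seen softmax head.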
+	loss_pred = T.eq(punts_seen,0)*0.2*(lasagne.objectives.squared_error(predict[:,[0,1,2]],target_var)).sum(axis=1) + T.eq(punts_seen,1)*0.8*(lasagne.objectives.squared_error(predict[:,[0,1,2]],target_var)).sum(axis=1)
+
+	loss_class = lasagne.objectives.categorical_crossentropy(predict[:,[3,4]],punts_seen)
+
+	loss = loss_pred.mean() +loss_class.mean()*5 + regularization
+
+#	learning_rate = theano.shared(numpy.array(0.000001,dtype=theano.config.floatX))
+#	learning_rate_decay = 0.9992
+
+        params = lasagne.layers.get_all_params(network, trainable=True)
+
+#	updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=learning_rate, momentum=0.9)	
+
+	updates = lasagne.updates.adam(loss, params, learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=1e-08)
+
+#	updates = lasagne.updates.adagrad(loss, params, learning_rate=1.0, epsilon=1e-06)
+#	updates = lasagne.updates.adadelta(loss, params, learning_rate=1.0, rho=0.95, epsilon=1e-06)
+ 
+        test_predict = lasagne.layers.get_output(network,deterministic=True)
+
+#        test_loss_pred = punts_seen*(lasagne.objectives.squared_error(test_predict[:,[0,1,2]],target_var)).sum(axis=1) 
+
+        test_loss_pred = T.eq(punts_seen,0)*0.2*(lasagne.objectives.squared_error(test_predict[:,[0,1,2]],target_var)).sum(axis=1) + T.eq(punts_seen,1)*0.8*(lasagne.objectives.squared_error(test_predict[:,[0,1,2]],target_var)).sum(axis=1)
+
+	test_loss_class = lasagne.objectives.categorical_crossentropy(test_predict[:,[3,4]],punts_seen)
+
+	test_loss =  test_loss_pred.mean() +test_loss_class.mean()*5
+
+	test_loss_in_col = lasagne.objectives.squared_error(test_predict[:,[0,1,2]],target_var)*T.repeat(punts_seen.reshape([-1,1]),3,axis=1)
+
+	test_loss_in_col = test_loss_in_col.sum(axis=0)/punts_seen.sum()
+ 
+	see_what_improves = theano.function([input_var,target_var, punts_seen],test_loss_in_col)
+
+#	test_loss = test_loss.mean()
+
+        test_acc = T.mean(T.eq(T.argmax(test_predict[:,[3,4]], axis=1), punts_seen), dtype=theano.config.floatX)
+
+	train_fn = theano.function([input_var, target_var,punts_seen], loss, updates=updates)
+
+	val_fn = theano.function([input_var, target_var, punts_seen], [test_loss,test_acc])
+
+	print("Starting training...")
+	
+	# Define initial variables 
+	valid_errors = []
+	training_errors = []
+	train_distribution = []
+        valid_distribution = []
+	error_real_val = []
+	accuracies = []
+	best_validation_error = numpy.Inf
+	best_acc = 0
+	length_of_batches_valid = 200
+	number_of_batches_valid = numpy.shape(Y_valid)[0]/length_of_batches_valid
+	train_err = 10
+
+	# The training loop:
+	for epoch in range(num_epochs):
+        	ant_train_err = train_err
+		train_err = 0
+		valid_err = 0
+		train_batches = 0
+		val_batches = 0
+		acc = 0
+		error_per_train = numpy.zeros(3)
+		error_per_val = numpy.zeros(3)
+
+		if True:
+                        for batch in iterate_minibatches(X_training, Y_training,punts_train , length_of_batches, shuffle=True):
+                                inputs, targets, punts = batch
+				train_err += train_fn(inputs, targets,punts)
+                                train_batches += 1
+				#print( predict_values(inputs))
+				error_per_train += see_what_improves(inputs,targets,punts)
+#				learning_rate = learning_rate*learning_rate_decay	
+
+			for batch in iterate_minibatches(X_valid, Y_valid, punts_valid, length_of_batches_valid, shuffle=False):
+				inputs, targets, punts = batch
+				err,acc1 = val_fn(inputs, targets,punts)
+				error_per_val += see_what_improves(inputs,targets,punts)
+				valid_err += err
+				val_batches += 1
+				acc += acc1
+                        Real_error = see_what_improves(X_real_val,Y_real_val,punts_real_val)
+
+			# Get the real value of training error, validation error and accuracies.
+                        train_err = train_err/train_batches
+			valid_err = valid_err/val_batches
+			error_per_val = error_per_val/val_batches
+			error_per_train = error_per_train/train_batches
+			acc = acc/val_batches
+
+			# Inform about the state of the training process
+			print("Iteration: " + str(epoch))
+                        print("      Training loss when predicting: " + str(train_err))
+                        print("      Validation regression loss: " + str(valid_err))
+                        print("      Validation accuracy: " + str(acc))
+                        print("      Error train distribution: "+ str(error_per_train.tolist() ) )
+			print("	     Error valid distribution: "+ str(error_per_val.tolist() ) )
+			print("      Error real valid distribution: " + str(Real_error.tolist() ) )
+			now = time.localtime()
+			print("      Achieved at day " +str(now[2])+"/"+str(now[1])+"/"+str(now[0])+" ; And time "+ str(now[3])+":"+str(now[4])+":"+str(now[5]))
+			
+			# Keep relevant information about the training process
+                        valid_errors.append(valid_err)
+			training_errors.append(train_err)
+			train_distribution.append(error_per_train)
+			valid_distribution.append(error_per_val)
+			accuracies.append(acc)
+			error_real_val.append(Real_error)
+	
+			if(valid_err < best_validation_error):
+				best_validation_error = valid_err
+				numpy.save("Results_one_point_4out_with_noise/parameters_best_val_loss.npy",lasagne.layers.get_all_param_values(network))
+				print("      Minimum validation error. Parameters saved")
+			
+			if(acc > best_acc):
+				best_acc = acc
+				numpy.save("Results_one_point_4out_with_noise/parameters_best_val_acc.npy",lasagne.layers.get_all_param_values(network))
+				print("	     Maximum accuracy. Parameters saved")
+
+			# Save relevant information about the training process every 5 epochs
+			if (epoch%5==0):
+                                numpy.save("Results_one_point_4out_with_noise/validation_errors.npy",valid_errors)
+				numpy.save("Results_one_point_4out_with_noise/training_errors.npy",training_errors)
+				numpy.save("Results_one_point_4out_with_noise/error_train_distribution.npy",train_distribution )
+				numpy.save("Results_one_point_4out_with_noise/error_valid_distribution.npy",valid_distribution )
+				numpy.save("Results_one_point_4out_with_noise/accuracies.npy",accuracies)
+				numpy.save("Results_one_point_4out_with_noise/error_real_valid_distribution.npy",error_real_val)
+	print("")
+	
+	print("Finished")
+
+if __name__ == "__main__":
+	main(2000)
+
diff --git a/Training_CNNs/training_classifier.py b/Training_CNNs/training_classifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..666a984574b66477a31926d78a9450855d66b2fd
--- /dev/null
+++ b/Training_CNNs/training_classifier.py
@@ -0,0 +1,335 @@
+
+from __future__ import print_function
+import scipy.io
+import sys
+import os
+import time
+import numpy
+import theano
+import theano.tensor as T
+import lasagne
+import matplotlib.pyplot as plt
+
+# The sklearn preprocessing scale function sets the mean to 0 and the standard deviation to 1.
+
+#Last version:
+# 0 -> Jeans
+# 1 -> Jumper
+# 2 -> T_Shirt
+# 3 -> Towel
+
+# Function to set the mean of the dataset to zero and the standard deviation to one. X is the whole dataset.
+
+def build_network(input_var,target_var,length_of_batches= None):
+
+	drop_cnn = 0.3
+
+	drop_fully_cn = 0.5
+
+	nonlinearity = lasagne.nonlinearities.rectify
+
+	init = 'relu' # 'relu' (rectify) or 1 (sigmoid) 
+
+        network = lasagne.layers.InputLayer(shape=(None,1,240,160),input_var=input_var) # length_of_batches instead of None
+
+	network = lasagne.layers.BatchNormLayer(network)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network,p=drop_cnn), num_filters=32,stride = (2,2), filter_size=(8,8),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+	print(network.output_shape)
+
+	network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+        print(network.output_shape)
+ 
+        network = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network,p=drop_cnn), num_filters=64,stride = (1,1), filter_size=(6,4),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network.output_shape)
+
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+	print(network.output_shape)
+	 
+        network = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network,p=drop_cnn), num_filters=64,stride = (1,1), filter_size=(4,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network.output_shape)
+
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+	print(network.output_shape)
+ 
+        network = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network,p=drop_cnn), num_filters=128,stride = (1,1), filter_size=(2,2),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network.output_shape)
+
+        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+	print(network.output_shape)
+
+        network = lasagne.layers.DenseLayer(
+            lasagne.layers.dropout(network,p=drop_fully_cn),
+            num_units=64,
+            nonlinearity=nonlinearity)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.DenseLayer(
+            lasagne.layers.dropout(network,p=drop_fully_cn),
+            num_units=4,
+            nonlinearity=lasagne.nonlinearities.softmax)
+
+	regularization = lasagne.regularization.regularize_network_params(network,lasagne.regularization.l2)*0.00002
+
+	return network,regularization
+ 
+def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
+    assert len(inputs) == len(targets)
+    if shuffle:
+        indices = numpy.arange(len(inputs))
+        numpy.random.shuffle(indices)
+    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
+        if shuffle:
+            excerpt = indices[start_idx:start_idx + batchsize]
+        else:
+            excerpt = slice(start_idx, start_idx + batchsize)
+        yield inputs[excerpt], targets[excerpt]
+
+def load_dataset():
+
+	X_training = numpy.load('../Data/Dataset_X.npy')
+	Y_training = numpy.load('../Data/Dataset_Y.npy')
+	X_valid = numpy.load('../Data/X_real_val.npy')
+	Y_valid = numpy.load('../Data/Y_real_val.npy')
+#	mean = numpy.mean(X_training,axis=0)
+#	std = numpy.mean(X_training,axis=0)
+#	print(mean.shape)
+#	print("Data loaded")	
+
+	X_training = add_noise(X_training)
+
+	X_training = X_training.reshape([-1,1,240,160]).astype(numpy.float32)
+	Y_training = numpy.int32(Y_training)
+	X_valid = X_valid.reshape([-1,1,240,160]).astype(numpy.float32)
+	Y_valid = numpy.int32(Y_valid)
+
+#	X_training = (X_training-mean)/std
+#	nans = numpy.isnan(X_training)
+#	X_training[nans] = 0
+
+#	X_valid = (X_valid-mean)/std
+#	nans = numpy.isnan(X_valid)
+#	X_valid[nans] = 0
+#	infs = numpy.isnan(X_valid)
+#	X_valid[infs] = 0
+
+	print("X_training shape is: " + str(X_training.shape))
+	print("X_valid shape is: " + str(X_valid.shape))
+
+	return X_training,Y_training,X_valid,Y_valid
+
+def add_noise(X_training): # Shape is [k,240,160]
+
+	array = numpy.arange(X_training.shape[0])
+
+	constant = 280000
+
+	print("Simulating noise...")
+	print("Simulating noise in Z axis...")# z:
+	pesos = [2.0000e-05, 2.0000e-05, 1.2500e-06, 2.0000e-06, 3.5000e-09, 3.5000e-09, -1.0002e-02,-1.0002e-02,-1.5025e-03,1.4515e+00]
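+	# Assumption inferred from the expression below: pesos holds the coefficients of a
+	# quadratic polynomial in the pixel column, pixel row and depth value, whose output
+	# (scaled down by 100) is used as the per-pixel standard deviation of the simulated
+	# depth noise.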
+	
+	now = time.time()
+
+	for i in range(X_training.shape[1]):
+	    for j in range(X_training.shape[2]):
+		st = (pesos[0]*j*j*16+pesos[1]*i*i*4+pesos[2]*(X_training[:,i,j]**2)+pesos[3]*j*i*8+pesos[4]*(X_training[:,i,j])*i*2+pesos[5]*(X_training[:,i,j])*j*4+pesos[6]*j*4+pesos[7]*i*2+pesos[8]*(X_training[:,i,j])+pesos[9])/100
+		st = numpy.abs(st)+(st==0)*0.00001
+		X_training[:,i,j]= X_training[:,i,j] + (X_training[:,i,j]>0)*numpy.random.normal(0,st,X_training.shape[0])
+ 
+	X_training_original = X_training
+	print("... which has taken "+str(time.time()-now)+" seconds")
+	print("")
+	now = time.time()
+
+	print("Simulating noise in Y axis...") # j: 
+
+	pesos = [6.3038e-01, 2.6496e-01, 1.3279e-06, 1.5000e-02, 9.0174e-05, 3.3417e-04, -5.9320e+00, 2.4411e+00, 3.1239e-03, 1.0995e+01]
+
+	for i in range(X_training.shape[1]):
+		for j in range(X_training.shape[2]):
+			st = (pesos[0]*j*j*16+pesos[1]*i*i*4+pesos[2]*((1000*X_training[:,i,j])**2)+pesos[3]*j*i*8+pesos[4]*(1000*X_training[:,i,j])*i*2+pesos[5]*(1000*X_training[:,i,j])*j*4+pesos[6]*j*4+pesos[7]*i*2+pesos[8]*(1000*X_training[:,i,j])+pesos[9])/constant
+			st = numpy.abs(st)+(st==0)*0.01
+			index = numpy.round(numpy.random.normal(0,st))
+			index = (index+j > 0)*index + (index+j<=0)*0
+			index = (index+j >= X_training.shape[2])*(X_training.shape[2]-1) + (index+j<X_training.shape[2])*(j+index)
+			X_training[:,i,j] = X_training_original[array,i,numpy.uint16(index)]
+
+	X_training_original = X_training
+
+        print("... which has taken "+str(time.time()-now)+" seconds")
+        print("")
+        now = time.time()
+
+	print("Simulating noise in X axis...") # i:
+
+	pesos = [6.3801e-01, 1.1225e-01, 3.5751e-06,-4.0645e-03,-1.4951e-04, 7.0336e-05,-5.6762e+00,-8.0153e-01,-3.1496e-03, 1.2996e+01]
+
+	for i in range(X_training.shape[1]):
+		for j in range(X_training.shape[2]):
+			st = (pesos[0]*j*j*16+pesos[1]*i*i*4+pesos[2]*((1000*X_training[:,i,j])**2)+pesos[3]*j*i*8+pesos[4]*(1000*X_training[:,i,j])*i*2+pesos[5]*(1000*X_training[:,i,j])*j*4+pesos[6]*j*4+pesos[7]*i*2+pesos[8]*(1000*X_training[:,i,j])+pesos[9])/constant
+			st = numpy.abs(st)+(st==0)*0.01
+			index = numpy.round(numpy.random.normal(0,st))
+			index = (index+i > 0)*index + (index+i<=0)*0
+			index = (index+i >= X_training.shape[1])*(X_training.shape[1]-1) + (index+i<X_training.shape[1])*(i+index)
+			X_training[:,i,j] = X_training_original[array,numpy.uint16(index),j]
+
+        print("... which has taken "+str(time.time()-now)+" seconds")
+        print("")
+	
+	return X_training
+	
+
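+# Shuffle a and b in unison: flatten both, concatenate them column-wise, permute
+# the rows, then split and reshape back to the original shapes.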
+def permute(a,b):
+	c = numpy.c_[numpy.reshape(a,[len(a), -1]), numpy.reshape(b.astype(numpy.float32),[len(b), -1])]
+	c = numpy.random.permutation(c)
+	a2 = numpy.reshape(c[:, :numpy.size(a)//len(a)],numpy.shape(a))
+	b2 = numpy.reshape(c[:, numpy.size(a)//len(a):],numpy.shape(b))
+	return a2,b2
+	
+def main(num_epochs=500):
+	print("Loading data...")
+	X_training,Y_training,X_valid,Y_valid = load_dataset()
+	input_var = T.tensor4('inputs')
+	target_var = T.ivector('targets')
+	
+	print("Building model...")
+
+	# Define how many batches
+        number_of_batches = 200 
+        length_of_batches = numpy.shape(Y_training)[0]/number_of_batches
+
+	# Build network
+	network,regularization= build_network(input_var,target_var,length_of_batches)
+	
+	# Define loss, learning rate, updates and train/validation functions.
+	prediction = lasagne.layers.get_output(network)
+	loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
+	loss = loss.mean() + regularization
+
+	learning_rate = theano.shared(numpy.array(0.004,dtype=theano.config.floatX))
+	learning_rate_decay = 0.99992
+	params = lasagne.layers.get_all_params(network, trainable=True)
+	updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=learning_rate, momentum=0.9)	
+
+	test_prediction = lasagne.layers.get_output(network, deterministic=True)
+	test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
+                                                            target_var)
+	test_loss = test_loss.mean()
+	test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
+                      dtype=theano.config.floatX)
+
+	train_fn = theano.function([input_var, target_var], loss, updates=updates)
+
+	val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
+
+	print("Starting training...")
+	
+	# Define initial variables 
+	validation_errors = []
+	training_errors = []
+	accuracies = []
+	best_validation_accuracy = 0.0
+	best_validation_error = numpy.Inf
+
+	length_of_batches_valid = 216
+	number_of_batches_valid = numpy.shape(Y_valid)[0]/length_of_batches_valid
+	train_err = 10
+	# The training loop:
+	for epoch in range(num_epochs):
+        	ant_train_err = train_err
+		train_err = 0
+		valid_err = 0
+		valid_acc = 0
+		train_batches = 0
+		val_batches = 0
+
+		# Training:
+#		for batch in range(0,number_of_batches):
+#			current_batch = X_training[batch*length_of_batches:(batch+1)*length_of_batches,:,:,:]
+#			current_batch += (current_batch!=0)*numpy.random.normal(0,0.005,current_batch.shape)
+#			train_err += train_fn(current_batch, Y_training[batch*length_of_batches:(batch+1)*length_of_batches])
+#			learning_rate = learning_rate*learning_rate_decay
+		
+		# Validate the model for the synthetic images
+#		for batch in range(0,number_of_batches_valid):
+#			err,acc = val_fn(X_valid[batch*length_of_batches_valid:(batch+1)*length_of_batches_valid,:,:,:],Y_valid[batch*length_of_batches_valid:(batch+1)*length_of_batches_valid])
+#			valid_err+=err
+#			valid_acc+=acc
+		
+		if False: #(ant_train_err > 1.375):
+                	for batch in iterate_minibatches(X_training[0:4000], Y_training[0:4000], length_of_batches, shuffle=True):
+                        	inputs, targets = batch
+                        	train_err += train_fn(inputs, targets)
+                        	train_batches += 1
+				learning_rate = learning_rate*learning_rate_decay
+                        train_err = train_err/train_batches
+                       	print("Iteration: " + str(epoch))
+                        print("      Training loss: " + str(train_err))
+		else:
+                        for batch in iterate_minibatches(X_training, Y_training, length_of_batches, shuffle=True):
+                                inputs, targets = batch
+                                train_err += train_fn(inputs, targets)
+                                train_batches += 1
+				learning_rate = learning_rate*learning_rate_decay	
+		
+			for batch in iterate_minibatches(X_valid, Y_valid, length_of_batches_valid, shuffle=False):
+				inputs, targets = batch
+				err, acc = val_fn(inputs, targets)
+				valid_err += err
+				valid_acc += acc
+				val_batches += 1
+
+			# Get the real value of training error, validation error and accuracies.
+			train_err = train_err/train_batches
+			valid_err = valid_err/val_batches
+			valid_acc = valid_acc*100/val_batches
+
+			# Inform about the state of the training process
+			print("Iteration: " + str(epoch))
+			print("      Training loss: " + str(train_err))
+			print("      Validation loss: " + str(valid_err))
+			print("      Validation accuracy: " + str(valid_acc))
+			now = time.localtime()
+			print("      Achieved at day " +str(now[2])+"/"+str(now[1])+"/"+str(now[0])+" ; And time "+ str(now[3])+":"+str(now[4])+":"+str(now[5]))
+			
+			# Keep relevant information about the training process
+			validation_errors.append(valid_err)
+			training_errors.append(train_err)
+			accuracies.append(valid_acc)
+
+			# Save parameters in important occasions:
+			if(valid_acc > best_validation_accuracy):
+				best_validation_accuracy = valid_acc
+				numpy.save("Results/parameters_best_val_acc.npy",lasagne.layers.get_all_param_values(network))
+				print("      Maximum validation accuracy. Parameters saved")
+			if(valid_err < best_validation_error):
+				best_validation_error = valid_err
+				numpy.save("Results/parameters_best_val_loss.npy",lasagne.layers.get_all_param_values(network))
+				print("      Minimum validation error. Parameters saved")
+			
+			# Save relevant information about the training process every 5 epochs
+			if (epoch%5==0):
+				numpy.save("Results/validation_errors.npy",validation_errors)
+				numpy.save("Results/training_errors.npy",training_errors)
+				numpy.save("Results/accuracies.npy",accuracies)
+
+	print("")
+		
+	print("Finished")
+
+if __name__ == "__main__":
+	main(200)
diff --git a/Training_CNNs/training_two_points_class_and_predict_with_noise.py b/Training_CNNs/training_two_points_class_and_predict_with_noise.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa3caedb310f57126d5f77df0924b101b45ddc24
--- /dev/null
+++ b/Training_CNNs/training_two_points_class_and_predict_with_noise.py
@@ -0,0 +1,423 @@
+
+from __future__ import print_function
+import scipy.io
+import sys
+import os
+import time
+import numpy
+import theano
+import theano.tensor as T
+import lasagne
+import matplotlib.pyplot as plt
+
+sys.path.insert(0,'/home/tgabas/cloth-recognition/synth_real')
+from training import add_noise
+
+# The sklearn preprocessing scale function sets the mean to 0 and the standard deviation to 1.
+
+#Last version:
+# 0 -> Jeans
+# 1 -> Jumper
+# 2 -> T_Shirt
+# 3 -> Towel
+
+# Function to set the mean of the dataset to zero and the standard deviation to one. X is the whole dataset.
+
+def build_network(input_var,target_var,length_of_batches= None):
+
+	drop_cnn = 0
+
+	drop_fully_cn = 0.5
+
+	nonlinearity = lasagne.nonlinearities.rectify
+
+	init = 'relu' # 'relu' (rectify) or 1 (sigmoid) 
+
+        network = lasagne.layers.InputLayer(shape=(None,1,240,160),input_var=input_var) # length_of_batches instead of None
+
+	network = lasagne.layers.BatchNormLayer(network)
+
+        print(network.output_shape)
+
+        network = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network,p=drop_cnn), num_filters=96,stride = (4,4),  pad = 'full', filter_size=(11,11),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+	print(network.output_shape)
+
+	network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+        print(network.output_shape)
+ 
+	network1 = lasagne.layers.SliceLayer(network,indices = slice(0,48),axis=1)
+
+	print(network1.output_shape)
+
+        network1 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network1,p=drop_cnn), num_filters=128,stride = (1,1),  pad = 'full', filter_size=(5,5),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+        print(network1.output_shape)
+
+        network2 = lasagne.layers.SliceLayer(network,indices = slice(48,96),axis=1)
+
+        print(network2.output_shape)
+
+        network2 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network2,p=drop_cnn), num_filters=128,stride = (1,1),  pad = 'full', filter_size=(5,5),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+	print(network2.output_shape)
+
+	network = lasagne.layers.ConcatLayer((network1,network2),axis=1)
+
+	print(network.output_shape)
+
+	network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+	print(network.output_shape)
+	 
+	network = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network,p=drop_cnn), num_filters=384,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+	print(network.output_shape)
+
+	network1 = lasagne.layers.SliceLayer(network,indices = slice(0,192),axis=1)
+
+	print(network1.output_shape)
+
+	network1 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network1,p=drop_cnn), num_filters=192,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+	print(network1.output_shape)
+
+	network1 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network1,p=drop_cnn), num_filters=128,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+	print(network1.output_shape)
+
+	network2 = lasagne.layers.SliceLayer(network,indices = slice(192,384),axis=1)
+
+	print(network2.output_shape)
+
+	network2 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network2,p=drop_cnn), num_filters=192,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+	print(network2.output_shape)
+
+	network2 = lasagne.layers.Conv2DLayer(lasagne.layers.dropout(network2,p=drop_cnn), num_filters=128,stride = (1,1),  pad = 'full', filter_size=(3,3),nonlinearity=nonlinearity,W=lasagne.init.GlorotUniform(init))
+
+	print(network2.output_shape)
+
+	network = lasagne.layers.ConcatLayer((network1,network2),axis=1)
+
+	print(network.output_shape)
+
+	network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2), ignore_border=False)
+
+	print(network.output_shape)
+
+	network = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=4096,nonlinearity=nonlinearity)
+
+	print(network.output_shape)
+
+	network = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=4096,nonlinearity=nonlinearity)
+
+	print(network.output_shape)
+
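+	# Four output heads concatenated into a 10-vector per sample:
+	#   [0:3]  xyz of point 1      [3:5]  softmax "point 1 visible?"
+	#   [5:8]  xyz of point 2      [8:10] softmax "point 2 visible?"
+	# (this matches the prediction[:,[0,1,2,5,6,7]] / [:,[3,4]] / [:,[8,9]]
+	# indexing used by the loss in main below)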
+	point1_pred = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=3,nonlinearity=None,W = lasagne.init.GlorotUniform())
+
+	point2_pred = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=3,nonlinearity=None,W = lasagne.init.GlorotUniform())
+
+	point1_class = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=2,nonlinearity=lasagne.nonlinearities.softmax,W = lasagne.init.GlorotUniform())
+
+	point2_class = lasagne.layers.DenseLayer(lasagne.layers.dropout(network,p=drop_fully_cn),num_units=2,nonlinearity=lasagne.nonlinearities.softmax,W = lasagne.init.GlorotUniform())
+
+	network = lasagne.layers.ConcatLayer((point1_pred,point1_class,point2_pred,point2_class),axis=1)
+
+	print(network.output_shape)
+
+	regularization = lasagne.regularization.regularize_network_params(network,lasagne.regularization.l2)*0.000005
+		
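+	# Transplant pretrained weights (stored as .npy arrays with the filter
+	# index last) into the freshly initialised parameter list. p[0..3] are
+	# the BatchNormLayer parameters, so the first convolution's W and b sit
+	# at p[4] and p[5].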
+	p = lasagne.layers.get_all_param_values(network)
+
+	w0 = numpy.load('pretrained_params/W_0_65.npy')[0]
+	b0 = numpy.load('pretrained_params/b_0_65.npy')
+
+	for i in range(0,96):
+		p[4][i,0] = w0[:,:,i]
+		p[5][i] = b0[i]
+
+	w1 = numpy.load('pretrained_params/W1_1_65.npy')
+	b1 = numpy.load('pretrained_params/b1_1_65.npy')
+
+	for i in range(0,128):
+		p[6][i] = w1[:,:,:,i]
+		p[7][i] = b1[i]
+
+	w1 = numpy.load('pretrained_params/W0_1_65.npy')
+	b1 = numpy.load('pretrained_params/b0_1_65.npy')
+
+	for i in range(0,128):
+		p[8][i] = w1[:,:,:,i]
+		p[9][i] = b1[i]
+
+	w2 = numpy.load('pretrained_params/W_2_65.npy')
+	b2 = numpy.load('pretrained_params/b_2_65.npy')
+
+	for i in range(0,384):
+		p[10][i] = w2[:,:,:,i]
+		p[11][i] = b2[i]
+
+	w3 = numpy.load('pretrained_params/W0_3_65.npy') # (192, 3, 3, 192)
+	b3 = numpy.load('pretrained_params/b0_3_65.npy')
+
+	# NOTE: p[12] has 192 filters but only the first 128 are initialised from
+	# the pretrained array; the remaining 64 keep their random initialisation.
+	for i in range(0,128):
+		p[12][i] = w3[:,:,:,i]
+		p[13][i] = b3[i]
+
+	w4 = numpy.load('pretrained_params/W0_4_65.npy') # (192, 3, 3, 128)
+	b4 = numpy.load('pretrained_params/b0_4_65.npy')
+
+	for i in range(0,128):
+		p[14][i] = w4[:,:,:,i]
+		p[15][i] = b4[i]
+
+	w3 = numpy.load('pretrained_params/W1_3_65.npy') # (192, 3, 3, 192)
+	b3 = numpy.load('pretrained_params/b1_3_65.npy')
+
+	# Same partial initialisation as p[12] above: 128 of the 192 filters.
+	for i in range(0,128):
+		p[16][i] = w3[:,:,:,i]
+		p[17][i] = b3[i]
+
+	w4 = numpy.load('pretrained_params/W1_4_65.npy') # (192, 3, 3, 128)
+	b4 = numpy.load('pretrained_params/b1_4_65.npy')
+
+	for i in range(0,128):
+		p[18][i] = w4[:,:,:,i]
+		p[19][i] = b4[i]
+
+	w5 = numpy.load('pretrained_params/W_5_65.npy') #9216x4096
+	b5 = numpy.load('pretrained_params/b_5_65.npy')
+	p[20][0:9216] = w5	# only the first 9216 input rows are pretrained
+	p[21][:] = b5
+
+	w6 = numpy.load('pretrained_params/W_6_65.npy') #4096x4096
+	b6 = numpy.load('pretrained_params/b_6_65.npy')
+	p[22] = w6
+	p[23] = b6
+
+	lasagne.layers.set_all_param_values(network,p)
+
+	return network,regularization
+ 
+def iterate_minibatches(inputs, targets, points, batchsize, shuffle=False):
+    assert len(inputs) == len(targets)
+    if shuffle:
+        indices = numpy.arange(len(inputs))
+        numpy.random.shuffle(indices)
+    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
+        if shuffle:
+            excerpt = indices[start_idx:start_idx + batchsize]
+        else:
+            excerpt = slice(start_idx, start_idx + batchsize)
+        yield inputs[excerpt], targets[excerpt], points[excerpt]
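+
+# Example usage (sketch): each yielded batch is an (inputs, targets, points)
+# triple; trailing samples that do not fill a complete batch are dropped:
+#
+#   for Xb, Yb, Pb in iterate_minibatches(X_training, Y_training, punts_train, 200, shuffle=True):
+#       train_err += train_fn(Xb, Yb, Pb)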
+
+
+def load_dataset():
+
+	X_training = numpy.load('../../../Data/X_train.npy')
+	Y_training = numpy.load('../../../Data/Y_train.npy')
+	Ponderate_train = numpy.load('../../../Data/Altres/Points_seen_train.npy')
+	Ponderate_valid = numpy.load('../../../Data/Altres/Points_seen_valid.npy')
+
+#	Ponderate_train = numpy.float32((Ponderate_train==1)*0.8+(Ponderate_train==0)*0.2)
+#	Ponderate_valid = numpy.float32((Ponderate_valid==1)*0.8+(Ponderate_valid==0)*0.2)
+
+	X_valid = numpy.load('../../../Data/X_valid.npy')
+	Y_valid = numpy.load('../../../Data/Y_valid.npy')
+
+	X_training = add_noise(X_training)
+	X_valid = add_noise(X_valid)
+
+	X_real_val = numpy.load('/home/tgabas/cloth-recognition/Real_data_for_grasping/X_real_val.npy')
+	Y_real_val = numpy.load('/home/tgabas/cloth-recognition/Real_data_for_grasping/Y_real_val.npy')
+	Ponderate_real_val = numpy.int16(numpy.load('/home/tgabas/cloth-recognition/Real_data_for_grasping/Points_seen_real_val.npy'))
+
+
+	X_training = X_training.reshape([-1,1,240,160]).astype(numpy.float32)
+	Y_training = numpy.float32(Y_training)
+	X_valid = X_valid.reshape([-1,1,240,160]).astype(numpy.float32)
+	Y_valid = numpy.float32(Y_valid)
+
+	X_real_val = X_real_val.reshape([-1,1,240,160]).astype(numpy.float32)
+	Y_real_val = numpy.float32(Y_real_val)
+
+	return X_training,Y_training,X_valid,Y_valid,Ponderate_train,Ponderate_valid,X_real_val,Y_real_val,Ponderate_real_val
+	
+def main(num_epochs = 500):
+	print("Loading data...")
+	X_training,Y_training,X_valid,Y_valid,punts_train,punts_valid,X_real_val,Y_real_val,punts_real_val = load_dataset()
+	input_var = T.tensor4('inputs')
+	target_var = T.matrix('targets')
+	punts_seen = T.matrix('ponderate_targets',dtype = 'int64')
+
+#        prediction_given = T.tensor4('preds')
+
+	print("Building model...")
+
+	# Derive the batch size from a fixed number of batches per epoch
+	number_of_batches = 200
+	length_of_batches = numpy.shape(Y_training)[0]/number_of_batches	# integer division (Python 2)
+
+	# Build network
+	network,regularization= build_network(input_var,target_var,length_of_batches)
+	
+	# Define loss, learning rate, updates and train/validation functions.
+	prediction = lasagne.layers.get_output(network)
+
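+	# The two grasping points are interchangeable, so the loss below is made
+	# order-invariant: loss_pred first selects, per sample, whichever pairing
+	# of predicted points with targets (direct or swapped) has the smaller
+	# squared error, and both the regression and classification terms are
+	# evaluated under that pairing. Coordinates of unseen points are masked
+	# out by punts_seen.
+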
+#	loss = T.min(( ( lasagne.objectives.squared_error(prediction,target_var)*punts_seen).sum(axis=1) , (lasagne.objectives.squared_error(prediction,target_var[:,[3,4,5,0,1,2]])*punts_seen[:,[3,4,5,0,1,2]] ).sum(axis=1)),axis = 0) 
+
+	loss_pred = T.argmin( ( ( lasagne.objectives.squared_error(prediction[:,[0,1,2,5,6,7]],target_var)).sum(axis=1) , (lasagne.objectives.squared_error(prediction[:,[5,6,7,0,1,2]],target_var)).sum(axis=1)),axis = 0)
+
+	loss_class = (lasagne.objectives.categorical_crossentropy(prediction[:,[3,4]],punts_seen[:,0])+lasagne.objectives.categorical_crossentropy(prediction[:,[8,9]],punts_seen[:,3]))*T.eq(loss_pred,0)+(lasagne.objectives.categorical_crossentropy(prediction[:,[8,9]],punts_seen[:,0])+lasagne.objectives.categorical_crossentropy(prediction[:,[3,4]],punts_seen[:,3]))*T.eq(loss_pred,1)
+
+	loss_pred = ((lasagne.objectives.squared_error(prediction[:,[0,1,2,5,6,7]],target_var))*punts_seen).sum(axis=1)*(1*T.eq(loss_pred,0))+((lasagne.objectives.squared_error(prediction[:,[5,6,7,0,1,2]],target_var))*punts_seen[:,[3,4,5,0,1,2]]).sum(axis=1)*(1*T.eq(loss_pred,1))
+
+	loss = loss_class.mean() + loss_pred.mean() + regularization
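+
+	# A minimal numpy illustration of the order-invariant matching (not part
+	# of the Theano graph; shapes and values are illustrative only):
+	#
+	#   pred = numpy.array([[0., 0., 0., 1., 1., 1.]])   # [x1,y1,z1,x2,y2,z2]
+	#   targ = numpy.array([[1., 1., 1., 0., 0., 0.]])   # points in swapped order
+	#   direct  = ((pred - targ)**2).sum(axis=1)                   # 6.0
+	#   swapped = ((pred[:,[3,4,5,0,1,2]] - targ)**2).sum(axis=1)  # 0.0
+	#   order = numpy.argmin([direct, swapped], axis=0)            # -> 1 (swap)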
+
+	learning_rate = theano.shared(numpy.array(0.000001,dtype=theano.config.floatX))
+	learning_rate_decay = 0.9992
+	params = lasagne.layers.get_all_params(network, trainable=True)
+	#updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=learning_rate, momentum=0.9)	
+
+	updates = lasagne.updates.adam(loss, params, learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=1e-08)
+#	updates = lasagne.updates.adagrad(loss, params, learning_rate=1.0, epsilon=1e-06)
+#	updates = lasagne.updates.adadelta(loss, params, learning_rate=1.0, rho=0.95, epsilon=1e-06)
+ 
+	test_prediction = lasagne.layers.get_output(network, deterministic=True)
+
+	test_loss_pred = T.argmin( ( ( lasagne.objectives.squared_error(test_prediction[:,[0,1,2,5,6,7]],target_var)).sum(axis=1) , (lasagne.objectives.squared_error(test_prediction[:,[5,6,7,0,1,2]],target_var)).sum(axis=1)),axis = 0)
+
+#	test_loss_class = (lasagne.objectives.categorical_crossentropy(test_prediction[:,[3,4]],punts_seen[:,0])+lasagne.objectives.categorical_crossentropy(test_prediction[:,[8,9]],punts_seen[:,3]))*T.eq(test_loss_pred,0)+(lasagne.objectives.categorical_crossentropy(test_prediction[:,[8,9]],punts_seen[:,0])+lasagne.objectives.categorical_crossentropy(test_prediction[:,[3,4]],punts_seen[:,3]))*T.eq(test_loss_pred,1)
+
+	test_acc = ((T.mean(T.eq(T.argmax(test_prediction[:,[3,4]], axis=1), punts_seen[:,0]) + T.eq(T.argmax(test_prediction[:,[8,9]], axis=1),punts_seen[:,3]), dtype=theano.config.floatX))*T.eq(test_loss_pred,0)/2 + (T.mean(T.eq(T.argmax(test_prediction[:,[8,9]], axis=1), punts_seen[:,0]) + T.eq(T.argmax(test_prediction[:,[3,4]], axis=1),punts_seen[:,3]), dtype=theano.config.floatX))*T.eq(test_loss_pred,1)/2).mean()
+
+	test_loss_pred = ((lasagne.objectives.squared_error(test_prediction[:,[0,1,2,5,6,7]],target_var))*punts_seen).sum(axis=1)*(1*T.eq(test_loss_pred,0))+((lasagne.objectives.squared_error(test_prediction[:,[5,6,7,0,1,2]],target_var))*punts_seen[:,[3,4,5,0,1,2]]).sum(axis=1)*(1*T.eq(test_loss_pred,1))
+
+	test_loss = test_loss_pred.mean() #+ test_loss_class.mean()
+
+	test_loss_in_col = T.argmin((  (lasagne.objectives.squared_error(test_prediction[:,[0,1,2,5,6,7]],target_var)).sum(axis=1) , (lasagne.objectives.squared_error(test_prediction[:,[5,6,7,0,1,2]],target_var)).sum(axis=1) ),axis = 0)
+
+	test_loss_in_col = (lasagne.objectives.squared_error(test_prediction[:,[0,1,2,5,6,7]],target_var)*punts_seen).transpose()*(1*T.eq(test_loss_in_col,0)) + (lasagne.objectives.squared_error(test_prediction[:,[5,6,7,0,1,2]],target_var)*punts_seen[:,[3,4,5,0,1,2]]).transpose()*test_loss_in_col
+
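+	# Real-robot validation metrics. Y_real_val is assumed to hold a single
+	# annotated point per image, so the closer of the two predicted points is
+	# matched to it; distance_to_real_data then reports the mean Euclidean
+	# distance to that point and real_acc the visibility-classification
+	# accuracy of the matched head.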
+	distance_to_real_data = T.argmin(((lasagne.objectives.squared_error(test_prediction[:,[0,1,2]],target_var)).sum(axis=1) , (lasagne.objectives.squared_error(test_prediction[:,[5,6,7]],target_var)).sum(axis=1)),axis = 0)	
+
+	real_acc=(T.eq(T.argmax(test_prediction[:,[3,4]], axis=1),punts_seen[:,0])*1)*(T.eq(distance_to_real_data,0)*1)+(T.eq(T.argmax(test_prediction[:,[8,9]], axis=1), punts_seen[:,0])*1)*(T.eq(distance_to_real_data,1)*1)
+
+	distance_to_real_data = ((lasagne.objectives.squared_error(test_prediction[:,[0,1,2]],target_var))*punts_seen).sum(axis=1)*(1*T.eq(distance_to_real_data,0))+((lasagne.objectives.squared_error(test_prediction[:,[5,6,7]],target_var))*punts_seen).sum(axis=1)*(1*T.eq(distance_to_real_data,1))
+
+	real_acc = real_acc.mean()
+
+	distance_to_real_data = (T.sqrt(distance_to_real_data)).mean()
+	
+	test_loss_in_col = test_loss_in_col.sum(axis=1)/punts_seen.sum(axis=0)
+ 
+	see_what_improves = theano.function([input_var,target_var,punts_seen],test_loss_in_col)
+
+	train_fn = theano.function([input_var, target_var, punts_seen], loss, updates=updates)
+
+	val_fn = theano.function([input_var, target_var, punts_seen], [test_loss,test_acc])
+
+	real_val_fn = theano.function([input_var,target_var,punts_seen],[distance_to_real_data,real_acc])
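+
+	# Four compiled graphs: train_fn performs one Adam step; val_fn returns
+	# the deterministic validation loss and visibility accuracy; real_val_fn
+	# evaluates the mean distance and accuracy on the real validation set;
+	# and see_what_improves reports the per-coordinate squared error so the
+	# progress of each of the six outputs can be tracked.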
+
+	print("Starting training...")
+	
+	# Define initial variables 
+	validation_errors = []
+	training_errors = []
+	train_distribution = []
+	valid_distribution = []
+	accuracies = []
+	real_distances = []
+	best_real_distance = numpy.Inf
+	best_validation_error = numpy.Inf
+	best_acc = 0
+	length_of_batches_valid = 200
+	number_of_batches_valid = numpy.shape(Y_valid)[0]/length_of_batches_valid
+	train_err = 10	# arbitrary starting value so ant_train_err is defined on the first epoch
+
+	# The training loop:
+	for epoch in range(num_epochs):
+		ant_train_err = train_err	# previous epoch's training error (currently unused)
+		train_err = 0
+		valid_err = 0
+		train_batches = 0
+		val_batches = 0
+		accuracy = 0
+		error_per_train = numpy.zeros(6)
+		error_per_val = numpy.zeros(6)
+
+		# Leftover scaffolding from an earlier conditional, kept so the
+		# indentation of the block below stays unchanged.
+		if True:
+			for batch in iterate_minibatches(X_training, Y_training ,punts_train, length_of_batches, shuffle=True):
+				inputs, targets, punts = batch
+				train_err += train_fn(inputs, targets,punts)
+				train_batches += 1
+				error_per_train += see_what_improves(inputs,targets,punts)
+				# NOTE: this rebinding has no effect on training: the Adam
+				# updates above use a fixed rate of 0.0001, not this shared
+				# variable, so the intended decay never reaches the optimizer.
+				learning_rate = learning_rate*learning_rate_decay
+
+			for batch in iterate_minibatches(X_valid, Y_valid, punts_valid, length_of_batches_valid, shuffle=False):
+				inputs, targets, punts = batch
+				err,acc = val_fn(inputs, targets, punts)
+				error_per_val += see_what_improves(inputs,targets, punts)
+				valid_err += err
+				accuracy += acc
+				val_batches += 1
+			Real_error,Real_acc = real_val_fn(X_real_val,Y_real_val,punts_real_val)
+
+			
+			# Get the real value of training error, validation error and accuracies.
+			train_err = train_err/train_batches
+			valid_err = valid_err/val_batches
+			error_per_val = error_per_val/val_batches
+			error_per_train = error_per_train/train_batches
+			accuracy = accuracy/val_batches
+
+			# Inform about the state of the training process
+			print("Iteration: " + str(epoch))
+			print("      Training loss: " + str(train_err))
+			print("      Validation loss: " + str(valid_err))
+			print("      Accuracy: " + str(accuracy))
+			print("      Error train distribution: "+ str(error_per_train.tolist()))
+			print("      Error valid distribution: "+ str(error_per_val.tolist()))
+			print("      Real_error: " + str(Real_error))
+			print("      Real_accuracy: " + str(Real_acc))
+			now = time.localtime()
+			print("      Achieved on " +str(now[2])+"/"+str(now[1])+"/"+str(now[0])+" at "+ str(now[3])+":"+str(now[4])+":"+str(now[5]))
+			
+			# Keep relevant information about the training process
+			validation_errors.append(valid_err)
+			training_errors.append(train_err)
+			train_distribution.append(error_per_train)
+			valid_distribution.append(error_per_val)
+			accuracies.append(accuracy)
+			real_distances.append(Real_error)
+	
+			if(valid_err < best_validation_error):
+				best_validation_error = valid_err
+				numpy.save("Results_two_points_with_noise/parameters_best_val_loss.npy",lasagne.layers.get_all_param_values(network))
+				print("      Minimum validation error. Parameters saved")
+			if (accuracy > best_acc):
+				best_acc = accuracy
+				numpy.save("Results_two_points_with_noise/parameters_best_val_acc.npy",lasagne.layers.get_all_param_values(network))
+				print("      Maximum accuracy. Parameters saved")
+			if (Real_error < best_real_distance):
+				best_real_distance = Real_error
+				numpy.save("Results_two_points_with_noise/parameters_best_real_distance.npy",lasagne.layers.get_all_param_values(network))
+				print("      Minimum real distance. Parameters saved")
+
+			# Save relevant information about the training process every 5 epochs
+			if (epoch%5==0):
+				numpy.save("Results_two_points_with_noise/validation_errors.npy",validation_errors)
+				numpy.save("Results_two_points_with_noise/training_errors.npy",training_errors)
+				# Persist the accuracy history alongside the error curves
+				numpy.save("Results_two_points_with_noise/accuracies.npy",accuracies)
+				numpy.save("Results_two_points_with_noise/error_train_distribution.npy",train_distribution)
+				numpy.save("Results_two_points_with_noise/error_valid_distribution.npy",valid_distribution)
+				numpy.save("Results_two_points_with_noise/error_real_distances.npy",real_distances)
+
+	print("")
+	print("Finished")
+
+if __name__ == "__main__":
+	main(2000)
+