diff --git a/main.py b/main.py
index a8063c5c0dbf5a9301c320cf8dc71bb2dda48cd0..659e45b0f21c8d8a32b91870d3986354ce2d02cd 100644
--- a/main.py
+++ b/main.py
@@ -50,11 +50,6 @@ class Attempt(enum.Enum):
     name = "attempt"
     counter = 4
 
-model = bnlearn.import_DAG('persona_model_4.bif')
-print("user_action -> attempt ", model['model'].cpds[0].values)
-print("user_action -> game_state ", model['model'].cpds[2].values)
-print("robot_feedback -> robot_assistance ", model['model'].cpds[5].values)
-print("user_action -> reactivity, memory ", model['model'].cpds[6].values)
 
 
 def plot2D(save_path, n_episodes, *y):
@@ -132,7 +127,7 @@ def generate_user_action(actions_prob):
     return action_id
 
 
-def simulation(robot_assistance_vect, robot_feedback_vect, memory, attention, reactivity, epochs=50, non_stochastic=False):
+def simulation(robot_assistance_vect, robot_feedback_vect, persona_cpds, memory, attention, reactivity, epochs=50, task_complexity=5, non_stochastic=False):
     #metrics we need in order to compute the afterwords the belief
     '''
     CPD 0: for each attempt 1 to 4 store the number of correct, wrong and timeout
@@ -160,16 +155,16 @@ def simulation(robot_assistance_vect, robot_feedback_vect, memory, attention, re
 
     for e in range(epochs):
         '''Simulation framework'''
-        task_complexity = 5
+        #counters
         task_evolution = 0
-
         attempt_counter = 0
-        game_state_counter = 0
         iter_counter = 0
         correct_move_counter = 0
         wrong_move_counter = 0
         timeout_counter = 0
+
         while(task_evolution<=task_complexity):
+            # these if/elif/else branches classify task_evolution into a game state: beginning, middle, end
             if task_evolution>=0 and task_evolution<=2:
                 game_state_counter = 0
             elif task_evolution>=3 and task_evolution<=4:
@@ -182,7 +177,7 @@ def simulation(robot_assistance_vect, robot_feedback_vect, memory, attention, re
             robot_feedback_action = random.randint(min(robot_feedback_vect), max(robot_feedback_vect))
 
             print("robot_assistance {}, attempt {}, game {}, robot_feedback {}".format(robot_assistance_action, attempt_counter, game_state_counter, robot_feedback_action))
-            query = bnlearn.inference.fit(model, variables=['user_action'], evidence={'robot_assistance': robot_assistance_action,
+            query = bnlearn.inference.fit(persona_cpds, variables=['user_action'], evidence={'robot_assistance': robot_assistance_action,
                                                                                       'attempt': attempt_counter,
                                                                                       'game_state': game_state_counter,
                                                                                       'robot_feedback': robot_feedback_action,
@@ -193,22 +188,27 @@ def simulation(robot_assistance_vect, robot_feedback_vect, memory, attention, re
             #generate a random number and trigger one of the three possible action
             user_action = generate_user_action(query.values)#np.argmax(query.values, axis=0)
 
+            #updates counters for plots
             robot_assistance_per_feedback[robot_feedback_action][robot_assistance_action] += 1
             attempt_counter_per_action[user_action][attempt_counter] += 1
             game_state_counter_per_action[user_action][game_state_counter] += 1
             robot_feedback_per_action[user_action][robot_feedback_action] += 1
 
+            #updates counters for simulation
             iter_counter += 1
             if user_action == 0:
                 attempt_counter = 0
                 task_evolution += 1
                 correct_move_counter += 1
+            #if the user made a wrong move and still did not reach the maximum number of attempts
             elif user_action == 1 and attempt_counter<3:
                 attempt_counter += 1
                 wrong_move_counter += 1
+            # if the user did not move any token and still did not reach the maximum number of attempts
             elif user_action == 2 and attempt_counter<3:
                 attempt_counter += 1
                 wrong_move_counter += 1
+            # the robot or therapist makes the correct move on the patient's behalf
             else:
                 attempt_counter = 0
                 task_evolution += 1
@@ -223,25 +223,27 @@ def simulation(robot_assistance_vect, robot_feedback_vect, memory, attention, re
         print("iter {}, correct {}, wrong {}, timeout {}".format(iter_counter, correct_move_counter, wrong_move_counter, timeout_counter))
 
         print("correct_move {}, wrong_move {}, timeout {}".format(correct_move_counter, wrong_move_counter, timeout_counter))
+
         #transform counters into probabilities
         prob_over_attempt_per_action = compute_prob(attempt_counter_per_action)
         prob_over_game_per_action = compute_prob(game_state_counter_per_action)
         prob_over_feedback_per_action = compute_prob(robot_feedback_per_action)
         prob_over_assistance_per_feedback = compute_prob(robot_assistance_per_feedback)
+
         #average the probabilities obtained with the cpdf tables
-        updated_prob_over_attempt_per_action = average_prob(np.transpose(model['model'].cpds[0].values),
+        updated_prob_over_attempt_per_action = average_prob(np.transpose(persona_cpds['model'].cpds[0].values),
                                                         prob_over_attempt_per_action)
-        updated_prob_over_game_per_action = average_prob(np.transpose(model['model'].cpds[2].values),
+        updated_prob_over_game_per_action = average_prob(np.transpose(persona_cpds['model'].cpds[2].values),
                                                      prob_over_game_per_action)
-        updated_prob_over_feedback_per_action = average_prob(np.transpose(model['model'].cpds[6].values),
+        updated_prob_over_feedback_per_action = average_prob(np.transpose(persona_cpds['model'].cpds[6].values),
                                                          prob_over_feedback_per_action)
-        updated_prob_over_assistance_per_feedback = average_prob(np.transpose(model['model'].cpds[5].values),
+        updated_prob_over_assistance_per_feedback = average_prob(np.transpose(persona_cpds['model'].cpds[5].values),
                                                              prob_over_assistance_per_feedback)
 
-        model['model'].cpds[0].values = np.transpose(updated_prob_over_attempt_per_action)
-        model['model'].cpds[2].values = np.transpose(updated_prob_over_game_per_action)
-        model['model'].cpds[6].values = np.transpose(updated_prob_over_feedback_per_action)
-        model['model'].cpds[5].values = np.transpose(updated_prob_over_assistance_per_feedback)
+        persona_cpds['model'].cpds[0].values = np.transpose(updated_prob_over_attempt_per_action)
+        persona_cpds['model'].cpds[2].values = np.transpose(updated_prob_over_game_per_action)
+        persona_cpds['model'].cpds[6].values = np.transpose(updated_prob_over_feedback_per_action)
+        persona_cpds['model'].cpds[5].values = np.transpose(updated_prob_over_assistance_per_feedback)
 
         n_correct_per_episode[e] = correct_move_counter
         n_wrong_per_episode[e] = wrong_move_counter
@@ -252,8 +254,16 @@ def simulation(robot_assistance_vect, robot_feedback_vect, memory, attention, re
 robot_assistance = [i for i in range(Robot_Assistance.counter.value)]
 robot_feedback = [i for i in range(Robot_Feedback.counter.value)]
 epochs = 10
+#initialise memory, attention and reactivity variables
 memory = 0; attention = 0; reactivity = 1;
-results = simulation(robot_assistance, robot_feedback, memory, attention, reactivity, 10)
+#load the persona model and run a simulation
+persona_cpds = bnlearn.import_DAG('persona_model.bif')
+print("user_action -> attempt ", persona_cpds['model'].cpds[0].values)
+print("user_action -> game_state ", persona_cpds['model'].cpds[2].values)
+print("robot_feedback -> robot_assistance ", persona_cpds['model'].cpds[5].values)
+print("user_action -> reactivity, memory ", persona_cpds['model'].cpds[6].values)
+
+results = simulation(robot_assistance, robot_feedback, persona_cpds, memory, attention, reactivity, epochs=10, task_complexity=5, non_stochastic=False)
 plot_path = "epoch_"+str(epochs)+"_memory_"+str(memory)+"_attention_"+str(attention)+"_reactivity_"+str(reactivity)+".jpg"
 plot2D(plot_path, epochs, results)
 
diff --git a/persona_model_4.bif b/persona_model.bif
similarity index 100%
rename from persona_model_4.bif
rename to persona_model.bif