diff --git a/main.py b/main.py
index 5d17ad7b0734c5cbee8e04f7cf707dfa246c1481..cd9fb4b53a8db71c9aa9c7263dd365da93ebd628 100644
--- a/main.py
+++ b/main.py
@@ -1,37 +1,202 @@
 import bnlearn
 import numpy as np
+import enum
+import random
 
-#df = bnlearn.import_example()
-model = bnlearn.import_DAG('persona_model_3.bif')
-#q_1 = bnlearn.inference.fit(model, variables=['Rain'], evidence={'Cloudy':1, 'Sprinkler':0, 'Wet_Grass':1})
-#q_2 = bnlearn.inference.fit(model, variables=['Rain'], evidence={'Cloudy':1})
-df = bnlearn.sampling(model, n=1000)
-print(df)
-q_1 = bnlearn.inference.fit(model, variables=['user_action'], evidence={'robot_assistance':0,
-                                                                        'attempt':2,
-                                                                         'game_state':0,
-                                                                         'robot_feedback':0,
-                                                                         'memory':1,
-                                                                         'reactivity':1,
-                                                                         'attention':1
-                                                                        })
-print(q_1)
-# update = np.arange(9).reshape(3, 3)
-# model['model'].cpds[4].values[0][0] = update
-# print(model['model'].cpds[4].values[0][0])
-#print("model 0")
-#print(model["model"].cpds[0].values)
-#print("model 1")
-#print(model["model"].cpds[1].values)
-#print("model 2")
-#print(model["model"].cpds[2].values)
-#print("model 3")
-#print(model["model"].cpds[3].values)
-#print("model 4")
-#print(model["model"].cpds[4].values)
-#print("model 5")
-#print(model["model"].cpds[5].values)
-#print("model 6")
-#print(model["model"].cpds[6].values)
-#print("model 7")
-#print(model["model"].cpds[7].values)
+# Constants: enumerations of the discrete states used by persona_model_4.bif
+class User_Action(enum.Enum):
+    """States of the BN variable "user_action" (outcome of a user's move)."""
+    correct = 0
+    wrong = 1
+    timeout = 2
+    # The former `name = "user_action"` member was removed: `name` is reserved
+    # by Enum (it shadows the built-in member property; ValueError on 3.11+).
+    counter = 3  # number of real states; kept because callers read .counter.value
+class Reactivity(enum.Enum):
+    """States of the BN variable "reactivity"."""
+    slow = 0
+    medium = 1
+    fast = 2
+    # `name = "reactivity"` removed: `name` is reserved by Enum (ValueError on 3.11+).
+    counter = 3  # number of real states
+class Memory(enum.Enum):
+    """States of the BN variable "memory"."""
+    low = 0
+    medium = 1
+    high = 2
+    # `name = "memory"` removed: `name` is reserved by Enum (ValueError on 3.11+).
+    counter = 3  # number of real states
+class Robot_Assistance(enum.Enum):
+    """States of the BN variable "robot_assistance" (levels 0..4)."""
+    lev_0 = 0
+    lev_1 = 1
+    lev_2 = 2
+    lev_3 = 3
+    lev_4 = 4
+    # `name = "robot_assistance"` removed: `name` is reserved by Enum (ValueError on 3.11+).
+    counter = 5  # number of real states
+class Robot_Feedback(enum.Enum):
+    """States of the BN variable "robot_feedback"."""
+    yes = 0
+    # NOTE(review): the .bif file names this state "no"; the member is kept as
+    # `false` for backward compatibility — confirm and align at some point.
+    false = 1
+    # `name = "robot_feedback"` removed: `name` is reserved by Enum (ValueError on 3.11+).
+    counter = 2  # number of real states
+class Game_State(enum.Enum):
+    """States of the BN variable "game_state" (phase of the task)."""
+    beg = 0
+    # NOTE(review): the .bif file calls this state "mid" — confirm alignment.
+    middle = 1
+    end = 2
+    # `name = "game_state"` removed: `name` is reserved by Enum (ValueError on 3.11+).
+    counter = 3  # number of real states
+class Attempt(enum.Enum):
+    """States of the BN variable "attempt" (1st..4th try at a move).
+
+    NOTE(review): the .bif file spells these states att_1..att_4.
+    """
+    at_1 = 0
+    at_2 = 1
+    at_3 = 2
+    at_4 = 3
+    # `name = "attempt"` removed: `name` is reserved by Enum (ValueError on 3.11+).
+    counter = 4  # number of real states
+
+# Load the Bayesian network (structure + CPDs) from the BIF file.
+model = bnlearn.import_DAG('persona_model_4.bif')
+# Dump a few conditional tables for eyeballing.
+# NOTE(review): bnlearn/pgmpy order the cpds list internally (presumably
+# alphabetically by variable name), so the labels in these print statements
+# may not match the tables actually printed — confirm against
+# model['model'].cpds[i].variable before trusting them.
+print("user_action -> attempt ", model['model'].cpds[0].values)
+print("user_action -> game_state ", model['model'].cpds[2].values)
+print("robot_feedback -> robot_assistance ", model['model'].cpds[5].values)
+print("user_action -> reactivity, memory ", model['model'].cpds[6].values)
+
+def compute_prob(cpds_table):
+    """Normalise each row of a 2-D counts table into probabilities, in place.
+
+    Args:
+        cpds_table: list of rows of non-negative counts; mutated in place.
+    Return:
+        The same list, where every row with a non-zero sum now sums to exactly
+        1.0.  All-zero rows are left as zeros: the previous `+0.00001` epsilon
+        only existed to dodge a ZeroDivisionError and made every row sum to
+        slightly less than 1.
+    """
+    for row in cpds_table:
+        total = sum(row)
+        if total:  # guard the 0/0 case explicitly instead of with an epsilon
+            row[:] = [count / total for count in row]
+    return cpds_table
+
+def avg_prob(ref_cpds_table, current_cpds_table):
+    '''
+    Element-wise average of two equally shaped 2-D probability tables.
+    Args:
+        ref_cpds_table: table from bnlearn
+        current_cpds_table: table from interaction
+    Return:
+        a new table (same container type as ref_cpds_table) holding the
+        average of both tables; neither input is modified
+    '''
+    # Bug fix: for nested Python lists, .copy() is shallow — the inner rows
+    # were shared, so writing res_cpds_table[i][j] also overwrote
+    # ref_cpds_table.  copy.deepcopy is safe for both lists and ndarrays.
+    import copy
+    res_cpds_table = copy.deepcopy(ref_cpds_table)
+    for row in range(len(ref_cpds_table)):
+        for col in range(len(ref_cpds_table[0])):
+            res_cpds_table[row][col] = (ref_cpds_table[row][col] + current_cpds_table[row][col]) / 2
+    return res_cpds_table
+
+
+def simulation(robot_assistance_vect, robot_feedback_vect):
+    '''
+    Run an interaction loop against the BN: at each step pick a random robot
+    assistance level and feedback type, query the most likely user action,
+    and accumulate per-variable counters later used to update the CPDs.
+    Args:
+        robot_assistance_vect: admissible assistance levels (list of ints)
+        robot_feedback_vect: admissible feedback types (list of ints)
+    Return:
+        attempt_counter_per_action, game_state_counter_per_action,
+        robot_assistance_per_feedback, robot_feedback_per_action
+    '''
+    #metrics we need in order to compute the belief afterwards
+    '''
+    CPD 0: for each attempt 1 to 4 store the number of correct, wrong and timeout
+    '''
+    attempt_counter_per_action = [[0 for j in range(User_Action.counter.value)] for i in range(Attempt.counter.value)]
+    '''
+    CPD 2: for each game_state 0 to 2 store the number of correct, wrong and timeout
+    '''
+    game_state_counter_per_action = [[0 for j in range(User_Action.counter.value)] for i in range(Game_State.counter.value)]
+    '''
+    CPD 5: for each robot feedback store the number of correct, wrong and timeout
+    '''
+    robot_feedback_per_action = [[0 for j in range(User_Action.counter.value)] for i in range(Robot_Feedback.counter.value)]
+    '''
+    CPD 6: for each robot assistance store the number of pos and neg feedback
+    '''
+    robot_assistance_per_feedback = [[0 for j in range(Robot_Feedback.counter.value)] for i in range(Robot_Assistance.counter.value)]
+
+    task_complexity = 5   # number of sub-tasks to complete
+    task_evolution = 0    # how many sub-tasks are done so far
+    attempt_counter = 0   # attempts spent on the current sub-task (0-based)
+    game_state_counter = 0
+
+    iter_counter = 0
+    correct_move_counter = 0
+    wrong_move_counter = 0
+    timeout_counter = 0
+
+    '''Simulation framework'''
+    while(task_evolution<=task_complexity):
+        # map task progress onto the coarse game phase (beg / middle / end)
+        if task_evolution>=0 and task_evolution<=2:
+            game_state_counter = 0
+        elif task_evolution>=3 and task_evolution<=4:
+            game_state_counter = 1
+        else:
+            game_state_counter = 2
+        #select robot assistance
+        robot_assistance_action = random.randint(min(robot_assistance_vect), max(robot_assistance_vect))
+        #select robot feedback
+        robot_feedback_action = random.randint(min(robot_feedback_vect), max(robot_feedback_vect))
+
+        print("robot_assistance {}, attempt {}, game {}, robot_feedback {}".format(robot_assistance_action, attempt_counter, game_state_counter, robot_feedback_action))
+        # Most likely user action given the current context and a fixed
+        # user profile (memory/attention/reactivity all hard-coded to 0).
+        query = bnlearn.inference.fit(model, variables=['user_action'], evidence={'robot_assistance': robot_assistance_action,
+                                                                                  'attempt': attempt_counter,
+                                                                                  'game_state': game_state_counter,
+                                                                                  'robot_feedback': robot_feedback_action,
+                                                                                  'memory': 0,
+                                                                                  'attention': 0,
+                                                                                  'reactivity': 0
+                                                                                  })
+        user_move_action = np.argmax(query.values, axis=0)
+
+        robot_assistance_per_feedback[robot_assistance_action][robot_feedback_action] += 1
+        attempt_counter_per_action[attempt_counter][user_move_action] += 1
+        game_state_counter_per_action[game_state_counter][user_move_action] += 1
+        robot_feedback_per_action[robot_feedback_action][user_move_action] += 1
+
+        iter_counter += 1
+        if user_move_action == 0:
+            # correct move: the task advances.
+            # NOTE(review): attempt_counter is left unchanged here (original
+            # code had `attempt_counter += 0`) — confirm it should not reset
+            # to 0 for the next sub-task.
+            task_evolution += 1
+            correct_move_counter += 1
+        elif user_move_action == 1 and attempt_counter<3:
+            attempt_counter += 1
+            wrong_move_counter += 1
+        elif user_move_action == 2 and attempt_counter<3:
+            attempt_counter += 1
+            # bug fix: action 2 is a timeout (see User_Action) but was being
+            # counted in wrong_move_counter
+            timeout_counter += 1
+        else:
+            # attempts exhausted (attempt_counter >= 3): task advances anyway.
+            # NOTE(review): a wrong move on the last attempt also lands here
+            # and is counted as a timeout — confirm this is intended.
+            task_evolution += 1
+            timeout_counter += 1
+
+        print("correct {}, wrong {}, timeout {}".format(query.values[0],
+                                                    query.values[1],
+                                                    query.values[2]))
+
+
+    print("robot_assistance_per_feedback {}".format(robot_assistance_per_feedback))
+    print("attempt_counter_per_action {}".format(attempt_counter_per_action))
+    print("game_state_counter_per_action {}".format(game_state_counter_per_action))
+    print("robot_feedback_per_action {}".format(robot_feedback_per_action))
+    print("iter {}, correct {}, wrong {}, timeout {}".format(iter_counter, correct_move_counter, wrong_move_counter, timeout_counter))
+
+    return attempt_counter_per_action, game_state_counter_per_action, robot_assistance_per_feedback, robot_feedback_per_action
+
+
+# Run one simulated interaction over every assistance level and feedback type.
+robot_assistance_vect = [0, 1, 2, 3, 4]
+robot_feedback_vect = [0, 1]
+(attempt_counter_per_action, game_state_counter_per_action,
+ robot_assistance_per_feedback, robot_feedback_per_action) = simulation(robot_assistance_vect, robot_feedback_vect)
+
+# Reference tables straight from the model, before any update.
+print("************BEFORE*************")
+for cpd_index in (0, 2, 5, 6):
+    print(model['model'].cpds[cpd_index].values)
+
+# Turn the raw interaction counters into per-row probability tables.
+prob_over_attempt_per_action = compute_prob(attempt_counter_per_action)
+prob_over_game_per_action = compute_prob(game_state_counter_per_action)
+prob_over_feedback_per_action = compute_prob(robot_feedback_per_action)
+prob_over_assistance_per_feedback = compute_prob(robot_assistance_per_feedback)
+
+print("************DURING*************")
+for observed_table in (prob_over_attempt_per_action,
+                       prob_over_game_per_action,
+                       prob_over_feedback_per_action,
+                       prob_over_assistance_per_feedback):
+    print(observed_table)
+
+# Blend each model table 50/50 with the corresponding observed table.
+res_prob_over_attempt_per_action = avg_prob(model['model'].cpds[0].values,
+                                            prob_over_attempt_per_action)
+res_prob_over_game_per_action = avg_prob(model['model'].cpds[2].values,
+                                         prob_over_game_per_action)
+res_prob_over_feedback_per_action = avg_prob(model['model'].cpds[6].values,
+                                             prob_over_feedback_per_action)
+res_prob_over_assistance_per_feedback = avg_prob(model['model'].cpds[5].values,
+                                                 prob_over_assistance_per_feedback)
+
+print("************AFTER*************")
+for updated_table in (res_prob_over_attempt_per_action,
+                      res_prob_over_game_per_action,
+                      res_prob_over_feedback_per_action,
+                      res_prob_over_assistance_per_feedback):
+    print(updated_table)
diff --git a/persona_model_4.bif b/persona_model_4.bif
new file mode 100644
index 0000000000000000000000000000000000000000..0a49f25e9e0f08ab7a4ebec8a99027d553fb1f70
--- /dev/null
+++ b/persona_model_4.bif
@@ -0,0 +1,104 @@
+network persona_model_4 {
+}
+
+%VARIABLES DEFINITION
+%NOTE(review): state names below (mid, att_*, no) differ from the Python
+%enums in main.py (middle, at_*, false) — confirm they are meant to match.
+variable reactivity {
+  type discrete [3] {slow, medium, fast};
+}
+variable memory {
+  type discrete[3] {low, medium, high};
+}
+variable attention {
+  type discrete[3] {low, medium, high};
+}
+variable robot_assistance {
+  type discrete [ 5 ] { lev_0, lev_1, lev_2, lev_3, lev_4 };
+}
+variable attempt {
+  type discrete [ 4 ] { att_1, att_2, att_3, att_4 };
+}
+variable game_state {
+  type discrete [ 3 ] { beg, mid, end };
+}
+variable robot_feedback {
+  type discrete [ 2 ] { yes, no };
+}
+variable user_action {
+  type discrete [ 3 ] { correct, wrong, timeout };
+}
+
+%INDIVIDUAL PROBABILITIES DEFINITION
+%NOTE(review): several variables get BOTH an unconditional table here and a
+%conditional table further down (reactivity, user_action, game_state, attempt,
+%robot_feedback, robot_assistance). A BN node has exactly one CPD — confirm
+%which definition the parser actually keeps.
+probability ( robot_assistance ) {
+  table 0.2, 0.2, 0.2, 0.2, 0.2;
+}
+probability ( game_state ) {
+  table 0.34, 0.33, 0.33;
+}
+probability ( attempt ) {
+  table 0.25, 0.25, 0.25, 0.25;
+}
+probability ( user_action ) {
+  table 0.33, 0.33, 0.34;
+}
+#CPDS 4 #SPECIFICALLY FOR THE GIVEN PATIENT
+probability ( reactivity ) {
+  table 0.34, 0.33, 0.33;
+}
+#CPDS 3 #SPECIFICALLY FOR THE GIVEN PATIENT
+probability ( memory ) {
+  table 0.33, 0.33, 0.34;
+}
+#CPDS 1 #SPECIFICALLY FOR THE GIVEN PATIENT
+probability ( attention ) {
+  table 0.33, 0.33, 0.34;
+}
+probability ( robot_feedback ) {
+  table 0.5, 0.5;
+}
+probability ( reactivity | attention ) {
+  %
+  (low) 0.1, 0.4, 0.5;
+  (medium)  0.4, 0.3, 0.3;
+  (high)  0.7, 0.2, 0.1;
+}
+#CPDS 7
+probability (user_action | memory, reactivity) {
+(low, slow)  0.1, 0.4, 0.5;
+(low, medium) 0.3, 0.5, 0.2;
+(low, fast) 0.4, 0.5, 0.1;
+(medium, slow) 0.5, 0.3, 0.2;
+(medium, medium) 0.4, 0.3, 0.3;
+(medium, fast) 0.5, 0.4, 0.1;
+(high, slow)  0.3, 0.4, 0.3;
+(high, medium) 0.6, 0.3, 0.1;
+(high, fast) 0.7, 0.2, 0.1;
+}
+#CPDS 5
+probability (robot_feedback | user_action) {
+  (correct) 0.8, 0.2;
+  (wrong) 0.5, 0.5;
+  (timeout) 0.2, 0.8;
+}
+#CPDS 6
+#NOTE(review): robot_assistance is also given a DIFFERENT parent set
+#(robot_feedback) at the bottom of this file — only one CPD can be valid.
+#Also these rows use spaces instead of commas, unlike the tables above.
+probability (robot_assistance | user_action) {
+  (correct) 0.05 0.1 0.15 0.3 0.4;
+  (wrong) 0.1 0.2 0.4 0.2 0.1;
+  (timeout) 0.2 0.4 0.2 0.1 0.1;
+}
+#CPDS 2
+probability (game_state | user_action)  {
+   (correct) 0.2, 0.4, 0.4;
+   (wrong) 0.4, 0.4, 0.2;
+   (timeout) 0.6, 0.3, 0.1;
+}
+#CPDS 0
+probability (attempt | user_action)  {
+   (correct) 0.1, 0.2, 0.3, 0.4;
+   (wrong) 0.5, 0.3, 0.15, 0.05;
+   (timeout) 0.4, 0.3, 0.2, 0.1;
+}
+#CPDS 5
+#NOTE(review): second definition of robot_assistance (see #CPDS 6 above);
+#duplicate "#CPDS 5" label, space-separated rows, and no trailing newline.
+probability (robot_assistance | robot_feedback) {
+  (yes) 0.5 0.3 0.1 0.1 0.0;
+  (no) 0.0 0.1 0.1 0.3 0.5;
+}
\ No newline at end of file