diff --git a/main.py b/main.py
index 01e168b6839a9b9eda52002848a69f28d2bad090..6cd75f252e28229ffd21b89f7158b5ffeb3628e5 100644
--- a/main.py
+++ b/main.py
@@ -247,9 +247,9 @@ def main():
     #################GENERATE SIMULATION################################
     parser = argparse.ArgumentParser()
     parser.add_argument('--bn_user_model_filename', '--bn_user_model', type=str,help="file path of the user bn model",
-                        default="/home/pal/Documents/Framework/bn_generative_model/bn_persona_model/persona_model.bif")
+                        default="/home/pal/Documents/Framework/bn_generative_model/bn_persona_model/persona_test.bif")
     parser.add_argument('--bn_agent_model_filename', '--bn_agent_model', type=str,help="file path of the agent bn model",
-                        default="/home/pal/Documents/Framework/bn_generative_model/bn_agent_model/agent_assistive_model.bif")
+                        default="/home/pal/Documents/Framework/bn_generative_model/bn_agent_model/agent_test.bif")
     parser.add_argument('--epoch', '--epoch', type=int,help="number of epochs in the simulation", default=200)
     parser.add_argument('--run', '--run', type=int, help="number of runs in the simulation", default=50)
     parser.add_argument('--output_policy_filename', '--p', type=str,help="output policy from the simulation",
diff --git a/questionnaire_gui.py b/questionnaire_gui.py
index f60095f7e3626ab4e1abde75b5e318ddb49cbbf3..b5442d003e841ee67bcf0f024dce929d3deb8297 100644
--- a/questionnaire_gui.py
+++ b/questionnaire_gui.py
@@ -337,9 +337,12 @@ class GUI():
         Button(self.frame4, text="OK", command=self.get_value_ass_att).grid(row=18+row_att3)
 
         self.notebook.pack()
-        Label(self.root, text="Total:").pack()
+        #Label(self.root, text="Total:").pack()
         #Label(self.root, textvariable=total).pack()
 
+    def generate_bn_files(self):
+        pass  # placeholder body: without it the patched file is a SyntaxError
+
     def save(self):
         print(":",self.id.get())
         print(":", self.user_model.get())
@@ -396,74 +399,6 @@ class GUI():
         print(":", self.selected_value_ass_5_att_3.get())
         print(":", self.selected_value_ass_5_att_4.get())
 
-    def get_value_ass_1_att_1(self):
-        pass
-
-    def get_value_ass_2_att_1(self):
-        pass
-
-    def get_value_ass_3_att_1(self):
-        pass
-
-    def get_value_ass_4_att_1(self):
-        pass
-
-    def get_value_ass_5_att_1(self):
-        pass
-    ################################
-    def get_value_ass_0_att_2(self):
-        pass
-
-    def get_value_ass_1_att_2(self):
-        pass
-
-    def get_value_ass_2_att_2(self):
-        pass
-
-    def get_value_ass_3_att_2(self):
-        pass
-
-    def get_value_ass_4_att_2(self):
-        pass
-
-    def get_value_ass_5_att_2(self):
-        pass
-    ################################
-    def get_value_ass_0_att_3(self):
-        pass
-
-    def get_value_ass_1_att_3(self):
-        pass
-
-    def get_value_ass_2_att_3(self):
-        pass
-
-    def get_value_ass_3_att_3(self):
-        pass
-
-    def get_value_ass_4_att_3(self):
-        pass
-
-    def get_value_ass_5_att_3(self):
-        pass
-    #################################
-    def get_value_ass_0_att_4(self):
-        pass
-
-    def get_value_ass_1_att_4(self):
-        pass
-
-    def get_value_ass_2_att_4(self):
-        pass
-
-    def get_value_ass_3_att_4(self):
-        pass
-
-    def get_value_ass_4_att_4(self):
-        pass
-
-    def get_value_ass_5_att_4(self):
-        pass
     #################################
 
 
diff --git a/therapistBNInit.py b/therapistBNInit.py
index 07092251240a39b967ce0bf0b50bb83a1cd0fe6a..78de0f8dcf1a4f96f9f141d94ddd164407db9d23 100644
--- a/therapistBNInit.py
+++ b/therapistBNInit.py
@@ -3,151 +3,208 @@ import random
 from bn_variables import User_Action, Game_State, Attempt, Agent_Assistance
 
 
-#1 create two files where to include the user model and agent model, respectively.
-def create_template(path, filename_out, filename_in):
-    dest = open(path+"/"+filename_out, "w")
-    orig = open(path+"/"+filename_in, "r")
-    contents = orig.readlines()
-    if orig.mode == 'r':
-        dest.writelines(contents)
-
-user_model_path = "/home/pal/Documents/Framework/bn_generative_model/bn_persona_model"
-user_model_filename_out = "persona_test.bif"
-user_model_filename_in = "persona_model_test.bif"
-agent_model_path = "/home/pal/Documents/Framework/bn_generative_model/bn_agent_model"
-agent_model_filename_out = "agent_test.bif"
-agent_model_filename_in = "agent_model_test.bif"
-
-create_template(path=user_model_path, filename_out=user_model_filename_out, filename_in=user_model_filename_in)
+class Therapist_BN_Model():
+    def __init__(self, bn_model_folder, user_model_template, agent_model_template, user_id, with_feedback):
+        self.bn_model_folder = bn_model_folder
+        self.user_model_template = bn_model_folder+"/"+user_model_template
+        self.agent_model_template = bn_model_folder+"/"+agent_model_template
+        self.user_model_filename = bn_model_folder+"/"+"user_model_id_"+str(user_id)+"_"+str(with_feedback)+".bif"
+        self.agent_model_filename = bn_model_folder+"/"+"agent_model_id_"+str(user_id)+"_"+str(with_feedback)+".bif"
+        self.user_model = None
+        self.agent_model = None
+        self.attempt_preferences = [[0 for i in range(Attempt.counter.value)] for j in
+                                     range(User_Action.counter.value)]
+        self.game_preferences = [[0 for i in range(Game_State.counter.value)] for j in
+                                  range(User_Action.counter.value)]
+        self.user_action_preferences_on_agent_assistance = [[0 for i in range(User_Action.counter.value)]
+                                              for j in range(Agent_Assistance.counter.value)]
+        self.agent_assistance_preferences_on_attempt_game = [[0 for ass in range(Agent_Assistance.counter.value)]
+                                        for g in range(Game_State.counter.value)
+                                         for a in range(Attempt.counter.value)]
 
+    #1 create two files where to include the user model and agent model, respectively.
+    def create_template(self, filename_out, filename_in):
+        # Context managers close both handles (the previous version leaked
+        # them); the vacuous `orig.mode == 'r'` guard is dropped.
+        with open(filename_in, "r") as orig:
+            with open(filename_out, "w") as dest:
+                dest.writelines(orig.readlines())
+
+
+    def get_attempt_given_user_action(self, attempt_preferences_for_correct_move):
+        attempt_preferences_for_wrong_move = [(5 - attempt_preferences_for_correct_move[a]) / 2 for a in
+                                              range(Attempt.counter.value)]
+        attempt_preferences_for_timeout = [(5 - attempt_preferences_for_correct_move[a]) / 2 for a in
+                                           range(Attempt.counter.value)]
+        #normalise values
+        for elem in range(len(self.attempt_preferences)):
+            attempt_preferences_for_correct_move = [attempt_preferences_for_correct_move[i] * User_Action.counter.value for
+                                                    i in
+                                                    range(len(attempt_preferences_for_correct_move))]
+            attempt_preferences_for_wrong_move = [attempt_preferences_for_wrong_move[i] * User_Action.counter.value for i in
+                                                  range(len(attempt_preferences_for_wrong_move))]
+            attempt_preferences_for_timeout = [attempt_preferences_for_timeout[i] * User_Action.counter.value for i in
+                                               range(len(attempt_preferences_for_timeout))]
+            self.attempt_preferences = [attempt_preferences_for_correct_move, attempt_preferences_for_wrong_move,
+                                   attempt_preferences_for_timeout]
+
+        return self.attempt_preferences
+
+    def get_game_state_given_user_action(self, game_preferences_for_correct_move):
+        game_preferences_for_wrong_move = [(5 - game_preferences_for_correct_move[a]) / 2 for a in
+                                           range(Game_State.counter.value)]
+        game_preferences_for_timeout = [(5 - game_preferences_for_correct_move[a]) / 2 for a in
+                                        range(Game_State.counter.value)]
+
+        for elem in range(len(self.game_preferences)):
+            game_preferences_for_correct_move = [game_preferences_for_correct_move[i] * User_Action.counter.value for i in
+                                                 range(len(game_preferences_for_correct_move))]
+            game_preferences_for_wrong_move = [game_preferences_for_wrong_move[i] * User_Action.counter.value for i in
+                                               range(len(game_preferences_for_wrong_move))]
+            game_preferences_for_timeout = [game_preferences_for_timeout[i] * User_Action.counter.value for i in
+                                            range(len(game_preferences_for_timeout))]
+            self.game_preferences = [game_preferences_for_correct_move, game_preferences_for_wrong_move,
+                                game_preferences_for_timeout]
+
+        return self.game_preferences
+
+    def get_user_action_given_agent_assistance(self, assistance_preferences_for_correct_move):
+        assistance_preferences_for_wrong_move = [(5 - assistance_preferences_for_correct_move[i]) / 2 for i in
+                                                 range(len(assistance_preferences_for_correct_move))]
+        assistance_preferences_for_timeout = [(5 - assistance_preferences_for_correct_move[i]) / 2 for i in
+                                              range(len(assistance_preferences_for_correct_move))]
+        #normalise
+        assistance_preferences_for_correct_move = [assistance_preferences_for_correct_move[i] * User_Action.counter.value for
+                                                   i in
+                                                   range(len(assistance_preferences_for_correct_move))]
+        assistance_preferences_for_wrong_move = [assistance_preferences_for_wrong_move[i] * User_Action.counter.value for i in
+                                                 range(len(assistance_preferences_for_wrong_move))]
+        assistance_preferences_for_timeout = [assistance_preferences_for_timeout[i] * User_Action.counter.value for i in
+                                              range(len(assistance_preferences_for_timeout))]
+        for elem in range(Agent_Assistance.counter.value):
+
+            den = (assistance_preferences_for_correct_move[elem] + assistance_preferences_for_wrong_move[elem] +
+                   assistance_preferences_for_timeout[elem])
+            self.user_action_preferences_on_agent_assistance[elem] = [assistance_preferences_for_correct_move[elem] / den,
+                                                        assistance_preferences_for_wrong_move[elem] / den,
+                                                        assistance_preferences_for_timeout[elem] / den]
+
+        return self.user_action_preferences_on_agent_assistance
+
+
+    def get_agent_assistance_given_attempt_and_game_state(self, agent_assistance_preferences_for_correct_move_game_attempt):
+        it = 0
+        for g in range(Game_State.counter.value):
+            for a in range(Attempt.counter.value):
+                print("it:", it)
+                # get the preference
+                agent_assistance_preferences_for_correct_move_game_attempt = [[(agent_assistance_preferences_for_correct_move_game_attempt[i][j] * Agent_Assistance.counter.value) for j in
+                                           range(len(agent_assistance_preferences_for_correct_move_game_attempt[i]))] for i in
+                                          range(len(agent_assistance_preferences_for_correct_move_game_attempt))]
+                self.agent_assistance_preferences_on_attempt_game[it] = list(
+                    map(lambda x: x / sum(agent_assistance_preferences_for_correct_move_game_attempt[a]), agent_assistance_preferences_for_correct_move_game_attempt[a]))
+                it += 1
+        return self.agent_assistance_preferences_on_attempt_game
+
+
+
+bn_model_folder = "/home/pal/Documents/Framework/bn_generative_model/bn_models"
+user_model_filename = "persona_test.bif"#provided by the gui
+user_model_template = "persona_model_template.bif"
+agent_model_filename = "agent_test.bif"
+agent_model_template = "agent_model_template.bif"#provided by the gui
+
+
+
+bn_models = Therapist_BN_Model(bn_model_folder=bn_model_folder, user_model_template=user_model_template,
+                               agent_model_template=agent_model_template, user_id="1", with_feedback=True)
 user_action = [0, 1, 2]
 attempt = [0, 1, 2, 3]
 game = [0, 1, 2]
-agent_assistance = [0, 1,2 ,3 ,4 ,5 ]
+agent_assistance = [0, 1,2 ,3 ,4 ,5]
 max_value_for_user = 15
 max_value_for_assistance = 30
 
+attempt_preferences_for_correct_move = [2, 3, 4, 4]
+assistance_preferences_for_correct_move = [2, 3, 3, 4, 5, 5]
+game_preferences_for_correct_move = [2, 3, 4]
+assistance_preferences_for_correct_move_game_attempt = [[4, 5, 2, 1, 1, 1],
+                          [1, 3, 5, 3, 1, 1],
+                          [1, 2, 2, 5, 4, 1],
+                          [1, 1, 2, 5, 4, 4]]
+
 user_action_vars = ["(correct)", "(wrong)", "(timeout)"]
 agent_assistance_vars = ["(lev_0)", "(lev_1)", "(lev_2)", "(lev_3)", "(lev_4)", "(lev_5)"]
-user_action_given_agent_assistance = [[0 for i in range(User_Action.counter.value)]
-                                    for j in range(Agent_Assistance.counter.value)]
-attempt_given_user_action = [[0 for i in range(Attempt.counter.value)] for j in
-                                   range(User_Action.counter.value)]
-game_given_user_action = [[0 for i in range(Game_State.counter.value)] for j in
-                                      range(User_Action.counter.value)]
+attempt_game_vars = [
+"(beg, att_1)", "(beg, att_2)","(beg, att_3)","(beg, att_4)",
+"(mid, att_1)", "(mid, att_2)","(mid, att_3)","(mid, att_4)",
+"(end, att_1)", "(end, att_2)","(end, att_3)","(end, att_4)",
+]
 
+#initialise the two models with the templates
+bn_models.create_template(filename_out=bn_models.user_model_filename, filename_in=bn_models.user_model_template)
+bn_models.create_template(filename_out=bn_models.agent_model_filename, filename_in=bn_models.agent_model_template)
+
+
+#write all the values on a bif file
+
+
+user_model = open(bn_models.user_model_filename, "a+")
+agent_model = open(bn_models.agent_model_filename, "a+")
 
-attempt_preferences_for_correct_move = [2, 3, 4, 4]
-attempt_preferences_for_wrong_move = [(5-attempt_preferences_for_correct_move[a])/2 for a in range(Attempt.counter.value)]
-attempt_preferences_for_timeout = [(5-attempt_preferences_for_correct_move[a])/2 for a in range(Attempt.counter.value)]
-
-attempt_preferences_for_correct_move = [attempt_preferences_for_correct_move[i] * User_Action.counter.value for i in
-                                     range(len(attempt_preferences_for_correct_move))]
-attempt_preferences_for_wrong_move = [attempt_preferences_for_wrong_move[i] * User_Action.counter.value for i in
-                                     range(len(attempt_preferences_for_wrong_move))]
-attempt_preferences_for_timeout = [attempt_preferences_for_timeout[i] * User_Action.counter.value for i in
-                                     range(len(attempt_preferences_for_timeout))]
-attempt_preferences = [attempt_preferences_for_correct_move, attempt_preferences_for_wrong_move, attempt_preferences_for_timeout]
-
-user_model = open(user_model_path+"/"+user_model_filename_out, "a+")
 user_model.write("probability (game_state | user_action)  { \n")
-game_preferences_for_correct_move = [2, 3, 4]
-game_preferences_for_wrong_move = [(5-game_preferences_for_correct_move[a])/2 for a in range(Game_State.counter.value)]
-game_preferences_for_timeout = [(5-game_preferences_for_correct_move[a])/2 for a in range(Game_State.counter.value)]
-
-game_preferences_for_correct_move = [game_preferences_for_correct_move[i] * User_Action.counter.value for i in
-                                     range(len(game_preferences_for_correct_move))]
-game_preferences_for_wrong_move = [game_preferences_for_wrong_move[i] * User_Action.counter.value for i in
-                                     range(len(game_preferences_for_wrong_move))]
-game_preferences_for_timeout = [game_preferences_for_timeout[i] * User_Action.counter.value for i in
-                                     range(len(game_preferences_for_timeout))]
-game_preferences = [game_preferences_for_correct_move, game_preferences_for_wrong_move, game_preferences_for_timeout]
-
-for elem in range(len(game_given_user_action)):
-    game_preferences[elem] = list(map(lambda x:x/sum(game_preferences[elem]), game_preferences[elem]))
+bn_models.game_preferences = bn_models.get_game_state_given_user_action(game_preferences_for_correct_move)
+
+for elem in range(len(bn_models.game_preferences)):
+    bn_models.game_preferences[elem] = list(map(lambda x:x/sum(bn_models.game_preferences[elem]), bn_models.game_preferences[elem]))
     user_model.write(str(user_action_vars[elem]) + "\t" +
-                     str(game_preferences[elem][0]) + "," +
-                     str(game_preferences[elem][1]) + "," +
-                     str(game_preferences[elem][2]) + "; \n")
+                     str(bn_models.game_preferences[elem][0]) + "," +
+                     str(bn_models.game_preferences[elem][1]) + "," +
+                     str(bn_models.game_preferences[elem][2]) + "; \n")
 user_model.write("}\n")
 
-
 user_model.write("probability (attempt | user_action)  { \n")
-for elem in range(len(attempt_given_user_action)):
-    attempt_preferences[elem] = list(map(lambda x:x/sum(attempt_preferences[elem]), attempt_preferences[elem]))
+bn_models.attempt_preferences = bn_models.get_attempt_given_user_action(attempt_preferences_for_correct_move)
+for elem in range(len(bn_models.attempt_preferences)):
+    bn_models.attempt_preferences[elem] = list(map(lambda x:x/sum(bn_models.attempt_preferences[elem]), bn_models.attempt_preferences[elem]))
     user_model.write(str(user_action_vars[elem])+ "\t" +
-                     str(attempt_preferences[elem][0])+","+
-                     str(attempt_preferences[elem][1])+","+
-                     str(attempt_preferences[elem][2]) + "," +
-                     str(attempt_preferences[elem][3])+"; \n")
+                     str(bn_models.attempt_preferences[elem][0])+","+
+                     str(bn_models.attempt_preferences[elem][1])+","+
+                     str(bn_models.attempt_preferences[elem][2]) + "," +
+                     str(bn_models.attempt_preferences[elem][3])+"; \n")
 user_model.write("}\n")
 
-
-
 user_model.write("probability (user_action | agent_assistance) { \n")
-assistance_score_for_correct_move = [2, 3, 3, 4, 5, 5]
-assistance_score_for_wrong_move = [(5 - assistance_score_for_correct_move[i]) / 2 for i in
-                                   range(len(assistance_score_for_correct_move))]
-assistance_score_for_timeout = [(5 - assistance_score_for_correct_move[i]) / 2 for i in
-                                range(len(assistance_score_for_correct_move))]
-assistance_score_for_correct_move = [assistance_score_for_correct_move[i] * User_Action.counter.value for i in
-                                     range(len(assistance_score_for_correct_move))]
-assistance_score_for_wrong_move = [assistance_score_for_wrong_move[i] * User_Action.counter.value for i in
-                                     range(len(assistance_score_for_wrong_move))]
-assistance_score_for_timeout = [assistance_score_for_timeout[i] * User_Action.counter.value for i in
-                                     range(len(assistance_score_for_timeout))]
-
+bn_models.assistance_preferences = bn_models.get_user_action_given_agent_assistance(assistance_preferences_for_correct_move)
 for elem in range(Agent_Assistance.counter.value):
     #setting up the effect of each level on the user_action
-    den = (assistance_score_for_correct_move[elem]+assistance_score_for_wrong_move[elem]+assistance_score_for_timeout[elem])
-    user_action_given_agent_assistance[elem] = [assistance_score_for_correct_move[elem]/den, assistance_score_for_wrong_move[elem]/den,
-                                                assistance_score_for_timeout[elem]/den]
     user_model.write(str(agent_assistance_vars[elem])+ "\t"+
-                     str(user_action_given_agent_assistance[elem][0])+","+
-                     str(user_action_given_agent_assistance[elem][1])+","+
-                     str(user_action_given_agent_assistance[elem][2])+"; \n")
+                     str(bn_models.assistance_preferences[elem][0])+","+
+                     str(bn_models.assistance_preferences[elem][1])+","+
+                     str(bn_models.assistance_preferences[elem][2])+"; \n")
 user_model.write("}")
 
-
-create_template(path=agent_model_path, filename_out=agent_model_filename_out, filename_in=agent_model_filename_in )
-
-agent_assistance_given_game_attempt = [[0 for ass in range(Agent_Assistance.counter.value)]
-                                        for g in range(Game_State.counter.value)
-                                         for a in range(Attempt.counter.value)]
-agent_model = open(agent_model_path+"/"+agent_model_filename_out, "a+")
-
-attempt_game_vars = [
-"(beg, att_1)", "(beg, att_2)","(beg, att_3)","(beg, att_4)",
-"(mid, att_1)", "(mid, att_2)","(mid, att_3)","(mid, att_4)",
-"(end, att_1)", "(end, att_2)","(end, att_3)","(end, att_4)",
-]
-
 agent_model.write("probability (agent_assistance | game_state, attempt) { \n")
-assistance_preferences = [[4, 5, 2, 1, 1, 1],
-                          [1, 3, 5, 3, 1, 1],
-                          [1, 2, 2, 5, 4, 1],
-                          [1, 1, 2, 5, 4, 4]]
+bn_models.agent_assistance_preferences_on_attempt_game = bn_models.get_agent_assistance_given_attempt_and_game_state(assistance_preferences_for_correct_move_game_attempt)
 
 it = 0
 for g in range(Game_State.counter.value):
     for a in range(Attempt.counter.value):
         print("it:", it)
         #get the preference
-        assistance_preferences = [[(assistance_preferences[i][j] * Agent_Assistance.counter.value)  for j in range(len(assistance_preferences[i]))] for i in range(len(assistance_preferences))]
-        agent_assistance_given_game_attempt[it] = list(map(lambda x:x/sum(assistance_preferences[a]), assistance_preferences[a]))
         agent_model.write(str(attempt_game_vars[it])+"\t"+
-                          str(agent_assistance_given_game_attempt[it][0])+", "+
-                          str(agent_assistance_given_game_attempt[it][1]) + ", " +
-                          str(agent_assistance_given_game_attempt[it][2]) + ", " +
-                          str(agent_assistance_given_game_attempt[it][3]) + ", " +
-                          str(agent_assistance_given_game_attempt[it][4]) + ", " +
-                          str(agent_assistance_given_game_attempt[it][5]) + "; \n"
+                          str(bn_models.agent_assistance_preferences_on_attempt_game[it][0])+", "+
+                          str(bn_models.agent_assistance_preferences_on_attempt_game[it][1]) + ", " +
+                          str(bn_models.agent_assistance_preferences_on_attempt_game[it][2]) + ", " +
+                          str(bn_models.agent_assistance_preferences_on_attempt_game[it][3]) + ", " +
+                          str(bn_models.agent_assistance_preferences_on_attempt_game[it][4]) + ", " +
+                          str(bn_models.agent_assistance_preferences_on_attempt_game[it][5]) + "; \n"
                           )
         it += 1
 agent_model.write("}")
 
 
-print(attempt_given_user_action)
-print(game_given_user_action)
-print(user_action_given_agent_assistance)
-print(agent_assistance_given_game_attempt)
\ No newline at end of file
+print(bn_models.attempt_preferences)
+print(bn_models.game_preferences)
+print(bn_models.assistance_preferences)
+print(bn_models.agent_assistance_preferences_on_attempt_game)
\ No newline at end of file