Skip to content
Snippets Groups Projects
Commit 6e9d0ce6 authored by Antonio Andriella's avatar Antonio Andriella
Browse files

Working code with persona, real_user and robot in the simulation

parent 32365577
No related branches found
No related tags found
No related merge requests found
network persona_model_4 {
network persona_model {
}
%VARIABLES DEFINITION
......
import random
import bn_functions
def compute_next_state(user_action, task_evolution, attempt_counter, correct_move_counter,
                       wrong_move_counter, timeout_counter
                       ):
    '''
    Advance the game state given the user's action in the current turn.

    Args:
        user_action: 0 (correct move), 1 (wrong move), 2 (timeout)
        task_evolution: progress index of the task (beg, mid, end)
        attempt_counter: attempts spent on the current token
        correct_move_counter: running count of correct moves
        wrong_move_counter: running count of wrong moves
        timeout_counter: running count of timeouts
    Return:
        (task_evolution, attempt_counter, correct_move_counter,
         wrong_move_counter, timeout_counter) updated for this turn
    '''
    within_attempt_budget = attempt_counter < 3

    if user_action == 1 and within_attempt_budget:
        # wrong move, user may still retry the same token
        attempt_counter += 1
        wrong_move_counter += 1
    elif user_action == 2 and within_attempt_budget:
        # no token moved before the timeout, user may still retry
        attempt_counter += 1
        timeout_counter += 1
    else:
        # either the user made the correct move (user_action == 0), or the
        # attempt budget is exhausted and the robot/therapist completes the
        # move on the patient's behalf: the task advances either way
        task_evolution += 1
        attempt_counter = 0
        correct_move_counter += 1

    return task_evolution, attempt_counter, correct_move_counter, wrong_move_counter, timeout_counter
def get_user_action_prob():
def get_stochatic_action(actions_prob):
    '''
    Sample one action id from the given action distribution.

    NOTE(review): the name keeps the original spelling ("stochatic") because
    external callers reference it.

    Args:
        actions_prob: three probabilities, in order: correct move, wrong move,
            timeout (as produced by the Persona BN)
    Return:
        the sampled action id: 0 (correct), 1 (wrong) or 2 (timeout)
    '''
    p_correct = actions_prob[0]
    p_wrong = actions_prob[1]
    # the third entry (timeout) is the fallback, so it never needs reading
    draw = random.uniform(0, 1)

    # draw falls in [0, p_correct] -> correct move
    if draw <= p_correct:
        return 0
    # draw falls in (p_correct, p_correct + p_wrong) -> wrong move
    if draw < p_correct + p_wrong:
        return 1
    # anything beyond the first two mass segments -> timeout
    return 2
network persona_model_3 {
}
%definition of the variables
%VARIABLES DEFINITION
variable reactivity {
type discrete [3] {slow, medium, fast};
}
......@@ -11,29 +11,23 @@ variable memory {
variable attention {
type discrete[3] {low, medium, high};
}
variable robot_assistance {
type discrete [ 5 ] { lev_0, lev_1, lev_2, lev_3, lev_4 };
}
variable attempt {
type discrete [ 4 ] { att_1, att_2, att_3, att_4 };
}
variable game_state {
type discrete [ 3 ] { beg, mid, end };
}
variable robot_feedback {
type discrete [ 2 ] { yes, no };
}
variable user_action {
type discrete [ 3 ] { correct, wrong, timeout };
}
%definition of individual probabilities
%INDIVIDUAL PROBABILITIES DEFINITION
probability ( robot_assistance ) {
table 0.2, 0.2, 0.2, 0.2, 0.2;
}
......@@ -46,26 +40,23 @@ probability ( attempt ) {
probability ( user_action ) {
table 0.33, 0.33, 0.34;
}
#cpds 4
#CPDS 4
probability ( reactivity ) {
table 0.33, 0.33, 0.34;
}
#cpds 3
#CPDS 3
probability ( memory ) {
table 0.33, 0.33, 0.34;
}
#cpds 1
#CPDS 1
probability ( attention ) {
table 0.33, 0.33, 0.34;
}
probability ( robot_feedback ) {
table 0.5, 0.5;
}
#cpds 7
#CPDS 7
probability (user_action | reactivity, memory, attention) {
(slow, low, low) 0.1, 0.4, 0.5;
(slow, low, medium) 0.3, 0.5, 0.2;
(slow, low, high) 0.4, 0.5, 0.1;
......@@ -75,7 +66,7 @@ probability (user_action | reactivity, memory, attention) {
(slow, high, low) 0.3, 0.4, 0.3;
(slow, high, medium) 0.6, 0.3, 0.1;
(slow, high, high) 0.7, 0.2, 0.1;
%%%
(medium, low, low) 0.3, 0.4, 0.3;
(medium, low, medium) 0.3, 0.5, 0.2;
(medium, low, high) 0.4, 0.3, 0.3;
......@@ -85,7 +76,7 @@ probability (user_action | reactivity, memory, attention) {
(medium, high, low) 0.34, 0.33, 0.33;
(medium, high, medium) 0.7, 0.2, 0.1;
(medium, high, high) 0.75, 0.25, 0.0;
%%%
(fast, low, low) 0.5, 0.2, 0.3;
(fast, low, medium) 0.6, 0.2, 0.2;
(fast, low, high) 0.7, 0.3, 0.0;
......@@ -95,37 +86,32 @@ probability (user_action | reactivity, memory, attention) {
(fast, high, low) 0.5, 0.2, 0.3;
(fast, high, medium) 0.6, 0.2, 0.2;
(fast, high, high) 0.9, 0.1, 0.0;
}
#cpds 5
#CPDS 5
probability (robot_feedback | user_action) {
(correct) 0.8, 0.2;
(wrong) 0.5, 0.5;
(timeout) 0.2, 0.8;
}
#cpds 6
#CPDS 6
probability (robot_assistance | user_action) {
(correct) 0.05, 0.1, 0.15, 0.3, 0.4;
(wrong) 0.1, 0.2, 0.4, 0.2, 0.1;
(timeout) 0.2, 0.4, 0.2, 0.1, 0.1;
}
#cpds 2
#CPDS 2
probability (game_state | user_action) {
(correct) 0.2, 0.4, 0.4;
(wrong) 0.4, 0.4, 0.2;
(timeout) 0.6, 0.3, 0.1;
}
#cpds 0
#CPDS 0
probability (attempt | user_action) {
(correct) 0.1, 0.2, 0.3, 0.4;
(wrong) 0.5, 0.3, 0.15, 0.05;
(timeout) 0.4, 0.3, 0.2, 0.1;
}
#CPDS 8
probability (robot_assistance | robot_feedback) {
(yes) 0.5, 0.3, 0.1, 0.1, 0.0;
(no) 0.0, 0.1, 0.1, 0.3, 0.5;
......
This diff is collapsed.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment