Example_ExploreExploit.py
import numpy as np
from spm_MDP_VB_LC import spm_MDP_VB_LC
from MDP_prelims import MDP_class
import copy


def Example_ExploreExploit(df_passed, n_trials):
"""
# This is an 'explore/'exploit' game in which the agent must choose one
# of three arms in which to search for a reward. At any one time, only one
# of the arms has a high probability of dispensing a reward.
# This situation changes after a set number of trials, at which point
# the agent must learn the new location of the reward by updating its B
# matrices.
# Inputs:
# - value of model decay parameter, denoted df. Either fixed (df>0) or flexible (df=0), in
# which case decay parameter will be set in relation to the state-action
# prediction error.
# - n_trials = number of trials to run.
# Returns: a completed MDP structure.
# Anna Sales, University of Bristol, 2018.
#__________________________________________________________________________
# In each trial, the agent starts at the neutral location (1) and then
# chooses whether to go to 2,3 or 4.
# States: 1-7 - neutral, rewarded at 1, unrewarded at 1, rewarded at 2,
# unrewarded at 2, rewarded at 3, unrewarded at 3.
# Observations: 1-7, one-to-one relationship between states and
# observations (A = identity matrix)
# Actions: Action 1 always takes agent to location 1, same for 2, 3 and 4.
"""
    # rng('shuffle')  # (RNG seeding in the MATLAB original)

    # outcome probabilities: A
    # --------------------------------------------------------------------------
    # We start by specifying the probabilistic mapping from hidden states
    # to outcomes.
    # --------------------------------------------------------------------------
    A = np.eye(7)
    A_ENV = A
    # make the agent naive to the game:
    a = 5 * A
    # The higher the multiplier, the slower the learning, as priors are adjusted
    # by numbers < 1 on each trial. A very high number would correspond to a
    # very well-learnt prior.
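    # Note: `a` is defined here but is not attached to the mdp structure below, so in
    # this script the agent's observation model stays fixed at the identity mapping A.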
    # controlled transitions: B{u}
    # --------------------------------------------------------------------------
    # Next, we have to specify the probabilistic transitions of hidden states
    # under each action or control state.
    # --------------------------------------------------------------------------
    # B_ENV matrices - the probability of transitioning to the rewarded or
    # unrewarded state is governed by parameters l, m, n, which change as
    # trials go on.
    l = 1
    m = 1
    n = 1
    B_ENV = np.zeros((4, 7, 7))
    B_ENV[0] = np.array([[1,1,1,1,1,1,1],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
    B_ENV[1] = np.array([[0,0,0,0,0,0,0],[l,l,l,l,l,l,l],[(1-l),(1-l),(1-l),(1-l),(1-l),(1-l),(1-l)],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
    B_ENV[2] = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[m,m,m,m,m,m,m],[(1-m),(1-m),(1-m),(1-m),(1-m),(1-m),(1-m)],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
    B_ENV[3] = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[n,n,n,n,n,n,n],[(1-n),(1-n),(1-n),(1-n),(1-n),(1-n),(1-n)]])
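    # Structure of the transition matrices: action 1 returns the agent to the neutral
    # state from anywhere; action u+1 (u = 1, 2, 3) takes it to the rewarded state of
    # arm u with probability l/m/n and to the corresponding unrewarded state otherwise.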
    B = B_ENV.copy()  # the agent's B: this will get over-written later on but it's useful to create a matrix with the right dimensions.

    # the agent's beliefs about B:
    l = 0.3
    m = 0.3
    n = 0.3
    b = np.zeros((4, 7, 7))
    b[0] = np.array([[1,1,1,1,1,1,1],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
    b[1] = np.array([[0,0,0,0,0,0,0],[l,l,l,l,l,l,l],[(1-l),(1-l),(1-l),(1-l),(1-l),(1-l),(1-l)],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
    b[2] = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[m,m,m,m,m,m,m],[(1-m),(1-m),(1-m),(1-m),(1-m),(1-m),(1-m)],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
    b[3] = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[n,n,n,n,n,n,n],[(1-n),(1-n),(1-n),(1-n),(1-n),(1-n),(1-n)]])
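    # With l = m = n = 0.3, the agent initially believes each arm pays out with
    # probability 0.3 (and fails with probability 0.7): mildly pessimistic about all three.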
    # priors: (utility) C
    # --------------------------------------------------------------------------
    # Finally, we have to specify the prior preferences in terms of log
    # probabilities. Here, the agent prefers rewarding outcomes and dislikes
    # failing to get a reward.
    # --------------------------------------------------------------------------
    c = 4
    C = np.array([[0, c, -0.5*c, c, -0.5*c, c, -0.5*c]]).T
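    # With c = 4 this gives C = [0, 4, -2, 4, -2, 4, -2]^T: the neutral state carries no
    # preference, rewarded outcomes are attractive and unrewarded outcomes mildly aversive.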
    # now specify prior beliefs about initial state
    # --------------------------------------------------------------------------
    d = np.array([[1,0,0,0,0,0,0]]).T  # agent knows it always starts at the neutral position

    # allowable policies (of depth T).
    # --------------------------------------------------------------------------
    V = np.array([[1,2,3,4]])  # agent can go to any of the three arms or can stay where it is.
    V = V - 1
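    # shift the MATLAB-style 1-based action labels to 0-based indices for the Python port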
    # MDP Structure - this will be used to generate arrays for multiple trials
    # ==========================================================================
    mdp = MDP_class()
    mdp.V = V              # allowable policies
    mdp.A = A              # observation model
    mdp.B = B              # transition probabilities
    mdp.C = C              # preferred states
    mdp.d = d              # prior beliefs about the initial state
    mdp.b = b              # the agent's beliefs about B
    mdp.s = 1              # initial state
    mdp.A_ENV = A_ENV      # real-world (fixed) A and B matrices.
    mdp.B_ENV = B_ENV
    mdp.Ni = 15            # default
    mdp.alpha = 1          # precision of action selection
    mdp.beta = 1           # inverse precision of policy selection
    mdp.arm_one = 0
    mdp.arm_two = 0
    mdp.arm_three = 0
    mdp.SAPEall = []       # accumulated state-action prediction errors
    mdp.updateENV = 1      # a parameter which can drop the B_ENV probabilities - if zero, B_ENV is just fixed
    if df_passed == 0:
        mdp.df_set = None
    else:
        mdp.df_set = df_passed
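    # df_passed == 0 requests the flexible decay described in the docstring; this is
    # signalled here by setting df_set to None rather than a fixed value.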
    # --------------------------------------------------------------------------
    nt = n_trials  # number of trials
    # MDP = mdp
    # [MDP(1:nt)] = deal(mdp)  # MATLAB original: sets MDP up as a struct array of individual MDP structures.
    MDP = [copy.deepcopy(mdp) for i in range(nt)]  # one independent copy of the MDP structure per trial
    ch_ev = 80   # 50    # change the rewarded arm every ch_ev trials.
    high = 0.95  # 0.70  # the high-probability arm
    low = 0.05   # 0.1   # low-probability arm
    place = 0
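    # Reward schedule: the best arm pays out with probability `high` and the other two
    # with probability `low`; the best arm rotates through arms 1 -> 2 -> 3 every ch_ev trials.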
    for p in range(nt):  # set up the task.
        if p % ch_ev == 0:
            # time to move the reward: the current 'place' becomes the target arm,
            # then 'place' advances (cycling 0 -> 1 -> 2 -> 0)
            target = place
            if place == 2:
                place = 0
            else:
                place = place + 1
        arm_probs = np.ones(3) * low
        arm_probs[target] = high
        l = arm_probs[0]
        m = arm_probs[1]
        n = arm_probs[2]
        # set up what the environmental matrices will look like on the pth trial:
        MDP[p].B_ENV[0] = np.array([[1,1,1,1,1,1,1],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
        MDP[p].B_ENV[1] = np.array([[0,0,0,0,0,0,0],[l,l,l,l,l,l,l],[(1-l),(1-l),(1-l),(1-l),(1-l),(1-l),(1-l)],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
        MDP[p].B_ENV[2] = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[m,m,m,m,m,m,m],[(1-m),(1-m),(1-m),(1-m),(1-m),(1-m),(1-m)],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
        MDP[p].B_ENV[3] = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[n,n,n,n,n,n,n],[(1-n),(1-n),(1-n),(1-n),(1-n),(1-n),(1-n)]])
    # Solve to generate data
    # =========================================================================
    MDP_OUT = spm_MDP_VB_LC(MDP)
    return MDP_OUT


if __name__ == '__main__':
    import matplotlib.pyplot as plt
    MDP_OUT = Example_ExploreExploit(df_passed=16, n_trials=200)
    SAPEall = MDP_OUT[-1].SAPEall
    plt.plot(SAPEall)
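    # Minimal sketch of displaying the result; assumes SAPEall holds the state-action
    # prediction errors accumulated over the simulated trials.
    plt.title('State-action prediction error (SAPEall)')
    plt.show()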