# valueIterationAgents.py
# -----------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import mdp, util
from learningAgents import ValueEstimationAgent

class ValueIterationAgent(ValueEstimationAgent):
    """
    * Please read learningAgents.py before reading this. *

    A ValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs value iteration
    for a given number of iterations using the supplied
    discount factor.
    """

    def __init__(self, mdp, discount=0.9, iterations=100):
        """
        Your value iteration agent should take an mdp on
        construction, run the indicated number of iterations
        and then act according to the resulting policy.

        Some useful mdp methods you will use:
            mdp.getStates()
            mdp.getPossibleActions(state)
            mdp.getTransitionStatesAndProbs(state, action)
            mdp.getReward(state, action, nextState)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # A Counter is a dict with default 0
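
        # Batch value iteration: each sweep computes
        #     V_{k+1}(s) = max_a sum_{s'} T(s, a, s') * [R(s, a, s') + discount * V_k(s')]
        # reading the previous sweep's values (self.values) on the right-hand
        # side and committing the new table only after the sweep finishes.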
        for i in range(self.iterations):
            updatedValues = util.Counter()
            for state in self.mdp.getStates():
                if self.mdp.isTerminal(state):
                    updatedValues[state] = 0  # terminal states keep V(s) = 0
                else:
                    # Bellman update
                    Vi = max([self.getQValue(state, action)
                              for action in self.mdp.getPossibleActions(state)])
                    updatedValues[state] = Vi
            self.values = updatedValues

    def getValue(self, state):
        """
        Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def getQValue(self, state, action):
        """
        The q-value of the state action pair
        (after the indicated number of value iteration
        passes). Note that value iteration does not
        necessarily create this quantity and you may have
        to derive it on the fly.
        """
        Q = 0
        for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            R = self.mdp.getReward(state, action, nextState)
            U = self.discount * self.values[nextState]
            Q += prob * (R + U)
        return Q

    def getPolicy(self, state):
        """
        The policy is the best action in the given state
        according to the values computed by value iteration.
        You may break ties any way you see fit. Note that if
        there are no legal actions, which is the case at the
        terminal state, you should return None.
        """
        if self.mdp.isTerminal(state):
            return None
        bestValue = float("-inf")
        bestAction = None
        for action in self.mdp.getPossibleActions(state):
            Q = self.getQValue(state, action)
            if Q >= bestValue:
                bestValue = Q
                bestAction = action
        return bestAction

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.getPolicy(state)
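

# Illustrative usage sketch (not part of the original assignment file): a
# minimal hand-written MDP stub exposing only the five methods this agent
# actually calls. The stub class, its state names, and the reward numbers are
# hypothetical and exist purely to show how the agent is constructed and
# queried; in the project the MDP would come from gridworld.py instead.
if __name__ == '__main__':
    class _TwoStateMDP:
        """A deterministic two-state chain: 'a' -> 'b' -> 'exit' (terminal)."""
        def getStates(self):
            return ['a', 'b', 'exit']
        def isTerminal(self, state):
            return state == 'exit'
        def getPossibleActions(self, state):
            return [] if self.isTerminal(state) else ['go']
        def getTransitionStatesAndProbs(self, state, action):
            # 'a' always moves to 'b'; 'b' always moves to 'exit'.
            return [('b' if state == 'a' else 'exit', 1.0)]
        def getReward(self, state, action, nextState):
            # Reward of 10 only on the transition into the terminal state.
            return 10 if nextState == 'exit' else 0

    agent = ValueIterationAgent(_TwoStateMDP(), discount=0.9, iterations=50)
    print(agent.getValue('b'))   # 10.0
    print(agent.getValue('a'))   # 0.9 * 10 = 9.0
    print(agent.getPolicy('a'))  # 'go'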