Critic.py
from keras import layers, models, optimizers
from keras import backend as K


class Critic:
    """Critic (Value) Model."""

    def __init__(self, state_size, action_size):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
        """
        self.state_size = state_size
        self.action_size = action_size

        # Initialize any other variables here

        self.build_model()

    def build_model(self):
        """Build a critic (value) network that maps
        (state, action) pairs -> Q-values.
        """
        # Define input layers
        states = layers.Input(shape=(self.state_size,), name='states')
        actions = layers.Input(shape=(self.action_size,), name='actions')

        # Hidden layer for the state pathway
        net = layers.Dense(units=20, activation='relu')(states)

        # Project the actions to the same width before merging; adding the
        # raw actions tensor only matches shapes when action_size == 20
        net_actions = layers.Dense(units=20, activation='relu')(actions)

        # Merge the state and action pathways, then add another hidden layer
        net = layers.Add()([net, net_actions])
        net = layers.Dense(units=20, activation='relu')(net)

        # Skip connection: a separate dense pathway from the raw states
        lin_states = layers.Dense(units=20, activation='relu')(states)
        net = layers.Add()([net, lin_states])
        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with
        # built-in loss function
        optimizer = optimizers.Adam(lr=0.05)
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients
        # (to be used by the actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
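

# --------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): shows how an actor
# model would typically consume get_action_gradients in DDPG. The
# state_size/action_size values and the random batches below are
# hypothetical placeholders, assuming a Keras 2.x / TF1-style backend where
# K.learning_phase() is a valid extra input (0 = test phase).
if __name__ == '__main__':
    import numpy as np

    critic = Critic(state_size=6, action_size=4)

    # Hypothetical batch of 32 (state, action) pairs
    states_batch = np.random.rand(32, 6)
    actions_batch = np.random.rand(32, 4)

    # Train the critic toward some target Q-values (placeholder targets)
    q_targets = np.random.rand(32, 1)
    critic.model.train_on_batch(x=[states_batch, actions_batch], y=q_targets)

    # Fetch dQ/da for the batch; the actor would use these gradients to
    # nudge its policy in the direction that increases Q
    grads = critic.get_action_gradients([states_batch, actions_batch, 0])
    print(grads[0].shape)  # -> (32, 4)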