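"""A Deep Q-Network (DQN) agent: experience replay, a periodically
synchronized teacher (target) network, and an epsilon-greedy policy that
decays over training. Builds on FNAgent / Trainer / Observer from
fn_framework in this repository.
"""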
import random
import argparse
from collections import deque
import numpy as np
from tensorflow.python import keras as K
from PIL import Image
import gym
import gym_ple  # registers PLE environments such as Catcher-v0
from fn_framework import FNAgent, Trainer, Observer


class DeepQNetworkAgent(FNAgent):

    def __init__(self, epsilon, actions):
        super().__init__(epsilon, actions)
        self._scaler = None
        self._teacher_model = None

    def initialize(self, experiences, optimizer):
        feature_shape = experiences[0].s.shape
        self.make_model(feature_shape)
        self.model.compile(optimizer, loss="mse")
        self.initialized = True
        print("Initialization done. Starting training!")

    def make_model(self, feature_shape):
        normal = K.initializers.glorot_normal()
        model = K.Sequential()
        # DQN-style convolutional stack followed by a dense layer and a
        # linear Q-value output, one unit per action.
        model.add(K.layers.Conv2D(
            32, kernel_size=8, strides=4, padding="same",
            input_shape=feature_shape, kernel_initializer=normal,
            activation="relu"))
        model.add(K.layers.Conv2D(
            64, kernel_size=4, strides=2, padding="same",
            kernel_initializer=normal,
            activation="relu"))
        model.add(K.layers.Conv2D(
            64, kernel_size=3, strides=1, padding="same",
            kernel_initializer=normal,
            activation="relu"))
        model.add(K.layers.Flatten())
        model.add(K.layers.Dense(256, kernel_initializer=normal,
                                 activation="relu"))
        model.add(K.layers.Dense(len(self.actions),
                                 kernel_initializer=normal))
        self.model = model
        self._teacher_model = K.models.clone_model(self.model)

    def estimate(self, state):
        return self.model.predict(np.array([state]))[0]
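
    # For each sampled transition the TD target is
    # r + gamma * max_a' Q_teacher(s', a') (just r if the episode ended).
    # Only the taken action's entry in the predicted vector is overwritten,
    # so the MSE loss is zero for every other action.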
    def update(self, experiences, gamma):
        states = np.array([e.s for e in experiences])
        n_states = np.array([e.n_s for e in experiences])

        estimateds = self.model.predict(states)
        future = self._teacher_model.predict(n_states)

        for i, e in enumerate(experiences):
            reward = e.r
            if not e.d:
                reward += gamma * np.max(future[i])
            estimateds[i][e.a] = reward

        loss = self.model.train_on_batch(states, estimateds)
        return loss

    def update_teacher(self):
        self._teacher_model.set_weights(self.model.get_weights())
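

# A small fully connected variant used only in test mode (CartPole-v0),
# handy for checking the training loop without the cost of the CNN.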
class DeepQNetworkAgentTest(DeepQNetworkAgent):

    def __init__(self, epsilon, actions):
        super().__init__(epsilon, actions)

    def make_model(self, feature_shape):
        normal = K.initializers.glorot_normal()
        model = K.Sequential()
        model.add(K.layers.Dense(64, input_shape=feature_shape,
                                 kernel_initializer=normal, activation="relu"))
        # Linear output head: Q-values may be negative, so the Q layer has
        # no activation, matching the CNN agent's head.
        model.add(K.layers.Dense(len(self.actions),
                                 kernel_initializer=normal))
        self.model = model
        self._teacher_model = K.models.clone_model(self.model)
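

# Observer that turns raw Catcher frames into the CNN's input: grayscale,
# resize to width x height, scale to [0, 1], and stack the last frame_count
# frames so the network can perceive motion.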
class CatcherObserver(Observer):

    def __init__(self, env, width, height, frame_count):
        super().__init__(env)
        self.width = width
        self.height = height
        self.frame_count = frame_count
        self._frames = deque(maxlen=frame_count)

    def transform(self, state):
        grayed = Image.fromarray(state).convert("L")
        resized = grayed.resize((self.width, self.height))
        resized = np.array(resized).astype("float")
        normalized = resized / 255.0  # scale to [0, 1]
        if len(self._frames) == 0:
            # At episode start, fill the buffer by repeating the first frame.
            for i in range(self.frame_count):
                self._frames.append(normalized)
        else:
            self._frames.append(normalized)
        feature = np.array(self._frames)
        # Convert the feature shape from (frames, height, width) to
        # (height, width, frames) for the channels-last Conv2D layers.
        feature = np.transpose(feature, (1, 2, 0))
        return feature
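

# Trainer: manages the replay buffer, the epsilon decay schedule, and how
# often the teacher network is synchronized with the online network.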
class DeepQNetworkTrainer(Trainer):

    def __init__(self, buffer_size=50000, batch_size=32,
                 gamma=0.99, initial_epsilon=0.5, final_epsilon=1e-3,
                 learning_rate=1e-3, teacher_update_freq=3, report_interval=10,
                 log_dir="", file_name=""):
        super().__init__(buffer_size, batch_size, gamma,
                         report_interval, log_dir)
        self.file_name = file_name if file_name else "dqn_agent.h5"
        self.initial_epsilon = initial_epsilon
        self.final_epsilon = final_epsilon
        self.learning_rate = learning_rate
        self.teacher_update_freq = teacher_update_freq
        self.loss = 0
        self.training_episode = 0
        self._max_reward = -10

    def train(self, env, episode_count=1200, initial_count=200,
              test_mode=False, render=False, observe_interval=100):
        actions = list(range(env.action_space.n))
        if not test_mode:
            agent = DeepQNetworkAgent(1.0, actions)
        else:
            agent = DeepQNetworkAgentTest(1.0, actions)
            observe_interval = 0
        self.training_episode = episode_count

        self.train_loop(env, agent, episode_count, initial_count, render,
                        observe_interval)
        return agent

    def episode_begin(self, episode, agent):
        self.loss = 0

    def begin_train(self, episode, agent):
        optimizer = K.optimizers.Adam(lr=self.learning_rate, clipvalue=1.0)
        agent.initialize(self.experiences, optimizer)
        self.logger.set_model(agent.model)
        agent.epsilon = self.initial_epsilon
        # Exclude the warm-up episodes from the epsilon decay schedule.
        self.training_episode -= episode
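
    # One gradient step per environment step, on a minibatch sampled
    # uniformly from the replay buffer.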
    def step(self, episode, step_count, agent, experience):
        if self.training:
            batch = random.sample(self.experiences, self.batch_size)
            self.loss += agent.update(batch, self.gamma)

    def episode_end(self, episode, step_count, agent):
        reward = sum([e.r for e in self.get_recent(step_count)])
        self.loss = self.loss / step_count
        self.reward_log.append(reward)
        if self.training:
            self.logger.write(self.training_count, "loss", self.loss)
            self.logger.write(self.training_count, "reward", reward)
            self.logger.write(self.training_count, "epsilon", agent.epsilon)
            if reward > self._max_reward:
                agent.save(self.logger.path_of(self.file_name))
                self._max_reward = reward
            if self.is_event(self.training_count, self.teacher_update_freq):
                agent.update_teacher()

            # Linear epsilon decay from initial_epsilon to final_epsilon
            # over the remaining training episodes.
            diff = (self.initial_epsilon - self.final_epsilon)
            decay = diff / self.training_episode
            agent.epsilon = max(agent.epsilon - decay, self.final_epsilon)

        if self.is_event(episode, self.report_interval):
            recent_rewards = self.reward_log[-self.report_interval:]
            self.logger.describe("reward", recent_rewards, episode=episode)


def main(play, is_test):
    file_name = "dqn_agent.h5" if not is_test else "dqn_agent_test.h5"
    trainer = DeepQNetworkTrainer(file_name=file_name)
    path = trainer.logger.path_of(trainer.file_name)
    agent_class = DeepQNetworkAgent

    if is_test:
        print("Training in test mode")
        obs = gym.make("CartPole-v0")
        agent_class = DeepQNetworkAgentTest
    else:
        env = gym.make("Catcher-v0")
        obs = CatcherObserver(env, 80, 80, 4)
        trainer.learning_rate = 1e-4

    if play:
        agent = agent_class.load(obs, path)
        agent.play(obs, render=True)
    else:
        trainer.train(obs, test_mode=is_test)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="DQN Agent")
    parser.add_argument("--play", action="store_true",
                        help="play with a trained model")
    parser.add_argument("--test", action="store_true",
                        help="train in test mode")
    args = parser.parse_args()
    main(args.play, args.test)
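
# Usage (assuming gym_ple registers Catcher-v0):
#   python dqn_agent.py          # train the CNN agent on Catcher-v0
#   python dqn_agent.py --test   # sanity-check the loop on CartPole-v0
#   python dqn_agent.py --play   # watch the saved agent play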