# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import logging
import numpy as np
import time
import collections

from environment.environment import Environment
from model.model_manager import ModelManager

# get command line args
import options
flags = options.get()

LOG_INTERVAL = 100
PERFORMANCE_LOG_INTERVAL = 1000
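
# Each Worker owns a local copy of the hierarchical network (manager + agents,
# built by ModelManager) together with its own Environment instance. In training
# mode it follows an A3C-style loop: sync weights from the shared global
# network, roll out a partial episode of up to flags.local_t_max steps, and
# train each sub-model on the batch collected along the way.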
class Worker(object):
	def __init__(self, thread_index, session, annotated_wordbag, global_network, device, initial_learning_rate=None, learning_rate_input=None, grad_applier=None, train=True):
		self.train = train
		self.thread_index = thread_index
		self.sess = session
		self.global_network = global_network
		self.environment = Environment.create_environment(flags.env_type, self.thread_index, annotated_wordbag, shuffle=self.train)
		self.device = device
		# build network
		if self.train:
			self.local_network = ModelManager(self.thread_index, self.environment, learning_rate_input, self.device)
			self.apply_gradients, self.sync = self.local_network.initialize(self.global_network, grad_applier)
			self.initial_learning_rate = initial_learning_rate
		else:  # in evaluation mode, work directly on the shared network
			self.local_network = self.global_network
		self.terminal = True
		self.local_t = 0
		self.prev_local_t = 0
		# logs
		if self.train:
			# main log directory
			if not os.path.exists(flags.log_dir):
				os.makedirs(flags.log_dir)
			# episode results
			self.result_log_dir = flags.log_dir + "/thread" + str(self.thread_index)
			if not os.path.exists(self.result_log_dir):
				os.makedirs(self.result_log_dir)
			# performance logs
			if not os.path.exists(flags.log_dir + "/performance"):
				os.makedirs(flags.log_dir + "/performance")
			formatter = logging.Formatter('%(asctime)s %(message)s')
			# info logger
			self.info_logger = logging.getLogger('info_' + str(thread_index))
			hdlr = logging.FileHandler(flags.log_dir + '/performance/info_' + str(thread_index) + '.log')
			hdlr.setFormatter(formatter)
			self.info_logger.addHandler(hdlr)
			self.info_logger.setLevel(logging.DEBUG)
			# reward logger
			self.reward_logger = logging.getLogger('reward_' + str(thread_index))
			hdlr = logging.FileHandler(flags.log_dir + '/performance/reward_' + str(thread_index) + '.log')
			hdlr.setFormatter(formatter)
			self.reward_logger.addHandler(hdlr)
			self.reward_logger.setLevel(logging.DEBUG)
		self.max_reward = float("-inf")
		self.update_statistics()

	def update_statistics(self):
		self.stats = self.environment.get_statistics()
		self.stats.update(self.local_network.get_statistics())

	def prepare(self, episode_id=None):  # initialize a new episode
		self.terminal = False
		self.environment.reset(episode_id)
		self.local_network.reset()

	def stop(self):  # stop the current episode
		self.environment.stop()

	def _anneal_learning_rate(self, global_step):  # linearly anneal the learning rate to 0 over flags.max_time_step steps
		learning_rate = self.initial_learning_rate * (flags.max_time_step - global_step) / flags.max_time_step
		if learning_rate < 0.0:
			learning_rate = 0.0
		return learning_rate
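	# For example (hypothetical values): with initial_learning_rate=7e-4 and
	# flags.max_time_step=1e6, the rate is 7e-4 at step 0, 3.5e-4 halfway
	# through, and clamps to 0.0 for any step beyond max_time_step.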

	def set_start_time(self, start_time, reset):
		self.start_time = start_time
		if reset:
			self.local_network.init_train_count()

	def _print_log(self, step):
		# if self.local_t - self.prev_local_t >= PERFORMANCE_LOG_INTERVAL:
		# 	self.prev_local_t += PERFORMANCE_LOG_INTERVAL
		# 	elapsed_time = time.time() - self.start_time
		# 	steps_per_sec = step / elapsed_time
		# 	print("### Performance : {} STEPS in {:.0f} sec. {:.0f} STEPS/sec. {:.2f}M STEPS/hour".format(step, elapsed_time, steps_per_sec, steps_per_sec * 3600 / 1000000.))
		# Log the latest statistics
		self.reward_logger.info(str(["{0}={1}".format(key, value) for key, value in self.stats.items()]))
		# Decide whether to dump the episode results
		print_result = False
		if flags.show_all_screenshots:
			print_result = True
		elif flags.show_best_screenshots:
			if self.environment.episode_reward > self.max_reward:
				self.max_reward = self.environment.episode_reward
				print_result = True
		if print_result:  # dump the episode internals to file
			with open(self.result_log_dir + '/reward({0})_epoch({1})_step({2}).log'.format(self.environment.episode_reward, self.environment.epoch, step), "w") as result_file:
				result_file.write('Annotation: {0}\n'.format(["{0}={1}".format(key, value) for key, value in sorted(self.environment.annotation.items(), key=lambda t: t[0])]))
				result_file.write('Prediction: {0}\n'.format(["{0}={1}".format(key, value) for key, value in sorted(self.environment.get_labeled_prediction().items(), key=lambda t: t[0])]))
				result_file.write('Reward: {0}\n'.format(self.environment.compute_reward()))

	# run simulations and build a batch for training
	def _build_batch(self, step):
		batch = {}
		if self.train:  # init batch: one deque per sub-model, for every key
			batch["states"] = []
			batch["actions"] = []
			batch["concat"] = []
			batch["cumulative_rewards"] = []
			batch["advantages"] = []
			batch["start_lstm_state"] = []
			for i in range(self.local_network.model_size):
				for key in batch:
					batch[key].append(collections.deque())
				batch["start_lstm_state"][i] = self.local_network.get_model(i).lstm_state_out
		agent_id_list = collections.deque()
		agent_reward_list = collections.deque()
		agent_value_list = collections.deque()
		manager_value_list = collections.deque()
		t = 0
		while t < flags.local_t_max and not self.terminal:
			t += 1
			prediction = self.environment.sentidoc
			state = self.environment.get_state()
			# the manager picks the agent that acts in the current state
			agent_id, manager_policy, manager_value = self.local_network.get_agentID_by_state(sess=self.sess, state=[state], concat=[prediction])
			agent = self.local_network.get_model(agent_id)
			agent_policy, agent_value = agent.run_policy_and_value(sess=self.sess, state=[state], concat=[prediction])
			action = self.environment.choose_action(agent_policy)
			reward, self.terminal = self.environment.process_action(action)
			self.local_t += 1
			if self.train:  # populate batch
				if self.terminal:  # update statistics
					self.update_statistics()  # required before assigning the manager reward
				# Populate the agent batch
				batch["states"][agent_id].append(state)
				batch["actions"][agent_id].append(agent.get_action_vector(action))
				batch["concat"][agent_id].append(prediction)
				agent_id_list.appendleft(agent_id)  # insert on top
				agent_reward_list.appendleft(reward)  # insert on top
				agent_value_list.appendleft(agent_value)  # insert on top
				# Populate the manager batch
				if self.local_network.has_manager:
					batch["states"][0].append(state)
					batch["actions"][0].append(self.local_network.manager.get_action_vector(agent_id - 1))
					batch["concat"][0].append(prediction)
					manager_value_list.appendleft(manager_value)  # insert on top
				if self.local_t % LOG_INTERVAL == 0:
					self.info_logger.info("actions={0} value={1} agent={2}".format(agent_policy, agent_value, agent_id))
		# build cumulative rewards and advantages
		if self.train:
			cumulative_reward = 0.0
			# if the episode has not terminated, bootstrap the value from the last state
			if not self.terminal:
				prediction = self.environment.sentidoc
				state = self.environment.get_state()
				agent_id, manager_policy, manager_value = self.local_network.get_agentID_by_state(sess=self.sess, state=[state], concat=[prediction])
				agent = self.local_network.get_model(agent_id)
				agent_value = agent.run_value(self.sess, state=[state], concat=[prediction])
				if self.local_network.has_manager:
					cumulative_reward = manager_value if abs(manager_value) > abs(agent_value) else agent_value  # should prevent value over-estimation
				else:
					cumulative_reward = agent_value
			# the transition lists were filled with appendleft, so iterating them walks the rollout backwards in time
			if self.local_network.has_manager:
				for (agent_id, agent_reward, agent_value, manager_value) in zip(agent_id_list, agent_reward_list, agent_value_list, manager_value_list):
					value = manager_value if abs(manager_value) > abs(agent_value) else agent_value
					cumulative_reward = agent_reward + flags.gamma * cumulative_reward
					batch["cumulative_rewards"][agent_id].appendleft(cumulative_reward)  # insert on top
					batch["advantages"][agent_id].appendleft(cumulative_reward - value)  # insert on top
					batch["cumulative_rewards"][0].appendleft(cumulative_reward)  # insert on top
					batch["advantages"][0].appendleft(cumulative_reward - value)  # insert on top
			else:
				for (agent_id, agent_reward, agent_value) in zip(agent_id_list, agent_reward_list, agent_value_list):
					cumulative_reward = agent_reward + flags.gamma * cumulative_reward
					batch["cumulative_rewards"][agent_id].appendleft(cumulative_reward)  # insert on top
					batch["advantages"][agent_id].appendleft(cumulative_reward - agent_value)  # insert on top
		return batch
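	# The backward pass above implements the standard n-step discounted return,
	# R_t = r_t + gamma * R_{t+1}, with advantages A_t = R_t - V(s_t). A worked
	# example with hypothetical values gamma=0.99, rewards [0, 1] and a
	# bootstrap value of 0.5: R_1 = 1 + 0.99*0.5 = 1.495, then
	# R_0 = 0 + 0.99*1.495 ≈ 1.48.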

	def process(self, step=0):
		if self.terminal:
			self.prepare()
		start_local_t = self.local_t
		# Copy weights from the shared network to the local one and anneal the learning rates
		if self.train:
			learning_rate = []
			agents_count = self.local_network.model_size - 1
			for i in range(self.local_network.model_size):
				self.sess.run(self.sync[i])
				rate = self._anneal_learning_rate(self.local_network.get_model(i).train_count)
				# the agents' rates are divided by their count, so the manager's learning rate is the highest
				if i > 0:
					rate /= agents_count
				learning_rate.append(rate)
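			# For example (hypothetical sizes): with model_size=4 there are 3
			# agents, so the manager trains at the full annealed rate while each
			# agent trains at one third of it.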
		# Build the training batch
		batch_dict = self._build_batch(step)
		# Populate the feed dictionary and train each sub-model on its share of the batch
		if self.train:
			for i in range(self.local_network.model_size):
				if len(batch_dict["states"][i]) > 0:
					agent = self.local_network.get_model(i)
					agent.train(self.sess, self.apply_gradients[i], learning_rate[i], batch_dict["states"][i], batch_dict["actions"][i], batch_dict["advantages"][i], batch_dict["cumulative_rewards"][i], batch_dict["start_lstm_state"][i], batch_dict["concat"][i])
			self._print_log(step)
		diff_local_t = self.local_t - start_local_t
		return diff_local_t  # number of local steps performed
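
# Usage sketch (hypothetical: the real training entry point lives elsewhere in
# this repository, and the session/network/applier objects come from there):
#
# 	worker = Worker(thread_index=1, session=sess, annotated_wordbag=wordbag,
# 	                global_network=global_net, device='/cpu:0',
# 	                initial_learning_rate=7e-4, learning_rate_input=lr_input,
# 	                grad_applier=applier, train=True)
# 	worker.set_start_time(time.time(), reset=True)
# 	global_t = 0
# 	while global_t < flags.max_time_step:
# 		global_t += worker.process(global_t)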