A3C network for reinforcement learning on OpenAI Gym Atari Breakout from visual input, based on https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-8-asynchronous-actor-critic-agents-a3c-c88f72a5e9f2
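Note: the script below appears to target TensorFlow 1.x (it uses tf.contrib.slim and placeholders), gym with the Atari environments installed, and an older SciPy that still ships scipy.misc.imresize; `from helper import *` presumably refers to the helper.py that accompanies the referenced tutorial's code.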
#!/usr/bin/env python3
import threading
import multiprocessing
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.slim as slim
import scipy.misc
import scipy.signal
import gym
import os
from helper import *
from random import choice
from time import sleep
from time import time
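# update_target_graph builds assign ops that copy the trainable variables of one
# variable scope into another; each worker runs these ops to sync its local
# network with the 'global' network before collecting experience.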
def update_target_graph(from_scope, to_scope):
    from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
    to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
    op_holder = []
    for from_var, to_var in zip(from_vars, to_vars):
        op_holder.append(to_var.assign(from_var))
    return op_holder
def process_frame(frame):
    s = frame[49:193, 8:152]    # Remove borders and counters
    s = np.mean(s, axis=2)      # Convert to grayscale
    s[s != 0] = 1
    s = scipy.misc.imresize(s, [84, 84])
    s = np.reshape(s, [np.prod(s.shape)]) / 255.0
    return s
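# discount() below uses an IIR filter to compute the discounted cumulative sum
# out[t] = x[t] + gamma * out[t + 1], running backwards over the sequence;
# e.g. discount([1., 1., 1.], 0.99) -> [2.9701, 1.99, 1.].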
def discount(x, gamma):
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
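# normalized_columns_initializer draws Gaussian weights and rescales each column
# to have norm `std`; the small std of 0.01 used for the policy layer keeps the
# initial action distribution close to uniform.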
def normalized_columns_initializer(std=1.0):
    def _initializer(shape, dtype=None, partition_info=None):
        out = np.random.randn(*shape).astype(np.float32)
        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
        return tf.constant(out)
    return _initializer
class AC_Network():
    def __init__(self, s_size, a_size, scope, trainer):
        with tf.variable_scope(scope):
            # Input and visual encoding layers
            self.inputs = tf.placeholder(shape=[None, s_size], dtype=tf.float32)
            self.imageIn = tf.reshape(self.inputs, shape=[-1, 84, 84, 1])
            self.conv1 = slim.conv2d(activation_fn=tf.nn.relu,
                                     inputs=self.imageIn,
                                     num_outputs=32,
                                     kernel_size=5)
            self.max1 = slim.max_pool2d(inputs=self.conv1, kernel_size=2)
            self.conv2 = slim.conv2d(activation_fn=tf.nn.relu,
                                     inputs=self.max1,
                                     num_outputs=32,
                                     kernel_size=5)
            self.max2 = slim.max_pool2d(inputs=self.conv2, kernel_size=2)
            self.conv3 = slim.conv2d(activation_fn=tf.nn.relu,
                                     inputs=self.max2,
                                     num_outputs=64,
                                     kernel_size=4)
            self.max3 = slim.max_pool2d(inputs=self.conv3, kernel_size=2)
            self.conv4 = slim.conv2d(activation_fn=tf.nn.relu,
                                     inputs=self.max3,
                                     num_outputs=64,
                                     kernel_size=3)
            hidden = slim.fully_connected(slim.flatten(self.conv4),
                                          512,
                                          activation_fn=tf.nn.relu)
            # Output layers for policy and value estimations
            self.policy = slim.fully_connected(hidden, a_size,
                                               activation_fn=tf.nn.softmax,
                                               weights_initializer=normalized_columns_initializer(0.01),
                                               biases_initializer=None)
            self.value = slim.fully_connected(hidden, 1,
                                              activation_fn=None,
                                              weights_initializer=normalized_columns_initializer(1.0),
                                              biases_initializer=None)
            # Only the worker networks need ops for loss functions and gradient updating.
            if scope != 'global':
                self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
                self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)
                self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
                self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
                self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
                # Loss functions
                self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
                self.entropy = -tf.reduce_sum(self.policy * tf.log(self.policy + 1e-6))
                self.policy_loss = -tf.reduce_sum(tf.log(self.responsible_outputs) * self.advantages)
                self.loss = 0.5 * self.value_loss + self.policy_loss - self.entropy * 0.01
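                # The combined objective follows the referenced tutorial: the value (critic)
                # loss weighted by 0.5, the policy-gradient (actor) loss -log pi(a|s) * A,
                # and an entropy bonus (weight 0.01) that discourages premature collapse
                # to a deterministic policy.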
                # Get gradients from local network using local losses
                local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                self.gradients = tf.gradients(self.loss, local_vars)
                self.var_norms = tf.global_norm(local_vars)
                grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 40.0)
                # Apply local gradients to global network
                global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
                self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
class Worker():
    def __init__(self, name, s_size, a_size, trainer, model_path, global_episodes):
        self.name = "worker_" + str(name)
        self.number = name
        self.model_path = model_path
        self.trainer = trainer
        self.global_episodes = global_episodes
        self.increment = self.global_episodes.assign_add(1)
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_mean_values = []
        self.summary_writer = tf.summary.FileWriter("train_" + str(self.number))
        # Create the local copy of the network and the tensorflow op to copy global parameters to the local network
        self.local_AC = AC_Network(s_size, a_size, self.name, trainer)
        self.update_local_ops = update_target_graph('global', self.name)
        self.actions = [1, 4, 5]    # Stay, Move Left, Move Right
        self.env = gym.make('Breakout-v0')
    def train(self, global_AC, rollout, sess, gamma, bootstrap_value):
        rollout = np.array(rollout)
        observations = rollout[:, 0]
        actions = rollout[:, 1]
        rewards = rollout[:, 2]
        next_observations = rollout[:, 3]
        values = rollout[:, 5]
        # Here we take the rewards and values from the rollout, and use them to
        # generate the advantage and discounted returns.
        # The advantage function uses "Generalized Advantage Estimation".
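        # Concretely: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), and the advantage
        # is the discounted sum of the deltas (discounted by gamma itself, i.e. GAE
        # with lambda = 1); the bootstrap value stands in for the return beyond the
        # end of the rollout.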
        self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
        discounted_rewards = discount(self.rewards_plus, gamma)[:-1]
        self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
        advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
        advantages = discount(advantages, gamma)
        # Update the global network using gradients from the loss.
        # Generate network statistics to periodically save.
        feed_dict = {self.local_AC.target_v: discounted_rewards,
                     self.local_AC.inputs: np.vstack(observations),
                     self.local_AC.actions: actions,
                     self.local_AC.advantages: advantages}
        v_l, p_l, e_l, g_n, v_n, _ = sess.run([self.local_AC.value_loss,
                                               self.local_AC.policy_loss,
                                               self.local_AC.entropy,
                                               self.local_AC.grad_norms,
                                               self.local_AC.var_norms,
                                               self.local_AC.apply_grads],
                                              feed_dict=feed_dict)
        return v_l / len(rollout), p_l / len(rollout), e_l / len(rollout), g_n, v_n
    def work(self, max_episode_length, gamma, global_AC, sess, coord, saver):
        episode_count = sess.run(self.global_episodes)
        total_steps = 0
        print("Starting worker %d" % self.number)
        with sess.as_default(), sess.graph.as_default():
            while not coord.should_stop():
                sess.run(self.update_local_ops)
                episode_buffer = []
                episode_values = []
                episode_frames = []
                episode_reward = 0
                episode_step_count = 0
                d = False
                s = self.env.reset()
                episode_frames.append(s)
                s = process_frame(s)
                #rnn_state = self.local_AC.state_init
                while d == False:
                    # Take an action using probabilities from policy network output.
                    a_dist, v = sess.run(
                        [self.local_AC.policy, self.local_AC.value],
                        feed_dict={
                            self.local_AC.inputs: [s]})
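                    # Sample an action index from the policy: np.random.choice picks one of
                    # the probability values weighted by itself, and argmax recovers its index
                    # (the sampling idiom used in the referenced tutorial). The local index is
                    # then mapped to a Gym action id via self.actions.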
                    a = np.random.choice(a_dist[0], p=a_dist[0])
                    a = np.argmax(a_dist == a)
                    s1, r, d, _ = self.env.step(self.actions[a])
                    if d == False:
                        episode_frames.append(s1)
                        s1 = process_frame(s1)
                    else:
                        s1 = s
                    episode_buffer.append([s, a, r, s1, d, v[0, 0]])
                    episode_values.append(v[0, 0])
                    episode_reward += r
                    s = s1
                    total_steps += 1
                    episode_step_count += 1
                    # If the episode hasn't ended, but the experience buffer is full, then we
                    # make an update step using that experience rollout.
                    if len(episode_buffer) == 30 and d != True and episode_step_count != max_episode_length - 1:
                        # Since we don't know what the true final return is, we "bootstrap" from our current
                        # value estimation.
                        v1 = sess.run(self.local_AC.value,
                                      feed_dict={
                                          self.local_AC.inputs: [s]})[0, 0]
                        v_l, p_l, e_l, g_n, v_n = self.train(global_AC, episode_buffer, sess, gamma, v1)
                        episode_buffer = []
                        sess.run(self.update_local_ops)
                    if d == True:
                        break
                self.episode_rewards.append(episode_reward)
                self.episode_lengths.append(episode_step_count)
                self.episode_mean_values.append(np.mean(episode_values))
                # Update the network using the experience buffer at the end of the episode.
                if len(episode_buffer) != 0:
                    v_l, p_l, e_l, g_n, v_n = self.train(global_AC, episode_buffer, sess, gamma, 0.0)
                # Periodically save model parameters and summary statistics.
                if episode_count % 5 == 0 and episode_count != 0:
                    if episode_count % 50 == 0 and self.name == 'worker_0':
                        saver.save(sess, self.model_path + '/model-' + str(episode_count) + '.cptk')
                        print("Saved Model")
                    mean_reward = np.mean(self.episode_rewards[-5:])
                    mean_length = np.mean(self.episode_lengths[-5:])
                    mean_value = np.mean(self.episode_mean_values[-5:])
                    summary = tf.Summary()
                    summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))
                    summary.value.add(tag='Perf/Length', simple_value=float(mean_length))
                    summary.value.add(tag='Perf/Value', simple_value=float(mean_value))
                    summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))
                    summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))
                    summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))
                    summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))
                    summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))
                    self.summary_writer.add_summary(summary, episode_count)
                    self.summary_writer.flush()
                if self.name == 'worker_0':
                    sess.run(self.increment)
                episode_count += 1
max_episode_length = 300
gamma = .99     # Discount rate for advantage estimation and reward discounting
s_size = 7056   # Observations are grayscale frames of 84 * 84 * 1
a_size = 3      # Agent can stay, move left, or move right
load_model = False
model_path = './model'
tf.reset_default_graph()
if not os.path.exists(model_path):
    os.makedirs(model_path)
# Create a directory to save episode playback gifs to
if not os.path.exists('./frames'):
    os.makedirs('./frames')
with tf.device("/cpu:0"):
    global_episodes = tf.Variable(0, dtype=tf.int32, name='global_episodes', trainable=False)
    trainer = tf.train.AdamOptimizer(learning_rate=1e-4)
    master_network = AC_Network(s_size, a_size, 'global', None)    # Generate the global network
    global_sum = tf.summary.FileWriter('global')
    global_sum.add_graph(tf.get_default_graph())
    global_sum.flush()
    num_workers = 8     # multiprocessing.cpu_count() would set this to the number of available CPU threads
    workers = []
    # Create worker instances
    for i in range(num_workers):
        workers.append(Worker(i, s_size, a_size, trainer, model_path, global_episodes))
    saver = tf.train.Saver(max_to_keep=5)
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    if load_model == True:
        print('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(model_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())
    # This is where the asynchronous magic happens.
    # Start the "work" process for each worker in a separate thread.
    worker_threads = []
    for worker in workers:
        # Bind the current worker as a default argument so each thread runs its own
        # worker (a bare lambda would late-bind the loop variable).
        worker_work = lambda worker=worker: worker.work(max_episode_length, gamma, master_network, sess, coord, saver)
        t = threading.Thread(target=worker_work)
        t.start()
        worker_threads.append(t)
    coord.join(worker_threads)
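# Training progress can be monitored with TensorBoard: the per-worker FileWriters
# write summaries to ./train_<n> and the graph to ./global, so something like
# `tensorboard --logdir=.` run from this directory should pick them up.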