# This script is taken from OpenAI's baselines implementation and adapted for hInt-RL
# ===================================================================================================================

import random
import operator
from collections import namedtuple

import torch
import numpy as np

Experience = namedtuple('Experience', field_names=['state', 'action', 'reward', 'next_state', 'done', 'goal'])

class LinearSchedule(object):
    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        """Linear interpolation between initial_p and final_p over
        schedule_timesteps. After this many timesteps pass, final_p is
        returned.

        Parameters
        ----------
        schedule_timesteps: int
            Number of timesteps for which to linearly anneal initial_p
            to final_p
        initial_p: float
            initial output value
        final_p: float
            final output value
        """
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        """Value of the schedule at timestep t."""
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)
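
# Illustrative usage (not part of the original script): annealing an
# exploration rate from 1.0 down to 0.02 over the first 10,000 steps.
#
#   eps = LinearSchedule(schedule_timesteps=10000, final_p=0.02)
#   eps.value(0)       # 1.0
#   eps.value(5000)    # 0.51
#   eps.value(20000)   # 0.02 (clamped once t exceeds schedule_timesteps)
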
class TanHSchedule(object):
    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        """Tanh annealing schedule with restarts, i.e. hyperbolic tangent decay.

        Parameters
        ----------
        schedule_timesteps: int
            Number of overall timesteps for which to anneal initial_p to final_p
        initial_p: float
            max value
        final_p: float
            min value
        """
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p
        self.tt = 0

    def value(self, t):
        if t > 0:
            # t <= 0 means the random play steps are still active,
            # so the annealing counter is not advanced.
            self.tt += 1.
        tanh = np.tanh(self.tt / self.schedule_timesteps - 4.)
        return (self.initial_p + self.final_p) / 2. - (self.initial_p - self.final_p) / 2. * tanh

    def restart(self):
        print('Resetting tanh cycle...')
        self.tt = 0
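
# Illustrative behaviour (not part of the original script): the tanh argument
# starts at -4 (value close to initial_p), crosses 0 after
# 4 * schedule_timesteps counted steps (value = (initial_p + final_p) / 2),
# and saturates near final_p at around 8 * schedule_timesteps.
#
#   temp = TanHSchedule(schedule_timesteps=1000, final_p=0.1)
#   temp.value(1)    # ~1.0 on the first counted step
#   temp.restart()   # rewind the cycle so the value climbs back to ~initial_p
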
class SegmentTree(object):
    def __init__(self, capacity, operation, neutral_element):
        """Build a Segment Tree data structure.

        https://en.wikipedia.org/wiki/Segment_tree

        Can be used as a regular array, but with two
        important differences:

            a) setting an item's value is slightly slower.
               It is O(lg capacity) instead of O(1).
            b) the user has access to an efficient `reduce`
               operation which reduces `operation` over
               a contiguous subsequence of items in the
               array.

        Parameters
        ----------
        capacity: int
            Total size of the array - must be a power of two.
        operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max);
            must form a mathematical group together with the set of
            possible values for array elements.
        neutral_element: obj
            neutral element for the operation above, e.g. float('-inf')
            for max and 0 for sum.
        """
        assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
        self._capacity = capacity
        self._value = [neutral_element for _ in range(2 * capacity)]
        self._operation = operation

    def _reduce_helper(self, start, end, node, node_start, node_end):
        if start == node_start and end == node_end:
            return self._value[node]
        mid = (node_start + node_end) // 2
        if end <= mid:
            return self._reduce_helper(start, end, 2 * node, node_start, mid)
        else:
            if mid + 1 <= start:
                return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
            else:
                return self._operation(
                    self._reduce_helper(start, mid, 2 * node, node_start, mid),
                    self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
                )

    def reduce(self, start=0, end=None):
        """Returns result of applying `self.operation`
        to a contiguous subsequence of the array.

            self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))

        Parameters
        ----------
        start: int
            beginning of the subsequence
        end: int
            end of the subsequence (exclusive; defaults to the full array)

        Returns
        -------
        reduced: obj
            result of reducing self.operation over the specified range of array elements.
        """
        if end is None:
            end = self._capacity
        if end < 0:
            end += self._capacity
        end -= 1
        return self._reduce_helper(start, end, 1, 0, self._capacity - 1)

    def __setitem__(self, idx, val):
        # index of the leaf
        idx += self._capacity
        self._value[idx] = val
        idx //= 2
        # propagate the change up to the root
        while idx >= 1:
            self._value[idx] = self._operation(
                self._value[2 * idx],
                self._value[2 * idx + 1]
            )
            idx //= 2

    def __getitem__(self, idx):
        assert 0 <= idx < self._capacity
        return self._value[self._capacity + idx]
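
# Illustrative usage (not part of the original script): a max-tree over four slots.
#
#   tree = SegmentTree(capacity=4, operation=max, neutral_element=float('-inf'))
#   tree[0], tree[2] = 5.0, 7.0
#   tree.reduce(0, 2)   # max over indices 0..1 -> 5.0 (end is exclusive)
#   tree.reduce()       # max over the whole array -> 7.0
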
class SumSegmentTree(SegmentTree):
    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(
            capacity=capacity,
            operation=operator.add,
            neutral_element=0.0
        )

    def sum(self, start=0, end=None):
        """Returns arr[start] + ... + arr[end]"""
        return super(SumSegmentTree, self).reduce(start, end)

    def find_prefixsum_idx(self, prefixsum):
        """Find the highest index `i` in the array such that
        arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum

        If array values are probabilities, this function
        allows sampling indexes according to the discrete
        probability efficiently.

        Parameters
        ----------
        prefixsum: float
            upper bound on the sum of array prefix

        Returns
        -------
        idx: int
            highest index satisfying the prefixsum constraint
        """
        assert 0 <= prefixsum <= self.sum() + 1e-5
        idx = 1
        while idx < self._capacity:  # while non-leaf
            if self._value[2 * idx] > prefixsum:
                idx = 2 * idx
            else:
                prefixsum -= self._value[2 * idx]
                idx = 2 * idx + 1
        return idx - self._capacity
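
# Illustrative sampling sketch (not part of the original script): treating leaf
# values as unnormalised probabilities, a uniform draw from [0, tree.sum())
# mapped through find_prefixsum_idx picks each index proportionally to its value.
#
#   tree = SumSegmentTree(capacity=4)
#   tree[0], tree[1] = 1.0, 3.0
#   tree.find_prefixsum_idx(0.5)   # -> 0 (mass below 1.0 falls in slot 0)
#   tree.find_prefixsum_idx(2.0)   # -> 1 (mass in [1.0, 4.0) falls in slot 1)
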
class MinSegmentTree(SegmentTree):
    def __init__(self, capacity):
        super(MinSegmentTree, self).__init__(
            capacity=capacity,
            operation=min,
            neutral_element=float('inf')
        )

    def min(self, start=0, end=None):
        """Returns min(arr[start], ..., arr[end])"""
        return super(MinSegmentTree, self).reduce(start, end)
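
# Illustrative note (not part of the original script): the min-tree mirrors the
# sum-tree and yields the smallest stored priority in O(lg capacity); it is used
# by PrioritizedReplayBuffer below to normalise importance-sampling weights.
#
#   tree = MinSegmentTree(capacity=4)
#   tree[0], tree[1] = 2.0, 0.5
#   tree.min()   # -> 0.5 (empty slots hold the neutral element float('inf'))
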
class ReplayBuffer(object):
    def __init__(self, size):
        """Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        """
        self._storage = []
        self._maxsize = size
        self._next_idx = 0

    def __len__(self):
        return len(self._storage)

    def add(self, obs_t, action, reward, obs_tp1, done, goal):
        data = (obs_t, action, reward, obs_tp1, done, goal)

        if self._next_idx >= len(self._storage):
            self._storage.append(data)
        else:
            self._storage[self._next_idx] = data
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def _encode_sample_torch(self, idxes):
        sample = [self._storage[i] for i in idxes]
        batch = Experience(*zip(*sample))
        return (torch.stack(batch.state),
                torch.stack(batch.action),
                torch.stack(batch.reward).squeeze(),
                torch.stack(batch.next_state),
                torch.tensor(batch.done).squeeze(),
                torch.stack(batch.goal))

    """
    # old NumPy implementation from the original baselines code
    def _encode_sample(self, idxes):
        obses_t, actions, rewards, obses_tp1, dones, goals = [], [], [], [], [], []
        for i in idxes:
            data = self._storage[i]
            obs_t, action, reward, obs_tp1, done, goal = data
            obses_t.append(np.array(obs_t, copy=False))
            actions.append(np.array(action, copy=False))
            rewards.append(reward)
            obses_tp1.append(np.array(obs_tp1, copy=False))
            dones.append(done)
            goals.append(np.array(goal, copy=False))
        return np.array(obses_t), np.array(actions), np.array(rewards, dtype=np.float32), np.array(obses_tp1), np.array(dones, dtype=np.uint8), np.array(goals, dtype=np.uint8)
    """

    def sample(self, batch_size):
        """Sample a batch of experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch: torch.Tensor
            batch of observations
        act_batch: torch.Tensor
            batch of actions executed given obs_batch
        rew_batch: torch.Tensor
            rewards received as results of executing act_batch
        next_obs_batch: torch.Tensor
            next set of observations seen after executing act_batch
        done_mask: torch.Tensor
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        goal_batch: torch.Tensor
            batch of goals the transitions were collected under
        """
        idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
        return self._encode_sample_torch(idxes)
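
# Illustrative usage (not part of the original script); `obs`, `action`, `reward`,
# `next_obs` and `goal` are assumed to be torch tensors so _encode_sample_torch
# can stack them, while `done` is a plain bool:
#
#   buffer = ReplayBuffer(size=100000)
#   buffer.add(obs, action, reward, next_obs, done, goal)   # one transition
#   states, actions, rewards, next_states, dones, goals = buffer.sample(32)
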
class PrioritizedReplayBuffer(ReplayBuffer):
    def __init__(self, size, alpha):
        """Create Prioritized Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        alpha: float
            how much prioritization is used
            (0 - no prioritization, 1 - full prioritization)

        See Also
        --------
        ReplayBuffer.__init__
        """
        super(PrioritizedReplayBuffer, self).__init__(size)
        assert alpha > 0
        self._alpha = alpha

        # segment trees require a power-of-two capacity
        it_capacity = 1
        while it_capacity < size:
            it_capacity *= 2

        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        self._max_priority = 1.0

    def add(self, *args, **kwargs):
        """See ReplayBuffer.add"""
        idx = self._next_idx
        super(PrioritizedReplayBuffer, self).add(*args, **kwargs)
        # new transitions enter with maximal priority so they are sampled at least once
        self._it_sum[idx] = self._max_priority ** self._alpha
        self._it_min[idx] = self._max_priority ** self._alpha

    def _sample_proportional(self, batch_size):
        res = []
        for _ in range(batch_size):
            # TODO(szymon): should we ensure no repeats?
            mass = random.random() * self._it_sum.sum(0, len(self._storage) - 1)
            idx = self._it_sum.find_prefixsum_idx(mass)
            res.append(idx)
        return res

    def sample(self, batch_size, beta):
        """Sample a batch of experiences.

        Compared to ReplayBuffer.sample it also returns importance
        weights and idxes of the sampled experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.
        beta: float
            To what degree to use importance weights
            (0 - no corrections, 1 - full correction)

        Returns
        -------
        obs_batch: torch.Tensor
            batch of observations
        act_batch: torch.Tensor
            batch of actions executed given obs_batch
        rew_batch: torch.Tensor
            rewards received as results of executing act_batch
        next_obs_batch: torch.Tensor
            next set of observations seen after executing act_batch
        done_mask: torch.Tensor
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        goal_batch: torch.Tensor
            batch of goals the transitions were collected under
        weights: np.array
            Array of shape (batch_size,) denoting importance weight
            of each sampled transition
        idxes: [int]
            indexes in buffer of sampled experiences
        """
        assert beta > 0

        idxes = self._sample_proportional(batch_size)

        weights = []
        # weights are normalised by the maximum possible weight so they stay in (0, 1]
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * len(self._storage)) ** (-beta)

        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * len(self._storage)) ** (-beta)
            weights.append(weight / max_weight)
        weights = np.array(weights)
        encoded_sample = self._encode_sample_torch(idxes)
        return tuple(list(encoded_sample) + [weights, idxes])

    def update_priorities(self, idxes, priorities):
        """Update priorities of sampled transitions.

        Sets priority of transition at index idxes[i] in buffer
        to priorities[i].

        Parameters
        ----------
        idxes: [int]
            List of idxes of sampled transitions
        priorities: [float]
            List of updated priorities corresponding to
            transitions at the sampled idxes denoted by
            variable `idxes`.
        """
        assert len(idxes) == len(priorities)
        for idx, priority in zip(idxes, priorities):
            assert priority > 0
            assert 0 <= idx < len(self._storage)
            self._it_sum[idx] = priority ** self._alpha
            self._it_min[idx] = priority ** self._alpha

            self._max_priority = max(self._max_priority, priority)
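
# Illustrative PER training-loop sketch (not part of the original script;
# `compute_td_errors` is a hypothetical user-supplied function). alpha = 0.6
# with beta annealed from 0.4 to 1.0 are the usual prioritized-replay settings:
#
#   buffer = PrioritizedReplayBuffer(size=2 ** 17, alpha=0.6)
#   beta_schedule = LinearSchedule(schedule_timesteps=100000, final_p=1.0, initial_p=0.4)
#   for t in range(num_steps):
#       ...
#       *batch, weights, idxes = buffer.sample(32, beta=beta_schedule.value(t))
#       td_errors = compute_td_errors(batch, weights)
#       buffer.update_priorities(idxes, np.abs(td_errors) + 1e-6)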