# -*- coding: utf-8 -*-
"""Markov Decision Process (MDP) Toolbox: ``mdp`` module
=====================================================

The ``mdp`` module provides classes for the resolution of discrete-time Markov
Decision Processes.

Available classes
-----------------
:class:`~mdptoolbox.mdp.MDP`
    Base Markov decision process class
:class:`~mdptoolbox.mdp.FiniteHorizon`
    Backwards induction finite horizon MDP
:class:`~mdptoolbox.mdp.PolicyIteration`
    Policy iteration MDP
:class:`~mdptoolbox.mdp.PolicyIterationModified`
    Modified policy iteration MDP
:class:`~mdptoolbox.mdp.QLearning`
    Q-learning MDP
:class:`~mdptoolbox.mdp.RelativeValueIteration`
    Relative value iteration MDP
:class:`~mdptoolbox.mdp.ValueIteration`
    Value iteration MDP
:class:`~mdptoolbox.mdp.ValueIterationGS`
    Gauss-Seidel value iteration MDP

"""

# Copyright (c) 2011-2015 Steven A. W. Cordwell
# Copyright (c) 2009 INRA
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
#     and/or other materials provided with the distribution.
#   * Neither the name of the <ORGANIZATION> nor the names of its contributors
#     may be used to endorse or promote products derived from this software
#     without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

import math as _math
import time as _time

import numpy as _np
import scipy.sparse as _sp

import mdptoolbox.util as _util

_MSG_STOP_MAX_ITER = "Iterating stopped due to maximum number of iterations " \
    "condition."
_MSG_STOP_EPSILON_OPTIMAL_POLICY = "Iterating stopped, epsilon-optimal " \
    "policy found."
_MSG_STOP_EPSILON_OPTIMAL_VALUE = "Iterating stopped, epsilon-optimal value " \
    "function found."
_MSG_STOP_UNCHANGING_POLICY = "Iterating stopped, unchanging policy found."


def _computeDimensions(transition):
    A = len(transition)
    try:
        if transition.ndim == 3:
            S = transition.shape[1]
        else:
            S = transition[0].shape[0]
    except AttributeError:
        S = transition[0].shape[0]
    return S, A
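
# A quick illustration of the two transition formats handled above (a sketch
# with made-up shapes, not part of the toolbox API): a dense numpy array of
# shape (A, S, S) takes the ``transition.ndim == 3`` branch, while a length-A
# sequence of (S, S) matrices falls through to ``transition[0].shape[0]``.
#
#     import numpy as np
#     P_dense = np.random.rand(2, 3, 3)   # A=2 actions, S=3 states
#     P_list = [np.eye(3), np.eye(3)]     # length-A list of (S, S) matrices
#     # _computeDimensions(P_dense) == _computeDimensions(P_list) == (3, 2)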


def _printVerbosity(iteration, variation):
    if isinstance(variation, float):
        print("{:>10}{:>12f}".format(iteration, variation))
    elif isinstance(variation, int):
        print("{:>10}{:>12d}".format(iteration, variation))
    else:
        print("{:>10}{:>12}".format(iteration, variation))


class MDP(object):

    """A Markov Decision Problem.

    Let ``S`` = the number of states, and ``A`` = the number of actions.

    Parameters
    ----------
    transitions : array
        Transition probability matrices. These can be defined in a variety
        of ways. The simplest is a numpy array that has the shape
        ``(A, S, S)``, though there are other possibilities. It can be a
        tuple or list or numpy object array of length ``A``, where each
        element contains a numpy array or matrix that has the shape
        ``(S, S)``. This "list of matrices" form is useful when the
        transition matrices are sparse as ``scipy.sparse.csr_matrix``
        matrices can be used. In summary, each action's transition matrix
        must be indexable like ``transitions[a]`` where
        ``a`` ∈ {0, 1...A-1}, and ``transitions[a]`` returns an ``S`` ×
        ``S`` array-like object.
    reward : array
        Reward matrices or vectors. Like the transition matrices, these can
        also be defined in a variety of ways. Again the simplest is a numpy
        array that has the shape ``(S, A)``, ``(S,)`` or ``(A, S, S)``. A
        list of lists can be used, where each inner list has length ``S``
        and the outer list has length ``A``. A list of numpy arrays is
        possible where each inner array can be of the shape ``(S,)``,
        ``(S, 1)``, ``(1, S)`` or ``(S, S)``. Also
        ``scipy.sparse.csr_matrix`` can be used instead of numpy arrays.
        In addition, the outer list can be replaced by any object that can
        be indexed like ``reward[a]`` such as a tuple or numpy object array
        of length ``A``.
    discount : float
        Discount factor. The per time-step discount factor on future
        rewards. Valid values are greater than 0 up to and including 1. If
        the discount factor is 1, then convergence cannot be assumed and a
        warning will be displayed. Subclasses of ``MDP`` may pass ``None``
        in the case where the algorithm does not use a discount factor.
    epsilon : float
        Stopping criterion. The maximum change in the value function at
        each iteration is compared against ``epsilon``. Once the change
        falls below this value, then the value function is considered to
        have converged to the optimal value function. Subclasses of ``MDP``
        may pass ``None`` in the case where the algorithm does not use an
        epsilon-optimal stopping criterion.
    max_iter : int
        Maximum number of iterations. The algorithm will be terminated once
        this many iterations have elapsed. This must be greater than 0 if
        specified. Subclasses of ``MDP`` may pass ``None`` in the case
        where the algorithm does not use a maximum number of iterations.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Attributes
    ----------
    P : array
        Transition probability matrices.
    R : array
        Reward vectors.
    V : tuple
        The optimal value function. Each element is a float corresponding
        to the expected value of being in that state assuming the optimal
        policy is followed.
    discount : float
        The discount rate on future rewards.
    max_iter : int
        The maximum number of iterations.
    policy : tuple
        The optimal policy.
    time : float
        The time used to converge to the optimal policy.
    verbose : boolean
        Whether verbose output should be displayed or not.

    Methods
    -------
    run
        Implemented in child classes as the main algorithm loop. Raises an
        exception if it has not been overridden.
    setSilent
        Turn the verbosity off
    setVerbose
        Turn the verbosity on

    """

    def __init__(self, transitions, reward, discount, epsilon, max_iter,
                 skip_check=False):
        # Initialise a MDP based on the input parameters.
        # if the discount is None then the algorithm is assumed to not use
        # it in its computations
        if discount is not None:
            self.discount = float(discount)
            assert 0.0 < self.discount <= 1.0, (
                "Discount rate must be in ]0; 1]"
            )
            if self.discount == 1:
                print("WARNING: check conditions of convergence. With no "
                      "discount, convergence can not be assumed.")

        # if the max_iter is None then the algorithm is assumed to not use
        # it in its computations
        if max_iter is not None:
            self.max_iter = int(max_iter)
            assert self.max_iter > 0, (
                "The maximum number of iterations must be greater than 0."
            )

        # check that epsilon is something sane
        if epsilon is not None:
            self.epsilon = float(epsilon)
            assert self.epsilon > 0, "Epsilon must be greater than 0."

        if not skip_check:
            # We run a check on P and R to make sure they are describing an
            # MDP. If an exception isn't raised then they are assumed to be
            # correct.
            _util.check(transitions, reward)

        self.S, self.A = _computeDimensions(transitions)
        self.P = self._computeTransition(transitions)
        self.R = self._computeReward(reward, transitions)

        # the verbosity is by default turned off
        self.verbose = False
        # Initially the time taken to perform the computations is set to None
        self.time = None
        # set the initial iteration count to zero
        self.iter = 0
        # V should be stored as a vector ie shape of (S,) or (1, S)
        self.V = None
        # policy can also be stored as a vector
        self.policy = None

    def __repr__(self):
        P_repr = "P: \n"
        R_repr = "R: \n"
        for aa in range(self.A):
            P_repr += repr(self.P[aa]) + "\n"
            R_repr += repr(self.R[aa]) + "\n"
        return(P_repr + "\n" + R_repr)

    def _bellmanOperator(self, V=None):
        # Apply the Bellman operator on the value function.
        #
        # Updates the value function and the Vprev-improving policy.
        #
        # Returns: (policy, value), tuple of new policy and its value
        #
        # If V hasn't been sent into the method, then we assume to be working
        # on the objects V attribute
        if V is None:
            # this V should be a reference to the data rather than a copy
            V = self.V
        else:
            # make sure the user supplied V is of the right shape
            try:
                assert V.shape in ((self.S,), (1, self.S)), "V is not the " \
                    "right shape (Bellman operator)."
            except AttributeError:
                raise TypeError("V must be a numpy array or matrix.")
        # Looping through each action, the Q-value matrix is calculated.
        # P and V can be any object that supports indexing, so it is important
        # that you know they define a valid MDP before calling the
        # _bellmanOperator method. Otherwise the results will be meaningless.
        Q = _np.empty((self.A, self.S))
        for aa in range(self.A):
            Q[aa] = self.R[aa] + self.discount * self.P[aa].dot(V)
        # Get the policy and value, for now it is being returned but...
        # Which way is better?
        # 1. Return, (policy, value)
        return (Q.argmax(axis=0), Q.max(axis=0))
        # 2. update self.policy and self.V directly
        # self.V = Q.max(axis=1)
        # self.policy = Q.argmax(axis=1)

    def _computeTransition(self, transition):
        return tuple(transition[a] for a in range(self.A))

    def _computeReward(self, reward, transition):
        # Compute the reward for the system in one state choosing an action.
        # Arguments
        # Let S = number of states, A = number of actions
        # P could be an array with 3 dimensions or a cell array (1xA),
        # each cell containing a matrix (SxS) possibly sparse
        # R could be an array with 3 dimensions (SxSxA) or a cell array
        # (1xA), each cell containing a sparse matrix (SxS) or a 2D
        # array(SxA) possibly sparse
        try:
            if reward.ndim == 1:
                return self._computeVectorReward(reward)
            elif reward.ndim == 2:
                return self._computeArrayReward(reward)
            else:
                r = tuple(map(self._computeMatrixReward, reward, transition))
                return r
        except (AttributeError, ValueError):
            if len(reward) == self.A:
                r = tuple(map(self._computeMatrixReward, reward, transition))
                return r
            else:
                return self._computeVectorReward(reward)

    def _computeVectorReward(self, reward):
        if _sp.issparse(reward):
            raise NotImplementedError
        else:
            r = _np.array(reward).reshape(self.S)
            return tuple(r for a in range(self.A))

    def _computeArrayReward(self, reward):
        if _sp.issparse(reward):
            raise NotImplementedError
        else:
            def func(x):
                return _np.array(x).reshape(self.S)

            return tuple(func(reward[:, a]) for a in range(self.A))

    def _computeMatrixReward(self, reward, transition):
        if _sp.issparse(reward):
            # An approach like this might be more memory efficient
            # reward.data = reward.data * transition[reward.nonzero()]
            # return reward.sum(1).A.reshape(self.S)
            # but doesn't work as it is.
            return reward.multiply(transition).sum(1).A.reshape(self.S)
        elif _sp.issparse(transition):
            return transition.multiply(reward).sum(1).A.reshape(self.S)
        else:
            return _np.multiply(transition, reward).sum(1).reshape(self.S)

    def _startRun(self):
        if self.verbose:
            _printVerbosity('Iteration', 'Variation')

        self.time = _time.time()

    def _endRun(self):
        # store value and policy as tuples
        self.V = tuple(self.V.tolist())

        try:
            self.policy = tuple(self.policy.tolist())
        except AttributeError:
            self.policy = tuple(self.policy)

        self.time = _time.time() - self.time
[docs] def run(self): """Raises error because child classes should implement this function. """ raise NotImplementedError("You should create a run() method.")

    def setSilent(self):
        """Set the MDP algorithm to silent mode."""
        self.verbose = False

    def setVerbose(self):
        """Set the MDP algorithm to verbose mode."""
        self.verbose = True
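
# The Bellman backup performed by MDP._bellmanOperator above, written out as
# a standalone sketch for reference (hypothetical names, dense numpy arrays
# assumed; the real method also accepts sparse per-action matrices):
#
#     import numpy as np
#     def bellman_backup(P, R, V, discount):
#         # P: (A, S, S) transitions, R: length-A sequence of (S,) rewards
#         Q = np.array([R[a] + discount * P[a].dot(V) for a in range(len(P))])
#         return Q.argmax(axis=0), Q.max(axis=0)   # (policy, value)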


class FiniteHorizon(MDP):

    """An MDP solved using the finite-horizon backwards induction algorithm.

    Parameters
    ----------
    transitions : array
        Transition probability matrices. See the documentation for the
        ``MDP`` class for details.
    reward : array
        Reward matrices or vectors. See the documentation for the ``MDP``
        class for details.
    discount : float
        Discount factor. See the documentation for the ``MDP`` class for
        details.
    N : int
        Number of periods. Must be greater than 0.
    h : array, optional
        Terminal reward. Default: a vector of zeros.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Data Attributes
    ---------------
    V : array
        Optimal value function. Shape = (S, N+1). ``V[:, n]`` = optimal
        value function at stage ``n`` with stage in {0, 1...N-1}.
        ``V[:, N]`` = value function for the terminal stage.
    policy : array
        Optimal policy. ``policy[:, n]`` = optimal policy at stage ``n``
        with stage in {0, 1...N-1}.
    time : float
        used CPU time

    Notes
    -----
    In verbose mode, displays the current stage and policy transpose.

    Examples
    --------
    >>> import mdptoolbox, mdptoolbox.example
    >>> P, R = mdptoolbox.example.forest()
    >>> fh = mdptoolbox.mdp.FiniteHorizon(P, R, 0.9, 3)
    >>> fh.run()
    >>> fh.V
    array([[ 2.6973,  0.81  ,  0.    ,  0.    ],
           [ 5.9373,  3.24  ,  1.    ,  0.    ],
           [ 9.9373,  7.24  ,  4.    ,  0.    ]])
    >>> fh.policy
    array([[0, 0, 0],
           [0, 0, 1],
           [0, 0, 0]])

    """

    def __init__(self, transitions, reward, discount, N, h=None,
                 skip_check=False):
        # Initialise a finite horizon MDP.
        self.N = int(N)
        assert self.N > 0, "N must be greater than 0."
        # Initialise the base class
        MDP.__init__(self, transitions, reward, discount, None, None,
                     skip_check=skip_check)
        # remove the iteration counter, it is not meaningful for backwards
        # induction
        del self.iter
        # There are value vectors for each time step up to the horizon
        self.V = _np.zeros((self.S, N + 1))
        # There are policy vectors for each time step before the horizon, when
        # we reach the horizon we don't need to make decisions anymore.
        self.policy = _np.empty((self.S, N), dtype=int)
        # Set the reward for the final transition to h, if specified.
        if h is not None:
            self.V[:, N] = h

    def run(self):
        # Run the finite horizon algorithm.
        self.time = _time.time()
        # loop through each time period
        for n in range(self.N):
            W, X = self._bellmanOperator(self.V[:, self.N - n])
            stage = self.N - n - 1
            self.V[:, stage] = X
            self.policy[:, stage] = W
            if self.verbose:
                print(("stage: %s, policy: %s") % (
                    stage, self.policy[:, stage].tolist()))
        # update time spent running
        self.time = _time.time() - self.time
        # After this we could create a tuple of tuples for the values and
        # policies.
        # self.V = tuple(tuple(self.V[:, n].tolist()) for n in range(self.N))
        # self.policy = tuple(tuple(self.policy[:, n].tolist())
        #                     for n in range(self.N))
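
# The loop above implements the backward induction recursion
#     V[:, n] = max_a ( R[a] + discount * P[a] . V[:, n+1] ),
# starting from the terminal column V[:, N] (zero, or ``h`` if given), so the
# stage-0 column V[:, 0] is the value of acting optimally for N more periods.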


class _LP(MDP):

    """A discounted MDP solved using linear programming.

    This class requires the Python ``cvxopt`` module to be installed.

    Arguments
    ---------
    transitions : array
        Transition probability matrices. See the documentation for the
        ``MDP`` class for details.
    reward : array
        Reward matrices or vectors. See the documentation for the ``MDP``
        class for details.
    discount : float
        Discount factor. See the documentation for the ``MDP`` class for
        details.
    h : array, optional
        Terminal reward. Default: a vector of zeros.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Data Attributes
    ---------------
    V : tuple
        optimal values
    policy : tuple
        optimal policy
    time : float
        used CPU time

    Examples
    --------
    >>> import mdptoolbox.example
    >>> P, R = mdptoolbox.example.forest()
    >>> lp = mdptoolbox.mdp._LP(P, R, 0.9)
    >>> lp.run()

    >>> import numpy, mdptoolbox
    >>> P = numpy.array((((0.5, 0.5), (0.8, 0.2)), ((0, 1), (0.1, 0.9))))
    >>> R = numpy.array(((5, 10), (-1, 2)))
    >>> lp = mdptoolbox.mdp._LP(P, R, 0.9)
    >>> lp.run()
    >>> #lp.policy #FIXME: gives (1, 1), should be (1, 0)

    """

    def __init__(self, transitions, reward, discount, skip_check=False):
        # Initialise a linear programming MDP.
        # import some functions from cvxopt and set them as object methods
        try:
            from cvxopt import matrix, solvers
            self._linprog = solvers.lp
            self._cvxmat = matrix
        except ImportError:
            raise ImportError("The python module cvxopt is required to use "
                              "linear programming functionality.")
        # initialise the MDP. epsilon and max_iter are not needed
        MDP.__init__(self, transitions, reward, discount, None, None,
                     skip_check=skip_check)
        # Set the cvxopt solver to be quiet by default, but ...
        # this doesn't do what I want it to do c.f. issue #3
        if not self.verbose:
            solvers.options['show_progress'] = False

    def run(self):
        # Run the linear programming algorithm.
        self.time = _time.time()
        # The objective is to resolve : min V / V >= PR + discount*P*V
        # The function linprog of the optimisation Toolbox of Mathworks
        # resolves :
        # min f'* x / M * x <= b
        # So the objective could be expressed as :
        # min V / (discount*P-I) * V <= - PR
        # To avoid loop on states, the matrix M is structured following
        # actions M(A*S, S)
        f = self._cvxmat(_np.ones((self.S, 1)))
        h = _np.array(self.R).reshape(self.S * self.A, 1, order="F")
        h = self._cvxmat(h, tc='d')
        M = _np.zeros((self.A * self.S, self.S))
        for aa in range(self.A):
            pos = (aa + 1) * self.S
            M[(pos - self.S):pos, :] = (
                self.discount * self.P[aa] - _sp.eye(self.S, self.S))
        M = self._cvxmat(M)
        # Using the glpk option will make this behave more like Octave
        # (Octave uses glpk) and perhaps Matlab. If solver=None (ie using the
        # default cvxopt solver) then V agrees with the Octave equivalent
        # only to 10e-8 places. This assumes glpk is installed of course.
        self.V = _np.array(self._linprog(f, M, -h)['x']).reshape(self.S)
        # apply the Bellman operator
        self.policy, self.V = self._bellmanOperator()
        # update the time spent solving
        self.time = _time.time() - self.time
        # store value and policy as tuples
        self.V = tuple(self.V.tolist())
        self.policy = tuple(self.policy.tolist())
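
# For reference, the same LP can be posed with scipy.optimize.linprog instead
# of cvxopt. This is only a sketch, not part of the class: it assumes P, r, S,
# A and discount are in scope with the same meanings as self.P, self.R (already
# reduced to per-action expected reward vectors of shape (S,)), self.S, self.A
# and self.discount.
#
#     import numpy as np
#     from scipy.optimize import linprog
#     # minimise sum(V) subject to (discount * P[a] - I) V <= -r[a] for all a
#     A_ub = np.vstack([discount * P[a] - np.eye(S) for a in range(A)])
#     b_ub = -np.concatenate([np.asarray(r[a]).ravel() for a in range(A)])
#     V = linprog(c=np.ones(S), A_ub=A_ub, b_ub=b_ub, bounds=(None, None)).x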


class PolicyIteration(MDP):

    """A discounted MDP solved using the policy iteration algorithm.

    Arguments
    ---------
    transitions : array
        Transition probability matrices. See the documentation for the
        ``MDP`` class for details.
    reward : array
        Reward matrices or vectors. See the documentation for the ``MDP``
        class for details.
    discount : float
        Discount factor. See the documentation for the ``MDP`` class for
        details.
    policy0 : array, optional
        Starting policy.
    max_iter : int, optional
        Maximum number of iterations. See the documentation for the ``MDP``
        class for details. Default is 1000.
    eval_type : int or string, optional
        Type of function used to evaluate policy. 0 or "matrix" to solve as
        a set of linear equations. 1 or "iterative" to solve iteratively.
        Default: 0.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Data Attributes
    ---------------
    V : tuple
        value function
    policy : tuple
        optimal policy
    iter : int
        number of iterations taken to complete the computation
    time : float
        used CPU time

    Notes
    -----
    In verbose mode, at each iteration, displays the number of different
    actions between policy n-1 and n

    Examples
    --------
    >>> import mdptoolbox, mdptoolbox.example
    >>> P, R = mdptoolbox.example.rand(10, 3)
    >>> pi = mdptoolbox.mdp.PolicyIteration(P, R, 0.9)
    >>> pi.run()

    >>> P, R = mdptoolbox.example.forest()
    >>> pi = mdptoolbox.mdp.PolicyIteration(P, R, 0.9)
    >>> pi.run()
    >>> expected = (26.244000000000014, 29.484000000000016, 33.484000000000016)
    >>> all(expected[k] - pi.V[k] < 1e-12 for k in range(len(expected)))
    True
    >>> pi.policy
    (0, 0, 0)

    """

    def __init__(self, transitions, reward, discount, policy0=None,
                 max_iter=1000, eval_type=0, skip_check=False):
        # Initialise a policy iteration MDP.
        #
        # Set up the MDP, but don't need to worry about epsilon values
        MDP.__init__(self, transitions, reward, discount, None, max_iter,
                     skip_check=skip_check)
        # Check if the user has supplied an initial policy. If not make one.
        if policy0 is None:
            # Initialise the policy to the one which maximises the expected
            # immediate reward
            null = _np.zeros(self.S)
            self.policy, null = self._bellmanOperator(null)
            del null
        else:
            # Use the policy that the user supplied
            # Make sure it is a numpy array
            policy0 = _np.array(policy0)
            # Make sure the policy is the right size and shape
            assert policy0.shape in ((self.S, ), (self.S, 1), (1, self.S)), \
                "'policy0' must be a vector with length S."
            # reshape the policy to be a vector
            policy0 = policy0.reshape(self.S)
            # The policy can only contain integers between 0 and S-1
            msg = "'policy0' must be a vector of integers between 0 and S-1."
            assert not _np.mod(policy0, 1).any(), msg
            assert (policy0 >= 0).all(), msg
            assert (policy0 < self.S).all(), msg
            self.policy = policy0
        # set the initial values to zero
        self.V = _np.zeros(self.S)
        # Do some setup depending on the evaluation type
        if eval_type in (0, "matrix"):
            self.eval_type = "matrix"
        elif eval_type in (1, "iterative"):
            self.eval_type = "iterative"
        else:
            raise ValueError("'eval_type' should be '0' for matrix evaluation "
                             "or '1' for iterative evaluation. The strings "
                             "'matrix' and 'iterative' can also be used.")

    def _computePpolicyPRpolicy(self):
        # Compute the transition matrix and the reward matrix for a policy.
        #
        # Arguments
        # ---------
        # Let S = number of states, A = number of actions
        # P(SxSxA) = transition matrix
        #     P could be an array with 3 dimensions or a cell array (1xA),
        #     each cell containing a matrix (SxS) possibly sparse
        # R(SxSxA) or (SxA) = reward matrix
        #     R could be an array with 3 dimensions (SxSxA) or
        #     a cell array (1xA), each cell containing a sparse matrix (SxS)
        #     or a 2D array(SxA) possibly sparse
        # policy(S) = a policy
        #
        # Evaluation
        # ----------
        # Ppolicy(SxS) = transition matrix for policy
        # PRpolicy(S) = reward matrix for policy
        #
        Ppolicy = _np.empty((self.S, self.S))
        Rpolicy = _np.zeros(self.S)
        for aa in range(self.A):  # avoid looping over S
            # the rows that use action a.
            ind = (self.policy == aa).nonzero()[0]
            # if no rows use action a, then no need to assign this
            if ind.size > 0:
                try:
                    Ppolicy[ind, :] = self.P[aa][ind, :]
                except ValueError:
                    Ppolicy[ind, :] = self.P[aa][ind, :].todense()
                # PR = self._computePR()  # an apparently unneeded line, and
                # perhaps harmful in this implementation c.f.
                # mdp_computePpolicyPRpolicy.m
                Rpolicy[ind] = self.R[aa][ind]
        # self.R cannot be sparse with the code in its current condition, but
        # it should be possible in the future. Also, if R is so big that it's
        # a good idea to use a sparse matrix for it, then converting PRpolicy
        # from a dense to sparse matrix doesn't seem very memory efficient
        if type(self.R) is _sp.csr_matrix:
            Rpolicy = _sp.csr_matrix(Rpolicy)
        # self.Ppolicy = Ppolicy
        # self.Rpolicy = Rpolicy
        return (Ppolicy, Rpolicy)

    def _evalPolicyIterative(self, V0=0, epsilon=0.0001, max_iter=10000):
        # Evaluate a policy using iteration.
        #
        # Arguments
        # ---------
        # Let S = number of states, A = number of actions
        # P(SxSxA) = transition matrix
        #     P could be an array with 3 dimensions or
        #     a cell array (1xS), each cell containing a matrix possibly
        #     sparse
        # R(SxSxA) or (SxA) = reward matrix
        #     R could be an array with 3 dimensions (SxSxA) or
        #     a cell array (1xA), each cell containing a sparse matrix (SxS)
        #     or a 2D array(SxA) possibly sparse
        # discount = discount rate in ]0; 1[
        # policy(S) = a policy
        # V0(S) = starting value function, optional (default : zeros(S,1))
        # epsilon = epsilon-optimal policy search, greater than 0,
        #     optional (default : 0.0001)
        # max_iter = maximum number of iterations to be done, greater than 0,
        #     optional (default : 10000)
        #
        # Evaluation
        # ----------
        # Vpolicy(S) = value function, associated to a specific policy
        #
        # Notes
        # -----
        # In verbose mode, at each iteration, displays the condition which
        # stopped iterations: epsilon-optimum value function found or maximum
        # number of iterations reached.
        #
        try:
            assert V0.shape in ((self.S, ), (self.S, 1), (1, self.S)), \
                "'V0' must be a vector of length S."
            policy_V = _np.array(V0).reshape(self.S)
        except AttributeError:
            if V0 == 0:
                policy_V = _np.zeros(self.S)
            else:
                policy_V = _np.array(V0).reshape(self.S)

        policy_P, policy_R = self._computePpolicyPRpolicy()

        if self.verbose:
            _printVerbosity("Iteration", "V variation")

        itr = 0
        done = False
        while not done:
            itr += 1

            Vprev = policy_V
            policy_V = policy_R + self.discount * policy_P.dot(Vprev)

            variation = _np.absolute(policy_V - Vprev).max()
            if self.verbose:
                _printVerbosity(itr, variation)

            # ensure |Vn - Vpolicy| < epsilon
            if variation < ((1 - self.discount) / self.discount) * epsilon:
                done = True
                if self.verbose:
                    print(_MSG_STOP_EPSILON_OPTIMAL_VALUE)
            elif itr == max_iter:
                done = True
                if self.verbose:
                    print(_MSG_STOP_MAX_ITER)

        self.V = policy_V

    def _evalPolicyMatrix(self):
        # Evaluate the value function of the policy using linear equations.
        #
        # Arguments
        # ---------
        # Let S = number of states, A = number of actions
        # P(SxSxA) = transition matrix
        #     P could be an array with 3 dimensions or a cell array (1xA),
        #     each cell containing a matrix (SxS) possibly sparse
        # R(SxSxA) or (SxA) = reward matrix
        #     R could be an array with 3 dimensions (SxSxA) or
        #     a cell array (1xA), each cell containing a sparse matrix (SxS)
        #     or a 2D array(SxA) possibly sparse
        # discount = discount rate in ]0; 1[
        # policy(S) = a policy
        #
        # Evaluation
        # ----------
        # Vpolicy(S) = value function of the policy
        #
        Ppolicy, Rpolicy = self._computePpolicyPRpolicy()
        # V = PR + gPV  => (I-gP)V = PR  => V = inv(I-gP)* PR
        self.V = _np.linalg.solve(
            (_sp.eye(self.S, self.S) - self.discount * Ppolicy), Rpolicy)

    def run(self):
        # Run the policy iteration algorithm.
        self._startRun()

        while True:
            self.iter += 1
            # these _evalPolicy* functions will update the classes value
            # attribute
            if self.eval_type == "matrix":
                self._evalPolicyMatrix()
            elif self.eval_type == "iterative":
                self._evalPolicyIterative()
            # This should update the classes policy attribute but leave the
            # value alone
            policy_next, null = self._bellmanOperator()
            del null
            # calculate in how many places the old policy disagrees with
            # the new policy
            n_different = (policy_next != self.policy).sum()
            # if verbose then continue printing a table
            if self.verbose:
                _printVerbosity(self.iter, n_different)
            # Once the policy is unchanging or the maximum number of
            # iterations has been reached then stop
            if n_different == 0:
                if self.verbose:
                    print(_MSG_STOP_UNCHANGING_POLICY)
                break
            elif self.iter == self.max_iter:
                if self.verbose:
                    print(_MSG_STOP_MAX_ITER)
                break
            else:
                self.policy = policy_next

        self._endRun()
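
# Usage note (illustrative sketch using the forest example that ships with
# the toolbox): the policy evaluation step can be switched at construction
# time via ``eval_type``; the iterative evaluation avoids the dense linear
# solve and normally converges to the same optimal policy.
#
#     import mdptoolbox.example
#     P, R = mdptoolbox.example.forest()
#     pi = PolicyIteration(P, R, 0.9, eval_type="iterative")
#     pi.run()
#     # pi.policy -> (0, 0, 0) for the default forest example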


class PolicyIterationModified(PolicyIteration):

    """A discounted MDP solved using a modified policy iteration algorithm.

    Arguments
    ---------
    transitions : array
        Transition probability matrices. See the documentation for the
        ``MDP`` class for details.
    reward : array
        Reward matrices or vectors. See the documentation for the ``MDP``
        class for details.
    discount : float
        Discount factor. See the documentation for the ``MDP`` class for
        details.
    epsilon : float, optional
        Stopping criterion. See the documentation for the ``MDP`` class for
        details. Default: 0.01.
    max_iter : int, optional
        Maximum number of iterations. See the documentation for the ``MDP``
        class for details. Default is 10.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Data Attributes
    ---------------
    V : tuple
        value function
    policy : tuple
        optimal policy
    iter : int
        number of iterations taken to complete the computation
    time : float
        used CPU time

    Examples
    --------
    >>> import mdptoolbox, mdptoolbox.example
    >>> P, R = mdptoolbox.example.forest()
    >>> pim = mdptoolbox.mdp.PolicyIterationModified(P, R, 0.9)
    >>> pim.run()
    >>> pim.policy
    (0, 0, 0)
    >>> expected = (21.81408652334702, 25.054086523347017, 29.054086523347017)
    >>> all(expected[k] - pim.V[k] < 1e-12 for k in range(len(expected)))
    True

    """

    def __init__(self, transitions, reward, discount, epsilon=0.01,
                 max_iter=10, skip_check=False):
        # Initialise a (modified) policy iteration MDP.

        # Maybe its better not to subclass from PolicyIteration, because the
        # initialisation of the two are quite different. eg there is policy0
        # being calculated here which doesn't need to be. The only thing that
        # is needed from the PolicyIteration class is the _evalPolicyIterative
        # function. Perhaps there is a better way to do it?
        PolicyIteration.__init__(self, transitions, reward, discount, None,
                                 max_iter, 1, skip_check=skip_check)

        # PolicyIteration doesn't pass epsilon to MDP.__init__() so we will
        # check it here
        self.epsilon = float(epsilon)
        assert epsilon > 0, "'epsilon' must be greater than 0."

        # computation of threshold of variation for V for an epsilon-optimal
        # policy
        if self.discount != 1:
            self.thresh = self.epsilon * (1 - self.discount) / self.discount
        else:
            self.thresh = self.epsilon

        if self.discount == 1:
            self.V = _np.zeros(self.S)
        else:
            Rmin = min(R.min() for R in self.R)
            self.V = 1 / (1 - self.discount) * Rmin * _np.ones((self.S,))

    def run(self):
        # Run the modified policy iteration algorithm.

        self._startRun()

        while True:
            self.iter += 1

            self.policy, Vnext = self._bellmanOperator()
            # [Ppolicy, PRpolicy] = mdp_computePpolicyPRpolicy(P, PR, policy);

            variation = _util.getSpan(Vnext - self.V)
            if self.verbose:
                _printVerbosity(self.iter, variation)

            self.V = Vnext
            if variation < self.thresh:
                break
            else:
                is_verbose = False
                if self.verbose:
                    self.setSilent()
                    is_verbose = True

                self._evalPolicyIterative(self.V, self.epsilon, self.max_iter)

                if is_verbose:
                    self.setVerbose()

        self._endRun()
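
# Stopping rule used above: the span of (Vnext - V) is compared against
# self.thresh = epsilon * (1 - discount) / discount (or plain epsilon when
# discount == 1), the span-based test for an epsilon-optimal policy that the
# other value-based solvers in this module also use.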


class QLearning(MDP):

    """A discounted MDP solved using the Q-learning algorithm.

    Parameters
    ----------
    transitions : array
        Transition probability matrices. See the documentation for the
        ``MDP`` class for details.
    reward : array
        Reward matrices or vectors. See the documentation for the ``MDP``
        class for details.
    discount : float
        Discount factor. See the documentation for the ``MDP`` class for
        details.
    n_iter : int, optional
        Number of iterations to execute. This is ignored unless it is an
        integer greater than the default value. Default: 10,000.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Data Attributes
    ---------------
    Q : array
        learned Q matrix (SxA)
    V : tuple
        learned value function (S).
    policy : tuple
        learned optimal policy (S).
    mean_discrepancy : array
        Vector of V discrepancy mean over 100 iterations. The length of
        this vector for the default value of N is 100 (N/100).

    Examples
    ---------
    >>> # These examples are reproducible only if random seed is set to 0 in
    >>> # both the random and numpy.random modules.
    >>> import numpy as np
    >>> import mdptoolbox, mdptoolbox.example
    >>> np.random.seed(0)
    >>> P, R = mdptoolbox.example.forest()
    >>> ql = mdptoolbox.mdp.QLearning(P, R, 0.96)
    >>> ql.run()
    >>> ql.Q
    array([[ 11.198909  ,  10.34652034],
           [ 10.74229967,  11.74105792],
           [  2.86980001,  12.25973286]])
    >>> expected = (11.198908998901134, 11.741057920409865, 12.259732864170232)
    >>> all(expected[k] - ql.V[k] < 1e-12 for k in range(len(expected)))
    True
    >>> ql.policy
    (0, 1, 1)

    >>> import mdptoolbox
    >>> import numpy as np
    >>> P = np.array([[[0.5, 0.5],[0.8, 0.2]],[[0, 1],[0.1, 0.9]]])
    >>> R = np.array([[5, 10], [-1, 2]])
    >>> np.random.seed(0)
    >>> ql = mdptoolbox.mdp.QLearning(P, R, 0.9)
    >>> ql.run()
    >>> ql.Q
    array([[ 33.33010866,  40.82109565],
           [ 34.37431041,  29.67236845]])
    >>> expected = (40.82109564847122, 34.37431040682546)
    >>> all(expected[k] - ql.V[k] < 1e-12 for k in range(len(expected)))
    True
    >>> ql.policy
    (1, 0)

    """

    def __init__(self, transitions, reward, discount, n_iter=10000,
                 skip_check=False):
        # Initialise a Q-learning MDP.

        # The following check won't be done in MDP()'s initialisation, so let's
        # do it here
        self.max_iter = int(n_iter)
        assert self.max_iter >= 10000, "'n_iter' should be greater than 10000."

        if not skip_check:
            # We don't want to send this to MDP because _computePR should not
            # be run on it, so check that it defines an MDP
            _util.check(transitions, reward)

        # Store P, S, and A
        self.S, self.A = _computeDimensions(transitions)
        self.P = self._computeTransition(transitions)

        self.R = reward

        self.discount = discount

        # Initialisations
        self.Q = _np.zeros((self.S, self.A))
        self.mean_discrepancy = []

    def run(self):
        # Run the Q-learning algorithm.
        discrepancy = []

        self.time = _time.time()

        # initial state choice
        s = _np.random.randint(0, self.S)

        for n in range(1, self.max_iter + 1):

            # Reinitialisation of trajectories every 100 transitions
            if (n % 100) == 0:
                s = _np.random.randint(0, self.S)

            # Action choice : greedy with increasing probability
            # probability 1-(1/log(n+2)) can be changed
            pn = _np.random.random()
            if pn < (1 - (1 / _math.log(n + 2))):
                # optimal_action = self.Q[s, :].max()
                a = self.Q[s, :].argmax()
            else:
                a = _np.random.randint(0, self.A)

            # Simulating next state s_new and reward associated to <s,s_new,a>
            p_s_new = _np.random.random()
            p = 0
            s_new = -1
            while (p < p_s_new) and (s_new < (self.S - 1)):
                s_new = s_new + 1
                p = p + self.P[a][s, s_new]

            try:
                r = self.R[a][s, s_new]
            except IndexError:
                try:
                    r = self.R[s, a]
                except IndexError:
                    r = self.R[s]

            # Updating the value of Q
            # Decaying update coefficient (1/sqrt(n+2)) can be changed
            delta = r + self.discount * self.Q[s_new, :].max() - self.Q[s, a]
            dQ = (1 / _math.sqrt(n + 2)) * delta
            self.Q[s, a] = self.Q[s, a] + dQ

            # current state is updated
            s = s_new

            # Computing and saving maximal values of the Q variation
            discrepancy.append(_np.absolute(dQ))

            # Computing the mean of the Q variations over each block of 100
            # iterations
            if len(discrepancy) == 100:
                self.mean_discrepancy.append(_np.mean(discrepancy))
                discrepancy = []

        # compute the value function and the policy
        self.V = self.Q.max(axis=1)
        self.policy = self.Q.argmax(axis=1)

        self._endRun()
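
# A small usage sketch (illustrative only; matplotlib is not required by the
# toolbox): mean_discrepancy holds one mean |dQ| value per 100 iterations, so
# it can be used to eyeball whether the Q values have settled down.
#
#     import mdptoolbox.example
#     P, R = mdptoolbox.example.forest()
#     ql = QLearning(P, R, 0.96, n_iter=20000)
#     ql.run()
#     print(len(ql.mean_discrepancy))   # 200 entries: one per 100 iterations
#     # import matplotlib.pyplot as plt; plt.plot(ql.mean_discrepancy)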


class RelativeValueIteration(MDP):

    """An MDP solved using the relative value iteration algorithm.

    Arguments
    ---------
    transitions : array
        Transition probability matrices. See the documentation for the
        ``MDP`` class for details.
    reward : array
        Reward matrices or vectors. See the documentation for the ``MDP``
        class for details.
    epsilon : float, optional
        Stopping criterion. See the documentation for the ``MDP`` class for
        details. Default: 0.01.
    max_iter : int, optional
        Maximum number of iterations. See the documentation for the ``MDP``
        class for details. Default: 1000.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Data Attributes
    ---------------
    policy : tuple
        epsilon-optimal policy
    average_reward : tuple
        average reward of the optimal policy
    cpu_time : float
        used CPU time

    Notes
    -----
    In verbose mode, at each iteration, displays the span of U variation
    and the condition which stopped iterations: epsilon-optimum policy found
    or maximum number of iterations reached.

    Examples
    --------
    >>> import mdptoolbox, mdptoolbox.example
    >>> P, R = mdptoolbox.example.forest()
    >>> rvi = mdptoolbox.mdp.RelativeValueIteration(P, R)
    >>> rvi.run()
    >>> rvi.average_reward
    3.2399999999999993
    >>> rvi.policy
    (0, 0, 0)
    >>> rvi.iter
    4

    >>> import mdptoolbox
    >>> import numpy as np
    >>> P = np.array([[[0.5, 0.5],[0.8, 0.2]],[[0, 1],[0.1, 0.9]]])
    >>> R = np.array([[5, 10], [-1, 2]])
    >>> rvi = mdptoolbox.mdp.RelativeValueIteration(P, R)
    >>> rvi.run()
    >>> expected = (10.0, 3.885235246411831)
    >>> all(expected[k] - rvi.V[k] < 1e-12 for k in range(len(expected)))
    True
    >>> rvi.average_reward
    3.8852352464118312
    >>> rvi.policy
    (1, 0)
    >>> rvi.iter
    29

    """

    def __init__(self, transitions, reward, epsilon=0.01, max_iter=1000,
                 skip_check=False):
        # Initialise a relative value iteration MDP.

        MDP.__init__(self, transitions, reward, None, epsilon, max_iter,
                     skip_check=skip_check)

        self.epsilon = epsilon
        self.discount = 1

        self.V = _np.zeros(self.S)
        self.gain = 0  # self.U[self.S]

        self.average_reward = None

    def run(self):
        # Run the relative value iteration algorithm.

        self._startRun()

        while True:

            self.iter += 1

            self.policy, Vnext = self._bellmanOperator()
            Vnext = Vnext - self.gain

            variation = _util.getSpan(Vnext - self.V)

            if self.verbose:
                _printVerbosity(self.iter, variation)

            if variation < self.epsilon:
                self.average_reward = self.gain + (Vnext - self.V).min()
                if self.verbose:
                    print(_MSG_STOP_EPSILON_OPTIMAL_POLICY)
                break
            elif self.iter == self.max_iter:
                self.average_reward = self.gain + (Vnext - self.V).min()
                if self.verbose:
                    print(_MSG_STOP_MAX_ITER)
                break

            self.V = Vnext
            self.gain = float(self.V[self.S - 1])

        self._endRun()
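
# Note on the update above: the value of the last state, V[S - 1], is used as
# the "gain" subtracted on every sweep, and on termination the average reward
# is estimated as gain + min(Vnext - V). This is the standard relative value
# iteration scheme for average-reward problems, which is why no discount
# factor is taken by this class (self.discount is fixed to 1).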


class ValueIteration(MDP):

    """A discounted MDP solved using the value iteration algorithm.

    Description
    -----------
    ValueIteration applies the value iteration algorithm to solve a
    discounted MDP. The algorithm consists of solving Bellman's equation
    iteratively.
    Iteration is stopped when an epsilon-optimal policy is found or after a
    specified number (``max_iter``) of iterations.
    This function uses verbose and silent modes. In verbose mode, the function
    displays the variation of ``V`` (the value function) for each iteration
    and the condition which stopped the iteration: epsilon-optimal policy
    found or maximum number of iterations reached.

    Parameters
    ----------
    transitions : array
        Transition probability matrices. See the documentation for the
        ``MDP`` class for details.
    reward : array
        Reward matrices or vectors. See the documentation for the ``MDP``
        class for details.
    discount : float
        Discount factor. See the documentation for the ``MDP`` class for
        details.
    epsilon : float, optional
        Stopping criterion. See the documentation for the ``MDP`` class for
        details. Default: 0.01.
    max_iter : int, optional
        Maximum number of iterations. If the value given is greater than a
        computed bound, a warning informs that the computed bound will be
        used instead. By default, if ``discount`` is not equal to 1, a
        bound for ``max_iter`` is computed, otherwise ``max_iter`` = 1000.
        See the documentation for the ``MDP`` class for further details.
    initial_value : array, optional
        The starting value function. Default: a vector of zeros.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Data Attributes
    ---------------
    V : tuple
        The optimal value function.
    policy : tuple
        The optimal policy function. Each element is an integer
        corresponding to an action which maximises the value function in
        that state.
    iter : int
        The number of iterations taken to complete the computation.
    time : float
        The amount of CPU time used to run the algorithm.

    Methods
    -------
    run()
        Do the algorithm iteration.
    setSilent()
        Sets the instance to silent mode.
    setVerbose()
        Sets the instance to verbose mode.

    Notes
    -----
    In verbose mode, at each iteration, displays the variation of V
    and the condition which stopped iterations: epsilon-optimum policy found
    or maximum number of iterations reached.
    Examples
    --------
    >>> import mdptoolbox, mdptoolbox.example
    >>> P, R = mdptoolbox.example.forest()
    >>> vi = mdptoolbox.mdp.ValueIteration(P, R, 0.96)
    >>> vi.verbose
    False
    >>> vi.run()
    >>> expected = (5.93215488, 9.38815488, 13.38815488)
    >>> all(expected[k] - vi.V[k] < 1e-12 for k in range(len(expected)))
    True
    >>> vi.policy
    (0, 0, 0)
    >>> vi.iter
    4

    >>> import mdptoolbox
    >>> import numpy as np
    >>> P = np.array([[[0.5, 0.5],[0.8, 0.2]],[[0, 1],[0.1, 0.9]]])
    >>> R = np.array([[5, 10], [-1, 2]])
    >>> vi = mdptoolbox.mdp.ValueIteration(P, R, 0.9)
    >>> vi.run()
    >>> expected = (40.048625392716815, 33.65371175967546)
    >>> all(expected[k] - vi.V[k] < 1e-12 for k in range(len(expected)))
    True
    >>> vi.policy
    (1, 0)
    >>> vi.iter
    26

    >>> import mdptoolbox
    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix as sparse
    >>> P = [None] * 2
    >>> P[0] = sparse([[0.5, 0.5],[0.8, 0.2]])
    >>> P[1] = sparse([[0, 1],[0.1, 0.9]])
    >>> R = np.array([[5, 10], [-1, 2]])
    >>> vi = mdptoolbox.mdp.ValueIteration(P, R, 0.9)
    >>> vi.run()
    >>> expected = (40.048625392716815, 33.65371175967546)
    >>> all(expected[k] - vi.V[k] < 1e-12 for k in range(len(expected)))
    True
    >>> vi.policy
    (1, 0)

    """

    def __init__(self, transitions, reward, discount, epsilon=0.01,
                 max_iter=1000, initial_value=0, skip_check=False):
        # Initialise a value iteration MDP.

        MDP.__init__(self, transitions, reward, discount, epsilon, max_iter,
                     skip_check=skip_check)

        # initialization of optional arguments
        if initial_value == 0:
            self.V = _np.zeros(self.S)
        else:
            assert len(initial_value) == self.S, "The initial value must be " \
                "a vector of length S."
            self.V = _np.array(initial_value).reshape(self.S)
        if self.discount < 1:
            # compute a bound for the number of iterations and update the
            # stored value of self.max_iter
            self._boundIter(epsilon)
            # computation of threshold of variation for V for an epsilon-
            # optimal policy
            self.thresh = epsilon * (1 - self.discount) / self.discount
        else:  # discount == 1
            # threshold of variation for V for an epsilon-optimal policy
            self.thresh = epsilon

    def _boundIter(self, epsilon):
        # Compute a bound for the number of iterations.
        #
        # for the value iteration
        # algorithm to find an epsilon-optimal policy with use of span for the
        # stopping criterion
        #
        # Arguments ---------------------------------------------------------
        # Let S = number of states, A = number of actions
        #     epsilon = |V - V*| < epsilon, greater than 0,
        #         optional (default : 0.01)
        # Evaluation --------------------------------------------------------
        #     max_iter = bound of the number of iterations for the value
        #     iteration algorithm to find an epsilon-optimal policy with use
        #     of span for the stopping criterion
        #     cpu_time = used CPU time
        #
        # See Markov Decision Processes, M. L. Puterman,
        # Wiley-Interscience Publication, 1994
        # p 202, Theorem 6.6.6
        # k =    max     [1 - S min[ P(j|s,a), p(j|s',a')] ]
        #     s,a,s',a'       j
        k = 0
        h = _np.zeros(self.S)

        for ss in range(self.S):
            PP = _np.zeros((self.A, self.S))
            for aa in range(self.A):
                try:
                    PP[aa] = self.P[aa][:, ss]
                except ValueError:
                    PP[aa] = self.P[aa][:, ss].todense().A1
            # minimum of the entire array.
            h[ss] = PP.min()

        k = 1 - h.sum()
        Vprev = self.V
        null, value = self._bellmanOperator()
        # p 201, Proposition 6.6.5
        span = _util.getSpan(value - Vprev)
        max_iter = (_math.log((epsilon * (1 - self.discount) / self.discount) /
                    span) / _math.log(self.discount * k))
        # self.V = Vprev

        self.max_iter = int(_math.ceil(max_iter))

    def run(self):
        # Run the value iteration algorithm.
        self._startRun()

        while True:
            self.iter += 1

            Vprev = self.V.copy()

            # Bellman Operator: compute policy and value functions
            self.policy, self.V = self._bellmanOperator()

            # The values, based on Q. For the function "max()": the option
            # "axis" means the axis along which to operate. In this case it
            # finds the maximum of the rows. (Operates along the columns?)
            variation = _util.getSpan(self.V - Vprev)

            if self.verbose:
                _printVerbosity(self.iter, variation)

            if variation < self.thresh:
                if self.verbose:
                    print(_MSG_STOP_EPSILON_OPTIMAL_POLICY)
                break
            elif self.iter == self.max_iter:
                if self.verbose:
                    print(_MSG_STOP_MAX_ITER)
                break

        self._endRun()
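
# Usage sketch (illustrative): a previously computed value function can be
# passed back in through ``initial_value`` to warm-start the iteration, which
# usually needs fewer sweeps to hit the span threshold.
#
#     import mdptoolbox.example
#     P, R = mdptoolbox.example.forest()
#     vi = ValueIteration(P, R, 0.96)
#     vi.run()
#     vi2 = ValueIteration(P, R, 0.96, initial_value=vi.V)
#     vi2.run()   # vi2.iter is typically much smaller than vi.iter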


class ValueIterationGS(ValueIteration):

    """
    A discounted MDP solved using the value iteration Gauss-Seidel algorithm.

    Parameters
    ----------
    transitions : array
        Transition probability matrices. See the documentation for the
        ``MDP`` class for details.
    reward : array
        Reward matrices or vectors. See the documentation for the ``MDP``
        class for details.
    discount : float
        Discount factor. See the documentation for the ``MDP`` class for
        details.
    epsilon : float, optional
        Stopping criterion. See the documentation for the ``MDP`` class for
        details. Default: 0.01.
    max_iter : int, optional
        Maximum number of iterations. See the documentation for the ``MDP``
        and ``ValueIteration`` classes for details. Default: computed.
    initial_value : array, optional
        The starting value function. Default: a vector of zeros.
    skip_check : bool
        By default we run a check on the ``transitions`` and ``rewards``
        arguments to make sure they describe a valid MDP. You can set this
        argument to True in order to skip this check.

    Data Attributes
    ---------------
    policy : tuple
        epsilon-optimal policy
    iter : int
        number of iterations taken to complete the computation
    time : float
        used CPU time

    Notes
    -----
    In verbose mode, at each iteration, displays the variation of V
    and the condition which stopped iterations: epsilon-optimum policy found
    or maximum number of iterations reached.

    Examples
    --------
    >>> import mdptoolbox.example, numpy as np
    >>> P, R = mdptoolbox.example.forest()
    >>> vigs = mdptoolbox.mdp.ValueIterationGS(P, R, 0.9)
    >>> vigs.run()
    >>> expected = (25.5833879767579, 28.830654635546928, 32.83065463554693)
    >>> all(expected[k] - vigs.V[k] < 1e-12 for k in range(len(expected)))
    True
    >>> vigs.policy
    (0, 0, 0)

    """

    def __init__(self, transitions, reward, discount, epsilon=0.01,
                 max_iter=10, initial_value=0, skip_check=False):
        # Initialise a value iteration Gauss-Seidel MDP.

        MDP.__init__(self, transitions, reward, discount, epsilon, max_iter,
                     skip_check=skip_check)

        # initialization of optional arguments
        if initial_value == 0:
            self.V = _np.zeros(self.S)
        else:
            if len(initial_value) != self.S:
                raise ValueError("The initial value must be a vector of "
                                 "length S.")
            else:
                try:
                    self.V = initial_value.reshape(self.S)
                except AttributeError:
                    self.V = _np.array(initial_value)
                except:
                    raise
        if self.discount < 1:
            # compute a bound for the number of iterations and update the
            # stored value of self.max_iter
            self._boundIter(epsilon)
            # computation of threshold of variation for V for an epsilon-
            # optimal policy
            self.thresh = epsilon * (1 - self.discount) / self.discount
        else:  # discount == 1
            # threshold of variation for V for an epsilon-optimal policy
            self.thresh = epsilon

    def run(self):
        # Run the value iteration Gauss-Seidel algorithm.

        self._startRun()

        while True:
            self.iter += 1

            Vprev = self.V.copy()

            for s in range(self.S):
                Q = [float(self.R[a][s] +
                           self.discount * self.P[a][s, :].dot(self.V))
                     for a in range(self.A)]

                self.V[s] = max(Q)

            variation = _util.getSpan(self.V - Vprev)

            if self.verbose:
                _printVerbosity(self.iter, variation)

            if variation < self.thresh:
                if self.verbose:
                    print(_MSG_STOP_EPSILON_OPTIMAL_POLICY)
                break
            elif self.iter == self.max_iter:
                if self.verbose:
                    print(_MSG_STOP_MAX_ITER)
                break

        self.policy = []
        for s in range(self.S):
            Q = _np.zeros(self.A)
            for a in range(self.A):
                Q[a] = (self.R[a][s] +
                        self.discount * self.P[a][s, :].dot(self.V))

            self.V[s] = Q.max()
            self.policy.append(int(Q.argmax()))

        self._endRun()
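
# Closing usage sketch (illustrative): the Gauss-Seidel variant updates V[s]
# in place while sweeping the states, so it often needs fewer sweeps than
# standard value iteration on the same problem, and it exposes the same
# V/policy tuples when finished.
#
#     import mdptoolbox.example
#     P, R = mdptoolbox.example.forest()
#     vi = ValueIteration(P, R, 0.9)
#     vigs = ValueIterationGS(P, R, 0.9)
#     vi.run(); vigs.run()
#     # vi.policy == vigs.policy -> (0, 0, 0) for the default forest example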