policy_params#


class A2CParams(gae_lambda: float = 0.95, max_batchsize: int = 256, lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, dist_fn: collections.abc.Callable[..., torch.distributions.distribution.Distribution] | DistributionFunctionFactory | Literal['default'] = 'default', vf_coef: float = 0.5, ent_coef: float = 0.01, max_grad_norm: float | None = None)[source]#
ent_coef: float = 0.01#

weight (coefficient) of the entropy loss in the loss function

max_grad_norm: float | None = None#

maximum norm for clipping gradients in backpropagation

vf_coef: float = 0.5#

weight (coefficient) of the value loss in the loss function
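
As a usage sketch, the loss-weighting fields above are simply set on the dataclass (assuming the module is importable as tianshou.highlevel.params.policy_params); unspecified fields keep the defaults shown in the signature:

    from tianshou.highlevel.params.policy_params import A2CParams

    # Weight the value and entropy terms of the A2C loss and clip gradients to norm 1.0.
    a2c_params = A2CParams(
        vf_coef=0.5,        # coefficient of the value loss
        ent_coef=0.01,      # coefficient of the entropy loss
        max_grad_norm=1.0,  # maximum gradient norm in backpropagation
    )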

class DDPGParams(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None, actor_lr: float = 0.001, critic_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, estimation_step: int = 1)[source]#
estimation_step: int = 1#

the number of steps to look ahead.

gamma: float = 0.99#

discount factor (gamma) for future rewards; must be in [0, 1]

tau: float = 0.005#

controls the soft update of the target network. It determines how slowly the target networks track the main networks. Smaller tau means slower tracking and more stable learning.
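
A construction sketch for DDPGParams; the import path is the module documented here, and “default” exploration noise selects Gaussian noise with standard deviation 0.1 (see ParamsMixinExplorationNoise below):

    from tianshou.highlevel.params.policy_params import DDPGParams

    ddpg_params = DDPGParams(
        actor_lr=1e-3,
        critic_lr=1e-3,
        tau=0.005,                    # soft-update rate of the target networks
        gamma=0.99,                   # discount factor for future rewards
        estimation_step=1,            # number of steps to look ahead
        exploration_noise="default",  # Gaussian noise with standard deviation 0.1
    )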

class DQNParams(lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, discount_factor: float = 0.99, estimation_step: int = 1, target_update_freq: int = 0, reward_normalization: bool = False, is_double: bool = True, clip_loss_grad: bool = False)[source]#
clip_loss_grad: bool = False#

whether to clip the gradient of the loss in accordance with the Nature DQN paper (Mnih et al., 2015, nature14236); this amounts to using the Huber loss instead of the MSE loss.

discount_factor: float = 0.99#

discount factor (gamma) for future rewards; must be in [0, 1]

estimation_step: int = 1#

the number of steps to look ahead

is_double: bool = True#

whether to use double Q learning

reward_normalization: bool = False#

whether to normalize the returns to Normal(0, 1)

target_update_freq: int = 0#

the target network update frequency (0 if no target network is to be used)
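
A construction sketch for DQNParams with a target network and 3-step returns (the values are illustrative):

    from tianshou.highlevel.params.policy_params import DQNParams

    dqn_params = DQNParams(
        lr=1e-3,
        discount_factor=0.99,
        estimation_step=3,       # look 3 steps ahead (n-step return)
        target_update_freq=500,  # target network update frequency (0 = no target network)
        is_double=True,          # use double Q-learning
        clip_loss_grad=False,    # True would clip the loss gradient as in Mnih et al. (2015)
    )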

class DiscreteSACParams(actor_lr: float = 0.001, critic1_lr: float = 0.001, critic2_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic1_lr_scheduler_factory: LRSchedulerFactory | None = None, critic2_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, alpha: float | AutoAlphaFactory = 0.2, estimation_step: int = 1)[source]#
class GetParamTransformersProtocol(*args, **kwargs)[source]#
class IQNParams(lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, discount_factor: float = 0.99, estimation_step: int = 1, target_update_freq: int = 0, reward_normalization: bool = False, is_double: bool = True, clip_loss_grad: bool = False, sample_size: int = 32, online_sample_size: int = 8, target_sample_size: int = 8, num_quantiles: int = 200, hidden_sizes: collections.abc.Sequence[int] = (), num_cosines: int = 64)[source]#
hidden_sizes: Sequence[int] = ()#

hidden dimensions to use in the IQN network

num_cosines: int = 64#

number of cosines to use in the IQN network

num_quantiles: int = 200#

the number of quantile midpoints in the inverse cumulative distribution function of the value

online_sample_size: int = 8#

the number of samples for the online model during training

sample_size: int = 32#

the number of samples for policy evaluation

target_sample_size: int = 8#

the number of samples for the target model during training.
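
The sampling-related fields of IQNParams fit together as follows (a sketch; the hidden_sizes value is purely illustrative):

    from tianshou.highlevel.params.policy_params import IQNParams

    iqn_params = IQNParams(
        sample_size=32,        # samples for policy evaluation
        online_sample_size=8,  # samples for the online model during training
        target_sample_size=8,  # samples for the target model during training
        num_quantiles=200,     # quantile midpoints of the value distribution
        hidden_sizes=(512,),   # hidden dimensions of the IQN network
        num_cosines=64,        # number of cosines used in the IQN network
    )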

class NPGParams(gae_lambda: float = 0.95, max_batchsize: int = 256, lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, dist_fn: collections.abc.Callable[..., torch.distributions.distribution.Distribution] | DistributionFunctionFactory | Literal['default'] = 'default', optim_critic_iters: int = 5, actor_step_size: float = 0.5, advantage_normalization: bool = True)[source]#
actor_step_size: float = 0.5#

step size for actor update in natural gradient direction

advantage_normalization: bool = True#

whether to do per mini-batch advantage normalization.

optim_critic_iters: int = 5#

number of times to optimize critic network per update.

class PGParams(lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, dist_fn: collections.abc.Callable[..., torch.distributions.distribution.Distribution] | DistributionFunctionFactory | Literal['default'] = 'default')[source]#
deterministic_eval: bool = False#

whether to use deterministic action (the dist’s mode) instead of stochastic one during evaluation. Does not affect training.

discount_factor: float = 0.99#

discount factor (gamma) for future rewards; must be in [0, 1]

dist_fn: Callable[[...], Distribution] | DistributionFunctionFactory | Literal['default'] = 'default'#

This can either be a function which maps the model output to a torch distribution or a factory for the creation of such a function. When set to “default”, a factory is used which creates Gaussian distributions from mean and standard deviation for the continuous case and categorical distributions for the discrete case (see DistributionFunctionFactoryDefault)

reward_normalization: bool = False#

if True, will normalize the returns by subtracting the running mean and dividing by the running standard deviation.
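
To illustrate the dist_fn contract, a minimal sketch for the discrete case is shown below; it assumes the policy calls the function with the actor output (logits) and expects a torch distribution in return:

    import torch
    from torch.distributions import Categorical, Distribution

    from tianshou.highlevel.params.policy_params import PGParams

    def categorical_from_logits(logits: torch.Tensor) -> Distribution:
        # Map the actor output to a categorical action distribution.
        return Categorical(logits=logits)

    pg_params = PGParams(dist_fn=categorical_from_logits)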

class PPOParams(gae_lambda: float = 0.95, max_batchsize: int = 256, lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, dist_fn: collections.abc.Callable[..., torch.distributions.distribution.Distribution] | DistributionFunctionFactory | Literal['default'] = 'default', vf_coef: float = 0.5, ent_coef: float = 0.01, max_grad_norm: float | None = None, eps_clip: float = 0.2, dual_clip: float | None = None, value_clip: bool = False, advantage_normalization: bool = True, recompute_advantage: bool = False)[source]#
advantage_normalization: bool = True#

whether to apply per mini-batch advantage normalization.

dual_clip: float | None = None#

determines the lower bound clipping for the probability ratio (corresponds to parameter c in arXiv:1912.09729, Equation 5). If set to None, dual clipping is not used and the bounds described in parameter eps_clip apply. If set to a float value c, the lower bound is changed from 1 - eps_clip to c, where c < 1 - eps_clip. Setting c > 0 reduces policy oscillation and further stabilizes training. Typical values are between 0 and 0.5. Smaller values provide more stability. Setting c = 0 yields PPO with only the upper bound.

eps_clip: float = 0.2#

determines the range of allowed change in the policy during a policy update: The ratio between the probabilities indicated by the new and old policy is constrained to stay in the interval [1 - eps_clip, 1 + eps_clip]. Small values thus force the new policy to stay close to the old policy. Typical values range between 0.1 and 0.3. The optimal epsilon depends on the environment; more stochastic environments may need larger epsilons.

recompute_advantage: bool = False#

whether to recompute advantage every update repeat as described in https://arxiv.org/pdf/2006.05990.pdf, Sec. 3.5. The original PPO implementation splits the data in each policy iteration step into individual transitions and then randomly assigns them to minibatches. This makes it impossible to compute advantages as the temporal structure is broken. Therefore, the advantages are computed once at the beginning of each policy iteration step and then used in minibatch policy and value function optimization. This results in higher diversity of data in each minibatch at the cost of using slightly stale advantage estimations. Enabling this option will, as a remedy to this problem, recompute the advantages at the beginning of each pass over the data instead of just once per iteration.

value_clip: bool = False#

whether to apply clipping of the predicted value function during policy learning. Value clipping discourages large changes in value predictions between updates. Inaccurate value predictions can lead to bad policy updates, which can cause training instability. Clipping values prevents sporadic large errors from skewing policy updates too much.
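
Putting the PPO-specific fields together (a sketch; the final comment indicates the assumed hand-off to the high-level experiment builder, whose names are not defined in this module):

    from tianshou.highlevel.params.policy_params import PPOParams

    ppo_params = PPOParams(
        eps_clip=0.2,                  # probability ratio constrained to [0.8, 1.2]
        dual_clip=None,                # dual clipping not used
        value_clip=False,              # no clipping of value predictions
        advantage_normalization=True,  # per mini-batch advantage normalization
        recompute_advantage=False,     # advantages computed once per policy iteration
        vf_coef=0.5,
        ent_coef=0.01,
        max_grad_norm=0.5,
    )
    # Typically passed on via the matching experiment builder, e.g. (names assumed):
    #   PPOExperimentBuilder(env_factory, config, sampling_config).with_ppo_params(ppo_params)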

class ParamTransformer[source]#

Base class for parameter transformations from high to low-level API.

Transforms one or more parameters from the representation used by the high-level API to the representation required by the (low-level) policy implementation. It operates directly on a dictionary of keyword arguments, which is initially generated from the parameter dataclass (subclass of Params).

static get(d: dict[str, Any], key: str, drop: bool = False) → Any[source]#
abstract transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
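
To make the transformation mechanism concrete, here is a minimal sketch of a custom transformer; it uses only the documented interface (the abstract transform method and the static get helper), and the key names are purely illustrative:

    from typing import Any

    from tianshou.highlevel.params.policy_params import ParamTransformer, ParamTransformerData

    class ParamTransformerRenameKey(ParamTransformer):
        """Illustrative transformer that renames a keyword argument."""

        def __init__(self, old_key: str, new_key: str) -> None:
            self.old_key = old_key
            self.new_key = new_key

        def transform(self, params: dict[str, Any], data: ParamTransformerData) -> None:
            # get() reads the entry from the kwargs dict and, with drop=True, removes it;
            # the value is then stored under the name expected by the low-level policy.
            params[self.new_key] = self.get(params, self.old_key, drop=True)
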
class ParamTransformerActionScaling(key: str)[source]#
change_value(value: Any, data: ParamTransformerData) → Any[source]#
class ParamTransformerActorAndCriticLRScheduler(key_scheduler_factory_actor: str, key_scheduler_factory_critic: str, key_scheduler: str)[source]#
transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerActorDualCriticsLRScheduler(key_scheduler_factory_actor: str, key_scheduler_factory_critic1: str, key_scheduler_factory_critic2: str, key_scheduler: str)[source]#
transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerAutoAlpha(key: str)[source]#
transform(kwargs: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerChangeValue(key: str)[source]#
abstract change_value(value: Any, data: ParamTransformerData) → Any[source]#
transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerData(*, envs: Environments, device: str | device, optim_factory: OptimizerFactory, optim: Optimizer | None = None, actor: ModuleOpt | None = None, critic1: ModuleOpt | None = None, critic2: ModuleOpt | None = None)[source]#

Holds data that can be used by ParamTransformer instances to perform their transformation.

The representation contains the superset of all data items that are required by different types of agent factories. An agent factory is expected to set only the attributes that are relevant to its parameters.

actor: ModuleOpt | None = None#
critic1: ModuleOpt | None = None#
critic2: ModuleOpt | None = None#
device: str | device#
envs: Environments#
optim: Optimizer | None = None#

the single optimizer for the case where only one optimizer is used

optim_factory: OptimizerFactory#
class ParamTransformerDistributionFunction(key: str)[source]#
change_value(value: Any, data: ParamTransformerData) → Any[source]#
class ParamTransformerDrop(*keys: str)[source]#
transform(kwargs: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerFloatEnvParamFactory(key: str)[source]#
change_value(value: Any, data: ParamTransformerData) → Any[source]#
class ParamTransformerLRScheduler(key_scheduler_factory: str, key_scheduler: str)[source]#

Transformer for learning rate scheduler params.

Transforms a key containing a learning rate scheduler factory (removed) into a key containing a learning rate scheduler (added) for the data member optim.

transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerMultiLRScheduler(optim_key_list: list[tuple[Optimizer, str]], key_scheduler: str)[source]#
transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerNoiseFactory(key: str)[source]#
change_value(value: Any, data: ParamTransformerData) → Any[source]#
class Params[source]#
create_kwargs(data: ParamTransformerData) → dict[str, Any][source]#
class ParamsMixinActionScaling(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip')[source]#
action_bound_method: Literal['clip', 'tanh'] | None = 'clip'#

method to bound action to range [-1, 1]. Only used if the action_space is continuous.

action_scaling: bool | Literal['default'] = 'default'#

whether to apply action scaling; when set to “default”, it will be enabled for continuous action spaces

class ParamsMixinActorAndCritic(actor_lr: float = 0.001, critic_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic_lr_scheduler_factory: LRSchedulerFactory | None = None)[source]#
actor_lr: float = 0.001#

the learning rate to use for the actor network

actor_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the actor network (if any)

critic_lr: float = 0.001#

the learning rate to use for the critic network

critic_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the critic network (if any)

class ParamsMixinActorAndDualCritics(actor_lr: float = 0.001, critic1_lr: float = 0.001, critic2_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic1_lr_scheduler_factory: LRSchedulerFactory | None = None, critic2_lr_scheduler_factory: LRSchedulerFactory | None = None)[source]#
actor_lr: float = 0.001#

the learning rate to use for the actor network

actor_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the actor network (if any)

critic1_lr: float = 0.001#

the learning rate to use for the first critic network

critic1_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the first critic network (if any)

critic2_lr: float = 0.001#

the learning rate to use for the second critic network

critic2_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the second critic network (if any)

class ParamsMixinExplorationNoise(exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None)[source]#
exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | None = None#

If not None, add noise to actions for exploration. This is useful when solving “hard exploration” problems. It can either be a noise instance (BaseNoise), a factory for the creation of such an instance, or “default”. When set to “default”, Gaussian noise with standard deviation 0.1 is used.

class ParamsMixinGeneralAdvantageEstimation(gae_lambda: float = 0.95, max_batchsize: int = 256)[source]#
gae_lambda: float = 0.95#

determines the blend between Monte Carlo and one-step temporal difference (TD) estimates of the advantage function in general advantage estimation (GAE). A value of 0 gives a fully TD-based estimate; lambda=1 gives a fully Monte Carlo estimate (see the formula below).

max_batchsize: int = 256#

the maximum size of the batch when computing general advantage estimation (GAE)
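
For reference, gae_lambda enters the standard generalized advantage estimator (Schulman et al., 2015) as

    \delta_t = r_t + \gamma V(s_{t+1}) - V(s_t)
    \hat{A}_t = \sum_{l=0}^{\infty} (\gamma \lambda)^l \delta_{t+l}

so that lambda = 0 reduces to the one-step TD error and lambda = 1 telescopes to the Monte Carlo return minus the value baseline.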

class ParamsMixinLearningRateWithScheduler(lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None)[source]#
lr: float = 0.001#

the learning rate to use in the gradient-based optimizer

lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler

class REDQParams(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None, actor_lr: float = 0.001, critic_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, estimation_step: int = 1, ensemble_size: int = 10, subset_size: int = 2, alpha: float | AutoAlphaFactory = 0.2, actor_delay: int = 20, deterministic_eval: bool = True, target_mode: Literal['mean', 'min'] = 'min')[source]#
actor_delay: int = 20#

the number of critic updates before an actor update

alpha: float | AutoAlphaFactory = 0.2#

controls the relative importance (coefficient) of the entropy term in the loss function. This can be a constant or a factory for the creation of a representation that allows the parameter to be automatically tuned; use tianshou.highlevel.params.alpha.AutoAlphaFactoryDefault for the standard auto-adjusted alpha.

deterministic_eval: bool = True#

whether to use deterministic action (the dist’s mode) instead of stochastic one during evaluation. Does not affect training.

ensemble_size: int = 10#

the number of sub-networks in the critic ensemble

estimation_step: int = 1#

the number of steps to look ahead

subset_size: int = 2#

the number of networks in the subset

target_mode: Literal['mean', 'min'] = 'min'#
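
A construction sketch for REDQParams; the comments restate the field descriptions above, and the reading of subset_size as the number of critics sampled per target computation follows the REDQ paper rather than this page:

    from tianshou.highlevel.params.policy_params import REDQParams

    redq_params = REDQParams(
        ensemble_size=10,   # sub-networks in the critic ensemble
        subset_size=2,      # networks in the subset (critics sampled for each target computation)
        actor_delay=20,     # critic updates per actor update
        target_mode="min",  # aggregate the sampled critics by their minimum
        alpha=0.2,          # fixed entropy coefficient
    )
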
class SACParams(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None, actor_lr: float = 0.001, critic1_lr: float = 0.001, critic2_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic1_lr_scheduler_factory: LRSchedulerFactory | None = None, critic2_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, alpha: float | AutoAlphaFactory = 0.2, estimation_step: int = 1, deterministic_eval: bool = True)[source]#
deterministic_eval: bool = True#

whether to use deterministic action (mean of Gaussian policy) in evaluation mode instead of stochastic action sampled by the policy. Does not affect training.
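
SACParams accepts either a fixed float or an AutoAlphaFactory for alpha; the sketch below uses tianshou.highlevel.params.alpha.AutoAlphaFactoryDefault, which is assumed here to be constructible without arguments:

    from tianshou.highlevel.params.alpha import AutoAlphaFactoryDefault
    from tianshou.highlevel.params.policy_params import SACParams

    sac_params = SACParams(
        actor_lr=1e-3,
        critic1_lr=1e-3,
        critic2_lr=1e-3,
        tau=0.005,
        gamma=0.99,
        alpha=AutoAlphaFactoryDefault(),  # auto-adjusted entropy coefficient
        deterministic_eval=True,          # evaluate with the mean of the Gaussian policy
    )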

class TD3Params(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None, actor_lr: float = 0.001, critic1_lr: float = 0.001, critic2_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic1_lr_scheduler_factory: LRSchedulerFactory | None = None, critic2_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, policy_noise: float | FloatEnvValueFactory = 0.2, noise_clip: float | FloatEnvValueFactory = 0.5, update_actor_freq: int = 2, estimation_step: int = 1)[source]#
estimation_step: int = 1#

the number of steps to look ahead.

gamma: float = 0.99#

discount factor (gamma) for future rewards; must be in [0, 1]

noise_clip: float | FloatEnvValueFactory = 0.5#

determines the clipping range of the noise used in updating the policy network as [-noise_clip, noise_clip]

policy_noise: float | FloatEnvValueFactory = 0.2#

the scale of the noise used in updating the policy network

tau: float = 0.005#

controls the soft update of the target network. It determines how slowly the target networks track the main networks. Smaller tau means slower tracking and more stable learning.

update_actor_freq: int = 2#

the update frequency of the actor network
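
The TD3-specific fields can be set as follows (a sketch; remaining fields keep their defaults):

    from tianshou.highlevel.params.policy_params import TD3Params

    td3_params = TD3Params(
        policy_noise=0.2,     # scale of the noise used in updating the policy network
        noise_clip=0.5,       # clip that noise to [-0.5, 0.5]
        update_actor_freq=2,  # update the actor every second update step
        tau=0.005,
        gamma=0.99,
    )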

class TRPOParams(gae_lambda: float = 0.95, max_batchsize: int = 256, lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, dist_fn: collections.abc.Callable[..., torch.distributions.distribution.Distribution] | DistributionFunctionFactory | Literal['default'] = 'default', optim_critic_iters: int = 5, actor_step_size: float = 0.5, advantage_normalization: bool = True, max_kl: float = 0.01, backtrack_coeff: float = 0.8, max_backtracks: int = 10)[source]#
backtrack_coeff: float = 0.8#

coefficient with which to reduce the step size when constraints are not met.

max_backtracks: int = 10#

maximum number of times to backtrack in line search when the constraints are not met.

max_kl: float = 0.01#

maximum KL divergence, used to constrain each actor network update.
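
Finally, a sketch for TRPOParams, which combines the NPG fields with the trust-region line-search parameters documented above:

    from tianshou.highlevel.params.policy_params import TRPOParams

    trpo_params = TRPOParams(
        max_kl=0.01,           # maximum KL divergence per actor update
        backtrack_coeff=0.8,   # step-size reduction factor in the line search
        max_backtracks=10,     # maximum number of line-search backtracking steps
        optim_critic_iters=5,  # critic optimization passes per update
        actor_step_size=0.5,   # step size in the natural gradient direction
    )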