bitrl & cuberl Documentation
Simulation engine for reinforcement learning agents
Loading...
Searching...
No Matches
cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType > Class Template Reference (final)

The QLearning class. Table based implementation of the Q-learning algorithm using epsilon-greedy policy. The implementation also allows for exponential decay of the used epsilon. More...

#include <q_learning.h>

Inheritance diagram for cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >:
Collaboration diagram for cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >:

Public Types

typedef TDAlgoBase< EnvTp >::env_type env_type
 env_t
 
typedef TDAlgoBase< EnvTp >::action_type action_type
 action_t
 
typedef TDAlgoBase< EnvTp >::state_type state_type
 state_t
 
typedef PolicyType policy_type
 action_selector_t
 
- Public Types inherited from cuberl::rl::algos::td::TDAlgoBase< EnvTp >
typedef EnvTp env_type
 env_t
 
typedef env_type::action_type action_type
 action_t
 
typedef env_type::state_type state_type
 state_t
 
- Public Types inherited from cuberl::rl::algos::RLSolverBase< EnvType >
typedef EnvType env_type
 

Public Member Functions

 QLearningSolver (const QLearningConfig config, const PolicyType &policy)
 Constructor.
 
virtual void actions_before_training_begins (env_type &)
 actions_before_training_begins. Execute any actions the algorithm needs before starting the iterations
 
virtual void actions_after_training_ends (env_type &)
 actions_after_training_ends. Actions to execute after the training iterations have finished
 
virtual void actions_before_episode_begins (env_type &, uint_t)
 actions_before_training_episode
 
virtual void actions_after_episode_ends (env_type &, uint_t episode_idx, const EpisodeInfo &)
 actions_after_training_episode
 
virtual EpisodeInfo on_training_episode (env_type &, uint_t episode_idx)
 on_training_episode. Execute one training episode of the algorithm
 
void save (const std::string &filename) const
 Save the state-action function in a CSV format.
 
cuberl::rl::policies::MaxTabularPolicy build_policy () const
 Build the policy after training.
 
- Public Member Functions inherited from cuberl::rl::algos::td::TDAlgoBase< EnvTp >
virtual ~TDAlgoBase ()=default
 Destructor.
 
- Public Member Functions inherited from cuberl::rl::algos::RLSolverBase< EnvType >
virtual ~RLSolverBase ()=default
 Destructor.
 
virtual void actions_before_training_begins (env_type &)=0
 actions_before_training_begins. Execute any actions the algorithm needs before starting the iterations
 
virtual void actions_after_training_ends (env_type &)=0
 actions_after_training_ends. Actions to execute after the training iterations have finished
 
virtual void actions_before_episode_begins (env_type &, uint_t)
 actions_before_training_episode
 
virtual void actions_after_episode_ends (env_type &, uint_t, const EpisodeInfo &)
 actions_after_training_episode
 
virtual EpisodeInfo on_training_episode (env_type &, uint_t)=0
 on_training_episode. Execute one training episode of the algorithm
 

Additional Inherited Members

- Protected Member Functions inherited from cuberl::rl::algos::td::TDAlgoBase< EnvTp >
 TDAlgoBase ()=default
 TDAlgoBase. Default constructor.
 
- Protected Member Functions inherited from cuberl::rl::algos::RLSolverBase< EnvType >
 RLSolverBase ()=default
 Constructor.
 

Detailed Description

template<envs::discrete_world_concept EnvTp, typename PolicyType>
class cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >

The QLearning class. Table based implementation of the Q-learning algorithm using epsilon-greedy policy. The implementation also allows for exponential decay of the used epsilon.

Member Typedef Documentation

◆ action_type

template<envs::discrete_world_concept EnvTp, typename PolicyType >
typedef TDAlgoBase<EnvTp>::action_type cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::action_type

action_t

◆ env_type

template<envs::discrete_world_concept EnvTp, typename PolicyType >
typedef TDAlgoBase<EnvTp>::env_type cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::env_type

env_t

◆ policy_type

template<envs::discrete_world_concept EnvTp, typename PolicyType >
typedef PolicyType cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::policy_type

action_selector_t

◆ state_type

template<envs::discrete_world_concept EnvTp, typename PolicyType >
typedef TDAlgoBase<EnvTp>::state_type cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::state_type

state_t

Constructor & Destructor Documentation

◆ QLearningSolver()

template<envs::discrete_world_concept EnvTp, typename PolicyType >
cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::QLearningSolver ( const QLearningConfig  config,
const PolicyType &  policy 
)

Constructor.

Member Function Documentation

◆ actions_after_episode_ends()

template<envs::discrete_world_concept EnvTp, typename PolicyType >
void cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::actions_after_episode_ends ( env_type ,
uint_t  episode_idx,
const EpisodeInfo  
)
virtual

actions_after_training_episode

◆ actions_after_training_ends()

template<envs::discrete_world_concept EnvTp, typename PolicyType >
void cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::actions_after_training_ends ( env_type )
virtual

actions_after_training_ends. Actions to execute after the training iterations have finished

◆ actions_before_episode_begins()

template<envs::discrete_world_concept EnvTp, typename PolicyType >
virtual void cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::actions_before_episode_begins ( env_type ,
uint_t   
)
inlinevirtual

actions_before_training_episode

◆ actions_before_training_begins()

template<envs::discrete_world_concept EnvTp, typename PolicyType >
void cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::actions_before_training_begins ( env_type env)
virtual

actions_before_training_begins. Execute any actions the algorithm needs before starting the iterations

◆ build_policy()

template<envs::discrete_world_concept EnvTp, typename PolicyType >
cuberl::rl::policies::MaxTabularPolicy cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::build_policy ( ) const

Build the policy after training.

◆ on_training_episode()

template<envs::discrete_world_concept EnvTp, typename PolicyType >
EpisodeInfo cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::on_training_episode ( env_type env,
uint_t  episode_idx 
)
virtual

on_training_episode. Execute one training episode of the algorithm

◆ save()

template<envs::discrete_world_concept EnvTp, typename PolicyType >
void cuberl::rl::algos::td::QLearningSolver< EnvTp, PolicyType >::save ( const std::string &  filename) const

Save the state-action function in a CSV format.


The documentation for this class was generated from the following file: