|
bitrl & cuberl Documentation
Simulation engine for reinforcement learning agents
|
#include <rl_serial_agent_trainer.h>


Public Types | |
| typedef EnvType | env_type |
| typedef AgentType | agent_type |
Public Member Functions | |
| RLSerialAgentTrainer (const RLSerialTrainerConfig &config, agent_type &agent) | |
| RLSerialAgentTrainer. Construct the trainer from the given configuration and agent | |
| virtual bitrl::utils::IterativeAlgorithmResult | train (env_type &env) |
| train Iterate to train the agent on the given environment | |
| virtual void | actions_before_training_begins (env_type &) |
| actions_before_training_begins. Execute any actions the algorithm needs before training begins | |
| virtual void | actions_before_episode_begins (env_type &, uint_t) |
| actions_before_episode_begins. Execute any actions the algorithm needs before starting the episode | |
| virtual void | actions_after_episode_ends (env_type &, uint_t, const EpisodeInfo &einfo) |
| actions_after_episode_ends. Execute any actions the algorithm needs after ending the episode | |
| virtual void | actions_after_training_ends (env_type &) |
| actions_after_training_ends. Execute any actions the algorithm needs after the iterations are finished | |
| const std::vector< real_t > & | episodes_total_rewards () const noexcept |
| episodes_total_rewards | |
| const std::vector< uint_t > & | n_itrs_per_episode () const noexcept |
| n_itrs_per_episode | |
Protected Attributes | |
| uint_t | output_msg_frequency_ |
| bitrl::utils::IterativeAlgorithmController | itr_ctrl_ |
| itr_ctrl_ Handles the iteration over the episodes | |
| agent_type & | agent_ |
| agent_ | |
| std::vector< real_t > | total_reward_per_episode_ |
| total_reward_per_episode_ | |
| std::vector< uint_t > | n_itrs_per_episode_ |
| n_itrs_per_episode_ Holds the number of iterations performed per training episode | |
\details The RLSerialAgentTrainer class handles the training for serial reinforcement learning agents
| typedef AgentType cuberl::rl::RLSerialAgentTrainer< EnvType, AgentType >::agent_type |
| typedef EnvType cuberl::rl::RLSerialAgentTrainer< EnvType, AgentType >::env_type |
| cuberl::rl::RLSerialAgentTrainer< EnvType, AgentType >::RLSerialAgentTrainer | ( | const RLSerialTrainerConfig & | config, |
| agent_type & | agent | ||
| ) |
| config | |
| agent |
|
virtual |
actions_after_episode_ends. Execute any actions the algorithm needs after ending the episode
|
virtual |
actions_after_training_ends. Execute any actions the algorithm needs after the iterations are finished
|
virtual |
actions_before_episode_begins. Execute any actions the algorithm needs before starting the episode
|
virtual |
actions_before_training_begins. Execute any actions the algorithm needs before training begins
|
inline noexcept |
episodes_total_rewards
|
inline noexcept |
n_itrs_per_episode
|
virtual |
train Iterate to train the agent on the given environment
|
protected |
agent_
|
protected |
itr_ctrl_ Handles the iteration over the episodes
|
protected |
n_itrs_per_episode_ Holds the number of iterations performed per training episode
|
protected |
|
protected |
total_reward_per_episode_