import uuid
from typing import Optional

import gymnasium as gym

from ray.rllib.env.external_env import ExternalEnv, _ExternalEnvEpisode
from ray.rllib.utils.annotations import OldAPIStack, override
from ray.rllib.utils.typing import MultiAgentDict


@OldAPIStack
class ExternalMultiAgentEnv(ExternalEnv):
    """This is the multi-agent version of ExternalEnv."""

    def __init__(self, action_space: gym.Space, observation_space: gym.Space):
        """Initializes an ExternalMultiAgentEnv instance.

        Args:
            action_space: Action space of the env.
            observation_space: Observation space of the env.
        """
        ExternalEnv.__init__(self, action_space, observation_space)

        # If per-agent spaces are given as plain dicts, both dicts must cover
        # the same set of agent ids.
        if isinstance(self.action_space, dict) or isinstance(
            self.observation_space, dict
        ):
            if not (self.action_space.keys() == self.observation_space.keys()):
                raise ValueError(
                    "Agent ids disagree for action space and obs space "
                    "dict: {} {}".format(
                        self.action_space.keys(), self.observation_space.keys()
                    )
                )

    def run(self):
        """Override this to implement the multi-agent run loop.

        Your loop should continuously:
            1. Call self.start_episode(episode_id)
            2. Call self.get_action(episode_id, obs_dict)
                    -or-
                    self.log_action(episode_id, obs_dict, action_dict)
            3. Call self.log_returns(episode_id, reward_dict)
            4. Call self.end_episode(episode_id, obs_dict)
            5. Wait if nothing to do.

        Multiple episodes may be started at the same time.
        """
        raise NotImplementedError

    @override(ExternalEnv)
    def start_episode(
        self, episode_id: Optional[str] = None, training_enabled: bool = True
    ) -> str:
        if episode_id is None:
            episode_id = uuid.uuid4().hex

        if episode_id in self._finished:
            raise ValueError("Episode {} has already completed.".format(episode_id))

        if episode_id in self._episodes:
            raise ValueError("Episode {} is already started".format(episode_id))

        self._episodes[episode_id] = _ExternalEnvEpisode(
            episode_id,
            self._results_avail_condition,
            training_enabled,
            multiagent=True,
        )

        return episode_id

    @override(ExternalEnv)
    def get_action(
        self, episode_id: str, observation_dict: MultiAgentDict
    ) -> MultiAgentDict:
        """Record an observation and get the on-policy action.

        Thereby, observation_dict is expected to contain the observation
        of all agents acting in this episode step.

        Args:
            episode_id: Episode id returned from start_episode().
            observation_dict: Current environment observation.

        Returns:
            action: Action from the env action space.
        """
        episode = self._get(episode_id)
        return episode.wait_for_action(observation_dict)

    @override(ExternalEnv)
    def log_action(
        self,
        episode_id: str,
        observation_dict: MultiAgentDict,
        action_dict: MultiAgentDict,
    ) -> None:
        """Record an observation and (off-policy) action taken.

        Args:
            episode_id: Episode id returned from start_episode().
            observation_dict: Current environment observation.
            action_dict: Action for the observation.
        """
        episode = self._get(episode_id)
        episode.log_action(observation_dict, action_dict)

    @override(ExternalEnv)
    def log_returns(
        self,
        episode_id: str,
        reward_dict: MultiAgentDict,
        info_dict: MultiAgentDict = None,
        multiagent_done_dict: MultiAgentDict = None,
    ) -> None:
        """Record returns from the environment.

        The reward will be attributed to the previous action taken by the
        episode. Rewards accumulate until the next action. If no reward is
        logged before the next action, a reward of 0.0 is assumed.

        Args:
            episode_id: Episode id returned from start_episode().
            reward_dict: Reward from the environment agents.
            info_dict: Optional info dict.
            multiagent_done_dict: Optional done dict for agents.
        """
        episode = self._get(episode_id)

        # Accumulate rewards per agent until the next action is taken.
        for agent, rew in reward_dict.items():
            if agent in episode.cur_reward_dict:
                episode.cur_reward_dict[agent] += rew
            else:
                episode.cur_reward_dict[agent] = rew

        if multiagent_done_dict:
            for agent, done in multiagent_done_dict.items():
                episode.cur_done_dict[agent] = done

        if info_dict:
            episode.cur_info_dict = info_dict or {}

    @override(ExternalEnv)
    def end_episode(self, episode_id: str, observation_dict: MultiAgentDict) -> None:
        """Record the end of an episode.

        Args:
            episode_id: Episode id returned from start_episode().
            observation_dict: Current environment observation.
        """
        episode = self._get(episode_id)
        self._finished.add(episode.episode_id)
        episode.done(observation_dict)
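

# Usage sketch (not part of the original module): a minimal subclass showing
# the run-loop protocol documented in `run()` above. The agent ids, spaces,
# and the random "simulator" below are illustrative assumptions only. In real
# use, the env is handed to an RLlib Algorithm, whose sampler starts the
# `run()` thread and answers each `get_action()` call with policy actions.
if __name__ == "__main__":
    import numpy as np

    AGENT_IDS = ["agent_0", "agent_1"]
    OBS_SPACE = gym.spaces.Box(-1.0, 1.0, shape=(4,), dtype=np.float32)
    ACT_SPACE = gym.spaces.Discrete(2)

    class _RandomSimEnv(ExternalMultiAgentEnv):
        """Feeds random observations for two agents and logs their returns."""

        def run(self):
            # A real implementation would loop forever, driven by the external
            # simulator; two short episodes are enough for a sketch.
            for _ in range(2):
                episode_id = self.start_episode()
                for _ in range(10):
                    # The observation dict must cover all agents acting in
                    # this step.
                    obs = {aid: OBS_SPACE.sample() for aid in AGENT_IDS}
                    action_dict = self.get_action(episode_id, obs)
                    # Pretend each agent earns a reward equal to its action.
                    self.log_returns(
                        episode_id,
                        {aid: float(a) for aid, a in action_dict.items()},
                    )
                self.end_episode(
                    episode_id, {aid: OBS_SPACE.sample() for aid in AGENT_IDS}
                )

    env = _RandomSimEnv(ACT_SPACE, OBS_SPACE)
    print("action_space:", env.action_space)
    print("observation_space:", env.observation_space)
    # `env.run()` is deliberately not called here: without an RLlib sampler
    # polling the env, `get_action()` would block waiting for a policy.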