agent_m/agentm/data/game_data/general.yaml

folders:
  parent_dir: "./results/"
  model_name: "default_model"

settings:
  step_ratio: 6
  frame_shape: [128, 128, 1]
  continue_game: 0.0
  action_space: "discrete"
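
# (assumption) The settings block above appears to configure the base
# environment: one agent step every 6 game frames, 128x128 single-channel
# (grayscale) observations, no automatic continue, and a discrete action space.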

wrappers_settings:
  wrappers:
  normalize_reward: true
  no_attack_buttons_combinations: true
  stack_frames: 4
  dilation: 1
  add_last_action: true
  stack_actions: 12
  scale: true
  exclude_image_scaling: true
  role_relative: true
  flatten: true
  filter_keys:
    - frame
    - action
    - stage
    - timer
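
# (assumption) The wrapper flags above appear to control observation and
# reward post-processing: reward normalization, stacking of 4 frames,
# appending the last action plus a 12-step action history, value scaling
# with the image excluded, role-relative observations, and a flattened
# observation dict restricted to the keys listed in filter_keys.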

policy_kwargs:
  net_arch: [64, 64]

ppo_settings:
  gamma: 0.94
  model_checkpoint: "0"
  learning_rate: [2.5e-4, 2.5e-6]
  clip_range: [0.15, 0.025]
  batch_size: 256
  n_epochs: 4
  n_steps: 128
  autosave_freq: 256
  time_steps: 512
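
# ---------------------------------------------------------------------------
# Usage sketch (assumptions, not part of this config): the two-element
# learning_rate and clip_range lists above presumably encode [start, end]
# values for a linearly annealed schedule, and ppo_settings presumably maps
# onto stable-baselines3 PPO arguments. A minimal, hypothetical loader could
# look like the following, where `env` (built elsewhere from the settings and
# wrappers_settings blocks) and the "MultiInputPolicy" choice are assumed:
#
#   import yaml
#   from stable_baselines3 import PPO
#
#   def linear_schedule(start, end):
#       # progress_remaining runs from 1.0 (start of training) down to 0.0
#       return lambda progress_remaining: end + progress_remaining * (start - end)
#
#   with open("agent_m/agentm/data/game_data/general.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   ppo_cfg = cfg["ppo_settings"]
#   model = PPO(
#       "MultiInputPolicy",
#       env,  # assumed: environment constructed from settings/wrappers_settings
#       gamma=ppo_cfg["gamma"],
#       learning_rate=linear_schedule(*ppo_cfg["learning_rate"]),
#       clip_range=linear_schedule(*ppo_cfg["clip_range"]),
#       batch_size=ppo_cfg["batch_size"],
#       n_epochs=ppo_cfg["n_epochs"],
#       n_steps=ppo_cfg["n_steps"],
#       policy_kwargs=cfg["policy_kwargs"],
#   )
#   model.learn(total_timesteps=ppo_cfg["time_steps"])
# ---------------------------------------------------------------------------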