diff --git "a/sf_log.txt" "b/sf_log.txt" new file mode 100644--- /dev/null +++ "b/sf_log.txt" @@ -0,0 +1,1134 @@ +[2023-02-28 15:42:34,827][11028] Saving configuration to /content/train_dir/default_experiment/config.json... +[2023-02-28 15:42:34,831][11028] Rollout worker 0 uses device cpu +[2023-02-28 15:42:34,832][11028] Rollout worker 1 uses device cpu +[2023-02-28 15:42:34,835][11028] Rollout worker 2 uses device cpu +[2023-02-28 15:42:34,836][11028] Rollout worker 3 uses device cpu +[2023-02-28 15:42:34,837][11028] Rollout worker 4 uses device cpu +[2023-02-28 15:42:34,838][11028] Rollout worker 5 uses device cpu +[2023-02-28 15:42:34,841][11028] Rollout worker 6 uses device cpu +[2023-02-28 15:42:34,842][11028] Rollout worker 7 uses device cpu +[2023-02-28 15:42:35,070][11028] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-02-28 15:42:35,075][11028] InferenceWorker_p0-w0: min num requests: 2 +[2023-02-28 15:42:35,120][11028] Starting all processes... +[2023-02-28 15:42:35,122][11028] Starting process learner_proc0 +[2023-02-28 15:42:35,196][11028] Starting all processes... +[2023-02-28 15:42:35,210][11028] Starting process inference_proc0-0 +[2023-02-28 15:42:35,211][11028] Starting process rollout_proc0 +[2023-02-28 15:42:35,213][11028] Starting process rollout_proc1 +[2023-02-28 15:42:35,213][11028] Starting process rollout_proc2 +[2023-02-28 15:42:35,213][11028] Starting process rollout_proc3 +[2023-02-28 15:42:35,213][11028] Starting process rollout_proc4 +[2023-02-28 15:42:35,213][11028] Starting process rollout_proc5 +[2023-02-28 15:42:35,213][11028] Starting process rollout_proc6 +[2023-02-28 15:42:35,213][11028] Starting process rollout_proc7 +[2023-02-28 15:42:44,636][11239] Worker 4 uses CPU cores [0] +[2023-02-28 15:42:44,683][11217] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-02-28 15:42:44,683][11217] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0 +[2023-02-28 15:42:44,764][11235] Worker 3 uses CPU cores [1] +[2023-02-28 15:42:44,816][11234] Worker 2 uses CPU cores [0] +[2023-02-28 15:42:44,885][11230] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-02-28 15:42:44,885][11230] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0 +[2023-02-28 15:42:44,937][11241] Worker 5 uses CPU cores [1] +[2023-02-28 15:42:44,952][11242] Worker 6 uses CPU cores [0] +[2023-02-28 15:42:45,001][11231] Worker 0 uses CPU cores [0] +[2023-02-28 15:42:45,041][11232] Worker 1 uses CPU cores [1] +[2023-02-28 15:42:45,065][11243] Worker 7 uses CPU cores [1] +[2023-02-28 15:42:45,487][11217] Num visible devices: 1 +[2023-02-28 15:42:45,489][11230] Num visible devices: 1 +[2023-02-28 15:42:45,496][11217] Starting seed is not provided +[2023-02-28 15:42:45,496][11217] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-02-28 15:42:45,497][11217] Initializing actor-critic model on device cuda:0 +[2023-02-28 15:42:45,497][11217] RunningMeanStd input shape: (3, 72, 128) +[2023-02-28 15:42:45,499][11217] RunningMeanStd input shape: (1,) +[2023-02-28 15:42:45,520][11217] ConvEncoder: input_channels=3 +[2023-02-28 15:42:45,909][11217] Conv encoder output size: 512 +[2023-02-28 15:42:45,909][11217] Policy head output size: 512 +[2023-02-28 15:42:45,964][11217] Created Actor Critic model with architecture: +[2023-02-28 15:42:45,965][11217] ActorCriticSharedWeights( + (obs_normalizer): ObservationNormalizer( + (running_mean_std): RunningMeanStdDictInPlace( + 
(running_mean_std): ModuleDict( + (obs): RunningMeanStdInPlace() + ) + ) + ) + (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace) + (encoder): VizdoomEncoder( + (basic_encoder): ConvEncoder( + (enc): RecursiveScriptModule( + original_name=ConvEncoderImpl + (conv_head): RecursiveScriptModule( + original_name=Sequential + (0): RecursiveScriptModule(original_name=Conv2d) + (1): RecursiveScriptModule(original_name=ELU) + (2): RecursiveScriptModule(original_name=Conv2d) + (3): RecursiveScriptModule(original_name=ELU) + (4): RecursiveScriptModule(original_name=Conv2d) + (5): RecursiveScriptModule(original_name=ELU) + ) + (mlp_layers): RecursiveScriptModule( + original_name=Sequential + (0): RecursiveScriptModule(original_name=Linear) + (1): RecursiveScriptModule(original_name=ELU) + ) + ) + ) + ) + (core): ModelCoreRNN( + (core): GRU(512, 512) + ) + (decoder): MlpDecoder( + (mlp): Identity() + ) + (critic_linear): Linear(in_features=512, out_features=1, bias=True) + (action_parameterization): ActionParameterizationDefault( + (distribution_linear): Linear(in_features=512, out_features=5, bias=True) + ) +) +[2023-02-28 15:42:53,384][11217] Using optimizer +[2023-02-28 15:42:53,385][11217] No checkpoints found +[2023-02-28 15:42:53,386][11217] Did not load from checkpoint, starting from scratch! +[2023-02-28 15:42:53,386][11217] Initialized policy 0 weights for model version 0 +[2023-02-28 15:42:53,389][11217] LearnerWorker_p0 finished initialization! +[2023-02-28 15:42:53,392][11217] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-02-28 15:42:53,485][11230] RunningMeanStd input shape: (3, 72, 128) +[2023-02-28 15:42:53,488][11230] RunningMeanStd input shape: (1,) +[2023-02-28 15:42:53,505][11230] ConvEncoder: input_channels=3 +[2023-02-28 15:42:53,603][11230] Conv encoder output size: 512 +[2023-02-28 15:42:53,604][11230] Policy head output size: 512 +[2023-02-28 15:42:55,055][11028] Heartbeat connected on Batcher_0 +[2023-02-28 15:42:55,063][11028] Heartbeat connected on LearnerWorker_p0 +[2023-02-28 15:42:55,075][11028] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-02-28 15:42:55,089][11028] Heartbeat connected on RolloutWorker_w0 +[2023-02-28 15:42:55,093][11028] Heartbeat connected on RolloutWorker_w1 +[2023-02-28 15:42:55,100][11028] Heartbeat connected on RolloutWorker_w2 +[2023-02-28 15:42:55,108][11028] Heartbeat connected on RolloutWorker_w3 +[2023-02-28 15:42:55,111][11028] Heartbeat connected on RolloutWorker_w4 +[2023-02-28 15:42:55,114][11028] Heartbeat connected on RolloutWorker_w5 +[2023-02-28 15:42:55,119][11028] Heartbeat connected on RolloutWorker_w6 +[2023-02-28 15:42:55,123][11028] Heartbeat connected on RolloutWorker_w7 +[2023-02-28 15:42:55,878][11028] Inference worker 0-0 is ready! +[2023-02-28 15:42:55,881][11028] All inference workers are ready! Signal rollout workers to start! 
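The module tree printed above fixes the data flow of the policy even though the log omits layer hyperparameters. Below is a minimal PyTorch sketch of the same shared-weights actor-critic; the conv kernel sizes, strides, and channel counts are assumptions (they are not printed in the log), while the 512-unit encoder output, GRU(512, 512) core, Linear(512, 1) critic head, and Linear(512, 5) action head come straight from the log. The observation/returns normalizers from the printed tree are omitted for brevity.

```python
import torch
from torch import nn

class ActorCriticSketch(nn.Module):
    """Sketch of the logged ActorCriticSharedWeights module tree."""

    def __init__(self, obs_shape=(3, 72, 128), num_actions=5, hidden=512):
        super().__init__()
        c, h, w = obs_shape
        # (conv_head): three Conv2d+ELU pairs; kernel/stride values are assumed
        self.conv_head = nn.Sequential(
            nn.Conv2d(c, 32, 8, stride=4), nn.ELU(),
            nn.Conv2d(32, 64, 4, stride=2), nn.ELU(),
            nn.Conv2d(64, 128, 3, stride=2), nn.ELU(),
        )
        with torch.no_grad():  # infer the flattened conv output size
            n_flat = self.conv_head(torch.zeros(1, c, h, w)).numel()
        # (mlp_layers): Linear + ELU projecting to the 512-dim encoder output
        self.mlp_layers = nn.Sequential(nn.Linear(n_flat, hidden), nn.ELU())
        self.core = nn.GRU(hidden, hidden)               # (core): GRU(512, 512)
        self.critic_linear = nn.Linear(hidden, 1)        # value head
        self.distribution_linear = nn.Linear(hidden, num_actions)  # 5 actions

    def forward(self, obs, rnn_state):
        x = self.mlp_layers(self.conv_head(obs).flatten(1))
        x, rnn_state = self.core(x.unsqueeze(0), rnn_state)  # sequence length 1
        x = x.squeeze(0)
        return self.distribution_linear(x), self.critic_linear(x), rnn_state
```

With a (batch, 3, 72, 128) observation tensor and a (1, batch, 512) GRU state, for example `ActorCriticSketch()(torch.zeros(4, 3, 72, 128), torch.zeros(1, 4, 512))`, the forward pass returns 5 action logits and one value per sample, matching the heads in the printed architecture.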
+[2023-02-28 15:42:55,891][11028] Heartbeat connected on InferenceWorker_p0-w0 +[2023-02-28 15:42:56,022][11235] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 15:42:56,024][11232] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 15:42:56,047][11241] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 15:42:56,055][11239] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 15:42:56,056][11231] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 15:42:56,057][11242] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 15:42:56,054][11243] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 15:42:56,073][11234] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 15:42:57,280][11243] Decorrelating experience for 0 frames... +[2023-02-28 15:42:57,282][11239] Decorrelating experience for 0 frames... +[2023-02-28 15:42:57,283][11242] Decorrelating experience for 0 frames... +[2023-02-28 15:42:57,280][11231] Decorrelating experience for 0 frames... +[2023-02-28 15:42:57,284][11241] Decorrelating experience for 0 frames... +[2023-02-28 15:42:57,283][11232] Decorrelating experience for 0 frames... +[2023-02-28 15:42:57,992][11242] Decorrelating experience for 32 frames... +[2023-02-28 15:42:57,998][11231] Decorrelating experience for 32 frames... +[2023-02-28 15:42:58,294][11232] Decorrelating experience for 32 frames... +[2023-02-28 15:42:58,297][11241] Decorrelating experience for 32 frames... +[2023-02-28 15:42:58,306][11243] Decorrelating experience for 32 frames... +[2023-02-28 15:42:59,048][11239] Decorrelating experience for 32 frames... +[2023-02-28 15:42:59,318][11242] Decorrelating experience for 64 frames... +[2023-02-28 15:42:59,401][11235] Decorrelating experience for 0 frames... +[2023-02-28 15:42:59,536][11231] Decorrelating experience for 64 frames... +[2023-02-28 15:42:59,701][11232] Decorrelating experience for 64 frames... +[2023-02-28 15:43:00,075][11028] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-02-28 15:43:01,198][11243] Decorrelating experience for 64 frames... +[2023-02-28 15:43:01,243][11239] Decorrelating experience for 64 frames... +[2023-02-28 15:43:01,411][11242] Decorrelating experience for 96 frames... +[2023-02-28 15:43:01,664][11235] Decorrelating experience for 32 frames... +[2023-02-28 15:43:01,784][11234] Decorrelating experience for 0 frames... +[2023-02-28 15:43:02,022][11241] Decorrelating experience for 64 frames... +[2023-02-28 15:43:02,027][11231] Decorrelating experience for 96 frames... +[2023-02-28 15:43:02,123][11232] Decorrelating experience for 96 frames... +[2023-02-28 15:43:02,480][11243] Decorrelating experience for 96 frames... +[2023-02-28 15:43:03,318][11239] Decorrelating experience for 96 frames... +[2023-02-28 15:43:03,319][11234] Decorrelating experience for 32 frames... +[2023-02-28 15:43:03,941][11241] Decorrelating experience for 96 frames... +[2023-02-28 15:43:04,697][11235] Decorrelating experience for 64 frames... +[2023-02-28 15:43:04,750][11234] Decorrelating experience for 64 frames... +[2023-02-28 15:43:05,076][11028] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-02-28 15:43:05,165][11235] Decorrelating experience for 96 frames... 
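The "Decorrelating experience for N frames..." lines show each rollout worker warming up its environments with random-action frames in 32-frame increments (0, 32, 64, 96) before collection starts, so that parallel episodes begin out of phase rather than in lockstep. A hypothetical sketch of that idea, assuming Gymnasium-style envs; Sample Factory's actual scheduling differs in detail:

```python
def decorrelate_worker(envs, step_frames=32):
    """Give each env in a rollout worker a different random-action warm-up
    (0, 32, 64, 96, ... frames) so parallel episodes are decorrelated."""
    for i, env in enumerate(envs):
        env.reset()
        frames = i * step_frames
        print(f"Decorrelating experience for {frames} frames...")
        for _ in range(frames):
            _, _, terminated, truncated, _ = env.step(env.action_space.sample())
            if terminated or truncated:
                env.reset()
```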
+[2023-02-28 15:43:05,614][11234] Decorrelating experience for 96 frames... +[2023-02-28 15:43:09,532][11217] Signal inference workers to stop experience collection... +[2023-02-28 15:43:09,551][11230] InferenceWorker_p0-w0: stopping experience collection +[2023-02-28 15:43:10,075][11028] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 67.7. Samples: 1016. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-02-28 15:43:10,078][11028] Avg episode reward: [(0, '2.085')] +[2023-02-28 15:43:12,109][11217] Signal inference workers to resume experience collection... +[2023-02-28 15:43:12,111][11230] InferenceWorker_p0-w0: resuming experience collection +[2023-02-28 15:43:15,075][11028] Fps is (10 sec: 1638.5, 60 sec: 819.2, 300 sec: 819.2). Total num frames: 16384. Throughput: 0: 163.5. Samples: 3270. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:43:15,084][11028] Avg episode reward: [(0, '3.129')] +[2023-02-28 15:43:20,081][11028] Fps is (10 sec: 3275.0, 60 sec: 1310.4, 300 sec: 1310.4). Total num frames: 32768. Throughput: 0: 352.7. Samples: 8820. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:43:20,089][11028] Avg episode reward: [(0, '3.742')] +[2023-02-28 15:43:22,277][11230] Updated weights for policy 0, policy_version 10 (0.0352) +[2023-02-28 15:43:25,075][11028] Fps is (10 sec: 2867.2, 60 sec: 1501.9, 300 sec: 1501.9). Total num frames: 45056. Throughput: 0: 362.7. Samples: 10882. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-28 15:43:25,080][11028] Avg episode reward: [(0, '4.260')] +[2023-02-28 15:43:30,075][11028] Fps is (10 sec: 2868.8, 60 sec: 1755.4, 300 sec: 1755.4). Total num frames: 61440. Throughput: 0: 419.1. Samples: 14670. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2023-02-28 15:43:30,078][11028] Avg episode reward: [(0, '4.333')] +[2023-02-28 15:43:34,459][11230] Updated weights for policy 0, policy_version 20 (0.0016) +[2023-02-28 15:43:35,075][11028] Fps is (10 sec: 3686.4, 60 sec: 2048.0, 300 sec: 2048.0). Total num frames: 81920. Throughput: 0: 533.2. Samples: 21328. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-28 15:43:35,080][11028] Avg episode reward: [(0, '4.392')] +[2023-02-28 15:43:40,076][11028] Fps is (10 sec: 4095.8, 60 sec: 2275.5, 300 sec: 2275.5). Total num frames: 102400. Throughput: 0: 548.5. Samples: 24684. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-28 15:43:40,083][11028] Avg episode reward: [(0, '4.313')] +[2023-02-28 15:43:40,100][11217] Saving new best policy, reward=4.313! +[2023-02-28 15:43:45,075][11028] Fps is (10 sec: 3276.8, 60 sec: 2293.8, 300 sec: 2293.8). Total num frames: 114688. Throughput: 0: 645.4. Samples: 29042. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-28 15:43:45,079][11028] Avg episode reward: [(0, '4.487')] +[2023-02-28 15:43:45,086][11217] Saving new best policy, reward=4.487! +[2023-02-28 15:43:47,187][11230] Updated weights for policy 0, policy_version 30 (0.0024) +[2023-02-28 15:43:50,075][11028] Fps is (10 sec: 2867.3, 60 sec: 2383.1, 300 sec: 2383.1). Total num frames: 131072. Throughput: 0: 740.1. Samples: 33304. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:43:50,082][11028] Avg episode reward: [(0, '4.605')] +[2023-02-28 15:43:50,098][11217] Saving new best policy, reward=4.605! +[2023-02-28 15:43:55,075][11028] Fps is (10 sec: 3686.3, 60 sec: 2525.9, 300 sec: 2525.9). Total num frames: 151552. Throughput: 0: 790.6. Samples: 36592. 
Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:43:55,082][11028] Avg episode reward: [(0, '4.486')] +[2023-02-28 15:43:57,356][11230] Updated weights for policy 0, policy_version 40 (0.0014) +[2023-02-28 15:44:00,078][11028] Fps is (10 sec: 4094.7, 60 sec: 2867.0, 300 sec: 2646.5). Total num frames: 172032. Throughput: 0: 888.6. Samples: 43258. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:44:00,085][11028] Avg episode reward: [(0, '4.400')] +[2023-02-28 15:44:05,075][11028] Fps is (10 sec: 3276.9, 60 sec: 3072.0, 300 sec: 2633.1). Total num frames: 184320. Throughput: 0: 859.4. Samples: 47488. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:44:05,078][11028] Avg episode reward: [(0, '4.377')] +[2023-02-28 15:44:10,075][11028] Fps is (10 sec: 2868.2, 60 sec: 3345.1, 300 sec: 2676.1). Total num frames: 200704. Throughput: 0: 860.0. Samples: 49584. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:44:10,078][11028] Avg episode reward: [(0, '4.324')] +[2023-02-28 15:44:10,729][11230] Updated weights for policy 0, policy_version 50 (0.0043) +[2023-02-28 15:44:15,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 2764.8). Total num frames: 221184. Throughput: 0: 905.8. Samples: 55432. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:44:15,078][11028] Avg episode reward: [(0, '4.236')] +[2023-02-28 15:44:20,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3481.9, 300 sec: 2843.1). Total num frames: 241664. Throughput: 0: 901.6. Samples: 61902. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:44:20,082][11028] Avg episode reward: [(0, '4.513')] +[2023-02-28 15:44:20,300][11230] Updated weights for policy 0, policy_version 60 (0.0020) +[2023-02-28 15:44:25,081][11028] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 2867.2). Total num frames: 258048. Throughput: 0: 874.4. Samples: 64032. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:44:25,083][11028] Avg episode reward: [(0, '4.607')] +[2023-02-28 15:44:25,096][11217] Saving new best policy, reward=4.607! +[2023-02-28 15:44:30,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 2888.8). Total num frames: 274432. Throughput: 0: 874.3. Samples: 68384. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:44:30,077][11028] Avg episode reward: [(0, '4.611')] +[2023-02-28 15:44:30,095][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000067_274432.pth... +[2023-02-28 15:44:30,228][11217] Saving new best policy, reward=4.611! +[2023-02-28 15:44:32,929][11230] Updated weights for policy 0, policy_version 70 (0.0037) +[2023-02-28 15:44:35,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 2949.1). Total num frames: 294912. Throughput: 0: 910.7. Samples: 74286. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:44:35,078][11028] Avg episode reward: [(0, '4.558')] +[2023-02-28 15:44:40,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3003.7). Total num frames: 315392. Throughput: 0: 911.0. Samples: 77588. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:44:40,078][11028] Avg episode reward: [(0, '4.319')] +[2023-02-28 15:44:43,339][11230] Updated weights for policy 0, policy_version 80 (0.0012) +[2023-02-28 15:44:45,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3016.1). Total num frames: 331776. Throughput: 0: 883.1. Samples: 82996. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:44:45,079][11028] Avg episode reward: [(0, '4.387')] +[2023-02-28 15:44:50,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 2991.9). Total num frames: 344064. Throughput: 0: 884.3. Samples: 87280. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:44:50,078][11028] Avg episode reward: [(0, '4.321')] +[2023-02-28 15:44:55,079][11028] Fps is (10 sec: 3275.4, 60 sec: 3549.6, 300 sec: 3037.8). Total num frames: 364544. Throughput: 0: 901.1. Samples: 90136. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:44:55,081][11028] Avg episode reward: [(0, '4.209')] +[2023-02-28 15:44:55,188][11230] Updated weights for policy 0, policy_version 90 (0.0016) +[2023-02-28 15:45:00,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3618.3, 300 sec: 3113.0). Total num frames: 389120. Throughput: 0: 922.7. Samples: 96954. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:45:00,081][11028] Avg episode reward: [(0, '4.433')] +[2023-02-28 15:45:05,080][11028] Fps is (10 sec: 4095.9, 60 sec: 3686.1, 300 sec: 3119.2). Total num frames: 405504. Throughput: 0: 893.6. Samples: 102116. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:45:05,088][11028] Avg episode reward: [(0, '4.487')] +[2023-02-28 15:45:05,922][11230] Updated weights for policy 0, policy_version 100 (0.0021) +[2023-02-28 15:45:10,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3094.8). Total num frames: 417792. Throughput: 0: 893.0. Samples: 104218. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:45:10,080][11028] Avg episode reward: [(0, '4.517')] +[2023-02-28 15:45:15,075][11028] Fps is (10 sec: 3278.3, 60 sec: 3618.1, 300 sec: 3130.5). Total num frames: 438272. Throughput: 0: 913.0. Samples: 109470. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-28 15:45:15,084][11028] Avg episode reward: [(0, '4.681')] +[2023-02-28 15:45:15,086][11217] Saving new best policy, reward=4.681! +[2023-02-28 15:45:17,388][11230] Updated weights for policy 0, policy_version 110 (0.0018) +[2023-02-28 15:45:20,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3163.8). Total num frames: 458752. Throughput: 0: 928.3. Samples: 116058. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:45:20,077][11028] Avg episode reward: [(0, '4.725')] +[2023-02-28 15:45:20,112][11217] Saving new best policy, reward=4.725! +[2023-02-28 15:45:25,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3194.9). Total num frames: 479232. Throughput: 0: 915.3. Samples: 118776. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:45:25,081][11028] Avg episode reward: [(0, '4.551')] +[2023-02-28 15:45:29,551][11230] Updated weights for policy 0, policy_version 120 (0.0026) +[2023-02-28 15:45:30,076][11028] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3171.1). Total num frames: 491520. Throughput: 0: 889.4. Samples: 123018. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:45:30,082][11028] Avg episode reward: [(0, '4.541')] +[2023-02-28 15:45:35,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3174.4). Total num frames: 507904. Throughput: 0: 910.7. Samples: 128262. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:45:35,082][11028] Avg episode reward: [(0, '4.495')] +[2023-02-28 15:45:39,569][11230] Updated weights for policy 0, policy_version 130 (0.0020) +[2023-02-28 15:45:40,075][11028] Fps is (10 sec: 4096.2, 60 sec: 3618.1, 300 sec: 3227.2). Total num frames: 532480. 
Throughput: 0: 922.7. Samples: 131654. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:45:40,084][11028] Avg episode reward: [(0, '4.643')] +[2023-02-28 15:45:45,078][11028] Fps is (10 sec: 4094.6, 60 sec: 3617.9, 300 sec: 3228.5). Total num frames: 548864. Throughput: 0: 901.9. Samples: 137542. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:45:45,083][11028] Avg episode reward: [(0, '4.599')] +[2023-02-28 15:45:50,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3206.6). Total num frames: 561152. Throughput: 0: 880.9. Samples: 141754. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:45:50,081][11028] Avg episode reward: [(0, '4.723')] +[2023-02-28 15:45:52,891][11230] Updated weights for policy 0, policy_version 140 (0.0012) +[2023-02-28 15:45:55,075][11028] Fps is (10 sec: 3277.9, 60 sec: 3618.4, 300 sec: 3231.3). Total num frames: 581632. Throughput: 0: 883.8. Samples: 143990. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:45:55,077][11028] Avg episode reward: [(0, '4.683')] +[2023-02-28 15:46:00,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3618.1, 300 sec: 3276.8). Total num frames: 606208. Throughput: 0: 917.2. Samples: 150742. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:46:00,077][11028] Avg episode reward: [(0, '4.782')] +[2023-02-28 15:46:00,091][11217] Saving new best policy, reward=4.782! +[2023-02-28 15:46:02,016][11230] Updated weights for policy 0, policy_version 150 (0.0019) +[2023-02-28 15:46:05,078][11028] Fps is (10 sec: 4094.6, 60 sec: 3618.2, 300 sec: 3276.7). Total num frames: 622592. Throughput: 0: 891.8. Samples: 156190. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-28 15:46:05,081][11028] Avg episode reward: [(0, '4.811')] +[2023-02-28 15:46:05,084][11217] Saving new best policy, reward=4.811! +[2023-02-28 15:46:10,077][11028] Fps is (10 sec: 2866.7, 60 sec: 3618.0, 300 sec: 3255.8). Total num frames: 634880. Throughput: 0: 876.4. Samples: 158214. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:46:10,081][11028] Avg episode reward: [(0, '4.688')] +[2023-02-28 15:46:15,075][11028] Fps is (10 sec: 2868.2, 60 sec: 3549.9, 300 sec: 3256.3). Total num frames: 651264. Throughput: 0: 882.5. Samples: 162728. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:46:15,083][11028] Avg episode reward: [(0, '4.584')] +[2023-02-28 15:46:15,340][11230] Updated weights for policy 0, policy_version 160 (0.0022) +[2023-02-28 15:46:20,075][11028] Fps is (10 sec: 4096.7, 60 sec: 3618.1, 300 sec: 3296.8). Total num frames: 675840. Throughput: 0: 913.7. Samples: 169378. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:46:20,078][11028] Avg episode reward: [(0, '4.601')] +[2023-02-28 15:46:25,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3296.3). Total num frames: 692224. Throughput: 0: 911.8. Samples: 172684. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:46:25,080][11028] Avg episode reward: [(0, '4.753')] +[2023-02-28 15:46:25,323][11230] Updated weights for policy 0, policy_version 170 (0.0013) +[2023-02-28 15:46:30,075][11028] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3295.8). Total num frames: 708608. Throughput: 0: 878.9. Samples: 177088. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:46:30,079][11028] Avg episode reward: [(0, '4.675')] +[2023-02-28 15:46:30,098][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000173_708608.pth... 
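The checkpoint filenames above encode the policy version and the cumulative env-frame count: checkpoint_000000173_708608.pth is model version 173 saved at 708,608 frames, and 173 × 4,096 = 708,608, i.e. one version per 4,096-frame training batch (the earlier checkpoint_000000067_274432.pth fits the same pattern, 67 × 4,096 = 274,432). A sketch of that naming scheme; save_checkpoint is a hypothetical helper, not Sample Factory's actual API:

```python
import os
import torch

def save_checkpoint(model, train_dir, policy_id, policy_version, env_steps):
    """Write checkpoint_p<policy>/checkpoint_<version, 9 digits>_<frames>.pth,
    e.g. version 173 at 708608 frames -> checkpoint_000000173_708608.pth."""
    ckpt_dir = os.path.join(train_dir, f"checkpoint_p{policy_id}")
    os.makedirs(ckpt_dir, exist_ok=True)
    path = os.path.join(ckpt_dir, f"checkpoint_{policy_version:09d}_{env_steps}.pth")
    print(f"Saving {path}...")
    torch.save({"model": model.state_dict(), "env_steps": env_steps}, path)
    return path
```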
+[2023-02-28 15:46:35,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3295.4). Total num frames: 724992. Throughput: 0: 889.9. Samples: 181798. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:46:35,078][11028] Avg episode reward: [(0, '4.608')] +[2023-02-28 15:46:37,582][11230] Updated weights for policy 0, policy_version 180 (0.0015) +[2023-02-28 15:46:40,075][11028] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3313.2). Total num frames: 745472. Throughput: 0: 916.9. Samples: 185250. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:46:40,082][11028] Avg episode reward: [(0, '4.564')] +[2023-02-28 15:46:45,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3618.3, 300 sec: 3330.2). Total num frames: 765952. Throughput: 0: 911.8. Samples: 191774. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:46:45,080][11028] Avg episode reward: [(0, '4.771')] +[2023-02-28 15:46:48,807][11230] Updated weights for policy 0, policy_version 190 (0.0024) +[2023-02-28 15:46:50,078][11028] Fps is (10 sec: 3275.9, 60 sec: 3618.0, 300 sec: 3311.6). Total num frames: 778240. Throughput: 0: 883.1. Samples: 195928. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:46:50,084][11028] Avg episode reward: [(0, '4.772')] +[2023-02-28 15:46:55,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3310.9). Total num frames: 794624. Throughput: 0: 887.3. Samples: 198142. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:46:55,077][11028] Avg episode reward: [(0, '4.941')] +[2023-02-28 15:46:55,089][11217] Saving new best policy, reward=4.941! +[2023-02-28 15:46:59,999][11230] Updated weights for policy 0, policy_version 200 (0.0019) +[2023-02-28 15:47:00,075][11028] Fps is (10 sec: 4097.2, 60 sec: 3549.9, 300 sec: 3343.7). Total num frames: 819200. Throughput: 0: 920.8. Samples: 204162. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:47:00,077][11028] Avg episode reward: [(0, '5.160')] +[2023-02-28 15:47:00,087][11217] Saving new best policy, reward=5.160! +[2023-02-28 15:47:05,077][11028] Fps is (10 sec: 4504.5, 60 sec: 3618.2, 300 sec: 3358.7). Total num frames: 839680. Throughput: 0: 916.1. Samples: 210606. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:47:05,080][11028] Avg episode reward: [(0, '4.793')] +[2023-02-28 15:47:10,082][11028] Fps is (10 sec: 3274.6, 60 sec: 3617.8, 300 sec: 3341.0). Total num frames: 851968. Throughput: 0: 889.0. Samples: 212694. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:47:10,084][11028] Avg episode reward: [(0, '4.666')] +[2023-02-28 15:47:12,100][11230] Updated weights for policy 0, policy_version 210 (0.0019) +[2023-02-28 15:47:15,075][11028] Fps is (10 sec: 2867.9, 60 sec: 3618.1, 300 sec: 3339.8). Total num frames: 868352. Throughput: 0: 884.7. Samples: 216900. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:47:15,078][11028] Avg episode reward: [(0, '4.849')] +[2023-02-28 15:47:20,075][11028] Fps is (10 sec: 3688.9, 60 sec: 3549.9, 300 sec: 3354.1). Total num frames: 888832. Throughput: 0: 917.9. Samples: 223104. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:47:20,077][11028] Avg episode reward: [(0, '4.923')] +[2023-02-28 15:47:22,243][11230] Updated weights for policy 0, policy_version 220 (0.0023) +[2023-02-28 15:47:25,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3383.0). Total num frames: 913408. Throughput: 0: 917.7. Samples: 226548. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:47:25,078][11028] Avg episode reward: [(0, '4.588')] +[2023-02-28 15:47:30,076][11028] Fps is (10 sec: 3686.2, 60 sec: 3618.1, 300 sec: 3366.2). Total num frames: 925696. Throughput: 0: 888.7. Samples: 231764. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:47:30,083][11028] Avg episode reward: [(0, '4.719')] +[2023-02-28 15:47:34,996][11230] Updated weights for policy 0, policy_version 230 (0.0019) +[2023-02-28 15:47:35,081][11028] Fps is (10 sec: 2865.4, 60 sec: 3617.8, 300 sec: 3364.5). Total num frames: 942080. Throughput: 0: 892.6. Samples: 236100. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:47:35,087][11028] Avg episode reward: [(0, '4.849')] +[2023-02-28 15:47:40,075][11028] Fps is (10 sec: 3686.6, 60 sec: 3618.1, 300 sec: 3377.4). Total num frames: 962560. Throughput: 0: 912.3. Samples: 239196. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:47:40,078][11028] Avg episode reward: [(0, '4.890')] +[2023-02-28 15:47:45,075][11028] Fps is (10 sec: 3688.7, 60 sec: 3549.9, 300 sec: 3375.7). Total num frames: 978944. Throughput: 0: 909.9. Samples: 245106. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:47:45,078][11028] Avg episode reward: [(0, '5.183')] +[2023-02-28 15:47:45,081][11217] Saving new best policy, reward=5.183! +[2023-02-28 15:47:45,796][11230] Updated weights for policy 0, policy_version 240 (0.0014) +[2023-02-28 15:47:50,076][11028] Fps is (10 sec: 2866.8, 60 sec: 3550.0, 300 sec: 3360.1). Total num frames: 991232. Throughput: 0: 843.0. Samples: 248540. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:47:50,079][11028] Avg episode reward: [(0, '5.119')] +[2023-02-28 15:47:55,075][11028] Fps is (10 sec: 2457.6, 60 sec: 3481.6, 300 sec: 3401.8). Total num frames: 1003520. Throughput: 0: 834.7. Samples: 250250. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:47:55,084][11028] Avg episode reward: [(0, '5.000')] +[2023-02-28 15:48:00,075][11028] Fps is (10 sec: 2457.9, 60 sec: 3276.8, 300 sec: 3443.4). Total num frames: 1015808. Throughput: 0: 831.3. Samples: 254310. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:48:00,078][11028] Avg episode reward: [(0, '4.866')] +[2023-02-28 15:48:01,013][11230] Updated weights for policy 0, policy_version 250 (0.0047) +[2023-02-28 15:48:05,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3345.2, 300 sec: 3526.7). Total num frames: 1040384. Throughput: 0: 837.6. Samples: 260798. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:48:05,081][11028] Avg episode reward: [(0, '5.042')] +[2023-02-28 15:48:10,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3482.0, 300 sec: 3540.6). Total num frames: 1060864. Throughput: 0: 835.2. Samples: 264132. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:48:10,081][11028] Avg episode reward: [(0, '5.281')] +[2023-02-28 15:48:10,100][11217] Saving new best policy, reward=5.281! +[2023-02-28 15:48:10,867][11230] Updated weights for policy 0, policy_version 260 (0.0025) +[2023-02-28 15:48:15,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3526.8). Total num frames: 1073152. Throughput: 0: 823.4. Samples: 268816. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:48:15,078][11028] Avg episode reward: [(0, '5.120')] +[2023-02-28 15:48:20,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 1089536. Throughput: 0: 822.6. Samples: 273110. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:48:20,080][11028] Avg episode reward: [(0, '5.042')] +[2023-02-28 15:48:23,512][11230] Updated weights for policy 0, policy_version 270 (0.0028) +[2023-02-28 15:48:25,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3554.5). Total num frames: 1110016. Throughput: 0: 828.2. Samples: 276466. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:48:25,078][11028] Avg episode reward: [(0, '5.023')] +[2023-02-28 15:48:30,078][11028] Fps is (10 sec: 4504.5, 60 sec: 3481.5, 300 sec: 3568.3). Total num frames: 1134592. Throughput: 0: 849.4. Samples: 283330. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:48:30,080][11028] Avg episode reward: [(0, '5.313')] +[2023-02-28 15:48:30,097][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000277_1134592.pth... +[2023-02-28 15:48:30,247][11217] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000067_274432.pth +[2023-02-28 15:48:30,261][11217] Saving new best policy, reward=5.313! +[2023-02-28 15:48:33,900][11230] Updated weights for policy 0, policy_version 280 (0.0026) +[2023-02-28 15:48:35,076][11028] Fps is (10 sec: 3686.4, 60 sec: 3413.7, 300 sec: 3540.6). Total num frames: 1146880. Throughput: 0: 875.0. Samples: 287916. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:48:35,079][11028] Avg episode reward: [(0, '5.277')] +[2023-02-28 15:48:40,075][11028] Fps is (10 sec: 2867.9, 60 sec: 3345.1, 300 sec: 3554.5). Total num frames: 1163264. Throughput: 0: 885.0. Samples: 290074. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:48:40,078][11028] Avg episode reward: [(0, '5.491')] +[2023-02-28 15:48:40,087][11217] Saving new best policy, reward=5.491! +[2023-02-28 15:48:45,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3568.4). Total num frames: 1183744. Throughput: 0: 922.4. Samples: 295818. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:48:45,082][11028] Avg episode reward: [(0, '5.701')] +[2023-02-28 15:48:45,086][11217] Saving new best policy, reward=5.701! +[2023-02-28 15:48:45,523][11230] Updated weights for policy 0, policy_version 290 (0.0018) +[2023-02-28 15:48:50,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3618.2, 300 sec: 3582.3). Total num frames: 1208320. Throughput: 0: 925.1. Samples: 302428. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:48:50,083][11028] Avg episode reward: [(0, '6.092')] +[2023-02-28 15:48:50,094][11217] Saving new best policy, reward=6.092! +[2023-02-28 15:48:55,075][11028] Fps is (10 sec: 3686.3, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 1220608. Throughput: 0: 898.4. Samples: 304560. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:48:55,080][11028] Avg episode reward: [(0, '6.095')] +[2023-02-28 15:48:55,089][11217] Saving new best policy, reward=6.095! +[2023-02-28 15:48:57,383][11230] Updated weights for policy 0, policy_version 300 (0.0013) +[2023-02-28 15:49:00,078][11028] Fps is (10 sec: 2457.0, 60 sec: 3618.0, 300 sec: 3554.5). Total num frames: 1232896. Throughput: 0: 886.1. Samples: 308692. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:49:00,081][11028] Avg episode reward: [(0, '5.765')] +[2023-02-28 15:49:05,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3549.8, 300 sec: 3568.4). Total num frames: 1253376. Throughput: 0: 920.9. Samples: 314552. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:49:05,077][11028] Avg episode reward: [(0, '5.659')] +[2023-02-28 15:49:08,014][11230] Updated weights for policy 0, policy_version 310 (0.0028) +[2023-02-28 15:49:10,075][11028] Fps is (10 sec: 4506.7, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1277952. Throughput: 0: 919.4. Samples: 317840. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:49:10,078][11028] Avg episode reward: [(0, '6.049')] +[2023-02-28 15:49:15,075][11028] Fps is (10 sec: 3686.5, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 1290240. Throughput: 0: 888.0. Samples: 323288. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:49:15,083][11028] Avg episode reward: [(0, '6.456')] +[2023-02-28 15:49:15,095][11217] Saving new best policy, reward=6.456! +[2023-02-28 15:49:20,079][11028] Fps is (10 sec: 2866.1, 60 sec: 3617.9, 300 sec: 3554.4). Total num frames: 1306624. Throughput: 0: 877.1. Samples: 327388. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:49:20,088][11028] Avg episode reward: [(0, '6.496')] +[2023-02-28 15:49:20,107][11217] Saving new best policy, reward=6.496! +[2023-02-28 15:49:21,239][11230] Updated weights for policy 0, policy_version 320 (0.0014) +[2023-02-28 15:49:25,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 1327104. Throughput: 0: 889.6. Samples: 330106. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:49:25,082][11028] Avg episode reward: [(0, '6.828')] +[2023-02-28 15:49:25,087][11217] Saving new best policy, reward=6.828! +[2023-02-28 15:49:30,075][11028] Fps is (10 sec: 4097.6, 60 sec: 3550.0, 300 sec: 3568.4). Total num frames: 1347584. Throughput: 0: 912.4. Samples: 336876. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:49:30,079][11028] Avg episode reward: [(0, '6.597')] +[2023-02-28 15:49:30,346][11230] Updated weights for policy 0, policy_version 330 (0.0024) +[2023-02-28 15:49:35,076][11028] Fps is (10 sec: 3685.9, 60 sec: 3618.0, 300 sec: 3554.5). Total num frames: 1363968. Throughput: 0: 886.2. Samples: 342306. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:49:35,081][11028] Avg episode reward: [(0, '6.616')] +[2023-02-28 15:49:40,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 1380352. Throughput: 0: 885.1. Samples: 344390. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:49:40,079][11028] Avg episode reward: [(0, '6.804')] +[2023-02-28 15:49:43,239][11230] Updated weights for policy 0, policy_version 340 (0.0027) +[2023-02-28 15:49:45,075][11028] Fps is (10 sec: 3686.9, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1400832. Throughput: 0: 906.2. Samples: 349468. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:49:45,078][11028] Avg episode reward: [(0, '7.516')] +[2023-02-28 15:49:45,082][11217] Saving new best policy, reward=7.516! +[2023-02-28 15:49:50,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 1421312. Throughput: 0: 920.7. Samples: 355982. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:49:50,078][11028] Avg episode reward: [(0, '7.915')] +[2023-02-28 15:49:50,090][11217] Saving new best policy, reward=7.915! +[2023-02-28 15:49:52,860][11230] Updated weights for policy 0, policy_version 350 (0.0013) +[2023-02-28 15:49:55,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3554.5). Total num frames: 1437696. Throughput: 0: 912.4. Samples: 358898. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:49:55,077][11028] Avg episode reward: [(0, '8.225')] +[2023-02-28 15:49:55,093][11217] Saving new best policy, reward=8.225! +[2023-02-28 15:50:00,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3618.3, 300 sec: 3540.7). Total num frames: 1449984. Throughput: 0: 884.1. Samples: 363074. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:50:00,084][11028] Avg episode reward: [(0, '8.255')] +[2023-02-28 15:50:00,095][11217] Saving new best policy, reward=8.255! +[2023-02-28 15:50:05,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3568.4). Total num frames: 1470464. Throughput: 0: 913.9. Samples: 368510. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:50:05,083][11028] Avg episode reward: [(0, '8.344')] +[2023-02-28 15:50:05,087][11217] Saving new best policy, reward=8.344! +[2023-02-28 15:50:05,582][11230] Updated weights for policy 0, policy_version 360 (0.0017) +[2023-02-28 15:50:10,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 1490944. Throughput: 0: 926.1. Samples: 371782. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:50:10,083][11028] Avg episode reward: [(0, '9.197')] +[2023-02-28 15:50:10,098][11217] Saving new best policy, reward=9.197! +[2023-02-28 15:50:15,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 1511424. Throughput: 0: 909.6. Samples: 377806. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:50:15,079][11028] Avg episode reward: [(0, '9.565')] +[2023-02-28 15:50:15,084][11217] Saving new best policy, reward=9.565! +[2023-02-28 15:50:16,352][11230] Updated weights for policy 0, policy_version 370 (0.0018) +[2023-02-28 15:50:20,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3618.4, 300 sec: 3540.6). Total num frames: 1523712. Throughput: 0: 882.7. Samples: 382028. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:50:20,081][11028] Avg episode reward: [(0, '9.278')] +[2023-02-28 15:50:25,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 1540096. Throughput: 0: 886.2. Samples: 384268. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:50:25,082][11028] Avg episode reward: [(0, '8.894')] +[2023-02-28 15:50:27,863][11230] Updated weights for policy 0, policy_version 380 (0.0038) +[2023-02-28 15:50:30,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1564672. Throughput: 0: 918.5. Samples: 390802. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:50:30,077][11028] Avg episode reward: [(0, '8.840')] +[2023-02-28 15:50:30,090][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000382_1564672.pth... +[2023-02-28 15:50:30,225][11217] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000173_708608.pth +[2023-02-28 15:50:35,077][11028] Fps is (10 sec: 4504.8, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 1585152. Throughput: 0: 907.8. Samples: 396834. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:50:35,079][11028] Avg episode reward: [(0, '9.662')] +[2023-02-28 15:50:35,082][11217] Saving new best policy, reward=9.662! +[2023-02-28 15:50:39,233][11230] Updated weights for policy 0, policy_version 390 (0.0027) +[2023-02-28 15:50:40,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 1597440. Throughput: 0: 888.8. Samples: 398896. 
Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:50:40,081][11028] Avg episode reward: [(0, '9.933')] +[2023-02-28 15:50:40,105][11217] Saving new best policy, reward=9.933! +[2023-02-28 15:50:45,075][11028] Fps is (10 sec: 2867.7, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 1613824. Throughput: 0: 892.5. Samples: 403238. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:50:45,078][11028] Avg episode reward: [(0, '10.415')] +[2023-02-28 15:50:45,082][11217] Saving new best policy, reward=10.415! +[2023-02-28 15:50:50,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 1634304. Throughput: 0: 915.7. Samples: 409716. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:50:50,081][11028] Avg episode reward: [(0, '10.198')] +[2023-02-28 15:50:50,349][11230] Updated weights for policy 0, policy_version 400 (0.0018) +[2023-02-28 15:50:55,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 1654784. Throughput: 0: 916.6. Samples: 413030. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:50:55,082][11028] Avg episode reward: [(0, '9.616')] +[2023-02-28 15:51:00,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 1671168. Throughput: 0: 891.4. Samples: 417920. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:51:00,082][11028] Avg episode reward: [(0, '9.799')] +[2023-02-28 15:51:02,658][11230] Updated weights for policy 0, policy_version 410 (0.0024) +[2023-02-28 15:51:05,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 1683456. Throughput: 0: 895.7. Samples: 422336. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:51:05,077][11028] Avg episode reward: [(0, '9.665')] +[2023-02-28 15:51:10,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1708032. Throughput: 0: 920.5. Samples: 425692. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:51:10,078][11028] Avg episode reward: [(0, '10.797')] +[2023-02-28 15:51:10,093][11217] Saving new best policy, reward=10.797! +[2023-02-28 15:51:12,272][11230] Updated weights for policy 0, policy_version 420 (0.0012) +[2023-02-28 15:51:15,077][11028] Fps is (10 sec: 4504.8, 60 sec: 3618.0, 300 sec: 3568.4). Total num frames: 1728512. Throughput: 0: 922.5. Samples: 432316. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:51:15,085][11028] Avg episode reward: [(0, '10.901')] +[2023-02-28 15:51:15,087][11217] Saving new best policy, reward=10.901! +[2023-02-28 15:51:20,078][11028] Fps is (10 sec: 3275.7, 60 sec: 3617.9, 300 sec: 3554.5). Total num frames: 1740800. Throughput: 0: 888.5. Samples: 436818. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:51:20,080][11028] Avg episode reward: [(0, '11.118')] +[2023-02-28 15:51:20,144][11217] Saving new best policy, reward=11.118! +[2023-02-28 15:51:25,075][11028] Fps is (10 sec: 2867.8, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 1757184. Throughput: 0: 886.5. Samples: 438788. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-02-28 15:51:25,078][11028] Avg episode reward: [(0, '11.591')] +[2023-02-28 15:51:25,082][11217] Saving new best policy, reward=11.591! +[2023-02-28 15:51:25,762][11230] Updated weights for policy 0, policy_version 430 (0.0027) +[2023-02-28 15:51:30,075][11028] Fps is (10 sec: 3687.6, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 1777664. Throughput: 0: 921.1. Samples: 444688. 
Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0) +[2023-02-28 15:51:30,081][11028] Avg episode reward: [(0, '11.123')] +[2023-02-28 15:51:34,741][11230] Updated weights for policy 0, policy_version 440 (0.0017) +[2023-02-28 15:51:35,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3618.2, 300 sec: 3582.3). Total num frames: 1802240. Throughput: 0: 929.2. Samples: 451528. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:51:35,078][11028] Avg episode reward: [(0, '12.126')] +[2023-02-28 15:51:35,084][11217] Saving new best policy, reward=12.126! +[2023-02-28 15:51:40,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 1818624. Throughput: 0: 903.3. Samples: 453680. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:51:40,086][11028] Avg episode reward: [(0, '11.465')] +[2023-02-28 15:51:45,075][11028] Fps is (10 sec: 2867.1, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 1830912. Throughput: 0: 891.0. Samples: 458014. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:51:45,087][11028] Avg episode reward: [(0, '11.191')] +[2023-02-28 15:51:47,581][11230] Updated weights for policy 0, policy_version 450 (0.0021) +[2023-02-28 15:51:50,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1851392. Throughput: 0: 927.1. Samples: 464054. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:51:50,083][11028] Avg episode reward: [(0, '12.184')] +[2023-02-28 15:51:50,093][11217] Saving new best policy, reward=12.184! +[2023-02-28 15:51:55,075][11028] Fps is (10 sec: 4505.7, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 1875968. Throughput: 0: 925.5. Samples: 467340. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:51:55,078][11028] Avg episode reward: [(0, '11.804')] +[2023-02-28 15:51:57,541][11230] Updated weights for policy 0, policy_version 460 (0.0013) +[2023-02-28 15:52:00,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 1888256. Throughput: 0: 893.6. Samples: 472528. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:52:00,081][11028] Avg episode reward: [(0, '12.050')] +[2023-02-28 15:52:05,076][11028] Fps is (10 sec: 2867.0, 60 sec: 3686.4, 300 sec: 3568.5). Total num frames: 1904640. Throughput: 0: 888.5. Samples: 476798. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:52:05,080][11028] Avg episode reward: [(0, '10.837')] +[2023-02-28 15:52:09,599][11230] Updated weights for policy 0, policy_version 470 (0.0022) +[2023-02-28 15:52:10,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1925120. Throughput: 0: 910.4. Samples: 479754. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:52:10,081][11028] Avg episode reward: [(0, '11.540')] +[2023-02-28 15:52:15,075][11028] Fps is (10 sec: 4096.2, 60 sec: 3618.2, 300 sec: 3582.3). Total num frames: 1945600. Throughput: 0: 932.0. Samples: 486630. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:52:15,083][11028] Avg episode reward: [(0, '12.094')] +[2023-02-28 15:52:20,077][11028] Fps is (10 sec: 3685.5, 60 sec: 3686.5, 300 sec: 3554.5). Total num frames: 1961984. Throughput: 0: 892.7. Samples: 491700. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:52:20,085][11028] Avg episode reward: [(0, '12.275')] +[2023-02-28 15:52:20,103][11217] Saving new best policy, reward=12.275! 
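The recurring status line, "Fps is (10 sec: ..., 60 sec: ..., 300 sec: ...)", reports environment frames per second over three trailing windows; on the very first report there is no history yet, which is why the log opens with nan values. A sketch of such a multi-window tracker (a hypothetical class, assuming the reporting loop periodically calls report with the cumulative frame count):

```python
import time
from collections import deque

class FpsTracker:
    """Frames-per-second over several trailing windows, as in the log lines."""

    def __init__(self, windows=(10, 60, 300)):
        self.windows = windows
        self.history = deque()  # (timestamp, total_frames) snapshots

    def report(self, total_frames):
        now = time.time()
        self.history.append((now, total_frames))
        # drop snapshots older than the largest window
        while now - self.history[0][0] > max(self.windows):
            self.history.popleft()
        stats = {}
        for w in self.windows:
            # oldest snapshot still inside this window
            past = next(((t, f) for t, f in self.history if now - t <= w), None)
            if past is None or now == past[0]:
                stats[w] = float("nan")  # no history yet: matches the first log line
            else:
                stats[w] = (total_frames - past[1]) / (now - past[0])
        return stats
```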
+[2023-02-28 15:52:20,730][11230] Updated weights for policy 0, policy_version 480 (0.0035) +[2023-02-28 15:52:25,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 1978368. Throughput: 0: 890.4. Samples: 493748. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:52:25,080][11028] Avg episode reward: [(0, '12.958')] +[2023-02-28 15:52:25,084][11217] Saving new best policy, reward=12.958! +[2023-02-28 15:52:30,075][11028] Fps is (10 sec: 3687.2, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 1998848. Throughput: 0: 915.3. Samples: 499204. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:52:30,078][11028] Avg episode reward: [(0, '12.487')] +[2023-02-28 15:52:30,092][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000488_1998848.pth... +[2023-02-28 15:52:30,233][11217] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000277_1134592.pth +[2023-02-28 15:52:31,840][11230] Updated weights for policy 0, policy_version 490 (0.0041) +[2023-02-28 15:52:35,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 2019328. Throughput: 0: 930.4. Samples: 505922. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:52:35,078][11028] Avg episode reward: [(0, '12.654')] +[2023-02-28 15:52:40,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 2035712. Throughput: 0: 921.2. Samples: 508796. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:52:40,080][11028] Avg episode reward: [(0, '12.802')] +[2023-02-28 15:52:43,401][11230] Updated weights for policy 0, policy_version 500 (0.0028) +[2023-02-28 15:52:45,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3596.2). Total num frames: 2052096. Throughput: 0: 899.8. Samples: 513020. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:52:45,084][11028] Avg episode reward: [(0, '12.836')] +[2023-02-28 15:52:50,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 2072576. Throughput: 0: 931.6. Samples: 518720. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:52:50,078][11028] Avg episode reward: [(0, '13.196')] +[2023-02-28 15:52:50,091][11217] Saving new best policy, reward=13.196! +[2023-02-28 15:52:53,615][11230] Updated weights for policy 0, policy_version 510 (0.0021) +[2023-02-28 15:52:55,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3651.7). Total num frames: 2093056. Throughput: 0: 938.8. Samples: 522002. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:52:55,083][11028] Avg episode reward: [(0, '13.692')] +[2023-02-28 15:52:55,088][11217] Saving new best policy, reward=13.692! +[2023-02-28 15:53:00,077][11028] Fps is (10 sec: 3685.6, 60 sec: 3686.3, 300 sec: 3623.9). Total num frames: 2109440. Throughput: 0: 915.7. Samples: 527840. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:53:00,080][11028] Avg episode reward: [(0, '14.161')] +[2023-02-28 15:53:00,091][11217] Saving new best policy, reward=14.161! +[2023-02-28 15:53:05,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 2125824. Throughput: 0: 896.5. Samples: 532042. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:53:05,081][11028] Avg episode reward: [(0, '14.522')] +[2023-02-28 15:53:05,088][11217] Saving new best policy, reward=14.522! 
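Two maintenance behaviours recur in this stretch of the log: ordinary checkpoints are rotated (each "Saving .../checkpoint_..." is eventually followed by "Removing" the oldest one, leaving two on disk at a time), and "Saving new best policy, reward=X!" fires whenever the running average episode reward exceeds the previous best. A sketch of both, with hypothetical helper names and the keep count inferred from the log:

```python
import glob
import os

import torch

BEST_REWARD = float("-inf")

def rotate_checkpoints(ckpt_dir, keep=2):
    """Drop all but the `keep` newest checkpoints; the 9-digit zero-padded
    version prefix makes lexicographic order equal version order."""
    ckpts = sorted(glob.glob(os.path.join(ckpt_dir, "checkpoint_*.pth")))
    for old in ckpts[:-keep]:
        print(f"Removing {old}")
        os.remove(old)

def maybe_save_best(model, ckpt_dir, avg_episode_reward):
    """Keep a separate snapshot of the policy with the best average reward so far."""
    global BEST_REWARD
    if avg_episode_reward > BEST_REWARD:
        BEST_REWARD = avg_episode_reward
        print(f"Saving new best policy, reward={avg_episode_reward:.3f}!")
        torch.save(model.state_dict(), os.path.join(ckpt_dir, "best.pth"))
```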
+[2023-02-28 15:53:06,286][11230] Updated weights for policy 0, policy_version 520 (0.0025) +[2023-02-28 15:53:10,075][11028] Fps is (10 sec: 3277.6, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 2142208. Throughput: 0: 907.7. Samples: 534596. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:53:10,081][11028] Avg episode reward: [(0, '14.886')] +[2023-02-28 15:53:10,095][11217] Saving new best policy, reward=14.886! +[2023-02-28 15:53:15,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 2166784. Throughput: 0: 934.4. Samples: 541254. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:53:15,080][11028] Avg episode reward: [(0, '15.806')] +[2023-02-28 15:53:15,083][11217] Saving new best policy, reward=15.806! +[2023-02-28 15:53:15,699][11230] Updated weights for policy 0, policy_version 530 (0.0016) +[2023-02-28 15:53:20,076][11028] Fps is (10 sec: 4095.6, 60 sec: 3686.5, 300 sec: 3637.8). Total num frames: 2183168. Throughput: 0: 906.6. Samples: 546722. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:53:20,087][11028] Avg episode reward: [(0, '16.664')] +[2023-02-28 15:53:20,103][11217] Saving new best policy, reward=16.664! +[2023-02-28 15:53:25,077][11028] Fps is (10 sec: 2866.7, 60 sec: 3618.0, 300 sec: 3596.2). Total num frames: 2195456. Throughput: 0: 888.4. Samples: 548776. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:53:25,079][11028] Avg episode reward: [(0, '16.939')] +[2023-02-28 15:53:25,090][11217] Saving new best policy, reward=16.939! +[2023-02-28 15:53:28,625][11230] Updated weights for policy 0, policy_version 540 (0.0018) +[2023-02-28 15:53:30,075][11028] Fps is (10 sec: 3277.1, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 2215936. Throughput: 0: 904.8. Samples: 553738. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:53:30,077][11028] Avg episode reward: [(0, '16.136')] +[2023-02-28 15:53:35,077][11028] Fps is (10 sec: 4505.4, 60 sec: 3686.3, 300 sec: 3651.7). Total num frames: 2240512. Throughput: 0: 933.8. Samples: 560744. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:53:35,083][11028] Avg episode reward: [(0, '14.489')] +[2023-02-28 15:53:37,450][11230] Updated weights for policy 0, policy_version 550 (0.0013) +[2023-02-28 15:53:40,079][11028] Fps is (10 sec: 4094.6, 60 sec: 3686.2, 300 sec: 3637.8). Total num frames: 2256896. Throughput: 0: 934.9. Samples: 564076. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:53:40,081][11028] Avg episode reward: [(0, '14.455')] +[2023-02-28 15:53:45,075][11028] Fps is (10 sec: 3277.5, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 2273280. Throughput: 0: 900.3. Samples: 568350. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:53:45,081][11028] Avg episode reward: [(0, '14.103')] +[2023-02-28 15:53:50,075][11028] Fps is (10 sec: 3277.9, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 2289664. Throughput: 0: 925.3. Samples: 573682. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:53:50,080][11028] Avg episode reward: [(0, '14.111')] +[2023-02-28 15:53:50,265][11230] Updated weights for policy 0, policy_version 560 (0.0020) +[2023-02-28 15:53:55,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 2314240. Throughput: 0: 944.6. Samples: 577102. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:53:55,081][11028] Avg episode reward: [(0, '15.782')] +[2023-02-28 15:54:00,076][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.5, 300 sec: 3651.7). Total num frames: 2330624. Throughput: 0: 936.4. Samples: 583394. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:54:00,081][11028] Avg episode reward: [(0, '15.783')] +[2023-02-28 15:54:00,286][11230] Updated weights for policy 0, policy_version 570 (0.0013) +[2023-02-28 15:54:05,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 2347008. Throughput: 0: 911.0. Samples: 587718. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:54:05,082][11028] Avg episode reward: [(0, '16.143')] +[2023-02-28 15:54:10,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3651.7). Total num frames: 2367488. Throughput: 0: 916.1. Samples: 590000. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:54:10,081][11028] Avg episode reward: [(0, '15.853')] +[2023-02-28 15:54:11,937][11230] Updated weights for policy 0, policy_version 580 (0.0018) +[2023-02-28 15:54:15,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 2387968. Throughput: 0: 955.8. Samples: 596750. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:54:15,077][11028] Avg episode reward: [(0, '16.099')] +[2023-02-28 15:54:20,078][11028] Fps is (10 sec: 3685.2, 60 sec: 3686.2, 300 sec: 3651.6). Total num frames: 2404352. Throughput: 0: 930.5. Samples: 602616. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:54:20,081][11028] Avg episode reward: [(0, '16.867')] +[2023-02-28 15:54:22,919][11230] Updated weights for policy 0, policy_version 590 (0.0017) +[2023-02-28 15:54:25,080][11028] Fps is (10 sec: 3275.3, 60 sec: 3754.5, 300 sec: 3637.7). Total num frames: 2420736. Throughput: 0: 905.4. Samples: 604818. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:54:25,099][11028] Avg episode reward: [(0, '16.433')] +[2023-02-28 15:54:30,075][11028] Fps is (10 sec: 3277.9, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 2437120. Throughput: 0: 910.4. Samples: 609316. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:54:30,078][11028] Avg episode reward: [(0, '16.472')] +[2023-02-28 15:54:30,090][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000595_2437120.pth... +[2023-02-28 15:54:30,205][11217] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000382_1564672.pth +[2023-02-28 15:54:34,072][11230] Updated weights for policy 0, policy_version 600 (0.0030) +[2023-02-28 15:54:35,075][11028] Fps is (10 sec: 4097.8, 60 sec: 3686.5, 300 sec: 3665.6). Total num frames: 2461696. Throughput: 0: 939.5. Samples: 615958. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:54:35,078][11028] Avg episode reward: [(0, '17.693')] +[2023-02-28 15:54:35,087][11217] Saving new best policy, reward=17.693! +[2023-02-28 15:54:40,075][11028] Fps is (10 sec: 4096.1, 60 sec: 3686.6, 300 sec: 3651.7). Total num frames: 2478080. Throughput: 0: 935.6. Samples: 619202. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:54:40,080][11028] Avg episode reward: [(0, '17.505')] +[2023-02-28 15:54:45,075][11028] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 2494464. Throughput: 0: 901.4. Samples: 623956. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:54:45,079][11028] Avg episode reward: [(0, '18.024')] +[2023-02-28 15:54:45,085][11217] Saving new best policy, reward=18.024! +[2023-02-28 15:54:45,797][11230] Updated weights for policy 0, policy_version 610 (0.0026) +[2023-02-28 15:54:50,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 2510848. Throughput: 0: 909.7. Samples: 628654. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:54:50,077][11028] Avg episode reward: [(0, '17.172')] +[2023-02-28 15:54:55,075][11028] Fps is (10 sec: 4096.1, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 2535424. Throughput: 0: 934.8. Samples: 632068. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:54:55,078][11028] Avg episode reward: [(0, '15.935')] +[2023-02-28 15:54:55,902][11230] Updated weights for policy 0, policy_version 620 (0.0016) +[2023-02-28 15:55:00,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3679.5). Total num frames: 2555904. Throughput: 0: 933.7. Samples: 638766. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:55:00,086][11028] Avg episode reward: [(0, '15.418')] +[2023-02-28 15:55:05,076][11028] Fps is (10 sec: 3276.6, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 2568192. Throughput: 0: 902.3. Samples: 643216. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:55:05,081][11028] Avg episode reward: [(0, '15.210')] +[2023-02-28 15:55:08,259][11230] Updated weights for policy 0, policy_version 630 (0.0018) +[2023-02-28 15:55:10,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 2584576. Throughput: 0: 902.0. Samples: 645406. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:55:10,078][11028] Avg episode reward: [(0, '15.277')] +[2023-02-28 15:55:15,075][11028] Fps is (10 sec: 3686.7, 60 sec: 3618.1, 300 sec: 3665.6). Total num frames: 2605056. Throughput: 0: 937.3. Samples: 651494. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:55:15,078][11028] Avg episode reward: [(0, '16.451')] +[2023-02-28 15:55:17,921][11230] Updated weights for policy 0, policy_version 640 (0.0021) +[2023-02-28 15:55:20,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3754.9, 300 sec: 3693.3). Total num frames: 2629632. Throughput: 0: 938.5. Samples: 658192. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-02-28 15:55:20,077][11028] Avg episode reward: [(0, '16.100')] +[2023-02-28 15:55:25,078][11028] Fps is (10 sec: 3685.5, 60 sec: 3686.5, 300 sec: 3651.7). Total num frames: 2641920. Throughput: 0: 915.4. Samples: 660398. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:55:25,082][11028] Avg episode reward: [(0, '16.718')] +[2023-02-28 15:55:30,078][11028] Fps is (10 sec: 2867.2, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 2658304. Throughput: 0: 902.1. Samples: 664550. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:55:30,082][11028] Avg episode reward: [(0, '17.707')] +[2023-02-28 15:55:30,869][11230] Updated weights for policy 0, policy_version 650 (0.0016) +[2023-02-28 15:55:35,075][11028] Fps is (10 sec: 3687.3, 60 sec: 3618.1, 300 sec: 3665.6). Total num frames: 2678784. Throughput: 0: 940.8. Samples: 670988. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:55:35,082][11028] Avg episode reward: [(0, '17.657')] +[2023-02-28 15:55:39,632][11230] Updated weights for policy 0, policy_version 660 (0.0022) +[2023-02-28 15:55:40,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 2703360. Throughput: 0: 943.6. Samples: 674528. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:55:40,082][11028] Avg episode reward: [(0, '17.678')] +[2023-02-28 15:55:45,075][11028] Fps is (10 sec: 4095.9, 60 sec: 3754.7, 300 sec: 3679.5). Total num frames: 2719744. Throughput: 0: 910.5. Samples: 679738. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:55:45,078][11028] Avg episode reward: [(0, '18.798')] +[2023-02-28 15:55:45,081][11217] Saving new best policy, reward=18.798! +[2023-02-28 15:55:50,075][11028] Fps is (10 sec: 2867.1, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 2732032. Throughput: 0: 906.0. Samples: 683984. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:55:50,083][11028] Avg episode reward: [(0, '19.250')] +[2023-02-28 15:55:50,103][11217] Saving new best policy, reward=19.250! +[2023-02-28 15:55:52,615][11230] Updated weights for policy 0, policy_version 670 (0.0017) +[2023-02-28 15:55:55,075][11028] Fps is (10 sec: 3276.9, 60 sec: 3618.1, 300 sec: 3665.6). Total num frames: 2752512. Throughput: 0: 922.4. Samples: 686916. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:55:55,083][11028] Avg episode reward: [(0, '18.018')] +[2023-02-28 15:56:00,075][11028] Fps is (10 sec: 4505.7, 60 sec: 3686.4, 300 sec: 3707.2). Total num frames: 2777088. Throughput: 0: 937.5. Samples: 693682. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:56:00,077][11028] Avg episode reward: [(0, '17.516')] +[2023-02-28 15:56:02,016][11230] Updated weights for policy 0, policy_version 680 (0.0025) +[2023-02-28 15:56:05,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3679.5). Total num frames: 2793472. Throughput: 0: 901.4. Samples: 698754. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-28 15:56:05,078][11028] Avg episode reward: [(0, '17.235')] +[2023-02-28 15:56:10,076][11028] Fps is (10 sec: 2867.2, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 2805760. Throughput: 0: 899.2. Samples: 700860. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0) +[2023-02-28 15:56:10,085][11028] Avg episode reward: [(0, '16.873')] +[2023-02-28 15:56:14,410][11230] Updated weights for policy 0, policy_version 690 (0.0044) +[2023-02-28 15:56:15,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 2826240. Throughput: 0: 930.1. Samples: 706406. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:56:15,082][11028] Avg episode reward: [(0, '16.367')] +[2023-02-28 15:56:20,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3693.3). Total num frames: 2846720. Throughput: 0: 935.4. Samples: 713082. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:56:20,078][11028] Avg episode reward: [(0, '16.854')] +[2023-02-28 15:56:25,075][11028] Fps is (10 sec: 3686.3, 60 sec: 3686.5, 300 sec: 3679.5). Total num frames: 2863104. Throughput: 0: 910.9. Samples: 715520. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:56:25,081][11028] Avg episode reward: [(0, '19.066')] +[2023-02-28 15:56:25,541][11230] Updated weights for policy 0, policy_version 700 (0.0018) +[2023-02-28 15:56:30,076][11028] Fps is (10 sec: 3276.6, 60 sec: 3686.4, 300 sec: 3651.7). 
Total num frames: 2879488. Throughput: 0: 889.5. Samples: 719764. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:56:30,080][11028] Avg episode reward: [(0, '19.649')] +[2023-02-28 15:56:30,095][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000703_2879488.pth... +[2023-02-28 15:56:30,238][11217] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000488_1998848.pth +[2023-02-28 15:56:30,255][11217] Saving new best policy, reward=19.649! +[2023-02-28 15:56:35,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 2899968. Throughput: 0: 925.0. Samples: 725610. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:56:35,077][11028] Avg episode reward: [(0, '18.937')] +[2023-02-28 15:56:36,532][11230] Updated weights for policy 0, policy_version 710 (0.0039) +[2023-02-28 15:56:40,075][11028] Fps is (10 sec: 4096.3, 60 sec: 3618.1, 300 sec: 3693.3). Total num frames: 2920448. Throughput: 0: 935.8. Samples: 729026. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:56:40,080][11028] Avg episode reward: [(0, '19.545')] +[2023-02-28 15:56:45,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3679.5). Total num frames: 2936832. Throughput: 0: 913.6. Samples: 734794. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:56:45,078][11028] Avg episode reward: [(0, '19.678')] +[2023-02-28 15:56:45,080][11217] Saving new best policy, reward=19.678! +[2023-02-28 15:56:48,370][11230] Updated weights for policy 0, policy_version 720 (0.0016) +[2023-02-28 15:56:50,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 2953216. Throughput: 0: 892.6. Samples: 738922. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:56:50,081][11028] Avg episode reward: [(0, '18.819')] +[2023-02-28 15:56:55,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3651.7). Total num frames: 2965504. Throughput: 0: 884.5. Samples: 740662. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:56:55,080][11028] Avg episode reward: [(0, '17.972')] +[2023-02-28 15:57:00,075][11028] Fps is (10 sec: 2457.7, 60 sec: 3345.1, 300 sec: 3637.8). Total num frames: 2977792. Throughput: 0: 854.2. Samples: 744846. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:57:00,081][11028] Avg episode reward: [(0, '19.307')] +[2023-02-28 15:57:02,307][11230] Updated weights for policy 0, policy_version 730 (0.0033) +[2023-02-28 15:57:05,077][11028] Fps is (10 sec: 3276.3, 60 sec: 3413.3, 300 sec: 3637.8). Total num frames: 2998272. Throughput: 0: 823.8. Samples: 750156. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:57:05,081][11028] Avg episode reward: [(0, '18.943')] +[2023-02-28 15:57:10,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3610.0). Total num frames: 3010560. Throughput: 0: 818.9. Samples: 752370. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:57:10,078][11028] Avg episode reward: [(0, '18.384')] +[2023-02-28 15:57:15,062][11230] Updated weights for policy 0, policy_version 740 (0.0014) +[2023-02-28 15:57:15,075][11028] Fps is (10 sec: 3277.2, 60 sec: 3413.3, 300 sec: 3623.9). Total num frames: 3031040. Throughput: 0: 827.1. Samples: 756982. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:57:15,080][11028] Avg episode reward: [(0, '19.359')] +[2023-02-28 15:57:20,075][11028] Fps is (10 sec: 4096.1, 60 sec: 3413.3, 300 sec: 3637.8). Total num frames: 3051520. 
Throughput: 0: 847.1. Samples: 763728. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:57:20,081][11028] Avg episode reward: [(0, '20.376')] +[2023-02-28 15:57:20,093][11217] Saving new best policy, reward=20.376! +[2023-02-28 15:57:24,642][11230] Updated weights for policy 0, policy_version 750 (0.0014) +[2023-02-28 15:57:25,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3637.8). Total num frames: 3072000. Throughput: 0: 845.6. Samples: 767080. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:57:25,078][11028] Avg episode reward: [(0, '22.173')] +[2023-02-28 15:57:25,087][11217] Saving new best policy, reward=22.173! +[2023-02-28 15:57:30,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3413.4, 300 sec: 3610.0). Total num frames: 3084288. Throughput: 0: 812.7. Samples: 771366. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:57:30,079][11028] Avg episode reward: [(0, '21.494')] +[2023-02-28 15:57:35,075][11028] Fps is (10 sec: 2867.3, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 3100672. Throughput: 0: 830.0. Samples: 776270. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:57:35,077][11028] Avg episode reward: [(0, '21.814')] +[2023-02-28 15:57:37,108][11230] Updated weights for policy 0, policy_version 760 (0.0015) +[2023-02-28 15:57:40,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3637.8). Total num frames: 3125248. Throughput: 0: 869.6. Samples: 779796. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:57:40,078][11028] Avg episode reward: [(0, '21.806')] +[2023-02-28 15:57:45,075][11028] Fps is (10 sec: 4505.4, 60 sec: 3481.6, 300 sec: 3637.8). Total num frames: 3145728. Throughput: 0: 929.3. Samples: 786666. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:57:45,078][11028] Avg episode reward: [(0, '20.597')] +[2023-02-28 15:57:47,245][11230] Updated weights for policy 0, policy_version 770 (0.0032) +[2023-02-28 15:57:50,079][11028] Fps is (10 sec: 3275.7, 60 sec: 3413.2, 300 sec: 3610.0). Total num frames: 3158016. Throughput: 0: 906.3. Samples: 790942. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:57:50,084][11028] Avg episode reward: [(0, '21.074')] +[2023-02-28 15:57:55,075][11028] Fps is (10 sec: 2867.3, 60 sec: 3481.6, 300 sec: 3610.1). Total num frames: 3174400. Throughput: 0: 905.6. Samples: 793120. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:57:55,084][11028] Avg episode reward: [(0, '20.474')] +[2023-02-28 15:57:58,904][11230] Updated weights for policy 0, policy_version 780 (0.0018) +[2023-02-28 15:58:00,075][11028] Fps is (10 sec: 4097.4, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3198976. Throughput: 0: 943.2. Samples: 799426. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:58:00,083][11028] Avg episode reward: [(0, '21.947')] +[2023-02-28 15:58:05,081][11028] Fps is (10 sec: 4503.1, 60 sec: 3686.1, 300 sec: 3651.6). Total num frames: 3219456. Throughput: 0: 933.4. Samples: 805738. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:58:05,083][11028] Avg episode reward: [(0, '21.589')] +[2023-02-28 15:58:10,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 3231744. Throughput: 0: 905.2. Samples: 807816. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:58:10,082][11028] Avg episode reward: [(0, '22.278')] +[2023-02-28 15:58:10,094][11217] Saving new best policy, reward=22.278! 
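+The "Saving new best policy" entries above fire whenever the reported average episode reward beats the best value seen so far, and the periodic Saving/Removing checkpoint pairs come from the same bookkeeping. A minimal Python sketch of the best-reward tracking, assuming a standalone helper (the class and its file-naming scheme are illustrative, not Sample Factory's actual internals):
+
+    import os
+
+    class BestPolicyTracker:
+        """Illustrative sketch of best-reward bookkeeping; not Sample Factory internals."""
+
+        def __init__(self, checkpoint_dir: str):
+            self.checkpoint_dir = checkpoint_dir
+            self.best_reward = float("-inf")
+
+        def update(self, avg_episode_reward: float, policy_version: int, total_frames: int):
+            # Save only on strict improvement, matching "Saving new best policy, reward=...!"
+            if avg_episode_reward <= self.best_reward:
+                return None
+            self.best_reward = avg_episode_reward
+            print(f"Saving new best policy, reward={avg_episode_reward:.3f}!")
+            name = f"best_{policy_version:09d}_{total_frames}_reward_{avg_episode_reward:.3f}.pth"
+            return os.path.join(self.checkpoint_dir, name)  # caller would torch.save() the model here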
+[2023-02-28 15:58:10,454][11230] Updated weights for policy 0, policy_version 790 (0.0012) +[2023-02-28 15:58:15,080][11028] Fps is (10 sec: 2867.3, 60 sec: 3617.8, 300 sec: 3610.0). Total num frames: 3248128. Throughput: 0: 905.1. Samples: 812098. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:58:15,082][11028] Avg episode reward: [(0, '22.015')] +[2023-02-28 15:58:20,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3272704. Throughput: 0: 942.0. Samples: 818658. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:58:20,078][11028] Avg episode reward: [(0, '21.924')] +[2023-02-28 15:58:20,874][11230] Updated weights for policy 0, policy_version 800 (0.0022) +[2023-02-28 15:58:25,076][11028] Fps is (10 sec: 4507.3, 60 sec: 3686.3, 300 sec: 3651.7). Total num frames: 3293184. Throughput: 0: 941.4. Samples: 822158. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 15:58:25,082][11028] Avg episode reward: [(0, '22.042')] +[2023-02-28 15:58:30,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3623.9). Total num frames: 3309568. Throughput: 0: 900.7. Samples: 827196. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:58:30,081][11028] Avg episode reward: [(0, '21.591')] +[2023-02-28 15:58:30,096][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000808_3309568.pth... +[2023-02-28 15:58:30,271][11217] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000595_2437120.pth +[2023-02-28 15:58:32,812][11230] Updated weights for policy 0, policy_version 810 (0.0012) +[2023-02-28 15:58:35,075][11028] Fps is (10 sec: 2867.6, 60 sec: 3686.4, 300 sec: 3610.1). Total num frames: 3321856. Throughput: 0: 902.4. Samples: 831548. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:58:35,078][11028] Avg episode reward: [(0, '21.774')] +[2023-02-28 15:58:40,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3346432. Throughput: 0: 931.0. Samples: 835014. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:58:40,078][11028] Avg episode reward: [(0, '21.136')] +[2023-02-28 15:58:42,569][11230] Updated weights for policy 0, policy_version 820 (0.0017) +[2023-02-28 15:58:45,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3366912. Throughput: 0: 940.2. Samples: 841734. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 15:58:45,081][11028] Avg episode reward: [(0, '20.347')] +[2023-02-28 15:58:50,077][11028] Fps is (10 sec: 3685.8, 60 sec: 3754.8, 300 sec: 3623.9). Total num frames: 3383296. Throughput: 0: 905.1. Samples: 846464. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:58:50,079][11028] Avg episode reward: [(0, '19.532')] +[2023-02-28 15:58:55,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 3395584. Throughput: 0: 906.6. Samples: 848612. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:58:55,080][11028] Avg episode reward: [(0, '19.507')] +[2023-02-28 15:58:55,583][11230] Updated weights for policy 0, policy_version 830 (0.0016) +[2023-02-28 15:59:00,075][11028] Fps is (10 sec: 3277.3, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 3416064. Throughput: 0: 939.4. Samples: 854366. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:59:00,080][11028] Avg episode reward: [(0, '20.377')] +[2023-02-28 15:59:04,619][11230] Updated weights for policy 0, policy_version 840 (0.0017) +[2023-02-28 15:59:05,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3686.7, 300 sec: 3637.8). Total num frames: 3440640. Throughput: 0: 946.2. Samples: 861238. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 15:59:05,078][11028] Avg episode reward: [(0, '20.965')] +[2023-02-28 15:59:10,077][11028] Fps is (10 sec: 4095.3, 60 sec: 3754.6, 300 sec: 3623.9). Total num frames: 3457024. Throughput: 0: 921.9. Samples: 863646. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:59:10,080][11028] Avg episode reward: [(0, '19.822')] +[2023-02-28 15:59:15,078][11028] Fps is (10 sec: 2866.5, 60 sec: 3686.6, 300 sec: 3610.0). Total num frames: 3469312. Throughput: 0: 906.5. Samples: 867990. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:59:15,086][11028] Avg episode reward: [(0, '22.136')] +[2023-02-28 15:59:17,408][11230] Updated weights for policy 0, policy_version 850 (0.0017) +[2023-02-28 15:59:20,075][11028] Fps is (10 sec: 3277.3, 60 sec: 3618.1, 300 sec: 3624.0). Total num frames: 3489792. Throughput: 0: 942.8. Samples: 873972. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:59:20,078][11028] Avg episode reward: [(0, '23.026')] +[2023-02-28 15:59:20,095][11217] Saving new best policy, reward=23.026! +[2023-02-28 15:59:25,075][11028] Fps is (10 sec: 4506.7, 60 sec: 3686.5, 300 sec: 3651.7). Total num frames: 3514368. Throughput: 0: 941.7. Samples: 877392. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0) +[2023-02-28 15:59:25,077][11028] Avg episode reward: [(0, '23.522')] +[2023-02-28 15:59:25,083][11217] Saving new best policy, reward=23.522! +[2023-02-28 15:59:26,707][11230] Updated weights for policy 0, policy_version 860 (0.0021) +[2023-02-28 15:59:30,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 3530752. Throughput: 0: 915.1. Samples: 882912. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 15:59:30,082][11028] Avg episode reward: [(0, '23.820')] +[2023-02-28 15:59:30,097][11217] Saving new best policy, reward=23.820! +[2023-02-28 15:59:35,075][11028] Fps is (10 sec: 2867.2, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 3543040. Throughput: 0: 904.6. Samples: 887168. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-02-28 15:59:35,078][11028] Avg episode reward: [(0, '23.374')] +[2023-02-28 15:59:39,171][11230] Updated weights for policy 0, policy_version 870 (0.0019) +[2023-02-28 15:59:40,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3567616. Throughput: 0: 922.6. Samples: 890128. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:59:40,078][11028] Avg episode reward: [(0, '24.945')] +[2023-02-28 15:59:40,090][11217] Saving new best policy, reward=24.945! +[2023-02-28 15:59:45,075][11028] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3588096. Throughput: 0: 944.0. Samples: 896846. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-28 15:59:45,085][11028] Avg episode reward: [(0, '25.451')] +[2023-02-28 15:59:45,087][11217] Saving new best policy, reward=25.451! +[2023-02-28 15:59:49,240][11230] Updated weights for policy 0, policy_version 880 (0.0027) +[2023-02-28 15:59:50,077][11028] Fps is (10 sec: 3685.5, 60 sec: 3686.3, 300 sec: 3623.9). Total num frames: 3604480. Throughput: 0: 906.7. 
Samples: 902042. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 15:59:50,080][11028] Avg episode reward: [(0, '23.852')] +[2023-02-28 15:59:55,079][11028] Fps is (10 sec: 2866.1, 60 sec: 3686.2, 300 sec: 3596.1). Total num frames: 3616768. Throughput: 0: 899.6. Samples: 904128. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 15:59:55,087][11028] Avg episode reward: [(0, '23.875')] +[2023-02-28 16:00:00,077][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.3, 300 sec: 3623.9). Total num frames: 3637248. Throughput: 0: 921.3. Samples: 909450. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 16:00:00,079][11028] Avg episode reward: [(0, '23.750')] +[2023-02-28 16:00:01,044][11230] Updated weights for policy 0, policy_version 890 (0.0016) +[2023-02-28 16:00:05,076][11028] Fps is (10 sec: 4507.2, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3661824. Throughput: 0: 940.9. Samples: 916312. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-28 16:00:05,080][11028] Avg episode reward: [(0, '22.588')] +[2023-02-28 16:00:10,075][11028] Fps is (10 sec: 4097.0, 60 sec: 3686.5, 300 sec: 3637.8). Total num frames: 3678208. Throughput: 0: 925.1. Samples: 919022. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-28 16:00:10,077][11028] Avg episode reward: [(0, '21.334')] +[2023-02-28 16:00:12,403][11230] Updated weights for policy 0, policy_version 900 (0.0029) +[2023-02-28 16:00:15,077][11028] Fps is (10 sec: 2866.9, 60 sec: 3686.5, 300 sec: 3596.1). Total num frames: 3690496. Throughput: 0: 896.5. Samples: 923254. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 16:00:15,081][11028] Avg episode reward: [(0, '21.474')] +[2023-02-28 16:00:20,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 3710976. Throughput: 0: 926.8. Samples: 928874. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 16:00:20,077][11028] Avg episode reward: [(0, '20.988')] +[2023-02-28 16:00:22,846][11230] Updated weights for policy 0, policy_version 910 (0.0028) +[2023-02-28 16:00:25,075][11028] Fps is (10 sec: 4506.2, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3735552. Throughput: 0: 940.3. Samples: 932440. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 16:00:25,083][11028] Avg episode reward: [(0, '20.505')] +[2023-02-28 16:00:30,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3751936. Throughput: 0: 926.1. Samples: 938522. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 16:00:30,085][11028] Avg episode reward: [(0, '20.526')] +[2023-02-28 16:00:30,101][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000916_3751936.pth... +[2023-02-28 16:00:30,260][11217] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000703_2879488.pth +[2023-02-28 16:00:34,706][11230] Updated weights for policy 0, policy_version 920 (0.0025) +[2023-02-28 16:00:35,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3610.0). Total num frames: 3768320. Throughput: 0: 903.6. Samples: 942702. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 16:00:35,078][11028] Avg episode reward: [(0, '21.453')] +[2023-02-28 16:00:40,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3610.0). Total num frames: 3784704. Throughput: 0: 914.1. Samples: 945258. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 16:00:40,078][11028] Avg episode reward: [(0, '20.953')] +[2023-02-28 16:00:44,566][11230] Updated weights for policy 0, policy_version 930 (0.0012) +[2023-02-28 16:00:45,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3809280. Throughput: 0: 949.7. Samples: 952182. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-28 16:00:45,077][11028] Avg episode reward: [(0, '22.913')] +[2023-02-28 16:00:50,078][11028] Fps is (10 sec: 4504.5, 60 sec: 3754.7, 300 sec: 3651.7). Total num frames: 3829760. Throughput: 0: 922.2. Samples: 957814. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 16:00:50,081][11028] Avg episode reward: [(0, '22.317')] +[2023-02-28 16:00:55,076][11028] Fps is (10 sec: 3276.7, 60 sec: 3754.9, 300 sec: 3610.0). Total num frames: 3842048. Throughput: 0: 909.8. Samples: 959964. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 16:00:55,085][11028] Avg episode reward: [(0, '20.630')] +[2023-02-28 16:00:57,432][11230] Updated weights for policy 0, policy_version 940 (0.0034) +[2023-02-28 16:01:00,075][11028] Fps is (10 sec: 2867.9, 60 sec: 3686.5, 300 sec: 3610.0). Total num frames: 3858432. Throughput: 0: 924.6. Samples: 964858. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 16:01:00,078][11028] Avg episode reward: [(0, '21.287')] +[2023-02-28 16:01:05,075][11028] Fps is (10 sec: 4096.2, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3883008. Throughput: 0: 943.8. Samples: 971346. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 16:01:05,078][11028] Avg episode reward: [(0, '21.355')] +[2023-02-28 16:01:06,720][11230] Updated weights for policy 0, policy_version 950 (0.0017) +[2023-02-28 16:01:10,075][11028] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3899392. Throughput: 0: 934.5. Samples: 974494. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-28 16:01:10,083][11028] Avg episode reward: [(0, '21.814')] +[2023-02-28 16:01:15,077][11028] Fps is (10 sec: 3276.3, 60 sec: 3754.7, 300 sec: 3623.9). Total num frames: 3915776. Throughput: 0: 894.2. Samples: 978760. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 16:01:15,080][11028] Avg episode reward: [(0, '20.759')] +[2023-02-28 16:01:19,678][11230] Updated weights for policy 0, policy_version 960 (0.0013) +[2023-02-28 16:01:20,075][11028] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 3932160. Throughput: 0: 915.0. Samples: 983878. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-28 16:01:20,078][11028] Avg episode reward: [(0, '22.124')] +[2023-02-28 16:01:25,075][11028] Fps is (10 sec: 3686.9, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 3952640. Throughput: 0: 931.1. Samples: 987156. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 16:01:25,082][11028] Avg episode reward: [(0, '23.933')] +[2023-02-28 16:01:29,279][11230] Updated weights for policy 0, policy_version 970 (0.0016) +[2023-02-28 16:01:30,081][11028] Fps is (10 sec: 4093.8, 60 sec: 3686.1, 300 sec: 3637.7). Total num frames: 3973120. Throughput: 0: 918.4. Samples: 993516. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 16:01:30,083][11028] Avg episode reward: [(0, '23.391')] +[2023-02-28 16:01:35,075][11028] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 3989504. Throughput: 0: 890.0. Samples: 997864. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-28 16:01:35,078][11028] Avg episode reward: [(0, '23.083')] +[2023-02-28 16:01:39,685][11217] Stopping Batcher_0... +[2023-02-28 16:01:39,694][11217] Loop batcher_evt_loop terminating... +[2023-02-28 16:01:39,687][11028] Component Batcher_0 stopped! +[2023-02-28 16:01:39,696][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth... +[2023-02-28 16:01:39,736][11230] Weights refcount: 2 0 +[2023-02-28 16:01:39,748][11230] Stopping InferenceWorker_p0-w0... +[2023-02-28 16:01:39,745][11028] Component RolloutWorker_w3 stopped! +[2023-02-28 16:01:39,752][11230] Loop inference_proc0-0_evt_loop terminating... +[2023-02-28 16:01:39,750][11028] Component InferenceWorker_p0-w0 stopped! +[2023-02-28 16:01:39,745][11235] Stopping RolloutWorker_w3... +[2023-02-28 16:01:39,765][11235] Loop rollout_proc3_evt_loop terminating... +[2023-02-28 16:01:39,778][11243] Stopping RolloutWorker_w7... +[2023-02-28 16:01:39,777][11028] Component RolloutWorker_w6 stopped! +[2023-02-28 16:01:39,779][11241] Stopping RolloutWorker_w5... +[2023-02-28 16:01:39,779][11232] Stopping RolloutWorker_w1... +[2023-02-28 16:01:39,782][11028] Component RolloutWorker_w7 stopped! +[2023-02-28 16:01:39,786][11028] Component RolloutWorker_w5 stopped! +[2023-02-28 16:01:39,787][11028] Component RolloutWorker_w1 stopped! +[2023-02-28 16:01:39,793][11242] Stopping RolloutWorker_w6... +[2023-02-28 16:01:39,781][11243] Loop rollout_proc7_evt_loop terminating... +[2023-02-28 16:01:39,784][11241] Loop rollout_proc5_evt_loop terminating... +[2023-02-28 16:01:39,785][11232] Loop rollout_proc1_evt_loop terminating... +[2023-02-28 16:01:39,803][11028] Component RolloutWorker_w2 stopped! +[2023-02-28 16:01:39,808][11234] Stopping RolloutWorker_w2... +[2023-02-28 16:01:39,793][11242] Loop rollout_proc6_evt_loop terminating... +[2023-02-28 16:01:39,815][11028] Component RolloutWorker_w4 stopped! +[2023-02-28 16:01:39,818][11239] Stopping RolloutWorker_w4... +[2023-02-28 16:01:39,821][11239] Loop rollout_proc4_evt_loop terminating... +[2023-02-28 16:01:39,822][11234] Loop rollout_proc2_evt_loop terminating... +[2023-02-28 16:01:39,837][11028] Component RolloutWorker_w0 stopped! +[2023-02-28 16:01:39,841][11231] Stopping RolloutWorker_w0... +[2023-02-28 16:01:39,847][11231] Loop rollout_proc0_evt_loop terminating... +[2023-02-28 16:01:39,877][11217] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000808_3309568.pth +[2023-02-28 16:01:39,885][11217] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth... +[2023-02-28 16:01:40,100][11217] Stopping LearnerWorker_p0... +[2023-02-28 16:01:40,101][11217] Loop learner_proc0_evt_loop terminating... +[2023-02-28 16:01:40,100][11028] Component LearnerWorker_p0 stopped! +[2023-02-28 16:01:40,106][11028] Waiting for process learner_proc0 to stop... +[2023-02-28 16:01:41,949][11028] Waiting for process inference_proc0-0 to join... +[2023-02-28 16:01:42,229][11028] Waiting for process rollout_proc0 to join... +[2023-02-28 16:01:42,235][11028] Waiting for process rollout_proc1 to join... +[2023-02-28 16:01:42,938][11028] Waiting for process rollout_proc2 to join... +[2023-02-28 16:01:42,939][11028] Waiting for process rollout_proc3 to join... +[2023-02-28 16:01:42,941][11028] Waiting for process rollout_proc4 to join... +[2023-02-28 16:01:42,942][11028] Waiting for process rollout_proc5 to join... 
+[2023-02-28 16:01:42,943][11028] Waiting for process rollout_proc6 to join... +[2023-02-28 16:01:42,944][11028] Waiting for process rollout_proc7 to join... +[2023-02-28 16:01:42,945][11028] Batcher 0 profile tree view: +batching: 25.6160, releasing_batches: 0.0244 +[2023-02-28 16:01:42,947][11028] InferenceWorker_p0-w0 profile tree view: +wait_policy: 0.0000 + wait_policy_total: 536.9757 +update_model: 8.0150 + weight_update: 0.0026 +one_step: 0.0106 + handle_policy_step: 535.6958 + deserialize: 15.0421, stack: 2.9491, obs_to_device_normalize: 115.8096, forward: 261.3291, send_messages: 26.1026 + prepare_outputs: 87.6382 + to_cpu: 55.1796 +[2023-02-28 16:01:42,948][11028] Learner 0 profile tree view: +misc: 0.0055, prepare_batch: 16.2617 +train: 76.6077 + epoch_init: 0.0083, minibatch_init: 0.0109, losses_postprocess: 0.5747, kl_divergence: 0.6282, after_optimizer: 33.1540 + calculate_losses: 27.1419 + losses_init: 0.0047, forward_head: 1.7557, bptt_initial: 17.9712, tail: 1.1279, advantages_returns: 0.3304, losses: 3.3024 + bptt: 2.3486 + bptt_forward_core: 2.2612 + update: 14.4677 + clip: 1.3797 +[2023-02-28 16:01:42,949][11028] RolloutWorker_w0 profile tree view: +wait_for_trajectories: 0.4108, enqueue_policy_requests: 145.1765, env_step: 843.8149, overhead: 22.4390, complete_rollouts: 7.1154 +save_policy_outputs: 20.5511 + split_output_tensors: 10.2239 +[2023-02-28 16:01:42,951][11028] RolloutWorker_w7 profile tree view: +wait_for_trajectories: 0.3180, enqueue_policy_requests: 147.7048, env_step: 841.0940, overhead: 22.0080, complete_rollouts: 7.8867 +save_policy_outputs: 20.3061 + split_output_tensors: 9.9457 +[2023-02-28 16:01:42,952][11028] Loop Runner_EvtLoop terminating... +[2023-02-28 16:01:42,954][11028] Runner profile tree view: +main_loop: 1147.8341 +[2023-02-28 16:01:42,955][11028] Collected {0: 4005888}, FPS: 3490.0 +[2023-02-28 16:02:10,307][11028] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json +[2023-02-28 16:02:10,310][11028] Overriding arg 'num_workers' with value 1 passed from command line +[2023-02-28 16:02:10,312][11028] Adding new argument 'no_render'=True that is not in the saved config file! +[2023-02-28 16:02:10,315][11028] Adding new argument 'save_video'=True that is not in the saved config file! +[2023-02-28 16:02:10,316][11028] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file! +[2023-02-28 16:02:10,319][11028] Adding new argument 'video_name'=None that is not in the saved config file! +[2023-02-28 16:02:10,321][11028] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file! +[2023-02-28 16:02:10,322][11028] Adding new argument 'max_num_episodes'=10 that is not in the saved config file! +[2023-02-28 16:02:10,323][11028] Adding new argument 'push_to_hub'=False that is not in the saved config file! +[2023-02-28 16:02:10,325][11028] Adding new argument 'hf_repository'=None that is not in the saved config file! +[2023-02-28 16:02:10,329][11028] Adding new argument 'policy_index'=0 that is not in the saved config file! +[2023-02-28 16:02:10,330][11028] Adding new argument 'eval_deterministic'=False that is not in the saved config file! +[2023-02-28 16:02:10,332][11028] Adding new argument 'train_script'=None that is not in the saved config file! +[2023-02-28 16:02:10,333][11028] Adding new argument 'enjoy_script'=None that is not in the saved config file! 
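+Both evaluation runs in this log (16:02:10 and 16:13:30) restore the saved training configuration and then layer command-line arguments on top: keys already present in config.json are overridden, and keys the trainer never saw are added with a warning. A minimal sketch of that merge, assuming a plain-dict config (Sample Factory's real loader is more involved):
+
+    import json
+
+    def load_config_with_overrides(config_path: str, cli_args: dict) -> dict:
+        """Illustrative merge of saved config and CLI args, mirroring the log messages."""
+        with open(config_path) as f:
+            cfg = json.load(f)
+        for key, value in cli_args.items():
+            if key in cfg:
+                print(f"Overriding arg '{key}' with value {value} passed from command line")
+            else:
+                print(f"Adding new argument '{key}'={value} that is not in the saved config file!")
+            cfg[key] = value
+        return cfg
+
+    # e.g. load_config_with_overrides("/content/train_dir/default_experiment/config.json",
+    #                                 {"num_workers": 1, "no_render": True, "save_video": True})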
+[2023-02-28 16:02:10,334][11028] Using frameskip 1 and render_action_repeat=4 for evaluation +[2023-02-28 16:02:10,359][11028] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-02-28 16:02:10,361][11028] RunningMeanStd input shape: (3, 72, 128) +[2023-02-28 16:02:10,366][11028] RunningMeanStd input shape: (1,) +[2023-02-28 16:02:10,382][11028] ConvEncoder: input_channels=3 +[2023-02-28 16:02:11,028][11028] Conv encoder output size: 512 +[2023-02-28 16:02:11,031][11028] Policy head output size: 512 +[2023-02-28 16:02:13,597][11028] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth... +[2023-02-28 16:02:15,393][11028] Num frames 100... +[2023-02-28 16:02:15,552][11028] Num frames 200... +[2023-02-28 16:02:15,716][11028] Num frames 300... +[2023-02-28 16:02:15,883][11028] Num frames 400... +[2023-02-28 16:02:16,029][11028] Avg episode rewards: #0: 8.560, true rewards: #0: 4.560 +[2023-02-28 16:02:16,032][11028] Avg episode reward: 8.560, avg true_objective: 4.560 +[2023-02-28 16:02:16,104][11028] Num frames 500... +[2023-02-28 16:02:16,230][11028] Num frames 600... +[2023-02-28 16:02:16,349][11028] Num frames 700... +[2023-02-28 16:02:16,457][11028] Num frames 800... +[2023-02-28 16:02:16,567][11028] Num frames 900... +[2023-02-28 16:02:16,681][11028] Num frames 1000... +[2023-02-28 16:02:16,790][11028] Num frames 1100... +[2023-02-28 16:02:16,904][11028] Num frames 1200... +[2023-02-28 16:02:17,016][11028] Num frames 1300... +[2023-02-28 16:02:17,129][11028] Num frames 1400... +[2023-02-28 16:02:17,239][11028] Avg episode rewards: #0: 15.240, true rewards: #0: 7.240 +[2023-02-28 16:02:17,245][11028] Avg episode reward: 15.240, avg true_objective: 7.240 +[2023-02-28 16:02:17,308][11028] Num frames 1500... +[2023-02-28 16:02:17,421][11028] Num frames 1600... +[2023-02-28 16:02:17,536][11028] Num frames 1700... +[2023-02-28 16:02:17,650][11028] Num frames 1800... +[2023-02-28 16:02:17,763][11028] Num frames 1900... +[2023-02-28 16:02:17,919][11028] Avg episode rewards: #0: 12.640, true rewards: #0: 6.640 +[2023-02-28 16:02:17,927][11028] Avg episode reward: 12.640, avg true_objective: 6.640 +[2023-02-28 16:02:17,940][11028] Num frames 2000... +[2023-02-28 16:02:18,052][11028] Num frames 2100... +[2023-02-28 16:02:18,163][11028] Num frames 2200... +[2023-02-28 16:02:18,274][11028] Num frames 2300... +[2023-02-28 16:02:18,394][11028] Num frames 2400... +[2023-02-28 16:02:18,503][11028] Num frames 2500... +[2023-02-28 16:02:18,614][11028] Num frames 2600... +[2023-02-28 16:02:18,792][11028] Avg episode rewards: #0: 12.490, true rewards: #0: 6.740 +[2023-02-28 16:02:18,795][11028] Avg episode reward: 12.490, avg true_objective: 6.740 +[2023-02-28 16:02:18,803][11028] Num frames 2700... +[2023-02-28 16:02:18,937][11028] Num frames 2800... +[2023-02-28 16:02:19,048][11028] Num frames 2900... +[2023-02-28 16:02:19,157][11028] Num frames 3000... +[2023-02-28 16:02:19,265][11028] Num frames 3100... +[2023-02-28 16:02:19,387][11028] Num frames 3200... +[2023-02-28 16:02:19,522][11028] Avg episode rewards: #0: 11.946, true rewards: #0: 6.546 +[2023-02-28 16:02:19,524][11028] Avg episode reward: 11.946, avg true_objective: 6.546 +[2023-02-28 16:02:19,558][11028] Num frames 3300... +[2023-02-28 16:02:19,668][11028] Num frames 3400... +[2023-02-28 16:02:19,788][11028] Num frames 3500... +[2023-02-28 16:02:19,903][11028] Num frames 3600... +[2023-02-28 16:02:20,015][11028] Num frames 3700... 
+[2023-02-28 16:02:20,127][11028] Num frames 3800... +[2023-02-28 16:02:20,240][11028] Num frames 3900... +[2023-02-28 16:02:20,355][11028] Num frames 4000... +[2023-02-28 16:02:20,472][11028] Num frames 4100... +[2023-02-28 16:02:20,582][11028] Num frames 4200... +[2023-02-28 16:02:20,693][11028] Num frames 4300... +[2023-02-28 16:02:20,806][11028] Num frames 4400... +[2023-02-28 16:02:20,918][11028] Num frames 4500... +[2023-02-28 16:02:20,988][11028] Avg episode rewards: #0: 14.352, true rewards: #0: 7.518 +[2023-02-28 16:02:20,990][11028] Avg episode reward: 14.352, avg true_objective: 7.518 +[2023-02-28 16:02:21,096][11028] Num frames 4600... +[2023-02-28 16:02:21,205][11028] Num frames 4700... +[2023-02-28 16:02:21,321][11028] Num frames 4800... +[2023-02-28 16:02:21,437][11028] Num frames 4900... +[2023-02-28 16:02:21,548][11028] Num frames 5000... +[2023-02-28 16:02:21,662][11028] Num frames 5100... +[2023-02-28 16:02:21,783][11028] Num frames 5200... +[2023-02-28 16:02:21,895][11028] Num frames 5300... +[2023-02-28 16:02:22,007][11028] Num frames 5400... +[2023-02-28 16:02:22,119][11028] Num frames 5500... +[2023-02-28 16:02:22,231][11028] Num frames 5600... +[2023-02-28 16:02:22,310][11028] Avg episode rewards: #0: 16.173, true rewards: #0: 8.030 +[2023-02-28 16:02:22,314][11028] Avg episode reward: 16.173, avg true_objective: 8.030 +[2023-02-28 16:02:22,408][11028] Num frames 5700... +[2023-02-28 16:02:22,529][11028] Num frames 5800... +[2023-02-28 16:02:22,639][11028] Num frames 5900... +[2023-02-28 16:02:22,751][11028] Num frames 6000... +[2023-02-28 16:02:22,869][11028] Num frames 6100... +[2023-02-28 16:02:22,989][11028] Num frames 6200... +[2023-02-28 16:02:23,106][11028] Num frames 6300... +[2023-02-28 16:02:23,214][11028] Num frames 6400... +[2023-02-28 16:02:23,324][11028] Num frames 6500... +[2023-02-28 16:02:23,433][11028] Num frames 6600... +[2023-02-28 16:02:23,563][11028] Num frames 6700... +[2023-02-28 16:02:23,671][11028] Avg episode rewards: #0: 17.051, true rewards: #0: 8.426 +[2023-02-28 16:02:23,673][11028] Avg episode reward: 17.051, avg true_objective: 8.426 +[2023-02-28 16:02:23,743][11028] Num frames 6800... +[2023-02-28 16:02:23,855][11028] Num frames 6900... +[2023-02-28 16:02:23,975][11028] Num frames 7000... +[2023-02-28 16:02:24,096][11028] Num frames 7100... +[2023-02-28 16:02:24,207][11028] Num frames 7200... +[2023-02-28 16:02:24,324][11028] Num frames 7300... +[2023-02-28 16:02:24,442][11028] Num frames 7400... +[2023-02-28 16:02:24,561][11028] Num frames 7500... +[2023-02-28 16:02:24,674][11028] Num frames 7600... +[2023-02-28 16:02:24,787][11028] Num frames 7700... +[2023-02-28 16:02:24,908][11028] Num frames 7800... +[2023-02-28 16:02:25,020][11028] Num frames 7900... +[2023-02-28 16:02:25,141][11028] Num frames 8000... +[2023-02-28 16:02:25,255][11028] Num frames 8100... +[2023-02-28 16:02:25,372][11028] Num frames 8200... +[2023-02-28 16:02:25,488][11028] Num frames 8300... +[2023-02-28 16:02:25,599][11028] Num frames 8400... +[2023-02-28 16:02:25,746][11028] Avg episode rewards: #0: 20.315, true rewards: #0: 9.426 +[2023-02-28 16:02:25,748][11028] Avg episode reward: 20.315, avg true_objective: 9.426 +[2023-02-28 16:02:25,774][11028] Num frames 8500... +[2023-02-28 16:02:25,894][11028] Num frames 8600... +[2023-02-28 16:02:26,007][11028] Num frames 8700... +[2023-02-28 16:02:26,123][11028] Num frames 8800... +[2023-02-28 16:02:26,279][11028] Num frames 8900... +[2023-02-28 16:02:26,435][11028] Num frames 9000... 
+[2023-02-28 16:02:26,590][11028] Num frames 9100... +[2023-02-28 16:02:26,754][11028] Num frames 9200... +[2023-02-28 16:02:26,919][11028] Num frames 9300... +[2023-02-28 16:02:27,083][11028] Num frames 9400... +[2023-02-28 16:02:27,241][11028] Num frames 9500... +[2023-02-28 16:02:27,399][11028] Num frames 9600... +[2023-02-28 16:02:27,558][11028] Num frames 9700... +[2023-02-28 16:02:27,767][11028] Avg episode rewards: #0: 21.296, true rewards: #0: 9.796 +[2023-02-28 16:02:27,774][11028] Avg episode reward: 21.296, avg true_objective: 9.796 +[2023-02-28 16:03:30,667][11028] Replay video saved to /content/train_dir/default_experiment/replay.mp4! +[2023-02-28 16:13:30,731][11028] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json +[2023-02-28 16:13:30,735][11028] Overriding arg 'num_workers' with value 1 passed from command line +[2023-02-28 16:13:30,737][11028] Adding new argument 'no_render'=True that is not in the saved config file! +[2023-02-28 16:13:30,740][11028] Adding new argument 'save_video'=True that is not in the saved config file! +[2023-02-28 16:13:30,743][11028] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file! +[2023-02-28 16:13:30,746][11028] Adding new argument 'video_name'=None that is not in the saved config file! +[2023-02-28 16:13:30,750][11028] Adding new argument 'max_num_frames'=100000 that is not in the saved config file! +[2023-02-28 16:13:30,752][11028] Adding new argument 'max_num_episodes'=10 that is not in the saved config file! +[2023-02-28 16:13:30,754][11028] Adding new argument 'push_to_hub'=True that is not in the saved config file! +[2023-02-28 16:13:30,756][11028] Adding new argument 'hf_repository'='bonadio/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file! +[2023-02-28 16:13:30,757][11028] Adding new argument 'policy_index'=0 that is not in the saved config file! +[2023-02-28 16:13:30,759][11028] Adding new argument 'eval_deterministic'=False that is not in the saved config file! +[2023-02-28 16:13:30,761][11028] Adding new argument 'train_script'=None that is not in the saved config file! +[2023-02-28 16:13:30,763][11028] Adding new argument 'enjoy_script'=None that is not in the saved config file! +[2023-02-28 16:13:30,764][11028] Using frameskip 1 and render_action_repeat=4 for evaluation +[2023-02-28 16:13:30,799][11028] RunningMeanStd input shape: (3, 72, 128) +[2023-02-28 16:13:30,803][11028] RunningMeanStd input shape: (1,) +[2023-02-28 16:13:30,819][11028] ConvEncoder: input_channels=3 +[2023-02-28 16:13:30,860][11028] Conv encoder output size: 512 +[2023-02-28 16:13:30,861][11028] Policy head output size: 512 +[2023-02-28 16:13:30,886][11028] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth... +[2023-02-28 16:13:31,312][11028] Num frames 100... +[2023-02-28 16:13:31,438][11028] Num frames 200... +[2023-02-28 16:13:31,552][11028] Num frames 300... +[2023-02-28 16:13:31,668][11028] Num frames 400... +[2023-02-28 16:13:31,778][11028] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480 +[2023-02-28 16:13:31,785][11028] Avg episode reward: 5.480, avg true_objective: 4.480 +[2023-02-28 16:13:31,853][11028] Num frames 500... +[2023-02-28 16:13:31,972][11028] Num frames 600... +[2023-02-28 16:13:32,083][11028] Num frames 700... +[2023-02-28 16:13:32,198][11028] Num frames 800... +[2023-02-28 16:13:32,310][11028] Num frames 900... 
+[2023-02-28 16:13:32,388][11028] Avg episode rewards: #0: 6.095, true rewards: #0: 4.595 +[2023-02-28 16:13:32,390][11028] Avg episode reward: 6.095, avg true_objective: 4.595 +[2023-02-28 16:13:32,494][11028] Num frames 1000... +[2023-02-28 16:13:32,607][11028] Num frames 1100... +[2023-02-28 16:13:32,727][11028] Num frames 1200... +[2023-02-28 16:13:32,838][11028] Num frames 1300... +[2023-02-28 16:13:32,954][11028] Num frames 1400... +[2023-02-28 16:13:33,064][11028] Num frames 1500... +[2023-02-28 16:13:33,176][11028] Num frames 1600... +[2023-02-28 16:13:33,294][11028] Num frames 1700... +[2023-02-28 16:13:33,450][11028] Num frames 1800... +[2023-02-28 16:13:33,609][11028] Num frames 1900... +[2023-02-28 16:13:33,683][11028] Avg episode rewards: #0: 10.037, true rewards: #0: 6.370 +[2023-02-28 16:13:33,685][11028] Avg episode reward: 10.037, avg true_objective: 6.370 +[2023-02-28 16:13:33,820][11028] Num frames 2000... +[2023-02-28 16:13:33,979][11028] Num frames 2100... +[2023-02-28 16:13:34,132][11028] Num frames 2200... +[2023-02-28 16:13:34,444][11028] Num frames 2300... +[2023-02-28 16:13:34,880][11028] Num frames 2400... +[2023-02-28 16:13:35,260][11028] Num frames 2500... +[2023-02-28 16:13:35,471][11028] Avg episode rewards: #0: 10.878, true rewards: #0: 6.377 +[2023-02-28 16:13:35,473][11028] Avg episode reward: 10.878, avg true_objective: 6.377 +[2023-02-28 16:13:35,667][11028] Num frames 2600... +[2023-02-28 16:13:35,949][11028] Num frames 2700... +[2023-02-28 16:13:36,309][11028] Num frames 2800... +[2023-02-28 16:13:36,633][11028] Num frames 2900... +[2023-02-28 16:13:37,031][11028] Num frames 3000... +[2023-02-28 16:13:37,410][11028] Num frames 3100... +[2023-02-28 16:13:37,609][11028] Num frames 3200... +[2023-02-28 16:13:37,780][11028] Num frames 3300... +[2023-02-28 16:13:37,991][11028] Num frames 3400... +[2023-02-28 16:13:38,206][11028] Num frames 3500... +[2023-02-28 16:13:38,425][11028] Num frames 3600... +[2023-02-28 16:13:38,501][11028] Avg episode rewards: #0: 14.214, true rewards: #0: 7.214 +[2023-02-28 16:13:38,517][11028] Avg episode reward: 14.214, avg true_objective: 7.214 +[2023-02-28 16:13:38,714][11028] Num frames 3700... +[2023-02-28 16:13:38,939][11028] Num frames 3800... +[2023-02-28 16:13:39,224][11028] Num frames 3900... +[2023-02-28 16:13:39,364][11028] Num frames 4000... +[2023-02-28 16:13:39,472][11028] Num frames 4100... +[2023-02-28 16:13:39,583][11028] Num frames 4200... +[2023-02-28 16:13:39,710][11028] Num frames 4300... +[2023-02-28 16:13:39,821][11028] Num frames 4400... +[2023-02-28 16:13:39,939][11028] Num frames 4500... +[2023-02-28 16:13:40,051][11028] Num frames 4600... +[2023-02-28 16:13:40,164][11028] Num frames 4700... +[2023-02-28 16:13:40,292][11028] Avg episode rewards: #0: 15.932, true rewards: #0: 7.932 +[2023-02-28 16:13:40,294][11028] Avg episode reward: 15.932, avg true_objective: 7.932 +[2023-02-28 16:13:40,346][11028] Num frames 4800... +[2023-02-28 16:13:40,469][11028] Num frames 4900... +[2023-02-28 16:13:40,585][11028] Num frames 5000... +[2023-02-28 16:13:40,706][11028] Num frames 5100... +[2023-02-28 16:13:40,822][11028] Num frames 5200... +[2023-02-28 16:13:40,933][11028] Num frames 5300... +[2023-02-28 16:13:41,051][11028] Num frames 5400... +[2023-02-28 16:13:41,166][11028] Num frames 5500... +[2023-02-28 16:13:41,277][11028] Num frames 5600... +[2023-02-28 16:13:41,388][11028] Num frames 5700... +[2023-02-28 16:13:41,506][11028] Num frames 5800... +[2023-02-28 16:13:41,614][11028] Num frames 5900... 
+[2023-02-28 16:13:41,731][11028] Num frames 6000... +[2023-02-28 16:13:41,840][11028] Num frames 6100... +[2023-02-28 16:13:41,952][11028] Num frames 6200... +[2023-02-28 16:13:42,064][11028] Num frames 6300... +[2023-02-28 16:13:42,177][11028] Num frames 6400... +[2023-02-28 16:13:42,305][11028] Num frames 6500... +[2023-02-28 16:13:42,418][11028] Num frames 6600... +[2023-02-28 16:13:42,531][11028] Num frames 6700... +[2023-02-28 16:13:42,652][11028] Num frames 6800... +[2023-02-28 16:13:42,789][11028] Avg episode rewards: #0: 22.227, true rewards: #0: 9.799 +[2023-02-28 16:13:42,791][11028] Avg episode reward: 22.227, avg true_objective: 9.799 +[2023-02-28 16:13:42,842][11028] Num frames 6900... +[2023-02-28 16:13:42,952][11028] Num frames 7000... +[2023-02-28 16:13:43,061][11028] Num frames 7100... +[2023-02-28 16:13:43,170][11028] Num frames 7200... +[2023-02-28 16:13:43,279][11028] Num frames 7300... +[2023-02-28 16:13:43,343][11028] Avg episode rewards: #0: 20.134, true rewards: #0: 9.134 +[2023-02-28 16:13:43,346][11028] Avg episode reward: 20.134, avg true_objective: 9.134 +[2023-02-28 16:13:43,452][11028] Num frames 7400... +[2023-02-28 16:13:43,574][11028] Num frames 7500... +[2023-02-28 16:13:43,699][11028] Num frames 7600... +[2023-02-28 16:13:43,832][11028] Num frames 7700... +[2023-02-28 16:13:43,956][11028] Num frames 7800... +[2023-02-28 16:13:44,074][11028] Num frames 7900... +[2023-02-28 16:13:44,183][11028] Num frames 8000... +[2023-02-28 16:13:44,294][11028] Num frames 8100... +[2023-02-28 16:13:44,404][11028] Num frames 8200... +[2023-02-28 16:13:44,512][11028] Num frames 8300... +[2023-02-28 16:13:44,627][11028] Num frames 8400... +[2023-02-28 16:13:44,748][11028] Num frames 8500... +[2023-02-28 16:13:44,861][11028] Num frames 8600... +[2023-02-28 16:13:44,979][11028] Num frames 8700... +[2023-02-28 16:13:45,097][11028] Num frames 8800... +[2023-02-28 16:13:45,220][11028] Num frames 8900... +[2023-02-28 16:13:45,332][11028] Num frames 9000... +[2023-02-28 16:13:45,445][11028] Num frames 9100... +[2023-02-28 16:13:45,514][11028] Avg episode rewards: #0: 23.233, true rewards: #0: 10.122 +[2023-02-28 16:13:45,515][11028] Avg episode reward: 23.233, avg true_objective: 10.122 +[2023-02-28 16:13:45,620][11028] Num frames 9200... +[2023-02-28 16:13:45,730][11028] Num frames 9300... +[2023-02-28 16:13:45,846][11028] Num frames 9400... +[2023-02-28 16:13:45,958][11028] Num frames 9500... +[2023-02-28 16:13:46,077][11028] Avg episode rewards: #0: 21.458, true rewards: #0: 9.558 +[2023-02-28 16:13:46,082][11028] Avg episode reward: 21.458, avg true_objective: 9.558 +[2023-02-28 16:14:45,318][11028] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
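+Taken together, this final block is an enjoy-style evaluation: ten episodes from checkpoint_000000978_4005888.pth, a replay.mp4 recording, and an upload to the bonadio/rl_course_vizdoom_health_gathering_supreme repository. A sketch of reproducing it with the arguments logged at 16:13:30; the module path and the env name are assumptions (the env is inferred from the repository name), while the flag names and values are taken from the log itself:
+
+    import subprocess
+
+    # Flag names mirror the logged arguments; sf_examples.vizdoom.enjoy_vizdoom is an
+    # assumed Sample Factory 2.x entry point, and the env name is inferred, not logged.
+    subprocess.run([
+        "python", "-m", "sf_examples.vizdoom.enjoy_vizdoom",
+        "--env=doom_health_gathering_supreme",
+        "--train_dir=/content/train_dir",
+        "--experiment=default_experiment",
+        "--num_workers=1",
+        "--no_render",
+        "--save_video",
+        "--max_num_episodes=10",
+        "--max_num_frames=100000",
+        "--push_to_hub",
+        "--hf_repository=bonadio/rl_course_vizdoom_health_gathering_supreme",
+    ], check=True)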