araffin committed
Commit 1300478
1 Parent(s): 3e66064

Initial Commit

Files changed (4)
  1. README.md +2 -2
  2. dqn-MountainCar-v0.zip +1 -1
  3. dqn-MountainCar-v0/data +13 -13
  4. results.json +1 -1
README.md CHANGED
@@ -31,8 +31,8 @@ with hyperparameter optimization and pre-trained agents included.
 
 ## Usage (with SB3 RL Zoo)
 
-RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo
-SB3: https://github.com/DLR-RM/stable-baselines3
+RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
+SB3: https://github.com/DLR-RM/stable-baselines3<br/>
 SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
 
 ```
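The README change only adds `<br/>` line breaks after the RL Zoo and SB3 links. For reference, a minimal sketch of loading this repository's pre-trained agent directly with Stable-Baselines3, assuming the checkpoint `dqn-MountainCar-v0.zip` has been downloaded locally (the full RL Zoo training/enjoy commands are documented in the linked repos):

```python
import gym
from stable_baselines3 import DQN

# Load the pre-trained checkpoint shipped in this repo (local path is an assumption).
model = DQN.load("dqn-MountainCar-v0.zip")

# Roll out one deterministic episode on MountainCar-v0 (gym API of the SB3 1.x era).
env = gym.make("MountainCar-v0")
obs = env.reset()
done = False
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
env.close()
```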
dqn-MountainCar-v0.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc7f7861b17461282de2b85694d9ca48e97a97c650513acf42021f70182f56db
+oid sha256:f30b4f540218fe0745e201e2dc302049683a00c83af86c9a3ec4227ce7ae7174
 size 1103767
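Only the Git LFS pointer changes here: the `oid` field is the SHA-256 of the tracked file's contents, so a re-saved checkpoint of identical size still gets a new hash. A quick sanity check of a downloaded copy against the new pointer, using only the standard library (local path is an assumption):

```python
import hashlib

# Compare the local checkpoint's SHA-256 with the oid in the LFS pointer above.
with open("dqn-MountainCar-v0.zip", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert digest == "f30b4f540218fe0745e201e2dc302049683a00c83af86c9a3ec4227ce7ae7174"
```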
dqn-MountainCar-v0/data CHANGED
@@ -4,15 +4,15 @@
 ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmRxbi5wb2xpY2llc5SMCURRTlBvbGljeZSTlC4=",
 "__module__": "stable_baselines3.dqn.policies",
 "__doc__": "\n Policy class with Q-Value Net and target net for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function DQNPolicy.__init__ at 0x7f8b5764cb00>",
-"_build": "<function DQNPolicy._build at 0x7f8b5764cb90>",
-"make_q_net": "<function DQNPolicy.make_q_net at 0x7f8b5764cc20>",
-"forward": "<function DQNPolicy.forward at 0x7f8b5764ccb0>",
-"_predict": "<function DQNPolicy._predict at 0x7f8b5764cd40>",
-"_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x7f8b5764cdd0>",
-"set_training_mode": "<function DQNPolicy.set_training_mode at 0x7f8b5764ce60>",
+"__init__": "<function DQNPolicy.__init__ at 0x7f16a6309b00>",
+"_build": "<function DQNPolicy._build at 0x7f16a6309b90>",
+"make_q_net": "<function DQNPolicy.make_q_net at 0x7f16a6309c20>",
+"forward": "<function DQNPolicy.forward at 0x7f16a6309cb0>",
+"_predict": "<function DQNPolicy._predict at 0x7f16a6309d40>",
+"_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x7f16a6309dd0>",
+"set_training_mode": "<function DQNPolicy.set_training_mode at 0x7f16a6309e60>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc_data object at 0x7f8b576444e0>"
+"_abc_impl": "<_abc_data object at 0x7f16a63004e0>"
 },
 "verbose": 1,
 "policy_kwargs": {
@@ -89,12 +89,12 @@
 ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
 "__module__": "stable_baselines3.common.buffers",
 "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device:\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
-"__init__": "<function ReplayBuffer.__init__ at 0x7f8b57abeef0>",
-"add": "<function ReplayBuffer.add at 0x7f8b57abef80>",
-"sample": "<function ReplayBuffer.sample at 0x7f8b57ab3680>",
-"_get_samples": "<function ReplayBuffer._get_samples at 0x7f8b57ab3710>",
+"__init__": "<function ReplayBuffer.__init__ at 0x7f16a677bef0>",
+"add": "<function ReplayBuffer.add at 0x7f16a677bf80>",
+"sample": "<function ReplayBuffer.sample at 0x7f16a676f680>",
+"_get_samples": "<function ReplayBuffer._get_samples at 0x7f16a676f710>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc_data object at 0x7f8b57b1d480>"
+"_abc_impl": "<_abc_data object at 0x7f16a67da480>"
 },
 "replay_buffer_kwargs": {},
 "train_freq": {
results.json CHANGED
@@ -1 +1 @@
-{"mean_reward": -103.4, "std_reward": 7.4859869088851605, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2022-05-20T09:58:38.915474"}
+{"mean_reward": -103.4, "std_reward": 7.4859869088851605, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2022-05-20T09:59:53.216097"}