Upload README.md with huggingface_hub
README.md (CHANGED)
@@ -21,7 +21,7 @@ model-index:
       type: OpenAI/Gym/Atari-SpaceInvadersNoFrameskip-v4
     metrics:
     - type: mean_reward
-      value:
+      value: 1176.0 +/- 248.31
      name: mean_reward
 ---
 
@@ -60,23 +60,7 @@ python3 -u run.py
 ```
 **run.py**
 ```python
-
-from ding.config import Config
-from easydict import EasyDict
-import torch
-
-# Pull model from files which are git cloned from huggingface
-policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
-cfg = EasyDict(Config.file_to_dict("policy_config.py"))
-# Instantiate the agent
-agent = DQNAgent(
-    env="SpaceInvadersNoFrameskip", exp_name="SpaceInvadersNoFrameskip-v4-DQN", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
-)
-# Continue training
-agent.train(step=5000)
-# Render the new agent performance
-agent.deploy(enable_save_replay=True)
-
+# [More Information Needed]
 ```
 </details>
 
@@ -91,20 +75,7 @@ python3 -u run.py
 ```
 **run.py**
 ```python
-
-from huggingface_ding import pull_model_from_hub
-
-# Pull model from Hugggingface hub
-policy_state_dict, cfg = pull_model_from_hub(repo_id="OpenDILabCommunity/SpaceInvadersNoFrameskip-v4-DQN")
-# Instantiate the agent
-agent = DQNAgent(
-    env="SpaceInvadersNoFrameskip", exp_name="SpaceInvadersNoFrameskip-v4-DQN", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
-)
-# Continue training
-agent.train(step=5000)
-# Render the new agent performance
-agent.deploy(enable_save_replay=True)
-
+# [More Information Needed]
 ```
 </details>
 
@@ -121,31 +92,7 @@ python3 -u train.py
 ```
 **train.py**
 ```python
-
-from huggingface_ding import push_model_to_hub
-
-# Instantiate the agent
-agent = DQNAgent(env="SpaceInvadersNoFrameskip", exp_name="SpaceInvadersNoFrameskip-v4-DQN")
-# Train the agent
-return_ = agent.train(step=int(20000000), collector_env_num=8, evaluator_env_num=8, debug=False)
-print("-----wandb url is----:", return_.wandb_url)
-# Push model to huggingface hub
-push_model_to_hub(
-    agent=agent.best,
-    env_name="OpenAI/Gym/Atari",
-    task_name="SpaceInvadersNoFrameskip-v4",
-    algo_name="DQN",
-    wandb_url=return_.wandb_url,
-    github_repo_url="https://github.com/opendilab/DI-engine",
-    github_doc_model_url="https://di-engine-docs.readthedocs.io/en/latest/12_policies/dqn.html",
-    github_doc_env_url="https://di-engine-docs.readthedocs.io/en/latest/13_envs/atari.html",
-    installation_guide="pip3 install DI-engine[common_env]",
-    usage_file_by_git_clone="./dqn/spaceinvaders_dqn_deploy.py",
-    usage_file_by_huggingface_ding="./dqn/spaceinvaders_dqn_download.py",
-    train_file="./dqn/spaceinvaders_dqn.py",
-    repo_id="OpenDILabCommunity/SpaceInvadersNoFrameskip-v4-DQN"
-)
-
+# [More Information Needed]
 ```
 </details>
 
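The removed `run.py`/`train.py` snippets above construct `DQNAgent` without importing it. In recent DI-engine releases this class is exposed through `ding.bonus`, so a runnable version of the git-clone deployment example would likely look like the sketch below; the `ding.bonus` import is an assumption not shown in the diff, while the remaining lines are taken from the removed hunk.

```python
# Sketch of the removed git-clone deployment snippet, made self-contained.
# Assumption: DQNAgent is importable from ding.bonus (the import is absent in the diff).
import torch
from easydict import EasyDict
from ding.config import Config
from ding.bonus import DQNAgent  # assumed import

# Load the checkpoint and exported config from the cloned repository.
policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
cfg = EasyDict(Config.file_to_dict("policy_config.py"))

# Instantiate the agent, fine-tune briefly, then render a replay.
agent = DQNAgent(
    env="SpaceInvadersNoFrameskip",
    exp_name="SpaceInvadersNoFrameskip-v4-DQN",
    cfg=cfg.exp_config,
    policy_state_dict=policy_state_dict,
)
agent.train(step=5000)
agent.deploy(enable_save_replay=True)
```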
@@ -172,7 +119,8 @@ exp_config = {
         'env_id': 'SpaceInvadersNoFrameskip-v4',
         'collector_env_num': 8,
         'evaluator_env_num': 8,
-        'fram_stack': 4
+        'fram_stack': 4,
+        'env_wrapper': 'atari_default'
     },
     'policy': {
         'model': {
@@ -217,6 +165,7 @@ exp_config = {
             'render_freq': -1,
             'mode': 'train_iter'
         },
+        'figure_path': None,
         'cfg_type': 'InteractionSerialEvaluatorDict',
         'stop_value': 2000,
         'n_episode': 8
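For context on the two config hunks above: the commit adds an `'env_wrapper': 'atari_default'` key next to the frame-stack setting and a `'figure_path': None` key in the evaluator block. A quick way to confirm the new keys after cloning the repo is to load `policy_config.py` with the same `Config`/`EasyDict` helpers used in the removed `run.py`. The nesting under `exp_config.env` below is an assumption based on DI-engine's usual layout; the enclosing key is not visible in the hunk.

```python
# Sketch: check the env-level keys added or changed by this commit.
# Assumption: these fields sit under exp_config.env (enclosing key not shown in the diff).
from easydict import EasyDict
from ding.config import Config

cfg = EasyDict(Config.file_to_dict("policy_config.py"))
print(cfg.exp_config.env.fram_stack)   # expected: 4
print(cfg.exp_config.env.env_wrapper)  # expected: 'atari_default'
```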
@@ -261,7 +210,7 @@ exp_config = {
 
 **Training Procedure**
 <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
-- **Weights & Biases (wandb):** [monitor link](https://wandb.ai/
+- **Weights & Biases (wandb):** [monitor link](https://wandb.ai/zjowowen/SpaceInvadersNoFrameskip-v4-DQN)
 
 ## Model Information
 <!-- Provide the basic links for the model. -->
@@ -271,13 +220,13 @@ exp_config = {
 - **Demo:** [video](https://huggingface.co/OpenDILabCommunity/SpaceInvadersNoFrameskip-v4-DQN/blob/main/replay.mp4)
 <!-- Provide the size information for the model. -->
 - **Parameters total size:** 55703.03 KB
-- **Last Update Date:** 2023-
+- **Last Update Date:** 2023-07-24
 
 ## Environments
 <!-- Address questions around what environment the model is intended to be trained and deployed at, including the necessary information needed to be provided for future users. -->
 - **Benchmark:** OpenAI/Gym/Atari
 - **Task:** SpaceInvadersNoFrameskip-v4
 - **Gym version:** 0.25.1
-- **DI-engine version:** v0.4.
-- **PyTorch version:**
+- **DI-engine version:** v0.4.8
+- **PyTorch version:** 2.0.1+cu117
 - **Doc**: [DI-engine-docs Environments link](https://di-engine-docs.readthedocs.io/en/latest/13_envs/atari.html)
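The environment section of the card pins gym 0.25.1, DI-engine v0.4.8 and PyTorch 2.0.1+cu117. A minimal sanity check of a local install against those versions, assuming the packages expose the usual `__version__` attributes, could look like this sketch:

```python
# Sketch: compare local library versions with the ones listed on the card.
# Assumption: gym, torch and ding all expose __version__ (standard for these packages).
import gym
import torch
import ding

print("gym:      ", gym.__version__)    # card lists 0.25.1
print("torch:    ", torch.__version__)  # card lists 2.0.1+cu117
print("DI-engine:", ding.__version__)   # card lists v0.4.8
```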