Create app.py
app.py
ADDED
@@ -0,0 +1,47 @@
# 1. Import Dependencies
# Note: a `!pip install ...` line only works in a notebook cell, not in app.py.
# On Hugging Face Spaces these packages should be declared in requirements.txt:
#   gym[box2d], pyglet==1.3.2, stable-baselines3

import gym
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import VecFrameStack
from stable_baselines3.common.evaluation import evaluate_policy
import os

# 2. Test Environment
environment_name = "CarRacing-v0"
env = gym.make(environment_name)

episodes = 5
for episode in range(1, episodes + 1):
    state = env.reset()
    done = False
    score = 0

    while not done:
        env.render()
        action = env.action_space.sample()            # random action for the baseline run
        n_state, reward, done, info = env.step(action)
        score += reward
    print('Episode:{} Score:{}'.format(episode, score))
env.close()

# 3. Train Model
env = gym.make(environment_name)                      # re-create the environment after closing it
log_path = os.path.join('Training', 'Logs')
model = PPO("CnnPolicy", env, verbose=1, tensorboard_log=log_path)
model.learn(total_timesteps=40000)

# 4. Save Model
ppo_path = os.path.join('Training', 'Saved Models', 'PPO_Driving_model')
model.save(ppo_path)

# 5. Evaluate and Test
evaluate_policy(model, env, n_eval_episodes=10, render=True)

obs = env.reset()
while True:
    action, _states = model.predict(obs)
    obs, reward, done, info = env.step(action)
    env.render()
    if done:                                          # stop once the episode ends so the env can be closed
        break
env.close()
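The checkpoint written in step 4 can be reloaded later without retraining. The snippet below is a minimal sketch of that workflow, assuming the same 'Training/Saved Models/PPO_Driving_model' path and CarRacing-v0 environment used above; PPO.load and evaluate_policy are the standard stable-baselines3 calls for this.

# Reload the saved PPO policy and evaluate it (sketch; assumes the paths used above).
import os

import gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

ppo_path = os.path.join('Training', 'Saved Models', 'PPO_Driving_model')
env = gym.make("CarRacing-v0")

model = PPO.load(ppo_path, env=env)   # restores weights and hyperparameters from the .zip checkpoint
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=5)
print('Mean reward: {:.2f} +/- {:.2f}'.format(mean_reward, std_reward))
env.close()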