Skip to content

Commit

Permalink
change requirement.txt
Browse files Browse the repository at this point in the history
  • Loading branch information
chenxiaochang committed Oct 27, 2020
1 parent 9fbd171 commit d047b5a
Show file tree
Hide file tree
Showing 2 changed files with 59 additions and 4 deletions.
15 changes: 11 additions & 4 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@ astunparse==1.6.3
atari-py==0.2.6
base-local-planner==1.14.8
bondpy==1.8.3
Box2D==2.3.10
Box2D-kengz==2.3.3
box2d-py==2.3.8
cachetools==4.1.1
camera-calibration==1.12.23
camera-calibration-parsers==1.11.13
Expand Down Expand Up @@ -45,15 +48,17 @@ importlib-metadata==2.0.0
interactive-markers==1.11.5
joblib==0.17.0
joint-state-publisher==1.12.15
Keras==2.4.3
Keras-Applications==1.0.8
Keras-Preprocessing==1.1.2
kiwisolver==1.2.0
laser-geometry==1.6.5
Markdown==3.3.3
matplotlib==3.3.2
message-filters==1.12.16
mock==4.0.2
mpi4py==3.0.3
numpy==1.18.5
numpy==1.16.0
oauthlib==3.1.0
opencv-python==4.4.0.44
opt-einsum==3.3.0
Expand All @@ -63,12 +68,14 @@ pluginlib==1.11.3
protobuf==3.13.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pybullet==3.0.6
pygame==1.9.6
pyglet==1.5.0
pyparsing==2.4.7
python-dateutil==2.8.1
python-qt-binding==0.3.7
pytz==2020.1
PyYAML==5.3.1
qt-dotgraph==0.3.17
qt-gui==0.3.17
qt-gui-cpp==0.3.17
Expand Down Expand Up @@ -139,11 +146,11 @@ smach==2.0.1
smach-ros==2.0.1
smclib==1.8.3
stable-baselines==2.10.1
tensorboard==1.14.0
tensorboard==1.13.1
tensorboard-plugin-wit==1.7.0
tensorboardX==2.1
tensorflow==1.14.0
tensorflow-estimator==1.14.0
tensorflow==1.13.1
tensorflow-estimator==1.13.0
termcolor==1.1.0
tf==1.11.9
tf-conversions==1.11.9
Expand Down
48 changes: 48 additions & 0 deletions test_virtual/PPO2_overtaking.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
"""Train a PPO2 agent on the highway-env "overtaking-v0" task with
normalized observations/rewards, evaluate it briefly, then save and
reload both the model and the VecNormalize statistics."""
import os

import gym
import highway_env  # registers "overtaking-v0" with gym  # noqa: F401
import pybullet_envs  # registers pybullet envs (needed for the commented demo below)  # noqa: F401

from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines import PPO2

env = DummyVecEnv([lambda: gym.make("overtaking-v0")])
# Automatically normalize the input features and reward
env = VecNormalize(env, norm_obs=True, norm_reward=True,
                   clip_obs=10.)


model = PPO2('MlpPolicy', env)
model.learn(total_timesteps=2000)

# do not update the normalization statistics at test time
env.training = False
# reward normalization is not needed at test time
env.norm_reward = False

# Short evaluation rollout (runs with frozen normalization stats).
obs = env.reset()
for i in range(10):
    action, _states = model.predict(obs)
    obs, rewards, dones, info = env.step(action)
    env.render()

# Don't forget to save the VecNormalize statistics when saving the agent
log_dir = "/home/cxc/下载/实验结果"
# os.path.join inserts the missing path separator; plain string
# concatenation would save to "...实验结果ppo_overtaking" instead of
# a file inside the directory.
model_path = os.path.join(log_dir, "ppo_overtaking")
model.save(model_path)
stats_path = os.path.join(log_dir, "vec_normalize.pkl")
env.save(stats_path)

# To demonstrate loading
del model, env

# Load the agent (same joined path the model was saved under)
model = PPO2.load(model_path)



# Load the saved statistics
# env = DummyVecEnv([lambda: gym.make("HalfCheetahBulletEnv-v0")])
# env = VecNormalize.load(stats_path, env)

0 comments on commit d047b5a

Please sign in to comment.