
Commit

Modified initial values, fixed errors
sinyeong10 committed Sep 20, 2024
1 parent 6139a67 commit 9b0363a
Showing 5 changed files with 43 additions and 10 deletions.
39 changes: 34 additions & 5 deletions aloha_scripts/real_env.py
@@ -60,7 +60,7 @@ def __init__(self, init_node, setup_robots=True, setup_base=False):
break


self.mycobot = MyCobot('COM7', 115200) #('/dev/ttyACM0',115200)
self.mycobot = MyCobot('COM11', 115200) #('/dev/ttyACM0',115200)
start_time = time.time()
self.mycobot.set_gripper_mode(0)
print(self.mycobot.get_coords())
@@ -74,16 +74,40 @@ def __init__(self, init_node, setup_robots=True, setup_base=False):
self._reset_gripper()

self.cnt = 0
self.prev_gripper = 70
self.gripper_value = [70]*40+list(np.linspace(70, 25, 10))+[25]*85+list(np.linspace(25,70,10))+[70]*100

self.backupdata = {"top":[], "right_wrist":[]}

def save(self, path, idx):
print("\n\nsave\n\n")
max_timesteps = len(self.backupdata["right_wrist"])
import os
import h5py
t0 = time.time()
dataset_dir = path
dataset_path = os.path.join(dataset_dir, f'mycobot320_model_run_image_{idx}')
# if task_name == 'sim_move_cube_scripted': #one arm
with h5py.File(dataset_path + '.hdf5', 'w', rdcc_nbytes=1024 ** 2 * 2) as root:
root.attrs['sim'] = True
obs = root.create_group('observations')
image = obs.create_group('images')
for cam_name in ["top", "right_wrist"]:
_ = image.create_dataset(cam_name, (max_timesteps, 480, 640, 3), dtype='uint8',
chunks=(1, 480, 640, 3), )

for name, array in self.backupdata.items():
name = f"/observations/images/"+name
print(name)
root[name][...] = array

def get_qpos(self):
print(self.cnt, "qpos :", self.mycobot.get_angles(), self.mycobot.get_gripper_value(), self.gripper_value[self.cnt])
print(self.cnt, "qpos :", self.mycobot.get_angles(), self.mycobot.get_gripper_value(), self.prev_gripper)#self.gripper_value[self.cnt])
gripper = self.mycobot.get_gripper_value()
if self.mycobot.get_gripper_value() == 255:
# if 41<= self.cnt <= 50 or 135<= self.cnt:
gripper = self.gripper_value[self.cnt]
# gripper = self.gripper_value[self.cnt] # based on the exact command order
gripper = self.prev_gripper # value computed from the previous action
self.cnt += 1
return np.concatenate([self.mycobot.get_angles(), [gripper]])
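The get_qpos change above stops indexing the pre-scripted gripper profile (self.gripper_value, a 40+10+85+10+100 = 245-step schedule) and instead substitutes self.prev_gripper, the value tracked from the previous action, whenever get_gripper_value() returns 255. A minimal sketch of that fallback, assuming a reading of 255 means the gripper value could not be read on this cycle (the function name is illustrative):

def resolve_gripper(raw_reading, prev_gripper):
    """Return a usable gripper value, substituting the tracked previous value."""
    if raw_reading == 255:   # assumption: 255 marks an unavailable reading
        return prev_gripper  # estimate carried over from the previous action
    return raw_reading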

@@ -97,10 +121,13 @@ def get_images(self):
_, cur_frame1 = self.cap0.read()
image_dict["right_wrist"] = cur_frame1
print(self.cnt, "cur_frame", np.array(cur_frame0).shape, np.array(cur_frame1).shape)

self.backupdata["top"].append(cur_frame0)
self.backupdata["right_wrist"].append(cur_frame1)
return image_dict

def _reset_joints(self):
self.mycobot.send_angles([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 20)
self.mycobot.send_angles([(-3.07), 33.39, 32.78, 6.94, (-88.33), (-1.58)], 20)#[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 20)
time.sleep(5)

def _reset_gripper(self):
@@ -130,10 +157,12 @@ def reset(self, fake=False):
def step(self, action, base_action=None, get_tracer_vel=False, get_obs=True):
# print("action.shape", action.shape)
action = list(action)
print(self.cnt, "action", action)
print(self.cnt, "action :", action, "previous value :", self.prev_gripper)
state_len = 6
self.mycobot.send_angles(action[:state_len], 20)
self.mycobot.set_gripper_value(int(action[-1]), 20, 1)
diff = int(action[-1])-self.prev_gripper
self.prev_gripper = self.prev_gripper+min(5, abs(diff)) if diff > 0 else self.prev_gripper-min(5, abs(diff))
if get_obs:
obs = self.get_observation(get_tracer_vel)
else:
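At the end of step, the new lines keep self.prev_gripper as a rate-limited estimate of the physical gripper position: each call moves the estimate toward the commanded value by at most 5 units. A minimal sketch of that update factored into a helper (names are illustrative):

def track_gripper(prev_gripper, commanded, max_step=5):
    """Move the tracked gripper estimate toward the commanded value by at most max_step."""
    diff = commanded - prev_gripper
    if diff > 0:
        return prev_gripper + min(max_step, abs(diff))
    return prev_gripper - min(max_step, abs(diff))

# e.g. starting at 70 and commanding 25: 70 -> 65 -> 60 -> ... over successive steps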
4 changes: 2 additions & 2 deletions constants.py
@@ -58,8 +58,8 @@
# modified
'sim_mycobot320':{
'dataset_dir': DATA_DIR + '/sim_mycobot_320',
'num_episodes': 10,
'episode_len': 145,
'num_episodes': 50,
'episode_len': 115,
'camera_names': ['right_wrist', 'top']#, 'left_wrist', 'right_wrist']
},

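The updated task entry now expects 50 recorded episodes of 115 timesteps from the two cameras. A minimal sketch of how such an entry is typically looked up, assuming the enclosing dict in constants.py is named SIM_TASK_CONFIGS (the dict name is not shown in this diff):

from constants import SIM_TASK_CONFIGS  # assumption: dict name not visible in this hunk

task_config = SIM_TASK_CONFIGS['sim_mycobot320']
dataset_dir = task_config['dataset_dir']    # DATA_DIR + '/sim_mycobot_320'
num_episodes = task_config['num_episodes']  # 50
episode_len = task_config['episode_len']    # 115
camera_names = task_config['camera_names']  # ['right_wrist', 'top']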
4 changes: 3 additions & 1 deletion imitate_episodes.py
@@ -78,6 +78,7 @@ def main(args):
state_dim = 14
if task_name == 'sim_move_cube_scripted' or task_name == 'sim_mycobot320': #one arm
state_dim = 7
print("state_dim", state_dim)
lr_backbone = 1e-5
backbone = 'resnet18'
if policy_class == 'ACT':
@@ -225,9 +226,10 @@ def main(args):
# calls utils.load_data ?
if "sim_move_cube" in task_name or "sim_mycobot320" in task_name:
train_dataloader, val_dataloader, stats, _ = load_data_one(dataset_dir, name_filter, camera_names, batch_size_train, batch_size_val, args['chunk_size'], args['skip_mirrored_data'], config['load_pretrain'], policy_class, stats_dir_l=stats_dir, sample_weights=sample_weights, train_ratio=train_ratio)
print("using one arm only")
else:
train_dataloader, val_dataloader, stats, _ = load_data(dataset_dir, name_filter, camera_names, batch_size_train, batch_size_val, args['chunk_size'], args['skip_mirrored_data'], config['load_pretrain'], policy_class, stats_dir_l=stats_dir, sample_weights=sample_weights, train_ratio=train_ratio)

print("using both arms")
# saves the stats variable containing ['action_mean', 'action_std', 'action_min', 'action_max', 'qpos_mean', 'qpos_std', 'example_qpos']
# save dataset stats
stats_path = os.path.join(ckpt_dir, f'dataset_stats.pkl')
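For the one-arm tasks, state_dim is 7, which matches the qpos vector built in real_env.py: six MyCobot joint angles plus one gripper value. A minimal sketch of that composition (the numeric values are illustrative):

import numpy as np

angles = [-3.07, 33.39, 32.78, 6.94, -88.33, -1.58]  # six joint angles (degrees)
gripper = 70                                         # tracked gripper value
qpos = np.concatenate([angles, [gripper]])
assert qpos.shape == (7,)                            # state_dim for the one-arm tasks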
4 changes: 3 additions & 1 deletion model_runs.py
@@ -212,7 +212,7 @@ def main(args):
with open(config_path, 'wb') as f:
pickle.dump(config, f)
if is_eval: # when eval mode was set to True
ckpt_names = [f'policy_last.ckpt'] # means the last checkpoint file
ckpt_names = [f'best_policy_step_27000_seed_0.ckpt'] #f'policy_last.ckpt', # means the last checkpoint file
results = []
for ckpt_name in ckpt_names: # there is only one, so this runs for just that checkpoint
# eval_bc loads the specified model (ACT), runs it, and returns the success rate and average reward
@@ -608,6 +608,8 @@ def eval_bc(config, ckpt_name, save_episode=True, num_rollouts=50, dir_step = 0)

print("open gripper and exit")
env.mycobot.set_gripper_value(100,20,1)

env.save("twocam_mycobot320_chunk20", 0)

# compute and print the total of the returned rewards, the max reward, and success
rewards = np.array(rewards)
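At the end of eval_bc, env.save("twocam_mycobot320_chunk20", 0) writes the frames buffered in self.backupdata to an HDF5 file with one uint8 dataset of shape (T, 480, 640, 3) per camera under /observations/images/. A minimal sketch of reading that file back, assuming the layout from the real_env.py diff:

import h5py

path = 'twocam_mycobot320_chunk20/mycobot320_model_run_image_0.hdf5'
with h5py.File(path, 'r') as root:
    top_frames = root['/observations/images/top'][...]           # (T, 480, 640, 3) uint8
    wrist_frames = root['/observations/images/right_wrist'][...]
    print(top_frames.shape, wrist_frames.shape)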
2 changes: 1 addition & 1 deletion sim_env.py
@@ -85,7 +85,7 @@ def before_step(self, action, physics):
full_right_gripper_action = [right_gripper_action, -right_gripper_action]

env_action = np.concatenate([left_arm_action, full_left_gripper_action, right_arm_action, full_right_gripper_action])
env_action = np.concatenate([right_arm_action, full_right_gripper_action])

super().before_step(env_action, physics)
return

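With the change in before_step, the physics action is built from the right arm only, with the single gripper command mirrored into two opposing finger actuators. A minimal sketch of that concatenation, assuming a 6-joint arm (the arm dimension is not shown in this diff):

import numpy as np

right_arm_action = np.zeros(6)  # assumption: 6 arm joints
right_gripper_action = 0.5      # normalized gripper command
full_right_gripper_action = [right_gripper_action, -right_gripper_action]  # mirrored fingers
env_action = np.concatenate([right_arm_action, full_right_gripper_action])
assert env_action.shape == (8,)  # one-arm env action under this assumption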
