From 50077ef63c391f69d20b3b6f1b1e0b14eecc0a99 Mon Sep 17 00:00:00 2001 From: CoderHaoranLee <917735857@qq.com> Date: Mon, 20 Jun 2022 14:13:37 +0800 Subject: [PATCH] update api_test.py for 1.0.0 --- api_test.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/api_test.py b/api_test.py index da2696a..a44c823 100644 --- a/api_test.py +++ b/api_test.py @@ -1,7 +1,8 @@ +#pip install CogEnvDecoder==1.0.0 from Cogenvdecoder.CogEnvDecoder import CogEnvDecoder import numpy as np import cv2 - +import time def check_state(state, info=None): image_data = state["color_image"] laser_data = np.array(state["laser"]) @@ -13,6 +14,8 @@ def check_state(state, info=None): # self_pose: [x, y, theta(rad)], self_info: [remaining HP, remaining bullet] # enemy_pose: [x, y, theta(rad)], enemy_info: [remaining HP, remaining bullet] print("self pose: {}, self info: {}, enemy active: {}, enemy pose: {}, enemy_info: {}".format(vector_data[0], vector_data[1], vector_data[2], vector_data[3], vector_data[4])) + # self_velocity: [vx, vy, vw] + print("self velocity: {}".format(vector_data[11])) # goal_x: [x, y, is_activated?] print("goal 1: {}, goal 2: {}, goal 3: {}, goal 4: {}, goal 5:{}".format(vector_data[5], vector_data[6], vector_data[7], vector_data[8], vector_data[9])) # total counts of collisions, total collision time @@ -24,23 +27,33 @@ def check_state(state, info=None): print("-----------------------end check---------------------") -env = CogEnvDecoder(env_name="linux_V1/1.x86_64", no_graphics=False, time_scale=1, worker_id=1) # linux os +env = CogEnvDecoder(env_name="stage2/reality_linux_v3.0/cog_sim2real_env.x86_64", no_graphics=False, + time_scale=1, worker_id=2, seed=1234, force_sync=True) # linux os # env = CogEnvDecoder(env_name="win_V1/RealGame.exe", no_graphics=False, time_scale=1, worker_id=1) # windows os # env_name: path of the simulator # no_graphics: should use headless mode [Warning: if no_graphics is True, image if invalid!] 
# time_scale: useful for speedup collecting data during training, max value is 100 # worker_id: socket port offset, useful for multi-thread training +# seed: random seed for generating the positions of the goals and the robots +# force_sync: use time synchronization (True) or not (False) num_episodes = 10 num_steps_per_episode = 500 # max: 1500 for i in range(num_episodes): #every time call the env.reset() will reset the envinronment - observation = env.reset() + observation = env.reset(fri_cor=0.1, KP=8, KI=0, KD=2, VK1=0.375, M=3.4, Wheel_I=0.0125) + # fri_cor: friction factor + # KP, KI, KD: parameters of PID + # VK1: parameter of the motor (M3508I) + # M: the mass of the robot + # Wheel_I: the inertia of the wheel for j in range(num_steps_per_episode): - # action = env.action_space.sample() - action = [0.5, 0.5, 0.1, 0] # [vx, vy, vw, fire]; vx: the velocity at which the vehicle moves forward, vy: the velocity at which the vehicle moves to the left, vw: Angular speed of the vehicle counterclockwise rotation, fire: Shoot or not + action = env.action_space.sample() + # action = [0.5, 0.5, 0.1, 0] # [vx, vy, vw, fire]; vx: the velocity at which the vehicle moves forward, vy: the velocity at which the vehicle moves to the left, vw: Angular speed of the vehicle counterclockwise rotation, fire: Shoot or not obs, reward, done, info = env.step(action) cv2.imshow("color_image", obs["color_image"]) cv2.waitKey(1) check_state(obs, info) - print(reward) + if done: + break