Created
October 10, 2020 02:55
-
-
Save yenchenlin/32d2d7a375c69c47ddd26b33b8cb1f02 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Construct a 3D calibration grid across the workspace.
# The grid spans the workspace x/y bounds inset by 0.1 m on each side,
# sampled every `calib_grid_step` meters, at a single fixed z height
# 0.1 m above the workspace floor.
# np.linspace requires an integer sample count — cast explicitly
# (the original passed a float, which raises TypeError on modern NumPy).
num_x = int(1 + (self._workspace_bounds[0, 1] - self._workspace_bounds[0, 0] - 0.2) / calib_grid_step)
num_y = int(1 + (self._workspace_bounds[1, 1] - self._workspace_bounds[1, 0] - 0.2) / calib_grid_step)
gridspace_x = np.linspace(self._workspace_bounds[0, 0] + 0.1,
                          self._workspace_bounds[0, 1] - 0.1, num_x)
gridspace_y = np.linspace(self._workspace_bounds[1, 0] + 0.1,
                          self._workspace_bounds[1, 1] - 0.1, num_y)
calib_grid_x, calib_grid_y, calib_grid_z = np.meshgrid(
    gridspace_x, gridspace_y, self._workspace_bounds[2, 0] + 0.1)
num_calib_grid_pts = calib_grid_x.size
calib_grid_x.shape = (num_calib_grid_pts, 1)
calib_grid_y.shape = (num_calib_grid_pts, 1)
calib_grid_z.shape = (num_calib_grid_pts, 1)
calib_grid_pts = np.concatenate((calib_grid_x, calib_grid_y, calib_grid_z), axis=1)

# Move the robot to each calibration point in the workspace and record the
# 3D checkerboard center observed by the camera at that pose.
global measured_pts, observed_pts, observed_pix, world2camera
measured_pts = []
observed_pts = []
observed_pix = []
print('Collecting data ...')
for calib_pt_idx in range(num_calib_grid_pts):
    tool_position = calib_grid_pts[calib_pt_idx, :]
    print(tool_position, tool_orientation)
    self.robot.movej(use_pos=True,
                     params=[tool_position[0], tool_position[1], tool_position[2],
                             tool_orientation[0], tool_orientation[1], tool_orientation[2]],
                     blocking=True)
    time.sleep(1.0)  # let the arm settle before grabbing a frame

    # Keep grabbing frames until a checkerboard is detected in the image.
    chckr_found = False
    while not chckr_found:
        color_im, depth_im = self._bin_cam.color_im, self._bin_cam.depth_im
        chckr_size = (3, 3)
        refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # Camera frames are RGB (they get converted RGB->BGR for cv2.imshow
        # below), so convert RGB->GRAY directly. The original converted
        # RGB->BGR first and then applied COLOR_RGB2GRAY to the BGR image,
        # which swaps the red/blue luminance weights.
        gray_im = cv2.cvtColor(color_im, cv2.COLOR_RGB2GRAY)
        chckr_found, crnrs = cv2.findChessboardCorners(
            gray_im, chckr_size, None, cv2.CALIB_CB_ADAPTIVE_THRESH)
        if chckr_found:
            crnrs_refined = cv2.cornerSubPix(gray_im, crnrs, (3, 3), (-1, -1), refine_criteria)
            # Center corner of the 3x3 inner-corner board, in (x, y) pixels.
            block_pix = crnrs_refined[4, 0, :]
        else:
            time.sleep(0.01)  # brief pause before retrying on a fresh frame

    # Back-project the observed checkerboard center to a 3D point in
    # camera space using the depth image and the color intrinsics.
    # depth_im is indexed [row, col] = [y, x].
    block_z = depth_im[int(np.round(block_pix[1])), int(np.round(block_pix[0]))]
    # NOTE(review): block_x is computed from block_pix[1] (the y pixel) with
    # the row-0 intrinsics (fx, cx), and block_y from block_pix[0] — this
    # looks like a possible x/y swap; preserved as-is, confirm against the
    # downstream calibration solve before changing.
    block_x = np.multiply(block_pix[1] - self._bin_cam.color_intr[0, 2],
                          block_z / self._bin_cam.color_intr[0, 0])
    block_y = np.multiply(block_pix[0] - self._bin_cam.color_intr[1, 2],
                          block_z / self._bin_cam.color_intr[1, 1])
    print("Found {}".format([block_x, block_y, block_z]))
    if block_z == 0:
        # Depth sensor returned no reading at this pixel; skip the point.
        print("Zero Z, potential error!")
        continue

    # Save the measured tool point and the observed checkerboard center.
    observed_pts.append([block_x, block_y, block_z])
    # In-place += mutates this row of calib_grid_pts (tool_position is a
    # view); harmless since each grid point is visited exactly once.
    tool_position += self._block_offset_from_tool
    measured_pts.append(tool_position)
    observed_pix.append(block_pix)

    # Draw and display the detected center (cv2.circle requires int coords).
    vis_im = cv2.circle(color_im, (int(round(block_pix[0])), int(round(block_pix[1]))),
                        7, (0, 255, 0), 2)
    cv2.imshow('Calibration', cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR))
    cv2.waitKey(10)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment