From: Erik Andresen
Date: Sat, 28 Aug 2021 08:22:38 +0000 (+0200)
Subject: added grab_bottle script
X-Git-Url: https://defiant.homedns.org/gitweb/?a=commitdiff_plain;h=cae49cdeecd229a78b3fb079de1b25f6e34c3210;p=wt_open_manipulator.git

added grab_bottle script
---

diff --git a/scripts/grab_bottle.py b/scripts/grab_bottle.py
new file mode 100755
index 0000000..7744d7a
--- /dev/null
+++ b/scripts/grab_bottle.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-15 -*-
+
+"""
+roslaunch darknet_ros darknet_ros.launch image:=/camera/color/image_raw
+"""
+
+import sys
+import rospy
+import tf
+import tf2_ros
+import tf2_geometry_msgs
+import image_geometry
+import moveit_commander
+import numpy as np
+from math import *
+from time import sleep
+from sensor_msgs.msg import Image, CameraInfo
+from darknet_ros_msgs.msg import BoundingBoxes
+from geometry_msgs.msg import PoseStamped
+from cv_bridge import CvBridge
+
+class OpenManipGrabBottle:
+    def __init__(self):
+        rospy.init_node('open_manip_grab_bottle')
+        moveit_commander.roscpp_initialize(sys.argv)
+        self.sub_depthinfo = rospy.Subscriber("/camera/aligned_depth_to_color/camera_info", CameraInfo, self.depthinfo_received)
+        self.tf_broadcaster = tf.broadcaster.TransformBroadcaster()
+        self.depthimgmsg = None
+        self.camera_model = None
+        self.bridge = CvBridge()
+        self.tfBuffer = tf2_ros.Buffer()
+        self.listener = tf2_ros.TransformListener(self.tfBuffer)
+        self.target_class = "bottle"
+
+        self.gripper_pose_pub = rospy.Publisher('gripper_pose', PoseStamped, queue_size=1)
+        self.arm = moveit_commander.MoveGroupCommander("arm")
+        self.gripper = moveit_commander.MoveGroupCommander("gripper")
+        self.arm.set_goal_position_tolerance(0.01)
+        self.arm.set_goal_orientation_tolerance(5*pi/180)
+        self.home()
+        sleep(10) # wait for image
+        self.register_subscriber()
+        rospy.spin()
+
+    def register_subscriber(self):
+        self.sub_obj_detection = rospy.Subscriber("/darknet_ros/bounding_boxes", BoundingBoxes, self.detected_objects)
+        self.sub_depthimage = rospy.Subscriber("/camera/aligned_depth_to_color/image_raw", Image, self.depthimage_received)
+
+    def home(self):
+        self.gripper.set_named_target('close')
+        self.gripper.go()
+        #self.arm.set_named_target('home')
+        self.arm.go([f * pi/180 for f in [90.0, -90, 77, 20]], wait=True)
+
+    def detected_objects(self, msg):
+        if self.depthimgmsg is None or self.camera_model is None:
+            return
+        box_bottles = filter(lambda box: box.Class == self.target_class and box.probability > 0.5, msg.bounding_boxes)
+        if len(box_bottles) > 0:
+            box = box_bottles[0] # just use the first one found for now
+            box_x = int(box.xmin + (box.xmax - box.xmin)/2.0)
+            box_y = int(box.ymin + (box.ymax - box.ymin)/2.0)
+            depthframe = self.bridge.imgmsg_to_cv2(self.depthimgmsg)
+            dist = depthframe[box_y][box_x]/1000.0 # assume size rgb = size depth
+            if dist == 0:
+                rospy.logerr("depth invalid")
+                return
+
+            self.sub_obj_detection.unregister()
+            self.sub_depthimage.unregister()
+
+            coords3d = [f * dist for f in self.camera_model.projectPixelTo3dRay([box_x, box_y])]
+            point = PoseStamped()
+            point.header.frame_id = self.depthimgmsg.header.frame_id
+            point.header.stamp = rospy.Time.now()
+            point.pose.position.x = coords3d[0]
+            point.pose.position.y = coords3d[1]
+            point.pose.position.z = coords3d[2]
+            point = self.tfBuffer.transform(point, "open_manipulator_base", rospy.Duration(1))
+            point.pose.orientation.x, point.pose.orientation.y, point.pose.orientation.z, point.pose.orientation.w = [0, 0, 0, 1] # bottle stands up
+            print point
+            point.pose.position.z-=0.02
+
+            self.tf_broadcaster.sendTransform([point.pose.position.x, point.pose.position.y, point.pose.position.z], [point.pose.orientation.x, point.pose.orientation.y, point.pose.orientation.z, point.pose.orientation.w], rospy.Time.now(), self.target_class, "open_manipulator_base")
+
+            if self.pick(point):
+                rospy.signal_shutdown("done")
+            else:
+                self.home()
+                self.register_subscriber()
+
+    def depthimage_received(self, msg):
+        self.depthimgmsg = msg
+
+    def depthinfo_received(self, msg):
+        self.camera_model = image_geometry.PinholeCameraModel()
+        self.camera_model.fromCameraInfo(msg)
+        self.sub_depthinfo.unregister()
+
+    def pick(self, pose):
+        pos2d = np.array([pose.pose.position.x, pose.pose.position.y])
+        dist = np.linalg.norm(pos2d)
+        pos2d_unit = pos2d / dist # unit vector
+
+        # orientation: yaw pointing from the base towards the target
+        q = tf.transformations.quaternion_from_euler(0, 0, atan2(pos2d_unit[1], pos2d_unit[0]))
+        pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z, pose.pose.orientation.w = [q[0], q[1], q[2], q[3]]
+
+        # goal 5cm before the target
+        pose.pose.position.x = pos2d_unit[0] * (dist-0.05)
+        pose.pose.position.y = pos2d_unit[1] * (dist-0.05)
+        self.arm.set_pose_target(pose)
+        if not self.arm.go():
+            print "before position failed"
+            return False
+
+        self.gripper.set_named_target('open')
+        self.gripper.go()
+
+        # goal 3cm past the target
+        pose.pose.position.x = pos2d_unit[0] * (dist+0.03)
+        pose.pose.position.y = pos2d_unit[1] * (dist+0.03)
+        self.arm.set_pose_target(pose)
+        if not self.arm.go():
+            print "position on target failed"
+            return False
+
+        self.gripper.set_named_target('close')
+        self.gripper.go()
+
+        # lift the bottle
+        angles = [0.0, 0.0, 0.0, 0.0]
+        while angles == [0.0, 0.0, 0.0, 0.0]: # invalid
+            angles = self.arm.get_current_joint_values()
+        print [f * 180/pi for f in angles]
+        angles[1]-=20*pi/180
+        if not self.arm.go(angles, wait=True):
+            print "higher position failed"
+            return True
+
+        if not self.arm.go([f * pi/180 for f in [-90, -60, 0, 0]], wait=True):
+            print "final position failed"
+            return True
+
+        return True
+
+if __name__ == "__main__":
+    OpenManipGrabBottle()