Commit b4896d1

Optionally rotate object pose before publishing
This is useful if the object mesh is rotated differently from the one
the network was trained with.
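
In essence, the change converts an optional 4x4 rotation matrix per object (the new model_transforms entry in config_pose.yaml) into a quaternion once at startup and multiplies it onto every orientation DOPE detects before publishing. The sketch below is an editor's illustration of that path, not part of the commit; the matrix mirrors the commented-out "cracker" example and ori is a made-up stand-in for result["quaternion"]:

    import numpy as np
    import tf.transformations

    # Hypothetical model_transforms entry (the commented-out "cracker" example)
    model_transform = [[0,  0, 1, 0],
                       [0, -1, 0, 0],
                       [1,  0, 0, 0],
                       [0,  0, 0, 1]]

    # Once at startup: 4x4 matrix -> quaternion ([x, y, z, w] in tf order)
    model_quat = tf.transformations.quaternion_from_matrix(
        np.array(model_transform, dtype='float64'))

    # Per detection: ori stands in for result["quaternion"] returned by DOPE
    ori = np.array([0.0, 0.0, 0.0, 1.0])
    transformed_ori = tf.transformations.quaternion_multiply(ori, model_quat)
    # transformed_ori is what goes into pose_msg.pose.orientation; objects
    # without a model_transforms entry fall back to the identity quaternion
    # [0, 0, 0, 1], so their behaviour is unchanged.
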
mintar committed Jun 19, 2019
1 parent c63f92c commit b4896d1
Showing 4 changed files with 46 additions and 9 deletions.
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -15,6 +15,7 @@ find_package(catkin REQUIRED COMPONENTS
rospy
sensor_msgs
std_msgs
+ tf
vision_msgs
visualization_msgs
)
8 changes: 8 additions & 0 deletions config/config_pose.yaml
@@ -46,6 +46,14 @@ draw_colors: {
"bleach": [232, 222, 12], # yellow
}

+ # optional: provide a transform that is applied to the pose returned by DOPE
+ model_transforms: {
+ #   "cracker": [[ 0, 0, 1, 0],
+ #               [ 0, -1, 0, 0],
+ #               [ 1, 0, 0, 0],
+ #               [ 0, 0, 0, 1]]
+ }

# optional: if you provide a mesh of the object here, a mesh marker will be
# published for visualization in RViz
meshes: {
45 changes: 36 additions & 9 deletions nodes/dope
@@ -17,6 +17,7 @@ import numpy as np
import resource_retriever
import rospkg
import rospy
+ import tf.transformations
import yaml
from PIL import Image
from PIL import ImageDraw
@@ -102,6 +103,7 @@ class DopeNode(object):
self.draw_colors = {}
self.dimensions = {}
self.class_ids = {}
+ self.model_transforms = {}
self.meshes = {}
self.cv_bridge = CvBridge()

@@ -128,6 +130,12 @@ class DopeNode(object):
)
self.models[model].load_net_model()

+ try:
+     M = np.array(params['model_transforms'][model], dtype='float64')
+     self.model_transforms[model] = tf.transformations.quaternion_from_matrix(M)
+ except KeyError:
+     self.model_transforms[model] = np.array([0.0, 0.0, 0.0, 1.0], dtype='float64')

try:
self.meshes[model] = params['meshes'][model]
except KeyError:
@@ -229,7 +237,7 @@ class DopeNode(object):
detection_array = Detection3DArray()
detection_array.header = image_msg.header

- for model_id, m in enumerate(self.models):
+ for m in self.models:
# Detect object
results = ObjectDetector.detect_object_in_image(
self.models[m].net,
@@ -244,20 +252,30 @@ class DopeNode(object):
continue
loc = result["location"]
ori = result["quaternion"]

+ # transform orientation
+ transformed_ori = tf.transformations.quaternion_multiply(ori, self.model_transforms[m])
+
+ # rotate bbox dimensions if necessary
+ # (this only works properly if model_transform is in 90 degree angles)
+ dims = rotate_vector(vector=self.dimensions[m], quaternion=self.model_transforms[m])
+ dims = np.absolute(dims)
+ dims = tuple(dims)

pose_msg = PoseStamped()
pose_msg.header = image_msg.header
CONVERT_SCALE_CM_TO_METERS = 100
pose_msg.pose.position.x = loc[0] / CONVERT_SCALE_CM_TO_METERS
pose_msg.pose.position.y = loc[1] / CONVERT_SCALE_CM_TO_METERS
pose_msg.pose.position.z = loc[2] / CONVERT_SCALE_CM_TO_METERS
- pose_msg.pose.orientation.x = ori[0]
- pose_msg.pose.orientation.y = ori[1]
- pose_msg.pose.orientation.z = ori[2]
- pose_msg.pose.orientation.w = ori[3]
+ pose_msg.pose.orientation.x = transformed_ori[0]
+ pose_msg.pose.orientation.y = transformed_ori[1]
+ pose_msg.pose.orientation.z = transformed_ori[2]
+ pose_msg.pose.orientation.w = transformed_ori[3]

# Publish
self.pubs[m].publish(pose_msg)
- self.pub_dimension[m].publish(str(self.dimensions[m]))
+ self.pub_dimension[m].publish(str(dims))

# Add to Detection3DArray
detection = Detection3D()
@@ -267,9 +285,9 @@ class DopeNode(object):
hypothesis.pose.pose = pose_msg.pose
detection.results.append(hypothesis)
detection.bbox.center = pose_msg.pose
- detection.bbox.size.x = self.dimensions[m][0] / CONVERT_SCALE_CM_TO_METERS
- detection.bbox.size.y = self.dimensions[m][1] / CONVERT_SCALE_CM_TO_METERS
- detection.bbox.size.z = self.dimensions[m][2] / CONVERT_SCALE_CM_TO_METERS
+ detection.bbox.size.x = dims[0] / CONVERT_SCALE_CM_TO_METERS
+ detection.bbox.size.y = dims[1] / CONVERT_SCALE_CM_TO_METERS
+ detection.bbox.size.z = dims[2] / CONVERT_SCALE_CM_TO_METERS
detection_array.detections.append(detection)

# Draw the cube
@@ -362,6 +380,15 @@ class DopeNode(object):
self.pub_markers.publish(markers)


+ def rotate_vector(vector, quaternion):
+     q_conj = tf.transformations.quaternion_conjugate(quaternion)
+     vector = np.array(vector, dtype='float64')
+     vector = np.append(vector, [0.0])
+     vector = tf.transformations.quaternion_multiply(q_conj, vector)
+     vector = tf.transformations.quaternion_multiply(vector, quaternion)
+     return vector[:3]


def main():
"""Main routine to run DOPE"""

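
The commit also rotates the published bounding-box dimensions with the same transform, and the in-diff comment notes this is only exact when model_transforms rotates by multiples of 90 degrees. A short editor's sketch of why, not part of the commit; the helper repeats the rotate_vector added above, and the rotation matrix and dimensions are made-up illustration values:

    import numpy as np
    import tf.transformations

    def rotate_vector(vector, quaternion):
        # Same math as the rotate_vector added above: sandwich product q* . (v, 0) . q
        q_conj = tf.transformations.quaternion_conjugate(quaternion)
        vector = np.append(np.array(vector, dtype='float64'), [0.0])
        vector = tf.transformations.quaternion_multiply(q_conj, vector)
        vector = tf.transformations.quaternion_multiply(vector, quaternion)
        return vector[:3]

    # A 90-degree rotation about Z, converted the same way as model_transforms
    M = np.array([[0, -1, 0, 0],
                  [1,  0, 0, 0],
                  [0,  0, 1, 0],
                  [0,  0, 0, 1]], dtype='float64')
    q = tf.transformations.quaternion_from_matrix(M)

    dims = (16.4, 21.3, 7.2)                       # hypothetical x/y/z extents in cm
    rotated = np.absolute(rotate_vector(dims, q))  # approx. [21.3, 16.4, 7.2]
    # For a 90-degree transform the extents simply swap axes; for an arbitrary
    # angle the axis-aligned extents would no longer bound the rotated mesh,
    # hence the "90 degree angles" caveat in the code comment.
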
1 change: 1 addition & 0 deletions package.xml
@@ -57,6 +57,7 @@
<depend>rospy</depend>
<depend>sensor_msgs</depend>
<depend>std_msgs</depend>
+ <depend>tf</depend>
<depend>vision_msgs</depend>
<depend>visualization_msgs</depend>

