Skip to content
Snippets Groups Projects
Commit 0d3d7978 authored by Fernando Herrero's avatar Fernando Herrero
Browse files

Set relative topics and add remaps to launch files

parent 8c36696f
No related branches found
No related tags found
No related merge requests found
...@@ -2,14 +2,28 @@ ...@@ -2,14 +2,28 @@
<!-- --> <!-- -->
<launch> <launch>
<arg name="node_name" default="objetc_to_distance"/> <arg name="node_name" default="person_bbox_to_distance"/>
<arg name="output" default="screen"/> <arg name="camera_in_topic" default="/camera/color/image_raw"/>
<arg name="bbox_in_topic" default="/human_detection/bounding_box"/>
<arg name="detection_image_in" default="/human_detection/detection_image"/>
<arg name="depth_image_in" default="/camera/aligned_depth_to_color/image_raw"/>
<arg name="detection_distance_image_out" default="/$(arg node_name)/detection_distance_image"/>
<arg name="people_marker_array_out" default="/$(arg node_name)/people_marker_array"/>
<arg name="output" default="screen"/>
<node pkg="get_obj_dist" <node pkg="get_obj_dist"
name="$(arg node_name)" name="$(arg node_name)"
type="object_to_distance.py" type="object_to_distance.py"
output="$(arg output)" output="$(arg output)"
args=""> args="">
<remap from="~camera/color/image_raw" to="$(arg camera_in_topic)"/>
<remap from="~detection_image_in" to="$(arg detection_image_in)"/>
<remap from="~bounding_box_in" to="$(arg bbox_in_topic)"/>
<remap from="~depth_image_in" to="$(arg depth_image_in)"/>
<remap from="~depth_image_in" to="$(arg depth_image_in)"/>
<remap from="~detection_distance_image_out" to="$(arg detection_distance_image_out)"/>
<remap from="~people_marker_array_out" to="$(arg people_marker_array_out)"/>
</node> </node>
</launch> </launch>
...@@ -30,13 +30,13 @@ class obj_dist: ...@@ -30,13 +30,13 @@ class obj_dist:
self.set_marker_array(marker_size, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL') self.set_marker_array(marker_size, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL')
# Initialize publishers # Initialize publishers
self.dist_pub = rospy.Publisher('/obj_to_dist/human_distance', Image, queue_size=10) self.dist_pub = rospy.Publisher('~detection_distance_image_out', Image, queue_size=10)
self.render_pub = rospy.Publisher('/obj_to_dist/show_people_marker_array', MarkerArray, queue_size=10) self.render_pub = rospy.Publisher('~people_marker_array_out', MarkerArray, queue_size=10)
# Initialize subscribers # Initialize subscribers
self.bbx_sub = message_filters.Subscriber('/human_detected_image/bounding_box', box_list) self.bbx_sub = message_filters.Subscriber('~bounding_box_in', box_list)
self.human_image_sub = message_filters.Subscriber('/human_detected_image/image', Image) self.human_image_sub = message_filters.Subscriber('~detection_image_in', Image)
self.depth_image_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image) self.depth_image_sub = message_filters.Subscriber('~depth_image_in', Image)
# self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image) # self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image)
ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub, ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub,
...@@ -121,7 +121,7 @@ class obj_dist: ...@@ -121,7 +121,7 @@ class obj_dist:
def main(args): def main(args):
rospy.init_node('obj_dist', anonymous=True) rospy.init_node('obj_dist', anonymous=True)
print("object_to_distance.py is running") #print("object_to_distance.py is running")
_ = obj_dist() _ = obj_dist()
rospy.spin() rospy.spin()
cv2.destroyAllWindows() cv2.destroyAllWindows()
......
...@@ -3,6 +3,10 @@ ...@@ -3,6 +3,10 @@
<launch> <launch>
<arg name="node_name" default="human_detection"/> <arg name="node_name" default="human_detection"/>
<arg name="camera_in_topic" default="/camera/color/image_raw"/>
<arg name="image_out_topic" default="/$(arg node_name)/detection_image"/>
<arg name="bbox_out_topic" default="/$(arg node_name)/bounding_box"/>
<arg name="virtual_env_path" default="$(env HOME)/iri-lab/virtualenv/raadical_venv"/> <arg name="virtual_env_path" default="$(env HOME)/iri-lab/virtualenv/raadical_venv"/>
<arg name="output" default="screen"/> <arg name="output" default="screen"/>
...@@ -11,6 +15,9 @@ ...@@ -11,6 +15,9 @@
type="human_detector.py" type="human_detector.py"
output="$(arg output)" output="$(arg output)"
args="--venv $(arg virtual_env_path)"> args="--venv $(arg virtual_env_path)">
<remap from="~camera/color/image_raw" to="$(arg camera_in_topic)"/>
<remap from="~detection_image" to="$(arg image_out_topic)"/>
<remap from="~bounding_box" to="$(arg bbox_out_topic)"/>
</node> </node>
</launch> </launch>
...@@ -8,13 +8,11 @@ ...@@ -8,13 +8,11 @@
</include> </include>
<include file="$(find human_detection)/launch/node.launch"> <include file="$(find human_detection)/launch/node.launch">
<arg name="node_name" value="human_detection"/>
<arg name="virtual_env_path" value="$(arg virtual_env_path)"/> <arg name="virtual_env_path" value="$(arg virtual_env_path)"/>
<arg name="output" value="$(arg output)"/> <arg name="output" value="$(arg output)"/>
</include> </include>
<include file="$(find get_obj_dist)/launch/node.launch"> <include file="$(find get_obj_dist)/launch/node.launch">
<arg name="node_name" value="object_to_distance"/>
<arg name="output" value="$(arg output)"/> <arg name="output" value="$(arg output)"/>
</include> </include>
......
...@@ -116,7 +116,7 @@ Visualization Manager: ...@@ -116,7 +116,7 @@ Visualization Manager:
Value: true Value: true
- Class: rviz/Image - Class: rviz/Image
Enabled: true Enabled: true
Image Topic: /human_detected_image/image Image Topic: /human_detection/detection_image
Max Value: 1 Max Value: 1
Median window: 5 Median window: 5
Min Value: 0 Min Value: 0
...@@ -128,7 +128,7 @@ Visualization Manager: ...@@ -128,7 +128,7 @@ Visualization Manager:
Value: true Value: true
- Class: rviz/Image - Class: rviz/Image
Enabled: true Enabled: true
Image Topic: /obj_to_dist/human_distance Image Topic: /person_bbox_to_distance/detection_distance_image
Max Value: 1 Max Value: 1
Median window: 5 Median window: 5
Min Value: 0 Min Value: 0
...@@ -140,7 +140,7 @@ Visualization Manager: ...@@ -140,7 +140,7 @@ Visualization Manager:
Value: true Value: true
- Class: rviz/MarkerArray - Class: rviz/MarkerArray
Enabled: true Enabled: true
Marker Topic: /obj_to_dist/show_people_marker_array Marker Topic: /person_bbox_to_distance/people_marker_array
Name: MarkerArray Name: MarkerArray
Namespaces: Namespaces:
"": true "": true
...@@ -189,10 +189,10 @@ Visualization Manager: ...@@ -189,10 +189,10 @@ Visualization Manager:
Invert Z Axis: false Invert Z Axis: false
Name: Current View Name: Current View
Near Clip Distance: 0.009999999776482582 Near Clip Distance: 0.009999999776482582
Pitch: 1.5697963237762451 Pitch: 1.3447965383529663
Target Frame: <Fixed Frame> Target Frame: <Fixed Frame>
Value: Orbit (rviz) Value: Orbit (rviz)
Yaw: 3.145402669906616 Yaw: 3.165403366088867
Saved: ~ Saved: ~
Window Geometry: Window Geometry:
DetectionDistanceImage: DetectionDistanceImage:
......
...@@ -62,7 +62,7 @@ detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') ...@@ -62,7 +62,7 @@ detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected # Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0') num_detections = detection_graph.get_tensor_by_name('num_detections:0')
...@@ -70,17 +70,18 @@ num_detections = detection_graph.get_tensor_by_name('num_detections:0') ...@@ -70,17 +70,18 @@ num_detections = detection_graph.get_tensor_by_name('num_detections:0')
class human_detector: class human_detector:
def __init__(self): def __init__(self):
self.bridge = CvBridge() self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.callback) self.image_sub = rospy.Subscriber("~camera/color/image_raw", Image, self.callback)
self.image_pub = rospy.Publisher("/human_detected_image/image", Image, queue_size=10) self.image_pub = rospy.Publisher("~detection_image", Image, queue_size=10)
self.bbx_pub = rospy.Publisher("/human_detected_image/bounding_box", box_list, queue_size=10) self.bbx_pub = rospy.Publisher("~bounding_box", box_list, queue_size=10)
def callback(self, data): def callback(self, data):
t0 = time.time() t0 = time.time()
try: try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e: except CvBridgeError as e:
print(e) rospy.logerr("CvBridgeError: " +e)
#print(e)
return return
frame_expanded = np.expand_dims(cv_image, axis=0) frame_expanded = np.expand_dims(cv_image, axis=0)
...@@ -155,7 +156,8 @@ class human_detector: ...@@ -155,7 +156,8 @@ class human_detector:
cvt_msg.header.stamp = data.header.stamp cvt_msg.header.stamp = data.header.stamp
self.image_pub.publish(cvt_msg) self.image_pub.publish(cvt_msg)
except CvBridgeError as e: except CvBridgeError as e:
print(e) rospy.logerr("CvBridgeError: " + e)
#print(e)
# Append Accuracy # Append Accuracy
# if scores[0][0] > 0.01: # if scores[0][0] > 0.01:
...@@ -163,13 +165,15 @@ class human_detector: ...@@ -163,13 +165,15 @@ class human_detector:
def main(): def main():
_ = human_detector()
rospy.init_node('human_detector', anonymous=True) rospy.init_node('human_detector', anonymous=True)
_ = human_detector()
rospy.spin() rospy.spin()
# graph_average_percentage(prediction_level_list) # graph_average_percentage(prediction_level_list)
if __name__ == '__main__': if __name__ == '__main__':
print('Current Tensorflow Version: ' + str(tf.__version__)) #print('Current Tensorflow Version: ' + str(tf.__version__))
rospy.set_param('use_sim_time', 'True') str = "Current Tensorflow Version: " + str(tf.__version__)
rospy.loginfo(str)
#rospy.set_param('use_sim_time', 'True')
main() main()
0% Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment