Commit 8c26e96d authored by Jerry Kuo, committed by jerry

Add Display Marker

parent 340a5287
@@ -8,7 +8,7 @@ import message_filters
 sys.path.append('/home/jerry/Documents/workspaces/ROS_human_detection/src/human_detection')
 
-from sensor_msgs import point_cloud2 as pcl
+from visualization_msgs.msg import Marker
 from sensor_msgs.msg import Image, PointCloud2
 from cv_bridge import CvBridge, CvBridgeError
 from human_detection.msg import bounding_box, box_list
@@ -19,12 +19,12 @@ font_scale = 0.75
 font_color = (255, 255, 255)
 line_type = 2
 
 
 class obj_dist:
     def __init__(self):
         # Initializing ROS Topics
         self.bridge = CvBridge()
         self.dist_pub = rospy.Publisher('/obj_to_dist/human_distance', Image, queue_size=10)
+        self.render_pub = rospy.Publisher('/obj_to_dist/render_human', Marker, queue_size=10)
 
         # self.bbx_sub = rospy.Subscriber('/human_detected_image/bounding_box', box_list, self.yea)
         # self.human_image_sub = rospy.Subscriber('/human_detected_image/image', Image, self.yea1)
@@ -33,30 +33,72 @@ class obj_dist:
         self.bbx_sub = message_filters.Subscriber('/human_detected_image/bounding_box', box_list)
         self.human_image_sub = message_filters.Subscriber('/human_detected_image/image', Image)
         self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image)
+        self.pcl_sub = message_filters.Subscriber('/camera/depth_registered/points', PointCloud2)
 
-        ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub, self.depth_image_sub],
+        ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub,
+                                                          self.depth_image_sub, self.pcl_sub],
                                                          queue_size=10,
                                                          slop=1)
         ts.registerCallback(self.callback)
 
-    def callback(self, bbx, image, depth):
+    def callback(self, bbx, image, depth, point_cloud):
         if bbx.length:
             cv_depth = self.bridge.imgmsg_to_cv2(depth, 'passthrough')
             cv_image = self.bridge.imgmsg_to_cv2(image, 'bgr8')
 
             for box in bbx.people_list:
-                roi_depth = cv_depth[box.xmin:box.xmax, box.ymin:box.ymax]
-
-                filtered_depth = roi_depth[roi_depth <= 2900]
-                _size = len(filtered_depth)
-
-                avg_distance = filtered_depth.sum() / _size / 1000
-                cv2.putText(cv_image, '{} meters'.format(avg_distance),
-                            (box.xmin, box.ymax-100),
-                            font,
-                            font_scale,
-                            font_color,
-                            line_type)
-                self.dist_pub.publish(self.bridge.cv2_to_imgmsg(cv_image))
+                cood = self.get_human_distance(cv_depth, cv_image, box)
+                self.render_human(cood, point_cloud)
+
+    def get_human_distance(self, cv_depth, cv_image, box):
+        roi_depth = cv_depth[box.xmin:box.xmax, box.ymin:box.ymax]
+
+        filtered_depth = roi_depth[roi_depth <= 2900]
+        _size = len(filtered_depth)
+
+        avg_distance = filtered_depth.sum() / _size / 1000
+        x_meter = self.get_x_in_meters(box.xmin, box.xmax, avg_distance)
+        cv2.putText(cv_image, '{} meters'.format(avg_distance),
+                    (box.xmin, box.ymax - 100),
+                    font,
+                    font_scale,
+                    font_color,
+                    line_type)
+        self.dist_pub.publish(self.bridge.cv2_to_imgmsg(cv_image))
+        return x_meter, 0, avg_distance
+
+    def render_human(self, cood, point_cloud):
+        marker = Marker()
+        marker.header.frame_id = 'camera_color_optical_frame'
+        marker.header.stamp = rospy.Time.now()
+
+        marker.id = 0
+        marker.action = Marker.ADD
+        marker.lifetime = rospy.Duration()
+        marker.type = Marker.SPHERE
+
+        marker.pose.position.x = cood[0]
+        marker.pose.position.y = cood[1]
+        marker.pose.position.z = cood[2]
+
+        marker.scale.x = 0.5
+        marker.scale.y = 0.5
+        marker.scale.z = 0.5
+
+        marker.pose.orientation.x = 0
+        marker.pose.orientation.y = 0
+        marker.pose.orientation.z = 0
+
+        marker.color.a = 1.0
+        marker.color.r = 0.0
+        marker.color.g = 1.0
+        marker.color.b = 0.0
+
+        self.render_pub.publish(marker)
+
+    def get_x_in_meters(self, xmin, xmax, z_i):
+        # Tune z_c to get better value lol.
+        z_c = 100
+        ret_val = (z_i * (xmax + xmin - 600.0)) / (2 * z_c)
+        return ret_val
 
 
 def main(args):
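A note on the new render_human method: a Marker's pose.orientation is a quaternion, and the commit sets only its x, y and z components, leaving w at its default of 0; an all-zero quaternion is not a valid rotation, and RViz typically warns about it and falls back to identity. If that warning matters, a one-line addition (an assumption here, not part of this commit) would be:

marker.pose.orientation.w = 1.0  # identity rotation; avoids the zero-quaternion warning

To see the published sphere, add a Marker display in RViz subscribed to /obj_to_dist/render_human, with a fixed frame that connects to camera_color_optical_frame.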
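For context on get_x_in_meters: the expression (z_i * (xmax + xmin - 600.0)) / (2 * z_c) is algebraically the pinhole back-projection x = (u - c_x) * z / f, with the bounding-box centre (xmin + xmax) / 2 as u, c_x fixed at 300 pixels (half of the 600-pixel screen_width that appears in the constants further down), and the hand-tuned z_c standing in for the focal length. A calibrated sketch of the same idea, assuming fx and cx are taken from the camera's sensor_msgs/CameraInfo (the function and parameter names are illustrative, not part of the repository):

def get_x_in_meters_calibrated(xmin, xmax, z_i, fx, cx):
    # Pinhole model: x = (u - cx) * z / fx, with u the horizontal box centre in pixels.
    u = (xmin + xmax) / 2.0
    return (u - cx) * z_i / fx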
 import cv2
 
 # Number of classes the object detector can identify
@@ -9,9 +8,9 @@ person_id = 1
 text_position = (10, 450)
 font = cv2.FONT_HERSHEY_SIMPLEX
 font_scale = 0.75
-font_color = (255,255,255)
+font_color = (255, 255, 255)
 line_type = 2
 
 run_time_list = []
 prediction_level_list = []
 screen_width = 600
 screen_height = 480
\ No newline at end of file
 #!/usr/bin/env python
 import time
 import sys
 import os
 import rospy
-from setuptools import setup, find_packages
 import tensorflow as tf
 
-sys.path.append('/home/jerry/Documents/workspaces/ROS_human_detection/src/human_detection')
+sys.path.append('/home/jerry/Documents/workspaces/human_detection/src/ROS_human_detection/human_detection/src')
 
 # For performance analysis timing, import time.
 from analysis_tools.data_grapher import *
@@ -21,7 +20,7 @@ from object_detection.utils import label_map_util
 from object_detection.utils import visualization_utils as vis_util
 
 # Defining Paths
-CWD_PATH = '/home/jerry/Documents/workspaces/ROS_human_detection/src/human_detection/src'
+CWD_PATH = '/home/jerry/Documents/workspaces/human_detection/src/ROS_human_detection/human_detection/src'
 LABEL_MAPS = ['human_label_map.pbtxt', 'mscoco_label_map.pbtxt']
 MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
 PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')
@@ -61,15 +60,15 @@ detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
 # Number of objects detected
 num_detections = detection_graph.get_tensor_by_name('num_detections:0')
 
 # time_origin = time.time()
 
 
 class human_detector:
     def __init__(self):
         self.bridge = CvBridge()
-        self.image_pub = rospy.Publisher(
-            "/human_detected_image/image", Image, queue_size=10)
+        self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.callback)
+        self.image_pub = rospy.Publisher("/human_detected_image/image", Image, queue_size=10)
         self.bbx_pub = rospy.Publisher('/human_detected_image/bounding_box', box_list, queue_size=10)
-        self.image_sub = rospy.Subscriber(
-            "/camera/color/image_raw", Image, self.callback)
 
     def callback(self, data):
         t0 = time.time()
@@ -140,7 +139,7 @@ class human_detector:
             bbx_list.people_list.append(coordinates)
             counter += 1
 
-        print(list_length)
+        # print(list_length)
         bbx_list.length = list_length
         self.bbx_pub.publish(bbx_list)
@@ -167,7 +166,7 @@ def main():
 if __name__ == '__main__':
     print('Current Tensorflow Version: ' + str(tf.__version__))
-    # ospy.set_param('use_sim_time', 'True')
+    rospy.set_param('use_sim_time', 'True')
     main()
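For orientation, the tensors fetched by name above are normally evaluated together once per frame inside the callback. A minimal sketch of that inference step, assuming the standard ssd_mobilenet_v1_coco export (so the graph also exposes image_tensor:0, detection_boxes:0 and detection_scores:0) and a tf.Session named sess bound to detection_graph; none of these names beyond the ones visible in the diff are confirmed by the snippet:

import numpy as np

def run_inference(sess, detection_graph, frame_bgr):
    # Look up the input and output tensors by their conventional export names (assumed).
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num = detection_graph.get_tensor_by_name('num_detections:0')

    # The detector expects a batch dimension: shape (1, height, width, 3).
    image_expanded = np.expand_dims(frame_bgr, axis=0)
    return sess.run([boxes, scores, classes, num],
                    feed_dict={image_tensor: image_expanded})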