diff --git a/get_obj_dist/src/object_to_distance.py b/get_obj_dist/src/object_to_distance.py index b42a91ad460c82836703cd0bd1966525b2c48cac..b684af68b14353c3022d3d93f53b7cd0639414ed 100755 --- a/get_obj_dist/src/object_to_distance.py +++ b/get_obj_dist/src/object_to_distance.py @@ -28,12 +28,14 @@ class obj_dist: self.moving_average = [3] * size_of_moving_avg self.set_marker_array(5, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL') - # Initializing ROS Topics + # Initialize publishers self.dist_pub = rospy.Publisher('/obj_to_dist/human_distance', Image, queue_size=10) self.render_pub = rospy.Publisher('/obj_to_dist/show_people_marker_array', MarkerArray, queue_size=10) + # Initialize subscribers self.bbx_sub = message_filters.Subscriber('/human_detected_image/bounding_box', box_list) self.human_image_sub = message_filters.Subscriber('/human_detected_image/image', Image) + # self.depth_image_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image) self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image) ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub, @@ -57,7 +59,8 @@ class obj_dist: def get_human_distance(self, cv_depth, cv_image, box, person_id): roi_depth = cv_depth[box.ymin:box.ymax, box.xmin:box.xmax] - filtered_depth, _size = filter_background(roi_depth) + # filtered_depth, _size = filter_background(roi_depth) + filtered_depth, _size = dynamic_background(roi_depth) if _size: self.moving_average.pop() self.moving_average.insert(0, filtered_depth.sum() / _size / 1000.0) @@ -79,14 +82,13 @@ class obj_dist: return -1, -1, -1 def set_marker_array(self, size, frame_id, package): - # self.marker_array.markers = [Marker()] * size - for i in range(0, size): + for counter in range(0, size): self.marker_array.markers.append(Marker()) - self.marker_array.markers[i].header.frame_id = frame_id - self.marker_array.markers[i].lifetime = rospy.Duration() - 
self.marker_array.markers[i].id = i - self.marker_array.markers[i].type = Marker.MESH_RESOURCE - self.marker_array.markers[i].mesh_resource = package + self.marker_array.markers[counter].header.frame_id = frame_id + self.marker_array.markers[counter].lifetime = rospy.Duration() + self.marker_array.markers[counter].id = counter + self.marker_array.markers[counter].type = Marker.MESH_RESOURCE + self.marker_array.markers[counter].mesh_resource = package def set_model_coordinates(self, cood, person_id): self.marker_array.markers[person_id].header.stamp = rospy.Time.now() diff --git a/get_obj_dist/src/utils.py b/get_obj_dist/src/utils.py index 77a08112d0f50ba2d05f4c4138409f755abbb482..962d4a0359e5eb1d67f40125a99f63246c6fb516 100644 --- a/get_obj_dist/src/utils.py +++ b/get_obj_dist/src/utils.py @@ -1,12 +1,17 @@ import numpy as np +import matplotlib.pyplot as plt + +_noise_limit = 100 +_diff = 1000 +_bin = [i * _diff for i in range(0, 11)] # Really basic way of separating foreground and background. -def filter_background(roi): +def filter_background(roi, max_depth=8000): # Anything further than 8000mm, we consider it as background # Anything less than 100mm is consider noise - ret_val = np.ma.masked_greater(roi, 8000) - ret_val = np.ma.masked_less(ret_val, 100) + ret_val = np.ma.masked_greater(roi, max_depth) + ret_val = np.ma.masked_less(ret_val, _noise_limit) unique, counts = np.unique(ret_val.mask, return_counts=True) _dict = dict(zip(unique, counts)) if False in _dict: @@ -15,6 +20,19 @@ def filter_background(roi): return ret_val, 0 +def dynamic_background(roi): + # Anything less than 100mm is definitely noise + # roi = np.ma.masked_less(roi, 100) + roi_1d = roi.flatten() + hist, bins = np.histogram(roi, bins=_bin, density=True) + max_bin = hist.argmax() + 1 + + # plt.hist(roi_1d, bins=_bin, density=True) + # plt.title('Hello') + # plt.show() + return filter_background(roi, max_bin * _diff) + + def get_x_in_meters(xmin, xmax, z_i): # Tune z_c to get better value lol. 
# 500 is literally randomly chosen lol diff --git a/human_detection/launch/hdm_full.launch b/human_detection/launch/hdm_full.launch new file mode 100644 index 0000000000000000000000000000000000000000..a0d3db1c9fb62fad5f6fef2d54ebf00b918fe337 --- /dev/null +++ b/human_detection/launch/hdm_full.launch @@ -0,0 +1,4 @@ +<launch> + <node pkg="human_detection" type="webcam.py" name="webcam"/> + <node pkg="get_obj_dist" type="object_to_distance.py" name="object_to_distance"/> +</launch> \ No newline at end of file diff --git a/human_detection/src/webcam.py b/human_detection/src/webcam.py index f26ca35b6482da22ccf6df14c6f9aba2b4637fe7..ce24049c107ea3f667487757be2c736035249838 100755 --- a/human_detection/src/webcam.py +++ b/human_detection/src/webcam.py @@ -147,7 +147,6 @@ class human_detector: cvt_msg = self.bridge.cv2_to_imgmsg(cv_image, "bgr8") cvt_msg.header.stamp = rospy.Time.now() self.image_pub.publish(cvt_msg) - # print(cvt_msg.header) except CvBridgeError as e: print(e) diff --git a/human_detection_demo.mp4 b/human_detection_demo.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..57d2884ed18681a37ae14fefb84453876510cbf3 Binary files /dev/null and b/human_detection_demo.mp4 differ diff --git a/launch/HDM_full.launch b/launch/HDM_full.launch deleted file mode 100644 index a1981e8adba9a70cfeb160d8d05919bf4177fb87..0000000000000000000000000000000000000000 --- a/launch/HDM_full.launch +++ /dev/null @@ -1,5 +0,0 @@ -<launch> - <node pkg="human_detection" type="webcam" name="webcam"/> - <node pkg="get_obj_dist" type="object_to_distance" name="object_to_distance"/> - -</launch> \ No newline at end of file