Unverified commit 22113e8f authored by Jerry Kuo, committed by GitHub

Merge pull request #2 from SuperKuooo/complex_scene_detection

Complex scene detection
parents dadb0bdc 5cbd88e3
-.idea
+/.vscode
*.bag
*.pyc
+.idea
.catkin_workspace
# *CMakeLists.txt
\ No newline at end of file
#!/usr/bin/env python
-import numpy as np
import sys
import cv2
import rospy
@@ -17,6 +16,7 @@ font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.75
font_color = (255, 255, 255)
line_type = 2
+marker_size = 5
size_of_moving_avg = 15
@@ -25,16 +25,19 @@ class obj_dist:
        # Declare functional variables
        self.bridge = CvBridge()
        self.marker_array = MarkerArray()
-        self.moving_average = [3] * size_of_moving_avg
-        self.set_marker_array(5, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL')
-        # Initializing ROS Topics
+        self.moving_average = [[3] * size_of_moving_avg] * marker_size
+        self.set_marker_array(marker_size, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL')

+        # Initialize publishers
        self.dist_pub = rospy.Publisher('/obj_to_dist/human_distance', Image, queue_size=10)
        self.render_pub = rospy.Publisher('/obj_to_dist/show_people_marker_array', MarkerArray, queue_size=10)

+        # Initialize subscribers
        self.bbx_sub = message_filters.Subscriber('/human_detected_image/bounding_box', box_list)
        self.human_image_sub = message_filters.Subscriber('/human_detected_image/image', Image)
-        self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image)
+        self.depth_image_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
+        # self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image)

        ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub,
                                                          self.depth_image_sub],
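One caveat in the new per-person history: [[3] * size_of_moving_avg] * marker_size multiplies the outer list by reference, so every person_id index points at the same inner list and the moving averages end up shared between people. A minimal sketch of that behaviour, plus a list-comprehension form that allocates independent buffers (illustrative only, not part of this commit):

size_of_moving_avg = 15
marker_size = 5

# Multiplying the outer list copies references, so all rows alias one history.
shared = [[3] * size_of_moving_avg] * marker_size
shared[0].insert(0, 1.23)
shared[0].pop()
print(shared[1][0])        # 1.23 -- person 1's buffer changed as well

# A comprehension allocates an independent buffer per tracked person.
independent = [[3] * size_of_moving_avg for _ in range(marker_size)]
independent[0].insert(0, 1.23)
independent[0].pop()
print(independent[1][0])   # still 3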
@@ -43,6 +46,7 @@ class obj_dist:
        ts.registerCallback(self.callback)

    def callback(self, bbx, image, depth):
+        print('working lol')
        if bbx.length:
            cv_depth = self.bridge.imgmsg_to_cv2(depth, 'passthrough')
            cv_image = self.bridge.imgmsg_to_cv2(image, 'bgr8')
@@ -57,12 +61,13 @@ class obj_dist:
    def get_human_distance(self, cv_depth, cv_image, box, person_id):
        roi_depth = cv_depth[box.ymin:box.ymax, box.xmin:box.xmax]
-        filtered_depth, _size = filter_background(roi_depth)
+        # filtered_depth, _size = filter_background(roi_depth)
+        filtered_depth, _size = dynamic_background(roi_depth)
        if _size:
-            self.moving_average.pop()
-            self.moving_average.insert(0, filtered_depth.sum() / _size / 1000.0)
-            current_avg = sum(self.moving_average) / size_of_moving_avg
+            self.moving_average[person_id].pop()
+            self.moving_average[person_id].insert(0, filtered_depth.sum() / _size / 1000.0)
+            current_avg = sum(self.moving_average[person_id]) / size_of_moving_avg
            x_meter = get_x_in_meters(box.xmin, box.xmax, current_avg)
            cv2.putText(cv_image, '{} meters / Person {}'.format(round(current_avg, 2), person_id),
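For context, the number drawn on the frame is just the mean of the unmasked depth pixels inside the bounding box, converted from millimetres to metres and then smoothed over the last size_of_moving_avg frames. A self-contained sketch of that averaging step on a synthetic ROI (values are made up for illustration):

import numpy as np

# Toy 3x3 depth ROI in millimetres: 50 is near-field noise, 9000 is background.
roi = np.array([[  50, 2100, 2000],
                [2050, 9000, 1950],
                [2000, 2100, 1900]])

# Same masking idea as filter_background(): drop background and noise pixels.
masked = np.ma.masked_greater(roi, 8000)
masked = np.ma.masked_less(masked, 100)

valid = masked.count()                       # plays the role of _size
distance_m = masked.sum() / valid / 1000.0   # filtered_depth.sum() / _size / 1000.0
print(round(distance_m, 2))                  # about 2.01 m for this toy ROI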
@@ -79,14 +84,13 @@ class obj_dist:
            return -1, -1, -1

    def set_marker_array(self, size, frame_id, package):
-        # self.marker_array.markers = [Marker()] * size
-        for i in range(0, size):
+        for counter in range(0, size):
            self.marker_array.markers.append(Marker())
-            self.marker_array.markers[i].header.frame_id = frame_id
-            self.marker_array.markers[i].lifetime = rospy.Duration()
-            self.marker_array.markers[i].id = i
-            self.marker_array.markers[i].type = Marker.MESH_RESOURCE
-            self.marker_array.markers[i].mesh_resource = package
+            self.marker_array.markers[counter].header.frame_id = frame_id
+            self.marker_array.markers[counter].lifetime = rospy.Duration()
+            self.marker_array.markers[counter].id = counter
+            self.marker_array.markers[counter].type = Marker.MESH_RESOURCE
+            self.marker_array.markers[counter].mesh_resource = package

    def set_model_coordinates(self, cood, person_id):
        self.marker_array.markers[person_id].header.stamp = rospy.Time.now()
@@ -124,5 +128,5 @@ def main(args):
if __name__ == '__main__':
-    # rospy.set_param("use_sim_time", 'true')
+    rospy.set_param("use_sim_time", 'true')
    main(sys.argv)
File moved
import numpy as np
+import matplotlib.pyplot as plt

+_noise_limit = 100
+_diff = 1000
+_bin = [i * _diff for i in range(0, 11)]


# Really basic way of separating foreground and background.
-def filter_background(roi):
+def filter_background(roi, max_depth=8000):
    # Anything further than 8000mm, we consider it as background
    # Anything less than 100mm is considered noise
-    ret_val = np.ma.masked_greater(roi, 8000)
-    ret_val = np.ma.masked_less(ret_val, 100)
+    ret_val = np.ma.masked_greater(roi, max_depth)
+    ret_val = np.ma.masked_less(ret_val, _noise_limit)
    unique, counts = np.unique(ret_val.mask, return_counts=True)
    _dict = dict(zip(unique, counts))
    if False in _dict:
@@ -15,6 +20,19 @@ def filter_background(roi):
    return ret_val, 0


+def dynamic_background(roi):
+    # Anything less than 100mm is sure noise
+    # roi = np.ma.masked_less(roi, 100)
+    roi_1d = roi.flatten()
+    hist, bins = np.histogram(roi_1d, bins=_bin, density=True)
+    max_bin = hist.argmax() + 1
+
+    # plt.hist(roi_1d, bins=_bin, density=True)
+    # plt.title('Hello')
+    # plt.show()
+    return filter_background(roi, max_bin * _diff)
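dynamic_background() chooses the background cutoff per frame instead of the fixed 8000 mm: it histograms the ROI depths into 1 m bins from 0 to 10 m, assumes the most populated bin is the person, and masks everything beyond that bin's upper edge. A quick worked example with made-up depths:

import numpy as np

_diff = 1000
_bin = [i * _diff for i in range(0, 11)]    # 0, 1000, ..., 10000 mm

# Fake ROI: most pixels around 2.3-2.5 m (the person), a few wall pixels near 6 m.
roi_1d = np.array([2300, 2400, 2350, 2500, 2450, 6100, 6200, 6150])

hist, bins = np.histogram(roi_1d, bins=_bin, density=True)
max_bin = hist.argmax() + 1                 # argmax is 2 (the 2000-3000 mm bin)
print(max_bin * _diff)                      # 3000 -> wall pixels beyond 3 m get masked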
def get_x_in_meters(xmin, xmax, z_i):
    # Tune z_c to get better value lol.
    # 500 is literally randomly chosen lol
...
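The body of get_x_in_meters() is cut off in this view, so the snippet below is only a guess at how a lateral offset is usually recovered from a bounding box plus a depth: back-project the horizontal centre of the box through a pinhole model. The focal length and principal point (f_x, c_x) are stand-in constants, not values from this repository, and would need the same kind of tuning the z_c comment hints at:

def x_from_box(xmin, xmax, z_m, f_x=600.0, c_x=320.0):
    # Hypothetical pinhole back-projection, not the repository's implementation.
    u_center = (xmin + xmax) / 2.0
    return (u_center - c_x) * z_m / f_x

# A box spanning x = 400..480 px at 2.0 m sits roughly 0.4 m right of the optical axis.
print(round(x_from_box(400, 480, 2.0), 2))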
<launch>
<node pkg="human_detection" type="webcam.py" name="webcam"/>
<node pkg="get_obj_dist" type="object_to_distance.py" name="object_to_distance"/>
</launch>
\ No newline at end of file
@@ -8,43 +8,47 @@ from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError


class image_converter:

    def __init__(self):
-        self.image_pub = rospy.Publisher("image_topic_2",Image, queue_size=10)
+        self.image_pub = rospy.Publisher("image_topic_2", Image, queue_size=10)
        self.bridge = CvBridge()
-        self.image_sub = rospy.Subscriber("/camera/color/image_raw",Image,self.callback)
+        self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.callback)

-    def callback(self,data):
+    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

-        (rows,cols,channels) = cv_image.shape
-        if cols > 60 and rows > 60 :
-            cv2.circle(cv_image, (50,50), 10, 255)
+        (rows, cols, channels) = cv_image.shape
+        if cols > 60 and rows > 60:
+            cv2.circle(cv_image, (50, 50), 10, 255)

        cv2.imshow("Image window", cv_image)
        cv2.waitKey(3)

        try:
            ret_img = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
            ret_img.header = data.header
+            print(rospy.Time.now())
            self.image_pub.publish(ret_img)
        except CvBridgeError as e:
            print(e)


def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main(sys.argv)
\ No newline at end of file
@@ -68,7 +68,7 @@ class human_detector:
        self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.callback)
        self.image_pub = rospy.Publisher("/human_detected_image/image", Image, queue_size=10)
-        self.bbx_pub = rospy.Publisher('/human_detected_image/bounding_box', box_list, queue_size=10)
+        self.bbx_pub = rospy.Publisher("/human_detected_image/bounding_box", box_list, queue_size=10)

    def callback(self, data):
        t0 = time.time()
@@ -126,7 +126,7 @@ class human_detector:
        counter = 0
        list_length = 0
        bbx_list = box_list()
-        bbx_list.header.stamp = rospy.Time.now()
+        bbx_list.header.stamp = data.header.stamp

        for score in scores[0]:
            if score > 0.6:
@@ -145,12 +145,10 @@ class human_detector:
        try:
            cvt_msg = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
-            cvt_msg.header.stamp = rospy.Time.now()
+            cvt_msg.header.stamp = data.header.stamp
            self.image_pub.publish(cvt_msg)
-            # print(cvt_msg.header)
        except CvBridgeError as e:
            print(e)

        # Append Accuracy
        # if scores[0][0] > 0.01:
...
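The stamp changes above (rospy.Time.now() replaced by data.header.stamp on both the republished image and the bounding-box list) matter because the distance node pairs its three inputs with message_filters.ApproximateTimeSynchronizer, which matches messages by header stamp; carrying the original camera stamp forward keeps the detections matchable against the aligned depth frames. A minimal, self-contained sketch of that pattern (two topics instead of three, purely illustrative):

#!/usr/bin/env python
import rospy
import message_filters
from sensor_msgs.msg import Image

def synced_cb(color_msg, depth_msg):
    # Both messages carry (nearly) the same header.stamp, so pixel lookups line up.
    rospy.loginfo('paired stamps: %s / %s', color_msg.header.stamp, depth_msg.header.stamp)

rospy.init_node('sync_sketch')
color_sub = message_filters.Subscriber('/human_detected_image/image', Image)
depth_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
# queue_size=10, slop=0.1 s: only messages whose stamps differ by <= ~0.1 s are paired.
ts = message_filters.ApproximateTimeSynchronizer([color_sub, depth_sub], 10, 0.1)
ts.registerCallback(synced_cb)
rospy.spin()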
File added