diff --git a/get_obj_dist/launch/node.launch b/get_obj_dist/launch/node.launch
index 1012a037ab0e15c0029acfbd117de21d5eb083d1..2574542f9af88b31ef1c759b742e77049262513a 100644
--- a/get_obj_dist/launch/node.launch
+++ b/get_obj_dist/launch/node.launch
@@ -2,7 +2,7 @@
 <!-- -->
 <launch>
 
-  <arg name="node_name"        default="person_bbox_to_distance"/>
+  <arg name="node_name"        default="human_bbox_to_distance"/>
   <arg name="camera_in_topic"  default="/camera/color/image_raw"/>
 
   <arg name="bbox_in_topic"      default="/human_detection/bounding_box"/>
diff --git a/get_obj_dist/src/object_to_distance.py b/get_obj_dist/src/object_to_distance.py
index 6bd6f07ef8e66c0079928b058d55da0a5c94b824..97c62d9a89d03156280f7a8bcd05ba88c3ad4588 100755
--- a/get_obj_dist/src/object_to_distance.py
+++ b/get_obj_dist/src/object_to_distance.py
@@ -11,6 +11,8 @@ from cv_bridge import CvBridge, CvBridgeError
 from human_detection.msg import box_list
 from utils import *
 
+from spencer_tracking_msgs.msg import DetectedPersons, DetectedPerson
+
 text_position = (10, 50)
 font = cv2.FONT_HERSHEY_SIMPLEX
 font_scale = 0.75
@@ -30,8 +32,9 @@ class obj_dist:
         self.set_marker_array(marker_size, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL')
 
         # Initiale publishers
-        self.dist_pub = rospy.Publisher('~detection_distance_image_out', Image, queue_size=10)
-        self.render_pub = rospy.Publisher('~people_marker_array_out', MarkerArray, queue_size=10)
+        self.image_pub = rospy.Publisher('~detection_distance_image_out', Image, queue_size=1)
+        self.marker_pub = rospy.Publisher('~people_marker_array_out', MarkerArray, queue_size=1)
+        self.detections_pub = rospy.Publisher('~detected_persons', DetectedPersons, queue_size=1)
 
         # Initialize subscribers
         self.bbx_sub = message_filters.Subscriber('~bounding_box_in', box_list)
@@ -46,6 +49,10 @@ class obj_dist:
         ts.registerCallback(self.callback)
 
     def callback(self, bbx, image, depth):
+
+        detections = DetectedPersons()
+        detections.header = depth.header
+
         #print('working lol')
         if bbx.length:
             cv_depth = self.bridge.imgmsg_to_cv2(depth, 'passthrough')
@@ -55,9 +62,22 @@ class obj_dist:
                 if i <= bbx.length - 1:
                     cood = self.get_human_distance(cv_depth, cv_image, bbx.people_list[i], i)
                     self.set_model_coordinates(cood, i)
+
+                    det = DetectedPerson()
+                    det.confidence = 0  # TODO: subscribe to detection2d_array, which carries confidence (the bbox message doesn't)
+                    det.pose.pose.position.x = cood[0]
+                    det.pose.pose.position.y = cood[1]
+                    det.pose.pose.position.z = cood[2]
+                    det.pose.pose.orientation.w = 1.0
+                    det.modality = "rgbd"
+
+                    detections.detections.append(det)
+
                 else:
                     self.set_model_coordinates((-1, -1, -1), i)
-        self.render_pub.publish(self.marker_array)
+
+        self.marker_pub.publish(self.marker_array)
+        self.detections_pub.publish(detections)
 
     def get_human_distance(self, cv_depth, cv_image, box, person_id):
         roi_depth = cv_depth[box.ymin:box.ymax, box.xmin:box.xmax]
@@ -76,10 +96,10 @@ class obj_dist:
                         font_scale,
                         font_color,
                         line_type)
-            self.dist_pub.publish(self.bridge.cv2_to_imgmsg(cv_image))
+            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image))
 
             # TODO: Add dynamic find floor
-            return x_meter, 0.3, current_avg
+            return x_meter, 0.0, current_avg
         else:
             return -1, -1, -1
 
diff --git a/human_detection/CMakeLists.txt b/human_detection/CMakeLists.txt
index ee726a0cb6924ee29377e06e5ce7d2508eb47d25..7737b66108147ad66954f06eee39f22b2e414e65 100644
--- a/human_detection/CMakeLists.txt
+++ b/human_detection/CMakeLists.txt
@@ -11,6 +11,7 @@ find_package(catkin REQUIRED COMPONENTS
   roslib
   rospy
   std_msgs
+  vision_msgs
   message_generation
 )
 
@@ -107,7 +108,7 @@ catkin_package(
 #  INCLUDE_DIRS include
 #  LIBRARIES human_detection
    CATKIN_DEPENDS roslib rospy std_msgs message_runtime
-			
+
 #  DEPENDS system_lib
 )
 
diff --git a/human_detection/package.xml b/human_detection/package.xml
index 9ba4dd01f3dd28f61b9274dc2bb47f9951c51d42..281c98e5a1612442c94f4190f4836f90dd35d9e2 100644
--- a/human_detection/package.xml
+++ b/human_detection/package.xml
@@ -52,7 +52,7 @@
   <build_depend>roslib</build_depend>
   <build_depend>rospy</build_depend>
   <build_depend>std_msgs</build_depend>
-  <build_depend>message_generation</build_depend>  
+  <build_depend>message_generation</build_depend>
   <build_export_depend>roslib</build_export_depend>
   <build_export_depend>rospy</build_export_depend>
   <build_export_depend>std_msgs</build_export_depend>
@@ -61,6 +61,8 @@
   <exec_depend>rospy</exec_depend>
   <exec_depend>std_msgs</exec_depend>
 
+  <depend>vision_msgs</depend>
+
 
   <!-- The export tag contains other, unspecified, tags -->
   <export>
diff --git a/human_detection/rviz/test.rviz b/human_detection/rviz/test.rviz
index 10b7d63fe9930952d633650f3940c0733e710f14..0b5f0eb93df498eaf831f92a92bfbb0ac3d114e9 100644
--- a/human_detection/rviz/test.rviz
+++ b/human_detection/rviz/test.rviz
@@ -6,6 +6,8 @@ Panels:
       Expanded:
         - /Global Options1
         - /Status1
+        - /DetectionDistanceImage1
+        - /MarkerArray1
       Splitter Ratio: 0.5
     Tree Height: 617
   - Class: rviz/Selection
@@ -128,7 +130,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/Image
       Enabled: true
-      Image Topic: /person_bbox_to_distance/detection_distance_image
+      Image Topic: /human_bbox_to_distance/detection_distance_image
       Max Value: 1
       Median window: 5
       Min Value: 0
@@ -140,7 +142,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/MarkerArray
       Enabled: true
-      Marker Topic: /person_bbox_to_distance/people_marker_array
+      Marker Topic: /human_bbox_to_distance/people_marker_array
       Name: MarkerArray
       Namespaces:
         "": true
diff --git a/human_detection/src/human_detector.py b/human_detection/src/human_detector.py
index 992eb813739ef7c4089ea687c28308d30d207be7..6895e5852eafa09e8dd6101e5de8d2db6ca05cb8 100755
--- a/human_detection/src/human_detector.py
+++ b/human_detection/src/human_detector.py
@@ -20,6 +20,8 @@ from human_detection.msg import bounding_box, box_list
 from sensor_msgs.msg import Image
 from cv_bridge import CvBridge, CvBridgeError
 
+from vision_msgs.msg import Detection2DArray, Detection2D, ObjectHypothesisWithPose
+
 # Import utilites
 from object_detection.utils import label_map_util
 from object_detection.utils import visualization_utils as vis_util
@@ -72,8 +74,9 @@ class human_detector:
         self.bridge = CvBridge()
         self.image_sub = rospy.Subscriber("~camera/color/image_raw", Image, self.callback)
 
-        self.image_pub = rospy.Publisher("~detection_image", Image, queue_size=10)
-        self.bbx_pub = rospy.Publisher("~bounding_box", box_list, queue_size=10)
+        self.image_pub = rospy.Publisher("~detection_image", Image, queue_size=1)
+        self.bbx_pub = rospy.Publisher("~bounding_box", box_list, queue_size=1)
+        self.detection2d_array_pub = rospy.Publisher("~detection2d_array", Detection2DArray, queue_size=1)
 
     def callback(self, data):
         t0 = time.time()
@@ -133,17 +136,34 @@ class human_detector:
         list_length = 0
         bbx_list = box_list()
         bbx_list.header.stamp = data.header.stamp
+
+        det2d_array = Detection2DArray()
+        det2d_array.header.stamp = data.header.stamp
+
         spotted = False
 
         for score in scores[0]:
             if score > 0.6:
                 list_length += 1
-                coordinates = bounding_box()
-                coordinates.ymin = int(boxes[0, counter, 0] * screen_height)
-                coordinates.xmin = int(boxes[0, counter, 1] * screen_width)
-                coordinates.ymax = int(boxes[0, counter, 2] * screen_height)
-                coordinates.xmax = int(boxes[0, counter, 3] * screen_width)
-                bbx_list.people_list.append(coordinates)
+                bbox = bounding_box()
+                bbox.ymin = int(boxes[0, counter, 0] * screen_height)
+                bbox.xmin = int(boxes[0, counter, 1] * screen_width)
+                bbox.ymax = int(boxes[0, counter, 2] * screen_height)
+                bbox.xmax = int(boxes[0, counter, 3] * screen_width)
+                bbx_list.people_list.append(bbox)
+
+                det2d = Detection2D()
+                det2d.header = det2d_array.header
+                obj = ObjectHypothesisWithPose()
+                obj.score = score
+                det2d.results.append(obj)
+                det2d.bbox.size_x = bbox.xmax - bbox.xmin
+                det2d.bbox.size_y = bbox.ymax - bbox.ymin
+                det2d.bbox.center.x = bbox.xmin + det2d.bbox.size_x/2.0
+                det2d.bbox.center.y = bbox.ymin + det2d.bbox.size_y/2.0
+                det2d.bbox.center.theta = 0
+
+                det2d_array.detections.append(det2d)
 
             counter += 1
 
@@ -151,6 +171,8 @@ class human_detector:
         bbx_list.length = list_length
         self.bbx_pub.publish(bbx_list)
 
+        self.detection2d_array_pub.publish(det2d_array)
+
         try:
             cvt_msg = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
             cvt_msg.header.stamp = data.header.stamp