diff --git a/get_obj_dist/launch/node.launch b/get_obj_dist/launch/node.launch
index d8fc353993517010bc28114593509d62066f62d6..e466591c2eae2eb0701b83681d44fc98573d99d6 100644
--- a/get_obj_dist/launch/node.launch
+++ b/get_obj_dist/launch/node.launch
@@ -3,15 +3,18 @@
 <launch>
 
   <arg name="camera_in_topic"      default="/camera/color/image_raw"/>
-  <arg name="depth_image_in"       default="/camera/aligned_depth_to_color/image_raw"/>
+  <arg name="depth_image_in_topic"       default="/camera/aligned_depth_to_color/image_raw"/>
 
-  <arg name="node_name"        default="human_tensorflow_detection_2d_to_3d"/>
-  <arg name="bbox_in_topic"        default="/human_tensorflow_detection/bounding_box"/>
-  <arg name="detection_image_in"   default="/human_tensorflow_detection/image"/>
-  <arg name="detection2d_array_in" default="/human_tensorflow_detection/detection2d_array"/>
+  <arg name="node_name"        default="human_tensorflow_detection_3d"/>
+  <arg name="bbox_in_topic"        default="/human_tensorflow_detection_2d/bounding_box"/>
+  <arg name="detection_image_in"   default="/human_tensorflow_detection_2d/image"/>
+  <arg name="detection2d_array_in" default="/human_tensorflow_detection_2d/detection2d_array"/>
 
   <arg name="detection_distance_image_out" default="/$(arg node_name)/image"/>
   <arg name="people_marker_array_out"      default="/$(arg node_name)/people_marker_array"/>
+  <arg name="detected_persons_out"         default="/$(arg node_name)/detected_persons"/>
+
+
   <arg name="output"             default="screen"/>
   <arg name="required"           default="true"/>
 
@@ -25,13 +28,10 @@
     <remap from="~detection_image_in"     to="$(arg detection_image_in)"/>
     <remap from="~bounding_box_in"        to="$(arg bbox_in_topic)"/>
     <remap from="~detection2d_array"      to="$(arg detection2d_array_in)"/>
-    <remap from="~depth_image_in"         to="$(arg depth_image_in)"/>
+    <remap from="~depth_image_in"         to="$(arg depth_image_in_topic)"/>
     <remap from="~detection_distance_image_out"  to="$(arg detection_distance_image_out)"/>
     <remap from="~people_marker_array_out"       to="$(arg people_marker_array_out)"/>
-
-
-
-    /human_bbox_to_distance/detection2d_array
+    <remap from="~detected_persons"              to="$(arg detected_persons_out)"/>
   </node>
 
 </launch>
diff --git a/get_obj_dist/src/human_2d_to_3d.py b/get_obj_dist/src/human_2d_to_3d.py
index d251b0848505371ab9cb474fff625fe21e4d929b..9830da8576eabe2aa1c8d472d8eea69cb28ab3b9 100755
--- a/get_obj_dist/src/human_2d_to_3d.py
+++ b/get_obj_dist/src/human_2d_to_3d.py
@@ -22,6 +22,8 @@ line_type = 2
 marker_size = 5
 size_of_moving_avg = 15
 
+#MESH_PATH = 'package://get_obj_dist/human_model.STL'
+
 
 class obj_dist:
     def __init__(self):
@@ -29,8 +31,12 @@ class obj_dist:
         self.bridge = CvBridge()
         self.marker_array = MarkerArray()
 
+        self.covariance = [0.05, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 999999999.0, 0, 0, 0, 0, 0, 0, 999999999.0, 0, 0, 0, 0, 0, 0, 999999999.0]
+
+        self.run_once = False
+
         self.moving_average = [[3] * size_of_moving_avg] * marker_size
-        self.set_marker_array(marker_size, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL')
+        #self.set_marker_array(marker_size, 'camera_color_optical_frame', MESH_PATH)
 
         # Initiale publishers
         self.image_pub = rospy.Publisher('~detection_distance_image_out', Image, queue_size=1)
@@ -97,6 +103,10 @@ class obj_dist:
 
     def callback2(self, det2d, image, depth):
 
+        if not self.run_once:
+            self.set_marker_array(marker_size, det2d.header.frame_id)
+            self.run_once = True
+
         detections = DetectedPersons()
         detections.header = depth.header
 
@@ -111,15 +121,18 @@ class obj_dist:
                     cood = self.get_human_distance2(cv_depth, cv_image, det2d.detections[i], i)
                     self.set_model_coordinates(cood, i)
 
-                    det = DetectedPerson()
-                    det.confidence = det2d.detections[i].results[0].score
-                    det.pose.pose.position.x = cood[0]
-                    det.pose.pose.position.y = cood[1]
-                    det.pose.pose.position.z = cood[2]
-                    det.pose.pose.orientation.w=1.0
-                    det.modality = "rgbd"
+                    if cood[0]!=-1 and cood[1]!=-1 and cood[2]!=-1:
+                        det = DetectedPerson()
+                        det.detection_id = i  # TODO: confirm detection_id is actually consumed downstream
+                        det.confidence = det2d.detections[i].results[0].score
+                        det.pose.pose.position.x = cood[0]
+                        det.pose.pose.position.y = cood[1]
+                        det.pose.pose.position.z = cood[2]
+                        det.pose.pose.orientation.w=1.0
+                        det.pose.covariance = self.covariance
+                        det.modality = "rgbd"
 
-                    detections.detections.append(det)
+                        detections.detections.append(det)
 
                 else:
                     self.set_model_coordinates((-1, -1, -1), i)
@@ -171,8 +184,8 @@ class obj_dist:
             current_avg = sum(self.moving_average[person_id]) / size_of_moving_avg
 
             x_meter = get_x_in_meters(xmin, xmax, current_avg)
-            cv2.putText(cv_image, '{} meters / Person {}'.format(round(current_avg, 2), person_id),
-                        (xmin, ymax - 100),
+            cv2.putText(cv_image, 'z={}m/id={}'.format(round(current_avg, 2), person_id),
+                        (int(xmin+(det.bbox.size_x/2)-50), ymax - 100),
                         font,
                         font_scale,
                         font_color,
@@ -184,14 +197,14 @@ class obj_dist:
         else:
             return -1, -1, -1
 
-    def set_marker_array(self, size, frame_id, package):
+    def set_marker_array(self, size, frame_id):
         for counter in range(0, size):
             self.marker_array.markers.append(Marker())
             self.marker_array.markers[counter].header.frame_id = frame_id
             self.marker_array.markers[counter].lifetime = rospy.Duration()
             self.marker_array.markers[counter].id = counter
-            self.marker_array.markers[counter].type = Marker.MESH_RESOURCE
-            self.marker_array.markers[counter].mesh_resource = package
+            self.marker_array.markers[counter].type = Marker.CYLINDER
+            #self.marker_array.markers[counter].mesh_resource = package
 
     def set_model_coordinates(self, cood, person_id):
         self.marker_array.markers[person_id].header.stamp = rospy.Time.now()
@@ -204,18 +217,19 @@ class obj_dist:
             self.marker_array.markers[person_id].color.r = 1.0
             self.marker_array.markers[person_id].color.g = 1.0
             self.marker_array.markers[person_id].color.b = 1.0
-            self.marker_array.markers[person_id].color.a = 1.0
+            self.marker_array.markers[person_id].color.a = 0.5
 
             # TODO: Change the scale of the DAE model so these numbers make more sense
-            self.marker_array.markers[person_id].scale.x = 0.0005
-            self.marker_array.markers[person_id].scale.y = 0.0009
-            self.marker_array.markers[person_id].scale.z = 0.0008
+            self.marker_array.markers[person_id].scale.x = 0.5
+            self.marker_array.markers[person_id].scale.y = 0.5
+            self.marker_array.markers[person_id].scale.z = 1.0
 
-            self.marker_array.markers[person_id].pose.orientation.x = 1.0
+            self.marker_array.markers[person_id].pose.orientation.x = 0.707
             self.marker_array.markers[person_id].pose.orientation.y = 0.0
             self.marker_array.markers[person_id].pose.orientation.z = 0.0
+            self.marker_array.markers[person_id].pose.orientation.w = 0.707
 
-            self.marker_array.markers[person_id].pose.position.x = cood[0] - 0.4
+            self.marker_array.markers[person_id].pose.position.x = cood[0]
             self.marker_array.markers[person_id].pose.position.y = cood[1]
             self.marker_array.markers[person_id].pose.position.z = cood[2]
 
diff --git a/human_detection/launch/node.launch b/human_detection/launch/node.launch
index 60a848e9d65dbed62a4c6ccf6ee2fd74f9b98528..1004d9d40b5ab6e1e752c839b20c5a40681de476 100644
--- a/human_detection/launch/node.launch
+++ b/human_detection/launch/node.launch
@@ -4,7 +4,7 @@
 
   <arg name="camera_in_topic"  default="/camera/color/image_raw"/>
 
-  <arg name="node_name"        default="human_tensorflow_detection"/>
+  <arg name="node_name"        default="human_tensorflow_detection_2d"/>
   <arg name="image_out_topic"  default="/$(arg node_name)/image"/>
   <arg name="bbox_out_topic"   default="/$(arg node_name)/bounding_box"/>
   <arg name="virtual_env_path" default="$(env HOME)/iri-lab/virtualenv/raadical_venv"/>
diff --git a/human_detection/launch/test.launch b/human_detection/launch/test.launch
new file mode 100644
index 0000000000000000000000000000000000000000..ef1180dabf4df54a542806cf9c9b6c9049cdca18
--- /dev/null
+++ b/human_detection/launch/test.launch
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<launch>
+
+  <arg name="camera_in_topic"  default="/camera/color/image_raw"/>
+  <arg name="depth_image_in_topic" default="/camera/aligned_depth_to_color/image_raw"/>
+  <arg name="detected_persons_out" default="/human_tensorflow_detection_3d/detected_persons"/>
+
+  <arg name="virtual_env_path" default="$(env HOME)/iri-lab/virtualenv/raadical_venv"/>
+  <arg name="output"           default="screen"/>
+
+
+  <include file="$(find human_detection)/launch/node.launch">
+    <arg name="camera_in_topic"  value="$(arg camera_in_topic)"/>
+    <arg name="virtual_env_path" value="$(arg virtual_env_path)"/>
+    <arg name="output"           value="$(arg output)"/>
+  </include>
+
+  <include file="$(find get_obj_dist)/launch/node.launch">
+    <arg name="camera_in_topic"       value="$(arg camera_in_topic)"/>
+    <arg name="depth_image_in_topic"  value="$(arg depth_image_in_topic)"/>
+    <arg name="detected_persons_out"  value="$(arg detected_persons_out)"/>
+    <arg name="output"                value="$(arg output)"/>
+  </include>
+
+</launch>
diff --git a/human_detection/launch/test_realsense.launch b/human_detection/launch/test_realsense.launch
index 793511516bcf71bcd06f97f06d7005ab3499d74d..0751c56d719fa71407b0d05307599b299c3c4763 100644
--- a/human_detection/launch/test_realsense.launch
+++ b/human_detection/launch/test_realsense.launch
@@ -1,21 +1,23 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <launch>
 
+  <arg name="camera_in_topic"      default="/camera/color/image_raw"/>
+  <arg name="depth_image_in_topic" default="/camera/aligned_depth_to_color/image_raw"/>
+  <arg name="detected_persons_out" default="/human_tensorflow_detection_3d/detected_persons"/>
   <arg name="virtual_env_path" default="$(env HOME)/iri-lab/virtualenv/raadical_venv"/>
   <arg name="output"           default="screen"/>
 
   <include file="$(find realsense2_camera)/launch/rs_rgbd.launch">
   </include>
 
-  <include file="$(find human_detection)/launch/node.launch">
+  <include file="$(find human_detection)/launch/test.launch">
+    <arg name="camera_in_topic"       value="$(arg camera_in_topic)"/>
+    <arg name="depth_image_in_topic"  value="$(arg depth_image_in_topic)"/>
+    <arg name="detected_persons_out"  value="$(arg detected_persons_out)"/>
     <arg name="virtual_env_path" value="$(arg virtual_env_path)"/>
     <arg name="output"           value="$(arg output)"/>
   </include>
 
-  <include file="$(find get_obj_dist)/launch/node.launch">
-    <arg name="output"           value="$(arg output)"/>
-  </include>
-
   <node name="rviz"
         pkg="rviz"
         type="rviz"
diff --git a/human_detection/launch/test_spencer.launch b/human_detection/launch/test_spencer.launch
new file mode 100644
index 0000000000000000000000000000000000000000..48fefcbdd74de8c3135eb8250d94a394f41ae7c4
--- /dev/null
+++ b/human_detection/launch/test_spencer.launch
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<launch>
+
+  <arg name="camera_in_topic"  default="/spencer/sensors/rgbd_front_top/rgb/image_raw"/>
+  <arg name="depth_image_in_topic" default="/spencer/sensors/rgbd_front_top/depth/image_raw"/>
+  <!--<arg name="detected_persons_out" default="/spencer/perception_internal/detected_persons/rgbd_front_top/upper_body"/>-->
+  <arg name="detected_persons_out" default="/spencer/perception_internal/detected_persons/rgbd_front_top/hog"/>
+
+  <arg name="virtual_env_path" default="$(env HOME)/iri-lab/virtualenv/raadical_venv"/>
+  <arg name="output"           default="screen"/>
+
+  <include file="$(find human_detection)/launch/test.launch">
+    <arg name="camera_in_topic"       value="$(arg camera_in_topic)"/>
+    <arg name="depth_image_in_topic"  value="$(arg depth_image_in_topic)"/>
+    <arg name="detected_persons_out"  value="$(arg detected_persons_out)"/>
+    <arg name="virtual_env_path" value="$(arg virtual_env_path)"/>
+    <arg name="output"           value="$(arg output)"/>
+  </include>
+
+</launch>
diff --git a/human_detection/rviz/test.rviz b/human_detection/rviz/test.rviz
index 25f9bfe395b26c4327e7dc6947f39137fa402869..ec8ee105974b7117a5a1cb99741e0f5705247868 100644
--- a/human_detection/rviz/test.rviz
+++ b/human_detection/rviz/test.rviz
@@ -116,7 +116,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/Image
       Enabled: true
-      Image Topic: /human_tensorflow_detection/image
+      Image Topic: /human_tensorflow_detection_2d/image
       Max Value: 1
       Median window: 5
       Min Value: 0
@@ -128,7 +128,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/Image
       Enabled: true
-      Image Topic: /human_tensorflow_detection_2d_to_3d/image
+      Image Topic: /human_tensorflow_detection_3d/image
       Max Value: 1
       Median window: 5
       Min Value: 0
@@ -140,7 +140,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/MarkerArray
       Enabled: true
-      Marker Topic: /human_tensorflow_detection_2d_to_3d/people_marker_array
+      Marker Topic: /human_tensorflow_detection_3d/people_marker_array
       Name: DistanceMarker
       Namespaces:
         "": true
diff --git a/human_detection/src/human_detector.py b/human_detection/src/human_detector.py
index 2f3c201ebc49af40de2167bb3ae445614a144105..08b108416f1b0b5a2bd562c0333a966ac80f23f7 100755
--- a/human_detection/src/human_detector.py
+++ b/human_detection/src/human_detector.py
@@ -129,8 +129,11 @@ class human_detector:
 
         if len(run_time_list):
             avg_run_time = round(sum(run_time_list) / len(run_time_list) * 1000, 1)
+            left_margin = 10
+            bottom_margin = 10
+            time_text_position = (left_margin, data.height-bottom_margin)
             cv2.putText(cv_image, 'Run Time: {}ms'.format(avg_run_time),
-                        text_position,
+                        time_text_position,
                         font,
                         font_scale,
                         font_color,
@@ -143,6 +146,7 @@ class human_detector:
 
         det2d_array = Detection2DArray()
         det2d_array.header.stamp = data.header.stamp
+        det2d_array.header.frame_id = data.header.frame_id
 
         spotted = False
 
@@ -180,6 +184,7 @@ class human_detector:
         try:
             cvt_msg = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
             cvt_msg.header.stamp = data.header.stamp
+            cvt_msg.header.frame_id = data.header.frame_id
             self.image_pub.publish(cvt_msg)
         except CvBridgeError as e:
             rospy.logerr(rospy.get_name() + ": CvBridgeError: " + e)