diff --git a/.gitignore b/.gitignore
index 30bd60effde2c708c3b453273a5e4bb684859502..558af26895021178d5241b674d2e341ff27806aa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
-.idea
+/.vscode
 *.bag
 *.pyc
+.idea
 .catkin_workspace
 
-# *CMakeLists.txt
\ No newline at end of file
+# *CMakeLists.txt
diff --git a/get_obj_dist/src/object_to_distance.py b/get_obj_dist/src/object_to_distance.py
index b684af68b14353c3022d3d93f53b7cd0639414ed..fb682489b0172209281521f8ea88090409d5cf5a 100755
--- a/get_obj_dist/src/object_to_distance.py
+++ b/get_obj_dist/src/object_to_distance.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 
-import numpy as np
 import sys
 import cv2
 import rospy
@@ -17,6 +16,7 @@ font = cv2.FONT_HERSHEY_SIMPLEX
 font_scale = 0.75
 font_color = (255, 255, 255)
 line_type = 2
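+# Number of markers in the published MarkerArray; also sizes the per-person moving-average buffers below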
+marker_size = 5
 size_of_moving_avg = 15
 
 
@@ -25,8 +25,9 @@ class obj_dist:
         # Declare functional variables
         self.bridge = CvBridge()
         self.marker_array = MarkerArray()
-        self.moving_average = [3] * size_of_moving_avg
-        self.set_marker_array(5, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL')
+
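+        # One independent averaging buffer per marker/person; the list comprehension keeps each inner list from aliasing a single shared buffer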
+        self.moving_average = [[3] * size_of_moving_avg for _ in range(marker_size)]
+        self.set_marker_array(marker_size, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL')
 
         # Initiale publishers
         self.dist_pub = rospy.Publisher('/obj_to_dist/human_distance', Image, queue_size=10)
@@ -35,8 +36,8 @@ class obj_dist:
         # Initialize subscribers
         self.bbx_sub = message_filters.Subscriber('/human_detected_image/bounding_box', box_list)
         self.human_image_sub = message_filters.Subscriber('/human_detected_image/image', Image)
-        # self.depth_image_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
-        self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image)
+        self.depth_image_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
+        # self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image)
 
         ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub,
                                                           self.depth_image_sub],
@@ -45,6 +46,7 @@ class obj_dist:
         ts.registerCallback(self.callback)
 
     def callback(self, bbx, image, depth):
+        rospy.logdebug('Synchronized bounding-box/image/depth callback fired')
         if bbx.length:
             cv_depth = self.bridge.imgmsg_to_cv2(depth, 'passthrough')
             cv_image = self.bridge.imgmsg_to_cv2(image, 'bgr8')
@@ -62,10 +64,10 @@ class obj_dist:
         # filtered_depth, _size = filter_background(roi_depth)
         filtered_depth, _size = dynamic_background(roi_depth)
         if _size:
-            self.moving_average.pop()
-            self.moving_average.insert(0, filtered_depth.sum() / _size / 1000.0)
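+            # Update only this person's window: drop the oldest sample and push the new mean depth (converted from mm to metres)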
+            self.moving_average[person_id].pop()
+            self.moving_average[person_id].insert(0, filtered_depth.sum() / _size / 1000.0)
 
-            current_avg = sum(self.moving_average) / size_of_moving_avg
+            current_avg = sum(self.moving_average[person_id]) / size_of_moving_avg
 
             x_meter = get_x_in_meters(box.xmin, box.xmax, current_avg)
             cv2.putText(cv_image, '{} meters / Person {}'.format(round(current_avg, 2), person_id),
@@ -126,5 +128,5 @@ def main(args):
 
 
 if __name__ == '__main__':
-    # rospy.set_param("use_sim_time", 'true')
+    rospy.set_param("use_sim_time", True)
     main(sys.argv)
diff --git a/get_obj_dist/src/utils.py b/get_obj_dist/src/utils.py
index 962d4a0359e5eb1d67f40125a99f63246c6fb516..8d2a3a31a40f47b260b48563e44381acec56c8d4 100644
--- a/get_obj_dist/src/utils.py
+++ b/get_obj_dist/src/utils.py
@@ -24,7 +24,7 @@ def dynamic_background(roi):
     # Anything less than 100mm is sure noise
     # roi = np.ma.masked_less(roi, 100)
     roi_1d = roi.flatten()
-    hist, bins = np.histogram(roi, bins=_bin, density=True)
+    hist, bins = np.histogram(roi_1d, bins=_bin, density=True)
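+    # Histogram over the flattened ROI; the most populated bin is taken as the dominant depth band in the box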
     max_bin = hist.argmax() + 1
 
     # plt.hist(roi_1d, bins=_bin, density=True)
diff --git a/human_detection/src/reference.py b/human_detection/src/reference.py
index a94bb1d56fd4ea924a1e152cb0fa9e322f399987..c9f45d7ac5037b57a75937ae382249902399ee21 100755
--- a/human_detection/src/reference.py
+++ b/human_detection/src/reference.py
@@ -8,43 +8,47 @@ from std_msgs.msg import String
 from sensor_msgs.msg import Image
 from cv_bridge import CvBridge, CvBridgeError
 
+
 class image_converter:
 
-  def __init__(self):
-    self.image_pub = rospy.Publisher("image_topic_2",Image, queue_size=10)
+    def __init__(self):
+        self.image_pub = rospy.Publisher("image_topic_2", Image, queue_size=10)
 
-    self.bridge = CvBridge()
-    self.image_sub = rospy.Subscriber("/camera/color/image_raw",Image,self.callback)
+        self.bridge = CvBridge()
+        self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.callback)
 
-  def callback(self,data):
-    try:
-      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
-      
-    except CvBridgeError as e:
-      print(e)
+    def callback(self, data):
+        try:
+            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
 
-    (rows,cols,channels) = cv_image.shape
-    if cols > 60 and rows > 60 :
-      cv2.circle(cv_image, (50,50), 10, 255)
+        except CvBridgeError as e:
+            print(e)
 
-    cv2.imshow("Image window", cv_image)
-    cv2.waitKey(3)
+        (rows, cols, channels) = cv_image.shape
+        if cols > 60 and rows > 60:
+            cv2.circle(cv_image, (50, 50), 10, 255)
+
+        cv2.imshow("Image window", cv_image)
+        cv2.waitKey(3)
+
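+        # Republish the (possibly annotated) frame, keeping the original header so timestamps stay consistent downstream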
+        try:
+            ret_img = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
+            ret_img.header = data.header
+            print(rospy.Time.now())
+            self.image_pub.publish(ret_img)
+        except CvBridgeError as e:
+            print(e)
 
-    try:
-      ret_img = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
-      ret_img.header = data.header
-      self.image_pub.publish(ret_img)
-    except CvBridgeError as e:
-      print(e)
 
 def main(args):
-  ic = image_converter()
-  rospy.init_node('image_converter', anonymous=True)
-  try:
-    rospy.spin()
-  except KeyboardInterrupt:
-    print("Shutting down")
-  cv2.destroyAllWindows()
+    ic = image_converter()
+    rospy.init_node('image_converter', anonymous=True)
+    try:
+        rospy.spin()
+    except KeyboardInterrupt:
+        print("Shutting down")
+    cv2.destroyAllWindows()
+
 
 if __name__ == '__main__':
-    main(sys.argv)
\ No newline at end of file
+    main(sys.argv)
diff --git a/human_detection/src/webcam.py b/human_detection/src/webcam.py
index ce24049c107ea3f667487757be2c736035249838..1075dcd807a81bcfba40300c0995462bba627c16 100755
--- a/human_detection/src/webcam.py
+++ b/human_detection/src/webcam.py
@@ -68,7 +68,7 @@ class human_detector:
         self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.callback)
 
         self.image_pub = rospy.Publisher("/human_detected_image/image", Image, queue_size=10)
-        self.bbx_pub = rospy.Publisher('/human_detected_image/bounding_box', box_list, queue_size=10)
+        self.bbx_pub = rospy.Publisher("/human_detected_image/bounding_box", box_list, queue_size=10)
 
     def callback(self, data):
         t0 = time.time()
@@ -126,7 +126,7 @@ class human_detector:
         counter = 0
         list_length = 0
         bbx_list = box_list()
-        bbx_list.header.stamp = rospy.Time.now()
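+        # Stamp the box list with the source image's timestamp (rather than wall-clock time) so ApproximateTimeSynchronizer can pair it with the matching image and depth frames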
+        bbx_list.header.stamp = data.header.stamp
 
         for score in scores[0]:
             if score > 0.6:
@@ -145,11 +145,10 @@ class human_detector:
 
         try:
             cvt_msg = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
-            cvt_msg.header.stamp = rospy.Time.now()
+            cvt_msg.header.stamp = data.header.stamp
             self.image_pub.publish(cvt_msg)
         except CvBridgeError as e:
             print(e)
- 
 
         # Append Accuracy
         # if scores[0][0] > 0.01: