diff --git a/include/stream_srt_source.h b/include/stream_srt_source.h
index 6eb8c5de1de598fe4971d703be657cc824b82150..2a1e35732490b9be04265f8b99aa6090414065d6 100644
--- a/include/stream_srt_source.h
+++ b/include/stream_srt_source.h
@@ -20,7 +20,7 @@ extern "C"
 #include <chrono>
 
 #define NUM_VIDEO_BUFFERS 4
-#define NUM_AUDIO_BUFFERS 4
+#define NUM_AUDIO_BUFFERS 8
 
 typedef struct{
   void   *start;
@@ -60,6 +60,14 @@ typedef struct{
   unsigned int av_profile;
 }TVideoConfig;
 
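+// one slot of the audio capture ring buffer: per-channel sample planes plus the
+// size, capture time and PTS recorded by the PortAudio callback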
+typedef struct
+{
+  unsigned char **data;
+  unsigned int num_bytes;
+  long long frame_time;
+  int64_t frame_pts;
+}TAudioBuffer;
+
 typedef struct{
   unsigned long int av_channels_layout;
   PaSampleFormat pa_sample_format;
@@ -71,15 +79,16 @@ typedef struct{
   AVCodecContext *encoder_context;
   AVFrame *frame;
   AVFrame *tmp_frame;
-  int64_t next_pts;
-  int nb_samples;
   struct SwrContext *resampler;
-  int samples_count;
+  int nb_samples;
   // device data
   PaStream *audio_stream;
   CMutex access;
-  long long frame_time;
   std::chrono::time_point<std::chrono::high_resolution_clock> start_time;
+  int64_t next_pts;
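+  // ring buffer shared between the PortAudio callback (producer) and the
+  // capture thread (consumer); buffer_write and buffer_read index the next
+  // slot to fill and the next slot to encode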
+  TAudioBuffer *buffers;
+  unsigned int buffer_write;
+  unsigned int buffer_read;
 }TAudioData;
 
 typedef struct{
diff --git a/src/stream_srt_source.cpp b/src/stream_srt_source.cpp
index 91eaeb17dd60a2ed7dc4e499c3738e85e73e6cc3..a010667d57f1f926acbde060bd9c50fbe40a3b22 100644
--- a/src/stream_srt_source.cpp
+++ b/src/stream_srt_source.cpp
@@ -41,11 +41,12 @@ CStreamSrtSource::CStreamSrtSource()
   this->audio_data.frame=NULL;
   this->audio_data.tmp_frame=NULL;
   this->audio_data.next_pts=0;
-  this->audio_data.samples_count=0;
   this->audio_data.start_time=std::chrono::high_resolution_clock::now();
-  this->audio_data.frame_time=0;
   this->audio_data.audio_stream=NULL;
   this->audio_data.resampler=NULL;
+  this->audio_data.buffers=NULL;
+  this->audio_data.buffer_write=0;
+  this->audio_data.buffer_read=0;
   // initialize events
   this->event_server=CEventServer::instance();
   this->finish_main_thread_event_id="stream_srt_source_finish_main_thread_event";
@@ -447,6 +448,21 @@ void CStreamSrtSource::open_video_device(void)
       this->close_video_device();
       throw CStreamSrtSourceVideoDeviceException(_HERE_,"Impossible to set H264 level",errno,strerror(errno));
     }
+
+    // enable repeating the sequence header
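+    // (the repeated SPS/PPS let clients that join the stream mid-way start decoding)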
+    memset(&ecs1, 0, sizeof(ecs1));
+    memset(&ec1, 0, sizeof(ec1));
+    ec1.id = V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER;
+    ec1.value = 1;
+    ec1.size = 0;
+    ecs1.controls = &ec1;
+    ecs1.count = 1;
+    ecs1.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+    if(this->xioctl(this->video_data.fd,VIDIOC_S_EXT_CTRLS,&ecs1)==-1)
+    {
+      this->close_video_device();
+      throw CStreamSrtSourceVideoDeviceException(_HERE_,"Impossible to enable repeating the sequence header",errno,strerror(errno));
+    }
   }
   // request DMA buffers
   memset(&req,0,sizeof(req));
@@ -779,7 +795,7 @@ void CStreamSrtSource::open_audio(void)
   }
   else
     this->audio_data.encoder_context->channel_layout=this->audio_data.av_channels_layout;
-  this->audio_data.encoder_context->flags|=AV_CODEC_FLAG_GLOBAL_HEADER;
+//  this->audio_data.encoder_context->flags|=AV_CODEC_FLAG_GLOBAL_HEADER;
 
   // open codec
   ret=avcodec_open2(this->audio_data.encoder_context,this->audio_data.codec,NULL);
@@ -833,6 +849,11 @@ void CStreamSrtSource::open_audio(void)
     }
     this->audio_data.av_sample_format=this->audio_data.encoder_context->sample_fmt;
   }
+  else
+  {
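+    // capture format already matches the encoder: no resampler or intermediate frame is needed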
+    this->audio_data.resampler=NULL;
+    this->audio_data.tmp_frame=NULL;
+  }
   // allocate frame
   this->audio_data.frame=av_frame_alloc();
   if(this->audio_data.frame==NULL)
@@ -890,7 +911,13 @@ void CStreamSrtSource::open_audio_device(void)
   const PaDeviceInfo *device_info;
   std::string device_name;
   PaStreamParameters audio_params;
- 
+
+  // reset any pending events
+  if(this->event_server->event_is_set(this->new_audio_buffer_event_id))
+  {
+    this->event_server->reset_event(this->new_audio_buffer_event_id);
+    std::cout << "resetting audio event" << std::endl;
+  }
   err=Pa_Initialize();
   if(err!=paNoError)
     throw CStreamSrtSourceAudioDeviceException(_HERE_,"Impossible to initialize PortAudio",err,Pa_GetErrorText(err));
@@ -919,6 +946,37 @@ void CStreamSrtSource::open_audio_device(void)
         this->close_audio_device();
         throw CStreamSrtSourceAudioDeviceException(_HERE_,"Impossible to open audio device",err,Pa_GetErrorText(err));
       }
+      // initialize the audio ring buffer, freeing any buffers left over from a previous open
+      if(this->audio_data.buffers!=NULL)
+      {
+        for(unsigned int i=0;i<NUM_AUDIO_BUFFERS;i++)
+        {
+          if(this->audio_data.buffers[i].data!=NULL)
+          {
+            for(int j=0;j<this->config.audio.num_channels;j++)
+            {
+              delete[] this->audio_data.buffers[i].data[j];
+              this->audio_data.buffers[i].data[j]=NULL;
+            }
+            delete[] this->audio_data.buffers[i].data;
+            this->audio_data.buffers[i].data=NULL;
+          }
+        }
+        delete[] this->audio_data.buffers;
+        this->audio_data.buffers=NULL;
+      }
+      this->audio_data.buffers=new TAudioBuffer[NUM_AUDIO_BUFFERS];
+      for(unsigned int i=0;i<NUM_AUDIO_BUFFERS;i++)
+      {
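+        // allocate one sample plane per channel, each holding one encoder frame (nb_samples samples of sample_size bytes)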
+        this->audio_data.buffers[i].data=new unsigned char *[this->config.audio.num_channels];
+        for(int j=0;j<this->config.audio.num_channels;j++)
+          this->audio_data.buffers[i].data[j]=new unsigned char[this->audio_data.nb_samples*this->audio_data.sample_size];
+        this->audio_data.buffers[i].frame_time=0;
+        this->audio_data.buffers[i].frame_pts=0;
+        this->audio_data.buffers[i].num_bytes=0;
+      }
+      this->audio_data.buffer_write=0;
+      this->audio_data.buffer_read=0;
     }
     return;
   }
@@ -936,9 +994,7 @@ void CStreamSrtSource::start_audio_device(void)
   if(this->thread_server->get_thread_state(this->capture_audio_thread_id)==attached)
     this->thread_server->start_thread(this->capture_audio_thread_id);
   this->audio_data.next_pts=0;
-  this->audio_data.frame_time=0;
   this->audio_data.start_time=std::chrono::high_resolution_clock::now();
-  this->audio_data.samples_count=0;
 }
 
 void CStreamSrtSource::stop_audio_device(void)
@@ -973,6 +1029,27 @@ void CStreamSrtSource::close_audio_device(void)
     this->audio_data.audio_stream=NULL;
   }
   Pa_Terminate();
+  // delete buffers
+  if(this->audio_data.buffers!=NULL)
+  {
+    for(unsigned int i=0;i<NUM_AUDIO_BUFFERS;i++)
+    {
+      if(this->audio_data.buffers[i].data!=NULL)
+      {
+        for(int j=0;j<this->config.audio.num_channels;j++)
+        {
+          delete[] this->audio_data.buffers[i].data[j];
+          this->audio_data.buffers[i].data[j]=NULL;
+        }
+        delete[] this->audio_data.buffers[i].data;
+        this->audio_data.buffers[i].data=NULL;
+      }
+    }
+    delete[] this->audio_data.buffers;
+    this->audio_data.buffers=NULL;
+  }
+  this->audio_data.buffer_write=0;
+  this->audio_data.buffer_read=0;
 }
 
 int CStreamSrtSource::audioCallback(const void *inputBuffer, void *outputBuffer, unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, void *userData)
@@ -982,16 +1059,18 @@ int CStreamSrtSource::audioCallback(const void *inputBuffer, void *outputBuffer,
 
   data->audio_data.access.enter();
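+  // de-interleave the incoming samples into the per-channel planes of the current write slot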
   for(unsigned int i=0;i<framesPerBuffer; i++)
-    for(unsigned int j=0;j<data->config.audio.num_channels;j++)
+    for(int j=0;j<data->config.audio.num_channels;j++)
       for(unsigned int k=0;k<data->audio_data.sample_size;k++)
-        data->audio_data.tmp_frame->data[j][i*data->audio_data.sample_size+k]=((unsigned char *)inputBuffer)[i*data->audio_data.sample_size*data->config.audio.num_channels+j*data->audio_data.sample_size+k];
-  if(!data->event_server->event_is_set(data->new_audio_buffer_event_id))
-    data->event_server->set_event(data->new_audio_buffer_event_id);
+        data->audio_data.buffers[data->audio_data.buffer_write].data[j][i*data->audio_data.sample_size+k]=((unsigned char *)inputBuffer)[i*data->audio_data.sample_size*data->config.audio.num_channels+j*data->audio_data.sample_size+k];
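+  // stamp the slot with its size, capture time and PTS, wake the capture thread and advance the write index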
   std::chrono::time_point<std::chrono::high_resolution_clock> end=std::chrono::high_resolution_clock::now();
-  data->audio_data.frame_time=std::chrono::duration_cast<std::chrono::microseconds>(end-data->audio_data.start_time).count();
-  data->audio_data.tmp_frame->pts=data->audio_data.next_pts;
+  data->audio_data.buffers[data->audio_data.buffer_write].num_bytes=framesPerBuffer*data->audio_data.sample_size;
+  data->audio_data.buffers[data->audio_data.buffer_write].frame_time=std::chrono::duration_cast<std::chrono::microseconds>(end-data->audio_data.start_time).count();
+  data->audio_data.buffers[data->audio_data.buffer_write].frame_pts=data->audio_data.next_pts;
   data->audio_data.next_pts+=std::chrono::duration_cast<std::chrono::microseconds>(end-data->audio_data.start_time).count();
   data->audio_data.start_time=end;
+//  if(!data->event_server->event_is_set(data->new_audio_buffer_event_id))
+  data->event_server->set_event(data->new_audio_buffer_event_id);
+  data->audio_data.buffer_write=(data->audio_data.buffer_write+1)%NUM_AUDIO_BUFFERS;
   data->audio_data.access.exit();
 
   /* set the output to zero */
@@ -1005,6 +1084,8 @@ void *CStreamSrtSource::capture_audio_thread(void *param)
   int ret,got_packet,index,dst_nb_samples;
   CStreamSrtSource *stream=(CStreamSrtSource *)param;
   std::list<std::string> events;
+  long long frame_time;
+  int64_t frame_pts;
   bool end=false;
   AVPacket pkt;
 
@@ -1018,18 +1099,38 @@ void *CStreamSrtSource::capture_audio_thread(void *param)
     else// new buffer available
     {
       av_init_packet(&pkt);
-      dst_nb_samples=av_rescale_rnd(swr_get_delay(stream->audio_data.resampler,stream->audio_data.encoder_context->sample_rate)+stream->audio_data.tmp_frame->nb_samples,stream->audio_data.encoder_context->sample_rate,stream->audio_data.encoder_context->sample_rate, AV_ROUND_UP);
       ret=av_frame_make_writable(stream->audio_data.frame);
       if(ret<0)
         std::cout << "Impossible to make frame audio writable" << std::endl;
-      /* convert to destination format */
-      stream->audio_data.access.enter();
-      ret=swr_convert(stream->audio_data.resampler,stream->audio_data.frame->data,dst_nb_samples,(const uint8_t **)stream->audio_data.tmp_frame->data,stream->audio_data.tmp_frame->nb_samples);
-      if(ret<0) 
-        std::cout << "Impossible to make resample audio frame" << std::endl;
-      stream->audio_data.access.exit();
-      stream->audio_data.frame->pts=av_rescale_q(stream->audio_data.samples_count,(AVRational){1,stream->audio_data.encoder_context->sample_rate},stream->audio_data.encoder_context->time_base);
-      stream->audio_data.samples_count+=dst_nb_samples;
+      if(stream->audio_data.resampler!=NULL)
+      {
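+        // copy the captured samples into tmp_frame, resample into the encoder frame, then release the ring-buffer slot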
+        stream->audio_data.access.enter();
+        for(int i=0;i<stream->config.audio.num_channels;i++)
+          memcpy(stream->audio_data.tmp_frame->data[i],stream->audio_data.buffers[stream->audio_data.buffer_read].data[i],stream->audio_data.buffers[stream->audio_data.buffer_read].num_bytes);
+        stream->audio_data.tmp_frame->pts=stream->audio_data.buffers[stream->audio_data.buffer_read].frame_pts;
+        stream->audio_data.tmp_frame->nb_samples=stream->audio_data.buffers[stream->audio_data.buffer_read].num_bytes/stream->audio_data.sample_size;
+        dst_nb_samples=av_rescale_rnd(swr_get_delay(stream->audio_data.resampler,stream->audio_data.encoder_context->sample_rate)+stream->audio_data.tmp_frame->nb_samples,stream->audio_data.encoder_context->sample_rate,stream->audio_data.encoder_context->sample_rate, AV_ROUND_UP);
+        /* convert to destination format */
+        ret=swr_convert(stream->audio_data.resampler,stream->audio_data.frame->data,dst_nb_samples,(const uint8_t **)stream->audio_data.tmp_frame->data,stream->audio_data.tmp_frame->nb_samples);
+        if(ret<0) 
+          std::cout << "Impossible to resample audio frame" << std::endl;
+        frame_time=stream->audio_data.buffers[stream->audio_data.buffer_read].frame_time;
+        frame_pts=stream->audio_data.buffers[stream->audio_data.buffer_read].frame_pts;
+        stream->audio_data.buffer_read=(stream->audio_data.buffer_read+1)%NUM_AUDIO_BUFFERS;
+        stream->audio_data.access.exit();
+      }
+      else
+      {
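+        // no resampling needed: copy the captured samples straight into the encoder frame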
+        stream->audio_data.access.enter();
+        for(int i=0;i<stream->config.audio.num_channels;i++)
+          memcpy(stream->audio_data.frame->data[i],stream->audio_data.buffers[stream->audio_data.buffer_read].data[i],stream->audio_data.buffers[stream->audio_data.buffer_read].num_bytes);
+        stream->audio_data.frame->pts=stream->audio_data.buffers[stream->audio_data.buffer_read].frame_pts;
+        stream->audio_data.frame->nb_samples=stream->audio_data.buffers[stream->audio_data.buffer_read].num_bytes/stream->audio_data.sample_size;
+        frame_time=stream->audio_data.buffers[stream->audio_data.buffer_read].frame_time;
+        frame_pts=stream->audio_data.buffers[stream->audio_data.buffer_read].frame_pts;
+        stream->audio_data.buffer_read=(stream->audio_data.buffer_read+1)%NUM_AUDIO_BUFFERS;
+        stream->audio_data.access.exit();
+      }
       // encode audio frame
       ret=avcodec_encode_audio2(stream->audio_data.encoder_context,&pkt,stream->audio_data.frame,&got_packet);
       if(ret<0) 
@@ -1037,9 +1138,10 @@ void *CStreamSrtSource::capture_audio_thread(void *param)
       else if(got_packet) 
       {
         // assign actual time stamps
-        pkt.duration=(stream->audio_data.frame_time*9)/100;
-        pkt.pts=(stream->audio_data.next_pts*9)/100;
-        pkt.dts=(stream->audio_data.next_pts*9)/100;
+        pkt.duration=(frame_time*9)/100;
+        pkt.pts=(frame_pts*9)/100;
+        pkt.dts=AV_NOPTS_VALUE;
+        //pkt.dts=(stream->audio_data.next_pts*9)/100;
         pkt.stream_index = stream->audio_data.stream->index;
         /* Write the compressed frame to the media file. */
 //        std::cout << "pts: " << av_ts2str(pkt.pts) << " pts_time: " << av_ts2timestr(pkt.pts,&stream->output_context->streams[pkt.stream_index]->time_base) << " dts: " << av_ts2str(pkt.dts) << " dts_time: " << av_ts2timestr(pkt.dts, &stream->output_context->streams[pkt.stream_index]->time_base) << " duration: " << av_ts2str(pkt.duration) << " duration_time: " << av_ts2timestr(pkt.duration, &stream->output_context->streams[pkt.stream_index]->time_base) << " stream_index: " << pkt.stream_index << std::endl;
@@ -1058,7 +1160,9 @@ void *CStreamSrtSource::capture_audio_thread(void *param)
       av_free_packet(&pkt);
     }
   }
-
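+  // signal end-of-stream to the audio encoder before leaving the thread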
+  av_init_packet(&pkt);
+  avcodec_encode_audio2(stream->audio_data.encoder_context,&pkt,NULL,&got_packet);
+  av_free_packet(&pkt);
   pthread_exit(NULL);
 }