Fix events containing too much audio by storing packets in the queue with their timestamps rescaled to AV_TIME_BASE_Q, so that video and audio packets can be sorted together.

pull/2618/head
Isaac Connor 2019-06-11 10:19:42 -04:00
parent 3a91880e2e
commit acb95709e6
6 changed files with 91 additions and 44 deletions
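
The whole patch hangs on one idea: every AVStream counts time in its own time_base, so a raw video pts in 1/90000 ticks and an audio pts in 1/8000 ticks are not directly comparable. Rescaling each packet's pts, dts, and duration into AV_TIME_BASE_Q (1/1000000, i.e. microseconds) as it enters the queue puts all streams on one common clock, which is what lets the queue interleave video and audio by dts. A minimal sketch of that normalization using the real FFmpeg calls; the normalize_packet() name and the AV_NOPTS_VALUE guards are mine, not part of the patch:

    // Rewrite a packet's timing fields from the stream's native time_base
    // into the common AV_TIME_BASE_Q (microseconds).
    static void normalize_packet(AVPacket *pkt, const AVStream *stream) {
      if ( pkt->pts != AV_NOPTS_VALUE )
        pkt->pts = av_rescale_q(pkt->pts, stream->time_base, AV_TIME_BASE_Q);
      if ( pkt->dts != AV_NOPTS_VALUE )
        pkt->dts = av_rescale_q(pkt->dts, stream->time_base, AV_TIME_BASE_Q);
      pkt->duration = av_rescale_q(pkt->duration, stream->time_base, AV_TIME_BASE_Q);
    }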


@@ -873,6 +873,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
       }
     } // end if recording or not
     // Buffer video packets, since we are not recording.
     // All audio packets are keyframes, so only if it's a video keyframe
     if ( packet.stream_index == mVideoStreamId ) {
@@ -885,17 +886,18 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
         }
         packetqueue->clearQueue(monitor->GetPreEventCount(), mVideoStreamId);
-        packetqueue->queuePacket(&packet);
+        packetqueue->queuePacket(&packet, mFormatContext->streams[packet.stream_index]);
       } else if ( packetqueue->size() ) {
         // it's a keyframe or we already have something in the queue
-        packetqueue->queuePacket(&packet);
+        packetqueue->queuePacket(&packet, mFormatContext->streams[packet.stream_index]);
       }
     } else if ( packet.stream_index == mAudioStreamId ) {
       // The following lines should ensure that the queue always begins with a video keyframe
       //Debug(2, "Have audio packet, record_audio is (%d) and packetqueue.size is (%d)", record_audio, packetqueue.size());
       if ( record_audio && packetqueue->size() ) {
         // if it's audio, and we are doing audio, and there is already something in the queue
-        packetqueue->queuePacket(&packet);
+        packetqueue->queuePacket(&packet, mFormatContext->streams[packet.stream_index]);
       }
     } // end if packet type
@@ -904,9 +906,19 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
     if ( have_video_keyframe || keyframe ) {
       if ( videoStore ) {
+        AVPacket out_packet;
+        av_init_packet(&out_packet);
+        if ( zm_av_packet_ref(&out_packet, &packet) < 0 ) {
+          Error("error refing packet");
+        }
+        out_packet.pts = av_rescale_q(out_packet.pts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q);
+        out_packet.dts = av_rescale_q(out_packet.dts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q);
+        out_packet.duration = av_rescale_q(out_packet.duration, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q);
         //Write the packet to our video store
-        int ret = videoStore->writeVideoFramePacket(&packet);
+        int ret = videoStore->writeVideoFramePacket(&out_packet);
+        zm_av_packet_unref(&out_packet);
         if ( ret < 0 ) { //Less than zero and we skipped a frame
           zm_av_packet_unref(&packet);
           return 0;
@@ -1005,10 +1017,21 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
     if ( videoStore ) {
       if ( record_audio ) {
         if ( have_video_keyframe ) {
+          AVPacket out_packet;
+          av_init_packet(&out_packet);
+          if ( zm_av_packet_ref(&out_packet, &packet) < 0 ) {
+            Error("error refing packet");
+          }
+          out_packet.pts = av_rescale_q(out_packet.pts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q);
+          out_packet.dts = av_rescale_q(out_packet.dts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q);
+          out_packet.duration = av_rescale_q(out_packet.duration, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q);
           Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)", mAudioStreamId, packet.stream_index);
           //Write the packet to our video store
           //FIXME no relevance of last key frame
-          int ret = videoStore->writeAudioFramePacket( &packet );
+          int ret = videoStore->writeAudioFramePacket(&out_packet);
+          zm_av_packet_unref(&out_packet);
           if ( ret < 0 ) { //Less than zero and we skipped a frame
             Warning("Failure to write audio packet.");
             zm_av_packet_unref( &packet );
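
Both write paths in this file now repeat the same ref, rescale, write, unref sequence. It could be factored into a helper along these lines; write_rescaled() is a hypothetical sketch, not part of the patch:

    // Copy a packet by reference, normalize its timing to AV_TIME_BASE_Q,
    // hand it to a writer (e.g. videoStore->writeVideoFramePacket), then
    // drop our reference.
    template <typename Writer>
    static int write_rescaled(AVPacket *pkt, AVStream *stream, Writer writer) {
      AVPacket out_packet;
      av_init_packet(&out_packet);
      if ( zm_av_packet_ref(&out_packet, pkt) < 0 ) {
        Error("error refing packet");
        return -1;
      }
      out_packet.pts = av_rescale_q(out_packet.pts, stream->time_base, AV_TIME_BASE_Q);
      out_packet.dts = av_rescale_q(out_packet.dts, stream->time_base, AV_TIME_BASE_Q);
      out_packet.duration = av_rescale_q(out_packet.duration, stream->time_base, AV_TIME_BASE_Q);
      int ret = writer(&out_packet);
      zm_av_packet_unref(&out_packet);
      return ret;
    }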


@@ -24,23 +24,29 @@
 using namespace std;
-ZMPacket::ZMPacket( AVPacket *p ) {
+ZMPacket::ZMPacket( AVPacket *p, AVStream *stream ) {
   frame = NULL;
   image = NULL;
   av_init_packet( &packet );
   if ( zm_av_packet_ref( &packet, p ) < 0 ) {
     Error("error refing packet");
   }
+  packet.pts = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q);
+  packet.dts = av_rescale_q(packet.dts, stream->time_base, AV_TIME_BASE_Q);
+  packet.duration = av_rescale_q(packet.duration, stream->time_base, AV_TIME_BASE_Q);
   gettimeofday( &timestamp, NULL );
 }
-ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) {
+ZMPacket::ZMPacket( AVPacket *p, AVStream *stream, struct timeval *t ) {
   frame = NULL;
   image = NULL;
   av_init_packet( &packet );
   if ( zm_av_packet_ref( &packet, p ) < 0 ) {
     Error("error refing packet");
   }
+  packet.pts = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q);
+  packet.dts = av_rescale_q(packet.dts, stream->time_base, AV_TIME_BASE_Q);
+  packet.duration = av_rescale_q(packet.duration, stream->time_base, AV_TIME_BASE_Q);
   timestamp = *t;
 }
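
To make the rescale concrete (illustrative numbers, not taken from the patch): av_rescale_q(a, b, c) computes a*b/c with rounding, so one second of video and one second of audio land on the same value once both are in AV_TIME_BASE_Q:

    // 90 kHz video stream: pts 90000 is one second.
    AVRational video_tb = {1, 90000};
    int64_t v_us = av_rescale_q(90000, video_tb, AV_TIME_BASE_Q);  // == 1000000

    // 8 kHz audio stream: pts 8000 is also one second.
    AVRational audio_tb = {1, 8000};
    int64_t a_us = av_rescale_q(8000, audio_tb, AV_TIME_BASE_Q);   // == 1000000
    // v_us == a_us, so the queue can now order the two packets directly.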


@@ -38,8 +38,8 @@ class ZMPacket {
     struct timeval timestamp;
   public:
     AVPacket *av_packet() { return &packet; }
-    ZMPacket( AVPacket *packet, struct timeval *timestamp );
-    explicit ZMPacket( AVPacket *packet );
+    ZMPacket( AVPacket *packet, AVStream *stream, struct timeval *timestamp );
+    explicit ZMPacket( AVPacket *packet, AVStream * );
     ~ZMPacket();
 };


@@ -57,8 +57,8 @@ bool zm_packetqueue::queuePacket(ZMPacket* zm_packet) {
     Debug(2, "Looking at packet with stream index (%d) with dts %" PRId64,
         av_packet->stream_index, av_packet->dts);
     if (
-        ( av_packet->stream_index == zm_packet->packet.stream_index )
-        &&
+        //( av_packet->stream_index == zm_packet->packet.stream_index )
+        //&&
         ( av_packet->dts != AV_NOPTS_VALUE )
         &&
         ( av_packet->dts <= zm_packet->packet.dts )
@@ -85,16 +85,22 @@ bool zm_packetqueue::queuePacket(ZMPacket* zm_packet) {
     }
     // Convert to a forward iterator so that we can insert at end
     std::list<ZMPacket *>::iterator f_it = it.base();
-    Debug(2, "Insert packet with stream index (%d) with dts %" PRId64 " for dts %" PRId64,
-        (*f_it)->packet.stream_index, (*f_it)->packet.dts, zm_packet->packet.dts);
-    pktQueue.insert(f_it, zm_packet);
+    if ( f_it == pktQueue.end() ) {
+      Debug(2, "Pushing to end");
+      pktQueue.push_back(zm_packet);
+    } else {
+      Debug(2, "Insert packet with stream index (%d) with dts %" PRId64 " for dts %" PRId64,
+          (*f_it)->packet.stream_index, (*f_it)->packet.dts, zm_packet->packet.dts);
+      pktQueue.insert(f_it, zm_packet);
+    }
     packet_counts[zm_packet->packet.stream_index] += 1;
     return true;
   }
-  Debug(1, "Unable to insert packet for stream %d with dts %" PRId64 " into queue.",
+  Debug(1, "Unable to find a spot for stream %d with dts %" PRId64 ". Sticking on front",
       zm_packet->packet.stream_index, zm_packet->packet.dts);
   // Must be before any packets in the queue. Stick it at the beginning
   pktQueue.push_front(zm_packet);
@@ -102,8 +108,8 @@ bool zm_packetqueue::queuePacket(ZMPacket* zm_packet) {
   return true;
 } // end bool zm_packetqueue::queuePacket(ZMPacket* zm_packet)
-bool zm_packetqueue::queuePacket(AVPacket* av_packet) {
-  ZMPacket *zm_packet = new ZMPacket(av_packet);
+bool zm_packetqueue::queuePacket(AVPacket* av_packet, AVStream *stream) {
+  ZMPacket *zm_packet = new ZMPacket(av_packet, stream);
   return queuePacket(zm_packet);
 }
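
The loop above scans the list backwards from the tail for the first queued packet whose dts is at or below the new packet's dts (the stream_index test is now commented out, since all dts values share one time base), then converts the reverse iterator with base() to get the forward insert position. A self-contained sketch of the same pattern, on a plain std::list<int64_t> of dts values rather than ZMPacket*:

    #include <cstdint>
    #include <list>

    // Insert dts into a list kept sorted ascending, scanning from the tail;
    // the common in-order arrival case is O(1).
    void insert_sorted(std::list<int64_t> &q, int64_t dts) {
      auto rit = q.rbegin();
      while ( rit != q.rend() && *rit > dts )
        ++rit;
      // rit.base() points one element AFTER *rit in forward order, which is
      // exactly the right insert position: rit == rend() inserts at the
      // front, an untouched rit == rbegin() inserts at the back.
      q.insert(rit.base(), dts);
    }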


@@ -33,9 +33,9 @@ class zm_packetqueue {
   public:
     zm_packetqueue(int max_stream_id);
     virtual ~zm_packetqueue();
-    bool queuePacket(AVPacket* packet, struct timeval *timestamp);
+    bool queuePacket(AVPacket* packet, AVStream *stream, struct timeval *timestamp);
     bool queuePacket(ZMPacket* packet);
-    bool queuePacket(AVPacket* packet);
+    bool queuePacket(AVPacket* packet, AVStream *stream);
     ZMPacket * popPacket();
     bool popVideoPacket(ZMPacket* packet);
     bool popAudioPacket(ZMPacket* packet);


@@ -860,22 +860,23 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
   if ( ipkt->duration != AV_NOPTS_VALUE ) {
     duration = av_rescale_q(
         ipkt->duration,
-        video_in_stream->time_base,
+        AV_TIME_BASE_Q,
         video_out_stream->time_base);
     Debug(1, "duration from ipkt: pts(%" PRId64 ") - last_pts(%" PRId64 ") = (%" PRId64 ") => (%" PRId64 ") (%d/%d) (%d/%d)",
         ipkt->pts,
         video_last_pts,
         ipkt->duration,
         duration,
-        video_in_stream->time_base.num,
-        video_in_stream->time_base.den,
+        1,
+        AV_TIME_BASE,
         video_out_stream->time_base.num,
         video_out_stream->time_base.den
         );
   } else {
     duration = av_rescale_q(
         ipkt->pts - video_last_pts,
-        video_in_stream->time_base,
+        AV_TIME_BASE_Q,
+        //video_in_stream->time_base,
         video_out_stream->time_base);
     Debug(1, "duration calc: pts(%" PRId64 ") - last_pts(%" PRId64 ") = (%" PRId64 ") => (%" PRId64 ")",
         ipkt->pts,
@@ -885,7 +886,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
         );
     if ( duration <= 0 ) {
       // Why are we setting the duration to 1?
-      duration = ipkt->duration ? ipkt->duration : av_rescale_q(1, video_in_stream->time_base, video_out_stream->time_base);
+      duration = ipkt->duration ? ipkt->duration : av_rescale_q(1, AV_TIME_BASE_Q, video_out_stream->time_base);
     }
   }
   opkt.duration = duration;
@@ -902,7 +903,8 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
   } else {
     opkt.pts = av_rescale_q(
         ipkt->pts - video_first_pts,
-        video_in_stream->time_base,
+        AV_TIME_BASE_Q,
+        //video_in_stream->time_base,
         video_out_stream->time_base
         );
   }
@@ -929,7 +931,8 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
   } else {
     opkt.dts = av_rescale_q(
         ipkt->dts - video_first_dts,
-        video_in_stream->time_base,
+        AV_TIME_BASE_Q,
+        //video_in_stream->time_base,
         video_out_stream->time_base
         );
     Debug(3, "opkt.dts = %" PRId64 " from ipkt->dts(%" PRId64 ") - first_pts(%" PRId64 ")",
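
With the queue normalized, VideoStore only ever converts out of AV_TIME_BASE_Q. The write-side pattern, sketched with first_pts, first_dts, and out_stream standing in for the video_first_pts, video_first_dts, and video_out_stream members:

    // ipkt timing is already in microseconds thanks to ZMPacket. Shift to a
    // zero-based timeline, then rescale into the muxer's stream time_base.
    opkt.pts = av_rescale_q(ipkt->pts - first_pts, AV_TIME_BASE_Q, out_stream->time_base);
    opkt.dts = av_rescale_q(ipkt->dts - first_dts, AV_TIME_BASE_Q, out_stream->time_base);
    opkt.duration = av_rescale_q(ipkt->duration, AV_TIME_BASE_Q, out_stream->time_base);
    // e.g. a 33333 us frame duration written into an mp4 stream with
    // time_base 1/90000 becomes av_rescale_q(33333, {1,1000000}, {1,90000}) == 3000.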
@@ -1027,6 +1030,7 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
     // sending AV_NOPTS_VALUE doesn't really work but we seem to get it in ffmpeg 2.8
     out_frame->pts = audio_next_pts;
   }
+  // We need to keep track of this due to resampling
   audio_next_pts = out_frame->pts + out_frame->nb_samples;
   av_init_packet(&opkt);
@@ -1087,7 +1091,8 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
   if ( ipkt->duration && (ipkt->duration != AV_NOPTS_VALUE) ) {
     opkt.duration = av_rescale_q(
         ipkt->duration,
-        audio_in_stream->time_base,
+        AV_TIME_BASE_Q,
+        //audio_in_stream->time_base,
         audio_out_stream->time_base);
   }
   // Scale the PTS of the outgoing packet to be the correct time base
@@ -1099,7 +1104,8 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
   } else {
     opkt.pts = av_rescale_q(
         ipkt->pts - audio_first_pts,
-        audio_in_stream->time_base,
+        AV_TIME_BASE_Q,
+        //audio_in_stream->time_base,
         audio_out_stream->time_base);
     Debug(2, "audio opkt.pts = %" PRId64 " from ipkt->pts(%" PRId64 ") - first_pts(%" PRId64 ")",
         opkt.pts, ipkt->pts, audio_first_pts);
@@ -1126,7 +1132,8 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
   } else {
     opkt.dts = av_rescale_q(
         ipkt->dts - audio_first_dts,
-        audio_in_stream->time_base,
+        AV_TIME_BASE_Q,
+        //audio_in_stream->time_base,
         audio_out_stream->time_base);
     Debug(2, "opkt.dts = %" PRId64 " from ipkt.dts(%" PRId64 ") - first_dts(%" PRId64 ")",
         opkt.dts, ipkt->dts, audio_first_dts);
@@ -1176,6 +1183,7 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
 int VideoStore::resample_audio() {
   // Resample the in_frame into the audioSampleBuffer until we process the whole
   // decoded data. Note: pts does not survive resampling or converting
+  // If we ask for fewer samples than we put in, convert_frame will apparently buffer the remainder
 #if defined(HAVE_LIBSWRESAMPLE) || defined(HAVE_LIBAVRESAMPLE)
 #if defined(HAVE_LIBSWRESAMPLE)
   Debug(2, "Converting %d to %d samples using swresample",
@@ -1224,6 +1232,7 @@ int VideoStore::resample_audio() {
   // AAC requires 1024 samples per encode. Our input tends to be 160, so need to buffer them.
   if ( frame_size > av_audio_fifo_size(fifo) ) {
+    Debug(1, "Not enough samples in the fifo");
     return 0;
   }
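
For context on the fifo: the AAC encoder consumes fixed frames of frame_size samples (typically 1024) while the camera delivers much smaller audio packets, so converted samples accumulate in an AVAudioFifo until a full frame is available. A minimal sketch of that buffering, assuming fifo, in_frame, out_frame, and frame_size as in the surrounding code (the patch itself returns 0 and waits for more input rather than looping):

    // Push the freshly converted samples, then drain in encoder-sized frames.
    if ( av_audio_fifo_write(fifo, (void **)in_frame->data, in_frame->nb_samples)
         < in_frame->nb_samples ) {
      Error("Could not write samples to fifo");
      return 0;
    }
    while ( av_audio_fifo_size(fifo) >= frame_size ) {
      if ( av_audio_fifo_read(fifo, (void **)out_frame->data, frame_size) < frame_size ) {
        Error("Could not read samples from fifo");
        return 0;
      }
      out_frame->nb_samples = frame_size;
      // ... encode out_frame here ...
    }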
@@ -1233,15 +1242,18 @@ int VideoStore::resample_audio() {
   }
   out_frame->nb_samples = frame_size;
   // resampling changes the duration because the timebase is 1/samples
+  out_frame->pkt_duration = out_frame->nb_samples;
+  // out_frame->sample_rate;
   if ( in_frame->pts != AV_NOPTS_VALUE ) {
-    out_frame->pkt_duration = av_rescale_q(
-        in_frame->pkt_duration,
-        audio_in_stream->time_base,
-        audio_out_stream->time_base);
+    //out_frame->pkt_duration = av_rescale_q(
+    //    in_frame->pkt_duration,
+    //    audio_in_stream->time_base,
+    //    audio_out_stream->time_base);
     out_frame->pts = av_rescale_q(
         in_frame->pts,
-        audio_in_stream->time_base,
-        audio_out_stream->time_base);
+        AV_TIME_BASE_Q,
+        //audio_in_ctx->time_base,
+        audio_out_ctx->time_base);
   }
 #else
 #if defined(HAVE_LIBAVRESAMPLE)