fixup decoding when recording from a non-ffmpeg source

pull/3122/head
Isaac Connor 2017-11-14 13:16:07 -08:00
parent bac3fbfce9
commit 3129afa5d6
12 changed files with 163 additions and 126 deletions

View File

@ -109,6 +109,8 @@ public:
virtual int PostCapture()=0;
virtual AVStream *get_VideoStream() { return NULL; };
virtual AVStream *get_AudioStream() { return NULL; };
virtual AVCodecContext *get_VideoCodecContext() { return NULL; };
virtual AVCodecContext *get_AudioCodecContext() { return NULL; };
int get_VideoStreamId() { return mVideoStreamId; };
int get_AudioStreamId() { return mAudioStreamId; };
};

View File

@ -195,8 +195,7 @@ int FfmpegCamera::Capture( ZMPacket &zm_packet ) {
mReopenThread = 0;
}
ret = av_read_frame( mFormatContext, &packet );
if ( ret < 0 ) {
if ( (ret = av_read_frame( mFormatContext, &packet )) < 0 ) {
if (
// Check if EOF.
(ret == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
@ -433,18 +432,6 @@ int FfmpegCamera::OpenFfmpeg() {
}
}
if ( mVideoCodecContext->codec_id != AV_CODEC_ID_H264 ) {
#ifdef AV_CODEC_ID_H265
if ( mVideoCodecContext->codec_id == AV_CODEC_ID_H265 ) {
Debug( 1, "Input stream appears to be h265. The stored event file may not be viewable in browser." );
} else {
#endif
Warning( "Input stream is not h264. The stored event file may not be viewable in browser." );
#ifdef AV_CODEC_ID_H265
}
#endif
}
if (mVideoCodecContext->hwaccel != NULL) {
Debug(1, "HWACCEL in use");
} else {

View File

@ -127,6 +127,8 @@ class FfmpegCamera : public Camera {
return mFormatContext->streams[mAudioStreamId];
return NULL;
}
AVCodecContext *get_VideoCodecContext() { return mVideoCodecContext; };
AVCodecContext *get_AudioCodecContext() { return mAudioCodecContext; };
};
#endif // ZM_FFMPEG_CAMERA_H

View File

@ -146,13 +146,15 @@ Image::Image( const AVFrame *frame ) {
void Image::Assign( const AVFrame *frame ) {
/* Assume the dimensions etc are correct. FIXME */
AVPixelFormat format = (AVPixelFormat)AVPixFormat();
AVFrame *dest_frame = zm_av_frame_alloc();
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(dest_frame->data, dest_frame->linesize,
buffer, AV_PIX_FMT_RGBA, width, height, 1);
buffer, format, width, height, 1);
#else
avpicture_fill( (AVPicture *)dest_frame, buffer,
AV_PIX_FMT_RGBA, width, height);
format, width, height);
#endif
#if HAVE_LIBSWSCALE
@ -161,13 +163,13 @@ void Image::Assign( const AVFrame *frame ) {
height,
(AVPixelFormat)frame->format,
width, height,
AV_PIX_FMT_RGBA, SWS_BICUBIC, NULL,
format, SWS_BICUBIC, NULL,
NULL, NULL);
if ( mConvertContext == NULL )
Fatal( "Unable to create conversion context" );
if ( sws_scale(mConvertContext, frame->data, frame->linesize, 0, frame->height, dest_frame->data, dest_frame->linesize) < 0 )
Fatal("Unable to convert raw format %u to target format %u", frame->format, AV_PIX_FMT_RGBA);
Fatal("Unable to convert raw format %u to target format %u", frame->format, format);
#else // HAVE_LIBSWSCALE
Fatal("You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras");
#endif // HAVE_LIBSWSCALE

View File

@ -2822,6 +2822,18 @@ int Monitor::Capture() {
static int FirstCapture = 1; // Used in de-interlacing to indicate whether this is the even or odd image
unsigned int index = image_count % image_buffer_count;
if ( (index == shared_data->last_read_index) && (function > MONITOR) ) {
Warning( "Buffer overrun at index %d, image %d, slow down capture, speed up analysis or increase ring buffer size", index, image_count );
time_t now = time(0);
double approxFps = double(image_buffer_count)/double(now-image_buffer[index].timestamp.tv_sec);
time_t last_read_delta = now - shared_data->last_read_time;
if ( last_read_delta > (image_buffer_count/approxFps) ) {
Warning( "Last image read from shared memory %ld seconds ago, zma may have gone away", last_read_delta )
shared_data->last_read_index = image_buffer_count;
}
}
Image* capture_image = image_buffer[index].image;
ZMPacket *packet = &image_buffer[index];
// clears frame
@ -2854,60 +2866,54 @@ packet->reset();
capture_image->Fill(signalcolor);
shared_data->signal = false;
return -1;
} else if ( captureResult > 0 ) {
if ( packet->packet.size && ! packet->frame ) {
packet->decode( camera->get_VideoCodecContext() );
packet->get_image();
}
/* Deinterlacing */
if ( deinterlacing_value ) {
if ( deinterlacing_value == 1 ) {
capture_image->Deinterlace_Discard();
} else if ( deinterlacing_value == 2 ) {
capture_image->Deinterlace_Linear();
} else if ( deinterlacing_value == 3 ) {
capture_image->Deinterlace_Blend();
} else if ( deinterlacing_value == 4 ) {
capture_image->Deinterlace_4Field( next_buffer.image, (deinterlacing>>8)&0xff );
} else if ( deinterlacing_value == 5 ) {
capture_image->Deinterlace_Blend_CustomRatio( (deinterlacing>>8)&0xff );
/* Deinterlacing */
if ( deinterlacing_value ) {
if ( deinterlacing_value == 1 ) {
capture_image->Deinterlace_Discard();
} else if ( deinterlacing_value == 2 ) {
capture_image->Deinterlace_Linear();
} else if ( deinterlacing_value == 3 ) {
capture_image->Deinterlace_Blend();
} else if ( deinterlacing_value == 4 ) {
capture_image->Deinterlace_4Field( next_buffer.image, (deinterlacing>>8)&0xff );
} else if ( deinterlacing_value == 5 ) {
capture_image->Deinterlace_Blend_CustomRatio( (deinterlacing>>8)&0xff );
}
}
}
if ( orientation != ROTATE_0 ) {
switch ( orientation ) {
case ROTATE_0 :
// No action required
break;
case ROTATE_90 :
case ROTATE_180 :
case ROTATE_270 :
capture_image->Rotate( (orientation-1)*90 );
break;
case FLIP_HORI :
case FLIP_VERT :
capture_image->Flip( orientation==FLIP_HORI );
break;
if ( orientation != ROTATE_0 ) {
switch ( orientation ) {
case ROTATE_0 :
// No action required
break;
case ROTATE_90 :
case ROTATE_180 :
case ROTATE_270 :
capture_image->Rotate( (orientation-1)*90 );
break;
case FLIP_HORI :
case FLIP_VERT :
capture_image->Flip( orientation==FLIP_HORI );
break;
}
}
}
if ( (index == shared_data->last_read_index) && (function > MONITOR) ) {
Warning( "Buffer overrun at index %d, image %d, slow down capture, speed up analysis or increase ring buffer size", index, image_count );
time_t now = time(0);
double approxFps = double(image_buffer_count)/double(now-image_buffer[index].timestamp.tv_sec);
time_t last_read_delta = now - shared_data->last_read_time;
if ( last_read_delta > (image_buffer_count/approxFps) ) {
Warning( "Last image read from shared memory %ld seconds ago, zma may have gone away", last_read_delta )
shared_data->last_read_index = image_buffer_count;
if ( privacy_bitmask )
capture_image->MaskPrivacy( privacy_bitmask );
if ( config.timestamp_on_capture ) {
TimestampImage( capture_image, &packet->timestamp );
}
}
if ( privacy_bitmask )
capture_image->MaskPrivacy( privacy_bitmask );
if ( config.timestamp_on_capture ) {
TimestampImage( capture_image, &packet->timestamp );
}
int video_stream_id = camera->get_VideoStreamId();
#if 0
int video_stream_id = camera->get_VideoStreamId();
//Video recording
if ( video_store_data->recording.tv_sec ) {
if ( shared_data->last_event_id != this->GetVideoWriterEventId() ) {
@ -2986,13 +2992,17 @@ packet->reset();
}
}
#endif
shared_data->signal = CheckSignal(capture_image);
shared_data->last_write_index = index;
shared_data->last_write_time = image_buffer[index].timestamp.tv_sec;
image_count++;
} else { // result == 0
} // end if result
} // end if deinterlacing
shared_data->signal = CheckSignal(capture_image);
shared_data->last_write_index = index;
shared_data->last_write_time = image_buffer[index].timestamp.tv_sec;
mutex.unlock();
image_count++;
if ( image_count && fps_report_interval && !(image_count%fps_report_interval) ) {
struct timeval now;
@ -3013,8 +3023,11 @@ packet->reset();
snprintf( sql, sizeof(sql), "UPDATE Monitors SET CaptureFPS='%.2lf' WHERE Id=%d", fps, id );
if ( mysql_query( &dbconn, sql ) ) {
Error( "Can't run query: %s", mysql_error( &dbconn ) );
}
} else {
Debug(2,"toofast");}
} // end if too fast
}else {
Debug(2,"Nor reopting fps");
} // end if should report fps
// Icon: I'm not sure these should be here. They have nothing to do with capturing

View File

@ -166,8 +166,6 @@ protected:
VideoStore *videoStore;
zm_packetqueue packetqueue;
Mutex mutex;
std::string output_codec;
std::string output_container;
class MonitorLink {
protected:
@ -238,6 +236,8 @@ protected:
int savejpegspref;
VideoWriter videowriter;
std::string encoderparams;
std::string output_codec;
std::string output_container;
std::vector<EncoderParameter_t> encoderparamsvec;
bool record_audio; // Whether to store the audio that we receive

View File

@ -27,6 +27,7 @@ using namespace std;
ZMPacket::ZMPacket( ) {
keyframe = 0;
image = NULL;
in_frame = NULL;
frame = NULL;
buffer = NULL;
av_init_packet( &packet );
@ -37,6 +38,7 @@ ZMPacket::ZMPacket( ) {
ZMPacket::ZMPacket( Image *i ) {
keyframe = 1;
image = i;
in_frame = NULL;
frame = NULL;
buffer = NULL;
av_init_packet( &packet );
@ -48,6 +50,8 @@ ZMPacket::ZMPacket( AVPacket *p ) {
set_packet( p );
keyframe = p->flags & AV_PKT_FLAG_KEY;
buffer = NULL;
in_frame = NULL;
frame = NULL;
}
ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) {
@ -56,6 +60,8 @@ ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) {
timestamp = *t;
keyframe = p->flags & AV_PKT_FLAG_KEY;
buffer = NULL;
in_frame = NULL;
frame = NULL;
}
ZMPacket::ZMPacket( AVPacket *p, AVFrame *f, Image *i ) {
av_init_packet( &packet );
@ -63,10 +69,16 @@ ZMPacket::ZMPacket( AVPacket *p, AVFrame *f, Image *i ) {
image = i;
frame = f;
buffer = NULL;
in_frame = NULL;
frame = NULL;
}
ZMPacket::~ZMPacket() {
zm_av_packet_unref( &packet );
if ( in_frame ) {
//av_free(frame->data);
av_frame_free( &in_frame );
}
if ( frame ) {
//av_free(frame->data);
av_frame_free( &frame );
@ -82,11 +94,16 @@ void ZMPacket::reset() {
Debug(2,"reset");
zm_av_packet_unref( &packet );
packet.size = 0;
if ( in_frame ) {
Debug(4,"reset frame");
av_frame_free( &in_frame );
}
if ( frame ) {
Debug(4,"reset frame");
av_frame_free( &frame );
}
if ( buffer ) {
Debug(2,"freeing buffer");
Debug(4,"freeing buffer");
av_freep( &buffer );
}
}
@ -94,17 +111,17 @@ void ZMPacket::reset() {
int ZMPacket::decode( AVCodecContext *ctx ) {
Debug(4, "about to decode video" );
if ( frame ) {
if ( in_frame ) {
Error("Already have a frame?");
} else {
frame = zm_av_frame_alloc();
in_frame = zm_av_frame_alloc();
}
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
int ret = avcodec_send_packet( ctx, &packet );
if ( ret < 0 ) {
Error( "Unable to send packet: %s", av_make_error_string(ret).c_str() );
av_frame_free( &frame );
av_frame_free( &in_frame );
return 0;
}
@ -113,21 +130,21 @@ int ZMPacket::decode( AVCodecContext *ctx ) {
ret = avcodec_receive_frame( ctx, hwFrame );
if ( ret < 0 ) {
Error( "Unable to receive frame: %s", av_make_error_string(ret).c_str() );
av_frame_free( &frame );
av_frame_free( &in_frame );
return 0;
}
ret = av_hwframe_transfer_data(frame, hwFrame, 0);
if ( ret < 0 ) {
Error( "Unable to transfer frame: %s", av_make_error_string(ret).c_str() );
av_frame_free( &frame );
av_frame_free( &in_frame );
return 0;
}
} else {
#endif
ret = avcodec_receive_frame( ctx, frame );
ret = avcodec_receive_frame( ctx, in_frame );
if ( ret < 0 ) {
Error( "Unable to receive frame: %s", av_make_error_string(ret).c_str() );
av_frame_free( &frame );
av_frame_free( &in_frame );
return 0;
}
@ -137,23 +154,23 @@ int ZMPacket::decode( AVCodecContext *ctx ) {
# else
int frameComplete = 0;
int ret = zm_avcodec_decode_video( ctx, frame, &frameComplete, &packet );
int ret = zm_avcodec_decode_video( ctx, in_frame, &frameComplete, &packet );
if ( ret < 0 ) {
Error( "Unable to decode frame at frame %s", av_make_error_string(ret).c_str() );
av_frame_free( &frame );
av_frame_free( &in_frame );
return 0;
}
if ( ! frameComplete ) {
Debug(1, "incomplete frame?");
av_frame_free( &frame );
av_frame_free( &in_frame );
return 0;
}
#endif
return 1;
} // end ZMPacket::decode
Image * ZMPacket::get_image( Image *i = NULL ) {
if ( ! frame ) {
Image * ZMPacket::get_image( Image *i ) {
if ( ! in_frame ) {
Error("Can't get image without frame.. maybe need to decode first");
return NULL;
}
@ -164,7 +181,7 @@ Image * ZMPacket::get_image( Image *i = NULL ) {
}
image = i;
}
image->Assign( frame );
image->Assign( in_frame );
return image;
}

View File

@ -35,6 +35,7 @@ class ZMPacket {
int keyframe;
AVPacket packet; // Input packet, undecoded
AVFrame *in_frame; // Input image, decoded Theoretically only filled if needed.
AVFrame *frame; // Input image, decoded Theoretically only filled if needed.
uint8_t *buffer;
Image *image; // Our internal image object representing this frame
@ -43,7 +44,7 @@ class ZMPacket {
AVPacket *av_packet() { return &packet; }
AVPacket *set_packet( AVPacket *p ) ;
AVFrame *av_frame() { return frame; }
Image *get_image( Image * );
Image *get_image( Image *i=NULL );
Image *set_image( Image * );
int is_keyframe() { return keyframe; };

View File

@ -78,6 +78,27 @@ int SWScale::SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf,
return 0;
}
/**
 * Convert in_frame into out_frame, rescaling and converting pixel format
 * as dictated by each frame's width/height/format fields.
 *
 * Both frames must already have their data/linesize buffers allocated and
 * their width, height and format fields populated by the caller.
 *
 * @param in_frame   source frame (read only)
 * @param out_frame  destination frame (written)
 * @return 0 on success, -6 if a scaling context could not be obtained,
 *         -10 if the conversion itself failed.
 */
int SWScale::Convert(
    AVFrame *in_frame,
    AVFrame *out_frame
    ) {
/* Get the context; sws_getCachedContext reuses the previous context when the
 * parameters are unchanged, so this is cheap on the steady-state path. */
swscale_ctx = sws_getCachedContext( swscale_ctx,
in_frame->width, in_frame->height, (AVPixelFormat)in_frame->format,
out_frame->width, out_frame->height, (AVPixelFormat)out_frame->format,
SWS_FAST_BILINEAR, NULL, NULL, NULL );
if ( swscale_ctx == NULL ) {
Error("Failed getting swscale context");
return -6;
}
/* Do the conversion. sws_scale returns the height of the output slice on
 * success and a negative AVERROR on failure, so a <= 0 result covers both
 * the zero-rows case and genuine error codes (the original !result check
 * missed negative errors). */
if ( sws_scale(swscale_ctx, in_frame->data, in_frame->linesize, 0, in_frame->height, out_frame->data, out_frame->linesize ) <= 0 ) {
Error("swscale conversion failed");
return -10;
}
return 0;
}
int SWScale::Convert(
const uint8_t* in_buffer,

View File

@ -13,6 +13,7 @@ class SWScale {
int SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height);
int ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size);
int ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size);
int Convert( AVFrame *in_frame, AVFrame *out_frame );
int Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height);
int Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height);

View File

@ -92,6 +92,8 @@ VideoStore::VideoStore(
//video_in_ctx.codec_id = video_in_stream->codecpar.codec_id;
#else
video_in_ctx = video_in_stream->codec;
Debug(2,"Copied video context from input stream");
zm_dump_codec(video_in_ctx);
#endif
} else {
Debug(2, "No input ctx");
@ -147,6 +149,13 @@ VideoStore::VideoStore(
video_out_ctx->width = monitor->Width();
video_out_ctx->height = monitor->Height();
video_out_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
if ( monitor->OutputCodec() == "mjpeg" ) {
video_out_codec = avcodec_find_encoder_by_name("mjpeg");
@ -199,8 +208,8 @@ VideoStore::VideoStore(
av_dict_set( &opts, "preset", "ultrafast", 0 );
}
if ( ! av_dict_get( opts, "tune", NULL, 0 ) ) {
Debug(2,"Setting tune to lowlatency");
av_dict_set( &opts, "tune", "lowlatency", 0 );
Debug(2,"Setting tune to zerolatency");
av_dict_set( &opts, "tune", "zerolatency", 0 );
}
if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
@ -832,38 +841,8 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
Debug(3, "Have encoding video frame count (%d)", frame_count);
if ( ! zm_packet->frame ) {
if ( zm_packet->packet.size ) {
AVPacket *ipkt = &zm_packet->packet;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
if ( ( ret = avcodec_send_packet(video_in_ctx, ipkt) ) < 0 ) {
Error("avcodec_send_packet fail %s", av_make_error_string(ret).c_str());
return 0;
}
if ( ( ret = avcodec_receive_frame(video_in_ctx, in_frame) ) < 0 ) {
Error("avcodec_receive_frame fail %s", av_make_error_string(ret).c_str());
return 0;
}
#else
int data_present;
if ((ret = avcodec_decode_video2(video_in_ctx, in_frame,
&data_present, ipkt )) < 0) {
Error("Could not decode frame (error '%s')\n",
av_make_error_string(ret).c_str());
av_frame_free(&in_frame);
return 0;
} else {
Debug(3, "Decoded frame data_present(%d)", data_present);
}
if ( !data_present ) {
Debug(2, "Not ready to transcode a frame yet.");
return 0;
}
#endif
zm_packet->frame = in_frame;
} else if ( zm_packet->image ) {
AVFrame *frame = zm_packet->frame = zm_av_frame_alloc();
if ( ! frame ) {
AVFrame *out_frame = zm_packet->frame = zm_av_frame_alloc();
if ( ! out_frame ) {
Error("Unable to allocate a frame");
return 0;
}
@ -874,8 +853,8 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
video_out_ctx->height, 1);
zm_packet->buffer = (uint8_t *)av_malloc(codec_imgsize);
av_image_fill_arrays(
frame->data,
frame->linesize,
out_frame->data,
out_frame->linesize,
zm_packet->buffer,
video_out_ctx->pix_fmt,
video_out_ctx->width,
@ -888,7 +867,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
video_out_ctx->height);
zm_packet->buffer = (uint8_t *)av_malloc(codec_imgsize);
avpicture_fill(
(AVPicture *)frame,
(AVPicture *)out_frame,
zm_packet->buffer,
video_out_ctx->pix_fmt,
video_out_ctx->width,
@ -896,9 +875,21 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
);
#endif
frame->width = video_out_ctx->width;
frame->height = video_out_ctx->height;
frame->format = video_out_ctx->pix_fmt;
out_frame->width = video_out_ctx->width;
out_frame->height = video_out_ctx->height;
out_frame->format = video_out_ctx->pix_fmt;
if ( ! zm_packet->in_frame ) {
if ( zm_packet->packet.size ) {
if ( ! zm_packet->decode( video_in_ctx ) ) {
Debug(2, "unable to decode yet.");
return 0;
}
}
//Go straight to out frame
swscale.Convert( zm_packet->in_frame, out_frame );
} else if ( zm_packet->image ) {
//Go straight to out frame
swscale.Convert(zm_packet->image,
zm_packet->buffer,
codec_imgsize,

View File

@ -1 +1 @@
1.31.13
1.31.12