Merge branch 'zma_to_thread' of github.com:ConnorTechnology/ZoneMinder into zma_to_thread

pull/3122/head
Isaac Connor 2018-04-14 14:58:25 -04:00
commit c7deb91319
18 changed files with 571 additions and 581 deletions

View File

@ -60,7 +60,6 @@ FOR EACH ROW
//
delimiter //
DROP TRIGGER IF EXISTS Events_Week_delete_trigger//
CREATE TRIGGER Events_Week_delete_trigger BEFORE DELETE ON Events_Week
FOR EACH ROW BEGIN

View File

@ -457,7 +457,8 @@ CREATE TABLE `Monitors` (
`Deinterlacing` int(10) unsigned NOT NULL default '0',
`SaveJPEGs` TINYINT NOT NULL DEFAULT '3' ,
`VideoWriter` TINYINT NOT NULL DEFAULT '0',
`OutputCodec` enum('h264','mjpeg','mpeg1','mpeg2'),
`OutputCodec` int(10) unsigned NOT NULL default 0,
`Encoder` enum('auto','h264','h264_omx','mjpeg','mpeg1','mpeg2'),
`OutputContainer` enum('auto','mp4','mkv'),
`EncoderParameters` TEXT,
`RecordAudio` TINYINT NOT NULL DEFAULT '0',

View File

@ -176,16 +176,17 @@ BEGIN
WHERE Id=OLD.MonitorId;
END IF;
END IF;
ELSEIF ( NEW.Archived AND diff ) THEN
ELSE
IF ( NEW.Archived AND diff ) THEN
UPDATE Events_Archived SET DiskSpace=NEW.DiskSpace WHERE EventId=NEW.Id;
END IF;
END IF;
IF ( diff ) THEN
UPDATE Monitors SET TotalEventDiskSpace = COALESCE(TotalEventDiskSpace,0) - COALESCE(OLD.DiskSpace,0) + COALESCE(NEW.DiskSpace,0) WHERE Id=OLD.MonitorId;
END IF;
END;
//
delimiter ;

View File

@ -10,3 +10,18 @@ SET @s = (SELECT IF(
PREPARE stmt FROM @s;
EXECUTE stmt;
ALTER TABLE `Monitors` MODIFY `OutputCodec` int(10) UNSIGNED NOT NULL default 0;
SET @s = (SELECT IF(
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = DATABASE()
AND table_name = 'Monitors'
AND column_name = 'Encoder'
) > 0,
"SELECT 'Column Encoder already exists in Monitors'",
"ALTER TABLE `Monitors` ADD `Encoder` enum('auto','h264','h264_omx','mjpeg','mpeg1','mpeg2') AFTER `OutputCodec`"
));
PREPARE stmt FROM @s;
EXECUTE stmt;

View File

@ -130,8 +130,8 @@ sub Execute {
}
sub Sql {
my $self = $_[0];
$$self{Sql} = shift if @_;;
my $self = shift;
$$self{Sql} = shift if @_;
if ( ! $$self{Sql} ) {
my $filter_expr = ZoneMinder::General::jsonDecode($self->{Query});
my $sql = 'SELECT E.*,

View File

@ -98,6 +98,7 @@ Event::Event(
monitor->GetOptSaveJPEGs(),
storage->SchemeString().c_str()
);
db_mutex.lock();
if ( mysql_query(&dbconn, sql) ) {
Error("Can't insert event: %s. sql was (%s)", mysql_error(&dbconn), sql);
@ -192,7 +193,7 @@ Event::Event(
if ( monitor->GetOptVideoWriter() != 0 ) {
std::string container = monitor->OutputContainer();
if ( container == "auto" || container == "" ) {
if ( monitor->OutputCodec() == "h264" ) {
if ( monitor->OutputCodec() == AV_CODEC_ID_H264 ) {
container = "mp4";
} else {
container = "mkv";

View File

@ -461,3 +461,15 @@ void dumpPacket(AVPacket *pkt, const char *text) {
pkt->duration);
Debug(2, "%s:%d:%s: %s", __FILE__, __LINE__, text, b);
}
// Release an AVCodecContext owned by the caller: close the codec, free the
// context where the ffmpeg version requires it, and NULL the caller's
// pointer so it cannot be used after free.
void zm_free_codec( AVCodecContext **ctx ) {
if ( *ctx ) {
avcodec_close(*ctx);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// We allocate and copy in newer ffmpeg, so need to free it
avcodec_free_context(ctx);
#endif
*ctx = NULL;
// NOTE(review): assigns a name that is not local to this helper —
// presumably a file-scope/member pointer elsewhere in this translation
// unit. Confirm this side effect is intended for every caller, since a
// generic "free any codec context" helper unconditionally clearing an
// audio-specific pointer looks accidental.
audio_in_codec = NULL;
} // end if
}

View File

@ -476,6 +476,6 @@ int FfmpegCamera::CloseFfmpeg() {
}
return 0;
} // end FfmpegCamera::Close
} // end int FfmpegCamera::CloseFfmpeg()
#endif // HAVE_LIBAVFORMAT

View File

@ -164,5 +164,3 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id, int frame_number) {
} // end while frame_number > streams.frame_count
return frame;
} // end AVFrame *FFmpeg_Input::get_frame

View File

@ -2132,6 +2132,7 @@ AVStream *LocalCamera::get_VideoStream() {
AVFormatContext *oc = avformat_alloc_context();
video_stream = avformat_new_stream( oc, NULL );
if ( video_stream ) {
video_stream->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
video_stream->codecpar->width = width;
video_stream->codecpar->height = height;

View File

@ -71,7 +71,7 @@ std::string load_monitor_sql =
"Device, Channel, Format, V4LMultiBuffer, V4LCapturesPerFrame, " // V4L Settings
"Protocol, Method, Options, User, Pass, Host, Port, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, "
"SaveJPEGs, VideoWriter, EncoderParameters, "
"OutputCodec, OutputContainer, "
"OutputCodec, Encoder, OutputContainer, "
"RecordAudio, "
"Brightness, Contrast, Hue, Colour, "
"EventPrefix, LabelFormat, LabelX, LabelY, LabelSize,"
@ -292,7 +292,7 @@ Monitor::Monitor()
colours(0),
videowriter(DISABLED),
encoderparams(""),
output_codec(""),
output_codec(0),
output_container(""),
record_audio(0),
//event_prefix
@ -460,7 +460,8 @@ void Monitor::Load(MYSQL_ROW dbrow, bool load_zones=true, Purpose p = QUERY) {
/* Parse encoder parameters */
ParseEncoderParameters(encoderparams.c_str(), &encoderparamsvec);
output_codec = dbrow[col] ? dbrow[col] : ""; col++;
output_codec = dbrow[col] ? atoi(dbrow[col]) : 0; col++;
encoder = dbrow[col] ? dbrow[col] : ""; col++;
output_container = dbrow[col] ? dbrow[col] : ""; col++;
record_audio = (*dbrow[col] != '0'); col++;
@ -883,25 +884,7 @@ bool Monitor::connect() {
} // Monitor::connect
Monitor::~Monitor() {
if ( videoStore ) {
delete videoStore;
videoStore = NULL;
}
delete packetqueue;
packetqueue = NULL;
if ( timestamps ) {
delete[] timestamps;
timestamps = 0;
}
if ( images ) {
delete[] images;
images = 0;
}
if ( privacy_bitmask ) {
delete[] privacy_bitmask;
privacy_bitmask = NULL;
}
if ( mem_ptr ) {
if ( event ) {
Info( "%s: image_count:%d - Closing event %llu, shutting down", name, image_count, event->Id() );
@ -914,6 +897,11 @@ Monitor::~Monitor() {
event_delete_thread = NULL;
}
}
if ( event_delete_thread ) {
event_delete_thread->join();
delete event_delete_thread;
event_delete_thread = NULL;
}
if ( deinterlacing_value == 4 ) {
delete next_buffer.image;
@ -966,6 +954,27 @@ Monitor::~Monitor() {
#endif // ZM_MEM_MAPPED
} // end if mem_ptr
if ( videoStore ) {
delete videoStore;
videoStore = NULL;
}
if ( timestamps ) {
delete[] timestamps;
timestamps = 0;
}
if ( images ) {
delete[] images;
images = 0;
}
delete packetqueue;
packetqueue = NULL;
if ( privacy_bitmask ) {
delete[] privacy_bitmask;
privacy_bitmask = NULL;
}
for ( int i=0; i < n_zones; i++ ) {
delete zones[i];
}
@ -1550,17 +1559,17 @@ void Monitor::UpdateAnalysisFPS() {
if ( new_analysis_fps != analysis_fps ) {
analysis_fps = new_analysis_fps;
static char sql[ZM_SQL_SML_BUFSIZ];
char sql[ZM_SQL_SML_BUFSIZ];
snprintf(sql, sizeof(sql), "INSERT INTO Monitor_Status (MonitorId,AnalysisFPS) VALUES (%d, %.2lf) ON DUPLICATE KEY UPDATE AnalysisFPS = %.2lf", id, analysis_fps, analysis_fps);
db_mutex.lock();
if ( mysql_query( &dbconn, sql ) ) {
Error("Can't run query: %s", mysql_error(&dbconn));
}
db_mutex.unlock();
}
last_analysis_fps_time = now.tv_sec;
}
}
}
// Would be nice if this JUST did analysis
// This idea is that we should be analysing as close to the capture frame as possible.
@ -1589,17 +1598,19 @@ bool Monitor::Analyse() {
Debug(2, "Analysis index (%d), last_Write(%d)", index, shared_data->last_write_index);
packets_processed += 1;
struct timeval *timestamp = snap->timestamp;
Image *snap_image = snap->image;
if ( snap->image_index == -1 ) {
snap->unlock();
Debug(2, "skipping because audio");
// We want to skip, but if we return, we may sleep.
//
if ( ! packetqueue->increment_analysis_it() ) {
Debug(2, "No more packets to analyse");
return false;
}
continue;
}
struct timeval *timestamp = snap->timestamp;
Image *snap_image = snap->image;
// signal is set by capture
bool signal = shared_data->signal;
@ -1727,6 +1738,19 @@ bool Monitor::Analyse() {
// Create event
event = new Event( this, *timestamp, "Continuous", noteSetMap );
shared_data->last_event_id = event->Id();
// lets construct alarm cause. It will contain cause + names of zones alarmed
std::string alarm_cause="Continuous";
for ( int i=0; i < n_zones; i++) {
if (zones[i]->Alarmed()) {
alarm_cause += std::string(zones[i]->Label());
if (i < n_zones-1) {
alarm_cause +=",";
}
}
}
alarm_cause = cause+" "+alarm_cause;
strncpy( shared_data->alarm_cause,alarm_cause.c_str() , sizeof(shared_data->alarm_cause) );
video_store_data->recording = event->StartTime();
Info( "%s: %03d - Opening new event %llu, section start", name, analysis_image_count, event->Id() );
/* To prevent cancelling out an existing alert\prealarm\alarm state */
@ -1743,6 +1767,18 @@ bool Monitor::Analyse() {
Info( "%s: %03d - Gone into alarm state", name, analysis_image_count );
shared_data->state = state = ALARM;
if ( ! event ) {
// lets construct alarm cause. It will contain cause + names of zones alarmed
std::string alarm_cause="";
for ( int i=0; i < n_zones; i++) {
if (zones[i]->Alarmed()) {
alarm_cause += std::string(zones[i]->Label());
if (i < n_zones-1) {
alarm_cause +=",";
}
}
}
alarm_cause = cause+" "+alarm_cause;
strncpy( shared_data->alarm_cause,alarm_cause.c_str() , sizeof(shared_data->alarm_cause) );
event = new Event( this, *timestamp, cause, noteSetMap );
shared_data->last_event_id = event->Id();
}
@ -1817,9 +1853,14 @@ bool Monitor::Analyse() {
// Alert means this frame has no motion, but we were alarmed and are still recording.
if ( noteSetMap.size() > 0 )
event->updateNotes( noteSetMap );
} else if ( state == TAPE ) {
if ( !(analysis_image_count%(frame_skip+1)) ) {
}
//} else if ( state == TAPE ) {
//if ( !(analysis_image_count%(frame_skip+1)) ) {
//if ( config.bulk_frame_interval > 1 ) {
//event->AddFrame( snap_image, *timestamp, (event->Frames()<pre_event_count?0:-1) );
//} else {
//event->AddFrame( snap_image, *timestamp );
//}
//}
}
if ( function == MODECT || function == MOCORD ) {
ref_image.Blend( *snap_image, ( state==ALARM ? alarm_ref_blend_perc : ref_blend_perc ) );
@ -1921,7 +1962,7 @@ void Monitor::ReloadZones() {
zones = 0;
n_zones = Zone::Load(this, zones);
//DumpZoneImage();
}
} // end void Monitor::ReloadZones()
void Monitor::ReloadLinkedMonitors(const char *p_linked_monitors) {
Debug(1, "Reloading linked monitors for monitor %s, '%s'", name, p_linked_monitors);
@ -1938,6 +1979,7 @@ void Monitor::ReloadLinkedMonitors(const char *p_linked_monitors) {
int n_link_ids = 0;
unsigned int link_ids[256];
// This nasty code picks out strings of digits from p_linked_monitors and tries to load them.
char link_id_str[8];
char *dest_ptr = link_id_str;
const char *src_ptr = p_linked_monitors;
@ -1982,19 +2024,19 @@ void Monitor::ReloadLinkedMonitors(const char *p_linked_monitors) {
Debug(1, "Checking linked monitor %d", link_ids[i]);
static char sql[ZM_SQL_SML_BUFSIZ];
snprintf( sql, sizeof(sql), "select Id, Name from Monitors where Id = %d and Function != 'None' and Function != 'Monitor' and Enabled = 1", link_ids[i] );
snprintf(sql, sizeof(sql), "SELECT Id, Name FROM Monitors WHERE Id = %d AND Function != 'None' AND Function != 'Monitor' AND Enabled = 1", link_ids[i] );
db_mutex.lock();
if ( mysql_query(&dbconn, sql) ) {
Error("Can't run query: %s", mysql_error(&dbconn));
db_mutex.unlock();
exit( mysql_errno( &dbconn ) );
continue;
}
MYSQL_RES *result = mysql_store_result(&dbconn);
db_mutex.unlock();
if ( !result ) {
Error("Can't use query result: %s", mysql_error(&dbconn));
exit( mysql_errno( &dbconn ) );
continue;
}
int n_monitors = mysql_num_rows(result);
if ( n_monitors == 1 ) {
@ -2005,9 +2047,9 @@ void Monitor::ReloadLinkedMonitors(const char *p_linked_monitors) {
Warning("Can't link to monitor %d, invalid id, function or not enabled", link_ids[i]);
}
mysql_free_result(result);
}
} // end foreach n_link_id
n_linked_monitors = count;
}
} // end if n_link_ids > 0
}
}
@ -2129,7 +2171,7 @@ int Monitor::Capture() {
return 0;
}
} else {
Debug(2,"Before capture");
Debug(4, "Capturing");
captureResult = camera->Capture(*packet);
gettimeofday( packet->timestamp, NULL );
if ( captureResult < 0 ) {
@ -2589,18 +2631,17 @@ unsigned int Monitor::SubpixelOrder() const { return camera->SubpixelOrder(); }
int Monitor::PrimeCapture() {
int ret = camera->PrimeCapture();
if ( ret == 0 ) {
if ( packetqueue )
delete packetqueue;
video_stream_id = camera->get_VideoStreamId();
packetqueue = new zm_packetqueue(image_buffer_count, video_stream_id);
}
Debug(2, "Video stream id is (%d), minimum_packets to keep in buffer(%d)", video_stream_id, pre_event_buffer_count);
return ret;
}
int Monitor::PreCapture() {
return camera->PreCapture();
}
int Monitor::PostCapture() {
return camera->PostCapture();
}
int Monitor::PreCapture() const { return camera->PreCapture(); }
int Monitor::PostCapture() const { return camera->PostCapture() ; }
Monitor::Orientation Monitor::getOrientation() const { return orientation; }
// Wait for camera to get an image, and then assign it as the base reference image. So this should be done as the first task in the analysis thread startup.

View File

@ -262,7 +262,8 @@ protected:
int colours;
VideoWriter videowriter;
std::string encoderparams;
std::string output_codec;
int output_codec;
std::string encoder;
std::string output_container;
std::vector<EncoderParameter_t> encoderparamsvec;
_AVPIXELFORMAT imagePixFormat;
@ -372,51 +373,6 @@ public:
// OurCheckAlarms seems to be unused. Check it on zm_monitor.cpp for more info.
//bool OurCheckAlarms( Zone *zone, const Image *pImage );
Monitor(
int p_id,
const char *p_name,
unsigned int p_server_id,
unsigned int p_storage_id,
int p_function,
bool p_enabled,
const char *p_linked_monitors,
Camera *p_camera,
int p_orientation,
unsigned int p_deinterlacing,
int p_savejpegs,
int p_colours,
VideoWriter p_videowriter,
std::string &p_encoderparams,
std::string &p_output_codec,
std::string &p_output_container,
bool p_record_audio,
const char *p_event_prefix,
const char *p_label_format,
const Coord &p_label_coord,
int label_size,
int p_image_buffer_count,
int p_warmup_count,
int p_pre_event_count,
int p_post_event_count,
int p_stream_replay_buffer,
int p_alarm_frame_count,
int p_section_length,
int p_frame_skip,
int p_motion_frame_skip,
double p_analysis_fps,
unsigned int p_analysis_update_delay,
int p_capture_delay,
int p_alarm_capture_delay,
int p_fps_report_interval,
int p_ref_blend_perc,
int p_alarm_ref_blend_perc,
bool p_track_motion,
Rgb p_signal_check_colour,
bool p_embed_exif,
Purpose p_purpose,
int p_n_zones=0,
Zone *p_zones[]=0
);
~Monitor();
void AddZones( int p_n_zones, Zone *p_zones[] );
@ -486,7 +442,8 @@ public:
VideoWriter GetOptVideoWriter() const { return videowriter; }
const std::vector<EncoderParameter_t>* GetOptEncoderParams() const { return &encoderparamsvec; }
const std::string &GetEncoderOptions() const { return encoderparams; }
const std::string &OutputCodec() const { return output_codec; }
const int OutputCodec() const { return output_codec; }
const std::string &Encoder() const { return encoder; }
const std::string &OutputContainer() const { return output_container; }
uint64_t GetVideoWriterEventId() const { return video_store_data->current_event; }
@ -527,9 +484,9 @@ public:
int actionContrast( int p_contrast=-1 );
int PrimeCapture();
int PreCapture();
int PreCapture() const;
int Capture();
int PostCapture();
int PostCapture() const;
void CheckAction();

View File

@ -39,7 +39,7 @@ bool zm_packetqueue::queuePacket( ZMPacket* zm_packet ) {
// If we can never queue the same packet, then they can never go past
if ( zm_packet->image_index == first_video_packet_index ) {
Debug(2, "queuing packet that is already on the queue(%d)", zm_packet->image_index );
ZMPacket *p;
ZMPacket *p = NULL;;
while ( pktQueue.size() && (p = pktQueue.front()) && ( p->image_index != zm_packet->image_index ) ) {
if ( ( analysis_it != pktQueue.end() ) && ( *analysis_it == p ) ) {
Debug(2, "Increasing analysis_it");

View File

@ -31,6 +31,13 @@ extern "C" {
#include "libavutil/time.h"
}
VideoStore::CodecData VideoStore::codec_data[] = {
{ AV_CODEC_ID_H264, "h264", "h264_omx", AV_PIX_FMT_YUV420P },
{ AV_CODEC_ID_H264, "h264", "h264", AV_PIX_FMT_YUV420P },
{ AV_CODEC_ID_H264, "h264", "libx264", AV_PIX_FMT_YUV420P },
{ AV_CODEC_ID_MJPEG, "mjpeg", "mjpeg", AV_PIX_FMT_YUVJ422P },
};
VideoStore::VideoStore(
const char *filename_in,
const char *format_in,
@ -49,6 +56,9 @@ monitor = p_monitor;
frame_count = 0;
in_frame = NULL;
video_out_codec = NULL;
video_out_stream = NULL;
converted_in_samples = NULL;
audio_out_codec = NULL;
audio_in_codec = NULL;
@ -140,87 +150,7 @@ bool VideoStore::open() {
video_in_stream_index = 0;
}
if ( monitor->OutputCodec() == "mjpeg" ) {
Debug(2,"Using mjpeg");
video_out_codec = avcodec_find_encoder_by_name("mjpeg");
if ( ! video_out_codec ) {
Debug(1, "Didn't find mjpeg encoder");
video_out_codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
}
video_out_ctx = avcodec_alloc_context3( video_out_codec );
video_out_ctx->codec_id = video_out_codec->id;
video_out_ctx->pix_fmt = AV_PIX_FMT_YUVJ422P;
} else if ( monitor->OutputCodec() == "h264" || monitor->OutputCodec() == "" ) {
AVPixelFormat pf = AV_PIX_FMT_YUV420P;
// First try hardware accell
video_out_codec = avcodec_find_encoder_by_name("h264_omx");
if ( ! video_out_codec ) {
Debug(1, "Didn't find omx");
video_out_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
}
if ( ! video_out_codec ) {
if ( AV_CODEC_ID_NONE ==
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
video_in_stream->codecpar->codec_id
#else
video_in_stream->codec->codec_id
#endif
) {
Debug(1, "trying xh264rgb");
// We will be encoding rgb images, so prefer
video_out_codec = avcodec_find_encoder_by_name("libx264rgb");
if ( ! video_out_codec ) {
video_out_codec = avcodec_find_encoder_by_name("libx264");
} else {
pf =
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
(AVPixelFormat)video_in_stream->codecpar->format;
#else
video_in_stream->codec->pix_fmt;
#endif
}
} else {
video_out_codec = avcodec_find_encoder_by_name("libx264");
pf = AV_PIX_FMT_YUV420P;
}
}
// Need to do lookup by codec_id
if ( ! video_out_codec ) {
Error("Didn't find h264 encoder");
video_out_codec = NULL;
return false;
}
Debug(1, "Using %s for codec", video_out_codec->name);
video_out_ctx = avcodec_alloc_context3(video_out_codec);
if ( AV_CODEC_ID_H264 != video_out_ctx->codec_id ) {
Warning("Have to set codec_id?");
video_out_ctx->codec_id = AV_CODEC_ID_H264;
}
video_out_ctx->pix_fmt = pf;
} else {
Error("Unsupported output codec selected");
return false;
}
// Copy params from instream to ctx
// // FIXME SHould check that we are set to passthrough
if ( video_in_stream && ( video_in_ctx->codec_id == AV_CODEC_ID_H264 ) ) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_parameters_to_context(video_out_ctx, video_in_stream->codecpar);
#else
ret = avcodec_copy_context( video_out_ctx, video_in_ctx );
#endif
if ( ret < 0 ) {
Error("Could not initialize ctx parameteres");
return false;
}
//video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
video_out_ctx->time_base = video_in_ctx->time_base;
video_out_ctx = avcodec_alloc_context3(NULL);
if ( oc->oformat->flags & AVFMT_GLOBALHEADER ) {
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
@ -228,6 +158,26 @@ Debug(2,"Using mjpeg");
video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
int wanted_codec = monitor->OutputCodec();
if ( ! wanted_codec ) {
// default to h264
wanted_codec = AV_CODEC_ID_H264;
}
// // FIXME SHould check that we are set to passthrough
if ( video_in_stream && ( video_in_ctx->codec_id == wanted_codec ) ) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_parameters_to_context(video_out_ctx, video_in_stream->codecpar);
#else
ret = avcodec_copy_context( video_out_ctx, video_in_ctx );
#endif
// Copy params from instream to ctx
if ( ret < 0 ) {
Error("Could not initialize ctx parameteres");
return false;
}
//video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
video_out_ctx->time_base = video_in_ctx->time_base;
// Fix deprecated formats
switch ( video_out_ctx->pix_fmt ) {
case AV_PIX_FMT_YUVJ422P :
@ -245,39 +195,66 @@ Debug(2,"Using mjpeg");
video_out_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
break;
}
zm_dump_codec(video_out_ctx);
// Only set orientation if doing passthrough, otherwise the frame image will be rotated
Monitor::Orientation orientation = monitor->getOrientation();
if ( orientation ) {
Debug(3, "Have orientation");
if ( orientation == Monitor::ROTATE_0 ) {
} else if ( orientation == Monitor::ROTATE_90 ) {
ret = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else if ( orientation == Monitor::ROTATE_180 ) {
ret = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else if ( orientation == Monitor::ROTATE_270 ) {
ret = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else {
/** Create a new frame to store the */
if ( !(video_in_frame = zm_av_frame_alloc()) ) {
Error("Could not allocate video_in frame");
return false;
Warning("Unsupported Orientation(%d)", orientation);
}
} // end if orientation
} else {
for ( unsigned int i = 0; i < sizeof(codec_data) / sizeof(*codec_data); i++ ) {
if ( codec_data[i].codec_id != monitor->OutputCodec() )
continue;
video_out_codec = avcodec_find_encoder_by_name(codec_data[i].codec_name);
if ( ! video_out_codec ) {
Debug(1, "Didn't find encoder for %s", codec_data[i].codec_name);
continue;
}
Debug(1, "Using %s for codec", video_out_codec->name);
//video_out_ctx = avcodec_alloc_context3(video_out_codec);
if ( video_out_codec->id != video_out_ctx->codec_id ) {
Warning("Have to set codec_id?");
video_out_ctx->codec_id = AV_CODEC_ID_H264;
}
video_out_ctx->pix_fmt = codec_data[i].pix_fmt;
video_out_ctx->level = 32;
// Don't have an input stream, so need to tell it what we are sending it, or are transcoding
video_out_ctx->width = monitor->Width();
video_out_ctx->height = monitor->Height();
video_out_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
if ( oc->oformat->flags & AVFMT_GLOBALHEADER ) {
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
/* video time_base can be set to whatever is handy and supported by encoder */
video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
video_out_ctx->gop_size = 12;
video_out_ctx->qmin = 10;
video_out_ctx->qmax = 51;
video_out_ctx->qcompress = 0.6;
//video_out_ctx->bit_rate = 4000000;
//video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
video_out_ctx->time_base = (AVRational){1, 30}; // microseconds as base frame rate
video_out_ctx->framerate = (AVRational){30,1};
//video_out_ctx->gop_size = 12;
//video_out_ctx->qmin = 10;
//video_out_ctx->qmax = 51;
//video_out_ctx->qcompress = 0.6;
video_out_ctx->bit_rate = 400*1024;
video_out_ctx->thread_count = 0;
if ( video_out_ctx->codec_id == AV_CODEC_ID_H264 ) {
video_out_ctx->max_b_frames = 1;
if ( video_out_ctx->priv_data ) {
av_opt_set(video_out_ctx->priv_data, "preset", "ultrafast", 0);
av_opt_set(video_out_ctx->priv_data, "crf", "1", AV_OPT_SEARCH_CHILDREN);
//av_opt_set(video_out_ctx->priv_data, "preset", "ultrafast", 0);
} else {
Debug(2, "Not setting priv_data");
}
@ -304,22 +281,24 @@ Debug(2,"Using mjpeg");
}
if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
Warning("Can't open video codec (%s)! %s, trying h264",
Warning("Can't open video codec (%s) %s",
video_out_codec->name,
av_make_error_string(ret).c_str()
);
video_out_codec = avcodec_find_encoder_by_name("h264");
if ( ! video_out_codec ) {
Error("Can't find h264 encoder");
video_out_codec = avcodec_find_encoder_by_name("libx264");
if ( ! video_out_codec ) {
Error("Can't find libx264 encoder");
return false;
video_out_codec = NULL;
}
AVDictionaryEntry *e = NULL;
while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
Warning( "Encoder Option %s not recognized by ffmpeg codec", e->key);
}
if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
Error("Can't open video codec (%s)! %s", video_out_codec->name,
av_make_error_string(ret).c_str() );
av_dict_free(&opts);
if ( video_out_codec ) break;
} // end foreach codec
if ( ! video_out_codec ) {
Error("Can't open video codec!");
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// We allocate and copy in newer ffmpeg, so need to free it
avcodec_free_context(&video_out_ctx);
@ -327,21 +306,17 @@ Debug(2,"Using mjpeg");
video_out_ctx = NULL;
return false;
}
} // end if can't open codec
Debug(2,"Sucess opening codec");
AVDictionaryEntry *e = NULL;
while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
Warning( "Encoder Option %s not recognized by ffmpeg codec", e->key);
}
av_dict_free(&opts);
} // end if copying or trasncoding
if ( !video_out_ctx->codec_tag ) {
video_out_ctx->codec_tag =
av_codec_get_tag(oc->oformat->codec_tag, video_out_ctx->codec_id );
Debug(2, "No codec_tag, setting to h264 ? ");
}
} // end if copying or trasncoding
video_out_stream = avformat_new_stream(oc, video_out_codec);
if ( ! video_out_stream ) {
@ -358,27 +333,6 @@ Debug(2,"Using mjpeg");
avcodec_copy_context(video_out_stream->codec, video_out_ctx);
#endif
if ( video_in_stream && ( video_in_ctx->codec_id == AV_CODEC_ID_H264 ) ) {
// Only set orientation if doing passthrough, otherwise the frame image will be rotated
Monitor::Orientation orientation = monitor->getOrientation();
if ( orientation ) {
Debug(3, "Have orientation");
if ( orientation == Monitor::ROTATE_0 ) {
} else if ( orientation == Monitor::ROTATE_90 ) {
ret = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else if ( orientation == Monitor::ROTATE_180 ) {
ret = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else if ( orientation == Monitor::ROTATE_270 ) {
ret = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else {
Warning("Unsupported Orientation(%d)", orientation);
}
}
}
if ( audio_in_stream ) {
audio_in_stream_index = audio_in_stream->index;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
@ -464,11 +418,9 @@ Debug(2,"Using mjpeg");
/* open the out file, if needed */
if ( !(out_format->flags & AVFMT_NOFILE) ) {
ret = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
if (ret < 0) {
if ( (ret = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE, NULL, NULL) ) < 0 ) {
Error("Could not open out file '%s': %s\n", filename,
av_make_error_string(ret).c_str());
return false;
}
}
@ -518,17 +470,9 @@ void VideoStore::write_audio_packet( AVPacket &pkt ) {
av_interleaved_write_frame(oc, &pkt);
} // end void VideoStore::Write_audio_packet( AVPacket &pkt )
VideoStore::~VideoStore() {
if ( oc->pb ) {
if ( ( video_out_ctx->codec_id != video_in_ctx->codec_id ) || audio_out_codec ) {
Debug(2,"Different codecs between in and out");
void VideoStore::flush_codecs() {
// The codec queues data. We need to send a flush command and out
// whatever we get. Failures are not fatal.
AVPacket pkt;
// WIthout these we seg fault I don't know why.
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
// I got crashes if the codec didn't do DELAY, so let's test for it.
if ( video_out_ctx->codec && ( video_out_ctx->codec->capabilities &
@ -539,27 +483,25 @@ VideoStore::~VideoStore() {
#endif
) ) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// Put encoder into flushing mode
avcodec_send_frame(video_out_ctx, NULL);
while (1) {
ret = avcodec_receive_packet(video_out_ctx, &pkt);
if (ret < 0) {
AVPacket pkt;
// Without these we seg fault I don't know why.
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
if ( (ret = avcodec_receive_packet(video_out_ctx, &pkt)) < 0 ) {
if ( AVERROR_EOF != ret ) {
Error("ERror encoding audio while flushing (%d) (%s)", ret,
Error("Error encoding audio while flushing (%d) (%s)", ret,
av_err2str(ret));
}
break;
}
#else
while (1) {
// WIthout these we seg fault I don't know why.
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
int got_packet = 0;
ret = avcodec_encode_video2(video_out_ctx, &pkt, NULL, &got_packet);
if ( ret < 0 ) {
if ( (ret = avcodec_encode_video2(video_out_ctx, &pkt, NULL, &got_packet)) < 0 ) {
Error("ERror encoding video while flushing (%d) (%s)", ret, av_err2str(ret));
break;
}
@ -573,18 +515,13 @@ VideoStore::~VideoStore() {
} // end if have delay capability
if ( audio_out_codec ) {
// The codec queues data. We need to send a flush command and out
// whatever we get. Failures are not fatal.
avcodec_send_frame(audio_out_ctx, NULL);
while (1) {
AVPacket pkt;
// WIthout these we seg fault I don't know why.
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// Put encoder into flushing mode
avcodec_send_frame(audio_out_ctx, NULL);
while (1) {
if ( (ret = avcodec_receive_packet(audio_out_ctx, &pkt) ) < 0 ) {
if ( AVERROR_EOF != ret ) {
Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str(ret));
@ -592,10 +529,6 @@ VideoStore::~VideoStore() {
break;
}
#else
while (1) {
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
int got_packet = 0;
if ( (ret = avcodec_encode_audio2(audio_out_ctx, &pkt, NULL, &got_packet)) < 0 ) {
Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str(ret));
@ -610,6 +543,13 @@ VideoStore::~VideoStore() {
zm_av_packet_unref(&pkt);
} // while have buffered frames
} // end if audio_out_codec
}
VideoStore::~VideoStore() {
if ( oc->pb ) {
if ( ( video_out_ctx->codec_id != video_in_ctx->codec_id ) || audio_out_codec ) {
Debug(2,"Different codecs between in and out");
flush_output();
} // end if buffers
// Flush Queues
@ -640,6 +580,7 @@ VideoStore::~VideoStore() {
// allocation/de-allocation constantly, or whether we can just re-use it.
// Just do a file open/close/writeheader/etc.
// What if we were only doing audio recording?
if ( video_out_stream ) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// We allocate and copy in newer ffmpeg, so need to free it
@ -688,7 +629,7 @@ VideoStore::~VideoStore() {
converted_in_samples = NULL;
}
#endif
}
} // end if audio_out_stream
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
if ( video_in_ctx ) {
avcodec_free_context(&video_in_ctx);
@ -1027,8 +968,9 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
zm_packet->out_frame->pts = 0;
zm_packet->out_frame->coded_picture_number = 0;
} else {
uint64_t seconds = zm_packet->timestamp->tv_sec*(uint64_t)1000000;
zm_packet->out_frame->pts = ( seconds + zm_packet->timestamp->tv_usec ) - video_start_pts;
uint64_t seconds = ( zm_packet->timestamp->tv_sec*(uint64_t)1000000 + zm_packet->timestamp->tv_usec ) - video_start_pts;
zm_packet->out_frame->pts = av_rescale_q( seconds, video_in_stream->time_base, video_out_ctx->time_base);
//zm_packet->out_frame->pkt_duration = zm_packet->out_frame->pts - video_start_pts;
Debug(2, " Setting pts for frame(%d), set to (%" PRId64 ") from (start %" PRIu64 " - %" PRIu64 " - secs(%d) usecs(%d)",
frame_count, zm_packet->out_frame->pts, video_start_pts, seconds, zm_packet->timestamp->tv_sec, zm_packet->timestamp->tv_usec );
@ -1044,6 +986,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// Do this to allow the encoder to choose whether to use I/P/B frame
zm_packet->out_frame->pict_type = AV_PICTURE_TYPE_NONE;
Debug(4, "Sending frame");
if ( (ret = avcodec_send_frame(video_out_ctx, zm_packet->out_frame)) < 0 ) {
Error("Could not send frame (error '%s')", av_make_error_string(ret).c_str());
return -1;

View File

@ -18,6 +18,15 @@ class VideoStore;
class VideoStore {
private:
struct CodecData {
const int codec_id;
const char *codec_codec;
const char *codec_name;
const enum AVPixelFormat pix_fmt;
};
static struct CodecData codec_data[];
Monitor *monitor;
AVOutputFormat *out_format;
AVFormatContext *oc;
@ -89,6 +98,7 @@ public:
int writeAudioFramePacket( ZMPacket *pkt );
int writePacket( ZMPacket *pkt );
int write_packets( zm_packetqueue &queue );
void flush_codecs();
};
#endif //havelibav

View File

@ -15,7 +15,8 @@ private $defaults = array(
'Height' => null,
'Orientation' => null,
'AnalysisFPSLimit' => null,
'OutputCodec' => 'h264',
'OutputCodec' => '0',
'Encoder' => 'auto',
'OutputContainer' => 'auto',
'ZoneCount' => 0,
'Triggers' => null,

View File

@ -471,13 +471,21 @@ $videowriteropts = array(
'X264 Encode' => 1,
'H264 Camera Passthrough' => 2
);
$videowriter_codecs = array(
'' => translate('Disabled'),
$videowriter_encoders = array(
'' => translate('Auto'),
'h264_omx' => 'h264_omx',
'h264' => 'h264',
'mjpeg' => 'mjpeg',
'mpeg1' => 'mpeg1',
'mpeg2' => 'mpeg2',
);
$videowriter_codecs = array(
'0' => translate('Disabled'),
'220' => 'h264',
'8' => 'mjpeg',
'1' => 'mpeg1',
'2' => 'mpeg2',
);
$videowriter_containers = array(
'' => translate('Auto'),
'mp4' => 'mp4',
@ -610,6 +618,7 @@ if ( $tab != 'storage' ) {
<input type="hidden" name="newMonitor[SaveJPEGs]" value="<?php echo validHtmlStr($monitor->SaveJPEGs()) ?>"/>
<input type="hidden" name="newMonitor[VideoWriter]" value="<?php echo validHtmlStr($monitor->VideoWriter()) ?>"/>
<input type="hidden" name="newMonitor[OutputCodec]" value="<?php echo validHtmlStr($monitor->OutputCodec()) ?>"/>
<input type="hidden" name="newMonitor[Encoder]" value="<?php echo validHtmlStr($monitor->Encoder()) ?>"/>
<input type="hidden" name="newMonitor[OutputContainer]" value="<?php echo validHtmlStr($monitor->OutputContainer()) ?>"/>
<input type="hidden" name="newMonitor[EncoderParameters]" value="<?php echo validHtmlStr($monitor->EncoderParameters()) ?>"/>
<input type="hidden" name="newMonitor[RecordAudio]" value="<?php echo validHtmlStr($monitor->RecordAudio()) ?>"/>
@ -916,6 +925,7 @@ if ( $monitor->Type() == 'Local' ) {
<tr><td><?php echo translate('SaveJPEGs') ?></td><td><select name="newMonitor[SaveJPEGs]"><?php foreach ( $savejpegopts as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $monitor->SaveJPEGs() ) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
<tr><td><?php echo translate('VideoWriter') ?></td><td><select name="newMonitor[VideoWriter]"><?php foreach ( $videowriteropts as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $monitor->VideoWriter() ) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
<tr><td><?php echo translate('OutputCodec') ?></td><td><?php echo htmlSelect( 'newMonitor[OutputCodec]', $videowriter_codecs, $monitor->OutputCodec() );?></td></tr>
<tr><td><?php echo translate('Encoder') ?></td><td><?php echo htmlSelect( 'newMonitor[Encoder]', $videowriter_encoders, $monitor->Encoder() );?></td></tr>
<tr><td><?php echo translate('OutputContainer') ?></td><td><?php echo htmlSelect( 'newMonitor[OutputContainer]', $videowriter_containers, $monitor->OutputContainer() );?></td></tr>
<tr><td><?php echo translate('OptionalEncoderParam') ?></td><td><textarea name="newMonitor[EncoderParameters]" rows="4" cols="36"><?php echo validHtmlStr($monitor->EncoderParameters()) ?></textarea></td></tr>
<tr><td><?php echo translate('RecordAudio') ?></td><td><input type="checkbox" name="newMonitor[RecordAudio]" value="1"<?php if ( $monitor->RecordAudio() ) { ?> checked="checked"<?php } ?>/></td></tr>