Merge branch 'second_ffmpeg_url'

pull/3187/head
Isaac Connor 2021-03-03 12:07:44 -05:00
commit ed88719cd1
26 changed files with 160 additions and 82 deletions

View File

@ -468,6 +468,7 @@ CREATE TABLE `Monitors` (
`Port` varchar(8) NOT NULL default '',
`SubPath` varchar(64) NOT NULL default '',
`Path` varchar(255),
`SecondPath` varchar(255),
`Options` varchar(255),
`User` varchar(64),
`Pass` varchar(64),

15
db/zm_update-1.35.20.sql Normal file
View File

@ -0,0 +1,15 @@
--
-- Add SecondPath to Monitors
--
-- Conditionally adds the column: MySQL has no "ADD COLUMN IF NOT EXISTS"
-- (pre-MariaDB), so we probe INFORMATION_SCHEMA and build the statement
-- to run as a string, then execute it via a prepared statement.
SET @s = (SELECT IF(
    (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = DATABASE()
     AND table_name = 'Monitors'
     AND column_name = 'SecondPath'
    ) > 0,
  -- Column already present: run a harmless SELECT instead of the ALTER.
  "SELECT 'Column SecondPath already exists in Monitors'",
  "ALTER TABLE `Monitors` ADD `SecondPath` VARCHAR(255) AFTER `Path`"
));
PREPARE stmt FROM @s;
EXECUTE stmt;
-- Release the prepared statement so it does not linger in the session.
DEALLOCATE PREPARE stmt;

View File

@ -28,7 +28,7 @@
%global _hardened_build 1
Name: zoneminder
Version: 1.35.19
Version: 1.35.20
Release: 1%{?dist}
Summary: A camera monitoring and analysis tool
Group: System Environment/Daemons

View File

@ -54,6 +54,7 @@ Camera::Camera(
mVideoStream(nullptr),
mAudioStream(nullptr),
mFormatContext(nullptr),
mSecondFormatContext(nullptr),
bytes(0)
{
linesize = width * colours;
@ -68,12 +69,16 @@ Camera::~Camera() {
if ( mFormatContext ) {
// Should also free streams
avformat_free_context(mFormatContext);
mVideoStream = nullptr;
mAudioStream = nullptr;
}
if ( mSecondFormatContext ) {
// Should also free streams
avformat_free_context(mSecondFormatContext);
}
mVideoStream = nullptr;
mAudioStream = nullptr;
}
AVStream *Camera::get_VideoStream() {
AVStream *Camera::getVideoStream() {
if ( !mVideoStream ) {
if ( !mFormatContext )
mFormatContext = avformat_alloc_context();

View File

@ -56,7 +56,8 @@ protected:
AVCodecContext *mAudioCodecContext;
AVStream *mVideoStream;
AVStream *mAudioStream;
AVFormatContext *mFormatContext;
AVFormatContext *mFormatContext; // One for video, one for audio
AVFormatContext *mSecondFormatContext; // One for video, one for audio
unsigned int bytes;
public:
@ -119,12 +120,12 @@ public:
//return (type == FFMPEG_SRC )||(type == REMOTE_SRC);
}
virtual AVStream *get_VideoStream();
virtual AVStream *get_AudioStream() { return mAudioStream; };
virtual AVCodecContext *get_VideoCodecContext() { return mVideoCodecContext; };
virtual AVCodecContext *get_AudioCodecContext() { return mAudioCodecContext; };
int get_VideoStreamId() { return mVideoStreamId; };
int get_AudioStreamId() { return mAudioStreamId; };
virtual AVStream *getVideoStream();
virtual AVStream *getAudioStream() { return mAudioStream; };
virtual AVCodecContext *getVideoCodecContext() { return mVideoCodecContext; };
virtual AVCodecContext *getAudioCodecContext() { return mAudioCodecContext; };
int getVideoStreamId() { return mVideoStreamId; };
int getAudioStreamId() { return mAudioStreamId; };
virtual int PrimeCapture() { return 0; }
virtual int PreCapture() = 0;

View File

@ -19,6 +19,8 @@
#include "zm_ffmpeg_camera.h"
#include "zm_ffmpeg_input.h"
#include "zm_monitor.h"
#include "zm_packet.h"
#include "zm_signal.h"
#include "zm_utils.h"
@ -93,6 +95,7 @@ static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType type) {
FfmpegCamera::FfmpegCamera(
const Monitor *monitor,
const std::string &p_path,
const std::string &p_second_path,
const std::string &p_method,
const std::string &p_options,
int p_width,
@ -121,6 +124,7 @@ FfmpegCamera::FfmpegCamera(
p_record_audio
),
mPath(p_path),
mSecondPath(p_second_path),
mMethod(p_method),
mOptions(p_options),
hwaccel_name(p_hwaccel_name),
@ -184,15 +188,25 @@ int FfmpegCamera::PreCapture() {
}
int FfmpegCamera::Capture(ZMPacket &zm_packet) {
if ( !mCanCapture )
return -1;
if (!mCanCapture) return -1;
int ret;
if ( (ret = av_read_frame(mFormatContext, &packet)) < 0 ) {
AVStream *stream;
if ((mFormatContextPtr != mFormatContext) or !mSecondFormatContext) {
mFormatContextPtr = mFormatContext;
stream = mVideoStream;
Debug(1, "Using video input");
} else {
mFormatContextPtr = mSecondFormatContext;
stream = mAudioStream;
Debug(1, "Using audio input");
}
if ( (ret = av_read_frame(mFormatContextPtr, &packet)) < 0 ) {
if (
// Check if EOF.
(ret == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
(ret == AVERROR_EOF || (mFormatContextPtr->pb && mFormatContextPtr->pb->eof_reached)) ||
// Check for Connection failure.
(ret == -110)
) {
@ -204,16 +218,17 @@ int FfmpegCamera::Capture(ZMPacket &zm_packet) {
}
return -1;
}
ZM_DUMP_STREAM_PACKET(mFormatContext->streams[packet.stream_index], packet, "ffmpeg_camera in");
ZM_DUMP_STREAM_PACKET(stream, packet, "ffmpeg_camera in");
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
zm_packet.codec_type = mFormatContext->streams[packet.stream_index]->codecpar->codec_type;
zm_packet.codec_type = stream->codecpar->codec_type;
#else
zm_packet.codec_type = mFormatContext->streams[packet.stream_index]->codec->codec_type;
zm_packet.codec_type = stream->codec->codec_type;
#endif
bytes += packet.size;
zm_packet.set_packet(&packet);
zm_packet.pts = av_rescale_q(packet.pts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q);
zm_packet.stream = stream;
zm_packet.pts = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q);
zm_av_packet_unref(&packet);
return 1;
} // FfmpegCamera::Capture
@ -230,7 +245,7 @@ int FfmpegCamera::OpenFfmpeg() {
// Open the input, not necessarily a file
#if !LIBAVFORMAT_VERSION_CHECK(53, 2, 0, 4, 0)
if ( av_open_input_file(&mFormatContext, mPath.c_str(), nullptr, 0, nullptr) != 0 )
if (av_open_input_file(&mFormatContext, mPath.c_str(), nullptr, 0, nullptr) != 0)
#else
// Handle options
AVDictionary *opts = nullptr;
@ -259,7 +274,6 @@ int FfmpegCamera::OpenFfmpeg() {
Warning("Could not set rtsp_transport method '%s'", method.c_str());
}
} // end if RTSP
// #av_dict_set(&opts, "timeout", "10000000", 0); // in microseconds.
Debug(1, "Calling avformat_open_input for %s", mPath.c_str());
@ -476,13 +490,25 @@ int FfmpegCamera::OpenFfmpeg() {
}
zm_dump_codec(mVideoCodecContext);
if (mAudioStreamId == -1 and !monitor->GetSecondPath().empty()) {
Debug(1, "Trying secondary stream at %s", monitor->GetSecondPath().c_str());
FFmpeg_Input *second_input = new FFmpeg_Input();
if (second_input->Open(monitor->GetSecondPath().c_str()) > 0) {
mSecondFormatContext = second_input->get_format_context();
mAudioStreamId = second_input->get_audio_stream_id();
mAudioStream = second_input->get_audio_stream();
} else {
Warning("Failed to open secondary input");
}
} // end if have audio stream
if ( mAudioStreamId >= 0 ) {
AVCodec *mAudioCodec = nullptr;
if ( (mAudioCodec = avcodec_find_decoder(
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
mFormatContext->streams[mAudioStreamId]->codecpar->codec_id
mAudioStream->codecpar->codec_id
#else
mFormatContext->streams[mAudioStreamId]->codec->codec_id
mAudioStream->codec->codec_id
#endif
)) == nullptr ) {
Debug(1, "Can't find codec for audio stream from %s", mPath.c_str());
@ -491,11 +517,10 @@ int FfmpegCamera::OpenFfmpeg() {
mAudioCodecContext = avcodec_alloc_context3(mAudioCodec);
avcodec_parameters_to_context(
mAudioCodecContext,
mFormatContext->streams[mAudioStreamId]->codecpar
mAudioStream->codecpar
);
#else
mAudioCodecContext = mFormatContext->streams[mAudioStreamId]->codec;
// = avcodec_alloc_context3(mAudioCodec);
mAudioCodecContext = mAudioStream->codec;
#endif
zm_dump_stream_format(mFormatContext, mAudioStreamId, 0, 0);
@ -508,9 +533,9 @@ int FfmpegCamera::OpenFfmpeg() {
{
Error("Unable to open codec for audio stream from %s", mPath.c_str());
return -1;
} // end if opened
} // end if found decoder
} // end if have audio stream
} // end if opened
} // end if found decoder
} // end if mAudioStreamId
if (
((unsigned int)mVideoCodecContext->width != width)

View File

@ -34,6 +34,7 @@ typedef struct DecodeContext {
class FfmpegCamera : public Camera {
protected:
std::string mPath;
std::string mSecondPath;
std::string mMethod;
std::string mOptions;
@ -42,8 +43,9 @@ class FfmpegCamera : public Camera {
std::string hwaccel_device;
int frameCount;
_AVPIXELFORMAT imagePixFormat;
AVFormatContext *mFormatContextPtr;
bool use_hwaccel; //will default to on if hwaccel specified, will get turned off if there is a failure
#if HAVE_LIBAVUTIL_HWCONTEXT_H
@ -69,6 +71,7 @@ class FfmpegCamera : public Camera {
FfmpegCamera(
const Monitor *monitor,
const std::string &path,
const std::string &second_path,
const std::string &p_method,
const std::string &p_options,
int p_width,

View File

@ -118,11 +118,11 @@ int FFmpeg_Input::Open(const char *filepath) {
} // end foreach stream
if ( video_stream_id == -1 )
Error("Unable to locate video stream in %s", filepath);
Warning("Unable to locate video stream in %s", filepath);
if ( audio_stream_id == -1 )
Debug(3, "Unable to locate audio stream in %s", filepath);
return 0;
return 1;
} // end int FFmpeg_Input::Open( const char * filepath )
int FFmpeg_Input::Close( ) {

View File

@ -36,6 +36,13 @@ class FFmpeg_Input {
int get_audio_stream_id() const {
return audio_stream_id;
}
// Returns the demuxer's video stream, or nullptr if no video stream was
// found during Open() (video_stream_id stays -1 in that case).
// Non-owning: the AVStream belongs to input_format_context.
AVStream *get_video_stream() {
return ( video_stream_id >= 0 ) ? input_format_context->streams[video_stream_id] : nullptr;
}
// Returns the demuxer's audio stream, or nullptr if no audio stream was
// found during Open() (audio_stream_id stays -1 in that case).
// Non-owning: the AVStream belongs to input_format_context.
AVStream *get_audio_stream() {
return ( audio_stream_id >= 0 ) ? input_format_context->streams[audio_stream_id] : nullptr;
}
AVFormatContext *get_format_context() { return input_format_context; };
private:
typedef struct {

View File

@ -279,6 +279,8 @@ int LibvlcCamera::Capture( ZMPacket &zm_packet ) {
mLibvlcData.mutex.lock();
zm_packet.image->Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
zm_packet.packet.stream_index = mVideoStreamId;
zm_packet.stream = mVideoStream;
mLibvlcData.newImage.setValueImmediate(false);
mLibvlcData.mutex.unlock();

View File

@ -178,7 +178,7 @@ int VncCamera::PrimeCapture() {
Warning("Specified dimensions do not match screen size monitor: (%dx%d) != vnc: (%dx%d)",
width, height, mRfb->width, mRfb->height);
}
get_VideoStream();
getVideoStream();
return 1;
}
@ -207,6 +207,7 @@ int VncCamera::Capture(ZMPacket &zm_packet) {
zm_packet.keyframe = 1;
zm_packet.codec_type = AVMEDIA_TYPE_VIDEO;
zm_packet.packet.stream_index = mVideoStreamId;
zm_packet.stream = mVideoStream;
uint8_t *directbuffer = zm_packet.image->WriteBuffer(width, height, colours, subpixelorder);
Debug(1, "scale src %p, %d, dest %p %d %d %dx%d %dx%d", mVncData.buffer,

View File

@ -1975,7 +1975,7 @@ int LocalCamera::Contrast( int p_contrast ) {
}
int LocalCamera::PrimeCapture() {
get_VideoStream();
getVideoStream();
if ( !device_prime )
return 1;
@ -2240,6 +2240,7 @@ int LocalCamera::Capture(ZMPacket &zm_packet) {
} // end if doing conversion or not
zm_packet.packet.stream_index = mVideoStreamId;
zm_packet.stream = mVideoStream;
zm_packet.codec_type = AVMEDIA_TYPE_VIDEO;
zm_packet.keyframe = 1;
return 1;

View File

@ -78,7 +78,7 @@ std::string load_monitor_sql =
"SELECT `Id`, `Name`, `ServerId`, `StorageId`, `Type`, `Function`+0, `Enabled`, `DecodingEnabled`, "
"`LinkedMonitors`, `AnalysisFPSLimit`, `AnalysisUpdateDelay`, `MaxFPS`, `AlarmMaxFPS`,"
"`Device`, `Channel`, `Format`, `V4LMultiBuffer`, `V4LCapturesPerFrame`, " // V4L Settings
"`Protocol`, `Method`, `Options`, `User`, `Pass`, `Host`, `Port`, `Path`, `Width`, `Height`, `Colours`, `Palette`, `Orientation`+0, `Deinterlacing`, "
"`Protocol`, `Method`, `Options`, `User`, `Pass`, `Host`, `Port`, `Path`, `SecondPath`, `Width`, `Height`, `Colours`, `Palette`, `Orientation`+0, `Deinterlacing`, "
"`DecoderHWAccelName`, `DecoderHWAccelDevice`, `RTSPDescribe`, "
"`SaveJPEGs`, `VideoWriter`, `EncoderParameters`, "
"`OutputCodec`, `Encoder`, `OutputContainer`, "
@ -420,7 +420,7 @@ Monitor::Monitor()
"SELECT Id, Name, ServerId, StorageId, Type, Function+0, Enabled, DecodingEnabled, LinkedMonitors, "
"AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS,"
"Device, Channel, Format, V4LMultiBuffer, V4LCapturesPerFrame, " // V4L Settings
"Protocol, Method, Options, User, Pass, Host, Port, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, "
"Protocol, Method, Options, User, Pass, Host, Port, Path, SecondPath, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, "
"SaveJPEGs, VideoWriter, EncoderParameters,
"OutputCodec, Encoder, OutputContainer,"
"RecordAudio, "
@ -512,6 +512,7 @@ void Monitor::Load(MYSQL_ROW dbrow, bool load_zones=true, Purpose p = QUERY) {
host = dbrow[col] ? dbrow[col] : ""; col++;
port = dbrow[col] ? dbrow[col] : ""; col++;
path = dbrow[col] ? dbrow[col] : ""; col++;
second_path = dbrow[col] ? dbrow[col] : ""; col++;
camera_width = atoi(dbrow[col]); col++;
camera_height = atoi(dbrow[col]); col++;
@ -771,6 +772,7 @@ void Monitor::LoadCamera() {
case FFMPEG: {
camera = ZM::make_unique<FfmpegCamera>(this,
path,
second_path,
method,
options,
camera_width,
@ -1890,7 +1892,7 @@ bool Monitor::Analyse() {
}// else
if (signal) {
if (snap->image or (snap->packet.stream_index == video_stream_id)) {
if (snap->image or (snap->codec_type == AVMEDIA_TYPE_VIDEO)) {
struct timeval *timestamp = snap->timestamp;
if ( Active() and (function == MODECT or function == MOCORD) and snap->image ) {
@ -2257,7 +2259,7 @@ bool Monitor::Analyse() {
if ( snap->keyframe ) {
// avcodec strips out important nals that describe the stream and
// stick them in extradata. Need to send them along with keyframes
AVStream *stream = camera->get_VideoStream();
AVStream *stream = camera->getVideoStream();
video_fifo->write(
static_cast<unsigned char *>(stream->codecpar->extradata),
stream->codecpar->extradata_size);
@ -2554,13 +2556,14 @@ int Monitor::Capture() {
Debug(2, "Have packet stream_index:%d ?= videostream_id:(%d) q.vpktcount(%d) event?(%d) ",
packet->packet.stream_index, video_stream_id, packetqueue.packet_count(video_stream_id), ( event ? 1 : 0 ) );
if (packet->packet.stream_index == video_stream_id) {
if (packet->codec_type == AVMEDIA_TYPE_VIDEO) {
packet->packet.stream_index = video_stream_id; // Convert to packetQueue's index
if (video_fifo) {
if ( packet->keyframe ) {
// avcodec strips out important nals that describe the stream and
// stick them in extradata. Need to send them along with keyframes
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
AVStream *stream = camera->get_VideoStream();
AVStream *stream = camera->getVideoStream();
video_fifo->write(
static_cast<unsigned char *>(stream->codecpar->extradata),
stream->codecpar->extradata_size,
@ -2569,16 +2572,15 @@ int Monitor::Capture() {
}
video_fifo->writePacket(*packet);
}
} else if (packet->packet.stream_index == audio_stream_id) {
} else if (packet->codec_type == AVMEDIA_TYPE_AUDIO) {
if (audio_fifo)
audio_fifo->writePacket(*packet);
}
if ( (packet->packet.stream_index != video_stream_id) and ! packet->image ) {
// Only queue if we have some video packets in there. Should push this logic into packetqueue
if ( record_audio and (packetqueue.packet_count(video_stream_id) or event) ) {
if (record_audio and (packetqueue.packet_count(video_stream_id) or event)) {
packet->image_index=-1;
Debug(2, "Queueing audio packet");
packet->packet.stream_index = audio_stream_id; // Convert to packetQueue's index
packetqueue.queuePacket(packet);
} else {
Debug(4, "Not Queueing audio packet");
@ -2587,6 +2589,8 @@ int Monitor::Capture() {
// Don't update last_write_index because that is used for live streaming
//shared_data->last_write_time = image_buffer[index].timestamp->tv_sec;
return 1;
} else {
Debug(1, "Unknown codec type %d", packet->codec_type);
} // end if audio
if ( !packet->image ) {
@ -2599,7 +2603,7 @@ int Monitor::Capture() {
// We don't actually care about camera colours, pixel order etc. We care about the desired settings
//
//capture_image = packet->image = new Image(width, height, camera->Colours(), camera->SubpixelOrder());
int ret = packet->decode(camera->get_VideoCodecContext());
int ret = packet->decode(camera->getVideoCodecContext());
if ( ret < 0 ) {
Error("decode failed");
} else if ( ret == 0 ) {
@ -2982,13 +2986,13 @@ unsigned int Monitor::SubpixelOrder() const { return camera->SubpixelOrder(); }
int Monitor::PrimeCapture() {
int ret = camera->PrimeCapture();
if ( ret > 0 ) {
video_stream_id = camera->get_VideoStreamId();
if ( -1 != camera->getVideoStreamId() ) {
video_stream_id = packetqueue.addStream();
}
packetqueue.addStreamId(video_stream_id);
audio_stream_id = camera->get_AudioStreamId();
if ( audio_stream_id >= 0 ) {
packetqueue.addStreamId(audio_stream_id);
if ( -1 != camera->getAudioStreamId() ) {
audio_stream_id = packetqueue.addStream();
packetqueue.addStream();
shared_data->audio_frequency = camera->getFrequency();
shared_data->audio_channels = camera->getChannels();
}
@ -2998,7 +3002,7 @@ int Monitor::PrimeCapture() {
if (rtsp_server) {
if (video_stream_id >= 0) {
AVStream *videoStream = camera->get_VideoStream();
AVStream *videoStream = camera->getVideoStream();
snprintf(shared_data->video_fifo_path, sizeof(shared_data->video_fifo_path)-1, "%s/video_fifo_%d.%s",
staticConfig.PATH_SOCKS.c_str(),
id,
@ -3011,7 +3015,7 @@ int Monitor::PrimeCapture() {
video_fifo = new Fifo(shared_data->video_fifo_path, true);
}
if (record_audio and (audio_stream_id >= 0)) {
AVStream *audioStream = camera->get_AudioStream();
AVStream *audioStream = camera->getAudioStream();
snprintf(shared_data->audio_fifo_path, sizeof(shared_data->audio_fifo_path)-1, "%s/video_fifo_%d.%s",
staticConfig.PATH_SOCKS.c_str(), id,
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
@ -3057,7 +3061,7 @@ void Monitor::get_ref_image() {
(
!( snap = packetqueue.get_packet(analysis_it))
or
( snap->packet.stream_index != video_stream_id )
( snap->codec_type != AVMEDIA_TYPE_VIDEO )
or
! snap->image
)

View File

@ -248,6 +248,7 @@ protected:
std::string user;
std::string pass;
std::string path;
std::string second_path;
char device[64];
int palette;
@ -485,11 +486,12 @@ public:
int32_t GetImageBufferCount() const { return image_buffer_count; };
State GetState() const { return (State)shared_data->state; }
AVStream *GetAudioStream() const { return camera ? camera->get_AudioStream() : nullptr; };
AVCodecContext *GetAudioCodecContext() const { return camera ? camera->get_AudioCodecContext() : nullptr; };
AVStream *GetVideoStream() const { return camera ? camera->get_VideoStream() : nullptr; };
AVCodecContext *GetVideoCodecContext() const { return camera ? camera->get_VideoCodecContext() : nullptr; };
AVStream *GetAudioStream() const { return camera ? camera->getAudioStream() : nullptr; };
AVCodecContext *GetAudioCodecContext() const { return camera ? camera->getAudioCodecContext() : nullptr; };
AVStream *GetVideoStream() const { return camera ? camera->getVideoStream() : nullptr; };
AVCodecContext *GetVideoCodecContext() const { return camera ? camera->getVideoCodecContext() : nullptr; };
const std::string GetSecondPath() const { return second_path; };
const std::string GetVideoFifoPath() const { return shared_data ? shared_data->video_fifo_path : ""; };
const std::string GetAudioFifoPath() const { return shared_data ? shared_data->audio_fifo_path : ""; };
const std::string GetRTSPStreamName() const { return rtsp_streamname; };

View File

@ -149,6 +149,9 @@ int ZMPacket::decode(AVCodecContext *ctx) {
in_frame = zm_av_frame_alloc();
}
// packets are always stored in AV_TIME_BASE_Q so need to convert to codec time base
//av_packet_rescale_ts(&packet, AV_TIME_BASE_Q, ctx->time_base);
int ret = zm_send_packet_receive_frame(ctx, in_frame, packet);
if ( ret < 0 ) {
if ( AVERROR(EAGAIN) != ret ) {

View File

@ -38,6 +38,7 @@ class ZMPacket {
std::recursive_mutex mutex;
int keyframe;
AVStream *stream; // Input stream
AVPacket packet; // Input packet, undecoded
AVFrame *in_frame; // Input image, decoded Theoretically only filled if needed.
AVFrame *out_frame; // output image, Only filled if needed.

View File

@ -38,17 +38,20 @@ PacketQueue::PacketQueue():
/* Assumes queue is empty when adding streams
* Assumes first stream added will be the video stream
*/
void PacketQueue::addStreamId(int p_stream_id) {
int PacketQueue::addStream() {
deleting = false;
if ( video_stream_id == -1 )
video_stream_id = p_stream_id;
if ( max_stream_id < p_stream_id ) {
if ( packet_counts ) delete[] packet_counts;
max_stream_id = p_stream_id;
packet_counts = new int[max_stream_id+1];
for ( int i=0; i <= max_stream_id; ++i )
packet_counts[i] = 0;
if ( max_stream_id == -1 ) {
video_stream_id = 0;
max_stream_id = 0;
} else {
max_stream_id ++;
}
if ( packet_counts ) delete[] packet_counts;
packet_counts = new int[max_stream_id+1];
for ( int i=0; i <= max_stream_id; ++i )
packet_counts[i] = 0;
return max_stream_id;
}
PacketQueue::~PacketQueue() {

View File

@ -48,7 +48,7 @@ class PacketQueue {
std::list<ZMPacket *>::const_iterator end() const { return pktQueue.end(); }
std::list<ZMPacket *>::const_iterator begin() const { return pktQueue.begin(); }
void addStreamId(int p_stream_id);
int addStream();
void setMaxVideoPackets(int p);
bool queuePacket(ZMPacket* packet);

View File

@ -1047,7 +1047,7 @@ int RemoteCameraHttp::PrimeCapture() {
mode = SINGLE_IMAGE;
buffer.clear();
}
get_VideoStream();
getVideoStream();
return 1;
}
@ -1088,6 +1088,7 @@ int RemoteCameraHttp::Capture(ZMPacket &packet) {
packet.keyframe = 1;
packet.codec_type = AVMEDIA_TYPE_VIDEO;
packet.packet.stream_index = mVideoStreamId;
packet.stream = mVideoStream;
switch ( format ) {
case JPEG :

View File

@ -309,6 +309,7 @@ int RemoteCameraRtsp::Capture(ZMPacket &zm_packet) {
#endif
}
zm_packet.codec_type = mVideoCodecContext->codec_type;
zm_packet.stream = mVideoStream;
frameComplete = true;
Debug(2, "Frame: %d - %d/%d", frameCount, bytes_consumed, buffer.size());
packet->data = nullptr;

View File

@ -49,8 +49,6 @@ VideoStore::VideoStore(
oc(nullptr),
video_out_stream(nullptr),
audio_out_stream(nullptr),
video_in_stream_index(-1),
audio_in_stream_index(-1),
video_out_codec(nullptr),
video_in_ctx(p_video_in_ctx),
video_out_ctx(nullptr),
@ -123,8 +121,6 @@ bool VideoStore::open() {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
zm_dump_codecpar(video_in_stream->codecpar);
#endif
video_in_stream_index = video_in_stream->index;
if ( monitor->GetOptVideoWriter() == Monitor::PASSTHROUGH ) {
// Don't care what codec, just copy parameters
video_out_ctx = avcodec_alloc_context3(nullptr);
@ -945,14 +941,12 @@ bool VideoStore::setup_resampler() {
} // end bool VideoStore::setup_resampler()
int VideoStore::writePacket(ZMPacket *ipkt) {
if ( ipkt->packet.stream_index == video_in_stream_index ) {
if ( ipkt->codec_type == AVMEDIA_TYPE_VIDEO ) {
return writeVideoFramePacket(ipkt);
} else if ( ipkt->packet.stream_index == audio_in_stream_index ) {
} else if ( ipkt->codec_type == AVMEDIA_TYPE_AUDIO ) {
return writeAudioFramePacket(ipkt);
}
Error("Unknown stream type in packet (%d) input video stream is (%d) and audio is (%d)",
ipkt->packet.stream_index, video_in_stream_index, ( audio_in_stream ? audio_in_stream_index : -1 )
);
Error("Unknown stream type in packet (%d)", ipkt->codec_type);
return 0;
}

View File

@ -40,8 +40,6 @@ class VideoStore {
AVFormatContext *oc;
AVStream *video_out_stream;
AVStream *audio_out_stream;
int video_in_stream_index;
int audio_in_stream_index;
AVCodec *video_out_codec;
AVCodecContext *video_in_ctx;

View File

@ -1 +1 @@
1.35.19
1.35.20

View File

@ -53,6 +53,7 @@ class Monitor extends ZM_Object {
'Port' => '',
'SubPath' => '',
'Path' => null,
'SecondPath' => null,
'Options' => null,
'User' => null,
'Pass' => null,

View File

@ -11,6 +11,8 @@
textarea,
input[name="newMonitor[Name]"],
input[name="newMonitor[Path]"],
input[name="newMonitor[SecondPath]"],
input[name="newMonitor[LabelFormat]"],
input[name="newMonitor[ControlDevice]"],
input[name="newMonitor[ControlAddress]"] {

View File

@ -676,7 +676,10 @@ switch ( $name ) {
{
if ( ZM_HAS_V4L && $monitor->Type() == 'Local' ) {
?>
<tr><td class="text-right pr-3"><?php echo translate('DevicePath') ?></td><td><input type="text" name="newMonitor[Device]" value="<?php echo validHtmlStr($monitor->Device()) ?>"/></td></tr>
<tr>
<td class="text-right pr-3"><?php echo translate('DevicePath') ?></td>
<td><input type="text" name="newMonitor[Device]" value="<?php echo validHtmlStr($monitor->Device()) ?>"/></td>
</tr>
<tr>
<td><?php echo translate('CaptureMethod') ?></td>
<td><?php echo htmlSelect('newMonitor[Method]', $localMethods, $monitor->Method(), array('onchange'=>'submitTab', 'data-tab-name'=>$tab) ); ?></td>
@ -819,6 +822,10 @@ include('_monitor_source_nvsocket.php');
}
if ( $monitor->Type() == 'Ffmpeg' ) {
?>
<tr class="SourceSecondPath">
<td class="text-right pr-3"><?php echo translate('SourceSecondPath') ?></td>
<td><input type="text" name="newMonitor[SecondPath]" value="<?php echo validHtmlStr($monitor->SecondPath()) ?>" /></td>
</tr>
<tr class="DecoderHWAccelName">
<td class="text-right pr-3">
<?php echo translate('DecoderHWAccelName'); echo makeHelpLink('OPTIONS_DECODERHWACCELNAME') ?>