updates for master ffmpeg
parent 2ffa79172a
commit e17bcb9178
@@ -133,6 +133,7 @@ int av_dict_parse_string(AVDictionary **pm, const char *str,
#endif // HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE

#if HAVE_LIBAVUTIL
#if LIBAVUTIL_VERSION_CHECK(56, 0, 0, 17, 100)
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb){
int64_t a, b, this_thing;

@@ -156,6 +157,7 @@ simple_round:
return av_rescale_q(this_thing, fs_tb, out_tb);
}
#endif
#endif

int hacked_up_context2_for_older_ffmpeg(AVFormatContext **avctx, AVOutputFormat *oformat, const char *format, const char *filename) {
AVFormatContext *s = avformat_alloc_context();

@@ -458,7 +458,9 @@ int FfmpegCamera::OpenFfmpeg() {
// STolen from ispy
//this fixes issues with rtsp streams!! woot.
//mVideoCodecContext->flags2 |= CODEC_FLAG2_FAST | CODEC_FLAG2_CHUNKS | CODEC_FLAG_LOW_DELAY; // Enable faster H264 decode.
#ifdef CODEC_FLAG2_FAST
mVideoCodecContext->flags2 |= CODEC_FLAG2_FAST | CODEC_FLAG_LOW_DELAY;
#endif

#if HAVE_AVUTIL_HWCONTEXT_H
if ( mVideoCodecContext->codec_id == AV_CODEC_ID_H264 ) {
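The hunks above gate code on LIBAVUTIL_VERSION_CHECK(56, 0, 0, 17, 100), a five-argument check because FFmpeg and Libav number their micro versions differently (FFmpeg uses micro >= 100). A minimal sketch of how such a macro is commonly defined, assuming the usual VLC-style convention rather than quoting this tree's exact header:

#include <libavutil/version.h>

/* Sketch: true when the installed libavutil is at least major.minor.micro
 * on Libav (micro < 100) or major.ff_minor.ff_micro on FFmpeg (micro >= 100). */
#define EXAMPLE_LIBAVUTIL_VERSION_CHECK(major, minor, micro, ff_minor, ff_micro) \
  ( (LIBAVUTIL_VERSION_MICRO <  100 && \
     LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(major, minor, micro)) || \
    (LIBAVUTIL_VERSION_MICRO >= 100 && \
     LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(major, ff_minor, ff_micro)) )

#if EXAMPLE_LIBAVUTIL_VERSION_CHECK(56, 0, 0, 17, 100)
/* code that needs Libav >= 56.0.0 or FFmpeg >= 56.17.100 */
#endif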
src/zm_mpeg.cpp (133 changed lines)
@@ -211,39 +211,52 @@ void VideoStream::SetupCodec( int colours, int subpixelorder, int width, int hei
Debug( 1, "Allocated stream" );

AVCodecContext *c = ost->codec;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
codec_context = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(codec_context, ost->codecpar);
#else
codec_context = ost->codec;
#endif

c->codec_id = codec->id;
c->codec_type = codec->type;
codec_context->codec_id = codec->id;
codec_context->codec_type = codec->type;

c->pix_fmt = strcmp( "mjpeg", ofc->oformat->name ) == 0 ? AV_PIX_FMT_YUVJ422P : AV_PIX_FMT_YUV420P;
codec_context->pix_fmt = strcmp( "mjpeg", ofc->oformat->name ) == 0 ? AV_PIX_FMT_YUVJ422P : AV_PIX_FMT_YUV420P;
if ( bitrate <= 100 ) {
// Quality based bitrate control (VBR). Scale is 1..31 where 1 is best.
// This gets rid of artifacts in the beginning of the movie; and well, even quality.
c->flags |= CODEC_FLAG_QSCALE;
c->global_quality = FF_QP2LAMBDA * (31 - (31 * (bitrate / 100.0)));
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
codec_context->flags |= AV_CODEC_FLAG_QSCALE;
#else
codec_context->flags |= CODEC_FLAG_QSCALE;
#endif
codec_context->global_quality = FF_QP2LAMBDA * (31 - (31 * (bitrate / 100.0)));
} else {
c->bit_rate = bitrate;
codec_context->bit_rate = bitrate;
}

/* resolution must be a multiple of two */
c->width = width;
c->height = height;
codec_context->width = width;
codec_context->height = height;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
c->time_base.den = frame_rate;
c->time_base.num = 1;
codec_context->time_base.den = frame_rate;
codec_context->time_base.num = 1;

Debug( 1, "Will encode in %d fps.", c->time_base.den );
Debug( 1, "Will encode in %d fps.", codec_context->time_base.den );

/* emit one intra frame every second */
c->gop_size = frame_rate;
codec_context->gop_size = frame_rate;

// some formats want stream headers to be separate
if ( of->flags & AVFMT_GLOBALHEADER )
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
} else {
Fatal( "of->video_codec == AV_CODEC_ID_NONE" );
}
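The SetupCodec() hunk above stops writing through the deprecated ost->codec pointer and instead works on a codec_context obtained with avcodec_alloc_context3() plus avcodec_parameters_to_context(). A hedged sketch of that pattern for an output stream; the helper name and error handling are illustrative, and a real muxer normally copies the finished settings back with avcodec_parameters_from_context():

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Sketch: allocate a working AVCodecContext for an output stream on
 * FFmpeg >= 3.1, where AVStream::codec is deprecated. */
static AVCodecContext *alloc_stream_context(AVStream *stream, const AVCodec *codec) {
  AVCodecContext *ctx = avcodec_alloc_context3(codec);
  if (!ctx)
    return NULL;
  /* Start from whatever parameters the stream already carries. */
  if (avcodec_parameters_to_context(ctx, stream->codecpar) < 0) {
    avcodec_free_context(&ctx);
    return NULL;
  }
  /* ... configure ctx (pix_fmt, width, height, time_base, bit_rate, ...) ... */
  /* After avcodec_open2(), push the final settings back onto the stream:
   *   avcodec_parameters_from_context(stream->codecpar, ctx); */
  return ctx;
}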
@@ -278,13 +291,11 @@ void VideoStream::OpenStream( ) {
/* now that all the parameters are set, we can open the
video codecs and allocate the necessary encode buffers */
if ( ost ) {
AVCodecContext *c = ost->codec;

/* open the codec */
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
if ( (avRet = avcodec_open( c, codec )) < 0 )
if ( (avRet = avcodec_open( codec_context, codec )) < 0 )
#else
if ( (avRet = avcodec_open2( c, codec, 0 )) < 0 )
if ( (avRet = avcodec_open2( codec_context, codec, 0 )) < 0 )
#endif
{
Fatal( "Could not open codec. Error code %d \"%s\"", avRet, av_err2str( avRet ) );

@@ -293,19 +304,15 @@ void VideoStream::OpenStream( ) {
Debug( 1, "Opened codec" );

/* allocate the encoded raw picture */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
opicture = av_frame_alloc( );
#else
opicture = avcodec_alloc_frame( );
#endif
opicture = zm_av_frame_alloc( );
if ( !opicture ) {
Panic( "Could not allocate opicture" );
}

#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
int size = av_image_get_buffer_size( c->pix_fmt, c->width, c->height, 1 );
int size = av_image_get_buffer_size( codec_context->pix_fmt, codec_context->width, codec_context->height, 1 );
#else
int size = avpicture_get_size( c->pix_fmt, c->width, c->height );
int size = avpicture_get_size( codec_context->pix_fmt, codec_context->width, codec_context->height );
#endif

uint8_t *opicture_buf = (uint8_t *)av_malloc( size );

@@ -315,17 +322,17 @@ void VideoStream::OpenStream( ) {
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(opicture->data, opicture->linesize,
opicture_buf, c->pix_fmt, c->width, c->height, 1);
opicture_buf, codec_context->pix_fmt, codec_context->width, codec_context->height, 1);
#else
avpicture_fill( (AVPicture *)opicture, opicture_buf, c->pix_fmt,
c->width, c->height );
avpicture_fill( (AVPicture *)opicture, opicture_buf, codec_context->pix_fmt,
codec_context->width, codec_context->height );
#endif

/* if the output format is not identical to the input format, then a temporary
picture is needed too. It is then converted to the required
output format */
tmp_opicture = NULL;
if ( c->pix_fmt != pf ) {
if ( codec_context->pix_fmt != pf ) {
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
tmp_opicture = av_frame_alloc( );
#else

@@ -335,9 +342,9 @@ void VideoStream::OpenStream( ) {
Panic( "Could not allocate tmp_opicture" );
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
int size = av_image_get_buffer_size( pf, c->width, c->height,1 );
int size = av_image_get_buffer_size( pf, codec_context->width, codec_context->height,1 );
#else
int size = avpicture_get_size( pf, c->width, c->height );
int size = avpicture_get_size( pf, codec_context->width, codec_context->height );
#endif
uint8_t *tmp_opicture_buf = (uint8_t *)av_malloc( size );
if ( !tmp_opicture_buf ) {

@@ -347,10 +354,10 @@ void VideoStream::OpenStream( ) {
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(tmp_opicture->data,
tmp_opicture->linesize, tmp_opicture_buf, pf,
c->width, c->height, 1);
codec_context->width, codec_context->height, 1);
#else
avpicture_fill( (AVPicture *)tmp_opicture,
tmp_opicture_buf, pf, c->width, c->height );
tmp_opicture_buf, pf, codec_context->width, codec_context->height );
#endif
}
}
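The OpenStream() hunks swap avpicture_get_size()/avpicture_fill() for av_image_get_buffer_size()/av_image_fill_arrays() on newer libavutil. A small sketch of the newer path, assuming the caller frees frame->data[0] and the frame itself; on current FFmpeg, av_frame_get_buffer() is the simpler alternative:

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

/* Sketch: allocate an AVFrame plus a manually managed pixel buffer,
 * mirroring the av_image_* path used in the hunks above. */
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) {
  AVFrame *frame = av_frame_alloc();
  if (!frame)
    return NULL;
  int size = av_image_get_buffer_size(pix_fmt, width, height, /*align=*/1);
  uint8_t *buf = (uint8_t *)av_malloc(size);
  if (!buf) {
    av_frame_free(&frame);
    return NULL;
  }
  av_image_fill_arrays(frame->data, frame->linesize, buf, pix_fmt, width, height, 1);
  frame->format = pix_fmt;
  frame->width  = width;
  frame->height = height;
  return frame;  /* caller later frees frame->data[0] and the frame */
}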
@@ -375,7 +382,12 @@ void VideoStream::OpenStream( ) {
}

video_outbuf = NULL;
#if LIBAVFORMAT_VERSION_CHECK(57, 0, 0, 0, 0)
if (codec_context->codec_type == AVMEDIA_TYPE_VIDEO &&
codec_context->codec_id == AV_CODEC_ID_RAWVIDEO) {
#else
if ( !(of->flags & AVFMT_RAWPICTURE) ) {
#endif
/* allocate output buffer */
/* XXX: API change will be done */
// TODO: Make buffer dynamic.

@@ -446,6 +458,8 @@ VideoStream::VideoStream( const char *in_filename, const char *in_format, int bi
if ( pthread_mutex_init( buffer_copy_lock, NULL ) != 0 ) {
Fatal("pthread_mutex_init failed");
}

codec_context = NULL;
}

VideoStream::~VideoStream( ) {

@@ -481,7 +495,7 @@ VideoStream::~VideoStream( ) {

/* close each codec */
if ( ost ) {
avcodec_close( ost->codec );
avcodec_close( codec_context );
av_free( opicture->data[0] );
av_frame_free( &opicture );
if ( tmp_opicture ) {

@@ -564,17 +578,15 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
static struct SwsContext *img_convert_ctx = 0;
#endif // HAVE_LIBSWSCALE

AVCodecContext *c = ost->codec;

if ( c->pix_fmt != pf ) {
if ( codec_context->pix_fmt != pf ) {
memcpy( tmp_opicture->data[0], buffer, buffer_size );
#ifdef HAVE_LIBSWSCALE
if ( !img_convert_ctx ) {
img_convert_ctx = sws_getCachedContext( NULL, c->width, c->height, pf, c->width, c->height, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
img_convert_ctx = sws_getCachedContext( NULL, codec_context->width, codec_context->height, pf, codec_context->width, codec_context->height, codec_context->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
if ( !img_convert_ctx )
Panic( "Unable to initialise image scaling context" );
}
sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, c->height, opicture->data, opicture->linesize );
sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, codec_context->height, opicture->data, opicture->linesize );
#else // HAVE_LIBSWSCALE
Fatal( "swscale is required for MPEG mode" );
#endif // HAVE_LIBSWSCALE

@@ -586,7 +598,13 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
AVPacket *pkt = packet_buffers[packet_index];
av_init_packet( pkt );
int got_packet = 0;
#if LIBAVFORMAT_VERSION_CHECK(57, 0, 0, 0, 0)
if (codec_context->codec_type == AVMEDIA_TYPE_VIDEO &&
codec_context->codec_id == AV_CODEC_ID_RAWVIDEO) {
#else
if ( of->flags & AVFMT_RAWPICTURE ) {
#endif

#if LIBAVCODEC_VERSION_CHECK(52, 30, 2, 30, 2)
pkt->flags |= AV_PKT_FLAG_KEY;
#else

@@ -597,19 +615,34 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
pkt->size = sizeof (AVPicture);
got_packet = 1;
} else {
opicture_ptr->pts = c->frame_number;
opicture_ptr->quality = c->global_quality;
opicture_ptr->pts = codec_context->frame_number;
opicture_ptr->quality = codec_context->global_quality;

#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// Put encoder into flushing mode
avcodec_send_frame(codec_context, opicture_ptr);
int ret = avcodec_receive_packet(codec_context, pkt);
if ( ret < 0 ) {
if ( AVERROR_EOF != ret ) {
Error("ERror encoding video (%d) (%s)", ret,
av_err2str(ret));
}
} else {
got_packet = 1;
}
#else

#if LIBAVFORMAT_VERSION_CHECK(54, 1, 0, 2, 100)
int ret = avcodec_encode_video2( c, pkt, opicture_ptr, &got_packet );
int ret = avcodec_encode_video2( codec_context, pkt, opicture_ptr, &got_packet );
if ( ret != 0 ) {
Fatal( "avcodec_encode_video2 failed with errorcode %d \"%s\"", ret, av_err2str( ret ) );
}
#else
int out_size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, opicture_ptr );
int out_size = avcodec_encode_video( codec_context, video_outbuf, video_outbuf_size, opicture_ptr );
got_packet = out_size > 0 ? 1 : 0;
pkt->data = got_packet ? video_outbuf : NULL;
pkt->size = got_packet ? out_size : 0;
#endif
#endif
if ( got_packet ) {
// if ( c->coded_frame->key_frame )
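For libavcodec >= 57.64 the encode path above switches from avcodec_encode_video2() to avcodec_send_frame()/avcodec_receive_packet(). A minimal sketch of that loop under the usual API contract (EAGAIN means the encoder wants more input, AVERROR_EOF means it is fully flushed); the writer callback is illustrative:

#include <libavcodec/avcodec.h>

/* Sketch: encode one frame with the send/receive API and hand any
 * resulting packets to a caller-supplied writer. Returns 0 or a
 * negative AVERROR code. Pass frame == NULL to flush the encoder. */
static int encode_frame(AVCodecContext *ctx, AVFrame *frame,
                        int (*write_packet)(AVPacket *pkt, void *opaque), void *opaque) {
  int ret = avcodec_send_frame(ctx, frame);
  if (ret < 0)
    return ret;

  AVPacket *pkt = av_packet_alloc();
  while (ret >= 0) {
    ret = avcodec_receive_packet(ctx, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
      break;                       /* need more input, or fully flushed */
    if (ret < 0)
      break;                       /* real encoding error */
    write_packet(pkt, opaque);
    av_packet_unref(pkt);
  }
  av_packet_free(&pkt);
  return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}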
@@ -622,12 +655,12 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
// }

if ( pkt->pts != (int64_t)AV_NOPTS_VALUE ) {
pkt->pts = av_rescale_q( pkt->pts, c->time_base, ost->time_base );
pkt->pts = av_rescale_q( pkt->pts, codec_context->time_base, ost->time_base );
}
if ( pkt->dts != (int64_t)AV_NOPTS_VALUE ) {
pkt->dts = av_rescale_q( pkt->dts, c->time_base, ost->time_base );
pkt->dts = av_rescale_q( pkt->dts, codec_context->time_base, ost->time_base );
}
pkt->duration = av_rescale_q( pkt->duration, c->time_base, ost->time_base );
pkt->duration = av_rescale_q( pkt->duration, codec_context->time_base, ost->time_base );
pkt->stream_index = ost->index;
}
}

@@ -658,8 +691,12 @@ void *VideoStream::StreamingThreadCallback(void *ctx){
VideoStream* videoStream = reinterpret_cast<VideoStream*>(ctx);

const uint64_t nanosecond_multiplier = 1000000000;

uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->ost->codec->time_base.num) / (videoStream->ost->codec->time_base.den) );

#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->codec_context->time_base.num) / (videoStream->codec_context->time_base.den) );
#else
uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->codec_context->time_base.num) / (videoStream->codec_context->time_base.den) );
#endif
uint64_t frame_count = 0;
timespec start_time;
clock_gettime(CLOCK_MONOTONIC, &start_time);
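StreamingThreadCallback() derives its pacing interval from the codec context's time_base, which SetupCodec() sets to 1/frame_rate. A hedged sketch of that interval calculation paired with an absolute monotonic-clock sleep; the function and parameter names are illustrative:

#include <stdint.h>
#include <time.h>

/* Sketch: given a codec time base of num/den seconds per tick (1/frame_rate
 * for fixed-fps content), compute the inter-frame interval in nanoseconds and
 * sleep until the next frame is due. The caller seeds *next_due once with
 * clock_gettime(CLOCK_MONOTONIC, next_due). */
static void pace_one_frame(int time_base_num, int time_base_den, struct timespec *next_due) {
  const uint64_t NS_PER_S = 1000000000ULL;
  uint64_t interval_ns = (uint64_t)(NS_PER_S * ((double)time_base_num / time_base_den));

  next_due->tv_nsec += interval_ns % NS_PER_S;
  next_due->tv_sec  += interval_ns / NS_PER_S + next_due->tv_nsec / NS_PER_S;
  next_due->tv_nsec %= NS_PER_S;

  clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, next_due, NULL);
}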
@@ -46,6 +46,7 @@ protected:
AVOutputFormat *of;
AVFormatContext *ofc;
AVStream *ost;
AVCodecContext *codec_context;
AVCodec *codec;
AVFrame *opicture;
AVFrame *tmp_opicture;

@@ -103,6 +103,7 @@ int RemoteCamera::Read( int fd, char *buf, int size ) {
int bytes_to_recv = size - ReceivedBytes;
if ( SOCKET_BUF_SIZE < bytes_to_recv )
bytes_to_recv = SOCKET_BUF_SIZE;
//Debug(3, "Aiming to receive %d of %d bytes", bytes_to_recv, size );
bytes = recv(fd, &buf[ReceivedBytes], bytes_to_recv, 0); //socket, buffer, len, flags
if ( bytes <= 0 ) {
Error("RemoteCamera::Read Recv error. Closing Socket\n");
@@ -87,7 +87,8 @@ public:
virtual void Terminate() = 0;
virtual int Connect() = 0;
virtual int Disconnect() = 0;
virtual int PreCapture() = 0;
virtual int PreCapture() { return 0; };
virtual int PrimeCapture() { return 0; };
virtual int Capture( Image &image ) = 0;
virtual int PostCapture() = 0;
virtual int CaptureAndRecord( Image &image, timeval recording, char* event_directory )=0;
@@ -67,6 +67,7 @@ RemoteCameraNVSocket::RemoteCameraNVSocket(

timeout.tv_sec = 0;
timeout.tv_usec = 0;
subpixelorder = ZM_SUBPIX_ORDER_BGR;

if ( capture ) {
Initialise();

@@ -97,43 +98,39 @@ void RemoteCameraNVSocket::Initialise() {
}

int RemoteCameraNVSocket::Connect() {
int port_num = atoi(port.c_str());
//struct addrinfo *p;
struct sockaddr_in servaddr;
bzero( &servaddr, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htons(INADDR_ANY);
servaddr.sin_port = htons(atoi(port.c_str()));
struct sockaddr_in servaddr;
bzero( &servaddr, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htons(INADDR_ANY);
servaddr.sin_port = htons(port_num);

sd = socket(AF_INET, SOCK_STREAM, 0);
sd = socket(AF_INET, SOCK_STREAM, 0);
//for(p = hp; p != NULL; p = p->ai_next) {
//sd = socket( p->ai_family, p->ai_socktype, p->ai_protocol );
if ( sd < 0 ) {
Warning("Can't create socket: %s", strerror(errno) );
//continue;
return -1;
}

//if ( connect( sd, p->ai_addr, p->ai_addrlen ) < 0 ) {
if ( connect( sd, (struct sockaddr *)&servaddr , sizeof(servaddr) ) < 0 ) {
close(sd);
sd = -1;

Warning("Can't connect to socket mid: %d : %s", monitor_id, strerror(errno) );
return -1;
//continue;
//}
/* If we got here, we must have connected successfully */
//break;
//sd = socket( p->ai_family, p->ai_socktype, p->ai_protocol );
if ( sd < 0 ) {
Warning("Can't create socket: %s", strerror(errno) );
//continue;
return -1;
}

//if ( p == NULL ) {
//Error("Unable to connect to the remote camera, aborting");
//return( -1 );
//}
//if ( connect( sd, p->ai_addr, p->ai_addrlen ) < 0 ) {
if ( connect( sd, (struct sockaddr *)&servaddr , sizeof(servaddr) ) < 0 ) {
close(sd);
sd = -1;

Debug( 3, "Connected to host, socket = %d", sd );
return( sd );
Warning("Can't connect to socket mid: %d : %s", monitor_id, strerror(errno) );
return -1;
}

//if ( p == NULL ) {
//Error("Unable to connect to the remote camera, aborting");
//return( -1 );
//}

Debug( 3, "Connected to host:%d, socket = %d", port_num, sd );
return sd;
}

int RemoteCameraNVSocket::Disconnect() {
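Connect() above fills a sockaddr_in directly and keeps the older getaddrinfo() loop only as comments. For reference, a hedged sketch of the getaddrinfo()-based connect that those comments gesture at; host and port are illustrative parameters:

#include <netdb.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sketch: resolve host:port and connect, returning the socket fd or -1. */
static int tcp_connect(const char *host, const char *port) {
  struct addrinfo hints, *res, *p;
  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_UNSPEC;      /* IPv4 or IPv6 */
  hints.ai_socktype = SOCK_STREAM;

  if (getaddrinfo(host, port, &hints, &res) != 0)
    return -1;

  int sd = -1;
  for (p = res; p != NULL; p = p->ai_next) {
    sd = socket(p->ai_family, p->ai_socktype, p->ai_protocol);
    if (sd < 0)
      continue;
    if (connect(sd, p->ai_addr, p->ai_addrlen) == 0)
      break;                        /* connected */
    close(sd);
    sd = -1;
  }
  freeaddrinfo(res);
  return sd;
}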
@@ -144,132 +141,33 @@ int RemoteCameraNVSocket::Disconnect() {
}

int RemoteCameraNVSocket::SendRequest( std::string request ) {
Debug( 2, "Sending request: %s", request.c_str() );
Debug( 4, "Sending request: %s", request.c_str() );
if ( write( sd, request.data(), request.length() ) < 0 ) {
Error( "Can't write: %s", strerror(errno) );
Disconnect();
return( -1 );
}
Debug( 3, "Request sent" );
Debug( 4, "Request sent" );
return( 0 );
}

/* Return codes are as follows:
* -1 means there was an error
* 0 means no bytes were returned but there wasn't actually an error.
* > 0 is the # of bytes read.
*/

int RemoteCameraNVSocket::ReadData( Buffer &buffer, unsigned int bytes_expected ) {
fd_set rfds;
FD_ZERO(&rfds);
FD_SET(sd, &rfds);

struct timeval temp_timeout = timeout;

int n_found = select(sd+1, &rfds, NULL, NULL, &temp_timeout);
if ( n_found == 0 ) {
Debug( 4, "Select timed out timeout was %d secs %d usecs", temp_timeout.tv_sec, temp_timeout.tv_usec );
int error = 0;
socklen_t len = sizeof(error);
int retval = getsockopt(sd, SOL_SOCKET, SO_ERROR, &error, &len);
if ( retval != 0 ) {
Debug(1, "error getting socket error code %s", strerror(retval));
}
if ( error != 0 ) {
return -1;
}
// Why are we disconnecting? It's just a timeout, meaning that data wasn't available.
//Disconnect();
return 0;
} else if ( n_found < 0 ) {
Error("Select error: %s", strerror(errno));
return -1;
}

unsigned int total_bytes_to_read = 0;

if ( bytes_expected ) {
total_bytes_to_read = bytes_expected;
} else {
if ( ioctl( sd, FIONREAD, &total_bytes_to_read ) < 0 ) {
Error( "Can't ioctl(): %s", strerror(errno) );
return( -1 );
}

if ( total_bytes_to_read == 0 ) {
if ( mode == SINGLE_IMAGE ) {
int error = 0;
socklen_t len = sizeof (error);
int retval = getsockopt( sd, SOL_SOCKET, SO_ERROR, &error, &len );
if(retval != 0 ) {
Debug( 1, "error getting socket error code %s", strerror(retval) );
}
if (error != 0) {
return -1;
}
// Case where we are grabbing a single jpg, but no content-length was given, so the expectation is that we read until close.
return( 0 );
}
// If socket is closed locally, then select will fail, but if it is closed remotely
// then we have an exception on our socket.. but no data.
Debug( 3, "Socket closed remotely" );
//Disconnect(); // Disconnect is done outside of ReadData now.
return( -1 );
}

// There can be lots of bytes available. I've seen 4MB or more. This will vastly inflate our buffer size unnecessarily.
if ( total_bytes_to_read > ZM_NETWORK_BUFSIZ ) {
total_bytes_to_read = ZM_NETWORK_BUFSIZ;
Debug(3, "Just getting 32K" );
} else {
Debug(3, "Just getting %d", total_bytes_to_read );
}
} // end if bytes_expected or not
Debug( 3, "Expecting %d bytes", total_bytes_to_read );

int total_bytes_read = 0;
do {
int bytes_read = buffer.read_into( sd, total_bytes_to_read );
if ( bytes_read < 0 ) {
Error( "Read error: %s", strerror(errno) );
return( -1 );
} else if ( bytes_read == 0 ) {
Debug( 2, "Socket closed" );
//Disconnect(); // Disconnect is done outside of ReadData now.
return( -1 );
} else if ( (unsigned int)bytes_read < total_bytes_to_read ) {
Error( "Incomplete read, expected %d, got %d", total_bytes_to_read, bytes_read );
return( -1 );
}
Debug( 3, "Read %d bytes", bytes_read );
total_bytes_read += bytes_read;
total_bytes_to_read -= bytes_read;
} while ( total_bytes_to_read );

Debug( 4, buffer );

return( total_bytes_read );
}

int RemoteCameraNVSocket::PreCapture() {
int RemoteCameraNVSocket::PrimeCapture() {
if ( sd < 0 ) {
Connect();
if ( sd < 0 ) {
Error( "Unable to connect to camera" );
return( -1 );
}
mode = SINGLE_IMAGE;
buffer.clear();
}
struct image_def {
uint16_t width;
uint16_t height;
uint16_t type;
};
struct image_def image_def;
buffer.clear();
struct image_def {
uint16_t width;
uint16_t height;
uint16_t type;
};
struct image_def image_def;

if ( SendRequest("GetImageParams") < 0 ) {
if ( SendRequest("GetImageParams\n") < 0 ) {
Error( "Unable to send request" );
Disconnect();
return -1;

@@ -289,20 +187,28 @@ struct image_def image_def;
}

int RemoteCameraNVSocket::Capture( Image &image ) {
if ( SendRequest("GetNextImage") < 0 ) {
if ( SendRequest("GetNextImage\n") < 0 ) {
Warning( "Unable to capture image, retrying" );
return( 1 );
return 0;
}
if ( Read( sd, buffer, imagesize ) < imagesize ) {
Warning( "Unable to capture image, retrying" );
return( 1 );
return 0;
}
uint32_t end;
if ( Read(sd, (char *) &end , sizeof(end)) < 0 ) {
Warning( "Unable to capture image, retrying" );
return 0;
}
if ( end != 0xFFFFFFFF) {
Warning("End Bytes Failed\n");
return 0;
}

image.Assign( width, height, colours, subpixelorder, buffer, imagesize );
return( 0 );
return 1;
}

int RemoteCameraNVSocket::PostCapture()
{
int RemoteCameraNVSocket::PostCapture() {
return( 0 );
}
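Capture() above expects SendRequest("GetNextImage\n") to be answered by imagesize bytes of pixel data followed by a four-byte 0xFFFFFFFF trailer. A hedged sketch of that exchange built on a read-exactly-N-bytes helper; the helper names are illustrative and error handling is reduced to return codes:

#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Sketch: read exactly `len` bytes from a connected socket.
 * Returns 0 on success, -1 on error or premature close. */
static int recv_all(int sd, void *buf, size_t len) {
  uint8_t *p = (uint8_t *)buf;
  while (len > 0) {
    ssize_t n = recv(sd, p, len, 0);
    if (n <= 0)
      return -1;
    p += n;
    len -= (size_t)n;
  }
  return 0;
}

/* Usage sketch for the GetNextImage exchange above: read the image payload,
 * then a 4-byte trailer that is expected to be 0xFFFFFFFF. */
static int read_frame(int sd, uint8_t *image, size_t imagesize) {
  uint32_t end_marker = 0;
  if (recv_all(sd, image, imagesize) < 0)
    return -1;
  if (recv_all(sd, &end_marker, sizeof(end_marker)) < 0)
    return -1;
  return end_marker == 0xFFFFFFFFu ? 0 : -1;
}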
@@ -67,7 +67,7 @@ bool p_record_audio );
int SendRequest( std::string );
int ReadData( Buffer &buffer, unsigned int bytes_expected=0 );
int GetResponse();
int PreCapture();
int PrimeCapture();
int Capture( Image &image );
int PostCapture();
int CaptureAndRecord( Image &image, timeval recording, char* event_directory ) {return(0);};
@@ -379,21 +379,31 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
stream->id = i;
#endif

AVCodecContext *codec_context = NULL;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
codec_context = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(codec_context, stream->codecpar);
#else
codec_context = stream->codec;
#endif

Debug( 1, "Looking for codec for %s payload type %d / %s", mediaDesc->getType().c_str(), mediaDesc->getPayloadType(), mediaDesc->getPayloadDesc().c_str() );
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
if ( mediaDesc->getType() == "video" )
stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
else if ( mediaDesc->getType() == "audio" )
stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
codec_context->codec_type = AVMEDIA_TYPE_AUDIO;
else if ( mediaDesc->getType() == "application" )
stream->codec->codec_type = AVMEDIA_TYPE_DATA;
codec_context->codec_type = AVMEDIA_TYPE_DATA;
#else
if ( mediaDesc->getType() == "video" )
stream->codec->codec_type = CODEC_TYPE_VIDEO;
codec_context->codec_type = CODEC_TYPE_VIDEO;
else if ( mediaDesc->getType() == "audio" )
stream->codec->codec_type = CODEC_TYPE_AUDIO;
codec_context->codec_type = CODEC_TYPE_AUDIO;
else if ( mediaDesc->getType() == "application" )
stream->codec->codec_type = CODEC_TYPE_DATA;
codec_context->codec_type = CODEC_TYPE_DATA;
#endif

#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)

@@ -410,31 +420,27 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
codec_name = std::string( smStaticPayloads[i].payloadName );
#else
strncpy( stream->codec->codec_name, smStaticPayloads[i].payloadName, sizeof(stream->codec->codec_name) );;
strncpy( codec_context->codec_name, smStaticPayloads[i].payloadName, sizeof(codec_context->codec_name) );;
#endif
stream->codec->codec_type = smStaticPayloads[i].codecType;
stream->codec->codec_id = smStaticPayloads[i].codecId;
stream->codec->sample_rate = smStaticPayloads[i].clockRate;
codec_context->codec_type = smStaticPayloads[i].codecType;
codec_context->codec_id = smStaticPayloads[i].codecId;
codec_context->sample_rate = smStaticPayloads[i].clockRate;
break;
}
}
}
else
{
} else {
// Look in dynamic table
for ( unsigned int i = 0; i < (sizeof(smDynamicPayloads)/sizeof(*smDynamicPayloads)); i++ )
{
if ( smDynamicPayloads[i].payloadName == mediaDesc->getPayloadDesc() )
{
for ( unsigned int i = 0; i < (sizeof(smDynamicPayloads)/sizeof(*smDynamicPayloads)); i++ ) {
if ( smDynamicPayloads[i].payloadName == mediaDesc->getPayloadDesc() ) {
Debug( 1, "Got dynamic payload type %d, %s", mediaDesc->getPayloadType(), smDynamicPayloads[i].payloadName );
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
codec_name = std::string( smStaticPayloads[i].payloadName );
#else
strncpy( stream->codec->codec_name, smDynamicPayloads[i].payloadName, sizeof(stream->codec->codec_name) );;
strncpy( codec_context->codec_name, smDynamicPayloads[i].payloadName, sizeof(codec_context->codec_name) );;
#endif
stream->codec->codec_type = smDynamicPayloads[i].codecType;
stream->codec->codec_id = smDynamicPayloads[i].codecId;
stream->codec->sample_rate = mediaDesc->getClock();
codec_context->codec_type = smDynamicPayloads[i].codecType;
codec_context->codec_id = smDynamicPayloads[i].codecId;
codec_context->sample_rate = mediaDesc->getClock();
break;
}
}

@@ -450,14 +456,13 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
//return( 0 );
}
if ( mediaDesc->getWidth() )
stream->codec->width = mediaDesc->getWidth();
codec_context->width = mediaDesc->getWidth();
if ( mediaDesc->getHeight() )
stream->codec->height = mediaDesc->getHeight();
if ( stream->codec->codec_id == AV_CODEC_ID_H264 && mediaDesc->getSprops().size())
{
codec_context->height = mediaDesc->getHeight();
if ( codec_context->codec_id == AV_CODEC_ID_H264 && mediaDesc->getSprops().size()) {
uint8_t start_sequence[]= { 0, 0, 1 };
stream->codec->extradata_size= 0;
stream->codec->extradata= NULL;
codec_context->extradata_size= 0;
codec_context->extradata= NULL;
char pvalue[1024], *value = pvalue;

strcpy(pvalue, mediaDesc->getSprops().c_str());

@@ -482,22 +487,33 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
if (packet_size) {
uint8_t *dest =
(uint8_t *)av_malloc(packet_size + sizeof(start_sequence) +
stream->codec->extradata_size +
FF_INPUT_BUFFER_PADDING_SIZE);
codec_context->extradata_size +
#if LIBAVCODEC_VERSION_CHECK(57, 0, 0, 0, 0)
AV_INPUT_BUFFER_PADDING_SIZE
#else
FF_INPUT_BUFFER_PADDING_SIZE
#endif
);
if(dest) {
if(stream->codec->extradata_size) {
if(codec_context->extradata_size) {
// av_realloc?
memcpy(dest, stream->codec->extradata, stream->codec->extradata_size);
av_free(stream->codec->extradata);
memcpy(dest, codec_context->extradata, codec_context->extradata_size);
av_free(codec_context->extradata);
}

memcpy(dest+stream->codec->extradata_size, start_sequence, sizeof(start_sequence));
memcpy(dest+stream->codec->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
memset(dest+stream->codec->extradata_size+sizeof(start_sequence)+
packet_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
memcpy(dest+codec_context->extradata_size, start_sequence, sizeof(start_sequence));
memcpy(dest+codec_context->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
memset(dest+codec_context->extradata_size+sizeof(start_sequence)+
packet_size, 0,
#if LIBAVCODEC_VERSION_CHECK(57, 0, 0, 0, 0)
AV_INPUT_BUFFER_PADDING_SIZE
#else
FF_INPUT_BUFFER_PADDING_SIZE
#endif
);

stream->codec->extradata= dest;
stream->codec->extradata_size+= sizeof(start_sequence)+packet_size;
codec_context->extradata= dest;
codec_context->extradata_size+= sizeof(start_sequence)+packet_size;
// } else {
// av_log(codec, AV_LOG_ERROR, "Unable to allocate memory for extradata!");
// return AVERROR(ENOMEM);
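The extradata hunk above repeats an #if at each call site to pick AV_INPUT_BUFFER_PADDING_SIZE (libavcodec >= 57) or FF_INPUT_BUFFER_PADDING_SIZE. A hedged sketch of a one-line compatibility shim that avoids the repetition; the ZM_-prefixed name is illustrative:

#include <libavcodec/avcodec.h>

/* Sketch: pick whichever padding constant this libavcodec provides,
 * so allocation sites can use one name. */
#if defined(AV_INPUT_BUFFER_PADDING_SIZE)
  #define ZM_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
#else
  #define ZM_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
#endif

/* e.g. the extradata allocation becomes:
 *   av_malloc(packet_size + sizeof(start_sequence)
 *             + codec_context->extradata_size
 *             + ZM_INPUT_BUFFER_PADDING_SIZE);
 */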
src/zm_sdp.h (18 changed lines)
@@ -31,13 +31,11 @@
#include <string>
#include <vector>

class SessionDescriptor
{
class SessionDescriptor {
protected:
enum { PAYLOAD_TYPE_DYNAMIC=96 };

struct StaticPayloadDesc
{
struct StaticPayloadDesc {
int payloadType;
const char payloadName[6];
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))

@@ -50,8 +48,7 @@ protected:
int autoChannels;
};

struct DynamicPayloadDesc
{
struct DynamicPayloadDesc {
const char payloadName[32];
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
AVMediaType codecType;

@@ -65,8 +62,7 @@ protected:
};

public:
class ConnInfo
{
class ConnInfo {
protected:
std::string mNetworkType;
std::string mAddressType;

@@ -78,8 +74,7 @@ public:
ConnInfo( const std::string &connInfo );
};

class BandInfo
{
class BandInfo {
protected:
std::string mType;
int mValue;

@@ -88,8 +83,7 @@ public:
BandInfo( const std::string &bandInfo );
};

class MediaDescriptor
{
class MediaDescriptor {
protected:
std::string mType;
int mPort;
@@ -167,7 +167,11 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
video_out_ctx->time_base.den);

if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}

Monitor::Orientation orientation = monitor->getOrientation();

@@ -274,7 +278,11 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,

if (audio_out_stream) {
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
audio_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
audio_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
audio_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
}
} // end if audio_in_stream
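The VideoStore hunks wrap the global-header flag in the same version check used throughout this commit. The same shim idea applies here; a hedged sketch, with the ZM_-prefixed name again being illustrative:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Sketch: one name for the global-header flag across old and new libavcodec. */
#ifdef AV_CODEC_FLAG_GLOBAL_HEADER
  #define ZM_CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER
#else
  #define ZM_CODEC_FLAG_GLOBAL_HEADER CODEC_FLAG_GLOBAL_HEADER
#endif

/* Then the muxer setup reads the same on every version: */
static void set_global_header(AVFormatContext *oc, AVCodecContext *ctx) {
  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    ctx->flags |= ZM_CODEC_FLAG_GLOBAL_HEADER;
}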
|
Loading…
Reference in New Issue