fix video speed, various other
parent b255414bbb
commit adab628af9

@@ -643,7 +643,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
       int keyframe = packet.flags & AV_PKT_FLAG_KEY;

-      Debug( 4, "Got packet from stream %d packet pts (%d) dts(%d), key?(%d)",
+      Debug( 4, "Got packet from stream %d packet pts (%u) dts(%u), key?(%d)",
          packet.stream_index, packet.pts, packet.dts,
          keyframe
          );

@@ -794,6 +794,7 @@ else if ( packet.pts && video_last_pts > packet.pts ) {
         }
+        have_video_keyframe = true;
       }
     } // end if keyframe or have_video_keyframe

     Debug(4, "about to decode video" );

@@ -854,7 +855,6 @@ else if ( packet.pts && video_last_pts > packet.pts ) {
       } else {
         Debug( 3, "Not framecomplete after av_read_frame" );
       } // end if frameComplete
-    } // end if keyframe or have_video_keyframe
   } else if ( packet.stream_index == mAudioStreamId ) { //FIXME best way to copy all other streams
     if ( videoStore ) {
       if ( record_audio ) {

@@ -397,6 +397,10 @@ VideoStore::~VideoStore(){
     av_frame_free( &output_frame );
+    output_frame = NULL;
   }
+  if ( converted_input_samples ) {
     av_free( converted_input_samples );
+    converted_input_samples =NULL;
+  }
 #endif
 }

@@ -481,16 +485,6 @@ bool VideoStore::setup_resampler() {
   audio_output_context->time_base = (AVRational){ 1, audio_output_context->sample_rate };

-  Debug(1, "Audio output bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) layout(%d) frame_size(%d)",
-      audio_output_context->bit_rate,
-      audio_output_context->sample_rate,
-      audio_output_context->channels,
-      audio_output_context->sample_fmt,
-      audio_output_context->channel_layout,
-      audio_output_context->frame_size
-      );
-
   // Now copy them to the output stream
   audio_output_stream = avformat_new_stream( oc, audio_output_codec );

@@ -624,7 +618,7 @@ bool VideoStore::setup_resampler() {
     Error("Not built with libavresample library. Cannot do audio conversion to AAC");
     return false;
 #endif
   }
-}
+} // end bool VideoStore::setup_resampler()

 void VideoStore::dumpPacket( AVPacket *pkt ){

@@ -650,14 +644,20 @@ int VideoStore::writeVideoFramePacket( AVPacket *ipkt ) {
   opkt.dts = video_next_dts;

-  if ( video_last_pts && ( ipkt->duration == AV_NOPTS_VALUE || ! ipkt->duration ) ) {
+  int duration;
+  if ( ! video_last_pts ) {
+    duration = 0;
+  } else {
+    duration = av_rescale_q( ipkt->pts - video_last_pts, video_input_stream->time_base, video_output_stream->time_base);
+    opkt.duration = 0;
+  }
+
+  //#if ( 0 && video_last_pts && ( ipkt->duration == AV_NOPTS_VALUE || ! ipkt->duration ) ) {
     // Video packets don't really have a duration. Audio does.
     //opkt.duration = av_rescale_q(duration, video_input_stream->time_base, video_output_stream->time_base);
-    opkt.duration = 0;
-  } else {
-    duration = opkt.duration = av_rescale_q(ipkt->duration, video_input_stream->time_base, video_output_stream->time_base);
-  }
+    //opkt.duration = 0;
+  //} else {
+    //duration = opkt.duration = av_rescale_q(ipkt->duration, video_input_stream->time_base, video_output_stream->time_base);
+  //}
   video_last_pts = ipkt->pts;
   video_last_dts = ipkt->dts;

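For reference, the duration computed in the hunk above is just the pts delta between consecutive packets rescaled from the input stream's time base to the output stream's time base. A minimal standalone sketch of that arithmetic follows; the time bases and pts values are hypothetical, not taken from this commit:

// Sketch only: illustrates the av_rescale_q() math behind the duration above.
extern "C" {
#include <libavutil/mathematics.h>
}
#include <cstdio>

int main() {
  AVRational input_tb  = {1, 90000};  // assumed 90 kHz RTSP input time base
  AVRational output_tb = {1, 1000};   // assumed millisecond output time base
  int64_t video_last_pts = 180000;    // hypothetical pts of the previous packet
  int64_t pts            = 183000;    // hypothetical pts of the current packet
  int64_t duration = av_rescale_q(pts - video_last_pts, input_tb, output_tb);
  printf("duration = %lld\n", (long long)duration);  // 3000 * 1000/90000 -> 33
  return 0;
}
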
@@ -745,7 +745,7 @@ int VideoStore::writeVideoFramePacket( AVPacket *ipkt ) {

   } else {

-    video_next_dts = opkt.dts + duration; // Unsure if av_interleaved_write_frame() clobbers opkt.dts when out of order, so storing in advance
+    video_next_dts = opkt.dts + duration;
     video_next_pts = opkt.pts + duration;
     ret = av_interleaved_write_frame(oc, &opkt);
     if ( ret < 0 ) {

@@ -774,6 +774,7 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
   }

   if ( audio_output_codec ) {
+    Debug(3, "Have audio codec");
 #ifdef HAVE_LIBAVRESAMPLE

 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)

@@ -788,12 +789,11 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
       Error("avcodec_receive_frame fail %s", av_make_error_string(ret).c_str());
       return 0;
     }
-    Debug(2, "Frame: samples(%d), format(%d), sample_rate(%d), channel layout(%d) refd(%d)",
+    Debug(2, "Input Frame: samples(%d), format(%d), sample_rate(%d), channel layout(%d)",
         input_frame->nb_samples,
         input_frame->format,
         input_frame->sample_rate,
-        input_frame->channel_layout,
-        audio_output_context->refcounted_frames
+        input_frame->channel_layout
         );
 #else
     /**

@@ -815,8 +815,7 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
       return 0;
     }
 #endif
-    int frame_size = input_frame->nb_samples;
-    Debug(4, "Frame size: %d", frame_size );
+    int frame_size = output_frame->nb_samples;

     // Resample the input into the audioSampleBuffer until we proceed the whole decoded data
     if ( (ret = avresample_convert( resample_context,

@@ -833,31 +832,29 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
     }
     av_frame_unref( input_frame );

-    if ( avresample_available( resample_context ) < output_frame->nb_samples ) {
-      Debug(1, "No enough samples yet");
+    int samples_available = avresample_available( resample_context );
+
+    if ( samples_available < frame_size ) {
+      Debug(1, "Not enough samples yet (%d)", samples_available);
       return 0;
     }

     Debug(3, "Output_frame samples (%d)", output_frame->nb_samples );
     // Read a frame audio data from the resample fifo
-    if ( avresample_read( resample_context, output_frame->data, output_frame->nb_samples ) != output_frame->nb_samples ) {
+    if ( avresample_read( resample_context, output_frame->data, frame_size ) != frame_size ) {
       Warning( "Error reading resampled audio: " );
       return 0;
     }
     Debug(2, "Frame: samples(%d), format(%d), sample_rate(%d), channel layout(%d)",
         output_frame->nb_samples,
         output_frame->format,
         output_frame->sample_rate,
         output_frame->channel_layout
         );

     av_init_packet(&opkt);
     Debug(5, "after init packet" );

     /** Set a timestamp based on the sample rate for the container. */
     //output_frame->pts = av_rescale_q( opkt.pts, audio_output_context->time_base, audio_output_stream->time_base );

     // convert the packet to the codec timebase from the stream timebase
     //Debug(3, "output_frame->pts(%d) best effort(%d)", output_frame->pts,
     //av_frame_get_best_effort_timestamp(output_frame)
     //);
     /**
      * Encode the audio frame and store it in the temporary packet.
      * The output audio stream encoder is used to do this.
      */
 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
     if (( ret = avcodec_send_frame( audio_output_context, output_frame ) ) < 0 ) {
       Error( "Could not send frame (error '%s')",

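The change above makes the availability check and the read use the same frame_size, which is the usual libavresample FIFO pattern: queue decoded samples with avresample_convert(), and only pull a full encoder frame once avresample_available() reports enough. A generic sketch of that flow, with illustrative names and simplified error handling (an assumption, not the ZoneMinder code verbatim):

extern "C" {
#include <libavresample/avresample.h>
#include <libavutil/frame.h>
}

// Sketch: push one decoded frame into the resampler FIFO, then pull exactly
// frame_size samples once enough have accumulated. Returns 0 if more input
// is still needed, <0 on error, frame_size on success.
static int pull_resampled(AVAudioResampleContext *resample_context,
                          AVFrame *input_frame, AVFrame *output_frame,
                          int frame_size) {
  // With a NULL output, converted samples stay buffered in the internal FIFO.
  int ret = avresample_convert(resample_context, NULL, 0, 0,
                               input_frame->data, 0, input_frame->nb_samples);
  if (ret < 0)
    return ret;

  if (avresample_available(resample_context) < frame_size)
    return 0;  // not enough samples yet, same situation as the Debug(1, ...) path above

  // Read one encoder frame's worth of samples out of the FIFO.
  if (avresample_read(resample_context, output_frame->data, frame_size) != frame_size)
    return -1;
  return frame_size;
}
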
@@ -865,9 +862,10 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
       zm_av_packet_unref(&opkt);
       return 0;
     }
-    av_frame_unref( output_frame );
+    //av_frame_unref( output_frame );

-    if (( ret = avcodec_receive_packet( audio_output_context, &opkt )) < 0 ) {
+    if ( ( ret = avcodec_receive_packet( audio_output_context, &opkt ) ) < 0 ) {
+      if ( EAGAIN == ret ) {
+        // THe codec may need more samples than it has, perfectly valid
+        Debug( 3, "Could not recieve packet (error '%s')",

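The EAGAIN branch added above reflects the standard send/receive contract of the newer libavcodec API: avcodec_receive_packet() returning AVERROR(EAGAIN) only means the encoder has buffered the samples and wants more input, so it should not be treated as a failure. A generic sketch of that loop (an illustrative helper, not part of this commit):

extern "C" {
#include <libavcodec/avcodec.h>
}

// Sketch: feed one frame to the encoder and drain whatever packets it produces.
// AVERROR(EAGAIN) and AVERROR_EOF are normal control-flow results, not errors.
static int encode_frame(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt) {
  int ret = avcodec_send_frame(enc, frame);  // frame == NULL flushes the encoder
  if (ret < 0)
    return ret;

  while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
    // ... hand pkt to the muxer here ...
    av_packet_unref(pkt);
  }
  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
    return 0;  // encoder simply needs more input, or is fully drained
  return ret;  // genuine error
}
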
@@ -878,7 +876,7 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
       }
       zm_av_packet_unref(&opkt);
       av_frame_unref( input_frame );
-      av_frame_unref( output_frame );
+      //av_frame_unref( output_frame );
       return 0;
     }
 #else

@@ -928,9 +926,8 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
   }
 #else
   opkt.pts = audio_next_pts;
-#endif
-
   opkt.dts = audio_next_dts;
+#endif

 #if 0
   if ( ipkt->dts == AV_NOPTS_VALUE ) {

@@ -955,14 +952,20 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
     }
   }
 #endif
-  audio_last_dts = ipkt->dts;
+  //audio_last_dts = ipkt->dts;
   if ( opkt.dts > opkt.pts ) {
     Debug(1,"opkt.dts(%d) must be <= opkt.pts(%d). Decompression must happen before presentation.", opkt.dts, opkt.pts );
     opkt.dts = opkt.pts;
   }

   // I wonder if we could just use duration instead of all the hoop jumping above?
-  opkt.duration = av_rescale_q(ipkt->duration, audio_input_stream->time_base, audio_output_stream->time_base);
+  //
+  if ( output_frame ) {
+    opkt.duration = output_frame->nb_samples;
+  } else {
+    opkt.duration = ipkt->duration;
+  }
+  //opkt.duration = av_rescale_q(ipkt->duration, audio_input_stream->time_base, audio_output_stream->time_base);
   Debug( 2, "opkt.pts (%d), opkt.dts(%d) opkt.duration = (%d)", opkt.pts, opkt.dts, opkt.duration );

   // pkt.pos: byte position in stream, -1 if unknown

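Setting opkt.duration to output_frame->nb_samples in the hunk above works because the audio encoder's time base is 1/sample_rate (assigned in setup_resampler() earlier in this diff), so a sample count is already a duration in that time base; converting it to any other stream time base is a single av_rescale_q() call. A small sketch with hypothetical numbers:

extern "C" {
#include <libavutil/mathematics.h>
}
#include <cstdio>

int main() {
  const int sample_rate = 8000;             // hypothetical AAC output sample rate
  AVRational codec_tb  = {1, sample_rate};  // mirrors audio_output_context->time_base above
  AVRational stream_tb = {1, 90000};        // hypothetical muxer stream time base
  int nb_samples = 1024;                    // assumed encoder frame size

  int64_t duration_codec  = nb_samples;     // duration in the 1/sample_rate time base
  int64_t duration_stream = av_rescale_q(duration_codec, codec_tb, stream_tb);
  printf("%d samples -> %lld ticks in 1/90000\n", nb_samples, (long long)duration_stream);
  // 1024 * 90000 / 8000 = 11520
  return 0;
}
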
@@ -202,7 +202,7 @@ class Event {

   // frame is an array representing the db row for a frame.
   function getImageSrc( $frame, $scale=SCALE_BASE, $captureOnly=false, $overwrite=false ) {
-    $Storage = new Storage( isset($this->{'StorageId'}) ? $this->{'StorageId'} : NULL );
+    $Storage = new Storage( isset($this->{'StorageId'}) ? $this->{'StorageId'} : NULL );
     $Event = $this;
     $eventPath = $Event->Path();

@@ -211,8 +211,9 @@ function process_configfile($configFile) {
       continue;
     elseif ( preg_match( '/^\s*#/', $str ))
       continue;
-    elseif ( preg_match( '/^\s*([^=\s]+)\s*=\s*(.*?)\s*$/', $str, $matches ))
+    elseif ( preg_match( '/^\s*([^=\s]+)\s*=\s*(.*?)\s*$/', $str, $matches )) {
       $configvals[$matches[1]] = $matches[2];
+    }
   }
   fclose( $cfg );
   return( $configvals );

@@ -42,16 +42,23 @@ function dbConnect() {
   }

   try {
-    $dbOptions = array(
-      PDO::MYSQL_ATTR_SSL_CA => ZM_DB_SSL_CA_CERT,
-      PDO::MYSQL_ATTR_SSL_KEY => ZM_DB_SSL_CLIENT_KEY,
-      PDO::MYSQL_ATTR_SSL_CERT => ZM_DB_SSL_CLIENT_CERT,
-    );
-    $dbConn = new PDO( ZM_DB_TYPE . $socket . ';dbname='.ZM_DB_NAME, ZM_DB_USER, ZM_DB_PASS, $dbOptions );
+
+    $dbOptions = null;
+    if ( defined( 'ZM_DB_SSL_CA_CERT' ) and ZM_DB_SSL_CA_CERT ) {
+      $dbOptions = array(
+        PDO::MYSQL_ATTR_SSL_CA => ZM_DB_SSL_CA_CERT,
+        PDO::MYSQL_ATTR_SSL_KEY => ZM_DB_SSL_CLIENT_KEY,
+        PDO::MYSQL_ATTR_SSL_CERT => ZM_DB_SSL_CLIENT_CERT,
+      );
+      $dbConn = new PDO( ZM_DB_TYPE . $socket . ';dbname='.ZM_DB_NAME, ZM_DB_USER, ZM_DB_PASS, $dbOptions );
+    } else {
+      $dbConn = new PDO( ZM_DB_TYPE . $socket . ';dbname='.ZM_DB_NAME, ZM_DB_USER, ZM_DB_PASS );
+    }
     $dbConn->setAttribute(PDO::ATTR_EMULATE_PREPARES, false);
     $dbConn->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
   } catch(PDOException $ex ) {
     echo "Unable to connect to ZM db." . $ex->getMessage();
     error_log("Unable to connect to ZM DB " . $ex->getMessage() );
     $dbConn = null;
   }
 }