generate remaining language variables from and to definition file+
parent e2f54343b6
commit c16727e3de
@@ -1376,259 +1376,271 @@
"MQTT Outbound": "MQTT Outbound",
|
||||
"MQTT Client": "MQTT Client",
|
||||
"fieldTextMode": "This is the primary task of the monitor.",
|
||||
"fieldTextModeIsabled": "Inactive monitor, no process will be created in this mode.",
|
||||
"fieldTextModeAtchNly": "Monitor will only stream, no recording will occur unless otherwise ordered by API or Detector.",
|
||||
"fieldTextModeEcord": "Continuous Recording. Segments are made every 15 minutes by default.",
|
||||
"fieldTextModeDisabled": "Inactive monitor, no process will be created in this mode.",
|
||||
"fieldTextModeWatchOnly": "Monitor will only stream, no recording will occur unless otherwise ordered by API or Detector.",
|
||||
"fieldTextModeRecord": "Continuous Recording. Segments are made every 15 minutes by default.",
|
||||
"fieldTextMid": "This is a non-changeable identifier for the monitor. You can duplicate a monitor by double clicking the Monitor ID and changing it.",
|
||||
"fieldTextName": "This is the human-readable display name for the monitor.",
|
||||
"fieldTextMaxkeepdays": "The number of days to keep videos before purging for this monitor specifically.",
|
||||
"fieldTextMaxKeepDays": "The number of days to keep videos before purging for this monitor specifically.",
|
||||
"fieldTextNotes": "Comments you want to leave for this camera.",
|
||||
"fieldTextDir": "Location of where recorded files will be saved. You can configure more locations with the <code>addStorage</code> variable.",
|
||||
"fieldTextType": "Learn to connect here : <a href=\"https://shinobi.video/articles/2019-02-14-how-to-push-streams-to-shinobi-with-rtmp\" target=\"_blank\">Article : How to Push Streams via RTMP to Shinobi</a>",
|
||||
"fieldTextType264265265": "Reading a high quality video streas that sometimes include audio.",
|
||||
"fieldTextTypeM3u8": "Reading a high quality video streas that sometimes include audio.",
|
||||
"fieldTextType4Mp4Ts": "A static file. Read at a lower rate and should not be used for an actual live stream.",
|
||||
"fieldTextTypeHinobiTreamer": "Websocket JPEG-based P2P stream.",
|
||||
"fieldTextTypeAshcamTreamerV2": "Websocket WebM-based P2P stream.",
|
||||
"fieldTextTypeOcal": "Reading Capture Cards, Webcams, or Integrated Cameras.",
|
||||
"fieldTextTypeX": "Mobotix MJPEG Stream",
|
||||
"fieldTextRtmpkey": "Stream Key for incoming streams on the RTMP port.",
|
||||
"fieldTextAutohostenable": "Feed the individual pieces required to build a stream URL or provide the full URL and allow Shinobi to parse it for you.",
|
||||
"fieldTextAutohost": "The full Stream URL.",
|
||||
"fieldTextType": "The method that will used to consume the video stream.",
|
||||
"fieldTextTypeJPEG": "Reading snapshots from a URL and making a stream and/or video from them.",
|
||||
"fieldTextTypeMJPEG": "Similar to JPEG except the frame handling is done by FFMPEG, not Shinobi.",
|
||||
"fieldTextTypeH.264/H.265/H.265+": "Reading a high quality video streas that sometimes include audio.",
|
||||
"fieldTextTypeHLS(.m3u8)": "Reading a high quality video streas that sometimes include audio.",
|
||||
"fieldTextTypeMPEG4(.mp4/.ts)": "A static file. Read at a lower rate and should not be used for an actual live stream.",
|
||||
"fieldTextTypeShinobiStreamer": "Websocket JPEG-based P2P stream.",
|
||||
"fieldTextTypeDashcam(StreamerV2)": "Websocket WebM-based P2P stream.",
|
||||
"fieldTextTypeLocal": "Reading Capture Cards, Webcams, or Integrated Cameras.",
|
||||
"fieldTextTypeRTMP": "Learn to connect here : <a href=\"https://shinobi.video/articles/2019-02-14-how-to-push-streams-to-shinobi-with-rtmp\" target=\"_blank\">Article : How to Push Streams via RTMP to Shinobi</a>",
|
||||
"fieldTextTypeMxPEG": "Mobotix MJPEG Stream",
|
||||
"fieldTextRtmpKey": "Stream Key for incoming streams on the RTMP port.",
|
||||
"fieldTextAutoHostEnable": "Feed the individual pieces required to build a stream URL or provide the full URL and allow Shinobi to parse it for you.",
|
||||
"fieldTextAutoHost": "The full Stream URL.",
|
||||
"fieldTextProtocol": "The protocol that will used to consume the video stream.",
|
||||
"fieldTextRtsptransport": "Standard connection method.",
|
||||
"fieldTextRtsptransportUto": "Let FFMPEG decide. Normally it will try UDP first.",
|
||||
"fieldTextRtspTransport": "The transport protocol your camera will use. TCP is usually the best choice.",
|
||||
"fieldTextRtspTransportAuto": "Let FFMPEG decide. Normally it will try UDP first.",
|
||||
"fieldTextRtspTransportTCP": "Set it to this if UDP starts giving undesired results.",
|
||||
"fieldTextRtspTransportUDP": "FFMPEG tries this first.",
|
||||
"fieldTextRtspTransportHTTP": "Standard connection method.",
|
||||
"fieldTextMuser": "The user login for your camera",
|
||||
"fieldTextMpass": "The password for your camera",
|
||||
"fieldTextHost": "Connection address",
|
||||
"fieldTextPort": "Separate by Commas or a Range",
|
||||
"fieldTextPortforce": "Using the default web port can allow automatic switch to other ports for streams like RTSP.",
|
||||
"fieldTextPortForce": "Using the default web port can allow automatic switch to other ports for streams like RTSP.",
|
||||
"fieldTextPath": "The path to your camera",
|
||||
"fieldTextFatalmax": "The number of times to retry for network connection between the server and camera before setting the monitor to Disabled. No decimals. Set to 0 to retry forever.",
|
||||
"fieldTextSkipping": "Choose if a successful ping is required before a monitor process is started.",
|
||||
"fieldTextIsonvif": "Is this an ONVIF compliant camera?",
|
||||
"fieldTextOnvifnonstandard": "Is this a Non-Standard ONVIF camera?",
|
||||
"fieldTextOnvifport": "ONVIF is usually run on port <code>8000</code>. This can be <code>80</code> as well depending on your camera model.",
|
||||
"fieldTextFatalMax": "The number of times to retry for network connection between the server and camera before setting the monitor to Disabled. No decimals. Set to 0 to retry forever.",
|
||||
"fieldTextSkipPing": "Choose if a successful ping is required before a monitor process is started.",
|
||||
"fieldTextIsOnvif": "Is this an ONVIF compliant camera?",
|
||||
"fieldTextOnvifNonStandard": "Is this a Non-Standard ONVIF camera?",
|
||||
"fieldTextOnvifPort": "ONVIF is usually run on port <code>8000</code>. This can be <code>80</code> as well depending on your camera model.",
|
||||
"fieldTextAduration": "Specify how many microseconds are analyzed to probe the input. Set to 100000 if you are using RTSP and having stream issues.",
|
||||
"fieldTextProbesize": "Specify how big to make the analyzation probe for the input. Set to 100000 if you are using RTSP and having stream issues.",
|
||||
"fieldTextStreamloop": "Loop a static file so the file stream behaves like a live stream.",
|
||||
"fieldTextStreamLoop": "Loop a static file so the file stream behaves like a live stream.",
|
||||
"fieldTextSfps": "Specify the Frame Rate (FPS) in which the camera is providing its stream in.",
|
||||
"fieldTextWallclocktimestampignore": "Base all incoming camera data in camera time instead of server time.",
|
||||
"fieldTextWallClockTimestampIgnore": "Base all incoming camera data in camera time instead of server time.",
|
||||
"fieldTextHeight": "Height of the stream image.",
|
||||
"fieldTextWidth": "Width of the stream image.",
|
||||
"fieldTextAccelerator": "Hardware Acceleration (HWAccel) for decoding streams.",
|
||||
"fieldTextHwaccel": "Decoding Engine",
|
||||
"fieldTextHwaccelvcodec": "Decoding Engine",
|
||||
"fieldTextStreamtype": "Sending FLV encoded frames over WebSocket.",
|
||||
"fieldTextStreamtypeOseidon": "Poseidon is built on Kevin Godell's MP4 processing code. It simulates a streaming MP4 file but using the data of a live stream. Includes Audio. Some browsers can play it like a regular MP4 file. Streams over HTTP or WebSocket.",
|
||||
"fieldTextStreamtypeAse64OverEbsocket": "Sending Base64 encoded frames over WebSocket. This avoids caching but there is no audio.",
|
||||
"fieldTextStreamtypeIncludesUdio": "Similar method to facebook live streams. <b>Includes audio</b> if input provides it. There is a delay of about 4-6 seconds because this method records segments then pushes them to the client rather than push as while it creates them.",
|
||||
"fieldTextStreamflvtype": "This is for the Shinobi dashboard only. Both stream methods are still active and ready to use.",
|
||||
"fieldTextStreamvcodec": "Video codec for streaming.",
|
||||
"fieldTextStreamvcodecUto": "Let FFMPEG choose.",
|
||||
"fieldTextStreamvcodecLibx264": "Used for MP4 video.",
|
||||
"fieldTextStreamvcodecLibx265": "Used for MP4 video.",
|
||||
"fieldTextStreamvcodecCopy": "Used for MP4 video. Has very low CPU usage but cannot use video filters and filesizes may be gigantic. Best to setup your MP4 settings camera-side when using this option.",
|
||||
"fieldTextStreamacodec": "Audio codec for streaming.",
|
||||
"fieldTextStreamacodecUto": "Let FFMPEG choose.",
|
||||
"fieldTextStreamacodecOUdio": "No Audio, this is an option that must be set in some parts of the world due to legal reasons.",
|
||||
"fieldTextStreamacodecLibvorbis": "Used for WebM video.",
|
||||
"fieldTextStreamacodecLibopus": "Used for WebM video.",
|
||||
"fieldTextStreamacodecLibmp3lame": "Used for MP4 video.",
|
||||
"fieldTextStreamacodecAac": "Used for MP4 video.",
|
||||
"fieldTextStreamacodecAc3": "Used for MP4 video.",
|
||||
"fieldTextStreamacodecCopy": "Used for MP4 video. Has very low CPU usage but some audio codecs need custom flags like <code>-strict 2</code> for aac.",
|
||||
"fieldTextHlstime": "How long each video segment should be, in minutes. Each segment will be drawn by the client through an m3u8 file. Shorter segments take less space.",
|
||||
"fieldTextHlslistsize": "The number of segments maximum before deleting old segments automatically.",
|
||||
"fieldTextPresetstream": "Preset flag for certain video encoders. If you find your camera is crashing every few seconds : try leaving it blank.",
|
||||
"fieldTextStreamquality": "Low number means higher quality. Higher number means less quality.",
|
||||
"fieldTextStreamfps": "The speed in which frames are displayed to clients, in Frames Per Second. Be aware there is no default. This can lead to high bandwidth usage.",
|
||||
"fieldTextStreamscalex": "Width of the stream image that is output after processing.",
|
||||
"fieldTextStreamscaley": "Height of the stream image that is output after processing.",
|
||||
"fieldTextStreamrotate": "Change the viewing angle of the video stream.",
|
||||
"fieldTextSignalcheck": "How often your client will check the stream to see if it is alive. This is calculated in minutes.",
|
||||
"fieldTextSignalchecklog": "This is for the client side only. It will display in the log thread when client side signal checks occur.",
|
||||
"fieldTextStreamvf": "Place FFMPEG video filters in this box to affect the streaming portion. No spaces.",
|
||||
"fieldTextTvchannel": "This monitor will have TV Channel features enabled. You will be able to view it in your TV Channel list.",
|
||||
"fieldTextTvchannelid": "A Custom ID for the Channel.",
|
||||
"fieldTextTvchannelgrouptitle": "A Custom Group for the Channel.",
|
||||
"fieldTextStreamtimestamp": "A clock that is burned onto the frames of the video stream.",
|
||||
"fieldTextStreamtimestampfont": "Font File to style your timestamp.",
|
||||
"fieldTextStreamtimestampfontsize": "Font size in pt.",
|
||||
"fieldTextStreamtimestampcolor": "Timstamp text color.",
|
||||
"fieldTextStreamtimestampboxcolor": "Timstamp backdrop color.",
|
||||
"fieldTextStreamtimestampx": "Horiztonal Position of Timestamp",
|
||||
"fieldTextStreamtimestampy": "Vertical Position of Timestamp",
|
||||
"fieldTextStreamwatermark": "An image that is burned onto the frames of the video stream.",
|
||||
"fieldTextStreamwatermarklocation": "Image Location that will be used as Watermark.",
|
||||
"fieldTextStreamwatermarkposition": "An image that is burned onto the frames of the video stream.",
|
||||
"fieldTextDetailSubstreamInputrtsptransportUto": "Let FFMPEG decide. Normally it will try UDP first.",
|
||||
"fieldTextDetailSubstreamInputrtsptransport": "FFMPEG tries this first.",
|
||||
"fieldTextDetailSubstreamOutputstreamtype": "The method that will used to consume the video stream.",
|
||||
"fieldTextDetailSubstreamOutputstreamvcodec": "Video codec for streaming.",
|
||||
"fieldTextDetailSubstreamOutputstreamvcodecUto": "Let FFMPEG choose.",
|
||||
"fieldTextDetailSubstreamOutputstreamvcodecLibx264": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputstreamvcodecLibx265": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputstreamvcodecCopy": "Used for MP4 video. Has very low CPU usage but cannot use video filters and filesizes may be gigantic. Best to setup your MP4 settings camera-side when using this option.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodec": "Audio codec for streaming.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodecUto": "Let FFMPEG choose.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodecOUdio": "No Audio, this is an option that must be set in some parts of the world due to legal reasons.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodecLibvorbis": "Used for WebM video.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodecLibopus": "Used for WebM video.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodecLibmp3lame": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodecAac": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodecAc3": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputstreamacodecCopy": "Used for MP4 video. Has very low CPU usage but some audio codecs need custom flags like <code>-strict 2</code> for aac.",
|
||||
"fieldTextDetailSubstreamOutputhlstime": "How long each video segment should be, in minutes. Each segment will be drawn by the client through an m3u8 file. Shorter segments take less space.",
|
||||
"fieldTextDetailSubstreamOutputhlslistsize": "The number of segments maximum before deleting old segments automatically.",
|
||||
"fieldTextDetailSubstreamOutputpresetstream": "Preset flag for certain video encoders. If you find your camera is crashing every few seconds : try leaving it blank.",
|
||||
"fieldTextDetailSubstreamOutputstreamquality": "Low number means higher quality. Higher number means less quality.",
|
||||
"fieldTextDetailSubstreamOutputstreamfps": "The speed in which frames are displayed to clients, in Frames Per Second. Be aware there is no default. This can lead to high bandwidth usage.",
|
||||
"fieldTextDetailSubstreamOutputstreamscalex": "Width of the stream image that is output after processing.",
|
||||
"fieldTextDetailSubstreamOutputstreamscaley": "Height of the stream image that is output after processing.",
|
||||
"fieldTextDetailSubstreamOutputstreamrotate": "Change the viewing angle of the video stream.",
|
||||
"fieldTextDetailSubstreamOutputsvf": "Place FFMPEG video filters in this box to affect the streaming portion. No spaces.",
|
||||
"fieldTextHwaccelVcodec": "Decoding Engine",
|
||||
"fieldTextStreamType": "The method that will used to consume the video stream.",
|
||||
"fieldTextStreamTypePoseidon": "Poseidon is built on Kevin Godell's MP4 processing code. It simulates a streaming MP4 file but using the data of a live stream. Includes Audio. Some browsers can play it like a regular MP4 file. Streams over HTTP or WebSocket.",
|
||||
"fieldTextStreamTypeBase64OverWebsocket": "Sending Base64 encoded frames over WebSocket. This avoids caching but there is no audio.",
|
||||
"fieldTextStreamTypeMJPEG": "Standard Motion JPEG image. No audio.",
|
||||
"fieldTextStreamTypeFLV": "Sending FLV encoded frames over WebSocket.",
|
||||
"fieldTextStreamTypeHLS(includesAudio)": "Similar method to facebook live streams. <b>Includes audio</b> if input provides it. There is a delay of about 4-6 seconds because this method records segments then pushes them to the client rather than push as while it creates them.",
|
||||
"fieldTextStreamFlvType": "This is for the Shinobi dashboard only. Both stream methods are still active and ready to use.",
|
||||
"fieldTextStreamVcodec": "Video codec for streaming.",
|
||||
"fieldTextStreamVcodecAuto": "Let FFMPEG choose.",
|
||||
"fieldTextStreamVcodecLibx264": "Used for MP4 video.",
|
||||
"fieldTextStreamVcodecLibx265": "Used for MP4 video.",
|
||||
"fieldTextStreamVcodecCopy": "Used for MP4 video. Has very low CPU usage but cannot use video filters and filesizes may be gigantic. Best to setup your MP4 settings camera-side when using this option.",
|
||||
"fieldTextStreamAcodec": "Audio codec for streaming.",
|
||||
"fieldTextStreamAcodecAuto": "Let FFMPEG choose.",
|
||||
"fieldTextStreamAcodecNoAudio": "No Audio, this is an option that must be set in some parts of the world due to legal reasons.",
|
||||
"fieldTextStreamAcodecLibvorbis": "Used for WebM video.",
|
||||
"fieldTextStreamAcodecLibopus": "Used for WebM video.",
|
||||
"fieldTextStreamAcodecLibmp3lame": "Used for MP4 video.",
|
||||
"fieldTextStreamAcodecAac": "Used for MP4 video.",
|
||||
"fieldTextStreamAcodecAc3": "Used for MP4 video.",
|
||||
"fieldTextStreamAcodecCopy": "Used for MP4 video. Has very low CPU usage but some audio codecs need custom flags like <code>-strict 2</code> for aac.",
|
||||
"fieldTextHlsTime": "How long each video segment should be, in minutes. Each segment will be drawn by the client through an m3u8 file. Shorter segments take less space.",
|
||||
"fieldTextHlsListSize": "The number of segments maximum before deleting old segments automatically.",
|
||||
"fieldTextPresetStream": "Preset flag for certain video encoders. If you find your camera is crashing every few seconds : try leaving it blank.",
|
||||
"fieldTextStreamQuality": "Low number means higher quality. Higher number means less quality.",
|
||||
"fieldTextStreamFps": "The speed in which frames are displayed to clients, in Frames Per Second. Be aware there is no default. This can lead to high bandwidth usage.",
|
||||
"fieldTextStreamScaleX": "Width of the stream image that is output after processing.",
|
||||
"fieldTextStreamScaleY": "Height of the stream image that is output after processing.",
|
||||
"fieldTextStreamRotate": "Change the viewing angle of the video stream.",
|
||||
"fieldTextSignalCheck": "How often your client will check the stream to see if it is alive. This is calculated in minutes.",
|
||||
"fieldTextSignalCheckLog": "This is for the client side only. It will display in the log thread when client side signal checks occur.",
|
||||
"fieldTextStreamVf": "Place FFMPEG video filters in this box to affect the streaming portion. No spaces.",
|
||||
"fieldTextTvChannel": "This monitor will have TV Channel features enabled. You will be able to view it in your TV Channel list.",
|
||||
"fieldTextTvChannelId": "A Custom ID for the Channel.",
|
||||
"fieldTextTvChannelGroupTitle": "A Custom Group for the Channel.",
|
||||
"fieldTextStreamTimestamp": "A clock that is burned onto the frames of the video stream.",
|
||||
"fieldTextStreamTimestampFont": "Font File to style your timestamp.",
|
||||
"fieldTextStreamTimestampFontSize": "Font size in pt.",
|
||||
"fieldTextStreamTimestampColor": "Timstamp text color.",
|
||||
"fieldTextStreamTimestampBoxColor": "Timstamp backdrop color.",
|
||||
"fieldTextStreamTimestampX": "Horiztonal Position of Timestamp",
|
||||
"fieldTextStreamTimestampY": "Vertical Position of Timestamp",
|
||||
"fieldTextStreamWatermark": "An image that is burned onto the frames of the video stream.",
|
||||
"fieldTextStreamWatermarkLocation": "Image Location that will be used as Watermark.",
|
||||
"fieldTextStreamWatermarkPosition": "An image that is burned onto the frames of the video stream.",
|
||||
"fieldTextDetailSubstreamInputRtspTransportAuto": "Let FFMPEG decide. Normally it will try UDP first.",
|
||||
"fieldTextDetailSubstreamInputRtspTransportTCP": "Set it to this if UDP starts giving undesired results.",
|
||||
"fieldTextDetailSubstreamInputRtspTransportUDP": "FFMPEG tries this first.",
|
||||
"fieldTextDetailSubstreamOutputStreamType": "The method that will used to consume the video stream.",
|
||||
"fieldTextDetailSubstreamOutputStreamVcodec": "Video codec for streaming.",
|
||||
"fieldTextDetailSubstreamOutputStreamVcodecAuto": "Let FFMPEG choose.",
|
||||
"fieldTextDetailSubstreamOutputStreamVcodecLibx264": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputStreamVcodecLibx265": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputStreamVcodecCopy": "Used for MP4 video. Has very low CPU usage but cannot use video filters and filesizes may be gigantic. Best to setup your MP4 settings camera-side when using this option.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodec": "Audio codec for streaming.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodecAuto": "Let FFMPEG choose.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodecNoAudio": "No Audio, this is an option that must be set in some parts of the world due to legal reasons.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodecLibvorbis": "Used for WebM video.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodecLibopus": "Used for WebM video.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodecLibmp3lame": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodecAac": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodecAc3": "Used for MP4 video.",
|
||||
"fieldTextDetailSubstreamOutputStreamAcodecCopy": "Used for MP4 video. Has very low CPU usage but some audio codecs need custom flags like <code>-strict 2</code> for aac.",
|
||||
"fieldTextDetailSubstreamOutputHlsTime": "How long each video segment should be, in minutes. Each segment will be drawn by the client through an m3u8 file. Shorter segments take less space.",
|
||||
"fieldTextDetailSubstreamOutputHlsListSize": "The number of segments maximum before deleting old segments automatically.",
|
||||
"fieldTextDetailSubstreamOutputPresetStream": "Preset flag for certain video encoders. If you find your camera is crashing every few seconds : try leaving it blank.",
|
||||
"fieldTextDetailSubstreamOutputStreamQuality": "Low number means higher quality. Higher number means less quality.",
|
||||
"fieldTextDetailSubstreamOutputStreamFps": "The speed in which frames are displayed to clients, in Frames Per Second. Be aware there is no default. This can lead to high bandwidth usage.",
|
||||
"fieldTextDetailSubstreamOutputStreamScaleX": "Width of the stream image that is output after processing.",
|
||||
"fieldTextDetailSubstreamOutputStreamScaleY": "Height of the stream image that is output after processing.",
|
||||
"fieldTextDetailSubstreamOutputStreamRotate": "Change the viewing angle of the video stream.",
|
||||
"fieldTextDetailSubstreamOutputSvf": "Place FFMPEG video filters in this box to affect the streaming portion. No spaces.",
|
||||
"fieldTextSnap": "Get the latest frame in JPEG.",
|
||||
"fieldTextExt": "The file type for your recorded video file.",
|
||||
"fieldTextExt4": "This file type is playable is almost all modern web browsers, that includes mobile. The filesize just tends to be larger unless you lower the quality.",
|
||||
"fieldTextExtEb": "Small filesize, low client compatibility. Good for uploading to sites like YouTube.",
|
||||
"fieldTextExtMP4": "This file type is playable is almost all modern web browsers, that includes mobile. The filesize just tends to be larger unless you lower the quality.",
|
||||
"fieldTextExtWebM": "Small filesize, low client compatibility. Good for uploading to sites like YouTube.",
|
||||
"fieldTextVcodec": "Video codec for recording.",
|
||||
"fieldTextCrf": "Low number means higher quality. Higher number means less quality.",
|
||||
"fieldTextPresetrecord": "Preset flag for certain video encoders. If you find your camera is crashing every few seconds : try leaving it blank.",
|
||||
"fieldTextPresetRecord": "Preset flag for certain video encoders. If you find your camera is crashing every few seconds : try leaving it blank.",
|
||||
"fieldTextAcodec": "Audio codec for recording.",
|
||||
"fieldTextFps": "The speed in which frames are recorded to files, Frames Per Second. Be aware there is no default. This can lead to large files. Best to set this camera-side.",
|
||||
"fieldTextRecordscaley": "Height of the stream image.",
|
||||
"fieldTextRecordscalex": "Width of the stream image.",
|
||||
"fieldTextRecordScaleY": "Height of the stream image.",
|
||||
"fieldTextRecordScaleX": "Width of the stream image.",
|
||||
"fieldTextCutoff": "In minutes. When to slice off and start a new video file.",
|
||||
"fieldTextRotate": "Change the recording angle of the video stream.",
|
||||
"fieldTextVf": "Place FFMPEG video filters in this box to affect the recording portion. No spaces.",
|
||||
"fieldTextTimestamp": "A clock that is burned onto the frames of the recorded video.",
|
||||
"fieldTextTimestampfont": "Font File to style your timestamp.",
|
||||
"fieldTextTimestampfontsize": "Font size in pt.",
|
||||
"fieldTextTimestampcolor": "Timstamp text color.",
|
||||
"fieldTextTimestampboxcolor": "Timstamp backdrop color.",
|
||||
"fieldTextTimestampx": "Horiztonal Position of Timestamp",
|
||||
"fieldTextTimestampy": "Vertical Position of Timestamp",
|
||||
"fieldTextTimestampFont": "Font File to style your timestamp.",
|
||||
"fieldTextTimestampFontSize": "Font size in pt.",
|
||||
"fieldTextTimestampColor": "Timstamp text color.",
|
||||
"fieldTextTimestampBoxColor": "Timstamp backdrop color.",
|
||||
"fieldTextTimestampX": "Horiztonal Position of Timestamp",
|
||||
"fieldTextTimestampY": "Vertical Position of Timestamp",
|
||||
"fieldTextWatermark": "An image that is burned onto the frames of the recorded video.",
|
||||
"fieldTextWatermarklocation": "Image Location that will be used as Watermark.",
|
||||
"fieldTextWatermarkposition": "An image that is burned onto the frames of the recorded video.",
|
||||
"fieldTextRecordtimelapse": "Create a JPEG based timelapse.",
|
||||
"fieldTextRecordtimelapsemp4": "Create an MP4 file at the end of each day for the timelapse.",
|
||||
"fieldTextRecordtimelapsewatermark": "An image that is burned onto the frames of the recorded video.",
|
||||
"fieldTextRecordtimelapsewatermarklocation": "Image Location that will be used as Watermark.",
|
||||
"fieldTextRecordtimelapsewatermarkposition": "An image that is burned onto the frames of the recorded video.",
|
||||
"fieldTextCustinput": "Custom Flags that bind to the Input of the FFMPEG process.",
|
||||
"fieldTextCuststream": "Custom Flags that bind to the Stream (client side view) of the FFMPEG process.",
|
||||
"fieldTextCustsnap": "Custom Flags that bind to the Snapshots.",
|
||||
"fieldTextCustrecord": "Custom Flags that bind to the recording of the FFMPEG process.",
|
||||
"fieldTextCustdetect": "Custom Flags that bind to the stream Detector uses for analyzation.",
|
||||
"fieldTextCustdetectobject": "Custom Flags that bind to the stream Detector uses for analyzation.",
|
||||
"fieldTextCustsiprecord": "Custom Flags that bind to the output that the Event-Based Recordings siphon from.",
|
||||
"fieldTextCustomoutput": "Add a custom output like JPEG frames or send data straight to another server.",
|
||||
"fieldTextWatermarkLocation": "Image Location that will be used as Watermark.",
|
||||
"fieldTextWatermarkPosition": "An image that is burned onto the frames of the recorded video.",
|
||||
"fieldTextRecordTimelapse": "Create a JPEG based timelapse.",
|
||||
"fieldTextRecordTimelapseMp4": "Create an MP4 file at the end of each day for the timelapse.",
|
||||
"fieldTextRecordTimelapseWatermark": "An image that is burned onto the frames of the recorded video.",
|
||||
"fieldTextRecordTimelapseWatermarkLocation": "Image Location that will be used as Watermark.",
|
||||
"fieldTextRecordTimelapseWatermarkPosition": "An image that is burned onto the frames of the recorded video.",
|
||||
"fieldTextCustInput": "Custom Flags that bind to the Input of the FFMPEG process.",
|
||||
"fieldTextCustStream": "Custom Flags that bind to the Stream (client side view) of the FFMPEG process.",
|
||||
"fieldTextCustSnap": "Custom Flags that bind to the Snapshots.",
|
||||
"fieldTextCustRecord": "Custom Flags that bind to the recording of the FFMPEG process.",
|
||||
"fieldTextCustDetect": "Custom Flags that bind to the stream Detector uses for analyzation.",
|
||||
"fieldTextCustDetectObject": "Custom Flags that bind to the stream Detector uses for analyzation.",
|
||||
"fieldTextCustSipRecord": "Custom Flags that bind to the output that the Event-Based Recordings siphon from.",
|
||||
"fieldTextCustomOutput": "Add a custom output like JPEG frames or send data straight to another server.",
|
||||
"fieldTextDetector": "This will add another output in the FFMPEG command for the motion detector.",
|
||||
"fieldTextDetectorhttpapi": "Do you want to allow HTTP triggers to this camera?",
|
||||
"fieldTextDetectorsendframes": "Push frames to the connected plugin to be analyzed.",
|
||||
"fieldTextDetectorfps": "How many frames a second to send to the motion detector; 2 is the default.",
|
||||
"fieldTextDetectorscalex": "Width of the image being detected. Smaller sizes take less CPU.",
|
||||
"fieldTextDetectorscaley": "Height of the image being detected. Smaller sizes take less CPU.",
|
||||
"fieldTextDetectorlocktimeout": "Lockout for when the next trigger is allowed, to avoid overloading the database and receiving clients. Measured in milliseconds.",
|
||||
"fieldTextDetectorsave": "Save Motion Events in SQL. This will allow display of motion over video during the time motion events occured in the Power Viewer.",
|
||||
"fieldTextDetectorrecordmethod": "There are multiple ways to begin recording when an event occurs, like motion. Traditional Recording is the most user-friendly.",
|
||||
"fieldTextDetectortrigger": "This will order the camera to record if it is set to \"Watch-Only\" when an Event is detected.",
|
||||
"fieldTextDetectortimeout": "The length of time \"Trigger Record\" will run for. This is read in minutes.",
|
||||
"fieldTextWatchdogreset": "If there is an overlap in trigger record should it reset.",
|
||||
"fieldTextDetectorwebhook": "Send a GET request to a URL with some values from the event.",
|
||||
"fieldTextDetectorwebhooktimeout": "This value is a timer to allow the next running of your Webhook. This value is in minutes.",
|
||||
"fieldTextDetectorcommand": "The command that will run. This is the equivalent of running a shell command from terminal.",
|
||||
"fieldTextDetectorcommandtimeout": "This value is a timer to allow the next running of your script. This value is in minutes.",
|
||||
"fieldTextSnapsecondsinward": "in seconds",
|
||||
"fieldTextDetectorpam": "Use Kevin Godell's Motion Detector. This is built into Shinobi and requires no other configuration to activate.",
|
||||
"fieldTextDetectorsensitivity": "The motion confidence rating must exceed this value to be seen as a trigger. This number correlates directly to the confidence rating returned by the motion detector. This option was previously named \"Indifference\".",
|
||||
"fieldTextDetectormaxsensitivity": "The motion confidence rating must be lower than this value to be seen as a trigger. Leave blank for no maximum. This option was previously named \"Max Indifference\".",
|
||||
"fieldTextDetectorthreshold": "Minimum number of detections to fire a motion event. Detections must be within the detector the threshold divided by detector fps seconds. For example, if detector fps is 2 and trigger threshold is 3, then three detections must occur within 1.5 seconds to trigger a motion event. This threshold is per detection region.",
|
||||
"fieldTextDetectorcolorthreshold": "The amount of difference allowed in a pixel before it is considered motion.",
|
||||
"fieldTextInversetrigger": "To trigger outside specified regions. Will not trigger with Full Frame Detection enabled.",
|
||||
"fieldTextDetectorframe": "This will read the entire frame for pixel differences. This is the same as creating a region that covers the entire screen.",
|
||||
"fieldTextDetectornoisefilter": "Attempt to filter grain or repeated motion at a particular indifference.",
|
||||
"fieldTextDetectornoisefilterrange": "The amount of difference allowed in a pixel before it is considered motion.",
|
||||
"fieldTextDetectornotrigger": "Check if motion has occured on an interval. If motion has occurred the check will be reset.",
|
||||
"fieldTextDetectornotriggertimeout": "Timeout is calculated in minutes.",
|
||||
"fieldTextDetectornotriggerdiscord": "If motion has not been detected after the timeout period you will recieve an Discord notification.",
|
||||
"fieldTextDetectornotriggerwebhook": "Send a GET request to a URL with some values from the event.",
|
||||
"fieldTextDetectornotriggercommand": "The command that will run. This is the equivalent of running a shell command from terminal.",
|
||||
"fieldTextDetectornotriggercommandtimeout": "This value is a timer to allow the next running of your script. This value is in minutes.",
|
||||
"fieldTextDetectoraudio": "Check if Audio has occured at a certiain decible. Decible reading may not be accurate to real-world measurement.",
|
||||
"fieldTextDetectorusedetectobject": "Create frames for sending to any connected Plugin.",
|
||||
"fieldTextDetectorsendframesobject": "Push frames to the connected plugin to be analyzed.",
|
||||
"fieldTextDetectorobjcountinregion": "Count Objects only inside Regions.",
|
||||
"fieldTextDetectorlisenceplate": "Enable License Plate Recognition. OpenALPR plugin has this always enabled.",
|
||||
"fieldTextDetectorlisenceplatecountry": "Choose the type of plates to recognize. Only US and EU are supported at this time.",
|
||||
"fieldTextEventrecordscalex": "Width of the Event-based Recording image that is output after processing.",
|
||||
"fieldTextEventrecordscaley": "Height of the Event-based Recording image that is output after processing.",
|
||||
"fieldTextDetectorbufferhlstime": "How long each video segment should be, in seconds. Each segment will be drawn by the client through an m3u8 file. Shorter segments take less space.",
|
||||
"fieldTextDetectorbufferhlslistsize": "The number of segments maximum before deleting old segments automatically.",
|
||||
"fieldTextDetectorptzfollow": "Follow the largest detected object with PTZ? Requires an Object Detector running or matrices provided with events.",
|
||||
"fieldTextDetectorobjcount": "Count detected objects.",
|
||||
"fieldTextControlinverty": "For When your camera is mounted upside down or uses inverted vertical controls.",
|
||||
"fieldTextDetectorsendvideolength": "In seconds. The length of the video that gets sent to your Notification service, like Email or Discord.",
|
||||
"fieldTextDetectorHttpApi": "Do you want to allow HTTP triggers to this camera?",
|
||||
"fieldTextDetectorSendFrames": "Push frames to the connected plugin to be analyzed.",
|
||||
"fieldTextDetectorFps": "How many frames a second to send to the motion detector; 2 is the default.",
|
||||
"fieldTextDetectorScaleX": "Width of the image being detected. Smaller sizes take less CPU.",
|
||||
"fieldTextDetectorScaleY": "Height of the image being detected. Smaller sizes take less CPU.",
|
||||
"fieldTextDetectorLockTimeout": "Lockout for when the next trigger is allowed, to avoid overloading the database and receiving clients. Measured in milliseconds.",
|
||||
"fieldTextDetectorSave": "Save Motion Events in SQL. This will allow display of motion over video during the time motion events occured in the Power Viewer.",
|
||||
"fieldTextDetectorRecordMethod": "There are multiple ways to begin recording when an event occurs, like motion. Traditional Recording is the most user-friendly.",
|
||||
"fieldTextDetectorTrigger": "This will order the camera to record if it is set to \"Watch-Only\" when an Event is detected.",
|
||||
"fieldTextDetectorTimeout": "The length of time \"Trigger Record\" will run for. This is read in minutes.",
|
||||
"fieldTextWatchdogReset": "If there is an overlap in trigger record should it reset.",
|
||||
"fieldTextDetectorWebhook": "Send a GET request to a URL with some values from the event.",
|
||||
"fieldTextDetectorWebhookTimeout": "This value is a timer to allow the next running of your Webhook. This value is in minutes.",
|
||||
"fieldTextDetectorCommand": "The command that will run. This is the equivalent of running a shell command from terminal.",
|
||||
"fieldTextDetectorCommandTimeout": "This value is a timer to allow the next running of your script. This value is in minutes.",
|
||||
"fieldTextSnapSecondsInward": "in seconds",
|
||||
"fieldTextDetectorPam": "Use Kevin Godell's Motion Detector. This is built into Shinobi and requires no other configuration to activate.",
|
||||
"fieldTextDetectorSensitivity": "The motion confidence rating must exceed this value to be seen as a trigger. This number correlates directly to the confidence rating returned by the motion detector. This option was previously named \"Indifference\".",
|
||||
"fieldTextDetectorMaxSensitivity": "The motion confidence rating must be lower than this value to be seen as a trigger. Leave blank for no maximum. This option was previously named \"Max Indifference\".",
|
||||
"fieldTextDetectorThreshold": "Minimum number of detections to fire a motion event. Detections must be within the detector the threshold divided by detector fps seconds. For example, if detector fps is 2 and trigger threshold is 3, then three detections must occur within 1.5 seconds to trigger a motion event. This threshold is per detection region.",
|
||||
"fieldTextDetectorColorThreshold": "The amount of difference allowed in a pixel before it is considered motion.",
|
||||
"fieldTextInverseTrigger": "To trigger outside specified regions. Will not trigger with Full Frame Detection enabled.",
|
||||
"fieldTextDetectorFrame": "This will read the entire frame for pixel differences. This is the same as creating a region that covers the entire screen.",
|
||||
"fieldTextDetectorNoiseFilter": "Attempt to filter grain or repeated motion at a particular indifference.",
|
||||
"fieldTextDetectorNoiseFilterRange": "The amount of difference allowed in a pixel before it is considered motion.",
|
||||
"fieldTextDetectorNotrigger": "Check if motion has occured on an interval. If motion has occurred the check will be reset.",
|
||||
"fieldTextDetectorNotriggerTimeout": "Timeout is calculated in minutes.",
|
||||
"fieldTextDetectorNotriggerDiscord": "If motion has not been detected after the timeout period you will recieve an Discord notification.",
|
||||
"fieldTextDetectorNotriggerWebhook": "Send a GET request to a URL with some values from the event.",
|
||||
"fieldTextDetectorNotriggerCommand": "The command that will run. This is the equivalent of running a shell command from terminal.",
|
||||
"fieldTextDetectorNotriggerCommandTimeout": "This value is a timer to allow the next running of your script. This value is in minutes.",
|
||||
"fieldTextDetectorAudio": "Check if Audio has occured at a certiain decible. Decible reading may not be accurate to real-world measurement.",
|
||||
"fieldTextDetectorUseDetectObject": "Create frames for sending to any connected Plugin.",
|
||||
"fieldTextDetectorSendFramesObject": "Push frames to the connected plugin to be analyzed.",
|
||||
"fieldTextDetectorObjCountInRegion": "Count Objects only inside Regions.",
|
||||
"fieldTextDetectorLisencePlate": "Enable License Plate Recognition. OpenALPR plugin has this always enabled.",
|
||||
"fieldTextDetectorLisencePlateCountry": "Choose the type of plates to recognize. Only US and EU are supported at this time.",
|
||||
"fieldTextEventRecordScaleX": "Width of the Event-based Recording image that is output after processing.",
|
||||
"fieldTextEventRecordScaleY": "Height of the Event-based Recording image that is output after processing.",
|
||||
"fieldTextDetectorBufferHlsTime": "How long each video segment should be, in seconds. Each segment will be drawn by the client through an m3u8 file. Shorter segments take less space.",
|
||||
"fieldTextDetectorBufferHlsListSize": "The number of segments maximum before deleting old segments automatically.",
|
||||
"fieldTextDetectorPtzFollow": "Follow the largest detected object with PTZ? Requires an Object Detector running or matrices provided with events.",
|
||||
"fieldTextDetectorObjCount": "Count detected objects.",
|
||||
"fieldTextControlInvertY": "For When your camera is mounted upside down or uses inverted vertical controls.",
|
||||
"fieldTextDetectorSendVideoLength": "In seconds. The length of the video that gets sent to your Notification service, like Email or Discord.",
|
||||
"fieldTextLoglevel": "The amount of data to provide while doing the job.",
|
||||
"fieldTextLoglevelIlent": "None. This will silence all logging.",
|
||||
"fieldTextLoglevelAtal": "Display only fatal errors.",
|
||||
"fieldTextLoglevelOnRror": "Display all important errors. Note : this doesn't always show important information.",
|
||||
"fieldTextLoglevelLlArnings": "Display all warnings. Use this if you can't find out what's wrong with your camera.",
|
||||
"fieldTextLoglevelSilent": "None. This will silence all logging.",
|
||||
"fieldTextLoglevelFatal": "Display only fatal errors.",
|
||||
"fieldTextLoglevelOnError": "Display all important errors. Note : this doesn't always show important information.",
|
||||
"fieldTextLoglevelAllWarnings": "Display all warnings. Use this if you can't find out what's wrong with your camera.",
|
||||
"fieldTextSqllog": "Use this with caution as FFMPEG likes to throw up superfluous data at times which can lead to a lot of database rows.",
|
||||
"fieldTextSqllogO": "No is the default.",
|
||||
"fieldTextSqllogEs": "Do this if you are having recurring issues only.",
|
||||
"fieldTextFactoruth": "Enable a secondary requirement for login through one of the enabled methods.",
|
||||
"fieldTextSqllogNo": "No is the default.",
|
||||
"fieldTextSqllogYes": "Do this if you are having recurring issues only.",
|
||||
"fieldTextFactorAuth": "Enable a secondary requirement for login through one of the enabled methods.",
|
||||
"fieldTextMail": "The login for accounts. The main account holder's email address will get notifications.",
|
||||
"fieldTextPass": "Leave blank to keep the same password during settings modification.",
|
||||
"fieldTextPasswordagain": "Must match Password field if you desire to change it.",
|
||||
"fieldTextPasswordAgain": "Must match Password field if you desire to change it.",
|
||||
"fieldTextSize": "The amount of disk space Shinobi will allow to be consumed before purging. This value is read in megabytes.",
|
||||
"fieldTextSizevideopercent": "Percent of Max Storage Amount the videos can record to.",
|
||||
"fieldTextSizetimelapsepercent": "Percent of Max Storage Amount the timelapse frames can record to.",
|
||||
"fieldTextSizefilebinpercent": "Percent of Max Storage Amount the FileBin archive can use.",
|
||||
"fieldTextSizeVideoPercent": "Percent of Max Storage Amount the videos can record to.",
|
||||
"fieldTextSizeTimelapsePercent": "Percent of Max Storage Amount the timelapse frames can record to.",
|
||||
"fieldTextSizeFilebinPercent": "Percent of Max Storage Amount the FileBin archive can use.",
|
||||
"fieldTextDays": "The number of days to keep videos before purging.",
|
||||
"fieldTextEventdays": "The number of days to keep events before purging.",
|
||||
"fieldTextLogdays": "The number of days to keep logs before purging.",
|
||||
"fieldTextEventDays": "The number of days to keep events before purging.",
|
||||
"fieldTextLogDays": "The number of days to keep logs before purging.",
|
||||
"fieldTextLang": "The primary language of text elements. For complete translation add your language in conf.json e.g:<code>\"language\": \"en_CA\",</code>",
|
||||
"fieldTextAudionote": "Sound when information bubble appears.",
|
||||
"fieldTextAudioalert": "Sound when Event occurs.",
|
||||
"fieldTextAudiodelay": "Delay until next time an Event can start an Alert. Measured in seconds.",
|
||||
"fieldTextEventmonpop": "When an Event occurs popout the monitor stream.",
|
||||
"fieldTextRutilterN": "Enable Ir cut fiter. Typically Day mode.",
|
||||
"fieldTextRutilterFf": "Disable Ir cut fiter. Typically Night mode.",
|
||||
"fieldTextRutilterUto": "Ir cut filter is automatically activated by the device.",
|
||||
"fieldTextAudioNote": "Sound when information bubble appears.",
|
||||
"fieldTextAudioAlert": "Sound when Event occurs.",
|
||||
"fieldTextAudioDelay": "Delay until next time an Event can start an Alert. Measured in seconds.",
|
||||
"fieldTextEventMonPop": "When an Event occurs popout the monitor stream.",
|
||||
"fieldTextIrCutFilterOn": "Enable Ir cut fiter. Typically Day mode.",
|
||||
"fieldTextIrCutFilterOff": "Disable Ir cut fiter. Typically Night mode.",
|
||||
"fieldTextIrCutFilterAuto": "Ir cut filter is automatically activated by the device.",
|
||||
"fieldTextIp": "Range or Single",
|
||||
"fieldTextActionshalt": "Make the event do nothing, as if it never happened.",
|
||||
"fieldTextActionsindifference": "Modify minimum indifference required for event.",
|
||||
"fieldTextActionscommand": "You may use this to trigger a script on command.",
|
||||
"fieldTextActionsrecord": "Use Traditional Recording, Hotswap, or Delete Motionless with their currently set options in the Global Detection Settings section.",
|
||||
"fieldTextMapRtsptransportUto": "Let FFMPEG decide. Normally it will try UDP first.",
|
||||
"fieldTextMapRtsptransport": "FFMPEG tries this first.",
|
||||
"fieldTextChannelStreamtype": "Sending FLV encoded frames over WebSocket.",
|
||||
"fieldTextChannelStreamtypeOseidon": "Poseidon is built on Kevin Godell's MP4 processing code. It simulates a streaming MP4 file but using the data of a live stream. Includes Audio. Some browsers can play it like a regular MP4 file. Streams over HTTP or WebSocket.",
|
||||
"fieldTextChannelStreamtypeIncludesUdio": "Similar method to facebook live streams. <b>Includes audio</b> if input provides it. There is a delay of about 4-6 seconds because this method records segments then pushes them to the client rather than push as while it creates them.",
|
||||
"fieldTextChannelStreamvcodec": "Video codec for streaming.",
|
||||
"fieldTextChannelStreamvcodecUto": "Let FFMPEG choose.",
|
||||
"fieldTextChannelStreamvcodecLibx264": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamvcodecLibx265": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamvcodecCopy": "Used for MP4 video. Has very low CPU usage but cannot use video filters and filesizes may be gigantic. Best to setup your MP4 settings camera-side when using this option.",
|
||||
"fieldTextChannelStreamacodec": "Audio codec for streaming.",
|
||||
"fieldTextChannelStreamacodecUto": "Let FFMPEG choose.",
|
||||
"fieldTextChannelStreamacodecOUdio": "No Audio, this is an option that must be set in some parts of the world due to legal reasons.",
|
||||
"fieldTextChannelStreamacodecLibvorbis": "Used for WebM video.",
|
||||
"fieldTextChannelStreamacodecLibopus": "Used for WebM video.",
|
||||
"fieldTextChannelStreamacodecLibmp3lame": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamacodecAac": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamacodecAc3": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamacodecCopy": "Used for MP4 video. Has very low CPU usage but some audio codecs need custom flags like <code>-strict 2</code> for aac.",
|
||||
"fieldTextChannelHlstime": "How long each video segment should be, in minutes. Each segment will be drawn by the client through an m3u8 file. Shorter segments take less space.",
|
||||
"fieldTextChannelHlslistsize": "The number of segments maximum before deleting old segments automatically.",
|
||||
"fieldTextChannelPresetstream": "Preset flag for certain video encoders. If you find your camera is crashing every few seconds : try leaving it blank.",
|
||||
"fieldTextChannelStreamquality": "Low number means higher quality. Higher number means less quality.",
|
||||
"fieldTextChannelStreamfps": "The speed in which frames are displayed to clients, in Frames Per Second. Be aware there is no default. This can lead to high bandwidth usage.",
|
||||
"fieldTextChannelStreamscalex": "Width of the stream image that is output after processing.",
|
||||
"fieldTextChannelStreamscaley": "Height of the stream image that is output after processing.",
|
||||
"fieldTextChannelStreamrotate": "Change the viewing angle of the video stream.",
|
||||
"fieldTextActionsHalt": "Make the event do nothing, as if it never happened.",
|
||||
"fieldTextActionsIndifference": "Modify minimum indifference required for event.",
|
||||
"fieldTextActionsCommand": "You may use this to trigger a script on command.",
|
||||
"fieldTextActionsRecord": "Use Traditional Recording, Hotswap, or Delete Motionless with their currently set options in the Global Detection Settings section.",
|
||||
"fieldTextMapRtspTransportAuto": "Let FFMPEG decide. Normally it will try UDP first.",
|
||||
"fieldTextMapRtspTransportTCP": "Set it to this if UDP starts giving undesired results.",
|
||||
"fieldTextMapRtspTransportUDP": "FFMPEG tries this first.",
|
||||
"fieldTextChannelStreamType": "The method that will used to consume the video stream.",
|
||||
"fieldTextChannelStreamTypePoseidon": "Poseidon is built on Kevin Godell's MP4 processing code. It simulates a streaming MP4 file but using the data of a live stream. Includes Audio. Some browsers can play it like a regular MP4 file. Streams over HTTP or WebSocket.",
|
||||
"fieldTextChannelStreamTypeMJPEG": "Standard Motion JPEG image. No audio.",
|
||||
"fieldTextChannelStreamTypeFLV": "Sending FLV encoded frames over WebSocket.",
|
||||
"fieldTextChannelStreamTypeHLS(includesAudio)": "Similar method to facebook live streams. <b>Includes audio</b> if input provides it. There is a delay of about 4-6 seconds because this method records segments then pushes them to the client rather than push as while it creates them.",
|
||||
"fieldTextChannelStreamVcodec": "Video codec for streaming.",
|
||||
"fieldTextChannelStreamVcodecAuto": "Let FFMPEG choose.",
|
||||
"fieldTextChannelStreamVcodecLibx264": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamVcodecLibx265": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamVcodecCopy": "Used for MP4 video. Has very low CPU usage but cannot use video filters and filesizes may be gigantic. Best to setup your MP4 settings camera-side when using this option.",
|
||||
"fieldTextChannelStreamAcodec": "Audio codec for streaming.",
|
||||
"fieldTextChannelStreamAcodecAuto": "Let FFMPEG choose.",
|
||||
"fieldTextChannelStreamAcodecNoAudio": "No Audio, this is an option that must be set in some parts of the world due to legal reasons.",
|
||||
"fieldTextChannelStreamAcodecLibvorbis": "Used for WebM video.",
|
||||
"fieldTextChannelStreamAcodecLibopus": "Used for WebM video.",
|
||||
"fieldTextChannelStreamAcodecLibmp3lame": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamAcodecAac": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamAcodecAc3": "Used for MP4 video.",
|
||||
"fieldTextChannelStreamAcodecCopy": "Used for MP4 video. Has very low CPU usage but some audio codecs need custom flags like <code>-strict 2</code> for aac.",
|
||||
"fieldTextChannelHlsTime": "How long each video segment should be, in minutes. Each segment will be drawn by the client through an m3u8 file. Shorter segments take less space.",
|
||||
"fieldTextChannelHlsListSize": "The number of segments maximum before deleting old segments automatically.",
|
||||
"fieldTextChannelPresetStream": "Preset flag for certain video encoders. If you find your camera is crashing every few seconds : try leaving it blank.",
|
||||
"fieldTextChannelStreamQuality": "Low number means higher quality. Higher number means less quality.",
|
||||
"fieldTextChannelStreamFps": "The speed in which frames are displayed to clients, in Frames Per Second. Be aware there is no default. This can lead to high bandwidth usage.",
|
||||
"fieldTextChannelStreamScaleX": "Width of the stream image that is output after processing.",
|
||||
"fieldTextChannelStreamScaleY": "Height of the stream image that is output after processing.",
|
||||
"fieldTextChannelStreamRotate": "Change the viewing angle of the video stream.",
|
||||
"fieldTextChannelSvf": "Place FFMPEG video filters in this box to affect the streaming portion. No spaces."
|
||||
}
|