Merge branch 'dev' into 'master'

Over 9000

See merge request Shinobi-Systems/Shinobi!308
cron-addstorage-fix
Moe 2021-06-06 15:59:07 +00:00
commit 7b75f23e96
56 changed files with 6076 additions and 779 deletions

View File

@ -196,9 +196,9 @@ echo "\nCongratulations, Shinobi is now installed!\n"
echo 'To start Shinobi at boot, add a crontab entry for the user "_shinobi" with something like this:\n'
echo '$ doas crontab -u _shinobi -e
echo '$ doas crontab -u _shinobi -e'
@reboot /bin/sh -c "cd /home/_shinobi/Shinobi && pm2 start camera.js cron.js"
echo '@reboot /bin/sh -c "cd /home/_shinobi/Shinobi && pm2 start camera.js cron.js"'
echo "\nYou can access Shinobi at http://$(ifconfig | grep 'inet ' | awk '!/127.0.0.1/ {print $2}'):8080"

View File

@ -4437,14 +4437,20 @@ module.exports = function(s,config,lang){
"color": "navy",
"info": [
{
"field": lang.CSS,
"name": "detail=css",
fieldType:"textarea",
"placeholder": "#main_header{background:#b59f00}",
"field": lang.CSS,
"description": "",
"default": "",
"example": "",
"possible": ""
},
{
"field": lang.hlsOptions,
"name": "localStorage=hlsOptions",
fieldType:"textarea",
"placeholder": "{}",
},
{
"field": lang['Force Monitors Per Row'],
@ -5628,7 +5634,6 @@ module.exports = function(s,config,lang){
"section-pre-class": "col-md-4",
"info": [
{
"id": "monitorStatesSelector",
"field": lang["Monitor"],
"fieldType": "select",
"class": "dark monitors_list",
@ -6041,7 +6046,7 @@ module.exports = function(s,config,lang){
},
}
},
"Montior Configuration Finder": {
"Montior Configuration Finder": {
"section": "Montior Configuration Finder",
"blocks": {
"Search Settings": {
@ -6194,5 +6199,64 @@ module.exports = function(s,config,lang){
},
}
},
"Events": {
"section": "Events",
"blocks": {
"Saved Logs": {
"name": lang["Search Settings"],
"color": "blue",
"section-pre-class": "col-md-4",
"info": [
{
"id": "eventListWithPics-monitors-list",
"field": lang["Type"],
"fieldType": "select",
"possible": [
{
"name": lang['All Monitors'],
"value": "all"
},
{
"name": lang.Monitors,
"optgroup": []
}
]
},
{
"id": "eventListWithPics-daterange",
"field": lang['Date Range'],
},
{
"fieldType": "btn-group",
"btns": [
{
"fieldType": "btn",
"class": "btn-success",
"forForm": true,
"attribute": `type="submit"`,
"btnContent": `${lang['Check']}`,
},
],
}
]
},
"Events Found": {
"name": lang['Events Found'],
"color": "green",
"section-pre-class": "col-md-8 search-parent",
"info": [
{
"field": lang['Search'],
"class": 'search-controller',
},
{
"fieldType": "div",
"id": "eventListWithPics-rows",
"class": "search-body mt-3 row",
}
]
},
}
},
}
}

View File

@ -186,6 +186,8 @@
"Themes": "Themes",
"Videos": "Videos",
"Events": "Events",
"Events Found": "Events Found",
"Recent Events": "Recent Events",
"Streams": "Streams",
"Snapshot": "Snapshot",
"Snapshots": "Snapshots",
@ -425,8 +427,9 @@
"Idle": "Idle",
"Disabled": "Disabled",
"Record": "Record",
"Watch-Only": "Watch-Only",
"Watch Only": "Watch Only",
"Watch-Only": "Watch-Only",
"Set Mode": "Set Mode",
"Toggle Sidebar": "Toggle Sidebar",
"Add Monitor": "Add Monitor",
"Start Recording": "Start Recording",
@ -599,6 +602,8 @@
"Probe Size": "Probe Size",
"Stream Type": "Stream Type",
"# of Allow MJPEG Clients": "# of Allow MJPEG Clients <small>0 for infinite</small>",
"hlsOptions": "HLS Options",
"hlsOptionsInvalid": "HLS Options are Invalid",
"HLS Video Encoder": "Video Encoder",
"HLS Audio Encoder": "Audio Encoder",
"HLS Segment Length": "Segment Length <small>in Seconds</small>",
@ -882,6 +887,7 @@
"coProcessorTextStarted": "coProcessor has started for CPU only outputs.",
"coProcessorTextStopped": "coProcessor has ended.",
"Process Unexpected Exit": "Process Unexpected Exit",
"unexpectedExitText": "Information about this exit will be found before this log. Additionally here is the ffmpeg command that was used when the process crashed.",
"coProcess Unexpected Exit": "coProcess Unexpected Exit",
"Process Crashed for Monitor": "Process Crashed for Monitor",
"coProcess Crashed for Monitor": "coProcess Crashed for Monitor",
@ -1187,6 +1193,10 @@
"Hostname": "Hostname",
"Network": "Network",
"Notice": "Notice",
"Activated": "Activated",
"activatedText": "Your Installation has been Activated.",
"Not Activated": "Not Activated",
"notActivatedText": "Your Installation has failed Activation.",
"getUserInfo": "Get User Information",
"getAllMonitors": "Get All Monitors",
"getAMonitor": "Get a Monitor",

View File

@ -1,6 +1,7 @@
const fs = require('fs');
const path = require('path');
const moment = require('moment');
const request = require('request');
module.exports = (processCwd) => {
const parseJSON = (string) => {
var parsed
@ -78,6 +79,43 @@ module.exports = (processCwd) => {
if(!e){e=new Date};if(!x){x='YYYY-MM-DDTHH-mm-ss'};
return moment(e).format(x);
}
const checkSubscription = (subscriptionId,callback) => {
    // Verify a Shinobi activation subscription against licenses.shinobi.video.
    // `callback(isActivated)` is invoked exactly once; registered
    // onSubscriptionCheck extenders are notified after a live check.
    // NOTE(review): `s` is referenced here but is not visibly declared in this
    // module's `(processCwd) => {...}` scope — confirm it exists at runtime.
    const subscriptionFailed = () => {
        // Banner on both stderr and stdout so it is hard to miss in logs.
        console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        console.error('This Install of Shinobi is NOT Activated')
        console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        s.systemLog('This Install of Shinobi is NOT Activated')
        console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        console.log('https://licenses.shinobi.video/subscribe')
    }
    // Guard clause: missing id or the shipped placeholder means "not activated".
    if(!subscriptionId || subscriptionId === 'sub_XXXXXXXXXXXX'){
        subscriptionFailed()
        callback(false)
        return
    }
    const url = 'https://licenses.shinobi.video/subscribe/check?subscriptionId=' + subscriptionId
    request(url,{
        method: 'GET',
        timeout: 30000
    }, function(err,resp,body){
        const json = s.parseJSON(body)
        if(err)console.log(err,json)
        const hasSubscribed = json && !!json.ok
        callback(hasSubscribed)
        if(hasSubscribed){
            s.systemLog('This Install of Shinobi is Activated')
            if(!json.expired && json.timeExpires){
                s.systemLog(`This License expires on ${json.timeExpires}`)
            }
        }else{
            subscriptionFailed()
        }
        // Let plugins react to the activation result.
        s.onSubscriptionCheckExtensions.forEach(function(extender){
            extender(hasSubscribed,json)
        })
    })
}
return {
parseJSON: parseJSON,
stringJSON: stringJSON,
@ -89,5 +127,6 @@ module.exports = (processCwd) => {
utcToLocal: utcToLocal,
localToUtc: localToUtc,
formattedTime: formattedTime,
checkSubscription: checkSubscription,
}
}

View File

@ -84,7 +84,7 @@ module.exports = function(jsonData,pamDiffResponder){
// writeToStderr(err.stack)
}
}
createPamDiffEngine = function(){
function createPamDiffEngine(){
const regionsAreMasks = jsonData.rawMonitorConfig.details.detector_frame !== '1' && jsonData.rawMonitorConfig.details.inverse_trigger === '1';
if(Object.keys(regionJson).length === 0 || jsonData.rawMonitorConfig.details.detector_frame === '1'){

View File

@ -5,9 +5,6 @@ const spawn = require('child_process').spawn
const isWindows = (process.platform === 'win32' || process.platform === 'win64')
process.send = process.send || function () {};
if(!process.argv[2] || !process.argv[3]){
return writeToStderr('Missing FFMPEG Command String or no command operator')
}
var jsonData = JSON.parse(fs.readFileSync(process.argv[3],'utf8'))
const ffmpegAbsolutePath = process.argv[2].trim()
const ffmpegCommandString = jsonData.cmd
@ -22,9 +19,11 @@ var writeToStderr = function(text){
// stdioWriters[2].write(Buffer.from(`${new Error('writeToStderr').stack}`, 'utf8' ))
}catch(err){
}
// fs.appendFileSync('/home/Shinobi/test.log',text + '\n','utf8')
// fs.appendFileSync('/home/ubuntu/cdn-site/tools/compilers/diycam/Shinobi/test.log',text + '\n','utf8')
}
if(!process.argv[2] || !process.argv[3]){
return writeToStderr('Missing FFMPEG Command String or no command operator')
}
const buildMonitorUrl = function(e,noPath){
var authd = ''
var url

View File

@ -1,13 +1,17 @@
const fs = require('fs')
const spawn = require('child_process').spawn
const isWindows = process.platform === "win32";
const {
parentPort, workerData
} = require('worker_threads');
var writeToStderr = function(text){
// fs.appendFileSync(rawMonitorConfig.sdir + 'errors.log',text + '\n','utf8')
process.stderr.write(Buffer.from(`${text}`, 'utf8' ))
}
if(!process.argv[2] || !process.argv[3]){
return writeToStderr('Missing FFMPEG Command String or no command operator')
// process.stderr.write(Buffer.from(`${text}`, 'utf8' ))
parentPort.postMessage(text)
}
// if(!process.argv[2] || !process.argv[3]){
// return writeToStderr('Missing FFMPEG Command String or no command operator')
// }
process.send = process.send || function () {};
process.on('uncaughtException', function (err) {
writeToStderr('Uncaught Exception occured!');
@ -29,9 +33,10 @@ process.on('SIGTERM', exitAction);
process.on('SIGINT', exitAction);
process.on('exit', exitAction);
var jsonData = JSON.parse(fs.readFileSync(process.argv[3],'utf8'))
// fs.unlink(process.argv[3],()=>{})
const ffmpegAbsolutePath = process.argv[2].trim()
// var jsonData = JSON.parse(fs.readFileSync(process.argv[3],'utf8'))
// const ffmpegAbsolutePath = process.argv[2].trim()
const jsonData = workerData.jsonData
const ffmpegAbsolutePath = workerData.ffmpegAbsolutePath
const ffmpegCommandString = jsonData.cmd
const temporaryImageFile = jsonData.temporaryImageFile
const iconImageFile = jsonData.iconImageFile

View File

@ -5,8 +5,9 @@ const unzipper = require('unzipper')
const fetch = require("node-fetch")
const spawn = require('child_process').spawn
module.exports = async (s,config,lang,app,io) => {
s.debugLog(`+++++++++++CustomAutoLoad Modules++++++++++++`)
const runningInstallProcesses = {}
const modulesBasePath = s.mainDirectory + '/libs/customAutoLoad/'
const modulesBasePath = __dirname + '/customAutoLoad/'
const extractNameFromPackage = (filePath) => {
const filePathParts = filePath.split('/')
const packageName = filePathParts[filePathParts.length - 1].split('.')[0]
@ -16,6 +17,8 @@ module.exports = async (s,config,lang,app,io) => {
return modulesBasePath + name + '/'
}
const getModule = (moduleName) => {
s.debugLog(`+++++++++++++++++++++++`)
s.debugLog(`Loading : ${moduleName}`)
const modulePath = modulesBasePath + moduleName
const stats = fs.lstatSync(modulePath)
const isDirectory = stats.isDirectory()

View File

@ -1,8 +1,10 @@
const fs = require('fs').promises;
const moment = require('moment');
const execSync = require('child_process').execSync;
const exec = require('child_process').exec;
const spawn = require('child_process').spawn;
const request = require('request');
const imageSaveEventLock = {};
// Matrix In Region Libs >
const SAT = require('sat')
const V = SAT.Vector;
@ -16,6 +18,30 @@ module.exports = (s,config,lang,app,io) => {
const {
moveCameraPtzToMatrix
} = require('../control/ptz.js')(s,config,lang)
async function saveImageFromEvent(options,frameBuffer){
    // Persist a detection frame as a timelapse JPEG and register it in SQL.
    // `options` : { ke: groupKey, mid|id: monitorId, time: eventTime, matrices }
    // `frameBuffer` : raw JPEG bytes from the detector.
    // Saves are throttled to one frame per monitor per second via
    // imageSaveEventLock; throttled calls return without writing.
    const monitorId = options.mid || options.id
    const groupKey = options.ke
    if(imageSaveEventLock[groupKey + monitorId])return;
    const eventTime = options.time
    const objectsFound = options.matrices
    const monitorConfig = Object.assign({id: monitorId},s.group[groupKey].rawMonitorConfigurations[monitorId])
    const timelapseRecordingDirectory = s.getTimelapseFrameDirectory({mid: monitorId, ke: groupKey})
    const currentDate = s.formattedTime(eventTime,'YYYY-MM-DD')
    const filename = s.formattedTime(eventTime) + '.jpg'
    const location = timelapseRecordingDirectory + currentDate + '/'
    try{
        await fs.stat(location)
    }catch(err){
        // fix: create missing parent directories as well — a plain mkdir()
        // throws ENOENT when the timelapse base directory does not exist yet.
        await fs.mkdir(location, { recursive: true })
    }
    await fs.writeFile(location + filename,frameBuffer)
    s.createTimelapseFrameAndInsert(monitorConfig,location,filename,eventTime,{
        objects: objectsFound
    })
    // Release the throttle lock after one second.
    imageSaveEventLock[groupKey + monitorId] = setTimeout(function(){
        delete(imageSaveEventLock[groupKey + monitorId])
    },1000)
}
const countObjects = async (event) => {
const matrices = event.details.matrices
const eventsCounted = s.group[event.ke].activeMonitors[event.id].eventsCounted || {}
@ -256,7 +282,7 @@ module.exports = (s,config,lang,app,io) => {
}
// check modified indifference
if(
filter.indifference !== false &&
filter.indifference &&
eventDetails.confidence < parseFloat(filter.indifference)
){
// fails indifference check for modified indifference
@ -322,6 +348,14 @@ module.exports = (s,config,lang,app,io) => {
runMultiTrigger(monitorConfig,eventDetails, d, triggerEvent)
}
//save this detection result in SQL, only coords. not image.
if(d.frame){
saveImageFromEvent({
ke: d.ke,
mid: d.id,
time: eventTime,
matrices: eventDetails.matrices || [],
},d.frame)
}
if(forceSave || (filter.save && monitorDetails.detector_save === '1')){
s.knexQuery({
action: "insert",
@ -382,6 +416,30 @@ module.exports = (s,config,lang,app,io) => {
await extender(d,filter)
}
}
const getEventBasedRecordingUponCompletion = function(options){
    // Resolve with { ok: true, filename?, filePath? } once the monitor's
    // active event-based recording process closes. Resolves immediately
    // (without file info) when no recording process is running.
    // `options` : { ke: groupKey, mid: monitorId }
    const response = {ok: true}
    return new Promise((resolve,reject) => {
        const groupKey = options.ke
        const monitorId = options.mid
        const activeMonitor = s.group[groupKey].activeMonitors[monitorId]
        const eventBasedRecording = activeMonitor.eventBasedRecording
        if(eventBasedRecording.process){
            const monitorConfig = s.group[groupKey].rawMonitorConfigurations[monitorId]
            const recordingDirectory = s.getVideoDirectory(monitorConfig)
            const fileTime = eventBasedRecording.lastFileTime
            const filename = `${fileTime}.mp4`
            // fix: report the recording's actual filename/path — the previous
            // text contained garbled `$(unknown)` placeholders instead of the
            // `${filename}` template expressions.
            response.filename = `${filename}`
            response.filePath = `${recordingDirectory}${filename}`
            eventBasedRecording.process.on('close',function(){
                // Small delay so ffmpeg has finished flushing the file to disk.
                setTimeout(() => {
                    resolve(response)
                },1000)
            })
        }else{
            resolve(response)
        }
    })
}
const createEventBasedRecording = function(d,fileTime){
if(!fileTime)fileTime = s.formattedTime()
const logTitleText = lang["Traditional Recording"]
@ -409,6 +467,7 @@ module.exports = (s,config,lang,app,io) => {
}
if(!activeMonitor.eventBasedRecording.process){
activeMonitor.eventBasedRecording.allowEnd = false;
activeMonitor.eventBasedRecording.lastFileTime = `${fileTime}`;
const runRecord = function(){
var ffmpegError = ''
var error
@ -536,9 +595,9 @@ module.exports = (s,config,lang,app,io) => {
s.onEventTriggerBeforeFilterExtensions.forEach(function(extender){
extender(d,filter)
})
const eventDetails = d.details
const passedEventFilters = checkEventFilters(d,monitorDetails,filter)
if(!passedEventFilters)return
const eventDetails = d.details
const detailString = JSON.stringify(eventDetails)
const eventTime = new Date()
if(
@ -618,5 +677,6 @@ module.exports = (s,config,lang,app,io) => {
legacyFilterEvents: legacyFilterEvents,
triggerEvent: triggerEvent,
addEventDetailsToString: addEventDetailsToString,
getEventBasedRecordingUponCompletion: getEventBasedRecordingUponCompletion,
}
}

View File

@ -159,6 +159,11 @@ module.exports = function(s,config){
s.onGetRamUsageExtensions.push(callback)
}
//
s.onSubscriptionCheckExtensions = []
s.onSubscriptionCheck = function(callback){
s.onSubscriptionCheckExtensions.push(callback)
}
//
/////// VIDEOS ////////
s.insertCompletedVideoExtensions = []
s.insertCompletedVideoExtender = function(callback){

View File

@ -25,48 +25,53 @@ module.exports = async (s,config,lang,onFinish) => {
if(config.ffmpegBinary)config.ffmpegDir = config.ffmpegBinary
s.ffmpeg = function(e){
const ffmpegCommand = [`-progress pipe:5`];
([
buildMainInput(e),
buildMainStream(e),
buildJpegApiOutput(e),
buildMainRecording(e),
buildAudioDetector(e),
buildMainDetector(e),
buildEventRecordingOutput(e),
buildTimelapseOutput(e),
]).forEach(function(commandStringPart){
ffmpegCommand.push(commandStringPart)
})
s.onFfmpegCameraStringCreationExtensions.forEach(function(extender){
extender(e,ffmpegCommand)
})
const stdioPipes = createPipeArray(e)
const ffmpegCommandString = ffmpegCommand.join(' ')
//hold ffmpeg command for log stream
s.group[e.ke].activeMonitors[e.mid].ffmpeg = sanitizedFfmpegCommand(e,ffmpegCommandString)
//clean the string of spatial impurities and split for spawn()
const ffmpegCommandParsed = splitForFFPMEG(ffmpegCommandString)
try{
fs.unlinkSync(e.sdir + 'cmd.txt')
}catch(err){
const ffmpegCommand = [`-progress pipe:5`];
([
buildMainInput(e),
buildMainStream(e),
buildJpegApiOutput(e),
buildMainRecording(e),
buildAudioDetector(e),
buildMainDetector(e),
buildEventRecordingOutput(e),
buildTimelapseOutput(e),
]).forEach(function(commandStringPart){
ffmpegCommand.push(commandStringPart)
})
s.onFfmpegCameraStringCreationExtensions.forEach(function(extender){
extender(e,ffmpegCommand)
})
const stdioPipes = createPipeArray(e)
const ffmpegCommandString = ffmpegCommand.join(' ')
//hold ffmpeg command for log stream
s.group[e.ke].activeMonitors[e.mid].ffmpeg = sanitizedFfmpegCommand(e,ffmpegCommandString)
//clean the string of spatial impurities and split for spawn()
const ffmpegCommandParsed = splitForFFPMEG(ffmpegCommandString)
try{
fs.unlinkSync(e.sdir + 'cmd.txt')
}catch(err){
}
fs.writeFileSync(e.sdir + 'cmd.txt',JSON.stringify({
cmd: ffmpegCommandParsed,
pipes: stdioPipes.length,
rawMonitorConfig: s.group[e.ke].rawMonitorConfigurations[e.id],
globalInfo: {
config: config,
isAtleatOneDetectorPluginConnected: s.isAtleatOneDetectorPluginConnected
}
},null,3),'utf8')
var cameraCommandParams = [
'./libs/cameraThread/singleCamera.js',
config.ffmpegDir,
e.sdir + 'cmd.txt'
]
return spawn('node',cameraCommandParams,{detached: true,stdio: stdioPipes})
fs.writeFileSync(e.sdir + 'cmd.txt',JSON.stringify({
cmd: ffmpegCommandParsed,
pipes: stdioPipes.length,
rawMonitorConfig: s.group[e.ke].rawMonitorConfigurations[e.id],
globalInfo: {
config: config,
isAtleatOneDetectorPluginConnected: s.isAtleatOneDetectorPluginConnected
}
},null,3),'utf8')
var cameraCommandParams = [
__dirname + '/cameraThread/singleCamera.js',
config.ffmpegDir,
e.sdir + 'cmd.txt'
]
return spawn('node',cameraCommandParams,{detached: true,stdio: stdioPipes})
}catch(err){
s.systemLog(err)
return null
}
}
if(!config.ffmpegDir){
if(s.isWin){

View File

@ -126,6 +126,7 @@ module.exports = (s,config,lang) => {
return `drawtext=fontfile=${timestampFont}:text='%{localtime}':x=${timestampX}:y=${timestampY}:fontcolor=${timestampColor}:box=1:boxcolor=${timestampBackgroundColor}:fontsize=${timestampFontSize}`
}
const createInputMap = (e, number, input) => {
// inputs, input map
//`e` is the monitor object
//`x` is an object used to contain temporary values.
const inputFlags = []

View File

@ -247,7 +247,7 @@ Run "npm install ffbinaries" to get a different static FFmpeg downloader.`
console.log('ffbinaries : Downloading FFmpeg. Please Wait...');
ffbinaries.downloadBinaries(['ffmpeg', 'ffprobe'], {
destination: ffbinaryDir,
version : '3.4'
version : '4.2'
},function () {
config.ffmpegDir = ffbinaryDir + 'ffmpeg'
response.msg = 'ffbinaries : FFmpeg Downloaded.'

View File

@ -10,6 +10,9 @@ const connectionTester = require('connection-tester')
const SoundDetection = require('shinobi-sound-detection')
const async = require("async");
const URL = require('url')
const {
Worker
} = require('worker_threads');
const { copyObject, createQueue, queryStringToObject, createQueryStringFromObject } = require('./common.js')
module.exports = function(s,config,lang){
const {
@ -72,6 +75,7 @@ module.exports = function(s,config,lang){
}
s.sendMonitorStatus = function(e){
s.group[e.ke].activeMonitors[e.id].monitorStatus = `${e.status}`
s.group[e.ke].activeMonitors[e.id].monitorStatusCode = `${e.code}`
s.tx(Object.assign(e,{f:'monitor_status'}),'GRP_'+e.ke)
}
s.getMonitorCpuUsage = function(e,callback){
@ -181,40 +185,31 @@ module.exports = function(s,config,lang){
var temporaryImageFile = streamDir + s.gid(5) + '.jpg'
var iconImageFile = streamDir + 'icon.jpg'
var ffmpegCmd = splitForFFPMEG(`-loglevel warning -re -probesize 100000 -analyzeduration 100000 ${inputOptions.join(' ')} -i "${url}" ${outputOptions.join(' ')} -f image2 -an -vf "fps=1" -vframes 1 "${temporaryImageFile}"`)
fs.writeFileSync(s.getStreamsDirectory(monitor) + 'snapCmd.txt',JSON.stringify({
cmd: ffmpegCmd,
temporaryImageFile: temporaryImageFile,
iconImageFile: iconImageFile,
useIcon: options.useIcon,
rawMonitorConfig: s.group[monitor.ke].rawMonitorConfigurations[monitor.mid],
},null,3),'utf8')
var cameraCommandParams = [
s.mainDirectory + '/libs/cameraThread/snapshot.js',
config.ffmpegDir,
s.group[monitor.ke].activeMonitors[monitor.id].sdir + 'snapCmd.txt'
]
var snapProcess = spawn('node',cameraCommandParams,{detached: true})
snapProcess.stderr.on('data',function(data){
s.debugLog(data.toString())
const snapProcess = new Worker(__dirname + '/cameraThread/snapshot.js', {
workerData: {
jsonData: {
cmd: ffmpegCmd,
temporaryImageFile: temporaryImageFile,
iconImageFile: iconImageFile,
useIcon: options.useIcon,
rawMonitorConfig: s.group[monitor.ke].rawMonitorConfigurations[monitor.mid],
},
ffmpegAbsolutePath: config.ffmpegDir,
}
});
snapProcess.on('message', function(data){
s.debugLog(data)
})
snapProcess.on('close',function(data){
snapProcess.on('error', (data) => {
console.log(data)
snapProcess.terminate()
})
snapProcess.on('exit', (code) => {
clearTimeout(snapProcessTimeout)
sendTempImage()
})
var snapProcessTimeout = setTimeout(function(){
var pid = snapProcess.pid
if(s.isWin){
spawn("taskkill", ["/pid", pid, '/t'])
}else{
process.kill(-pid, 'SIGTERM')
}
setTimeout(function(){
if(s.isWin === false){
treekill(pid)
}else{
snapProcess.kill()
}
},dynamicTimeout)
snapProcess.terminate()
},dynamicTimeout)
}catch(err){
console.log(err)
@ -290,67 +285,77 @@ module.exports = function(s,config,lang){
})
}
s.mergeDetectorBufferChunks = function(monitor,callback){
var pathDir = s.dir.streams+monitor.ke+'/'+monitor.id+'/'
var mergedFile = s.formattedTime()+'.mp4'
var mergedFilepath = pathDir+mergedFile
fs.readdir(pathDir,function(err,streamDirItems){
var items = []
var copiedItems = []
var videoLength = s.group[monitor.ke].rawMonitorConfigurations[monitor.id].details.detector_send_video_length
if(!videoLength || videoLength === '')videoLength = '10'
if(videoLength.length === 1)videoLength = '0' + videoLength
var createMerged = function(copiedItems){
var allts = pathDir+items.join('_')
s.fileStats(allts,function(err,stats){
if(err){
//not exist
var cat = 'cat '+copiedItems.join(' ')+' > '+allts
exec(cat,function(){
var merger = spawn(config.ffmpegDir,splitForFFPMEG(('-re -i '+allts+' -acodec copy -vcodec copy -t 00:00:' + videoLength + ' '+pathDir+mergedFile)))
merger.stderr.on('data',function(data){
s.userLog(monitor,{type:"Buffer Merge",msg:data.toString()})
})
merger.on('close',function(){
s.file('delete',allts)
copiedItems.forEach(function(copiedItem){
s.file('delete',copiedItem)
return new Promise((resolve,reject) => {
var pathDir = s.dir.streams+monitor.ke+'/'+monitor.id+'/'
var mergedFile = s.formattedTime()+'.mp4'
var mergedFilepath = pathDir+mergedFile
fs.readdir(pathDir,function(err,streamDirItems){
var items = []
var copiedItems = []
var videoLength = s.group[monitor.ke].rawMonitorConfigurations[monitor.id].details.detector_send_video_length
if(!videoLength || videoLength === '')videoLength = '10'
if(videoLength.length === 1)videoLength = '0' + videoLength
var createMerged = function(copiedItems){
var allts = pathDir+items.join('_')
s.fileStats(allts,function(err,stats){
if(err){
//not exist
var cat = 'cat '+copiedItems.join(' ')+' > '+allts
exec(cat,function(){
var merger = spawn(config.ffmpegDir,splitForFFPMEG(('-re -i '+allts+' -acodec copy -vcodec copy -t 00:00:' + videoLength + ' '+pathDir+mergedFile)))
merger.stderr.on('data',function(data){
s.userLog(monitor,{type:"Buffer Merge",msg:data.toString()})
})
merger.on('close',function(){
s.file('delete',allts)
copiedItems.forEach(function(copiedItem){
s.file('delete',copiedItem)
})
setTimeout(function(){
s.file('delete',mergedFilepath)
},1000 * 60 * 3)
delete(merger)
if(callback)callback(mergedFilepath,mergedFile)
resolve({
filePath: mergedFilepath,
filename: mergedFile,
})
})
setTimeout(function(){
s.file('delete',mergedFilepath)
},1000 * 60 * 3)
delete(merger)
callback(mergedFilepath,mergedFile)
})
})
}else{
//file exist
callback(mergedFilepath,mergedFile)
}
})
}
streamDirItems.forEach(function(filename){
if(filename.indexOf('detectorStream') > -1 && filename.indexOf('.m3u8') === -1){
items.push(filename)
}
})
items.sort()
// items = items.slice(items.length - 5,items.length)
items.forEach(function(filename){
try{
var tempFilename = filename.split('.')
tempFilename[0] = tempFilename[0] + 'm'
tempFilename = tempFilename.join('.')
var tempWriteStream = fs.createWriteStream(pathDir+tempFilename)
tempWriteStream.on('finish', function(){
copiedItems.push(pathDir+tempFilename)
if(copiedItems.length === items.length){
createMerged(copiedItems.sort())
}else{
//file exist
if(callback)callback(mergedFilepath,mergedFile)
resolve({
filePath: mergedFilepath,
filename: mergedFile,
})
}
})
fs.createReadStream(pathDir+filename).pipe(tempWriteStream)
}catch(err){
}
streamDirItems.forEach(function(filename){
if(filename.indexOf('detectorStream') > -1 && filename.indexOf('.m3u8') === -1){
items.push(filename)
}
})
items.sort()
// items = items.slice(items.length - 5,items.length)
items.forEach(function(filename){
try{
var tempFilename = filename.split('.')
tempFilename[0] = tempFilename[0] + 'm'
tempFilename = tempFilename.join('.')
var tempWriteStream = fs.createWriteStream(pathDir+tempFilename)
tempWriteStream.on('finish', function(){
copiedItems.push(pathDir+tempFilename)
if(copiedItems.length === items.length){
createMerged(copiedItems.sort())
}
})
fs.createReadStream(pathDir+filename).pipe(tempWriteStream)
}catch(err){
}
})
})
})
}
@ -729,7 +734,7 @@ module.exports = function(s,config,lang){
s.group[e.ke].activeMonitors[e.id].spawn_exit = function(){
if(s.group[e.ke].activeMonitors[e.id].isStarted === true){
if(e.details.loglevel!=='quiet'){
s.userLog(e,{type:lang['Process Unexpected Exit'],msg:{msg:lang['Process Crashed for Monitor'],cmd:s.group[e.ke].activeMonitors[e.id].ffmpeg}});
s.userLog(e,{type:lang['Process Unexpected Exit'],msg:{msg:lang.unexpectedExitText,cmd:s.group[e.ke].activeMonitors[e.id].ffmpeg}});
}
fatalError(e,'Process Unexpected Exit');
scanForOrphanedVideos(e,{

View File

@ -1,6 +1,9 @@
var fs = require("fs")
var Discord = require("discord.js")
module.exports = function(s,config,lang){
const {
getEventBasedRecordingUponCompletion,
} = require('../events/utils.js')(s,config,lang)
//discord bot
if(config.discordBot === true){
try{
@ -64,14 +67,28 @@ module.exports = function(s,config,lang){
s.group[d.ke].activeMonitors[d.id].detector_discordbot = null
},detector_discordbot_timeout)
if(monitorConfig.details.detector_discordbot_send_video === '1'){
// change to function that captures on going video capture, waits, grabs new video file, slices portion (max for transmission) and prepares for delivery
s.mergeDetectorBufferChunks(d,function(mergedFilepath,filename){
let videoPath = null
let videoName = null
const eventBasedRecording = await getEventBasedRecordingUponCompletion({
ke: d.ke,
mid: d.mid
})
if(eventBasedRecording.filePath){
videoPath = eventBasedRecording.filePath
videoName = eventBasedRecording.filename
}else{
const siftedVideoFileFromRam = await s.mergeDetectorBufferChunks(d)
videoPath = siftedVideoFileFromRam.filePath
videoName = siftedVideoFileFromRam.filename
}
console.log(videoPath,videoName)
if(videoPath){
sendMessage({
author: {
name: s.group[d.ke].rawMonitorConfigurations[d.id].name,
icon_url: config.iconURL
},
title: filename,
title: videoName,
fields: [],
timestamp: d.currentTime,
footer: {
@ -80,17 +97,20 @@ module.exports = function(s,config,lang){
}
},[
{
attachment: mergedFilepath,
name: filename
attachment: videoPath,
name: videoName
}
],d.ke)
})
}
}
const {screenShot, isStaticFile} = await s.getRawSnapshotFromMonitor(monitorConfig,{
secondsInward: monitorConfig.details.snap_seconds_inward
})
if(screenShot){
d.screenshotBuffer = d.screenshotBuffer || d.frame
if(!d.screenshotBuffer){
const { screenShot, isStaticFile } = await s.getRawSnapshotFromMonitor(monitorConfig,{
secondsInward: monitorConfig.details.snap_seconds_inward
})
d.screenshotBuffer = screenShot
}
if(d.screenshotBuffer){
sendMessage({
author: {
name: s.group[d.ke].rawMonitorConfigurations[d.id].name,
@ -106,7 +126,7 @@ module.exports = function(s,config,lang){
}
},[
{
attachment: screenShot,
attachment: d.screenshotBuffer,
name: d.screenshotName+'.jpg'
}
],d.ke)
@ -140,6 +160,8 @@ module.exports = function(s,config,lang){
userDetails.discordbot === '1' &&
userDetails.discordbot_token !== ''
){
s.debugLog(`!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!`)
s.debugLog(`Discord Connecting ${userDetails.discordbot_token}`)
s.group[user.ke].discordBot = new Discord.Client()
s.group[user.ke].discordBot.on('ready', () => {
s.userLog({
@ -184,7 +206,8 @@ module.exports = function(s,config,lang){
}
}
const onMonitorUnexpectedExitForDiscord = (monitorConfig) => {
if(monitorConfig.details.notify_discord === '1' && monitorConfig.details.notify_onUnexpectedExit === '1'){
const isEnabled = monitorConfig.details.detector_discordbot === '1' || monitorConfig.details.notify_discord === '1'
if(isEnabled && monitorConfig.details.notify_onUnexpectedExit === '1'){
const ffmpegCommand = s.group[monitorConfig.ke].activeMonitors[monitorConfig.mid].ffmpeg
const description = lang['Process Crashed for Monitor'] + '\n' + ffmpegCommand
const currentTime = new Date()

View File

@ -4,6 +4,9 @@ const {
checkEmail,
} = require("./emailUtils.js")
module.exports = function(s,config,lang){
const {
getEventBasedRecordingUponCompletion,
} = require('../events/utils.js')(s,config,lang)
// mailing with nodemailer
try{
if(config.mail){
@ -152,18 +155,31 @@ module.exports = function(s,config,lang){
})
}
if(monitorConfig.details.detector_mail_send_video === '1'){
// change to function that captures on going video capture, waits, grabs new video file, slices portion (max for transmission) and prepares for delivery
s.mergeDetectorBufferChunks(d,function(mergedFilepath,filename){
let videoPath = null
let videoName = null
const eventBasedRecording = await getEventBasedRecordingUponCompletion({
ke: d.ke,
mid: d.mid
})
if(eventBasedRecording.filePath){
videoPath = eventBasedRecording.filePath
videoName = eventBasedRecording.filename
}else{
const siftedVideoFileFromRam = await s.mergeDetectorBufferChunks(d)
videoPath = siftedVideoFileFromRam.filePath
videoName = siftedVideoFileFromRam.filename
}
if(videoPath){
fs.readFile(mergedFilepath,function(err,buffer){
if(buffer){
sendMessage({
from: config.mail.from,
to: checkEmail(r.mail),
subject: filename,
subject: videoName,
html: '',
attachments: [
{
filename: filename,
filename: videoName,
content: buffer
}
]
@ -175,8 +191,9 @@ module.exports = function(s,config,lang){
})
}
})
})
}
}
d.screenshotBuffer = d.screenshotBuffer || d.frame
if(!d.screenshotBuffer){
const {screenShot, isStaticFile} = await s.getRawSnapshotFromMonitor(monitorConfig,{
secondsInward: monitorConfig.details.snap_seconds_inward

View File

@ -1,5 +1,8 @@
var fs = require("fs")
module.exports = function(s,config,lang){
const {
getEventBasedRecordingUponCompletion,
} = require('../events/utils.js')(s,config,lang)
//telegram bot
if(config.telegramBot === true){
const TelegramBot = require('node-telegram-bot-api');
@ -58,31 +61,47 @@ module.exports = function(s,config,lang){
s.group[d.ke].activeMonitors[d.id].detector_telegrambot = null
},detector_telegrambot_timeout)
if(monitorConfig.details.detector_telegrambot_send_video === '1'){
// change to function that captures on going video capture, waits, grabs new video file, slices portion (max for transmission) and prepares for delivery
s.mergeDetectorBufferChunks(d,function(mergedFilepath,filename){
let videoPath = null
let videoName = null
const eventBasedRecording = await getEventBasedRecordingUponCompletion({
ke: d.ke,
mid: d.mid
})
if(eventBasedRecording.filePath){
videoPath = eventBasedRecording.filePath
videoName = eventBasedRecording.filename
}else{
const siftedVideoFileFromRam = await s.mergeDetectorBufferChunks(d)
videoPath = siftedVideoFileFromRam.filePath
videoName = siftedVideoFileFromRam.filename
}
if(videoPath){
sendMessage({
title: filename,
title: videoName,
},[
{
type: 'video',
attachment: mergedFilepath,
name: filename
attachment: videoPath,
name: videoName
}
],d.ke)
})
}
}
const {screenShot, isStaticFile} = await s.getRawSnapshotFromMonitor(monitorConfig,{
secondsInward: monitorConfig.details.snap_seconds_inward
})
if(screenShot){
d.screenshotBuffer = d.screenshotBuffer || d.frame
if(!d.screenshotBuffer){
const { screenShot, isStaticFile } = await s.getRawSnapshotFromMonitor(monitorConfig,{
secondsInward: monitorConfig.details.snap_seconds_inward
})
d.screenshotBuffer = screenShot
}
if(d.screenshotBuffer){
sendMessage({
title: lang.Event+' - '+d.screenshotName,
description: lang.EventText1+' '+d.currentTimestamp,
},[
{
type: 'photo',
attachment: screenShot,
attachment: d.screenshotBuffer,
name: d.screenshotName+'.jpg'
}
],d.ke)

View File

@ -288,8 +288,9 @@ module.exports = async (s,config,lang,app,io,currentUse) => {
const initializeAllModules = async () => {
fs.readdir(modulesBasePath,function(err,folderContents){
if(!err && folderContents.length > 0){
getModules(true).forEach((shinobiModule) => {
if(!shinobiModule || shinobiModule.properties.disabled){
var moduleList = getModules(true)
moduleList.forEach((shinobiModule) => {
if(!shinobiModule || shinobiModule.properties.disabled || shinobiModule.properties.disabled === undefined){
return;
}
loadModule(shinobiModule)

View File

@ -3,7 +3,7 @@ module.exports = function(process,__dirname){
var packageJson = require('../package.json')
process.send = process.send || function () {};
process.on('uncaughtException', function (err) {
console.error('Uncaught Exception occured!');
console.error(`Uncaught Exception occured! ${new Date()}`);
console.error(err.stack);
});
// [CTRL] + [C] = exit

View File

@ -1,6 +1,5 @@
var fs = require('fs');
var request = require('request');
var moment = require('moment');
var crypto = require('crypto');
var exec = require('child_process').exec;
@ -9,6 +8,9 @@ module.exports = function(s,config,lang,io){
const {
scanForOrphanedVideos
} = require('./video/utils.js')(s,config,lang)
const {
checkSubscription
} = require('./basic/utils.js')(process.cwd())
return new Promise((resolve, reject) => {
var checkedAdminUsers = {}
console.log('FFmpeg version : '+s.ffmpegVersion)
@ -391,41 +393,6 @@ module.exports = function(s,config,lang,io){
})
}
config.userHasSubscribed = false
var checkSubscription = function(callback){
var subscriptionFailed = function(){
console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
console.error('This Install of Shinobi is NOT Activated')
console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
s.systemLog('This Install of Shinobi is NOT Activated')
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
console.log('https://licenses.shinobi.video/subscribe')
}
if(config.subscriptionId && config.subscriptionId !== 'sub_XXXXXXXXXXXX'){
var url = 'https://licenses.shinobi.video/subscribe/check?subscriptionId=' + config.subscriptionId
request(url,{
method: 'GET',
timeout: 30000
}, function(err,resp,body){
var json = s.parseJSON(body)
if(err)console.log(err,json)
var hasSubcribed = json && !!json.ok
config.userHasSubscribed = hasSubcribed
callback(hasSubcribed)
if(config.userHasSubscribed){
s.systemLog('This Install of Shinobi is Activated')
if(!json.expired){
s.systemLog(`This License expires on ${json.timeExpires}`)
}
}else{
subscriptionFailed()
}
})
}else{
subscriptionFailed()
callback(false)
}
}
//check disk space every 20 minutes
if(config.autoDropCache===true){
setInterval(function(){
@ -444,7 +411,8 @@ module.exports = function(s,config,lang,io){
s.preQueries()
setTimeout(() => {
//check for subscription
checkSubscription(function(){
checkSubscription(config.subscriptionId,function(hasSubcribed){
config.userHasSubscribed = hasSubcribed
//check terminal commander
checkForTerminalCommands(function(){
//load administrators (groups)

View File

@ -13,16 +13,16 @@ module.exports = function(s,config,lang,app,io){
return s.dir.videos+e.ke+'/'+e.id+'_timelapse/';
}
}
s.createTimelapseFrameAndInsert = function(e,location,filename){
s.createTimelapseFrameAndInsert = function(e,location,filename,eventTime,frameDetails){
//e = monitor object
//location = file location
var filePath = location + filename
var fileStats = fs.statSync(filePath)
var details = {}
var details = Object.assign({},frameDetails || {})
if(e.details && e.details.dir && e.details.dir !== ''){
details.dir = e.details.dir
}
var timeNow = new Date()
var timeNow = eventTime || new Date()
var queryInfo = {
ke: e.ke,
mid: e.id,
@ -344,7 +344,7 @@ module.exports = function(s,config,lang,app,io){
}else{
s.closeJsonResponse(res,{ok: false, msg: lang[`Nothing exists`]})
}
})
})
}
}
if(timelapseFramesCache[cacheKey]){

View File

@ -16,6 +16,7 @@ module.exports = function(s,config,lang){
if(!s.group[e.ke].sftp &&
!s.group[e.ke].sftp &&
userDetails.sftp !== '0' &&
userDetails.sftp_save === '1' &&
userDetails.sftp_host &&
userDetails.sftp_host !== ''&&
userDetails.sftp_port &&

View File

@ -752,9 +752,11 @@ module.exports = function(s,config,lang,app,io){
},(err,r) => {
r.forEach(function(v,n){
if(s.group[v.ke] && s.group[v.ke].activeMonitors[v.mid]){
r[n].currentlyWatching = Object.keys(s.group[v.ke].activeMonitors[v.mid].watch).length
r[n].currentCpuUsage = s.group[v.ke].activeMonitors[v.mid].currentCpuUsage
r[n].status = s.group[v.ke].activeMonitors[v.mid].monitorStatus
const activeMonitor = s.group[v.ke].activeMonitors[v.mid]
r[n].currentlyWatching = Object.keys(activeMonitor.watch).length
r[n].currentCpuUsage = activeMonitor.currentCpuUsage
r[n].status = activeMonitor.monitorStatus
r[n].code = activeMonitor.monitorStatusCode
}
var buildStreamURL = function(type,channelNumber){
var streamURL

View File

@ -11,6 +11,9 @@ module.exports = function(s,config,lang,app){
updateSystem,
getSystemInfo,
} = require('./system/utils.js')(config)
const {
checkSubscription
} = require('./basic/utils.js')(process.cwd())
/**
* API : Superuser : Get Logs
*/
@ -138,6 +141,38 @@ module.exports = function(s,config,lang,app){
},res,req)
})
/**
* API : Superuser : Activate Key
*/
app.post(config.webPaths.superApiPrefix+':auth/system/activate', function (req,res){
s.superAuth(req.params,async (resp) => {
var endData = {
ok : true
}
const currentConfig = JSON.parse(fs.readFileSync(s.location.config))
const subscriptionId = s.getPostData(req,'subscriptionId')
if(!subscriptionId){
endData.ok = false
endData.msg = lang.postDataBroken
}else{
s.systemLog('conf.json Modified',{
by: resp.$user.mail,
ip: resp.ip,
old: currentConfig
})
const configError = await modifyConfiguration(Object.assign({
subscriptionId: subscriptionId,
},currentConfig))
if(configError)s.systemLog(configError)
s.tx({f:'save_configuration'},'$')
}
checkSubscription(subscriptionId,function(hasSubcribed){
endData.ok = hasSubcribed
config.userHasSubscribed = hasSubcribed
s.closeJsonResponse(res,endData)
})
},res,req)
})
/**
* API : Superuser : Get users in system
*/
app.all([

1102
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -38,7 +38,7 @@
"node-ssh": "^11.1.1",
"node-telegram-bot-api": "^0.52.0",
"nodemailer": "^6.4.11",
"pam-diff": "github:kevinGodell/pam-diff",
"pam-diff": "^1.0.0",
"path": "^0.12.7",
"pipe2pam": "^0.6.2",
"request": "^2.88.0",
@ -67,12 +67,15 @@
"targets": [
"node12"
],
"scripts": [],
"scripts": [
"libs/cameraThread/detector.js",
"libs/cameraThread/singleCamera.js",
"libs/cameraThread/snapshot.js"
],
"assets": [
"definitions/*",
"languages/*",
"web/*",
"test/*"
"definitions/**/*",
"languages/**/*",
"web/**/*"
]
}
}

4
plugins/deepstack-object/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
conf.json
dist
models
node_modules

View File

@ -0,0 +1,31 @@
# Shinobi DeepStack Object Detection plugin image.
FROM node:12.22.1-buster-slim

# Install all OS tooling in one layer and clean the apt cache to keep the
# image small (the original spread this over several layers and left the
# cache behind; it also had a leftover `RUN ls` debug step, removed here).
RUN apt-get update -y && \
    apt-get install -y wget curl net-tools sudo dos2unix && \
    rm -rf /var/lib/apt/lists/*

RUN mkdir -p /config /home/Shinobi/plugins/deepstack-object
WORKDIR /home/Shinobi/plugins/deepstack-object
COPY . /home/Shinobi/plugins/deepstack-object

# Pull the shared plugin scaffolding from the main Shinobi repository (dev branch).
RUN wget https://gitlab.com/Shinobi-Systems/Shinobi/-/raw/dev/plugins/pluginBase.js -O /home/Shinobi/plugins/deepstack-object/pluginBase.js && \
    wget https://gitlab.com/Shinobi-Systems/Shinobi/-/raw/dev/tools/modifyConfigurationForPlugin.js -O /home/Shinobi/plugins/deepstack-object/modifyConfigurationForPlugin.js && \
    wget https://gitlab.com/Shinobi-Systems/Shinobi/-/raw/dev/plugins/pluginCheck.js -O /home/Shinobi/plugins/pluginCheck.js

RUN npm install pm2 -g && \
    npm install --unsafe-perm

# Entrypoint files may arrive with CRLF endings from Windows checkouts;
# normalize and mark them executable.
RUN dos2unix /home/Shinobi/plugins/deepstack-object/init.sh /home/Shinobi/plugins/deepstack-object/pm2.yml && \
    chmod -f +x /home/Shinobi/plugins/deepstack-object/init.sh /home/Shinobi/plugins/deepstack-object/pm2.yml

EXPOSE 8082
ENTRYPOINT ["/home/Shinobi/plugins/deepstack-object/init.sh"]
CMD [ "pm2-docker", "/home/Shinobi/plugins/deepstack-object/pm2.yml" ]

View File

@ -0,0 +1,19 @@
#!/bin/bash
# One-time installer for the DeepStack Object plugin: refreshes node_modules,
# seeds conf.json from the sample, and registers a random plugin key with the
# main Shinobi configuration.
DIR=$(dirname $0)
echo "Removing existing Node.js modules..."
rm -rf $DIR/node_modules
# NOTE(review): nonInteractiveFlag is assigned but never read in this script.
nonInteractiveFlag=false
if [ ! -e "$DIR/conf.json" ]; then
dontCreateKeyFlag=false
echo "Creating conf.json"
sudo cp $DIR/conf.sample.json $DIR/conf.json
else
echo "conf.json already exists..."
fi
# NOTE(review): when conf.json already exists, dontCreateKeyFlag is never set,
# so the "" = false comparison below is false and key creation is skipped —
# presumably intentional (keep existing key), but confirm.
if [ "$dontCreateKeyFlag" = false ]; then
echo "Adding Random Plugin Key to Main Configuration"
# Derive a random 60-character key from /dev/urandom and write it into the
# main Shinobi conf via the shared helper tool.
node $DIR/../../tools/modifyConfigurationForPlugin.js deepstack-object key=$(head -c 64 < /dev/urandom | sha256sum | awk '{print substr($1,1,60)}')
fi

View File

@ -0,0 +1,70 @@
# Shinobi Video plugin for DeepStack Object Detection
### How to Install DeepStack Object Detection on GPU
> [This document has been rewritten over on ShinobiHub Articles.](https://hub.shinobi.video/articles/view/PcBtEgGuWuEL529)
# Docker Installation
> Install Shinobi Plugin with Docker
> Image is based on `node:12.22.1-buster-slim`.
1. Enter plugin directory. Default Shinobi installation location is `/home/Shinobi`.
```
cd /home/Shinobi/plugins/deepstack-object
```
2. Build Image.
```
docker build --tag shinobi-deepstack-object-image:1.0 .
```
3. Launch the plugin.
- `-e ADD_CONFIG='{"key":"123mypluginkey","host":"172.16.100.238","port":8080,"deepStack":{"host":"172.16.100.238","port":5000,"isSSL":false,"apiKey":"123"}}'` Adds any configuration parameters to the plugin's conf.json file.
- `-p '8082:8082/tcp'` is an optional flag if you decide to run the plugin in host mode.
```
docker run -d --name='shinobi-deepstack-object' -e ADD_CONFIG='{"key":"123mypluginkey","host":"172.16.100.238","port":8080,"deepStack":{"host":"172.16.100.238","port":5000,"isSSL":false,"apiKey":"123"}}' shinobi-deepstack-object-image:1.0
```
** Logs **
```
docker logs /shinobi-deepstack-object
```
** Stop and Remove **
```
docker stop /shinobi-deepstack-object && docker rm /shinobi-deepstack-object
```
### Options (Environment Variables)
| Option | Description | Default |
|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|
| ADD_CONFIG | JSON string of configuration values merged into the plugin's `conf.json` at container startup (e.g. host, port, key, `deepStack` connection details). | `{}` |
# Additional Information
Docker - [Get docker](https://docs.docker.com/get-docker/)
DeepStack - [Getting started](https://docs.deepstack.cc/getting-started/index.html#setting-up-deepstack)
Run DeepStack CPU docker image:
```
sudo docker run -e VISION-FACE=True -e VISION-DETECTION=True -v localstorage:/datastore -p 80:5000 deepquestai/deepstack
```
GPU [installation guide](https://docs.deepstack.cc/using-deepstack-with-nvidia-gpus/#step-1-install-docker)
#### More installation options
[Windows (CPU / GPU support)](https://docs.deepstack.cc/windows/index.html)
[nVidia Jetson](https://docs.deepstack.cc/nvidia-jetson/index.html#using-deepstack-with-nvidia-jetson)
[Raspberry PI](https://docs.deepstack.cc/raspberry-pi/index.html#using-deepstack-on-raspberry-pi-alpha)

View File

@ -0,0 +1,16 @@
{
"plug": "DeepStack-Object",
"host": "localhost",
"tfjsBuild": "cpu",
"port": 8080,
"hostPort": 58084,
"key": "1234567890",
"mode": "client",
"type": "detector",
"deepStack": {
"host": "127.0.0.1",
"port": 5000,
"isSSL": false,
"apiKey": "api key as defined in DeepStack"
}
}

View File

@ -0,0 +1,19 @@
#!/bin/sh
# Container entrypoint: make sure conf.json exists, merge ADD_CONFIG into it,
# then exec the image CMD (pm2-docker).
cd /home/Shinobi/plugins/deepstack-object
if [ ! -e "./conf.json" ]; then
    echo "Creating conf.json"
    sudo cp conf.sample.json conf.json
else
    echo "conf.json already exists..."
fi
# Default to an empty JSON object when ADD_CONFIG is unset or empty.
if [ -z "$ADD_CONFIG" ]; then
    ADD_CONFIG="{}"
fi
# Quote the expansion: the JSON value routinely contains spaces and shell
# metacharacters; unquoted it would be word-split into broken arguments.
node ./modifyConfigurationForPlugin.js deepstack-object "addToConfig=$ADD_CONFIG" maxRetryConnection=100
# Execute Command
echo "Starting $PLUGIN_NAME plugin for Shinobi ..."
exec "$@"

3737
plugins/deepstack-object/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,26 @@
{
"name": "shinobi-deepstack-object",
"author": "Elad Bar",
"version": "1.0.0",
"description": "Object Detection plugin for DeepStack",
"main": "shinobi-deepstack-object.js",
"dependencies": {
"request": "^2.88.0",
"express": "^4.16.2",
"moment": "^2.19.2",
"socket.io": "^2.3.0",
"socket.io-client": "^2.3.0"
},
"devDependencies": {},
"bin": "shinobi-deepstack-object.js",
"pkg": {
"targets": [
"node12"
],
"scripts": [
"../pluginBase.js"
],
"assets": []
},
"disabled": true
}

View File

@ -0,0 +1,4 @@
apps:
- script : '/home/Shinobi/plugins/deepstack-object/shinobi-deepstack-object.js'
name : 'shinobi-deepstack-object'
kill_timeout : 5000

View File

@ -0,0 +1,148 @@
//
// Shinobi - DeepStack Object Detection Plugin
// Copyright (C) 2016-2025 Elad Bar, Moe Alam
//
// Base Init >>
// Loads the shared plugin scaffolding (pluginBase/pluginWorkerBase) with a
// chain of fallbacks: parent plugins directory first, then the local folder.
const fs = require('fs');
const config = require('./conf.json')
const request = require("request")
var s
const {
    workerData
} = require('worker_threads');
if(workerData && workerData.ok === true){
    // Running as a worker thread spawned by the main plugin process.
    try{
        s = require('../pluginWorkerBase.js')(__dirname,config)
    }catch(err){
        console.log(err)
        try{
            s = require('./pluginWorkerBase.js')(__dirname,config)
        }catch(err){
            console.log(err)
            // Top-level return is valid in CommonJS: abort module evaluation.
            return console.log(config.plug,'WORKER : Plugin start has failed. pluginBase.js was not found.')
        }
    }
}else{
    // Running standalone: connect to Shinobi over websocket via pluginBase.
    try{
        s = require('../pluginBase.js')(__dirname,config)
    }catch(err){
        console.log(err)
        try{
            s = require('./pluginBase.js')(__dirname,config)
        }catch(err){
            console.log(err)
            return console.log(config.plug,'Plugin start has failed. pluginBase.js was not found.')
        }
    }
    // NOTE(review): this second require of '../pluginBase.js' duplicates the
    // one above (require cache makes it a no-op on success); its catch branch
    // runs the pluginCheck rate-limit guard — confirm this ordering is intended.
    try{
        s = require('../pluginBase.js')(__dirname,config)
    }catch(err){
        console.log(err)
        try{
            const {
                haltMessage,
                checkStartTime,
                setStartTime,
            } = require('../pluginCheck.js')
            if(!checkStartTime()){
                console.log(haltMessage,new Date())
                // NOTE(review): if pluginBase failed to load, `s` may be
                // undefined here and this call would throw — verify.
                s.disconnectWebSocket()
                return
            }
            setStartTime()
        }catch(err){
            console.log(`pluginCheck failed`)
        }
    }
}
// Base Init />>
// DeepStack connection settings from conf.json; build the v1 API base URL.
const deepStackHost = config.deepStack["host"]
const deepStackPort = config.deepStack["port"]
const deepStackIsSSL = config.deepStack["isSSL"]
const deepStackApiKey = config.deepStack["apiKey"]
const deepStackProtocol = deepStackIsSSL ? "https" : "http"
const baseUrl = `${deepStackProtocol}://${deepStackHost}:${deepStackPort}/v1`
/**
 * POST a frame to a DeepStack vision endpoint and resolve with its predictions.
 * Never rejects: any transport or parse failure resolves with an empty array
 * (same contract as the original implementation).
 * @param {string} requestEndpoint - API path, e.g. `/vision/detection` (default).
 * @param {Buffer} frameBuffer - JPEG frame to analyze.
 * @returns {Promise<Array>} DeepStack `predictions` array (possibly empty).
 */
function deepStackRequest(requestEndpoint,frameBuffer){
    const fullEndPointUrl = `${baseUrl}${requestEndpoint || `/vision/detection`}`
    return new Promise((resolve) => {
        try{
            const form = {
                "image": {
                    value: frameBuffer,
                    options: {
                        filename: 'frame.jpg'
                    }
                }
            }
            if(deepStackApiKey) {
                form["api_key"] = deepStackApiKey
            }
            request.post({url: fullEndPointUrl, formData: form}, function(err,res,body){
                let predictions = []
                if(err){
                    // Transport error: log and resolve empty instead of
                    // attempting to parse a body that never arrived.
                    console.log(err)
                    return resolve(predictions)
                }
                try{
                    // Fall back to a JSON *string*, not an object literal:
                    // JSON.parse({...}) coerces the object to "[object Object]"
                    // and always throws (the original's latent bug).
                    const response = JSON.parse(body || '{"predictions":[]}')
                    predictions = response["predictions"] || []
                }catch(parseErr){
                    console.log(res)
                    console.log(parseErr)
                    console.log(body)
                }
                resolve(predictions)
            })
        }catch(err){
            resolve([])
            console.log(err)
        }
    })
}
/**
 * Run DeepStack object detection on a frame and, when anything is found,
 * emit a Shinobi `trigger` event containing the detection matrices.
 * @param {Buffer} frameBuffer - JPEG frame to analyze.
 * @param {Object} d - detector job (monitor settings under `d.mon`).
 * @param {Function} tx - transmit function back to the Shinobi core.
 * @param {string} frameLocation - unused here; kept for interface parity.
 * @param {Function} callback - invoked once processing completes.
 */
s.detectObject = async function(frameBuffer,d,tx,frameLocation,callback){
    const timeStart = new Date()
    const predictions = await deepStackRequest(`/vision/detection`,frameBuffer)
    if(predictions.length > 0) {
        // Convert DeepStack bounding boxes (min/max corners) into Shinobi
        // matrices (origin + width/height).
        const matrices = predictions.map(function(prediction){
            const xMin = prediction["x_min"]
            const yMin = prediction["y_min"]
            return {
                x: xMin,
                y: yMin,
                width: prediction["x_max"] - xMin,
                height: prediction["y_max"] - yMin,
                tag: prediction["label"],
                confidence: prediction["confidence"],
            }
        })
        // When PAM motion and object detection run separately, prefer the
        // object-specific scale settings; otherwise use the shared ones.
        const separateObjectDetection = d.mon.detector_pam === '1' && d.mon.detector_use_detect_object === '1'
        // Renamed from the original's swapped locals ("width" held scale_y):
        // the emitted imgHeight/imgWidth values are unchanged.
        const scaleY = parseFloat(separateObjectDetection && d.mon.detector_scale_y_object ? d.mon.detector_scale_y_object : d.mon.detector_scale_y)
        const scaleX = parseFloat(separateObjectDetection && d.mon.detector_scale_x_object ? d.mon.detector_scale_x_object : d.mon.detector_scale_x)
        tx({
            f:'trigger',
            id:d.id,
            ke:d.ke,
            details:{
                plug: config.plug,
                name: `DeepStack-Object`,
                reason: 'object',
                matrices: matrices,
                imgHeight: scaleY,
                imgWidth: scaleX,
            },
            frame: frameBuffer
        })
    }
    callback()
}

View File

@ -0,0 +1,59 @@
#!/bin/bash
# Interactive installer for the Shinobi TensorFlow plugin pinned to tfjs 1.7.3.
# Asks whether the target is ARM64 (Jetson Nano / Raspberry Pi) and whether to
# use the GPU build, then installs the matching tfjs-node package and registers
# the plugin key with the main Shinobi configuration.
DIR=$(dirname $0)
sh INSTALL-face.sh
echo "Removing existing Tensorflow Node.js modules..."
rm -rf $DIR/node_modules
npm install yarn -g --unsafe-perm --force
# Replace package.json with the CDN copy pinned for tfjs 1.7.3.
wget -O $DIR/package.json https://cdn.shinobi.video/binaries/tensorflow/1.7.3/package.json
GPU_INSTALL="0"
echo "Shinobi - Are you installing on ARM64? This applies to computers like Jetson Nano and Raspberry Pi Model 3 B+"
echo "(y)es or (N)o"
read armCpu
if [ "$armCpu" = "y" ] || [ "$armCpu" = "Y" ]; then
echo "Shinobi - Is it a Jetson Nano?"
echo "You must be on JetPack 4.3 for this plugin to install."
echo "JetPack 4.3 Image can be found here : https://developer.nvidia.com/jetpack-43-archive"
echo "(y)es or (N)o"
read isItJetsonNano
echo "Shinobi - You may see Unsupported Errors, please wait while patches are applied."
# Point tfjs-node(-gpu) at a prebuilt ARM libtensorflow via its
# custom-binary.json override.
CUSTOM_SCRIPT_LOCATION_PREFIX="node_modules/@tensorflow/tfjs-node"
if [ "$isItJetsonNano" = "y" ] || [ "$isItJetsonNano" = "Y" ]; then
GPU_INSTALL="1"
CUSTOM_SCRIPT_LOCATION="$CUSTOM_SCRIPT_LOCATION_PREFIX-gpu/scripts/custom-binary.json"
npm install @tensorflow/tfjs-node-gpu@1.7.3 --unsafe-perm
echo '{"tf-lib": "https://cdn.shinobi.video/binaries/tensorflow/1.7.3/libtensorflow-gpu-linux-arm64-1.15.0.tar.gz"}' > "$CUSTOM_SCRIPT_LOCATION"
else
CUSTOM_SCRIPT_LOCATION="$CUSTOM_SCRIPT_LOCATION_PREFIX/scripts/custom-binary.json"
npm install @tensorflow/tfjs-node@1.7.3 --unsafe-perm
echo '{"tf-lib": "https://cdn.shinobi.video/binaries/tensorflow/1.7.3/libtensorflow-cpu-linux-arm-1.15.0.tar.gz"}' > "$CUSTOM_SCRIPT_LOCATION"
fi
# NOTE(review): this leaves the ARM branch three directories up from the
# plugin folder, so the npm installs below run from a different cwd than the
# non-ARM branch — confirm this is intentional.
cd ../../..
else
echo "Shinobi - Do you want to install TensorFlow.js with GPU support? "
echo "You can run this installer again to change it."
echo "(y)es or (N)o"
read nodejsinstall
if [ "$nodejsinstall" = "y" ] || [ "$nodejsinstall" = "Y" ]; then
GPU_INSTALL="1"
npm install @tensorflow/tfjs-node-gpu@1.7.3 --unsafe-perm
else
npm install @tensorflow/tfjs-node@1.7.3 --unsafe-perm
fi
fi
# Install remaining plugin dependencies and the pinned model/tfjs packages.
npm install --unsafe-perm
npm install @tensorflow-models/coco-ssd@2.0.3 @tensorflow/tfjs-converter@1.7.3 @tensorflow/tfjs-core@1.7.3 @tensorflow/tfjs-layers@1.7.3 @tensorflow/tfjs-node@1.7.3 --unsafe-perm
# npm audit fix --force
if [ ! -e "./conf.json" ]; then
echo "Creating conf.json"
sudo cp conf.sample.json conf.json
else
echo "conf.json already exists..."
fi
echo "Adding Random Plugin Key to Main Configuration"
# Record whether the GPU or CPU tfjs build was installed so the plugin can
# load the right backend at runtime.
tfjsBuildVal="cpu"
if [ "$GPU_INSTALL" = "1" ]; then
tfjsBuildVal="gpu"
fi
node $DIR/../../tools/modifyConfigurationForPlugin.js tensorflow key=$(head -c 64 < /dev/urandom | sha256sum | awk '{print substr($1,1,60)}') tfjsBuild=$tfjsBuildVal
echo "TF_FORCE_GPU_ALLOW_GROWTH=true" > "$DIR/.env"
echo "#CUDA_VISIBLE_DEVICES=0,2" >> "$DIR/.env"

View File

@ -0,0 +1,172 @@
#!/bin/bash
# Installer for the Shinobi TensorFlow plugin on NVIDIA Jetson (JetPack 4.3/4.4,
# tfjs 2.3.0). Runs interactively by default; non-interactive with flags:
#   --jetson           install the Jetson GPU build
#   --arm              (reserved; generic ARM route is currently disabled)
#   --gpu              install the desktop GPU build
#   --dont-create-key  skip registering a new plugin key
sh INSTALL-face.sh
echo "ARM CPU Installation is currently NOT supported! Jetson Nano with GPU enabled is currently only supported."
echo "Jetson Nano may experience \"Unsupported Errors\", you may ignore them. Patches will be applied."
# Abort unless /etc/nv_tegra_release reports L4T R32 / JetPack 4.3 or 4.4.
if [[ ! $(head -1 /etc/nv_tegra_release) =~ R32.*4\.[34] ]] ; then
    echo "ERROR: not JetPack-4.4"
    exit 1
fi
# Identify the Tegra chip so the matching libtensorflow build can be selected.
cudaCompute=$(cat /sys/module/tegra_fuse/parameters/tegra_chip_id)
# 33 : Nano, TX1
# 24 : TX2
# 25 : Xavier NX and AGX Xavier
DIR=$(dirname $0)
echo $DIR
echo "Replacing package.json for tfjs 2.3.0..."
wget -O $DIR/package.json https://cdn.shinobi.video/binaries/tensorflow/2.3.0/package.json
echo "Removing existing Tensorflow Node.js modules..."
rm -rf $DIR/node_modules
npm install yarn -g --unsafe-perm --force
installJetsonFlag=false
installArmFlag=false
installGpuFlag=false
dontCreateKeyFlag=false
# Parse command-line flags.
# FIX: the original ran `exit` after setting each flag, terminating the script
# before any install work happened, which made every flag a no-op; the flags
# now simply set their variable and parsing continues.
while [ ! $# -eq 0 ]
do
    case "$1" in
        --jetson)
            installJetsonFlag=true
            ;;
        --arm)
            installArmFlag=true
            ;;
        --gpu)
            installGpuFlag=true
            ;;
        --dont-create-key)
            dontCreateKeyFlag=true
            ;;
    esac
    shift
done
if [ "$installJetsonFlag" = true ] && [ "$installArmFlag" = true ]; then
    echo "--jetson and --arm cannot both be set. Exiting..."
    exit 1
fi
if ([ "$installJetsonFlag" = true ] || [ "$installArmFlag" = true ]) && [ "$installGpuFlag" = true ]; then
    echo "--gpu flag cannot be set with --jetson or --arm. Exiting..."
    exit 2
fi
# Any route flag implies non-interactive mode (skip the prompts below).
nonInteractiveFlag=false
if [ "$installJetsonFlag" = true ] || [ "$installArmFlag" = true ] || [ "$installGpuFlag" = true ]; then
    nonInteractiveFlag=true
fi
# Install the plugin's base dependencies plus the pinned tfjs 2.3.0 packages.
manualInstallRequirements() {
    npm install --unsafe-perm
    npm install @tensorflow/tfjs-backend-cpu@2.3.0 @tensorflow/tfjs-backend-webgl@2.3.0 @tensorflow/tfjs-converter@2.3.0 @tensorflow/tfjs-core@2.3.0 @tensorflow/tfjs-layers@2.3.0 @tensorflow/tfjs-node@2.3.0 --unsafe-perm
}
# Rebuild the native addon from source for the CPU build.
runRebuildCpu() {
    npm rebuild @tensorflow/tfjs-node --build-addon-from-source --unsafe-perm
}
# Rebuild the native addon from source for the GPU build.
runRebuildGpu() {
    npm rebuild @tensorflow/tfjs-node-gpu --build-addon-from-source --unsafe-perm
}
# Jetson route: point tfjs-node-gpu at the prebuilt libtensorflow matching
# the detected Tegra chip, then install and rebuild.
installJetson() {
    installGpuFlag=true
    npm install @tensorflow/tfjs-node-gpu@2.3.0 --unsafe-perm
    customBinaryLocation="node_modules/@tensorflow/tfjs-node-gpu/scripts/custom-binary.json"
    # FIX: the original matched the literal string `cudaCompute` (missing $),
    # so every board silently fell through to the default libtensorflow build.
    case "$cudaCompute" in
        "33" ) # Nano and TX1
            echo '{"tf-lib": "https://cdn.shinobi.video/binaries/tensorflow/2.3.0/libtensorflow.tar.gz"}' > "$customBinaryLocation"
            ;;
        "25" ) # Xavier NX and AGX Xavier
            echo '{"tf-lib": "https://cdn.shinobi.video/binaries/tensorflow/2.3.0-xavier/libtensorflow.tar.gz"}' > "$customBinaryLocation"
            ;;
        * ) # default
            echo '{"tf-lib": "https://cdn.shinobi.video/binaries/tensorflow/2.3.0/libtensorflow.tar.gz"}' > "$customBinaryLocation"
            ;;
    esac
    manualInstallRequirements
    chmod -R 777 .
    runRebuildGpu
}
installGpuRoute() {
    installGpuFlag=true
    manualInstallRequirements
    npm install @tensorflow/tfjs-node-gpu@2.3.0 --unsafe-perm
}
installNonGpuRoute() {
    manualInstallRequirements
    npm install @tensorflow/tfjs-node@2.3.0 --unsafe-perm
    runRebuildCpu
}
if [ "$nonInteractiveFlag" = false ]; then
    echo "Shinobi - Are you installing on Jetson Nano or Xavier?"
    echo "You must be on JetPack 4.4 for this plugin to install!"
    echo "(y)es or (N)o"
    read armCpu
    if [ "$armCpu" = "y" ] || [ "$armCpu" = "Y" ]; then
        installJetson
    else
        echo "Shinobi - Do you want to install TensorFlow.js with GPU support? "
        echo "You can run this installer again to change it."
        echo "(y)es or (N)o"
        read nodejsinstall
        if [ "$nodejsinstall" = "y" ] || [ "$nodejsinstall" = "Y" ]; then
            installGpuRoute
        else
            installNonGpuRoute
        fi
    fi
else
    if [ "$installJetsonFlag" = true ]; then
        installJetson
    fi
    if [ "$installGpuFlag" = true ]; then
        installGpuRoute
    else
        installNonGpuRoute
    fi
fi
if [ ! -e "$DIR/conf.json" ]; then
    dontCreateKeyFlag=false
    echo "Creating conf.json"
    sudo cp $DIR/conf.sample.json $DIR/conf.json
else
    echo "conf.json already exists..."
fi
if [ "$dontCreateKeyFlag" = false ]; then
    # Record which tfjs backend was installed and register a random 60-char
    # plugin key with the main Shinobi configuration.
    tfjsBuildVal="cpu"
    if [ "$installGpuFlag" = true ]; then
        tfjsBuildVal="gpu"
    fi
    echo "Adding Random Plugin Key to Main Configuration"
    node $DIR/../../tools/modifyConfigurationForPlugin.js tensorflow key=$(head -c 64 < /dev/urandom | sha256sum | awk '{print substr($1,1,60)}') tfjsBuild=$tfjsBuildVal
fi
echo "TF_FORCE_GPU_ALLOW_GROWTH=true" > "$DIR/.env"
echo "#CUDA_VISIBLE_DEVICES=0,2" >> "$DIR/.env"

View File

@ -0,0 +1,53 @@
#!/bin/bash
# Prepares face-recognition prerequisites: native build tooling, the faces/
# and weights/ directories, pretrained model weights from the Shinobi CDN,
# and the Face Manager customAutoLoad module.
DIR=$(dirname $0)
# Install node-gyp and native build dependencies when missing.
if [ ! -x "$(command -v node-gyp)" ]; then
    # Check if Ubuntu
    if [ -x "$(command -v apt)" ]; then
        sudo apt install node-gyp -y
        sudo apt-get install gcc g++ build-essential libcairo2-dev libpango1.0-dev libjpeg-dev libgif-dev librsvg2-dev -y
    fi
    # Check if Cent OS
    if [ -x "$(command -v yum)" ]; then
        sudo yum install node-gyp -y
        sudo yum install gcc-c++ cairo-devel libjpeg-turbo-devel pango-devel giflib-devel -y
    fi
fi
# Directory where known-face images are stored.
if [ ! -d "./faces" ]; then
    mkdir faces
fi
# Download the pretrained model weights once; skip when the directory exists.
if [ ! -d "./weights" ]; then
    mkdir weights
    if [ ! -x "$(command -v wget)" ]; then
        # Check if Ubuntu
        if [ -x "$(command -v apt)" ]; then
            sudo apt install wget -y
        fi
        # Check if Cent OS
        if [ -x "$(command -v yum)" ]; then
            sudo yum install wget -y
        fi
    fi
    cdnUrl="https://cdn.shinobi.video/weights/plugin-face-weights"
    wget -O weights/face_landmark_68_model-shard1 $cdnUrl/face_landmark_68_model-shard1
    wget -O weights/face_landmark_68_model-weights_manifest.json $cdnUrl/face_landmark_68_model-weights_manifest.json
    wget -O weights/face_landmark_68_tiny_model-shard1 $cdnUrl/face_landmark_68_tiny_model-shard1
    wget -O weights/face_landmark_68_tiny_model-weights_manifest.json $cdnUrl/face_landmark_68_tiny_model-weights_manifest.json
    wget -O weights/face_recognition_model-shard1 $cdnUrl/face_recognition_model-shard1
    wget -O weights/face_recognition_model-shard2 $cdnUrl/face_recognition_model-shard2
    wget -O weights/face_recognition_model-weights_manifest.json $cdnUrl/face_recognition_model-weights_manifest.json
    wget -O weights/mtcnn_model-shard1 $cdnUrl/mtcnn_model-shard1
    wget -O weights/mtcnn_model-weights_manifest.json $cdnUrl/mtcnn_model-weights_manifest.json
    wget -O weights/ssd_mobilenetv1_model-shard1 $cdnUrl/ssd_mobilenetv1_model-shard1
    wget -O weights/ssd_mobilenetv1_model-shard2 $cdnUrl/ssd_mobilenetv1_model-shard2
    wget -O weights/ssd_mobilenetv1_model-weights_manifest.json $cdnUrl/ssd_mobilenetv1_model-weights_manifest.json
    wget -O weights/tiny_face_detector_model-shard1 $cdnUrl/tiny_face_detector_model-shard1
    wget -O weights/tiny_face_detector_model-weights_manifest.json $cdnUrl/tiny_face_detector_model-weights_manifest.json
else
    echo "weights found..."
fi
# Copy the Face Manager customAutoLoad module into the main Shinobi tree once.
if [ ! -e "$DIR/../../libs/customAutoLoad/faceManagerCustomAutoLoadLibrary" ]; then
    echo "Installing Face Manager customAutoLoad Module..."
    sudo cp -r $DIR/faceManagerCustomAutoLoadLibrary $DIR/../../libs/customAutoLoad/faceManagerCustomAutoLoadLibrary
else
    echo "Face Manager customAutoLoad Module already installed..."
fi

View File

@ -0,0 +1,172 @@
#!/bin/bash
# Installer for the Shinobi TensorFlow plugin on NVIDIA Jetson (JetPack 4.3/4.4,
# tfjs 2.3.0) — variant using forced npm installs. Non-interactive flags:
#   --jetson | --arm | --gpu | --dont-create-key
sh INSTALL-face.sh
echo "ARM CPU Installation is currently NOT supported! Jetson Nano with GPU enabled is currently only supported."
echo "Jetson Nano may experience \"Unsupported Errors\", you may ignore them. Patches will be applied."
# Abort unless /etc/nv_tegra_release reports L4T R32 / JetPack 4.3 or 4.4.
if [[ ! $(head -1 /etc/nv_tegra_release) =~ R32.*4\.[34] ]] ; then
    echo "ERROR: not JetPack-4.4"
    exit 1
fi
# Identify the Tegra chip so the matching libtensorflow build can be selected.
cudaCompute=$(cat /sys/module/tegra_fuse/parameters/tegra_chip_id)
# 33 : Nano, TX1
# 24 : TX2
# 25 : Xavier NX and AGX Xavier
DIR=$(dirname $0)
echo $DIR
echo "Replacing package.json for tfjs 2.3.0..."
wget -O $DIR/package.json https://cdn.shinobi.video/binaries/tensorflow/2.3.0/package.json
echo "Removing existing Tensorflow Node.js modules..."
rm -rf $DIR/node_modules
npm install yarn -g --unsafe-perm --force
installJetsonFlag=false
installArmFlag=false
installGpuFlag=false
dontCreateKeyFlag=false
# Parse command-line flags.
# FIX: the original ran `exit` after setting each flag, terminating the script
# before any install work happened; the flags now just set their variable.
while [ ! $# -eq 0 ]
do
    case "$1" in
        --jetson)
            installJetsonFlag=true
            ;;
        --arm)
            installArmFlag=true
            ;;
        --gpu)
            installGpuFlag=true
            ;;
        --dont-create-key)
            dontCreateKeyFlag=true
            ;;
    esac
    shift
done
if [ "$installJetsonFlag" = true ] && [ "$installArmFlag" = true ]; then
    echo "--jetson and --arm cannot both be set. Exiting..."
    exit 1
fi
if ([ "$installJetsonFlag" = true ] || [ "$installArmFlag" = true ]) && [ "$installGpuFlag" = true ]; then
    echo "--gpu flag cannot be set with --jetson or --arm. Exiting..."
    exit 2
fi
# Any route flag implies non-interactive mode (skip the prompts below).
nonInteractiveFlag=false
if [ "$installJetsonFlag" = true ] || [ "$installArmFlag" = true ] || [ "$installGpuFlag" = true ]; then
    nonInteractiveFlag=true
fi
# Install the plugin's base dependencies plus the pinned tfjs 2.3.0 packages.
manualInstallRequirements() {
    npm install --unsafe-perm
    npm install @tensorflow/tfjs-backend-cpu@2.3.0 @tensorflow/tfjs-backend-webgl@2.3.0 @tensorflow/tfjs-converter@2.3.0 @tensorflow/tfjs-core@2.3.0 @tensorflow/tfjs-layers@2.3.0 @tensorflow/tfjs-node@2.3.0 --unsafe-perm --force
}
# Rebuild the native addon from source for the CPU build.
runRebuildCpu() {
    npm rebuild @tensorflow/tfjs-node --build-addon-from-source --unsafe-perm
}
# Rebuild the native addon from source for the GPU build.
runRebuildGpu() {
    npm rebuild @tensorflow/tfjs-node-gpu --build-addon-from-source --unsafe-perm
}
# Jetson route: point tfjs-node-gpu at the prebuilt libtensorflow matching
# the detected Tegra chip, then install and rebuild.
installJetson() {
    installGpuFlag=true
    npm install @tensorflow/tfjs-node-gpu@2.3.0 --unsafe-perm
    customBinaryLocation="node_modules/@tensorflow/tfjs-node-gpu/scripts/custom-binary.json"
    # FIX: the original matched the literal string `cudaCompute` (missing $),
    # so every board silently fell through to the default libtensorflow build.
    case "$cudaCompute" in
        "33" ) # Nano and TX1
            echo '{"tf-lib": "https://cdn.shinobi.video/binaries/tensorflow/2.3.0/libtensorflow.tar.gz"}' > "$customBinaryLocation"
            ;;
        "25" ) # Xavier NX and AGX Xavier
            echo '{"tf-lib": "https://cdn.shinobi.video/binaries/tensorflow/2.3.0-xavier/libtensorflow.tar.gz"}' > "$customBinaryLocation"
            ;;
        * ) # default
            echo '{"tf-lib": "https://cdn.shinobi.video/binaries/tensorflow/2.3.0/libtensorflow.tar.gz"}' > "$customBinaryLocation"
            ;;
    esac
    manualInstallRequirements
    chmod -R 777 .
    runRebuildGpu
}
installGpuRoute() {
    installGpuFlag=true
    manualInstallRequirements
    npm install @tensorflow/tfjs-node-gpu@2.3.0 --unsafe-perm --force
}
installNonGpuRoute() {
    manualInstallRequirements
    npm install @tensorflow/tfjs-node@2.3.0 --unsafe-perm --force
    runRebuildCpu
}
if [ "$nonInteractiveFlag" = false ]; then
    echo "Shinobi - Are you installing on Jetson Nano or Xavier?"
    echo "You must be on JetPack 4.4 for this plugin to install!"
    echo "(y)es or (N)o"
    read armCpu
    if [ "$armCpu" = "y" ] || [ "$armCpu" = "Y" ]; then
        installJetson
    else
        echo "Shinobi - Do you want to install TensorFlow.js with GPU support? "
        echo "You can run this installer again to change it."
        echo "(y)es or (N)o"
        read nodejsinstall
        if [ "$nodejsinstall" = "y" ] || [ "$nodejsinstall" = "Y" ]; then
            installGpuRoute
        else
            installNonGpuRoute
        fi
    fi
else
    if [ "$installJetsonFlag" = true ]; then
        installJetson
    fi
    if [ "$installGpuFlag" = true ]; then
        installGpuRoute
    else
        installNonGpuRoute
    fi
fi
if [ ! -e "$DIR/conf.json" ]; then
    dontCreateKeyFlag=false
    echo "Creating conf.json"
    sudo cp $DIR/conf.sample.json $DIR/conf.json
else
    echo "conf.json already exists..."
fi
if [ "$dontCreateKeyFlag" = false ]; then
    # Record which tfjs backend was installed and register a random 60-char
    # plugin key with the main Shinobi configuration.
    tfjsBuildVal="cpu"
    if [ "$installGpuFlag" = true ]; then
        tfjsBuildVal="gpu"
    fi
    echo "Adding Random Plugin Key to Main Configuration"
    node $DIR/../../tools/modifyConfigurationForPlugin.js tensorflow key=$(head -c 64 < /dev/urandom | sha256sum | awk '{print substr($1,1,60)}') tfjsBuild=$tfjsBuildVal
fi
echo "TF_FORCE_GPU_ALLOW_GROWTH=true" > "$DIR/.env"
echo "#CUDA_VISIBLE_DEVICES=0,2" >> "$DIR/.env"

View File

@ -1,145 +1,161 @@
#!/bin/bash
sh INSTALL-face.sh
DIR=$(dirname $0)
echo "Do not attempt to use this Installer on ARM-based CPUs."
echo "Removing existing Tensorflow Node.js modules..."
rm -rf $DIR/node_modules
if [ -x "$(command -v apt)" ]; then
sudo apt update -y
npm install yarn -g --unsafe-perm --force
installJetsonFlag=false
installArmFlag=false
installGpuFlag=false
dontCreateKeyFlag=false
# Parse CLI flags into the install*Flag variables.
# Fix: the previous version ran `exit` after setting each flag, which
# terminated the installer before anything was installed and made combining
# flags (e.g. --gpu --dont-create-key) impossible; it also made the
# "--jetson and --arm cannot both be set" check below unreachable.
while [ ! $# -eq 0 ];
do
    case "$1" in
        --jetson)
            installJetsonFlag=true
            ;;
        --arm)
            installArmFlag=true
            ;;
        --gpu)
            installGpuFlag=true
            ;;
        --dont-create-key)
            dontCreateKeyFlag=true
            ;;
    esac
    shift
done
# --jetson and --arm are mutually exclusive install targets.
if [ "$installJetsonFlag" = true ] && [ "$installArmFlag" = true ]; then
    echo "--jetson and --arm cannot both be set. Exiting..."
    exit -1
fi
# Check if Cent OS
if [ -x "$(command -v yum)" ]; then
sudo yum update -y
# --gpu targets x86 CUDA builds; it conflicts with the ARM-based targets.
if ([ "$installJetsonFlag" = true ] || [ "$installArmFlag" = true ]) && [ "$installGpuFlag" = true ]; then
    echo "--gpu flag cannot be set with --jetson or --arm. Exiting..."
    exit -2
fi
INSTALL_WITH_GPU="0"
INSTALL_FOR_ARM64="0"
INSTALL_FOR_ARM="0"
TFJS_SUFFIX=""
echo "----------------------------------------"
echo "-- Installing Face Plugin for Shinobi --"
echo "----------------------------------------"
echo "Are you Installing on an ARM CPU?"
echo "like Jetson Nano or Raspberry Pi Model 3 B+. Default is No."
echo "(y)es or (N)o"
read useArm
if [ "$useArm" = "y" ] || [ "$useArm" = "Y" ] || [ "$useArm" = "YES" ] || [ "$useArm" = "yes" ] || [ "$useArm" = "Yes" ]; then
INSTALL_FOR_ARM="1"
echo "Are you Installing on an ARM64 CPU?"
echo "like Jetson Nano. Default is No (64/32-bit)"
echo "(y)es or (N)o"
read useArm64
if [ "$useArm64" = "y" ] || [ "$useArm64" = "Y" ] || [ "$useArm64" = "YES" ] || [ "$useArm64" = "yes" ] || [ "$useArm64" = "Yes" ]; then
INSTALL_FOR_ARM64="1"
fi
nonInteractiveFlag=false
if [ "$installJetsonFlag" = true ] || [ "$installArmFlag" = true ] || [ "$installGpuFlag" = true ]; then
nonInteractiveFlag=true
fi
if [ -d "/usr/local/cuda" ]; then
echo "Do you want to install the plugin with CUDA support?"
echo "Do this if you installed NVIDIA Drivers, CUDA Toolkit, and CuDNN"
echo "(y)es or (N)o"
read usecuda
if [ "$usecuda" = "y" ] || [ "$usecuda" = "Y" ] || [ "$usecuda" = "YES" ] || [ "$usecuda" = "yes" ] || [ "$usecuda" = "Yes" ]; then
INSTALL_WITH_GPU="1"
TFJS_SUFFIX="-gpu"
fi
fi
echo "-----------------------------------"
if [ ! -d "./faces" ]; then
mkdir faces
fi
if [ ! -d "./weights" ]; then
mkdir weights
if [ ! -x "$(command -v wget)" ]; then
# Check if Ubuntu
if [ -x "$(command -v apt)" ]; then
sudo apt install wget -y
fi
# Check if Cent OS
if [ -x "$(command -v yum)" ]; then
sudo yum install wget -y
fi
fi
cdnUrl="https://cdn.shinobi.video/weights/plugin-face-weights"
wget -O weights/face_landmark_68_model-shard1 $cdnUrl/face_landmark_68_model-shard1
wget -O weights/face_landmark_68_model-weights_manifest.json $cdnUrl/face_landmark_68_model-weights_manifest.json
wget -O weights/face_landmark_68_tiny_model-shard1 $cdnUrl/face_landmark_68_tiny_model-shard1
wget -O weights/face_landmark_68_tiny_model-weights_manifest.json $cdnUrl/face_landmark_68_tiny_model-weights_manifest.json
wget -O weights/face_recognition_model-shard1 $cdnUrl/face_recognition_model-shard1
wget -O weights/face_recognition_model-shard2 $cdnUrl/face_recognition_model-shard2
wget -O weights/face_recognition_model-weights_manifest.json $cdnUrl/face_recognition_model-weights_manifest.json
wget -O weights/mtcnn_model-shard1 $cdnUrl/mtcnn_model-shard1
wget -O weights/mtcnn_model-weights_manifest.json $cdnUrl/mtcnn_model-weights_manifest.json
wget -O weights/ssd_mobilenetv1_model-shard1 $cdnUrl/ssd_mobilenetv1_model-shard1
wget -O weights/ssd_mobilenetv1_model-shard2 $cdnUrl/ssd_mobilenetv1_model-shard2
wget -O weights/ssd_mobilenetv1_model-weights_manifest.json $cdnUrl/ssd_mobilenetv1_model-weights_manifest.json
wget -O weights/tiny_face_detector_model-shard1 $cdnUrl/tiny_face_detector_model-shard1
wget -O weights/tiny_face_detector_model-weights_manifest.json $cdnUrl/tiny_face_detector_model-weights_manifest.json
# Install the plugin's package.json dependencies plus the pinned TF.js 2.7.0
# stack shared by every install route (the browser-only backend packages are
# removed again near the end of this script).
manualInstallRequirements() {
    npm install --unsafe-perm
    npm install @tensorflow/tfjs-backend-cpu@2.7.0 @tensorflow/tfjs-backend-webgl@2.7.0 @tensorflow/tfjs-converter@2.7.0 @tensorflow/tfjs-core@2.7.0 @tensorflow/tfjs-layers@2.7.0 @tensorflow/tfjs-node@2.7.0 --unsafe-perm --force
}
# Jetson (ARM64 + CUDA) route: install the GPU binding, then point its binary
# downloader at Shinobi's prebuilt libtensorflow for linux-arm64.
installJetson() {
    installGpuFlag=true
    npm install @tensorflow/tfjs-node-gpu@2.7.0 --unsafe-perm --force
    # NOTE(review): this cd changes the caller's working directory for the rest
    # of the script — confirm later steps expect to run from this package dir.
    cd node_modules/@tensorflow/tfjs-node-gpu
    # custom-binary.json is only consulted on the next rebuild/install of the addon.
    echo '{"tf-lib": "https://cdn.shinobi.video/installers/libtensorflow-gpu-linux-arm64-1.15.0.tar.gz"}' > "scripts/custom-binary.json"
}
# Generic ARM (32-bit) route: CPU binding with a custom prebuilt
# libtensorflow from Shinobi's CDN.
installArm() {
    npm install @tensorflow/tfjs-node@2.7.0 --unsafe-perm --force
    # NOTE(review): this cd changes the caller's working directory for the rest
    # of the script — confirm later steps expect to run from this package dir.
    cd node_modules/@tensorflow/tfjs-node
    # custom-binary.json is only consulted on the next rebuild/install of the addon.
    echo '{"tf-lib": "https://cdn.shinobi.video/installers/libtensorflow-cpu-linux-arm-1.15.0.tar.gz"}' > "scripts/custom-binary.json"
}
# Route: install TensorFlow.js 2.7.0 with GPU (CUDA) support. Sets
# installGpuFlag so the rebuild step and conf.json tfjsBuild value reflect it.
installGpuRoute() {
    installGpuFlag=true
    manualInstallRequirements
    npm install @tensorflow/tfjs-node-gpu@2.7.0 --unsafe-perm --force
}
# Route: install TensorFlow.js 2.7.0 with CPU-only support.
installNonGpuRoute() {
    manualInstallRequirements
    npm install @tensorflow/tfjs-node@2.7.0 --unsafe-perm --force
}
# Rebuild the TF.js native addon from source (CPU variant).
runRebuildCpu() {
    npm rebuild @tensorflow/tfjs-node --build-addon-from-source --unsafe-perm
}
# Rebuild the TF.js native addon from source (GPU variant).
runRebuildGpu() {
    npm rebuild @tensorflow/tfjs-node-gpu --build-addon-from-source --unsafe-perm
}
if [ "$nonInteractiveFlag" = false ]; then
# echo "Shinobi - Are you installing on ARM64? This applies to computers like Jetson Nano and Raspberry Pi Model 3 B+"
# echo "(y)es or (N)o"
# read armCpu
# if [ "$armCpu" = "y" ] || [ "$armCpu" = "Y" ]; then
# echo "Shinobi - Is it a Jetson Nano?"
# echo "You must be on JetPack 4.3 for this plugin to install."
# echo "JetPack 4.3 Image can be found here : https://developer.nvidia.com/jetpack-43-archive"
# echo "(y)es or (N)o"
# read isItJetsonNano
# echo "Shinobi - You may see Unsupported Errors, please wait while patches are applied."
# if [ "$isItJetsonNano" = "y" ] || [ "$isItJetsonNano" = "Y" ]; then
# installJetson
# else
# installArm
# fi
# else
echo "Shinobi - Do you want to install TensorFlow.js with GPU support? "
echo "You can run this installer again to change it."
echo "(y)es or (N)o"
read nodejsinstall
if [ "$nodejsinstall" = "y" ] || [ "$nodejsinstall" = "Y" ]; then
installGpuRoute
else
installNonGpuRoute
fi
# fi
else
echo "weights found..."
if [ "$installJetsonFlag" = true ]; then
installJetson
armAfterInstall
fi
if [ "$installArmFlag" = true ]; then
installArm
armAfterInstall
fi
if [ "$installGpuFlag" = true ]; then
installGpuRoute
else
installNonGpuRoute
fi
fi
# npm install @tensorflow/tfjs-node-gpu@2.7.0
# npm audit fix --force
if [ "$installGpuFlag" = true ]; then
runRebuildGpu
else
runRebuildCpu
fi
echo "-----------------------------------"
# Seed the plugin configuration from the sample when it is missing; a fresh
# conf.json forces key creation below by clearing dontCreateKeyFlag.
if [ -e "./conf.json" ]; then
    echo "conf.json already exists..."
else
    dontCreateKeyFlag=false
    echo "Creating conf.json"
    sudo cp conf.sample.json conf.json
fi
# Copy the Face Manager customAutoLoad module into the main app, once.
if [ -e "$DIR/../../libs/customAutoLoad/faceManagerCustomAutoLoadLibrary" ]; then
    echo "Face Manager customAutoLoad Module already installed..."
else
    echo "Installing Face Manager customAutoLoad Module..."
    sudo cp -r $DIR/faceManagerCustomAutoLoadLibrary $DIR/../../libs/customAutoLoad/faceManagerCustomAutoLoadLibrary
fi
tfjsBuildVal="cpu"
if [ "$INSTALL_WITH_GPU" = "1" ]; then
tfjsBuildVal="gpu"
if [ "$dontCreateKeyFlag" = false ]; then
tfjsBuildVal="cpu"
if [ "$installGpuFlag" = true ]; then
tfjsBuildVal="gpu"
fi
echo "Adding Random Plugin Key to Main Configuration"
node $DIR/../../tools/modifyConfigurationForPlugin.js face key=$(head -c 64 < /dev/urandom | sha256sum | awk '{print substr($1,1,60)}') tfjsBuild=$tfjsBuildVal
fi
echo "-----------------------------------"
echo "Adding Random Plugin Key to Main Configuration"
node $DIR/../../tools/modifyConfigurationForPlugin.js face key=$(head -c 64 < /dev/urandom | sha256sum | awk '{print substr($1,1,60)}') tfjsBuild=$tfjsBuildVal
echo "-----------------------------------"
echo "Getting node-gyp to build C++ modules"
# Install node-gyp plus the native build toolchain (compilers and the
# cairo/pango/jpeg/gif image libraries) only when node-gyp is not on PATH.
if [ ! -x "$(command -v node-gyp)" ]; then
    # Check if Ubuntu
    if [ -x "$(command -v apt)" ]; then
        sudo apt install node-gyp -y
        sudo apt-get install gcc g++ build-essential libcairo2-dev libpango1.0-dev libjpeg-dev libgif-dev librsvg2-dev -y
    fi
    # Check if Cent OS
    if [ -x "$(command -v yum)" ]; then
        sudo yum install node-gyp -y
        sudo yum install gcc-c++ cairo-devel libjpeg-turbo-devel pango-devel giflib-devel -y
    fi
fi
sudo npm install --unsafe-perm
sudo npm install node-gyp -g --unsafe-perm --force
echo "-----------------------------------"
# echo "Getting C++ module : @tensorflow/tfjs-node@0.1.21"
# echo "https://github.com/tensorflow/tfjs-node"
# npm install @tensorflow/tfjs-converter@1.7.4 @tensorflow/tfjs-layers@1.7.4 --unsafe-perm
# Report which variant of tfjs-node will be installed.
# Fix: the messages previously misspelled "tfjs" as "tjfs".
if [ "$INSTALL_WITH_GPU" = "1" ]; then
    echo "GPU version of tfjs : https://github.com/tensorflow/tfjs-node-gpu"
else
    echo "CPU version of tfjs : https://github.com/tensorflow/tfjs-node"
fi
npm install @tensorflow/tfjs-node$TFJS_SUFFIX --unsafe-perm
if [ "$INSTALL_FOR_ARM" = "1" ]; then
BINARY_LOCATION="node_modules/@tensorflow/tfjs-node$TFJS_SUFFIX/scripts/custom-binary.json"
if [ "$INSTALL_FOR_ARM64" = "1" ]; then
echo "{
\"tf-lib\": \"https://cdn.shinobi.video/binaries/libtensorflow-gpu-linux-arm64-1.15.0.tar.gz\"
}" > $BINARY_LOCATION
else
echo "{
\"tf-lib\": \"https://cdn.shinobi.video/binaries/libtensorflow-cpu-linux-arm-1.15.0.tar.gz\"
}" > $BINARY_LOCATION
fi
npm rebuild @tensorflow/tfjs-node$TFJS_SUFFIX --build-addon-from-source --unsafe-perm
fi
# The browser-only TF.js backends are not needed by the Node runtime; drop them.
rm -rf $DIR/node_modules/@tensorflow/tfjs-backend-cpu
rm -rf $DIR/node_modules/@tensorflow/tfjs-backend-webgl
echo "-----------------------------------"
echo "Start the plugin with pm2 like so :"
echo "pm2 start shinobi-face.js"
echo "-----------------------------------"
echo "Start the plugin without pm2 :"
echo "node shinobi-face.js"
# Default runtime environment for the plugin process.
echo "TF_FORCE_GPU_ALLOW_GROWTH=true" > "$DIR/.env"
echo "#CUDA_VISIBLE_DEVICES=0,2" >> "$DIR/.env"

View File

@ -207,6 +207,7 @@ var addAwaitStatements = async function(){
imgWidth: imgWidth,
ms: endTime - startTime
},
frame: frameBuffer
})
}
}

View File

@ -98,8 +98,8 @@ s.detectObject = function(buffer,d,tx,frameLocation,callback){
matrices: matrices,
imgHeight: d.mon.detector_scale_y,
imgWidth: d.mon.detector_scale_x,
frame: d.base64
}
},
frame: buffer
})
}
callback()

View File

@ -47,9 +47,12 @@ if(workerData && workerData.ok === true){
var ready = false;
const spawn = require('child_process').spawn;
var child = null
function debugLog(...args){
if(config.debugLog === true)console.log(...args)
}
function respawn() {
console.log("respawned python",(new Date()))
debugLog("respawned python",(new Date()))
const theChild = spawn('python3', ['-u', currentDirectory + 'detect_image.py']);
var lastStatusLog = new Date();
@ -62,19 +65,19 @@ function respawn() {
var rawString = data.toString('utf8');
if (new Date() - lastStatusLog > 5000) {
lastStatusLog = new Date();
console.log(rawString, new Date());
debugLog(rawString, lastStatusLog);
}
var messages = rawString.split('\n')
messages.forEach((message) => {
if (message === "") return;
var obj = JSON.parse(message)
if (obj.type === "error") {
console.log("Script got error: " + message.data, new Date());
debugLog("Script got error: " + message.data, new Date());
throw message.data;
}
if (obj.type === "info" && obj.data === "ready") {
console.log("set ready true")
debugLog("set ready true")
ready = true;
} else {
if (obj.type !== "data" && obj.type !== "info") {
@ -128,7 +131,7 @@ async function process(buffer, type) {
s.detectObject = function (buffer, d, tx, frameLocation, callback) {
process(buffer).then((resp) => {
var results = resp.data
//console.log(resp.time)
//debugLog(resp.time)
if (Array.isArray(results) && results[0]) {
var mats = []
results.forEach(function (v) {
@ -156,7 +159,8 @@ s.detectObject = function (buffer, d, tx, frameLocation, callback) {
imgHeight: width,
imgWidth: height,
time: resp.time
}
},
frame: buffer
})
}
callback()

View File

@ -0,0 +1,47 @@
# GPU-enabled build environment for the Shinobi TensorFlow plugin.
FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
RUN apt update -y
# Non-interactive apt so tzdata does not prompt during the image build.
ENV DEBIAN_FRONTEND=noninteractive
RUN apt -y install tzdata
RUN apt install wget curl net-tools -y
RUN apt install -y sudo dos2unix
RUN apt -y install curl dirmngr apt-transport-https lsb-release ca-certificates
# Node.js 12.x from NodeSource.
RUN curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
RUN apt -y install nodejs
RUN mkdir -p /config
RUN mkdir -p /home/Shinobi/plugins/tensorflow
WORKDIR /home/Shinobi/plugins/tensorflow
COPY . /home/Shinobi/plugins/tensorflow
# Shared plugin scaffolding fetched from the main Shinobi repository (dev branch).
RUN wget https://gitlab.com/Shinobi-Systems/Shinobi/-/raw/dev/plugins/pluginBase.js -O /home/Shinobi/plugins/tensorflow/pluginBase.js
RUN wget https://gitlab.com/Shinobi-Systems/Shinobi/-/raw/dev/tools/modifyConfigurationForPlugin.js -O /home/Shinobi/plugins/tensorflow/modifyConfigurationForPlugin.js
RUN wget https://gitlab.com/Shinobi-Systems/Shinobi/-/raw/dev/plugins/pluginCheck.js -O /home/Shinobi/plugins/pluginCheck.js
RUN ls /home/Shinobi/plugins/tensorflow
# Toolchain for building native Node addons.
RUN apt install -y python build-essential
RUN apt install -y \
    make \
    g++ \
    gcc \
    node-pre-gyp
RUN npm install pm2 -g
RUN npm install --unsafe-perm
RUN npm install @tensorflow/tfjs-backend-cpu@2.7.0 @tensorflow/tfjs-backend-webgl@2.7.0 @tensorflow/tfjs-converter@2.7.0 @tensorflow/tfjs-core@2.7.0 @tensorflow/tfjs-layers@2.7.0 @tensorflow/tfjs-node@2.7.0 --unsafe-perm --force
# RUN npm install @tensorflow/tfjs-node@2.7.0 --unsafe-perm --force
RUN npm install @tensorflow/tfjs-node-gpu@2.7.0 --unsafe-perm --force
# Fix: the option was previously passed as a bare argument ("build-addon-from-source"),
# which npm rebuild treated as a package name instead of a flag.
RUN npm rebuild @tensorflow/tfjs-node --build-addon-from-source
RUN dos2unix /home/Shinobi/plugins/tensorflow/init.sh
RUN dos2unix /home/Shinobi/plugins/tensorflow/pm2.yml
RUN chmod -f +x /home/Shinobi/plugins/tensorflow/init.sh
RUN chmod -f +x /home/Shinobi/plugins/tensorflow/pm2.yml
EXPOSE 8082
ENTRYPOINT ["/home/Shinobi/plugins/tensorflow/init.sh"]
CMD [ "pm2-docker", "/home/Shinobi/plugins/tensorflow/pm2.yml" ]

View File

@ -82,3 +82,47 @@ Add the `plugins` array if you don't already have it. Add the following *object
}
],
```
# Docker Installation
> Install Shinobi Plugin with Docker
> Image is based on `nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04` (see the Dockerfile's `FROM` line).
1. Enter plugin directory. Default Shinobi installation location is `/home/Shinobi`.
```
cd /home/Shinobi/plugins/tensorflow
```
2. Build Image.
```
docker build --tag shinobi-tensorflow-image:1.0 .
```
3. Launch the plugin.
- `-e ADD_CONFIG='{"key":"123mypluginkey","tfjsBuild":"gpu","host":"172.16.100.238","port":"8080"}'` Adds any configuration parameters to the plugin's conf.json file.
- `-p '8082:8082/tcp'` is an optional flag if you decide to run the plugin in host mode.
```
docker run -d --gpus all --name='shinobi-tensorflow' -e ADD_CONFIG='{"key":"123mypluginkey","tfjsBuild":"gpu","host":"172.16.100.238","port":"8080"}' shinobi-tensorflow-image:1.0
```
**Logs**
```
docker logs /shinobi-tensorflow
```
**Stop and Remove**
```
docker stop /shinobi-tensorflow && docker rm /shinobi-tensorflow
```
### Options (Environment Variables)
| Option | Description | Default |
|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|
| ADD_CONFIG | JSON object of configuration values merged into the plugin's `conf.json` at container start. | `{}` |

View File

@ -0,0 +1,19 @@
#!/bin/sh
# Container entrypoint for the Shinobi TensorFlow plugin: ensures conf.json
# exists, applies ADD_CONFIG overrides, then execs the container command.
cd /home/Shinobi/plugins/tensorflow
if [ ! -e "./conf.json" ]; then
    echo "Creating conf.json"
    sudo cp conf.sample.json conf.json
else
    echo "conf.json already exists..."
fi
# Default ADD_CONFIG to an empty JSON object when unset or empty.
if [ -z "$ADD_CONFIG" ]; then
    ADD_CONFIG="{}"
fi
# Fix: quote the value — the JSON in ADD_CONFIG may contain spaces and was
# previously word-split into multiple arguments.
node ./modifyConfigurationForPlugin.js tensorflow addToConfig="$ADD_CONFIG" maxRetryConnection=100
# Execute Command
# NOTE(review): PLUGIN_NAME is never set in this script or the Dockerfile, so
# this message prints blank — confirm the intended variable.
echo "Starting $PLUGIN_NAME plugin for Shinobi ..."
exec "$@"

View File

@ -14,8 +14,8 @@
"dotenv": "^8.2.0",
"express": "^4.16.2",
"moment": "^2.19.2",
"socket.io": "^2.0.4",
"socket.io-client": "^1.7.4"
"socket.io": "^2.3.0",
"socket.io-client": "^2.3.0"
},
"devDependencies": {},
"bin": "shinobi-tensorflow.js",

View File

@ -0,0 +1,4 @@
# pm2 process definition for the TensorFlow plugin container
# (used by the Dockerfile's `pm2-docker` CMD).
apps:
  - script : '/home/Shinobi/plugins/tensorflow/shinobi-tensorflow.js'
    name : 'shinobi-tensorflow'
    kill_timeout : 5000

View File

@ -85,7 +85,8 @@ s.detectObject = function(buffer,d,tx,frameLocation,callback){
imgHeight:width,
imgWidth:height,
time: resp.time
}
},
frame: buffer
})
}
callback()

View File

@ -72,7 +72,8 @@ s.detectObject = async function(buffer,d,tx,frameLocation,callback){
imgHeight:parseFloat(d.mon.detector_scale_y),
imgWidth:parseFloat(d.mon.detector_scale_x),
time: (new Date()) - timeStart
}
},
frame: frame
})
}
fs.unlink(frame,function(){

View File

@ -67,7 +67,7 @@ fs.stat(pluginLocation,function(err){
var config = JSON.parse(fs.readFileSync(configLocation))
}catch(err){
try{
var config = fs.readFileSync(`${pluginLocation}conf.sample.json`,'utf8')
var config = JSON.parse(fs.readFileSync(`${pluginLocation}conf.sample.json`,'utf8'))
fs.writeFileSync(`${pluginLocation}conf.json`,JSON.stringify(config,null,3),'utf8')
}catch(err){
var config = {}

View File

@ -315,12 +315,6 @@ $(document).ready(function(e){
break;
}
break;
case'cronStop':
$.ccio.cx({f:'cron',ff:'stop'})
break;
case'cronRestart':
$.ccio.cx({f:'cron',ff:'restart'})
break;
case'jpegToggle':
e.cx={f:'monitor',ff:'jpeg_on'};
if($.ccio.op().jpeg_on===true){

View File

@ -0,0 +1,58 @@
<!-- Activation prompt card: lets the user submit a License Key / Subscription ID
     to the super API; removes itself on successful activation. -->
<form id="hey-activate" class="card shadow mb-3">
    <div class="card-header">
        <%- lang['Not Activated'] %>
    </div>
    <div class="card-body" style="min-height:auto">
        <div class="form-group">
            <input name="subscriptionId" id="pass" tabindex="2" class="form-control wide-text" placeholder="License Key / Subscription ID">
        </div>
        <div class="form-group mb-0">
            <button class="btn btn-sm btn-round btn-block btn-success" type="submit"><%- lang.Save %></button>
        </div>
    </div>
</form>
<!-- <style>
    #hey-activate {
        left: 0;
        right: 0;
        margin: auto;
        position: fixed;
        max-width: 400px;
        padding: 20px;
        background: rgba(0,0,0,0.7);
        border-radius: 10px;
        box-shadow: 0 0 10px #333;
    }
</style> -->
<script>
$(document).ready(function(){
    var heyActivateCard = $('#hey-activate')
    var heyActivateCardSubmit = heyActivateCard.find('[type="submit"]')
    // Post the entered subscription ID to the super API activation endpoint.
    heyActivateCard.submit(function(e){
        e.preventDefault()
        var formValues = $(this).serializeObject()
        // Show a spinner on the submit button while the request is in flight.
        heyActivateCardSubmit.html(`<i class="fa fa-spinner fa-pulse"></i>`)
        $.post(superApiPrefix + $user.sessionKey + '/system/activate',{
            subscriptionId: formValues.subscriptionId
        },function(data){
            // Default to a warning notice; upgrade to success when data.ok is set.
            var noticeTitle = lang['Not Activated']
            var noticeText = data.msg || lang.notActivatedText
            var noticeType = 'warning'
            if(data.ok){
                noticeTitle = lang.Activated
                noticeText = lang.activatedText
                noticeType = 'success'
                // Activation succeeded; the prompt card is no longer needed.
                heyActivateCard.remove()
            }else{
                // Restore the button label so the user can retry.
                heyActivateCardSubmit.html(lang.Save)
            }
            new PNotify({
                title: noticeTitle,
                text: noticeText,
                type: noticeType
            })
        })
        return false
    })
})
</script>

View File

@ -19,7 +19,11 @@
</div>
<div class="row">
<div class="col-md-5">
<pre class="super-system-info form-group-group red"></pre>
<div class="card shadow mb-3">
<pre class="super-system-info card-body mb-0">
</pre>
</div>
</div>
<div id="logs" class="col-md-7">
<div class="form-group-group red" style="height:400px;overflow:auto">

View File

@ -103,7 +103,14 @@
<div class="tab-pane active" id="accounts" role="tabpanel">
<div class="row">
<div class="col-md-5 text-left">
<pre class="super-system-info form-group-group red"></pre>
<% if(!config.userHasSubscribed){ %>
<% include blocks/heyActivate.ejs %>
<% } %>
<div class="card shadow mb-3">
<pre class="super-system-info card-body mb-0">
</pre>
</div>
</div>
<div class="col-md-7">
<div class="mb-4"><a href="#" class="add btn btn-block btn-default"><i class="fa fa-plus"></i> <%- lang.Add %></a></div>