fix-non-showing-inputs
Moe Alam 2020-11-02 21:29:01 -08:00
commit f7fd086665
8 changed files with 586 additions and 0 deletions

plugins/tensorflow-coral/.gitignore vendored Normal file

@@ -0,0 +1,4 @@
conf.json
dist
models
.env

plugins/tensorflow-coral/INSTALL.sh Normal file

@@ -0,0 +1,31 @@
#!/bin/bash
DIR=`dirname $0`
echo "Installing coral dependencies..."
echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo apt-get update
sudo apt-get install libedgetpu1-max
sudo apt-get install libatlas-base-dev
echo "Coral dependencies installed."
echo "Getting coral object detection models..."
mkdir -p models
wget "https://github.com/google-coral/edgetpu/raw/master/test_data/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite"
mv ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite models/
wget "https://dl.google.com/coral/canned_models/coco_labels.txt"
mv coco_labels.txt models/
echo "Models downloaded."
npm install yarn -g --unsafe-perm --force
npm install --unsafe-perm
if [ ! -e "./conf.json" ]; then
echo "Creating conf.json"
sudo cp conf.sample.json conf.json
else
echo "conf.json already exists..."
fi
echo "Adding Random Plugin Key to Main Configuration"
node $DIR/../../tools/modifyConfigurationForPlugin.js tensorflow-coral key=$(head -c 64 < /dev/urandom | sha256sum | awk '{print substr($1,1,60)}')
echo "!!!IMPORTANT!!!"
echo "IF YOU DON'T HAVE INSTALLED CORAL DEPENDENCIES BEFORE, YOU NEED TO PLUG OUT AND THEN PLUG IN YOUR CORAL USB ACCELERATOR BEFORE USING THIS PLUGIN"

plugins/tensorflow-coral/README.md Normal file

@@ -0,0 +1,82 @@
# TensorFlowCoral.js
**Ubuntu and CentOS only**
Go to the Shinobi directory. **/home/Shinobi** is the default directory.
```
cd /home/Shinobi/plugins/tensorflow-coral
```
Install the TensorFlow Lite Python runtime (`tflite_runtime`) first:
https://www.tensorflow.org/lite/guide/python
Make sure that you download the correct wheel for your system architecture and Python version.
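Once installed, a quick smoke test like the one below should load the Edge TPU delegate without errors (a minimal sketch; `libedgetpu.so.1` is the Linux library name that `detect_image.py` itself loads):
```
python3 -c "import tflite_runtime.interpreter as tflite; tflite.load_delegate('libedgetpu.so.1'); print('Edge TPU delegate OK')"
```
If the delegate fails to load, re-check the `libedgetpu1-max` package and that the accelerator is plugged in.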
Install the other Python dependencies:
```
pip install pillow
pip install numpy
```
Run the install script. It installs the Coral dependencies, downloads the detection model and labels, installs the Node.js dependencies, and copies the sample config to `conf.json`.
```
sh INSTALL.sh
```
IF YOU HAVE NOT INSTALLED THE CORAL DEPENDENCIES BEFORE, YOU MUST UNPLUG AND RE-PLUG YOUR CORAL USB ACCELERATOR BEFORE USING THIS PLUGIN!
Start the plugin.
```
pm2 start shinobi-tensorflow.js
```
Doing this will reveal new options in the monitor configuration. Shinobi does not need to be restarted when a plugin is started or stopped.
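To confirm the plugin started and connected as a detector, you can tail its log output (assuming pm2's default process name, which is derived from the script filename):
```
pm2 logs shinobi-tensorflow
```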
## Run the plugin as a Host
> The main app (Shinobi) will be the client and the plugin will be the host. The purpose of this method is that one plugin can serve multiple Shinobi instances, letting you manage connections without starting multiple processes.

Edit the plugin's configuration file. Set the `hostPort` **to be different** from the `listening port for camera.js`.
```
nano conf.json
```
Here is a sample of a Host configuration for the plugin.
- `plug` is the name of the plugin corresponding in the main configuration file.
- `https` sets whether to use SSL. Default is `false`.
- `hostPort` can be any available port number. **Don't make this the same port number as Shinobi.** Default is `8082`.
- `type` tells the main application (Shinobi) what kind of plugin it is. In this case it is a detector.
```
{
    "plug": "Tensorflow",
    "hostPort": 8082,
    "key": "Tensorflow123123",
    "mode": "host",
    "type": "detector"
}
```
Now modify the **main configuration file** located in the main directory of Shinobi.
```
nano conf.json
```
Add the `plugins` array if you don't already have it. Add the following *object inside the array*.
```
"plugins":[
{
"id" : "Tensorflow",
"https" : false,
"host" : "localhost",
"port" : 8082,
"key" : "Tensorflow123123",
"mode" : "host",
"type" : "detector"
}
],
```
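After saving the main configuration, restart Shinobi so the `plugins` array takes effect (assuming the stock pm2 process name used by a default install):
```
pm2 restart camera
```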

plugins/tensorflow-coral/conf.sample.json Normal file

@@ -0,0 +1,9 @@
{
    "plug": "TensorflowCoral",
    "host": "localhost",
    "port": 8080,
    "hostPort": 8082,
    "key": "change_this_to_something_very_random____make_sure_to_match__/plugins/tensorflow-coral/conf.json",
    "mode": "client",
    "type": "detector"
}

plugins/tensorflow-coral/detect.py Normal file

@@ -0,0 +1,163 @@
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Functions to work with detection models."""

import collections
import numpy as np

Object = collections.namedtuple('Object', ['id', 'score', 'bbox'])


class BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])):
  """Bounding box.

  Represents a rectangle whose sides are either vertical or horizontal,
  parallel to the x or y axis.
  """
  __slots__ = ()

  @property
  def width(self):
    """Returns bounding box width."""
    return self.xmax - self.xmin

  @property
  def height(self):
    """Returns bounding box height."""
    return self.ymax - self.ymin

  @property
  def area(self):
    """Returns bounding box area."""
    return self.width * self.height

  @property
  def valid(self):
    """Returns whether bounding box is valid or not.

    A valid bounding box has xmin <= xmax and ymin <= ymax, which is
    equivalent to width >= 0 and height >= 0.
    """
    return self.width >= 0 and self.height >= 0

  def scale(self, sx, sy):
    """Returns scaled bounding box."""
    return BBox(xmin=sx * self.xmin,
                ymin=sy * self.ymin,
                xmax=sx * self.xmax,
                ymax=sy * self.ymax)

  def translate(self, dx, dy):
    """Returns translated bounding box."""
    return BBox(xmin=dx + self.xmin,
                ymin=dy + self.ymin,
                xmax=dx + self.xmax,
                ymax=dy + self.ymax)

  def map(self, f):
    """Returns bounding box modified by applying f for each coordinate."""
    return BBox(xmin=f(self.xmin),
                ymin=f(self.ymin),
                xmax=f(self.xmax),
                ymax=f(self.ymax))

  @staticmethod
  def intersect(a, b):
    """Returns the intersection of two bounding boxes (may be invalid)."""
    return BBox(xmin=max(a.xmin, b.xmin),
                ymin=max(a.ymin, b.ymin),
                xmax=min(a.xmax, b.xmax),
                ymax=min(a.ymax, b.ymax))

  @staticmethod
  def union(a, b):
    """Returns the union of two bounding boxes (always valid)."""
    return BBox(xmin=min(a.xmin, b.xmin),
                ymin=min(a.ymin, b.ymin),
                xmax=max(a.xmax, b.xmax),
                ymax=max(a.ymax, b.ymax))

  @staticmethod
  def iou(a, b):
    """Returns intersection-over-union value."""
    intersection = BBox.intersect(a, b)
    if not intersection.valid:
      return 0.0
    area = intersection.area
    return area / (a.area + b.area - area)


def input_size(interpreter):
  """Returns input image size as (width, height) tuple."""
  _, height, width, _ = interpreter.get_input_details()[0]['shape']
  return width, height


def input_tensor(interpreter):
  """Returns input tensor view as numpy array of shape (height, width, 3)."""
  tensor_index = interpreter.get_input_details()[0]['index']
  return interpreter.tensor(tensor_index)()[0]


def set_input(interpreter, size, resize):
  """Copies a resized and properly zero-padded image to the input tensor.

  Args:
    interpreter: Interpreter object.
    size: original image size as (width, height) tuple.
    resize: a function that takes a (width, height) tuple, and returns an RGB
      image resized to those dimensions.

  Returns:
    Actual resize ratio, which should be passed to the `get_output` function.
  """
  width, height = input_size(interpreter)
  w, h = size
  scale = min(width / w, height / h)
  w, h = int(w * scale), int(h * scale)
  tensor = input_tensor(interpreter)
  tensor.fill(0)  # padding
  _, _, channel = tensor.shape
  tensor[:h, :w] = np.reshape(resize((w, h)), (h, w, channel))
  return scale, scale


def output_tensor(interpreter, i):
  """Returns output tensor view."""
  tensor = interpreter.tensor(interpreter.get_output_details()[i]['index'])()
  return np.squeeze(tensor)


def get_output(interpreter, score_threshold, image_scale=(1.0, 1.0)):
  """Returns list of detected objects."""
  boxes = output_tensor(interpreter, 0)
  class_ids = output_tensor(interpreter, 1)
  scores = output_tensor(interpreter, 2)
  count = int(output_tensor(interpreter, 3))

  width, height = input_size(interpreter)
  image_scale_x, image_scale_y = image_scale
  sx, sy = width / image_scale_x, height / image_scale_y

  def make(i):
    ymin, xmin, ymax, xmax = boxes[i]
    return Object(
        id=int(class_ids[i]),
        score=float(scores[i]),
        bbox=BBox(xmin=xmin,
                  ymin=ymin,
                  xmax=xmax,
                  ymax=ymax).scale(sx, sy).map(int))

  return [make(i) for i in range(count) if scores[i] >= score_threshold]

plugins/tensorflow-coral/detect_image.py Normal file

@@ -0,0 +1,118 @@
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Uses TF Lite and the Edge TPU to detect objects in base64 images read from stdin."""

import base64
import json
import platform
import sys
import time
from io import BytesIO

from PIL import Image
from PIL import ImageDraw

import detect
import tflite_runtime.interpreter as tflite

EDGETPU_SHARED_LIB = {
    'Linux': 'libedgetpu.so.1',
    'Darwin': 'libedgetpu.1.dylib',
    'Windows': 'edgetpu.dll'
}[platform.system()]


def load_labels(path, encoding='utf-8'):
  """Loads labels from file (with or without index numbers).

  Args:
    path: path to label file.
    encoding: label file encoding.
  Returns:
    Dictionary mapping indices to labels.
  """
  with open(path, 'r', encoding=encoding) as f:
    lines = f.readlines()
    if not lines:
      return {}

    if lines[0].split(' ', maxsplit=1)[0].isdigit():
      pairs = [line.split(' ', maxsplit=1) for line in lines]
      return {int(index): label.strip() for index, label in pairs}
    else:
      return {index: line.strip() for index, line in enumerate(lines)}


def make_interpreter(model_file):
  # An optional "@<device>" suffix selects a specific Edge TPU.
  model_file, *device = model_file.split('@')
  return tflite.Interpreter(
      model_path=model_file,
      experimental_delegates=[
          tflite.load_delegate(EDGETPU_SHARED_LIB,
                               {'device': device[0]} if device else {})
      ])


def draw_objects(draw, objs, labels):
  """Draws the bounding box and label for each object."""
  for obj in objs:
    bbox = obj.bbox
    draw.rectangle([(bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax)],
                   outline='red')
    draw.text((bbox.xmin + 10, bbox.ymin + 10),
              '%s\n%.2f' % (labels.get(obj.id, obj.id), obj.score),
              fill='red')


# All stdout messages are single-line JSON objects so the Node.js parent
# process can parse them.
def printInfo(text):
  print(json.dumps({"type": "info", "data": text}))


def printError(text):
  print(json.dumps({"type": "error", "data": text}))


def printData(array, time):
  print(json.dumps({"type": "data", "data": array, "time": time}))


def main():
  labels = load_labels("models/coco_labels.txt")
  interpreter = make_interpreter(
      "models/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite")
  interpreter.allocate_tensors()
  threshold = 0.4
  printInfo("ready")
  while True:
    # Each stdin line is one base64-encoded image frame.
    line = sys.stdin.readline().rstrip("\n")
    try:
      rawImage = BytesIO(base64.b64decode(line))
      image = Image.open(rawImage)
      scale = detect.set_input(interpreter, image.size,
                               lambda size: image.resize(size, Image.ANTIALIAS))
      start = time.perf_counter()
      interpreter.invoke()
      inference_time = time.perf_counter() - start
      objs = detect.get_output(interpreter, threshold, scale)
      output = []
      for obj in objs:
        label = labels.get(obj.id, obj.id)
        # obj.bbox is a BBox namedtuple, so it serializes to JSON as
        # [xmin, ymin, xmax, ymax].
        output.append({"bbox": obj.bbox, "class": label, "score": obj.score})
      printData(output, (inference_time * 1000))
    except Exception as e:
      printError(str(e))


if __name__ == '__main__':
  main()

plugins/tensorflow-coral/package.json Normal file

@@ -0,0 +1,33 @@
{
    "name": "shinobi-tensorflow-coral",
    "author": "Shinobi Systems, Moinul Alam | dermodmaster, Levent Koch",
    "version": "1.0.0",
    "description": "Object Detection plugin based on TensorFlow Lite using the Google Coral USB Accelerator",
    "main": "shinobi-tensorflow.js",
    "dependencies": {
        "dotenv": "^8.2.0",
        "express": "^4.16.2",
        "moment": "^2.19.2",
        "socket.io": "^2.0.4",
        "socket.io-client": "^1.7.4"
    },
    "devDependencies": {},
    "bin": "shinobi-tensorflow.js",
    "scripts": {
        "package": "pkg package.json -t linux,macos,win --out-path dist",
        "package-x64": "pkg package.json -t linux-x64,macos-x64,win-x64 --out-path dist/x64",
        "package-x86": "pkg package.json -t linux-x86,macos-x86,win-x86 --out-path dist/x86",
        "package-armv6": "pkg package.json -t linux-armv6,macos-armv6,win-armv6 --out-path dist/armv6",
        "package-armv7": "pkg package.json -t linux-armv7,macos-armv7,win-armv7 --out-path dist/armv7",
        "package-all": "npm run package && npm run package-x64 && npm run package-x86 && npm run package-armv6 && npm run package-armv7"
    },
    "pkg": {
        "targets": [
            "node12"
        ],
        "scripts": [
            "../pluginBase.js"
        ],
        "assets": []
    }
}

plugins/tensorflow-coral/shinobi-tensorflow.js Normal file

@@ -0,0 +1,146 @@
//
// Shinobi - Tensorflow Plugin
// Copyright (C) 2016-2025 Moe Alam, moeiscool
// Copyright (C) 2020 Levent Koch, dermodmaster
//
// # Donate
//
// If you like what I am doing here and want me to continue please consider donating :)
// PayPal : paypal@m03.ca
//
// Base Init >>
var fs = require('fs');
var config = require('./conf.json')
var dotenv = require('dotenv').config()
var s
try {
    s = require('../pluginBase.js')(__dirname, config)
} catch (err) {
    console.log(err)
    try {
        s = require('./pluginBase.js')(__dirname, config)
    } catch (err) {
        console.log(err)
        return console.log(config.plug, 'Plugin start has failed. pluginBase.js was not found.')
    }
}
var ready = false;
const spawn = require('child_process').spawn;
var child = null
function respawn() {
    console.log("respawned python", (new Date()))
    const theChild = spawn('python3', ['-u', 'detect_image.py']);
    var lastStatusLog = new Date();
    theChild.on('exit', () => {
        child = respawn();
    });
    theChild.stdout.on('data', function (data) {
        var rawString = data.toString('utf8');
        // Throttle raw status logging to at most once every 5 seconds.
        if (new Date() - lastStatusLog > 5000) {
            lastStatusLog = new Date();
            console.log(rawString, new Date());
        }
        // The Python child writes one JSON object per line.
        var messages = rawString.split('\n')
        messages.forEach((message) => {
            if (message === "") return;
            var obj = JSON.parse(message)
            if (obj.type === "error") {
                console.log("Script got error: " + obj.data, new Date());
                throw obj.data;
            }
            if (obj.type === "info" && obj.data === "ready") {
                console.log("set ready true")
                ready = true;
            } else {
                if (obj.type !== "data" && obj.type !== "info") {
                    throw "Unexpected message: " + rawString;
                }
            }
        })
    })
    return theChild
}
// Base Init />>
child = respawn();
const emptyDataObject = { data: [], type: undefined, time: 0 };
async function process(buffer, type) {
    const startTime = new Date();
    if (!ready) {
        return emptyDataObject;
    }
    ready = false;
    // Send the frame to the Python child as a single base64 line, then wait
    // for one JSON line back on stdout.
    child.stdin.write(buffer.toString('base64') + '\n');
    var message = null;
    await new Promise(resolve => {
        child.stdout.once('data', (data) => {
            var rawString = data.toString('utf8').split("\n")[0];
            try {
                message = JSON.parse(rawString)
            } catch (e) {
                message = { data: [] };
            }
            resolve();
        });
    })
    const data = message.data;
    ready = true;
    return {
        data: data,
        type: type,
        time: new Date() - startTime
    }
}
s.detectObject = function (buffer, d, tx, frameLocation, callback) {
    process(buffer).then((resp) => {
        var results = resp.data
        //console.log(resp.time)
        if (Array.isArray(results) && results[0]) {
            var mats = []
            results.forEach(function (v) {
                // v.bbox arrives as [xmin, ymin, xmax, ymax] (see
                // detect_image.py), so derive width and height from the
                // second corner.
                mats.push({
                    x: v.bbox[0],
                    y: v.bbox[1],
                    width: v.bbox[2] - v.bbox[0],
                    height: v.bbox[3] - v.bbox[1],
                    tag: v.class,
                    confidence: v.score,
                })
            })
            var isObjectDetectionSeparate = d.mon.detector_pam === '1' && d.mon.detector_use_detect_object === '1'
            var imgHeight = parseFloat(isObjectDetectionSeparate && d.mon.detector_scale_y_object ? d.mon.detector_scale_y_object : d.mon.detector_scale_y)
            var imgWidth = parseFloat(isObjectDetectionSeparate && d.mon.detector_scale_x_object ? d.mon.detector_scale_x_object : d.mon.detector_scale_x)
            tx({
                f: 'trigger',
                id: d.id,
                ke: d.ke,
                details: {
                    plug: config.plug,
                    name: 'Tensorflow',
                    reason: 'object',
                    matrices: mats,
                    imgHeight: imgHeight,
                    imgWidth: imgWidth,
                    time: resp.time
                }
            })
        }
        callback()
    })
}