Skip to content
This repository has been archived by the owner on Mar 19, 2023. It is now read-only.

Commit

Permalink
Merge pull request #4 from robmarkcole/adds_teach_service
Browse files Browse the repository at this point in the history
Adds teach service
  • Loading branch information
robmarkcole authored Jan 19, 2019
2 parents be938aa + ce9f562 commit 94ba5b0
Show file tree
Hide file tree
Showing 4 changed files with 291 additions and 14 deletions.
16 changes: 15 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,9 @@ sudo docker pull deepquestai/deepstack
Place the `custom_components` folder in your configuration directory (or add its contents to an existing `custom_components` folder). Then configure face recognition and/or object detection. Note that we use `scan_interval` to (optionally) limit computation, [as described here](https://www.home-assistant.io/components/image_processing/#scan_interval-and-optimising-resources).

## Face recognition
Deepstack [face recognition](https://deepstackpython.readthedocs.io/en/latest/facerecognition.html) counts faces and will recognise them if you have trained your Deepstack. On you machine with docker, run Deepstack with the face recognition service active on port `5000`:
Deepstack [face recognition](https://deepstackpython.readthedocs.io/en/latest/facerecognition.html) counts faces and will recognise them if you have trained your Deepstack using the `deepstack_teach_face` service.

On your machine with docker, run Deepstack with the face recognition service active on port `5000`:
```
sudo docker run -e VISION-FACE=True -v localstorage:/datastore -p 5000:5000 deepquestai/deepstack
```
Expand All @@ -36,6 +38,18 @@ Configuration variables:
- **source**: Must be a camera.
- **name**: (Optional) A custom name for the entity.
#### Service `deepstack_teach_face`
This service is for teaching (or [registering](https://deepstackpython.readthedocs.io/en/latest/facerecognition.html#face-registeration)) faces with deepstack, so that they can be recognised.

Example valid service data:
```
{
"name": "superman",
"file_path": "/Users/robincole/.homeassistant/images/superman_1.jpeg"
}
```


<p align="center">
<img src="https://github.com/robmarkcole/HASS-Deepstack/blob/master/docs/face_usage.png" width="500">
</p>
Expand Down
82 changes: 81 additions & 1 deletion custom_components/image_processing/deepstack_face.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,19 +25,32 @@
_LOGGER = logging.getLogger(__name__)

# Platform identifier used as a prefix in log messages.
CLASSIFIER = 'deepstack_face'
# hass.data key under which all classifier entities are tracked so that
# the teach service can dispatch to them.
DATA_DEEPSTACK = 'deepstack_classifiers'
# Service-data key naming the image file to teach from.
FILE_PATH = 'file_path'
# Name of the face-teaching service registered by this platform.
SERVICE_TEACH_FACE = 'deepstack_teach_face'


PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
})

# Schema for deepstack_teach_face service calls: an optional entity filter
# plus the required face name and image file path.
SERVICE_TEACH_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_NAME): cv.string,
    vol.Required(FILE_PATH): cv.string,
})

def get_matched_faces(predictions):
    """Return a dict of recognised faces mapped to confidence percentages.

    Faces deepstack could not identify carry the userid 'unknown' and are
    excluded. Confidence (0..1 from the API) is scaled to a percentage and
    rounded to one decimal place.

    Returns an empty dict for malformed or missing predictions instead of
    raising, so callers can always iterate the result.
    """
    try:
        # Narrow except: only the failure modes a bad payload can produce
        # (missing keys, non-iterable/non-mapping entries, bad numbers).
        return {
            face['userid']: round(face['confidence'] * 100, 1)
            for face in predictions
            if face['userid'] != 'unknown'
        }
    except (KeyError, TypeError, ValueError):
        return {}


def post_image(url, image):
Expand All @@ -53,18 +66,75 @@ def post_image(url, image):
return None


def register_face(url, name, file_path):
    """Register (teach) a face with deepstack.

    Posts the image at *file_path* to the deepstack face-register endpoint
    *url* under the userid *name*. All outcomes — success, API-reported
    error, unexpected status, or local failure (e.g. unreadable file) —
    are logged; nothing is returned and nothing is raised.
    """
    try:
        with open(file_path, "rb") as image:
            response = requests.post(url,
                                     files={"image": image.read()},
                                     data={"userid": name})

        if response.status_code == 200:
            # Parse the body once instead of re-calling response.json().
            body = response.json()
            if body['success']:
                _LOGGER.info(
                    "%s taught face %s using file %s",
                    CLASSIFIER, name, file_path)
            else:
                _LOGGER.warning(
                    "%s taught error: %s", CLASSIFIER, body['error'])
        else:
            _LOGGER.error("%s error : %s", CLASSIFIER, response.json())

    except Exception as exc:
        # Best-effort service: never let a teach failure propagate.
        _LOGGER.warning("%s error : %s", CLASSIFIER, exc)


def valid_file_path(file_path):
    """Return True if *file_path* points to an existing file.

    Logs an error and returns False otherwise.
    """
    try:
        # cv.isfile raises vol.Invalid for anything that is not a file.
        cv.isfile(file_path)
    except vol.Invalid:
        _LOGGER.error(
            "%s error: Invalid file path: %s", CLASSIFIER, file_path)
        return False
    return True


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the deepstack face classifier platform.

    Creates one FaceClassifyEntity per configured camera source and
    registers the `deepstack_teach_face` service, which dispatches teach
    requests to all (or the explicitly targeted) classifier entities.
    """
    if DATA_DEEPSTACK not in hass.data:
        hass.data[DATA_DEEPSTACK] = []

    ip_address = config[CONF_IP_ADDRESS]
    port = config[CONF_PORT]
    entities = []
    for camera in config[CONF_SOURCE]:
        face_entity = FaceClassifyEntity(
            ip_address, port, camera[CONF_ENTITY_ID], camera.get(CONF_NAME))
        entities.append(face_entity)
        # Track every classifier so the teach service can reach it later.
        hass.data[DATA_DEEPSTACK].append(face_entity)

    add_devices(entities)

    def service_handle(service):
        """Handle deepstack_teach_face service calls."""
        # Use the schema constant rather than a hard-coded 'entity_id'.
        entity_ids = service.data.get(ATTR_ENTITY_ID)

        classifiers = hass.data[DATA_DEEPSTACK]
        if entity_ids:
            classifiers = [c for c in classifiers if c.entity_id in entity_ids]

        # Name and file path are loop-invariant; read them once.
        name = service.data.get(ATTR_NAME)
        file_path = service.data.get(FILE_PATH)
        for classifier in classifiers:
            classifier.teach(name, file_path)

    hass.services.register(
        DOMAIN, SERVICE_TEACH_FACE, service_handle,
        schema=SERVICE_TEACH_SCHEMA)


class FaceClassifyEntity(ImageProcessingFaceEntity):
"""Perform a face classification."""
Expand All @@ -74,6 +144,8 @@ def __init__(self, ip_address, port, camera_entity, name=None):
super().__init__()
self._url_check = "http://{}:{}/v1/vision/face/recognize".format(
ip_address, port)
self._url_register = "http://{}:{}/v1/vision/face/register".format(
ip_address, port)
self._camera = camera_entity
if name:
self._name = name
Expand All @@ -99,6 +171,14 @@ def process_image(self, image):
self.total_faces = None
self._matched = {}

def teach(self, name, file_path):
"""Teach classifier a face name."""
if (not self.hass.config.is_allowed_path(file_path)
or not valid_file_path(file_path)):
return
register_face(
self._url_register, name, file_path)

@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
Expand Down
105 changes: 98 additions & 7 deletions development/.ipynb_checkpoints/Deepstack face dev-checkpoint.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
" \"\"\"\n",
" Get the predicted faces and their confidence.\n",
" \"\"\"\n",
" return {face['userid']: round(face['confidence']*100, 1) for face in predictions if face['userid'] is not 'unknown'}"
" return {face['userid']: round(face['confidence']*100, 1) for face in predictions if not face['userid'] == 'unknown'}"
]
},
{
Expand Down Expand Up @@ -183,7 +183,7 @@
{
"data": {
"text/plain": [
"{'Adele': 50.1, 'unknown': 0}"
"{'Adele': 50.1}"
]
},
"execution_count": 9,
Expand Down Expand Up @@ -226,25 +226,45 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"response = requests.post(f\"http://{IP}:{PORT}/v1/vision/face/register\",files={\"image\":open(\"adele_train.jpg\",\"rb\").read()}, data={\"userid\":\"Adele\"})"
"response = requests.post(f\"http://{IP}:{PORT}/v1/vision/face/register\",files={\"image\":open(\"adele_train.jpg\",\"rb\").read()}, data={\"userid\":\"\"})"
]
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 34,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'success': True, 'message': 'face updated'}"
"200"
]
},
"execution_count": 12,
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response.status_code"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'success': False, 'error': 'userid not specified'}"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
Expand All @@ -253,6 +273,77 @@
"response.json()"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [
{
"ename": "KeyError",
"evalue": "'message'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-27-c92a3c225643>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjson\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'message'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m: 'message'"
]
}
],
"source": [
"response.json()['message']"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [],
"source": [
"def register_face(url, name, file_path):\n",
" \"\"\"\n",
" Register a name to a file.\n",
" \"\"\"\n",
" try:\n",
" with open(file_path,\"rb\") as image:\n",
" response = requests.post(url, \n",
" files={\"image\":image.read()}, \n",
" data={\"userid\":name})\n",
" \n",
" if response.status_code == 200 and response.json()['success'] == True:\n",
" print(f\"Deepstack taught face {name} using file {file_path}\")\n",
" elif response.status_code == 200 and response.json()['success'] == False:\n",
" error = response.json()['error']\n",
" print(f\"Deepstack error: {error}\")\n",
"\n",
" else:\n",
" print(response.json())\n",
" \n",
" except Exception as exc:\n",
" print(exc)"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Deepstack taught face Adele using file adele_train.jpg\n"
]
}
],
"source": [
"url = f\"http://{IP}:{PORT}/v1/vision/face/register\"\n",
"name = \"Adele\"\n",
"file_path = \"adele_train.jpg\"\n",
"\n",
"register_face(url, name, file_path)"
]
},
{
"cell_type": "markdown",
"metadata": {},
Expand Down
Loading

0 comments on commit 94ba5b0

Please sign in to comment.