diff --git a/.gitignore b/.gitignore
index 0f59fc0..5ef8ead 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,6 @@
venv
build
-opencv.wrap.egg-info
+opencv_wrap.egg-info
dist
__pycache__
-opencv.wrap/test.py
.vscode
diff --git a/README.md b/README.md
index 9b94b64..b652af0 100644
--- a/README.md
+++ b/README.md
@@ -1,38 +1,290 @@
-# opencv.wrap
+# opencv_wrap
-```
A collection of decorators for opencv and helper functions for multiple opencv tasks.
+
+Working with OpenCV can be quite a hassle: a lot of boilerplate code and nested functions for specific use cases. This package is designed to make it easier to work with OpenCV while you focus on the task at hand, which makes it great for prototyping and quick testing. It is also built with speed and efficiency in mind.
+
+---
+
+Built with ☕ by [@rishi23root](https://github.com/rishi23root/)
+
+rishi23root/opencv_wrap/
+
+[![GitHub stars](https://img.shields.io/github/stars/rishi23root/opencv_wrap.svg)](https://github.com/rishi23root/opencv_wrap/stargazers)
+[![PyPI](https://img.shields.io/pypi/v/opencv_wrap.svg)](https://pypi.org/project/opencv_wrap/)
+[![GitHub](https://img.shields.io/github/license/rishi23root/opencv_wrap.svg)](https://github.com/rishi23root/opencv_wrap/blob/master/LICENSE) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/Django.svg) [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-:D-1EAEDB.svg)](https://saythanks.io/to/rishi23root)
+
+# Installation
+
+```bash
+pip install opencv-wrap
+```
+
+#### A very basic example: read the camera feed and display it, in just 5 lines of code. 😎
+
+```python
+from opencv_wrap import cv2Decorator
+
+@cv2Decorator.TotalTimeTaken(show=True)
+@cv2Decorator.AccessCamOrVideo(show=True)
+@cv2Decorator.CalculateFps(draw=True)
+def all_actions(**kwargs):
+ return kwargs
+
+all_actions()
+```
+
+#### An advanced example: face detection with the smart viewer, in just 23 lines of code. 😊
+
+```python
+from opencv_wrap import cv2Decorator
+import cv2
+from opencv_wrap.detectors import Face
+from opencv_wrap.utils.helper import show_all_frames, clipImage
+
+@cv2Decorator.DetectInEachFrame(detector=Face(verbose=True),name="face")
+@cv2Decorator.TotalTimeTaken(show=True)
+@cv2Decorator.AccessCamOrVideo(show=False,videoPath="./opencv_wrap/testMedia/test.mp4") # path to video
+@cv2Decorator.CalculateFps(draw=False)
+@cv2Decorator.MirrorFrame()
+@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+def all_actions(**kwargs):
+ mainFrameCopy = kwargs["frame"].copy()
+ processed = kwargs["face"].detect(kwargs["bgr_frame"])
+ face_coordinate = kwargs["face"].getDetectionBox(
+ processed, kwargs["frame"], draw=True
+ )
+ kwargs["face"].getLandmarks(processed, kwargs["frame"], draw=True)
+ kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+ show_all_frames(kwargs, keysToShow=["frame", "detected"])
+ return kwargs
+
+all_actions()
```
-Built with ❤︎ and ☕ by [@rishi23root](https://github.com/rishi23root/)
+
-rishi23root/opencv.wrap/
+---
-[![GitHub stars](https://img.shields.io/github/stars/rishi23root/opencv.wrap.svg)](https://github.com/rishi23root/opencv.wrap/stargazers)
-[![PyPI](https://img.shields.io/pypi/v/opencv.wrap.svg)](https://pypi.org/project/opencv.wrap/)
-[![GitHub](https://img.shields.io/github/license/rishi23root/opencv.wrap.svg)](https://github.com/rishi23root/opencv.wrap/blob/master/LICENSE) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/Django.svg) [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-:D-1EAEDB.svg)](https://saythanks.io/to/rishi23root27@gmail.com)
+## Features with decorators
-# Features
+```python
+from opencv_wrap import cv2Decorator
+
+@cv2Decorator.TotalTimeTaken(show=True)
+...
+```
- TotalTimeTaken
- CalculateFps
- MirrorFrame
- ConvertCOLOR
-- ReadCamAndShowFrames
-- ReadCamAddDetectShowFrames
-- ReadCamAddDetectShowFrames_video
+- AccessCamOrVideo
+- DetectInEachFrame
+
+## Utils to help you with opencv tasks
+
+```python
+from opencv_wrap.utils import DetectorClass
+from opencv_wrap.utils.helper import detectionBox
+```
-# additional Utils
+#### Detector Parent
-- saveFrames
-- detectionBox
-- show_all_frames
+DetectorClass is the base class for all the detectors. It provides some basic functionality, such as Singleton behaviour and the isVerbose flag.
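+
+Below is a minimal sketch of a custom detector built on top of `DetectorClass`, following the same pattern the bundled `Face` detector uses (register a callable on `self._detector`, then call `.detect()` on frames). `BrightSpotDetector` and its threshold logic are illustrative only, not part of the package.
+
+```python
+import cv2
+import numpy as np
+from opencv_wrap.utils import DetectorClass
+
+
+class BrightSpotDetector(DetectorClass):
+    """Toy detector: returns a binary mask of bright pixels (illustrative only)."""
+
+    def __init__(self, threshold=200, verbose=False):
+        # DetectorClass is a Singleton, so only initialise once
+        if self._detector is None:
+            self.threshold = threshold
+            self._detector = self.processFrame
+
+    def processFrame(self, frame):
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        _, mask = cv2.threshold(gray, self.threshold, 255, cv2.THRESH_BINARY)
+        return mask
+
+
+d1 = BrightSpotDetector(verbose=True)
+d2 = BrightSpotDetector()
+print(d1 == d2)        # True: both names refer to the same singleton instance
+print(d1.isVerbose())  # verbose flag handled by the base class
+mask = d1.detect(np.zeros((64, 64, 3), dtype=np.uint8))
+```
+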
-# Detection Utils
+#### Helper functions
+
+- `saveFrame`
+- `detectionBox`
+- `resizeImage`
+- `clipImage`
+- `added_title`
+- `combine_images`
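+
+A quick sketch of a few of these helpers, based on how they are used in the examples in this README (the box values are made up, and exact keyword arguments may differ slightly):
+
+```python
+import cv2
+from opencv_wrap.utils.helper import clipImage, detectionBox, saveFrame
+
+frame = cv2.imread("./opencv_wrap/testMedia/test.jpg")
+
+# a made-up (x, y, w, h) detection, e.g. something returned by getDetectionBox
+boxes = [(50, 50, 120, 120)]
+
+# crop the detection out of an untouched copy of the frame
+face = clipImage(frame.copy(), boxes[0])
+
+# draw the detection boxes onto the frame
+detectionBox(detectedArr=boxes, frame=frame)
+
+# write the frame to ./output, indexed by a frame counter
+saveFrame(frame, 0, destination="./output")
+```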
+
+## Detection Classes
+
+```python
+from opencv_wrap.detectors import Face , Hand, Pose
+```
-- face detection
-- hand detection
+- Face detection
+- Hand detection
+- Pose detection
- eye detection (yet to be added)
+#### You can extend the `detector` classes to fit your needs. 😊
+
+For example, subclass a detector and add more functions to it, such as actions to take on certain detections.
+
+> **Example**: blur everything but the face. This can be useful when you want to hide the background and keep the focus on the detected object, here a face.
+
+```python
+import cv2
+from opencv_wrap import cv2Decorator
+from opencv_wrap.detectors import Face
+
+class FaceExtented(Face):
+ def blurEverytingButFace(self, frame, face_coordinate):
+ # make a copy of the frame
+ frameCopy = frame.copy()
+ frame = cv2.blur(frame, (50,50))
+ for (x, y, w, h) in face_coordinate:
+ frame[y : y + h, x : x + w] = frameCopy[y : y + h, x : x + w]
+ return frame
+
+@cv2Decorator.DetectInEachFrame(detector=FaceExtented(verbose=True),name="face")
+@cv2Decorator.AccessCamOrVideo(show=True,videoPath="./opencv_wrap/testMedia/test.mp4")
+@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+def all_actions(**kwargs):
+ processed = kwargs["face"].detect(kwargs["bgr_frame"])
+ face_coordinate = kwargs["face"].getDetectionBox(
+ processed, kwargs["frame"], draw=False,padding_ratio=0.4)
+ kwargs["frame"] = kwargs["face"].blurEverytingButFace(kwargs["frame"], face_coordinate)
+ return kwargs
+
+all_actions()
+```
+
+
+
+---
+
+> ## OPEN FOR CONTRIBUTIONS 🤝
+
+#### Steps to start contributing
+
+0. Star the repo 🌟
+1. Fork the repo 👨💻
+2. Clone the repo 📂
+3. Create a new issue 🔖
+4. Make changes 📜
+5. Push the changes 🚀
+6. Create a pull request 🌐
+
+---
+
+## More Usage Examples
+
+> **Example 1** : Reading a single frame from the directory
+
+```python
+@cv2Decorator.DetectInEachFrame(
+ detector=cv2.CascadeClassifier(cv2.data.haarcascades+"haarcascade_frontalface_default.xml"),
+ name='face')
+@cv2Decorator.MirrorFrame()
+@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+def all_actions(**kwargs):
+ frame = kwargs['frame']
+    # detect faces using the trained Haar cascade; detectMultiScale detects faces at every scale
+ face_coordinate = kwargs['face'].detectMultiScale(kwargs['greyScale'],1.3,5)
+ detectionBox(detectedArr=face_coordinate, frame=frame)
+ return kwargs
+
+frame = cv2.imread('./opencv_wrap/testMedia/test.jpg')
+
+kwargs = all_actions(frame=frame)
+cv2.imshow('frame',kwargs['frame'])
+key = cv2.waitKey(0)
+```
+
+> **Example 2**: Reading the cam and detecting hands in each frame
+
+```python
+@cv2Decorator.DetectInEachFrame(
+ detector=Hand(verbose=True),
+ name="hand",
+)
+@cv2Decorator.TotalTimeTaken(show=True)
+@cv2Decorator.AccessCamOrVideo(show=False, fps=12)
+@cv2Decorator.CalculateFps(draw=True)
+@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+def all_actions(**kwargs):
+ mainFrameCopy = kwargs["frame"].copy()
+ processed = kwargs["hand"].detect(kwargs["bgr_frame"])
+ face_coordinate = kwargs["hand"].getDetectionBox(
+ processed, kwargs["frame"], draw=True
+ )
+ kwargs["hand"].getLandmarks(processed, kwargs["frame"],draw=True)
+ # print(len(face_coordinate))
+ kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+ show_all_frames(kwargs, keysToShow=["frame", "detected"])
+ return kwargs
+
+
+kwargs = all_actions()
+```
+
+> **Example 3** : Reading video and detecting Pose in each frame
+
+```python
+@cv2Decorator.DetectInEachFrame(
+ detector=Pose(verbose=True),
+ name="pose",
+)
+@cv2Decorator.TotalTimeTaken(show=True)
+@cv2Decorator.AccessCamOrVideo(show=False, videoPath="./opencv_wrap/testMedia/test.mp4", fps=12)
+@cv2Decorator.CalculateFps(draw=True)
+@cv2Decorator.MirrorFrame()
+@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+def all_actions(**kwargs):
+ mainFrameCopy = kwargs["frame"].copy()
+ processed = kwargs["pose"].detect(kwargs["bgr_frame"])
+ face_coordinate = kwargs["pose"].getDetectionBox(
+ processed, kwargs["frame"], draw=True
+ )
+ kwargs["pose"].getLandmarks(processed, kwargs["frame"],draw=True)
+
+ kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+ show_all_frames(kwargs, keysToShow=["frame", "detected"])
+ return kwargs
+
+
+all_actions()
+```
+
+> **Example 4** : Reading video and saving each frame in a folder
+
+```python
+from opencv_wrap import cv2Decorator
+from opencv_wrap.utils.helper import saveFrame
+
+@cv2Decorator.AccessCamOrVideo(show=True, videoPath="./opencv_wrap/testMedia/test.mp4", )
+def all_actions(**kwargs):
+ saveFrame(kwargs['frame'],kwargs['frame_count'],destination='./output')
+ return kwargs
+
+all_actions()
+```
+
+
+
+> **Example 5**: Reading a video and showing converted frames in the smart view
+
+```python
+@cv2Decorator.TotalTimeTaken(show=True)
+@cv2Decorator.AccessCamOrVideo(show=False, videoPath="./opencv_wrap/testMedia/test.mp4", fps=12)
+@cv2Decorator.CalculateFps(draw=True)
+@cv2Decorator.MirrorFrame()
+@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+def all_actions(**kwargs):
+ show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame'])
+ return kwargs
+
+all_actions()
+```
+
+
+
+---
+
+# Future Updates
-add code examples here for each feature and utilities
\ No newline at end of file
+- [ ] Face recognition
+- [ ] Eye detection
+- [ ] Object detection
+- [ ] Image classification
+- [ ] Segmentation (decorator)
+- [ ] Make the whole package at least 10x faster using Cython
diff --git a/README.rst b/README.rst
index 46e4a91..c4c9f0e 100644
--- a/README.rst
+++ b/README.rst
@@ -1,4 +1,4 @@
-opencv.wrap
+opencv_wrap
===========
::
@@ -7,43 +7,305 @@ opencv.wrap
Built with ❤︎ and ☕ by [@rishi23root](https://github.com/rishi23root/)
-rishi23root/opencv.wrap/
+rishi23root/opencv_wrap/
|GitHub stars| |PyPI| |GitHub| |PyPI - Python Version| |Say Thanks!|
-Features
-========
+Installation
+============
+
+.. code:: bash
+
+ pip install opencv-wrap
+
+A very basic example: read the camera feed and display it, in just 5 lines of code. 😎
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code:: python
+
+ from opencv_wrap import cv2Decorator
+
+ @cv2Decorator.TotalTimeTaken(show=True)
+ @cv2Decorator.AccessCamOrVideo(show=True)
+ @cv2Decorator.CalculateFps(draw=True)
+ def all_actions(**kwargs):
+ return kwargs
+
+ all_actions()
+
+An advanced example: face detection with the smart viewer, in just 23 lines of code. 😊
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code:: python
+
+ from opencv_wrap import cv2Decorator
+ import cv2
+ from opencv_wrap.detectors import Face
+ from opencv_wrap.utils.helper import show_all_frames, clipImage
+
+ @cv2Decorator.DetectInEachFrame(detector=Face(verbose=True),name="face")
+ @cv2Decorator.TotalTimeTaken(show=True)
+ @cv2Decorator.AccessCamOrVideo(show=False,videoPath="./opencv_wrap/testMedia/test.mp4") # path to video
+ @cv2Decorator.CalculateFps(draw=False)
+ @cv2Decorator.MirrorFrame()
+ @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+ def all_actions(**kwargs):
+ mainFrameCopy = kwargs["frame"].copy()
+ processed = kwargs["face"].detect(kwargs["bgr_frame"])
+ face_coordinate = kwargs["face"].getDetectionBox(
+ processed, kwargs["frame"], draw=True
+ )
+ kwargs["face"].getLandmarks(processed, kwargs["frame"], draw=True)
+ kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+ show_all_frames(kwargs, keysToShow=["frame", "detected"])
+ return kwargs
+
+ all_actions()
+
+--------------
+
+Features with decorators
+------------------------
+
+.. code:: python
+
+ from opencv_wrap import cv2Decorator
+
+ @cv2Decorator.TotalTimeTaken(show=True)
+ ...
- TotalTimeTaken
- CalculateFps
- MirrorFrame
- ConvertCOLOR
-- ReadCamAndShowFrames
-- ReadCamAddDetectShowFrames
-- ReadCamAddDetectShowFrames_video
+- AccessCamOrVideo
+- DetectInEachFrame
-additional Utils
-================
+Utils to help you with opencv tasks
+-----------------------------------
-- saveFrames
-- detectionBox
-- show_all_frames
+.. code:: python
-Detection Utils
-===============
+ from opencv_wrap.utils import DetectorClass
+ from opencv_wrap.utils.helper import detectionBox
-- face detection
-- hand detection
+Detector Parent
+^^^^^^^^^^^^^^^
+
+DetectorClass is the base class for all the detectors. It provides some basic
+functionality, such as Singleton behaviour and the isVerbose flag.
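+
+Below is a minimal sketch of a custom detector built on top of
+``DetectorClass``, following the same pattern the bundled ``Face`` detector
+uses (register a callable on ``self._detector``, then call ``.detect()`` on
+frames). ``BrightSpotDetector`` and its threshold logic are illustrative only,
+not part of the package.
+
+.. code:: python
+
+   import cv2
+   import numpy as np
+   from opencv_wrap.utils import DetectorClass
+
+
+   class BrightSpotDetector(DetectorClass):
+       """Toy detector: returns a binary mask of bright pixels (illustrative only)."""
+
+       def __init__(self, threshold=200, verbose=False):
+           # DetectorClass is a Singleton, so only initialise once
+           if self._detector is None:
+               self.threshold = threshold
+               self._detector = self.processFrame
+
+       def processFrame(self, frame):
+           gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+           _, mask = cv2.threshold(gray, self.threshold, 255, cv2.THRESH_BINARY)
+           return mask
+
+
+   d1 = BrightSpotDetector(verbose=True)
+   d2 = BrightSpotDetector()
+   print(d1 == d2)        # True: both names refer to the same singleton instance
+   print(d1.isVerbose())  # verbose flag handled by the base class
+   mask = d1.detect(np.zeros((64, 64, 3), dtype=np.uint8))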
+
+Helper functions
+^^^^^^^^^^^^^^^^
+
+- ``saveFrame``
+- ``detectionBox``
+- ``resizeImage``
+- ``clipImage``
+- ``added_title``
+- ``combine_images``
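+
+A quick sketch of a few of these helpers, based on how they are used in the
+examples in this README (the box values are made up, and exact keyword
+arguments may differ slightly):
+
+.. code:: python
+
+   import cv2
+   from opencv_wrap.utils.helper import clipImage, detectionBox, saveFrame
+
+   frame = cv2.imread("./opencv_wrap/testMedia/test.jpg")
+
+   # a made-up (x, y, w, h) detection, e.g. something returned by getDetectionBox
+   boxes = [(50, 50, 120, 120)]
+
+   # crop the detection out of an untouched copy of the frame
+   face = clipImage(frame.copy(), boxes[0])
+
+   # draw the detection boxes onto the frame
+   detectionBox(detectedArr=boxes, frame=frame)
+
+   # write the frame to ./output, indexed by a frame counter
+   saveFrame(frame, 0, destination="./output")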
+
+Detection Classes
+-----------------
+
+.. code:: python
+
+ from opencv_wrap.detectors import Face , Hand, Pose
+
+- Face detection
+- Hand detection
+- Pose detection
- eye detection (yet to be added)
-add code examples here for each feature and utilities
+You can extend the ``detector`` classes to fit your needs. 😊
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For example, subclass a detector and add more functions to it, such as
+actions to take on certain detections.
+
+   **Example**: blur everything but the face. This can be useful when you
+   want to hide the background and keep the focus on the detected object,
+   here a face.
+
+.. code:: python
+
+ import cv2
+ from opencv_wrap import cv2Decorator
+ from opencv_wrap.detectors import Face
+
+ class FaceExtented(Face):
+ def blurEverytingButFace(self, frame, face_coordinate):
+ # make a copy of the frame
+ frameCopy = frame.copy()
+ frame = cv2.blur(frame, (50,50))
+ for (x, y, w, h) in face_coordinate:
+ frame[y : y + h, x : x + w] = frameCopy[y : y + h, x : x + w]
+ return frame
+
+ @cv2Decorator.DetectInEachFrame(detector=FaceExtented(verbose=True),name="face")
+ @cv2Decorator.AccessCamOrVideo(show=True,videoPath="./opencv_wrap/testMedia/test.mp4")
+ @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+   def all_actions(**kwargs):
+ processed = kwargs["face"].detect(kwargs["bgr_frame"])
+ face_coordinate = kwargs["face"].getDetectionBox(
+ processed, kwargs["frame"], draw=False,padding_ratio=0.4)
+ kwargs["frame"] = kwargs["face"].blurEverytingButFace(kwargs["frame"], face_coordinate)
+ return kwargs
+
+ all_actions()
+
+--------------
+
+ .. rubric:: OPEN FOR CONTRIBUTIONS 🤝
+ :name: open-for-contributions
+
+Steps to start contributing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+0. Star the repo 🌟
+1. Fork the repo 👨💻
+2. Clone the repo 📂
+3. Create a new issue 🔖
+4. Make changes 📜
+5. Push the changes 🚀
+6. Create a pull request 🌐
+
+--------------
+
+More Usage Examples
+-------------------
+
+ **Example 1** : Reading a single frame from the directory
+
+.. code:: python
+
+ @cv2Decorator.DetectInEachFrame(
+ detector=cv2.CascadeClassifier(cv2.data.haarcascades+"haarcascade_frontalface_default.xml"),
+ name='face')
+ @cv2Decorator.MirrorFrame()
+ @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+ def all_actions(**kwargs):
+ frame = kwargs['frame']
+       # detect faces using the trained Haar cascade; detectMultiScale detects faces at every scale
+ face_coordinate = kwargs['face'].detectMultiScale(kwargs['greyScale'],1.3,5)
+ detectionBox(detectedArr=face_coordinate, frame=frame)
+ return kwargs
+
+ frame = cv2.imread('./opencv_wrap/testMedia/test.jpg')
+
+ kwargs = all_actions(frame=frame)
+ cv2.imshow('frame',kwargs['frame'])
+ key = cv2.waitKey(0)
+
+..
+
+   **Example 2**: Reading the cam and detecting hands in each frame
+
+.. code:: python
+
+ @cv2Decorator.DetectInEachFrame(
+ detector=Hand(verbose=True),
+ name="hand",
+ )
+ @cv2Decorator.TotalTimeTaken(show=True)
+ @cv2Decorator.AccessCamOrVideo(show=False, fps=12)
+ @cv2Decorator.CalculateFps(draw=True)
+ @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+ def all_actions(**kwargs):
+ mainFrameCopy = kwargs["frame"].copy()
+ processed = kwargs["hand"].detect(kwargs["bgr_frame"])
+ face_coordinate = kwargs["hand"].getDetectionBox(
+ processed, kwargs["frame"], draw=True
+ )
+ kwargs["hand"].getLandmarks(processed, kwargs["frame"],draw=True)
+ # print(len(face_coordinate))
+ kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+ show_all_frames(kwargs, keysToShow=["frame", "detected"])
+ return kwargs
+
+
+ kwargs = all_actions()
+
+..
+
+ **Example 3** : Reading video and detecting Pose in each frame
+
+.. code:: python
+
+ @cv2Decorator.DetectInEachFrame(
+ detector=Pose(verbose=True),
+ name="pose",
+ )
+ @cv2Decorator.TotalTimeTaken(show=True)
+ @cv2Decorator.AccessCamOrVideo(show=False, videoPath="./opencv_wrap/testMedia/test.mp4", fps=12)
+ @cv2Decorator.CalculateFps(draw=True)
+ @cv2Decorator.MirrorFrame()
+ @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+ @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+ def all_actions(**kwargs):
+ mainFrameCopy = kwargs["frame"].copy()
+ processed = kwargs["pose"].detect(kwargs["bgr_frame"])
+ face_coordinate = kwargs["pose"].getDetectionBox(
+ processed, kwargs["frame"], draw=True
+ )
+ kwargs["pose"].getLandmarks(processed, kwargs["frame"],draw=True)
+
+ kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+ show_all_frames(kwargs, keysToShow=["frame", "detected"])
+ return kwargs
+
+
+ all_actions()
+
+..
+
+ **Example 4** : Reading video and saving each frame in a folder
+
+.. code:: python
+
+ from opencv_wrap import cv2Decorator
+ from opencv_wrap.utils.helper import saveFrame
+
+ @cv2Decorator.AccessCamOrVideo(show=True, videoPath="./opencv_wrap/testMedia/test.mp4", )
+ def all_actions(**kwargs):
+ saveFrame(kwargs['frame'],kwargs['frame_count'],destination='./output')
+ return kwargs
+
+ all_actions()
+
+   **Example 5**: Reading a video and showing converted frames in the smart view
+
+.. code:: python
+
+ @cv2Decorator.TotalTimeTaken(show=True)
+ @cv2Decorator.AccessCamOrVideo(show=False, videoPath="./opencv_wrap/testMedia/test.mp4", fps=12)
+ @cv2Decorator.CalculateFps(draw=True)
+ @cv2Decorator.MirrorFrame()
+ @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+ def all_actions(**kwargs):
+ show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame'])
+ return kwargs
+
+ all_actions()
+
+--------------
+
+Future Updates
+==============
+
+- ☐ Face recognition
+- ☐ Eye detection
+- ☐ Object detection
+- ☐ Image classification
+- ☐ Segmentation (decorator)
+- ☐ Make the whole package at least 10x faster using Cython
-.. |GitHub stars| image:: https://img.shields.io/github/stars/rishi23root/opencv.wrap.svg
- :target: https://github.com/rishi23root/opencv.wrap/stargazers
-.. |PyPI| image:: https://img.shields.io/pypi/v/opencv.wrap.svg
- :target: https://pypi.org/project/opencv.wrap/
-.. |GitHub| image:: https://img.shields.io/github/license/rishi23root/opencv.wrap.svg
- :target: https://github.com/rishi23root/opencv.wrap/blob/master/LICENSE
+.. |GitHub stars| image:: https://img.shields.io/github/stars/rishi23root/opencv_wrap.svg
+ :target: https://github.com/rishi23root/opencv_wrap/stargazers
+.. |PyPI| image:: https://img.shields.io/pypi/v/opencv_wrap.svg
+ :target: https://pypi.org/project/opencv_wrap/
+.. |GitHub| image:: https://img.shields.io/github/license/rishi23root/opencv_wrap.svg
+ :target: https://github.com/rishi23root/opencv_wrap/blob/master/LICENSE
.. |PyPI - Python Version| image:: https://img.shields.io/pypi/pyversions/Django.svg
.. |Say Thanks!| image:: https://img.shields.io/badge/Say%20Thanks-:D-1EAEDB.svg
- :target: https://saythanks.io/to/rishi23root27@gmail.com
+ :target: https://saythanks.io/to/rishi23root
diff --git a/newBuild.py b/newBuild.py
index 4f4a26e..f219825 100644
--- a/newBuild.py
+++ b/newBuild.py
@@ -70,5 +70,5 @@ def updated_version(old_version):
# upload to test pypi using twine
print("[info]", "uploading to test pypi")
-# os.system(f'twine upload --repository testpypi dist/{FOLDER_NAME}-{NEW_VERSION}.tar.gz')
-os.system(f"twine upload dist/{FOLDER_NAME}-{NEW_VERSION}.tar.gz")
+os.system(f'twine upload --repository testpypi dist/{FOLDER_NAME}-{NEW_VERSION}.tar.gz')
+# os.system(f"twine upload dist/{FOLDER_NAME}-{NEW_VERSION}.tar.gz --verbose")
\ No newline at end of file
diff --git a/opencv_wrap/cv2Decorator.py b/opencv_wrap/cv2Decorator.py
index 38370b3..1c73749 100644
--- a/opencv_wrap/cv2Decorator.py
+++ b/opencv_wrap/cv2Decorator.py
@@ -1,4 +1,5 @@
import cv2
+import os
import time
from functools import wraps
import traceback
@@ -225,16 +226,17 @@ def wrapper(*args, **kwargs):
try:
# open the webcam capture of the
if videoPath:
+ # check if the path is valid
+ if not os.path.exists(videoPath):
+ raise Exception("Video Path is not valid")
+
cap = cv2.VideoCapture(videoPath)
default_fps = round(cap.get(cv2.CAP_PROP_FPS))
if fps < default_fps:
steps = round(default_fps / fps)
print(
- "default fps of video is :",
- default_fps,
- " moving with steps of :",
- steps,
+ f"video have {default_fps=}, moving with {steps=} in {videoPath}"
)
else:
@@ -297,8 +299,11 @@ def wrapper(*args, **kwargs):
# print(getattr(e, 'message', str(e)))
finally:
cv2.destroyAllWindows()
- cap.release()
-
+ try:
+ cap.release()
+ except UnboundLocalError as e:
+ pass
+
return return_kwargs
return wrapper
@@ -334,7 +339,7 @@ def wrapper(*args, **kwargs):
return inner_wrapper
- # default decorator for more (template)
+ # default decorator template to add more
# def default_decorator(args1 = 1):
# def inner_wrapper(function):
# @wraps(function)
@@ -347,7 +352,6 @@ def wrapper(*args, **kwargs):
# # call only one funtion to run all the basics things
if __name__ == "__main__":
-
@cv2Decorator.TotalTimeTaken(show=True)
@cv2Decorator.DetectInEachFrame(
detector=cv2.CascadeClassifier(
diff --git a/opencv_wrap/detectors/Face.py b/opencv_wrap/detectors/Face.py
index 876e112..7615539 100644
--- a/opencv_wrap/detectors/Face.py
+++ b/opencv_wrap/detectors/Face.py
@@ -1,21 +1,14 @@
# flake8: noqa: E501
import cv2
-from opencv_wrap.utils import Detector
-from opencv_wrap.utils.helper import detectionBox
-
import mediapipe as mp
+from opencv_wrap.utils import DetectorClass
+from opencv_wrap.utils.helper import detectionBox
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
-
-# functionalities
-# 4/ save faces and save them them in a folder
-# 5/ compare faces from folder and current face
-
-
-class FaceDetector(Detector):
+class FaceDetector(DetectorClass):
def __init__(
self,
@@ -32,14 +25,7 @@ def __init__(
min_detection_confidence=min_detection_confidence,
min_tracking_confidence=min_tracking_confidence,
)
- # detectorModule = cv2.CascadeClassifier(
- # cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
- # )
- # laod the detector here
- # self._detector = lambda x: detectorModule.detectMultiScale(x, 1.3, 5)
- # self._detector = self.faceDetectionModule
self._detector = self.processFrame
- # print(self._detector)
def processFrame(self, frame):
"""process frame and extract the face features
@@ -107,6 +93,13 @@ def getDetectionBox(self, processedFrame, frame, padding_ratio=0.2, draw=False):
cx_max += padding_x
cy_max += padding_y
+ # check if the box is within the frame from the given coordinates
+ cx_min = max(0, cx_min)
+ cy_min = max(0, cy_min)
+ cx_max = min(w, cx_max)
+ cy_max = min(h, cy_max)
+
+
face_boxes.append((cx_min, cy_min, cx_max - cx_min, cy_max - cy_min))
if draw:
@@ -114,7 +107,7 @@ def getDetectionBox(self, processedFrame, frame, padding_ratio=0.2, draw=False):
return face_boxes
- def drawLandmarks(self, processedFrame, frame):
+ def getLandmarks(self, processedFrame, frame, draw=False):
"""draw the landmarks on the frame
Parameters
@@ -123,8 +116,10 @@ def drawLandmarks(self, processedFrame, frame):
processed frame to get the landmarks from
frame : np.array
frame to draw on
+ draw : bool, optional
+ draw the landmarks on the frame, by default False
"""
- if processedFrame.multi_face_landmarks:
+ if processedFrame.multi_face_landmarks and draw:
for face_landmarks in processedFrame.multi_face_landmarks:
mp_drawing.draw_landmarks(
image=frame,
@@ -148,22 +143,25 @@ def drawLandmarks(self, processedFrame, frame):
connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_iris_connections_style(),
)
- def saveFaces(self, frame, face_boxes, destination="./test2"):
- """save the detected faces in the destination folder
-
- Parameters
- ----------
- frame : np.array
- frame to save the faces from
- face_boxes : list
- list of face boxes (x,y,w,h)
- destination : str, optional
- destination to save the faces, by default "./test2"
- """
- for i, face_box in enumerate(face_boxes):
- x, y, w, h = face_box
- face = frame[y : y + h, x : x + w]
- cv2.imwrite(f"{destination}/face_{i}.jpg", face)
+ return processedFrame.multi_face_landmarks
+
+ # def compareFaceLandmarks(self, currrentLandmarks, targetLandmarksList: dict):
+ # """compare current landmark from target landmarks and return the matched face key else None
+
+ # Parameters
+ # ----------
+ # currrentLandmarks : face_landmarks
+ # 468 landmarks of the current face
+ # targetLandmarksList : dict
+ # dict of target landmarks with key as face key and value as landmarks
+
+ # Returns
+ # -------
+ # str
+ # matched face key else None
+ # """
+
+ # pass
if __name__ == "__main__":
d1 = FaceDetector(verbose=True)
diff --git a/opencv_wrap/detectors/Hand.py b/opencv_wrap/detectors/Hand.py
index 1e221c8..7700b6f 100644
--- a/opencv_wrap/detectors/Hand.py
+++ b/opencv_wrap/detectors/Hand.py
@@ -1,195 +1,132 @@
-from typing import overload
-import cv2
import mediapipe as mp
-import traceback
+from opencv_wrap.utils import DetectorClass
+from opencv_wrap.utils.helper import detectionBox
-mp_hands = mp.solutions.hands
-class handDetector():
- imgPadding= 30
- def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
- """initialization"""
- self.mode = mode
- self.maxHands = maxHands
- self.detectionCon = detectionCon
- self.trackCon = trackCon
-
- # 'initialization the mp hands'
- self.mpHands = mp_hands
- self.hands = mp_hands.Hands(
- self.mode,
- self.maxHands,
- # self.detectionCon,
- # self.trackCon
- )
- self.mpDraw = mp.solutions.drawing_utils
- self.overlap_shape = (200,200)
+mp_hands = mp.solutions.hands
+mp_drawing = mp.solutions.drawing_utils
+
+
+class HandDetector(DetectorClass):
+
+ def __init__(
+ self,
+ max_num_hands=2,
+ min_detection_confidence=0.5,
+ min_tracking_confidence=0.5,
+ verbose=False,
+ ):
+ if self._detector is None:
+ self.hands = mp_hands.Hands(
+ max_num_hands=max_num_hands,
+ min_detection_confidence=min_detection_confidence,
+ min_tracking_confidence=min_tracking_confidence,
+ )
+ self._detector = self.processFrame
+
+ def processFrame(self, frame):
+ """process frame and extract the hand features
+
+ Parameters
+ ----------
+ frame : np.array
+ frame to process
+
+ Returns
+ -------
+ landmarks
+ output of hands.process function
+ """
+ return self.hands.process(frame)
- def __exit__(self, exc_type, exc_value, traceback):
- self.hands.close()
-
- def findHands(self, frame, draw=False):
- """Detect the hand form the images and return the HandCount and frame"""
- frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- # To improve performance, optionally mark the image as not writeable to
- # pass by reference.
- frameRGB.flags.writeable = False
- self.results = self.hands.process(frameRGB)
- # print(self.results.multi_hand_landmarks)
- if self.results.multi_hand_landmarks:
- for handLms in self.results.multi_hand_landmarks:
- if draw:
- self.mpDraw.draw_landmarks(
- frame,
- handLms,
- self.mpHands.HAND_CONNECTIONS)
- else :
- return len(self.results.multi_hand_landmarks), frame
- else :
- return 0, frame
-
- def findHandsAndPosture(self, frame, handNo=0, draw=False):
- """detect the hand ,posture and then return a int and the frame
- 0: rock
- 1: paper
- 2: scissor
- 3: None
+
+ def getDetectionBox(self, processedFrame, frame, padding_ratio=0.2, draw=False):
+        """return the detected box from the processed frame, here hand
+
+ Parameters
+ ----------
+ processedFrame :
+ output of processFrame function
+ frame : np.array
+ frame to draw the box on
+ padding_ratio : float, optional
+ padding ratio for the box, by default 0.2
+ draw : bool, optional
+ draw the box on the frame, by default False
+
+ Returns
+ -------
+ list
+ list of boxes
"""
- # extract the hands form the image
- frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- # To improve performance, optionally mark the image as not writeable to
- # pass by reference.
- frameRGB.flags.writeable = False
- self.results = self.hands.process(frameRGB)
- # print(self.results.multi_hand_landmarks)
-
- if self.results.multi_hand_landmarks:
- try :
- # get the frame shape and size for future calculation
- h, w, c = frame.shape
- # if hand in list
- handLms = self.results.multi_hand_landmarks[handNo]
- # for testing draw the landmarks
- if draw:
- # print(f"{draw=} Hand Founded ")
- self.mpDraw.draw_landmarks(
- frame,
- handLms,
- self.mpHands.HAND_CONNECTIONS)
-
- end_frame = [0,0]
- start_frame = [w,h]
- lmList = []
- for id,lm in enumerate(handLms.landmark):
- # convert landmark to pixle possitions
- entry = (id, int(lm.x * w), int(lm.y * h))
- # for find the small hand frame
- if entry[1] > end_frame[0]:
- end_frame[0] = entry[1]
- if entry[2] > end_frame[1]:
- end_frame[1] = entry[2]
- if entry[1] < start_frame[0]:
- start_frame[0] = entry[1]
- if entry[2] < start_frame[1]:
- start_frame[1] = entry[2]
- lmList.append(entry)
-
- if draw:
- cv2.rectangle(
+ hand_boxes = []
+ if processedFrame.multi_hand_landmarks:
+ for hand_landmarks in processedFrame.multi_hand_landmarks:
+ h, w, _ = frame.shape
+ cx_min = w
+ cy_min = h
+ cx_max = cy_max = 0
+ for lm in hand_landmarks.landmark:
+ cx, cy = int(lm.x * w), int(lm.y * h)
+ if cx < cx_min:
+ cx_min = cx
+ if cy < cy_min:
+ cy_min = cy
+ if cx > cx_max:
+ cx_max = cx
+ if cy > cy_max:
+ cy_max = cy
+
+                # if there is some space around the detected hand then give some padding to the hand box
+ padding_x = int((cx_max - cx_min) * padding_ratio)
+ padding_y = int((cy_max - cy_min) * padding_ratio)
+
+                # Apply padding to the hand box
+ cx_min -= padding_x
+ cy_min -= padding_y
+ cx_max += padding_x
+ cy_max += padding_y
+
+ # check if the box is within the frame from the given coordinates
+ cx_min = max(0, cx_min)
+ cy_min = max(0, cy_min)
+ cx_max = min(w, cx_max)
+ cy_max = min(h, cy_max)
+
+
+ hand_boxes.append((cx_min, cy_min, cx_max - cx_min, cy_max - cy_min))
+
+ if draw:
+ detectionBox(detectedArr=hand_boxes, frame=frame)
+
+ return hand_boxes
+
+ def getLandmarks(self, processedFrame, frame, draw=False):
+        """return the detected landmarks from the processed frame, here hand
+
+ Parameters
+ ----------
+ processedFrame :
+ output of processFrame function
+ frame : np.array
+ frame to draw the landmarks on
+ draw : bool, optional
+ draw the landmarks on the frame, by default False
+
+ Returns
+ -------
+ list
+ list of landmarks
+ """
+ if processedFrame.multi_hand_landmarks and draw:
+ for hand_landmarks in processedFrame.multi_hand_landmarks:
+ mp_drawing.draw_landmarks(
frame,
- (end_frame[0] + self.imgPadding,end_frame[1] + self.imgPadding),
- (start_frame[0] - self.imgPadding,start_frame[1] - self.imgPadding),
- (0,255,0),
- 2)
-
- # draw on the frame
- at = [0,0]
- try :
- roiImage = cv2.resize(
- frame[
- start_frame[1] - self.imgPadding : end_frame[1] + self.imgPadding,
- start_frame[0] - self.imgPadding : end_frame[0] + self.imgPadding
- ],
- self.overlap_shape)
- # strainght the image and detect the posture
- postureInt, roiImage = self.findPosture(roiImage)
- frame[at[1]:self.overlap_shape[0],at[0]:self.overlap_shape[1]] = roiImage
- except:
- # print(traceback.format_exc())
- postureInt = 0
-
- except IndexError:
- print(f"{handNo=} Hand not Found")
- except Exception as e :
- print(traceback.format_exc())
- # print(getattr(e, 'message', repr(e)))
- # print(getattr(e, 'message', str(e)))
- else :
- # no hand posture detected
- postureInt = 0
-
- return postureInt, frame
-
- def findPosture(self,frame):
- """find the fingurs """
- h,w,c = frame.shape
- count, roiImg = self.findHands(frame,draw=False)
-
-
-
- # convert landmark to pixle possitions
-
- # possitions = [(id, int(lm.x * w), int(lm.y * h)) for id,lm in enumerate(self.results.multi_hand_landmarks[0].landmark)]
-
- # pointing the figers up
- # WRIST
-
-
- # print(possitions)
- # print(possitions)
- # [(0, 103, 186), (1, 68, 173), (2, 47, 146), (3, 35, 122), (4, 22, 103), (5, 79, 105), (6, 76, 69), (7, 76, 47), (8, 76, 28), (9, 103, 105), (10, 108, 67), (11, 112, 44), (12, 117, 24), (13, 124, 113), (14, 134, 79), (15, 141, 57), (16, 146, 39), (17, 142, 127), (18, 159, 105), (19, 169, 90), (20, 177, 77)]
-
- # img = cv2.rotate(roiImage, cv2.ROTATE_90_COUNTERCLOCKWISE)
-
-
- # detect the postureInt and update the value of the variable
- # 1. detect all the figers open ==> paper
- # else :
- # 2. detect middle and index figers
- # else :
- # detect if hand all figure close
-
-
- # print(dir(self.mpHands.HandLandmark) ) --> ['INDEX_FINGER_DIP', 'INDEX_FINGER_MCP', 'INDEX_FINGER_PIP', 'INDEX_FINGER_TIP', 'MIDDLE_FINGER_DIP', 'MIDDLE_FINGER_MCP', 'MIDDLE_FINGER_PIP', 'MIDDLE_FINGER_TIP', 'PINKY_DIP', 'PINKY_MCP', 'PINKY_PIP', 'PINKY_TIP', 'RING_FINGER_DIP', 'RING_FINGER_MCP', 'RING_FINGER_PIP', 'RING_FINGER_TIP', 'THUMB_CMC', 'THUMB_IP', 'THUMB_MCP', 'THUMB_TIP', 'WRIST', '__class__', '__doc__', '__members__', '__module__']
-
- # hand detection first get all the extreams points
- # to cut the frame and use it for detection
- # 1. algo to get all the extream
-
- # sides of the figure
- # edges = [
- # [end_frame[0],-end_frame[1]],
- # [end_frame[0],-start_frame[1]],
- # [-end_frame[1],start_frame[0]],
- # [start_frame[0],-start_frame[1]]
- # ]
-
- # # make the side butoom to flaten the hand is the shortest distance from the base line
- # # print(f"{lmList[self.mpHands.HandLandmark.WRIST]=}")
- # for i in range(len(edges)):
- # if i < 2:
- # a = Cordinate(
- # edges[i],
- # edges[i+1]).perpendicular_distance(*lmList[self.mpHands.HandLandmark.WRIST][1:])
- # else :
- # a = Cordinate(
- # edges[i],
- # edges[0]).perpendicular_distance(*lmList[self.mpHands.HandLandmark.WRIST][1:])
- # print(a)
-
- return 0,roiImg
+ hand_landmarks,
+ mp_hands.HAND_CONNECTIONS)
+ return processedFrame.multi_hand_landmarks
if __name__ == "__main__":
- handDetector()
+ HandDetector()
pass
\ No newline at end of file
diff --git a/opencv_wrap/detectors/Pose.py b/opencv_wrap/detectors/Pose.py
new file mode 100644
index 0000000..ffce1a0
--- /dev/null
+++ b/opencv_wrap/detectors/Pose.py
@@ -0,0 +1,134 @@
+import mediapipe as mp
+from opencv_wrap.utils import DetectorClass
+from opencv_wrap.utils.helper import detectionBox
+
+mp_drawing = mp.solutions.drawing_utils
+mp_drawing_styles = mp.solutions.drawing_styles
+mp_pose = mp.solutions.pose
+
+
+# have to test with multiple people in the frame
+
+class PoseDetector(DetectorClass):
+
+ def __init__(
+ self,
+ min_detection_confidence=0.5,
+ min_tracking_confidence=0.5,
+ verbose=False,
+ ):
+ if self._detector is None:
+ self.pose = mp_pose.Pose(
+ min_detection_confidence=min_detection_confidence,
+ min_tracking_confidence=min_tracking_confidence,
+ )
+ self._detector = self.processFrame
+
+ def processFrame(self, frame):
+ """process frame and extract the pose features
+
+ Parameters
+ ----------
+ frame : np.array
+ frame to process
+
+ Returns
+ -------
+ landmarks
+ output of pose.process function
+ """
+ return self.pose.process(frame)
+
+ def getDetectionBox(self, processedFrame, frame, padding_ratio=0.2, draw=False):
+ """return the detected box from the processed frame, here pose
+
+ Parameters
+ ----------
+ processedFrame :
+ output of processFrame function
+ frame : np.array
+ frame to draw the box on
+ padding_ratio : float, optional
+ padding ratio for the box, by default 0.2
+ draw : bool, optional
+ draw the box on the frame, by default False
+
+ Returns
+ -------
+ list
+ list of boxes
+ """
+ # Implementation for detection box extraction for body pose detection
+ pose_box = []
+ if processedFrame.pose_landmarks:
+ h, w, _ = frame.shape
+ cx_min = w
+ cy_min = h
+ cx_max = cy_max = 0
+ for lm in processedFrame.pose_landmarks.landmark:
+ # get the bounding box of the body
+ cx, cy = int(lm.x * w), int(lm.y * h)
+ if cx < cx_min:
+ cx_min = cx
+ if cy < cy_min:
+ cy_min = cy
+ if cx > cx_max:
+ cx_max = cx
+ if cy > cy_max:
+ cy_max = cy
+
+            # if there is some space around the detected body then give some padding to the pose box
+ padding_x = int((cx_max - cx_min) * padding_ratio)
+ padding_y = int((cy_max - cy_min) * padding_ratio)
+
+            # Apply padding to the pose box
+ cx_min -= padding_x
+ cy_min -= padding_y
+ cx_max += padding_x
+ cy_max += padding_y
+
+ # check if the box is within the frame from the given coordinates
+ cx_min = max(0, cx_min)
+ cy_min = max(0, cy_min)
+ cx_max = min(w, cx_max)
+ cy_max = min(h, cy_max)
+
+ pose_box.append((cx_min, cy_min, cx_max - cx_min, cy_max - cy_min))
+
+
+
+ if draw:
+ detectionBox(detectedArr=pose_box, frame=frame)
+
+ return pose_box
+
+ def getLandmarks(self, processedFrame, frame, draw=False):
+ """return the detected landmarks from the processed frame, here pose
+
+ Parameters
+ ----------
+ processedFrame :
+ output of processFrame function
+ frame : np.array
+ frame to draw the landmarks on
+ draw : bool, optional
+ draw the landmarks on the frame, by default False
+
+ Returns
+ -------
+ list
+ list of landmarks
+ """
+        # Implementation for landmark extraction for body pose detection
+        # draw only when requested and when a pose was actually detected
+        if processedFrame.pose_landmarks and draw:
+            mp_drawing.draw_landmarks(
+                frame,
+                processedFrame.pose_landmarks,
+                mp_pose.POSE_CONNECTIONS,
+                landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style(),
+            )
+
+        return processedFrame.pose_landmarks
+
+if __name__ == "__main__":
+ PoseDetector()
+ pass
diff --git a/opencv_wrap/detectors/__init__.py b/opencv_wrap/detectors/__init__.py
index fcbdc67..a16be44 100644
--- a/opencv_wrap/detectors/__init__.py
+++ b/opencv_wrap/detectors/__init__.py
@@ -1,2 +1,3 @@
from .Face import FaceDetector as Face
-from .Hand import handDetector as Hand
+from .Hand import HandDetector as Hand
+from .Pose import PoseDetector as Pose
diff --git a/opencv_wrap/utils/base.py b/opencv_wrap/utils/base.py
index 40ba5c1..4a6fa07 100644
--- a/opencv_wrap/utils/base.py
+++ b/opencv_wrap/utils/base.py
@@ -33,8 +33,8 @@ def isVerbose(self):
return self.verbose
-class Detector(Singleton):
- """Detector class using Singleton class.
+class DetectorClass(Singleton):
+ """DetectorClass class using Singleton class.
This class can be used as a base class for other detector classes.
handle creation and mangement of the detector instances.
"""
@@ -56,7 +56,7 @@ def detect(self, image: np.ndarray, *args, **kwargs):
"""
if self._detector is None:
raise NotImplementedError(
- f"[{self.__class__.__name__}] Detector not implemented."
+ f"[{self.__class__.__name__}] DetectorClass not implemented."
)
if image is None or not isinstance(image, np.ndarray):
diff --git a/opencv_wrap/utils/helper.py b/opencv_wrap/utils/helper.py
index d07e738..71de903 100644
--- a/opencv_wrap/utils/helper.py
+++ b/opencv_wrap/utils/helper.py
@@ -188,6 +188,8 @@ def combine_images(images, mWidth: int = 400, col: int = 2, compress=True):
# resize all images to the same size
if len(images) == 0:
raise Exception("No images to show, add atleast one image to show the image")
+
+
# first check for no of col to show in a row
eachImageWidth = (mWidth // col) - 2
@@ -206,11 +208,19 @@ def combine_images(images, mWidth: int = 400, col: int = 2, compress=True):
and (i.shape[0] == eachImageWidth and i.shape[1] == eachImageWidth)
]
+
+    # if all the images got filtered out, return a single blank image instead
+ if len(images) == 0:
+ return np.zeros(shape=(eachImageWidth, eachImageWidth, 3), dtype=np.uint8)
+
+
# add border for the images
images = [
cv2.copyMakeBorder(i, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=(0, 0, 0))
for i in images
]
+
+
eachImageWidth += 2
if col == 1:
@@ -218,6 +228,7 @@ def combine_images(images, mWidth: int = 400, col: int = 2, compress=True):
else:
# get how many elements are require to comple the grid showcase
blankImagesToAdd = col - (len(images) % col)
+
if blankImagesToAdd < col:
# add blank images to the images array
images.extend(
@@ -230,6 +241,7 @@ def combine_images(images, mWidth: int = 400, col: int = 2, compress=True):
* blankImagesToAdd
)
+
return np.vstack(
[np.hstack(images[i : i + col]) for i in range(0, len(images), col)]
)
@@ -305,7 +317,7 @@ def show_all_frames(
# print(f"key {i} is array of length",len(dict[i]))
# calculate the space available in the window after showing the other images
# accoring to that show this list of images in HORIZONTAL or VERTICAL
- if expectedWidth > 400:
+ if expectedWidth > 400 and len(dict[i]) > 0:
showcase[i] = added_title(
combine_images(dict[i], mWidth=expectedWidth),
i,
diff --git a/setup.py b/setup.py
index e255c45..eba8479 100644
--- a/setup.py
+++ b/setup.py
@@ -1,15 +1,15 @@
# flake8: noqa: E501
from setuptools import setup, find_packages
-VERSION = "0.0.5"
-DESCRIPTION = "working with opencv can be quite a hussel, a lot of boiler code, nested functions for specific use cases, this package is designed to make it easier to work with opencv, while focusing on the main task in hand."
+VERSION = "0.1.1"
+DESCRIPTION = "Working with OpenCV can be quite a hassle: a lot of boilerplate code and nested functions for specific use cases. This package is designed to make it easier to work with OpenCV while you focus on the task at hand. It is great for prototyping and quick testing, and it is built to be fast and efficient."
setup(
- name="opencv-wrap",
+ name="opencv_wrap",
version=VERSION,
description=DESCRIPTION,
long_description=open("README.rst").read(),
- url="https://github.com/rishi23root/opencv.util",
+ url="https://github.com/rishi23root/opencv_wrap",
author="rishi23root",
author_email="rishi23root@gmail.com",
license="MIT",
@@ -26,7 +26,4 @@
packages=find_packages(),
install_requires=['absl-py>=2.1.0', 'attrs>=23.2.0', 'cffi>=1.16.0', 'contourpy>=1.2.0', 'cycler>=0.12.1', 'flatbuffers>=23.5.26', 'fonttools>=4.49.0', 'jax>=0.4.24', 'kiwisolver>=1.4.5', 'matplotlib>=3.8.3', 'mediapipe>=0.10.10', 'ml-dtypes>=0.3.2', 'numpy>=1.26.4', 'opencv-contrib-python>=4.9.0.80', 'opencv-python>=4.9.0.80', 'opt-einsum>=3.3.0', 'packaging>=23.2', 'pillow>=10.2.0', 'protobuf>=3.20.3', 'pycparser>=2.21', 'pyparsing>=3.1.1', 'python-dateutil>=2.8.2', 'scipy>=1.12.0', 'six>=1.16.0', 'sounddevice>=0.4.6'],
zip_safe=False,
- entry_points={
- "console_scripts": ["pyresparser=pyresparser.command_line:main"],
- },
)
diff --git a/static/Screenshot from 2024-04-16 05-21-50.png b/static/Screenshot from 2024-04-16 05-21-50.png
new file mode 100644
index 0000000..0917929
Binary files /dev/null and b/static/Screenshot from 2024-04-16 05-21-50.png differ
diff --git a/static/Screenshot from 2024-04-16 06-06-26.png b/static/Screenshot from 2024-04-16 06-06-26.png
new file mode 100644
index 0000000..035aa34
Binary files /dev/null and b/static/Screenshot from 2024-04-16 06-06-26.png differ
diff --git a/static/Screenshot-20240416071523-780x68.png b/static/Screenshot-20240416071523-780x68.png
new file mode 100644
index 0000000..ffdaf88
Binary files /dev/null and b/static/Screenshot-20240416071523-780x68.png differ
diff --git a/static/Screenshot-20240416071956-1175x661.png b/static/Screenshot-20240416071956-1175x661.png
new file mode 100644
index 0000000..4f45b13
Binary files /dev/null and b/static/Screenshot-20240416071956-1175x661.png differ
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..adbd78e
--- /dev/null
+++ b/test.py
@@ -0,0 +1,170 @@
+# flake8: noqa: E501
+from opencv_wrap import cv2Decorator
+import cv2
+from opencv_wrap.detectors import Face , Hand, Pose
+from opencv_wrap.utils.helper import saveFrame, detectionBox, show_all_frames, clipImage
+
+
+# reading a single frame from the directory
+
+# @cv2Decorator.DetectInEachFrame(detector=cv2.CascadeClassifier(cv2.data.haarcascades+"haarcascade_frontalface_default.xml"),name='face')
+# @cv2Decorator.MirrorFrame()
+# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+# def all_actions(**kwargs):
+# frame = kwargs['frame']
+# # detect face from trainerd data and detectMultiScale use to deteat every size of face
+# face_coordinate = kwargs['face'].detectMultiScale(kwargs['greyScale'],1.3,5)
+# detectionBox(detectedArr=face_coordinate, frame=frame)
+# return kwargs
+
+# frame = cv2.imread('./opencv_wrap/testMedia/test.jpg')
+
+# kwargs = all_actions(frame=frame)
+# cv2.imshow('frame',kwargs['frame'])
+# key = cv2.waitKey(0)
+
+
+# reading a video from the directory
+
+
+# face detection
+# @cv2Decorator.DetectInEachFrame(
+# detector=Face(verbose=True),
+# name="face",
+# )
+# @cv2Decorator.TotalTimeTaken(show=True)
+# # @cv2Decorator.AccessCamOrVideo(show=False, videoPath="./opencv_wrap/testMedia/test.mp4", fps=12)
+# @cv2Decorator.AccessCamOrVideo(show=False, fps=12)
+# @cv2Decorator.CalculateFps(draw=True)
+# @cv2Decorator.MirrorFrame()
+# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+# def all_actions(**kwargs):
+# # detect face from trainerd data and detectMultiScale use to deteat every size of face
+# # face_coordinate = kwargs["face"].detectMultiScale(kwargs["greyScale"], 1.3, 5)
+# mainFrameCopy = kwargs["frame"].copy()
+# processed = kwargs["face"].detect(kwargs["bgr_frame"])
+# face_coordinate = kwargs["face"].getDetectionBox(
+# processed, kwargs["frame"], draw=True
+# )
+# kwargs["face"].getLandmarks(processed, kwargs["frame"])
+# # print(len(face_coordinate))
+
+# kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+# # saveFrame(frame=frame,count=kwargs['frame_count'],destination='./test2')
+
+# # detectionBox(detectedArr=face_coordinate, frame=kwargs["frame"])
+# # show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame','detected'])
+# # show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame'])
+# show_all_frames(kwargs, keysToShow=["frame", "detected"])
+# return kwargs
+
+
+# kwargs = all_actions()
+
+
+
+# Hand
+# @cv2Decorator.DetectInEachFrame(
+# detector=Hand(verbose=True),
+# name="face",
+# )
+# @cv2Decorator.TotalTimeTaken(show=True)
+# # @cv2Decorator.AccessCamOrVideo(show=False, videoPath="./opencv_wrap/testMedia/test.mp4", fps=12)
+# @cv2Decorator.AccessCamOrVideo(show=False, fps=12)
+# @cv2Decorator.CalculateFps(draw=True)
+# @cv2Decorator.MirrorFrame()
+# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+# def all_actions(**kwargs):
+# # detect face from trainerd data and detectMultiScale use to deteat every size of face
+# # face_coordinate = kwargs["face"].detectMultiScale(kwargs["greyScale"], 1.3, 5)
+# mainFrameCopy = kwargs["frame"].copy()
+# processed = kwargs["face"].detect(kwargs["bgr_frame"])
+# face_coordinate = kwargs["face"].getDetectionBox(
+# processed, kwargs["frame"], draw=True
+# )
+# kwargs["face"].getLandmarks(processed, kwargs["frame"],draw=True)
+# # print(len(face_coordinate))
+
+# kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+# # saveFrame(frame=frame,count=kwargs['frame_count'],destination='./test2')
+
+# # detectionBox(detectedArr=face_coordinate, frame=kwargs["frame"])
+# # show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame','detected'])
+# # show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame'])
+# show_all_frames(kwargs, keysToShow=["frame", "detected"])
+# return kwargs
+
+
+# kwargs = all_actions()
+
+
+# pose detection
+
+# @cv2Decorator.DetectInEachFrame(
+# detector=Pose(verbose=True),
+# name="face",
+# )
+# @cv2Decorator.TotalTimeTaken(show=True)
+# # @cv2Decorator.AccessCamOrVideo(show=False, videoPath="./opencv_wrap/testMedia/test.mp4", fps=12)
+# @cv2Decorator.AccessCamOrVideo(show=False, fps=12)
+# @cv2Decorator.CalculateFps(draw=True)
+# @cv2Decorator.MirrorFrame()
+# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
+# def all_actions(**kwargs):
+# # detect face from trainerd data and detectMultiScale use to deteat every size of face
+# # face_coordinate = kwargs["face"].detectMultiScale(kwargs["greyScale"], 1.3, 5)
+# mainFrameCopy = kwargs["frame"].copy()
+# processed = kwargs["face"].detect(kwargs["bgr_frame"])
+# face_coordinate = kwargs["face"].getDetectionBox(
+# processed, kwargs["frame"], draw=True
+# )
+# kwargs["face"].getLandmarks(processed, kwargs["frame"],draw=True)
+# # print(len(face_coordinate))
+
+# kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
+# # saveFrame(frame=frame,count=kwargs['frame_count'],destination='./test2')
+
+# # detectionBox(detectedArr=face_coordinate, frame=kwargs["frame"])
+# # show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame','detected'])
+# # show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame'])
+# show_all_frames(kwargs, keysToShow=["frame", "detected"])
+# return kwargs
+
+
+# kwargs = all_actions()
+
+
+# reading the cam feed
+# @cv2Decorator.TotalTimeTaken(show=True)
+# @cv2Decorator.DetectInEachFrame(detector=cv2.CascadeClassifier(cv2.data.haarcascades+"haarcascade_frontalface_default.xml"),name='face')
+# @cv2Decorator.AccessCamOrVideo(show=True, videoPath="./opencv_wrap/testMedia/test.mp4", )
+# @cv2Decorator.CalculateFps(draw = True)
+# @cv2Decorator.MirrorFrame()
+# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+# def all_actions(**kwargs):
+# frame = kwargs['frame']
+# # detect face from trainerd data and detectMultiScale use to deteat every size of face
+# face_coordinate = kwargs['face'].detectMultiScale(kwargs['greyScale'],1.3,5)
+# # saveFrame(frame=frame,count=kwargs['frame_count'],destination='./test2')
+# detectionBox(detectedArr=face_coordinate, frame=frame)
+# return kwargs
+
+# a = all_actions()
+# print(a['frame_count'])
+
+
+
+# show converted frames in smart view
+@cv2Decorator.TotalTimeTaken(show=True)
+@cv2Decorator.AccessCamOrVideo(show=False, videoPath="./opencv_wrap/testMedia/test.mp4", fps=12)
+@cv2Decorator.CalculateFps(draw=True)
+@cv2Decorator.MirrorFrame()
+@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
+def all_actions(**kwargs):
+ show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame'])
+ return kwargs
+
+all_actions()
\ No newline at end of file
diff --git a/test/test.py b/test/test.py
deleted file mode 100644
index 000f880..0000000
--- a/test/test.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# flake8: noqa: E501
-from opencv_wrap import cv2Decorator
-import cv2
-from opencv_wrap.detectors import Face
-from opencv_wrap.utils.helper import saveFrame, detectionBox, show_all_frames, clipImage
-
-
-# reading a single frame from the directory
-
-# @cv2Decorator.TotalTimeTaken(show=True)
-# @cv2Decorator.DetectInEachFrame(detector=cv2.CascadeClassifier(cv2.data.haarcascades+"haarcascade_frontalface_default.xml"),name='face')
-# @cv2Decorator.CalculateFps(draw = True)
-# @cv2Decorator.MirrorFrame()
-# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
-# def all_actions(**kwargs):
-# frame = kwargs['frame']
-# # detect face from trainerd data and detectMultiScale use to deteat every size of face
-# face_coordinate = kwargs['face'].detectMultiScale(kwargs['color_converted'],1.3,5)
-# detectionBox(detectedArr=face_coordinate, frame=frame)
-# return kwargs
-
-# frame = cv2.imread('./testMedia/test.jpg')
-
-# kwargs = all_actions(frame=frame)
-# cv2.imshow('frame',kwargs['frame'])
-# key = cv2.waitKey(0)
-
-
-# reading a video from the directory
-
-
-# @cv2Decorator.DetectInEachFrame(
-# detector=cv2.CascadeClassifier(
-# cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
-# ),
-# name="face",
-# )
-@cv2Decorator.DetectInEachFrame(
- detector=Face(verbose=True),
- name="face",
-)
-@cv2Decorator.TotalTimeTaken(show=True)
-# @cv2Decorator.AccessCamOrVideo(show=False, videoPath="./testMedia/test.mp4", fps=12)
-@cv2Decorator.AccessCamOrVideo(show=False, fps=12)
-@cv2Decorator.CalculateFps(draw=True)
-@cv2Decorator.MirrorFrame()
-@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
-@cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_RGB2BGR, frameName="bgr_frame")
-def all_actions(**kwargs):
- # detect face from trainerd data and detectMultiScale use to deteat every size of face
- # face_coordinate = kwargs["face"].detectMultiScale(kwargs["greyScale"], 1.3, 5)
- mainFrameCopy = kwargs["frame"].copy()
- processed = kwargs["face"].detect(kwargs["bgr_frame"])
- face_coordinate = kwargs["face"].getDetectionBox(
- processed, kwargs["frame"], draw=True
- )
- kwargs["face"].drawLandmarks(processed, kwargs["frame"])
- # print(len(face_coordinate))
-
- kwargs["detected"] = [clipImage(mainFrameCopy, i) for i in face_coordinate]
- # saveFrame(frame=frame,count=kwargs['frame_count'],destination='./test2')
-
- # detectionBox(detectedArr=face_coordinate, frame=kwargs["frame"])
- # show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame','detected'])
- # show_all_frames(kwargs,keysToShow=['frame','greyScale','mirror_frame'])
- show_all_frames(kwargs, keysToShow=["frame", "detected"])
- return kwargs
-
-
-kwargs = all_actions()
-
-
-# reading the cam feed
-# @cv2Decorator.TotalTimeTaken(show=True)
-# @cv2Decorator.DetectInEachFrame(detector=cv2.CascadeClassifier(cv2.data.haarcascades+"haarcascade_frontalface_default.xml"),name='face')
-# @cv2Decorator.AccessCamOrVideo(show=True)
-# @cv2Decorator.CalculateFps(draw = True)
-# @cv2Decorator.MirrorFrame()
-# @cv2Decorator.ConvertCOLOR(converter=cv2.COLOR_BGR2GRAY)
-# def all_actions(**kwargs):
-# frame = kwargs['frame']
-# # detect face from trainerd data and detectMultiScale use to deteat every size of face
-# face_coordinate = kwargs['face'].detectMultiScale(kwargs['color_converted'],1.3,5)
-# # saveFrame(frame=frame,count=kwargs['frame_count'],destination='./test2')
-# detectionBox(detectedArr=face_coordinate, frame=frame)
-# return kwargs
-
-# kwargs = all_actions()
-
-# print("last ",all_actions().keys())
-# print("last ",all_actions().keys())
diff --git a/test/test2.py b/test/test2.py
deleted file mode 100644
index f8cb3e6..0000000
--- a/test/test2.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import cv2
-from detectors.Face import Face
-
-if __name__ == "__main__":
- d1 = Face(verbose=True)
- d2 = Face()
-
- image = cv2.imread("./testMedia/test.jpg")
- print(d1.detect(image))
- print("isinstance of face :", isinstance(d1, Face), end=" ")
- print("is i1 == i2 :", d1 == d2)