Code for a hand gesture recognition demo (Jetson Nano 2GB)
Write the Jetson Nano 2GB image to an SD card using balenaEtcher.
On the Jetson, pull the NVIDIA L4T ML container, create a directory to share with it, confirm the USB camera shows up, and start the container:
sudo docker pull nvcr.io/nvidia/l4t-ml:r32.7.1-py3
mkdir ~/l4t-data
lsusb
sudo docker run --runtime nvidia -it --network host --volume ~/l4t-data:/l4t-data --device /dev/video0 nvcr.io/nvidia/l4t-ml:r32.7.1-py3
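The l4t-ml image already ships PyTorch and TensorRT, so a quick sanity check from a Python shell inside the container looks like this (the exact versions depend on the image tag):

import torch, tensorrt
print(torch.__version__)          # PyTorch build shipped with l4t-ml
print(torch.cuda.is_available())  # should print True on the Nano
print(tensorrt.__version__)       # TensorRT Python bindings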
After Jupyter Notebook is running, open a terminal and run the following commands:
git clone https://github.com/NVIDIA-AI-IOT/torch2trt
cd torch2trt
python3 setup.py install --plugins
pip3 install tqdm cython pycocotools
apt-get update && apt-get install -y python3-matplotlib
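torch2trt, installed above, is what trt_pose uses to turn the PyTorch pose model into a TensorRT engine. The general conversion pattern looks like this; the model below is just a placeholder to show the API:

import torch
from torch2trt import torch2trt, TRTModule

# any PyTorch module on the GPU, in eval mode
model = torch.nn.Conv2d(3, 16, kernel_size=3, padding=1).cuda().eval()
x = torch.zeros(1, 3, 224, 224).cuda()               # example input with the real shape

model_trt = torch2trt(model, [x], fp16_mode=True)    # build the TensorRT engine
torch.save(model_trt.state_dict(), 'model_trt.pth')  # cache it so later runs skip the build

# reload without rebuilding
model_trt2 = TRTModule()
model_trt2.load_state_dict(torch.load('model_trt.pth'))
y = model_trt2(x)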
cd /
git clone https://github.com/NVIDIA-AI-IOT/trt_pose
cd trt_pose
python3 setup.py install
cd /
git clone https://github.com/NVIDIA-AI-IOT/jetcam
cd jetcam
python3 setup.py install
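jetcam wraps the USB camera in a small Python API; a minimal check, assuming the camera is /dev/video0:

from jetcam.usb_camera import USBCamera

camera = USBCamera(width=224, height=224, capture_device=0)  # /dev/video0
image = camera.read()   # BGR numpy array, 224x224x3
print(image.shape)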
cd /
git clone https://github.com/NVIDIA-AI-IOT/trt_pose_hand.git
cd trt_pose_hand
pip3 install scikit-learn
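For orientation only, this is a rough sketch of what a gesture-classification script like gesture_classification_print.py can look like when built on these libraries. The file names, input size, and the keypoint-to-feature step are assumptions and will differ from the actual script in the image:

import json, pickle
import cv2
import torch
import trt_pose.coco
from trt_pose.parse_objects import ParseObjects
from torch2trt import TRTModule
from jetcam.usb_camera import USBCamera

WIDTH, HEIGHT = 224, 224                          # assumed network input size
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()

# hand keypoint topology shipped with trt_pose_hand
with open('preprocess/hand_pose.json', 'r') as f:
    hand_pose = json.load(f)
topology = trt_pose.coco.coco_category_to_topology(hand_pose)
parse_objects = ParseObjects(topology)

# TensorRT-optimized pose model and gesture classifier (placeholder file names)
model_trt = TRTModule()
model_trt.load_state_dict(torch.load('model/hand_pose_resnet18_trt.pth'))
with open('svmmodel.sav', 'rb') as f:
    clf = pickle.load(f)                          # scikit-learn classifier over keypoints

camera = USBCamera(width=WIDTH, height=HEIGHT, capture_device=0)

def preprocess(image):
    # BGR -> RGB, scale to [0,1], normalize with ImageNet statistics
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    tensor = torch.from_numpy(image).cuda().float().permute(2, 0, 1) / 255.0
    tensor = (tensor - mean[:, None, None]) / std[:, None, None]
    return tensor[None, ...]

while True:
    frame = camera.read()
    cmap, paf = model_trt(preprocess(frame))
    counts, objects, peaks = parse_objects(cmap.detach().cpu(), paf.detach().cpu())
    # flatten the detected keypoints and classify the gesture
    # (the real script uses a more careful keypoint-to-feature step)
    features = peaks[0].numpy().reshape(1, -1)
    print(clf.predict(features)[0])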
Before launching the demo, allow local Docker containers to access the host X server so the container can use the display (run this on the Jetson, outside the container):
xhost +local:docker
Then run the gesture classification demo from the prebuilt image, forwarding the camera and a USB serial device (adjust the device numbers if yours differ):
sudo docker run --name handV7 --runtime nvidia -it \
-e DISPLAY=$DISPLAY \
--network host \
-v ~/l4t-data:/l4t-data \
--device /dev/video0 \
--device /dev/ttyUSB0 \
--workdir /trt_hand_pose \
felipegalind0/trt_hand_pose:v7 \
python3 gesture_classification_print.py
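The command above also forwards a USB serial device into the container. If your setup drives external hardware over that port from the recognized gesture, a minimal pyserial sketch would look like this; pyserial is not installed by the steps above, and the port, baud rate, and message format are assumptions:

import serial  # pip3 install pyserial

ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
ser.write(b'fist\n')   # e.g. send the name of the classified gesture
ser.close()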
Or run it with the camera only (if the handV7 name is already in use, remove the old container first with docker rm handV7):
sudo docker run --name handV7 --runtime nvidia -it \
-e DISPLAY=$DISPLAY \
--network host \
-v ~/l4t-data:/l4t-data \
--device /dev/video0 \
--workdir /trt_hand_pose \
felipegalind0/trt_hand_pose:v7 \
python3 gesture_classification_print.py