
Merge pull request #10 from bharatk-parallel/rapids

Verified and merged RAPIDS (Data Science) and DeepStream (Intelligent Video Analytics) content
Bharatkumar Sharma 3 years ago
parent commit bc358e6af3
100 changed files with 8089 additions and 0 deletions
  1. 14 0
      ai/DeepStream/Dockerfile
  2. 70 0
      ai/DeepStream/English/python/Start_Here.ipynb
  3. 604 0
      ai/DeepStream/English/python/jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb
  4. 275 0
      ai/DeepStream/English/python/jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb
  5. 597 0
      ai/DeepStream/English/python/jupyter_notebook/Introduction_to_Multi-DNN_pipeline.ipynb
  6. 729 0
      ai/DeepStream/English/python/jupyter_notebook/Multi-stream_Multi_DNN.ipynb
  7. 662 0
      ai/DeepStream/English/python/jupyter_notebook/Multi-stream_Multi_DNN_Solution.ipynb
  8. 624 0
      ai/DeepStream/English/python/jupyter_notebook/Multi-stream_pipeline.ipynb
  9. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/DeepStream_Plugin_Manual.pdf
  10. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/ds-perf.png
  11. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/ds-python-api.png
  12. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/ds-sample-pipeline.png
  13. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/ds-scalability.png
  14. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/ds-sdk.png
  15. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/ds-workflow-split.jpeg
  16. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/ds-workflow.jpg
  17. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/nvinfer.png
  18. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/nvmultistreamtiler.png
  19. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/nvosd.png
  20. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/nvstreammux.png
  21. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/nvtracker.png
  22. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/nvvidconv.png
  23. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/pads.png
  24. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/pipeline.png
  25. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/test1.png
  26. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/test2.png
  27. BIN
      ai/DeepStream/English/python/jupyter_notebook/images/test3.png
  28. 33 0
      ai/DeepStream/English/python/source_code/N1/config_infer_primary_peoplenet.txt
  29. 81 0
      ai/DeepStream/English/python/source_code/N1/dstest1_pgie_config.txt
  30. 31 0
      ai/DeepStream/English/python/source_code/N1/dstest1_pgie_config_trt.txt
  31. 3 0
      ai/DeepStream/English/python/source_code/N1/labels_peoplenet.txt
  32. 81 0
      ai/DeepStream/English/python/source_code/N2/dstest2_pgie_config.txt
  33. 87 0
      ai/DeepStream/English/python/source_code/N2/dstest2_sgie1_config.txt
  34. 87 0
      ai/DeepStream/English/python/source_code/N2/dstest2_sgie2_config.txt
  35. 87 0
      ai/DeepStream/English/python/source_code/N2/dstest2_sgie3_config.txt
  36. 37 0
      ai/DeepStream/English/python/source_code/N2/dstest2_tracker_config.txt
  37. 81 0
      ai/DeepStream/English/python/source_code/N3/dstest3_pgie_config.txt
  38. 81 0
      ai/DeepStream/English/python/source_code/N4/dstest4_pgie_config.txt
  39. 87 0
      ai/DeepStream/English/python/source_code/N4/dstest4_sgie1_config.txt
  40. 87 0
      ai/DeepStream/English/python/source_code/N4/dstest4_sgie2_config.txt
  41. 87 0
      ai/DeepStream/English/python/source_code/N4/dstest4_sgie3_config.txt
  42. 37 0
      ai/DeepStream/English/python/source_code/N4/dstest4_tracker_config.txt
  43. 50 0
      ai/DeepStream/English/python/source_code/common/FPS.py
  44. 0 0
      ai/DeepStream/English/python/source_code/common/__init__.py
  45. BIN
      ai/DeepStream/English/python/source_code/common/__pycache__/FPS.cpython-36.pyc
  46. BIN
      ai/DeepStream/English/python/source_code/common/__pycache__/__init__.cpython-36.pyc
  47. BIN
      ai/DeepStream/English/python/source_code/common/__pycache__/bus_call.cpython-36.pyc
  48. BIN
      ai/DeepStream/English/python/source_code/common/__pycache__/is_aarch_64.cpython-36.pyc
  49. 39 0
      ai/DeepStream/English/python/source_code/common/bus_call.py
  50. 30 0
      ai/DeepStream/English/python/source_code/common/is_aarch_64.py
  51. 29 0
      ai/DeepStream/English/python/source_code/common/utils.py
  52. 50 0
      ai/DeepStream/README.md
  53. 25 0
      ai/DeepStream/Singularity
  54. 38 0
      ai/DeepStream_Perf_Lab/Dockerfile
  55. 64 0
      ai/DeepStream_Perf_Lab/English/python/Start_Here.ipynb
  56. 847 0
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/Introduction_to_Performance_analysis.ipynb
  57. 512 0
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb
  58. 776 0
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/Performance_Analysis_using_NSight_systems_Continued.ipynb
  59. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/Nsight Diagram.png
  60. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/Optimization_Cycle.jpg
  61. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/app-2.png
  62. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/batch_size.PNG
  63. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/batch_size_nvinfer.png
  64. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/covid.png
  65. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/cpu.png
  66. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/cuda.png
  67. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/inference.png
  68. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/laplas3.png
  69. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/nsight_open.png
  70. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/nvstreamux-control.png
  71. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/nvtracker.png
  72. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/nvtx_domain.png
  73. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/pinning_row.png
  74. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/report4.PNG
  75. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/report5.PNG
  76. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/test1.png
  77. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/test2.png
  78. BIN
      ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/thread.png
  79. 81 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest1_pgie_config.txt
  80. 81 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_pgie_config.txt
  81. 87 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_sgie1_config.txt
  82. 87 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_sgie2_config.txt
  83. 87 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_sgie3_config.txt
  84. 37 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_tracker_config.txt
  85. 81 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_pgie_config.txt
  86. 87 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_sgie1_config.txt
  87. 87 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_sgie2_config.txt
  88. 87 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_sgie3_config.txt
  89. 37 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_tracker_config.txt
  90. 81 0
      ai/DeepStream_Perf_Lab/English/python/source_code/N3/dstest1_pgie_config.txt
  91. 49 0
      ai/DeepStream_Perf_Lab/English/python/source_code/common/FPS.py
  92. 0 0
      ai/DeepStream_Perf_Lab/English/python/source_code/common/__init__.py
  93. BIN
      ai/DeepStream_Perf_Lab/English/python/source_code/common/__pycache__/FPS.cpython-36.pyc
  94. BIN
      ai/DeepStream_Perf_Lab/English/python/source_code/common/__pycache__/__init__.cpython-36.pyc
  95. BIN
      ai/DeepStream_Perf_Lab/English/python/source_code/common/__pycache__/bus_call.cpython-36.pyc
  96. BIN
      ai/DeepStream_Perf_Lab/English/python/source_code/common/__pycache__/is_aarch_64.cpython-36.pyc
  97. 39 0
      ai/DeepStream_Perf_Lab/English/python/source_code/common/bus_call.py
  98. 30 0
      ai/DeepStream_Perf_Lab/English/python/source_code/common/is_aarch_64.py
  99. 29 0
      ai/DeepStream_Perf_Lab/English/python/source_code/common/utils.py
  100. 0 0
      ai/DeepStream_Perf_Lab/English/python/source_code/dataset/download_dataset.py

+ 14 - 0
ai/DeepStream/Dockerfile

@@ -0,0 +1,14 @@
+# Select Base Image
+FROM nvcr.io/nvidia/deepstream:5.0-20.07-triton
+# Update the repo
+RUN apt-get update
+# Install required dependencies
+RUN apt-get install ffmpeg python3-gi -y
+# Install required python packages
+WORKDIR /opt/nvidia/deepstream/deepstream/lib
+RUN python3 setup.py install
+WORKDIR /opt/nvidia/deepstream/deepstream-5.0
+RUN pip3 install jupyterlab
+COPY English /opt/nvidia/deepstream/deepstream-5.0
+
+CMD jupyter notebook --no-browser --allow-root --ip=0.0.0.0 --port=8889 --NotebookApp.token="" --notebook-dir=/opt/nvidia/deepstream/deepstream-5.0/python

+ 70 - 0
ai/DeepStream/English/python/Start_Here.ipynb

@@ -0,0 +1,70 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Welcome to Deepstream and Intelligent Video Analytics Bootcamp\n",
+    "\n",
+    "Welcome to the Deepstream and Intelligent Video Analytics Bootcamp!\n",
+    "\n",
+    "The objective of this Bootcamp is to give you an introduction to the NVIDIA DeepStream SDK 5.0: the fundamentals of the SDK and how it can be used to build and deploy AI-powered Intelligent Video Analytics apps and services. DeepStream offers a multi-platform scalable framework with TLS security to deploy on the edge and connect to any cloud.\n",
+    "\n",
+    "\n",
+    "The following content will be covered during the bootcamp:\n",
+    "\n",
+    "\n",
+    "- [**Introduction to Deepstream and Gstreamer**](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb)\n",
+    "    - [DeepStream](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#DeepStream) \n",
+    "    - [Overview of the DeepStream SDK](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#Overview-of-the-DeepStream-SDK)\n",
+    "    - [GStreamer Foundations](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#GStreamer-Foundations)\n",
+    "        - [Elements](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#Elements)\n",
+    "        - [Pipeline](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#Pipeline)\n",
+    "        - [Pads](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#Pads)\n",
+    "        - [Caps](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#Caps)\n",
+    "        - [Buffers](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#Buffers)\n",
+    "        - [Plugin based Architecture](jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb#Plugin-based-Architecture)\n",
+    "\n",
+    "- [**Getting started with Deepstream Pipeline**](jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "    - [NVIDIA DeepStream Plugins](jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb#NVIDIA-DeepStream-Plugins) \n",
+    "        - [Nvinfer](jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb#Nvinfer)\n",
+    "        - [Nvvidconv](jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb#Nvvidconv)\n",
+    "        - [Nvosd](jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb#Nvosd)\n",
+    "    - [Building the pipeline](jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb#Building-the-pipeline)\n",
+    "    - [Understanding the configuration file](jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb#Understanding-the-configuration-file)\n",
+    "    - [Working with the Metadata](jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb#Working-with-the-Metadata)\n",
+    "- [**Introduction to Multi-DNN pipeline**](jupyter_notebook/Introduction_to_Multi-DNN_pipeline.ipynb)\n",
+    "    - [Changes in configuration for a Multi-DNN pipeline](jupyter_notebook/Introduction_to_Multi-DNN_pipeline.ipynb#Changes-in-configuration)\n",
+    "    - [Nvtracker](jupyter_notebook/Introduction_to_Multi-DNN_pipeline.ipynb#Nvtracker)\n",
+    "    - [Building the pipeline](jupyter_notebook/Introduction_to_Multi-DNN_pipeline.ipynb#Building-the-Pipeline) \n",
+    "\n",
+    "- [**Multi-stream pipeline**](jupyter_notebook/Multi-stream_pipeline.ipynb)\n",
+    "    - [Nvmultistreamtiler](jupyter_notebook/Multi-stream_pipeline.ipynb#Nvmultistreamtiler) \n",
+    "    - [Building the pipeline](jupyter_notebook/Multi-stream_pipeline.ipynb#Building-the-pipeline)\n",
+    "\n",
+    "- [**Exercise: Multi-stream Multi-DNN pipeline**](jupyter_notebook/Multi-stream_Multi_DNN.ipynb)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

+ 604 - 0
ai/DeepStream/English/python/jupyter_notebook/Getting_started_with_Deepstream_Pipeline.ipynb

@@ -0,0 +1,604 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&ensp;\n",
+    "[Home Page](Start_Here.ipynb)\n",
+    "    \n",
+    "    \n",
+    "[Previous Notebook](Introduction_to_Deepstream_and_Gstreamer.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;\n",
+    "[1](Introduction_to_Deepstream_and_Gstreamer.ipynb)\n",
+    "[2]\n",
+    "[3](Introduction_to_Multi-DNN_pipeline.ipynb)\n",
+    "[4](Multi-stream_pipeline.ipynb)\n",
+    "[5](Multi-stream_Multi_DNN.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "[Next Notebook](Introduction_to_Multi-DNN_pipeline.ipynb)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Getting started with Deepstream pipeline\n",
+    "\n",
+    "In this notebook, you will get started with DeepStream's Python bindings and its workflow, and build a 4-class object detection pipeline. \n",
+    "\n",
+    "\n",
+    "**Contents of this Notebook:**\n",
+    "\n",
+    "- [NVIDIA DeepStream Plugins](#NVIDIA-DeepStream-Plugins) \n",
+    "    - [Nvinfer](#Nvinfer)\n",
+    "    - [Nvvidconv](#Nvvidconv)\n",
+    "    - [Nvosd](#Nvosd)\n",
+    "- [Building the pipeline](#Building-the-pipeline)\n",
+    "- [Understanding the configuration file](#Understanding-the-configuration-file)\n",
+    "- [Working with the Metadata](#Working-with-the-Metadata)    "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will be building a 4-class object detection pipeline as shown in the illustration below. \n",
+    "\n",
+    "![Test1](images/test1.png)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We notice there are multiple DeepStream plugins used in the pipeline. Let us have a look at them and try to understand them. \n",
+    "\n",
+    "## NVIDIA DeepStream Plugins\n",
+    "\n",
+    "### Nvinfer\n",
+    "\n",
+    "The nvinfer plugin provides [TensorRT](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html)-based inference for detection and tracking. The low-level library (libnvds_infer) operates on float RGB or BGR planar data with dimensions of the network height and network width. The plugin accepts NV12/RGBA data from upstream components like the decoder, muxer, and dewarper.\n",
+    "The Gst-nvinfer plugin also performs preprocessing operations like format conversion, scaling, mean subtraction, and produces final float RGB/BGR planar data which is passed to the low-level library. The low-level library uses the TensorRT engine for inferencing. It outputs each classified object’s class and each detected object’s bounding boxes (Bboxes) after clustering.\n",
+    "\n",
+    "![NVINFER](images/nvinfer.png)\n",
+    "\n",
+    "### Nvvidconv \n",
+    "\n",
+    "The nvvidconv plugin performs color format conversions, which is required to make the data ready for the nvosd plugin.\n",
+    "\n",
+    "![NVVIDCONV](images/nvvidconv.png)\n",
+    "\n",
+    "\n",
+    "### Nvosd\n",
+    "\n",
+    "The nvosd plugin draws bounding boxes, text, and RoI (Regions of Interest) polygons (Polygons are presented as a set of lines). The plugin accepts an RGBA buffer with attached metadata from the upstream component. It\n",
+    "draws bounding boxes, which may be shaded depending on the configuration (e.g. width, color, and opacity) of a given bounding box. It also draws text and RoI polygons at specified locations in the frame. Text and polygon parameters are configurable through metadata.\n",
+    "\n",
+    "![NVOSD](images/nvosd.png)\n",
+    "\n",
+    "\n",
+    "Now, with this background, let us get started with building the pipeline."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Building the pipeline \n",
+    "\n",
+    "![Test1](images/test1.png)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import Required Libraries \n",
+    "import sys\n",
+    "sys.path.append('../source_code')\n",
+    "import gi\n",
+    "import time\n",
+    "gi.require_version('Gst', '1.0')\n",
+    "from gi.repository import GObject, Gst\n",
+    "from common.bus_call import bus_call\n",
+    "import pyds\n",
+    "\n",
+    "# Defining the Class Labels\n",
+    "PGIE_CLASS_ID_VEHICLE = 0\n",
+    "PGIE_CLASS_ID_BICYCLE = 1\n",
+    "PGIE_CLASS_ID_PERSON = 2\n",
+    "PGIE_CLASS_ID_ROADSIGN = 3\n",
+    "\n",
+    "# Defining the input output video file \n",
+    "INPUT_VIDEO_NAME  = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "OUTPUT_VIDEO_NAME = \"../source_code/N1/ds_out.mp4\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We define a function `make_elm_or_print_err()` to create our elements and report any errors if the creation fails.\n",
+    "\n",
+    "Elements are created using the `Gst.ElementFactory.make()` function, which is part of the GStreamer library."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Make Element or Print Error and any other detail\n",
+    "def make_elm_or_print_err(factoryname, name, printedname, detail=\"\"):\n",
+    "  print(\"Creating\", printedname)\n",
+    "  elm = Gst.ElementFactory.make(factoryname, name)\n",
+    "  if not elm:\n",
+    "     sys.stderr.write(\"Unable to create \" + printedname + \" \\n\")\n",
+    "  if detail:\n",
+    "     sys.stderr.write(detail)\n",
+    "  return elm"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Initialise GStreamer and Create an Empty Pipeline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Standard GStreamer initialization\n",
+    "GObject.threads_init()\n",
+    "Gst.init(None)\n",
+    "\n",
+    "\n",
+    "# Create gstreamer elements\n",
+    "# Create Pipeline element that will form a connection of other elements\n",
+    "print(\"Creating Pipeline \\n \")\n",
+    "pipeline = Gst.Pipeline()\n",
+    "\n",
+    "if not pipeline:\n",
+    "    sys.stderr.write(\" Unable to create Pipeline \\n\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Create Elements that are required for our pipeline "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########### Create Elements required for the Pipeline ########### \n",
+    "# Source element for reading from the file\n",
+    "source = make_elm_or_print_err(\"filesrc\", \"file-source\",\"Source\")\n",
+    "# Since the data format in the input file is elementary h264 stream we need a h264parser\n",
+    "h264parser = make_elm_or_print_err(\"h264parse\", \"h264-parser\",\"h264 parse\")\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "decoder = make_elm_or_print_err(\"nvv4l2decoder\", \"nvv4l2-decoder\",\"Nvv4l2 Decoder\")\n",
+    "# Create nvstreammux instance to form batches from one or more sources.\n",
+    "streammux = make_elm_or_print_err(\"nvstreammux\", \"Stream-muxer\",'NvStreamMux')\n",
+    "# Use nvinfer to run inferencing on the decoder's output; inferencing behaviour is set through the config file\n",
+    "pgie = make_elm_or_print_err(\"nvinfer\", \"primary-inference\" ,\"pgie\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv = make_elm_or_print_err(\"nvvideoconvert\", \"convertor\",\"nvvidconv\")\n",
+    "# Create OSD to draw on the converted RGBA buffer\n",
+    "nvosd = make_elm_or_print_err(\"nvdsosd\", \"onscreendisplay\",\"nvosd\")\n",
+    "# Finally encode and save the osd output\n",
+    "queue = make_elm_or_print_err(\"queue\", \"queue\", \"Queue\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv2 = make_elm_or_print_err(\"nvvideoconvert\", \"convertor2\",\"nvvidconv2\")\n",
+    "# Place an encoder instead of OSD to save as video file\n",
+    "encoder = make_elm_or_print_err(\"avenc_mpeg4\", \"encoder\", \"Encoder\")\n",
+    "# Parse output from Encoder \n",
+    "codeparser = make_elm_or_print_err(\"mpeg4videoparse\", \"mpeg4-parser\", 'Code Parser')\n",
+    "# Create a container\n",
+    "container = make_elm_or_print_err(\"qtmux\", \"qtmux\", \"Container\")\n",
+    "# Create Sink for storing the output \n",
+    "sink = make_elm_or_print_err(\"filesink\", \"filesink\", \"Sink\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have created the elements, we can set various properties for our pipeline at this point. \n",
+    "\n",
+    "### Understanding the configuration file \n",
+    "\n",
+    "We set a `config-file-path` for our nvinfer (inference plugin), and it points to the file `dstest1_pgie_config.txt`.\n",
+    "\n",
+    "You can have a look at the [file](../source_code/N1/dstest1_pgie_config.txt).\n",
+    "\n",
+    "Here are some parts of the configuration file:\n",
+    "\n",
+    "```\n",
+    "# Copyright (c) 2020 NVIDIA Corporation.  All rights reserved.\n",
+    "#\n",
+    "# NVIDIA Corporation and its licensors retain all intellectual property\n",
+    "# and proprietary rights in and to this software, related documentation\n",
+    "# and any modifications thereto.  Any use, reproduction, disclosure or\n",
+    "# distribution of this software and related documentation without an express\n",
+    "# license agreement from NVIDIA Corporation is strictly prohibited.\n",
+    "\n",
+    "[property]\n",
+    "gpu-id=0\n",
+    "net-scale-factor=0.0039215697906911373\n",
+    "model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel\n",
+    "proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt\n",
+    "#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine\n",
+    "labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt\n",
+    "int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin\n",
+    "force-implicit-batch-dim=1\n",
+    "batch-size=1\n",
+    "network-mode=1\n",
+    "process-mode=1\n",
+    "model-color-format=0\n",
+    "num-detected-classes=4\n",
+    "interval=0\n",
+    "gie-unique-id=1\n",
+    "output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid\n",
+    "\n",
+    "[class-attrs-all]\n",
+    "pre-cluster-threshold=0.2\n",
+    "eps=0.2\n",
+    "group-threshold=1\n",
+    "```\n",
+    "\n",
+    "Here we define all the parameters of our model. In this example we use the model file `resnet10`. `Nvinfer` creates a TensorRT engine specific to the host GPU to accelerate its inference performance. A short sketch of reading this configuration file programmatically follows below."
+   ]
+  },
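Since the nvinfer configuration is plain INI-style text, it can also be inspected programmatically before the pipeline runs. A minimal sketch using Python's standard configparser, with the relative path taken from the cells above:

```python
# Sketch: read the nvinfer config shown above and print a few key
# inference parameters (INI layout as added in this PR).
import configparser

cfg = configparser.ConfigParser()
cfg.read("../source_code/N1/dstest1_pgie_config.txt")

props = cfg["property"]
print("batch-size          :", props.get("batch-size"))
print("network-mode        :", props.get("network-mode"))   # 0=FP32, 1=INT8, 2=FP16
print("num-detected-classes:", props.get("num-detected-classes"))
```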
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "############ Set properties for the Elements ############\n",
+    "print(\"Playing file \",INPUT_VIDEO_NAME)\n",
+    "# Set Input File Name \n",
+    "source.set_property('location', INPUT_VIDEO_NAME)\n",
+    "# Set Input Width , Height and Batch Size \n",
+    "streammux.set_property('width', 1920)\n",
+    "streammux.set_property('height', 1080)\n",
+    "streammux.set_property('batch-size', 1)\n",
+    "# Timeout in microseconds to wait after the first buffer is available \n",
+    "# to push the batch even if a complete batch is not formed.\n",
+    "streammux.set_property('batched-push-timeout', 4000000)\n",
+    "# Set configuration file for nvinfer \n",
+    "pgie.set_property('config-file-path', \"../source_code/N1/dstest1_pgie_config.txt\")\n",
+    "# Set Encoder bitrate for output video\n",
+    "encoder.set_property(\"bitrate\", 2000000)\n",
+    "# Set Output file name and disable sync and async\n",
+    "sink.set_property(\"location\", OUTPUT_VIDEO_NAME)\n",
+    "sink.set_property(\"sync\", 0)\n",
+    "sink.set_property(\"async\", 0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now link all the elements in the order we prefer and create a GStreamer bus to feed all messages through it. \n",
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########## Add and Link Elements in the Pipeline ########## \n",
+    "\n",
+    "print(\"Adding elements to Pipeline \\n\")\n",
+    "\n",
+    "pipeline.add(source)\n",
+    "pipeline.add(h264parser)\n",
+    "pipeline.add(decoder)\n",
+    "pipeline.add(streammux)\n",
+    "pipeline.add(pgie)\n",
+    "pipeline.add(nvvidconv)\n",
+    "pipeline.add(nvosd)\n",
+    "pipeline.add(queue)\n",
+    "pipeline.add(nvvidconv2)\n",
+    "pipeline.add(encoder)\n",
+    "pipeline.add(codeparser)\n",
+    "pipeline.add(container)\n",
+    "pipeline.add(sink)\n",
+    "\n",
+    "# We now  link the elements together \n",
+    "# file-source -> h264-parser -> nvh264-decoder -> streammux -> nvinfer -> nvvidconv ->\n",
+    "# nvosd -> queue -> nvvidconv2 -> encoder -> parser -> container -> sink -> output-file\n",
+    "print(\"Linking elements in the Pipeline \\n\")\n",
+    "source.link(h264parser)\n",
+    "h264parser.link(decoder)\n",
+    "\n",
+    "##### Creating sink and source pads and linking them together \n",
+    "\n",
+    "# Create Sinkpad to Streammux \n",
+    "sinkpad = streammux.get_request_pad(\"sink_0\")\n",
+    "if not sinkpad:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "# Create source pad from Decoder   \n",
+    "srcpad = decoder.get_static_pad(\"src\")\n",
+    "if not srcpad:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad.link(sinkpad)\n",
+    "streammux.link(pgie)\n",
+    "pgie.link(nvvidconv)\n",
+    "nvvidconv.link(nvosd)\n",
+    "nvosd.link(queue)\n",
+    "queue.link(nvvidconv2)\n",
+    "nvvidconv2.link(encoder)\n",
+    "encoder.link(codeparser)\n",
+    "codeparser.link(container)\n",
+    "container.link(sink)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create an event loop and feed GStreamer bus messages to it\n",
+    "loop = GObject.MainLoop()\n",
+    "bus = pipeline.get_bus()\n",
+    "bus.add_signal_watch()\n",
+    "bus.connect (\"message\", bus_call, loop)"
+   ]
+  },
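The bus_call helper imported from common/bus_call.py (added by this PR, though its body is not shown in this hunk) is the usual DeepStream Python sample handler. It typically looks along these lines, quitting the main loop on end-of-stream or error; treat this as a sketch, since the file in the PR may differ slightly:

```python
# Typical shape of common/bus_call.py (sketch)
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

def bus_call(bus, message, loop):
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    return True
```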
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Working with the Metadata \n",
+    "\n",
+    "Our pipeline now carries the metadata forward, but so far we have not done anything with it. As mentioned in the pipeline diagram above, we will now create a callback function that writes the relevant data on the frame, and attach a probe to the sink pad of the `nvosd` element to call this function."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "############## Working with the Metadata ################\n",
+    "\n",
+    "def osd_sink_pad_buffer_probe(pad,info,u_data):\n",
+    "    \n",
+    "    # Initializing object counter with 0.\n",
+    "    obj_counter = {\n",
+    "        PGIE_CLASS_ID_VEHICLE:0,\n",
+    "        PGIE_CLASS_ID_PERSON:0,\n",
+    "        PGIE_CLASS_ID_BICYCLE:0,\n",
+    "        PGIE_CLASS_ID_ROADSIGN:0\n",
+    "    }\n",
+    "    # Set frame_number & rectangles to draw as 0 \n",
+    "    frame_number=0\n",
+    "    num_rects=0\n",
+    "    \n",
+    "    gst_buffer = info.get_buffer()\n",
+    "    if not gst_buffer:\n",
+    "        print(\"Unable to get GstBuffer \")\n",
+    "        return\n",
+    "\n",
+    "    # Retrieve batch metadata from the gst_buffer\n",
+    "    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the\n",
+    "    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)\n",
+    "    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))\n",
+    "    l_frame = batch_meta.frame_meta_list\n",
+    "    while l_frame is not None:\n",
+    "        try:\n",
+    "            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta\n",
+    "            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "        \n",
+    "        # Get frame number, number of rectangles to draw, and object metadata\n",
+    "        frame_number=frame_meta.frame_num\n",
+    "        num_rects = frame_meta.num_obj_meta\n",
+    "        l_obj=frame_meta.obj_meta_list\n",
+    "        \n",
+    "        while l_obj is not None:\n",
+    "            try:\n",
+    "                # Casting l_obj.data to pyds.NvDsObjectMeta\n",
+    "                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "            # Increment the object class count by 1 and set the box border color (R, G, B, A)\n",
+    "            obj_counter[obj_meta.class_id] += 1\n",
+    "            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)\n",
+    "            try: \n",
+    "                l_obj=l_obj.next\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "        ################## Setting Metadata Display configuration ############### \n",
+    "        # Acquiring a display meta object.\n",
+    "        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)\n",
+    "        display_meta.num_labels = 1\n",
+    "        py_nvosd_text_params = display_meta.text_params[0]\n",
+    "        # Setting display text to be shown on screen\n",
+    "        py_nvosd_text_params.display_text = \"Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}\".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])\n",
+    "        # Now set the offsets where the string should appear\n",
+    "        py_nvosd_text_params.x_offset = 10\n",
+    "        py_nvosd_text_params.y_offset = 12\n",
+    "        # Font , font-color and font-size\n",
+    "        py_nvosd_text_params.font_params.font_name = \"Serif\"\n",
+    "        py_nvosd_text_params.font_params.font_size = 10\n",
+    "        # Set(red, green, blue, alpha); Set to White\n",
+    "        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)\n",
+    "        # Text background color\n",
+    "        py_nvosd_text_params.set_bg_clr = 1\n",
+    "        # Set(red, green, blue, alpha); set to Black\n",
+    "        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)\n",
+    "        # Using pyds.get_string() to get display_text as string to print in notebook\n",
+    "        print(pyds.get_string(py_nvosd_text_params.display_text))\n",
+    "        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)\n",
+    "        \n",
+    "        ############################################################################\n",
+    "        \n",
+    "        try:\n",
+    "            l_frame=l_frame.next\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "    return Gst.PadProbeReturn.OK"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Let's add a probe to get informed of the generated metadata; we add the probe to the sink pad\n",
+    "# of the osd element, since by that time the buffer will have all the metadata.\n",
+    "\n",
+    "osdsinkpad = nvosd.get_static_pad(\"sink\")\n",
+    "if not osdsinkpad:\n",
+    "    sys.stderr.write(\" Unable to get sink pad of nvosd \\n\")\n",
+    "    \n",
+    "osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, with everything defined, we can start the playback and listen to the events."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# start play back and listen to events\n",
+    "print(\"Starting pipeline \\n\")\n",
+    "start_time = time.time()\n",
+    "pipeline.set_state(Gst.State.PLAYING)\n",
+    "try:\n",
+    "    loop.run()\n",
+    "except:\n",
+    "    pass\n",
+    "# cleanup\n",
+    "pipeline.set_state(Gst.State.NULL)\n",
+    "print(\"--- %s seconds ---\" % (time.time() - start_time))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Convert video profile to be compatible with Jupyter notebook\n",
+    "!ffmpeg -loglevel panic -y -an -i ../source_code/N1/ds_out.mp4 -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 ../source_code/N1/output.mp4"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Display the Output\n",
+    "from IPython.display import HTML\n",
+    "HTML(\"\"\"\n",
+    " <video width=\"640\" height=\"480\" controls>\n",
+    " <source src=\"../source_code/N1/output.mp4\" type=\"video/mp4\">\n",
+    " </video>\n",
+    "\"\"\".format())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "In the next notebook, we will learn about object tracking and build an attribute classification pipeline on top of the primary inference built in this notebook.\n",
+    "\n",
+    "## Licensing\n",
+    "  \n",
+    "This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "[Previous Notebook](Introduction_to_Deepstream_and_Gstreamer.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;\n",
+    "[1](Introduction_to_Deepstream_and_Gstreamer.ipynb#)\n",
+    "[2]\n",
+    "[3](Introduction_to_Multi-DNN_pipeline.ipynb)\n",
+    "[4](Multi-stream_pipeline.ipynb)\n",
+    "[5](Multi-stream_Multi_DNN.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "[Next Notebook](Introduction_to_Multi-DNN_pipeline.ipynb)\n",
+    "\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&ensp;\n",
+    "[Home Page](Start_Here.ipynb)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

+ 275 - 0
ai/DeepStream/English/python/jupyter_notebook/Introduction_to_Deepstream_and_Gstreamer.ipynb

@@ -0,0 +1,275 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;\n",
+    "[Home Page](Start_Here.ipynb)\n",
+    "    \n",
+    "    \n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&ensp;\n",
+    "[1]\n",
+    "[2](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "[3](Introduction_to_Multi-DNN_pipeline.ipynb)\n",
+    "[4](Multi-stream_pipeline.ipynb)\n",
+    "[5](Multi-stream_Multi_DNN.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "[Next Notebook](Getting_started_with_Deepstream_Pipeline.ipynb)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Introduction to DeepStream\n",
+    "\n",
+    "In this notebook, you will be introduced to DeepStream, its workflow, and the underlying principles upon which it works. \n",
+    "\n",
+    "\n",
+    "**Contents of this Notebook:**\n",
+    "\n",
+    "- [DeepStream](#DeepStream) \n",
+    "- [Overview of the DeepStream SDK](#Overview-of-the-DeepStream-SDK)\n",
+    "- [GStreamer Foundations](#GStreamer-Foundations)\n",
+    "    - [Elements](#Elements)\n",
+    "    - [Pipeline](#Pipeline)\n",
+    "    - [Pads](#Pads)\n",
+    "    - [Caps](#Caps)\n",
+    "    - [Buffers](#Buffers)\n",
+    "    - [Plugin based Architecture](#Plugin-based-Architecture)\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## DeepStream \n",
+    "\n",
+    "![Workflow](images/ds-workflow.jpg)\n",
+    "\n",
+    "DeepStream simplifies building IVA applications by separating the application into components managed and built by the user and components managed by DeepStream.\n",
+    "\n",
+    "As developers, we build components to manage important business tasks like :\n",
+    "- Selecting the kind and number of video streams we want to analyze\n",
+    "- Choosing the type of analysis we want to do on the video\n",
+    "- Handling and interacting with the results of our analysis\n",
+    "    \n",
+    "We don't need to build components to manage difficult tasks like:\n",
+    "- Efficiently leveraging the GPU for accelerated processing and inference\n",
+    "- Efficiently processing data from multiple video streams at once\n",
+    "- Keeping track of metadata associated with each frame of video from multiple sources\n",
+    "- Optimizing our pipeline for maximum data throughput\n",
+    "- Optimizing our neural networks for high-speed inference\n",
+    "\n",
+    "These are mundane tasks that the **DeepStream SDK** manages for us. That lets us focus on the more important tasks related to the project's goal and impact. DeepStream lets us focus on the intelligence portion of the application.\n",
+    "\n",
+    "Here is an illustration of the DeepStream workflow that shows which tasks are handled by DeepStream and which by the developer: \n",
+    "\n",
+    "![Workflow_split](images/ds-workflow-split.jpeg)\n",
+    "\n",
+    "\n",
+    "### Performance and Scalability \n",
+    "\n",
+    "\n",
+    "#### Performance\n",
+    "\n",
+    "To give an overview of the performance improvement when using the DeepStream SDK, we will look at the deepstream-app reference application included in the release package. The illustration below shows that performance roughly doubles with DeepStream 3.0 on a T4 GPU compared to the previous-generation P4, at the same power consumption and for an equal number of streams. The reference application includes a primary detector, three classifiers, and a tracker.\n",
+    "\n",
+    "![Performance](images/ds-perf.png)\n",
+    "\n",
+    "\n",
+    "#### Scalability\n",
+    "\n",
+    "DeepStream provides scalability at different levels of the system hierarchy. \n",
+    "\n",
+    "For example, \n",
+    "- The DeepStream SDK 3.0 supports processing a higher number of concurrent streams, in addition to utilizing multiple GPUs upon availability. \n",
+    "- The DeepStream SDK 4.0 delivers a unified code base for all NVIDIA GPUs and quick integration with IoT services.\n",
+    "\n",
+    "Furthermore, running DeepStream in containers provides flexibility in the deployment phase, as shown below:\n",
+    "\n",
+    "![Scalability](images/ds-scalability.png)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Overview of the DeepStream SDK\n",
+    "\n",
+    "The DeepStream SDK consists of a set of building blocks which bridge the gap between low level APIs (such as TensorRT, Video Codec SDK) and the user application. By utilizing the DeepStream SDK, you can accelerate development of the IVA applications by focusing on building core deep learning models instead of designing end-to-end applications from scratch.\n",
+    "\n",
+    "Below, you can see a schematic presentation of the DeepStream SDK in a series of potential\n",
+    "applications.\n",
+    "\n",
+    "![SDK](images/ds-sdk.png)\n",
+    "\n",
+    "\n",
+    "The DeepStream SDK extends these capabilities by providing several other hardware-accelerated building blocks, including support for TensorRT 7 and CUDA 11. In addition, DeepStream applications can be deployed as part of a larger multi-GPU cluster or as a microservice in containers. This allows highly flexible system architectures and opens up new application capabilities.\n",
+    "\n",
+    "Below, you can see a shortened list of new capabilities provided by DeepStream:\n",
+    "- Allowing video streams to be added and removed dynamically during pipeline execution, in addition to frame rate and resolution adjustments\n",
+    "- Extending the video processing capabilities by supporting custom layers, and user-defined parsing of detector outputs\n",
+    "- Providing support for 360-degree cameras using GPU-accelerated dewarping libraries\n",
+    "- Augmenting the meta-data with application-specific, user-defined insights\n",
+    "- Providing pruned and efficient inference models\n",
+    "- Getting detailed performance analysis with the NVIDIA Nsight Systems profiler tool.\n",
+    "\n",
+    "\n",
+    "The DeepStream SDK is based on the **GStreamer multimedia framework** and provides a pipeline of GPU accelerated plugins as shown below. The SDK facilitates application implementation procedure by providing plugins for video inputs, video decoding, image preprocessing, TensorRT-based inference, object tracking and display. You can utilize these capabilities to assemble flexible, multi-stream video analytics applications.\n",
+    "\n",
+    "![Sample_pipeline](images/ds-sample-pipeline.png)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# GStreamer Foundations\n",
+    "\n",
+    "The DeepStream SDK is based on the open source [GStreamer multimedia framework](https://gstreamer.freedesktop.org/). There are a few key concepts in GStreamer that we need to\n",
+    "touch on before getting started. These include Pipelines, Elements, Pads, Buffers, and Caps. We will be\n",
+    "describing them at a high level, but encourage those who are interested in the details to read the\n",
+    "[GStreamer Basics](https://gstreamer.freedesktop.org/documentation/?gi-language=c) documentation to learn more.\n",
+    "\n",
+    "\n",
+    "### Elements \n",
+    "\n",
+    "Elements are the core building blocks with which we make pipelines. Everything between the source (i.e. the input of the pipeline, e.g. cameras and video files) and the sink elements (e.g. screen display) passes through elements. Video decoding and encoding, neural network inference, and displaying text on top of video streams are all examples of elements. DeepStream allows us to instantiate elements and weave them into pipelines.\n",
+    "\n",
+    "\n",
+    "### Pipeline \n",
+    "\n",
+    "All elements in GStreamer must typically be contained inside a pipeline before they can be used, because the pipeline takes care of clocking and messaging functions.\n",
+    "\n",
+    "A pipeline is a particular type of bin, which is the element used to contain other elements. Therefore, all methods that apply to bins also apply to pipelines. We add the elements to the pipeline and then link them; this linking must follow the data flow (that is, from source elements to sink elements). \n",
+    "\n",
+    "![pipeline](images/pipeline.png)\n",
+    "\n",
+    "### Pads\n",
+    "\n",
+    "Pads are the interfaces between elements. When data flows from one element to another in a pipeline, it flows from the source pad of one element to the sink pad of the other. Note that each element might have zero, one, or many source/sink pads.\n",
+    "\n",
+    "\n",
+    "![pads](images/pads.png)\n",
+    "\n",
+    "### Caps\n",
+    "\n",
+    "Caps (or Capabilities) are the data types that a pad is permitted to utilize or emit. Because pads\n",
+    "can allow multiple data types, sometimes the data flow is ambiguous. Pads are \"negotiated\" in\n",
+    "order to explicitly define the type of data that can flow through the pad. Caps streamline this\n",
+    "process and allow elements of our pipeline with ambiguous pads to negotiate the correct data flow\n",
+    "process. Later in this course, we will use caps to pass certain video data types (NV12, RGB) to the\n",
+    "downstream elements in the pipeline.\n",
+    "\n",
+    "### Buffers\n",
+    "\n",
+    "Buffers carry the data that is passed through the pipeline. Buffers are timestamped, contain\n",
+    "metadata such as how many elements are using it, flags, and pointers to objects in memory. When\n",
+    "we write application code, we rely on accessing data attached to the buffer.\n",
+    "\n",
+    "### Plugin based Architecture\n",
+    "\n",
+    "DeepStream applications can be thought of as pipelines consisting of individual components\n",
+    "(plugins). Each plugin represents a functional block like inference using TensorRT or multi-stream\n",
+    "decode. Where applicable, plugins are accelerated using the underlying hardware to deliver\n",
+    "maximum performance. DeepStream’s key value is in making deep learning for video easily\n",
+    "accessible, to allow you to concentrate on quickly building and customizing efficient and scalable\n",
+    "video analytics applications.\n",
+    "\n",
+    "The plugin architecture provides functionality such as video encode/decode, scaling, inferencing, and more. By connecting plugins into a pipeline, we can build complex applications. Because DeepStream is built on top of GStreamer, we can inspect plugins using `gst-inspect-1.0`.\n"
+   ]
+  },
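To make Elements, Pads, Caps, and Pipelines concrete before moving on to DeepStream plugins, here is a minimal pure-GStreamer pipeline in Python. It uses only stock plugins (videotestsrc, capsfilter, fakesink) and is purely illustrative, not part of the notebooks in this PR:

```python
# Minimal GStreamer sketch: videotestsrc -> capsfilter -> fakesink
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

pipeline = Gst.Pipeline()                             # a pipeline is a bin that holds elements
src = Gst.ElementFactory.make("videotestsrc", "src")  # source element: generates test video
caps = Gst.ElementFactory.make("capsfilter", "caps")  # caps pin down the negotiated format
sink = Gst.ElementFactory.make("fakesink", "sink")    # sink element: discards buffers

caps.set_property("caps", Gst.Caps.from_string("video/x-raw,format=NV12,width=640,height=480"))
src.set_property("num-buffers", 10)                   # emit 10 buffers, then send EOS

for elm in (src, caps, sink):
    pipeline.add(elm)
src.link(caps)   # data flows from the source pad of one element to the sink pad of the next
caps.link(sink)

pipeline.set_state(Gst.State.PLAYING)
pipeline.get_bus().timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                                      Gst.MessageType.EOS | Gst.MessageType.ERROR)
pipeline.set_state(Gst.State.NULL)
```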
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# To make sure that the right paths to the NVIDIA libraries are set, run this cell (a plain `!export` would not persist, since each `!` command runs in its own subshell)\n",
+    "!rm -f ~/.cache/gstreamer-1.0/registry.x86_64.bin\n",
+    "import os; os.environ['LD_LIBRARY_PATH'] = '/opt/tensorrtserver/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/.singularity.d/libs:' + os.environ.get('LD_LIBRARY_PATH', '')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Inspect the nvinfer plugin\n",
+    "!gst-inspect-1.0 nvinfer"
+   ]
+  },
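The same registry information that gst-inspect-1.0 prints is also reachable from Python. A small sketch using the standard GStreamer bindings (the factory is only found when the DeepStream plugins are installed, as in this container):

```python
# Sketch: inspect the nvinfer factory from Python instead of the CLI
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
factory = Gst.ElementFactory.find("nvinfer")  # None if the plugin is not installed
if factory is not None:
    print("description:", factory.get_metadata("description"))
    for tmpl in factory.get_static_pad_templates():
        print(tmpl.name_template, tmpl.direction.value_nick)
```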
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " ## Licensing\n",
+    "  \n",
+    "This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).\n",
+    "    \n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&ensp;\n",
+    "[1]\n",
+    "[2](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "[3](Introduction_to_Multi-DNN_pipeline.ipynb)\n",
+    "[4](Multi-stream_pipeline.ipynb)\n",
+    "[5](Multi-stream_Multi_DNN.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "[Next Notebook](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;\n",
+    "[Home Page](Start_Here.ipynb)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

+ 597 - 0
ai/DeepStream/English/python/jupyter_notebook/Introduction_to_Multi-DNN_pipeline.ipynb

@@ -0,0 +1,597 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&ensp;\n",
+    "[Home Page](Start_Here.ipynb)\n",
+    "    \n",
+    "    \n",
+    "[Previous Notebook](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;\n",
+    "[1](Introduction_to_Deepstream_and_Gstreamer.ipynb)\n",
+    "[2](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "[3]\n",
+    "[4](Multi-stream_pipeline.ipynb)\n",
+    "[5](Multi-stream_Multi_DNN.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "[Next Notebook](Multi-stream_pipeline.ipynb)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Introduction to Multi-DNN pipeline \n",
+    "\n",
+    "In the previous notebook we learnt how to make a simple DeepStream pipeline for object detection. In this notebook we take the idea forward and learn to build a multi-class object detection, tracking, and attribute classification pipeline.\n",
+    "\n",
+    "\n",
+    "**Contents of this Notebook:**\n",
+    "\n",
+    "- [Changes in configuration for a Multi-DNN pipeline](#Changes-in-configuration)\n",
+    "- [Nvtracker](#Nvtracker)\n",
+    "- [Building the pipeline](#Building-the-Pipeline) \n",
+    "\n",
+    "![test2](images/test2.png)\n",
+    "\n",
+    "We can explore the architecture diagram of the application above. Here, we have 3 additional models that identify the car color, make, and type respectively. Plugging in additional models is like adding the original classifier; however, there are configuration considerations to take care of. A new idea that we will be using in this notebook is the nvtracker plugin.  \n",
+    "\n",
+    "### Changes in configuration\n",
+    "\n",
+    "Because these secondary classifiers are only intended to execute on objects that we believe are vehicles, we will need to add new configuration parameters to generate this behavior. Two new parameters, `operate-on-gie-id` and `operate-on-class-ids`, will let us control this behavior.\n",
+    "\n",
+    "The first, `operate-on-gie-id`, lets us configure a classifier to only execute on objects from a\n",
+    "different classifier. In this case, we will configure the secondary classifier to only execute on\n",
+    "objects detected by the primary classifier. The second, `operate-on-class-ids`, lets us configure a\n",
+    "classifier to only execute on objects of a specific class. By combining these two, our secondary\n",
+    "classifiers will be configured to only evaluate the make, model, and color of objects classified as\n",
+    "cars by our primary model.\n",
+    "\n"
+   ]
+  },
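In the secondary-GIE config files added under source_code/N2/, these two keys live in the [property] section. A quick sketch to confirm how a given classifier is scoped (path as added in this PR; key names as in the DeepStream samples):

```python
# Sketch: check which detector output a secondary classifier is scoped to
import configparser

cfg = configparser.ConfigParser()
cfg.read("../source_code/N2/dstest2_sgie1_config.txt")

props = cfg["property"]
print("operate-on-gie-id   :", props.get("operate-on-gie-id"))     # unique id of the primary GIE
print("operate-on-class-ids:", props.get("operate-on-class-ids"))  # e.g. 0 = vehicle
```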
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Nvtracker\n",
+    "\n",
+    "The plugin accepts NV12/RGBA data from the upstream component and scales (converts) the input buffer to a Luma buffer with a specific tracker width and height. (Tracker width and height must be specified in the configuration file's [tracker] section.) \n",
+    "\n",
+    "The low-level library uses a CPU-based implementation of the Kanade-Lucas-Tomasi (KLT) tracker algorithm. The plugin also supports the Intersection over Union (IoU) tracker algorithm, which uses the intersection of the detector’s bounding boxes across frames to determine the object's unique ID.\n",
+    "\n",
+    "![nvtracker](images/nvtracker.png)\n",
+    "\n",
+    "The tracker component updates the object’s metadata with a tracker-id. After this component, we\n",
+    "add three cascaded secondary neural network classifiers. These classifiers work on the objects\n",
+    "detected as “vehicles or cars”. The first classifier classifies the car color. The second classifier\n",
+    "classifies the car make, and the third classifier classifies car type (e.g. coupe, sedan, etc.). Each\n",
+    "classifier, after inference on a car object, will append the metadata to their results. Then, the\n",
+    "application, using a callback function, can access the metadata to understand and analyze the\n",
+    "attributes of the objects."
+   ]
+  },
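Unlike nvinfer, the tracker element does not take a single config-file-path property; the reference Python samples copy the [tracker] keys onto the element one by one. A sketch of that pattern, assuming `tracker` is the nvtracker element created later in this notebook and property names as in the standard deepstream-test2 sample:

```python
# Sketch: mirror the [tracker] config section onto the nvtracker element,
# following the reference deepstream-test2 Python sample.
import configparser

config = configparser.ConfigParser()
config.read('../source_code/N2/dstest2_tracker_config.txt')

for key in config['tracker']:
    if key in ('tracker-width', 'tracker-height', 'gpu-id', 'enable-batch-process'):
        tracker.set_property(key, config.getint('tracker', key))  # integer-valued keys
    elif key in ('ll-lib-file', 'll-config-file'):
        tracker.set_property(key, config.get('tracker', key))     # path-valued keys
```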
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Building the Pipeline\n",
+    "\n",
+    "Let us now build the pipeline in a similar fashion as described in the previous notebook."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import Required Libraries \n",
+    "import sys\n",
+    "sys.path.append('../source_code')\n",
+    "import gi\n",
+    "import time\n",
+    "import configparser\n",
+    "gi.require_version('Gst', '1.0')\n",
+    "from gi.repository import GObject, Gst\n",
+    "from common.bus_call import bus_call\n",
+    "import pyds\n",
+    "\n",
+    "# Defining the Class Labels\n",
+    "PGIE_CLASS_ID_VEHICLE = 0\n",
+    "PGIE_CLASS_ID_BICYCLE = 1\n",
+    "PGIE_CLASS_ID_PERSON = 2\n",
+    "PGIE_CLASS_ID_ROADSIGN = 3\n",
+    "\n",
+    "# Defining the input output video file \n",
+    "INPUT_VIDEO_NAME  = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "OUTPUT_VIDEO_NAME = \"../source_code/N2/ds_out.mp4\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We define a function `make_elm_or_print_err()` to create our elements and report any errors if the creation fails.\n",
+    "\n",
+    "Elements are created using the `Gst.ElementFactory.make()` function as part of Gstreamer library."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Make Element or Print Error and any other detail\n",
+    "def make_elm_or_print_err(factoryname, name, printedname, detail=\"\"):\n",
+    "  print(\"Creating\", printedname)\n",
+    "  elm = Gst.ElementFactory.make(factoryname, name)\n",
+    "  if not elm:\n",
+    "     sys.stderr.write(\"Unable to create \" + printedname + \" \\n\")\n",
+    "  if detail:\n",
+    "     sys.stderr.write(detail)\n",
+    "  return elm"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Initialise GStreamer and Create an Empty Pipeline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Standard GStreamer initialization\n",
+    "GObject.threads_init()\n",
+    "Gst.init(None)\n",
+    "\n",
+    "# Create gstreamer elements\n",
+    "# Create Pipeline element that will form a connection of other elements\n",
+    "print(\"Creating Pipeline \\n \")\n",
+    "pipeline = Gst.Pipeline()\n",
+    "\n",
+    "if not pipeline:\n",
+    "    sys.stderr.write(\" Unable to create Pipeline \\n\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Create Elements that are required for our pipeline "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########### Create Elements required for the Pipeline ########### \n",
+    "# Source element for reading from the file\n",
+    "source = make_elm_or_print_err(\"filesrc\", \"file-source\",\"Source\")\n",
+    "# Since the data format in the input file is elementary h264 stream we need a h264parser\n",
+    "h264parser = make_elm_or_print_err(\"h264parse\", \"h264-parser\",\"h264 parse\")\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "decoder = make_elm_or_print_err(\"nvv4l2decoder\", \"nvv4l2-decoder\",\"Nvv4l2 Decoder\")\n",
+    "# Create nvstreammux instance to form batches from one or more sources.\n",
+    "streammux = make_elm_or_print_err(\"nvstreammux\", \"Stream-muxer\",'NvStreamMux')\n",
+    "# Use nvinfer to run inferencing on decoder's output, behaviour of inferencing is set through config file\n",
+    "pgie = make_elm_or_print_err(\"nvinfer\", \"primary-inference\" ,\"pgie\")\n",
+    "# Use nvtracker to give objects unique-ids\n",
+    "tracker = make_elm_or_print_err(\"nvtracker\", \"tracker\",'tracker')\n",
+    "# Seconday inference for Finding Car Color\n",
+    "sgie1 = make_elm_or_print_err(\"nvinfer\", \"secondary1-nvinference-engine\",'sgie1')\n",
+    "# Seconday inference for Finding Car Make\n",
+    "sgie2 = make_elm_or_print_err(\"nvinfer\", \"secondary2-nvinference-engine\",'sgie2')\n",
+    "# Seconday inference for Finding Car Type\n",
+    "sgie3 = make_elm_or_print_err(\"nvinfer\", \"secondary3-nvinference-engine\",'sgie3')\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv = make_elm_or_print_err(\"nvvideoconvert\", \"convertor\",\"nvvidconv\")\n",
+    "# Create OSD to draw on the converted RGBA buffer\n",
+    "nvosd = make_elm_or_print_err(\"nvdsosd\", \"onscreendisplay\",\"nvosd\")\n",
+    "# Finally encode and save the osd output\n",
+    "queue = make_elm_or_print_err(\"queue\", \"queue\", \"Queue\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv2 = make_elm_or_print_err(\"nvvideoconvert\", \"convertor2\",\"nvvidconv2\")\n",
+    "# Place an encoder instead of OSD to save as video file\n",
+    "encoder = make_elm_or_print_err(\"avenc_mpeg4\", \"encoder\", \"Encoder\")\n",
+    "# Parse output from Encoder \n",
+    "codeparser = make_elm_or_print_err(\"mpeg4videoparse\", \"mpeg4-parser\", 'Code Parser')\n",
+    "# Create a container\n",
+    "container = make_elm_or_print_err(\"qtmux\", \"qtmux\", \"Container\")\n",
+    "# Create Sink for storing the output \n",
+    "sink = make_elm_or_print_err(\"filesink\", \"filesink\", \"Sink\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have created the elements ,we can now set various properties for out pipeline at this point. \n",
+    "\n",
+    "For the sgie1 , sgie2 and sgie3 , we use `operate-on-gie-id` and `operate-on-class-ids`, we configured the pipeline to only evaluate the make, model, and color of objects classified as cars by our primary model.\n",
+    "\n",
+    "You can access the configuration files here : [pgie](../source_code/N2/dstest2_pgie_config.txt) , [sgie1](../source_code/N2/dstest2_sgie1_config.txt) , [sgie2](../source_code/N2/dstest2_sgie2_config.txt) , [sgie3](../source_code/N2/dstest2_sgie3_config.txt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "############ Set properties for the Elements ############\n",
+    "print(\"Playing file %s\",INPUT_VIDEO_NAME)\n",
+    "# Set Input File Name \n",
+    "source.set_property('location', INPUT_VIDEO_NAME)\n",
+    "# Set Input Width , Height and Batch Size \n",
+    "streammux.set_property('width', 1920)\n",
+    "streammux.set_property('height', 1080)\n",
+    "streammux.set_property('batch-size', 1)\n",
+    "# Set Timeout in microseconds to wait after the first buffer is  \n",
+    "# available to push the batch even if a complete batch is not formed.\n",
+    "streammux.set_property('batched-push-timeout', 4000000)\n",
+    "# Set Congifuration file for nvinfer \n",
+    "pgie.set_property('config-file-path', \"../source_code/N2/dstest2_pgie_config.txt\")\n",
+    "sgie1.set_property('config-file-path', \"../source_code/N2/dstest2_sgie1_config.txt\")\n",
+    "sgie2.set_property('config-file-path', \"../source_code/N2/dstest2_sgie2_config.txt\")\n",
+    "sgie3.set_property('config-file-path', \"../source_code/N2/dstest2_sgie3_config.txt\")\n",
+    "#Set properties of tracker from tracker_config\n",
+    "config = configparser.ConfigParser()\n",
+    "config.read('../source_code/N2/dstest2_tracker_config.txt')\n",
+    "config.sections()\n",
+    "for key in config['tracker']:\n",
+    "    if key == 'tracker-width' :\n",
+    "        tracker_width = config.getint('tracker', key)\n",
+    "        tracker.set_property('tracker-width', tracker_width)\n",
+    "    if key == 'tracker-height' :\n",
+    "        tracker_height = config.getint('tracker', key)\n",
+    "        tracker.set_property('tracker-height', tracker_height)\n",
+    "    if key == 'gpu-id' :\n",
+    "        tracker_gpu_id = config.getint('tracker', key)\n",
+    "        tracker.set_property('gpu_id', tracker_gpu_id)\n",
+    "    if key == 'll-lib-file' :\n",
+    "        tracker_ll_lib_file = config.get('tracker', key)\n",
+    "        tracker.set_property('ll-lib-file', tracker_ll_lib_file)\n",
+    "    if key == 'll-config-file' :\n",
+    "        tracker_ll_config_file = config.get('tracker', key)\n",
+    "        tracker.set_property('ll-config-file', tracker_ll_config_file)\n",
+    "    if key == 'enable-batch-process' :\n",
+    "        tracker_enable_batch_process = config.getint('tracker', key)\n",
+    "        tracker.set_property('enable_batch_process', tracker_enable_batch_process)\n",
+    "\n",
+    "# Set Encoder bitrate for output video\n",
+    "encoder.set_property(\"bitrate\", 2000000)\n",
+    "# Set Output file name and disable sync and async\n",
+    "sink.set_property(\"location\", OUTPUT_VIDEO_NAME)\n",
+    "sink.set_property(\"sync\", 0)\n",
+    "sink.set_property(\"async\", 0)"
+   ]
+  },
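+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Optionally, we can sanity-check that the properties took effect by reading them back with `get_property()`. This is only a quick verification sketch and is not required for the pipeline to run."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sanity check (sketch): read back a few of the properties set above\n",
+    "print(\"streammux batch-size :\", streammux.get_property('batch-size'))\n",
+    "print(\"pgie config file     :\", pgie.get_property('config-file-path'))\n",
+    "print(\"sink location        :\", sink.get_property('location'))"
+   ]
+  },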
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now link all the elements in the order we prefer and create Gstreamer bus to feed all messages through it. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########## Add and Link ELements in the Pipeline ########## \n",
+    "\n",
+    "print(\"Adding elements to Pipeline \\n\")\n",
+    "pipeline.add(source)\n",
+    "pipeline.add(h264parser)\n",
+    "pipeline.add(decoder)\n",
+    "pipeline.add(streammux)\n",
+    "pipeline.add(pgie)\n",
+    "pipeline.add(tracker)\n",
+    "pipeline.add(sgie1)\n",
+    "pipeline.add(sgie2)\n",
+    "pipeline.add(sgie3)\n",
+    "pipeline.add(nvvidconv)\n",
+    "pipeline.add(nvosd)\n",
+    "pipeline.add(queue)\n",
+    "pipeline.add(nvvidconv2)\n",
+    "pipeline.add(encoder)\n",
+    "pipeline.add(codeparser)\n",
+    "pipeline.add(container)\n",
+    "pipeline.add(sink)\n",
+    "\n",
+    "# We now  link the elements together \n",
+    "# file-source -> h264-parser -> nvh264-decoder -> nvinfer -> nvtracker -> \n",
+    "# nvinfer_secondary1 -> nvinfer_secondary2 -> nvinfer_secondary2 -> nvvidconv ->\n",
+    "# queue -> nvvidconv2 -> encoder -> parser -> container -> sink -> output-file\n",
+    "print(\"Linking elements in the Pipeline \\n\")\n",
+    "\n",
+    "source.link(h264parser)\n",
+    "h264parser.link(decoder)\n",
+    "\n",
+    "sinkpad = streammux.get_request_pad(\"sink_0\")\n",
+    "if not sinkpad:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad = decoder.get_static_pad(\"src\")\n",
+    "if not srcpad:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad.link(sinkpad)\n",
+    "streammux.link(pgie)\n",
+    "pgie.link(tracker)\n",
+    "tracker.link(sgie1)\n",
+    "sgie1.link(sgie2)\n",
+    "sgie2.link(sgie3)\n",
+    "sgie3.link(nvvidconv)\n",
+    "nvvidconv.link(nvosd)\n",
+    "nvosd.link(queue)\n",
+    "queue.link(nvvidconv2)\n",
+    "nvvidconv2.link(encoder)\n",
+    "encoder.link(codeparser)\n",
+    "codeparser.link(container)\n",
+    "container.link(sink)"
+   ]
+  },
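+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As an optional debugging aid, GStreamer can dump the assembled graph to a Graphviz `.dot` file. Note that this only works if the `GST_DEBUG_DUMP_DOT_DIR` environment variable was set before GStreamer was initialised; otherwise the call below is a no-op."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional (sketch): dump the pipeline graph as a Graphviz .dot file.\n",
+    "# Requires GST_DEBUG_DUMP_DOT_DIR to be set before Gst.init() is called.\n",
+    "Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL, 'multi-dnn-pipeline')"
+   ]
+  },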
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# create and event loop and feed gstreamer bus mesages to it\n",
+    "loop = GObject.MainLoop()\n",
+    "\n",
+    "bus = pipeline.get_bus()\n",
+    "bus.add_signal_watch()\n",
+    "bus.connect (\"message\", bus_call, loop)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Our pipeline now carries the metadata forward but we have not done anything with it until now, but as mentoioned in the above pipeline diagram , we will now create a callback function to write relevant data on the frame once called and create a sink pad in the nvosd element and link it to the callback function. \n",
+    "\n",
+    "This callback function is the same as used in the previous notebook."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "############## Working with the Metadata ################\n",
+    "\n",
+    "def osd_sink_pad_buffer_probe(pad,info,u_data):\n",
+    "    \n",
+    "    #Intiallizing object counter with 0.\n",
+    "    obj_counter = {\n",
+    "        PGIE_CLASS_ID_VEHICLE:0,\n",
+    "        PGIE_CLASS_ID_PERSON:0,\n",
+    "        PGIE_CLASS_ID_BICYCLE:0,\n",
+    "        PGIE_CLASS_ID_ROADSIGN:0\n",
+    "    }\n",
+    "    # Set frame_number & rectangles to draw as 0 \n",
+    "    frame_number=0\n",
+    "    num_rects=0\n",
+    "    \n",
+    "    gst_buffer = info.get_buffer()\n",
+    "    if not gst_buffer:\n",
+    "        print(\"Unable to get GstBuffer \")\n",
+    "        return\n",
+    "\n",
+    "    # Retrieve batch metadata from the gst_buffer\n",
+    "    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the\n",
+    "    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)\n",
+    "    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))\n",
+    "    l_frame = batch_meta.frame_meta_list\n",
+    "    while l_frame is not None:\n",
+    "        try:\n",
+    "            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta\n",
+    "            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "        \n",
+    "        # Get frame number , number of rectables to draw and object metadata\n",
+    "        frame_number=frame_meta.frame_num\n",
+    "        num_rects = frame_meta.num_obj_meta\n",
+    "        l_obj=frame_meta.obj_meta_list\n",
+    "        \n",
+    "        while l_obj is not None:\n",
+    "            try:\n",
+    "                # Casting l_obj.data to pyds.NvDsObjectMeta\n",
+    "                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "            # Increment Object class by 1 and Set Box border to Red color     \n",
+    "            obj_counter[obj_meta.class_id] += 1\n",
+    "            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)\n",
+    "            try: \n",
+    "                l_obj=l_obj.next\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "        ################## Setting Metadata Display configruation ############### \n",
+    "        # Acquiring a display meta object.\n",
+    "        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)\n",
+    "        display_meta.num_labels = 1\n",
+    "        py_nvosd_text_params = display_meta.text_params[0]\n",
+    "        # Setting display text to be shown on screen\n",
+    "        py_nvosd_text_params.display_text = \"Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}\".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])\n",
+    "        # Now set the offsets where the string should appear\n",
+    "        py_nvosd_text_params.x_offset = 10\n",
+    "        py_nvosd_text_params.y_offset = 12\n",
+    "        # Font , font-color and font-size\n",
+    "        py_nvosd_text_params.font_params.font_name = \"Serif\"\n",
+    "        py_nvosd_text_params.font_params.font_size = 10\n",
+    "        # Set(red, green, blue, alpha); Set to White\n",
+    "        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)\n",
+    "        # Text background color\n",
+    "        py_nvosd_text_params.set_bg_clr = 1\n",
+    "        # Set(red, green, blue, alpha); set to Black\n",
+    "        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)\n",
+    "        # Using pyds.get_string() to get display_text as string to print in notebook\n",
+    "        print(pyds.get_string(py_nvosd_text_params.display_text))\n",
+    "        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)\n",
+    "        \n",
+    "        ############################################################################\n",
+    "        \n",
+    "        try:\n",
+    "            l_frame=l_frame.next\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "    return Gst.PadProbeReturn.OK"
+   ]
+  },
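+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The probe above counts detected objects but does not read the labels produced by the three secondary classifiers. As a sketch of how those attributes could be accessed, each object's `classifier_meta_list` can be walked with the `pyds` casts shown below. This helper is illustrative and is not wired into the probe above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: reading secondary classifier labels from an object's metadata.\n",
+    "# 'obj_meta' is assumed to be a pyds.NvDsObjectMeta, e.g. the one obtained\n",
+    "# inside the object loop of the probe function above.\n",
+    "def print_classifier_labels(obj_meta):\n",
+    "    l_cls = obj_meta.classifier_meta_list\n",
+    "    while l_cls is not None:\n",
+    "        try:\n",
+    "            cls_meta = pyds.NvDsClassifierMeta.cast(l_cls.data)\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "        l_label = cls_meta.label_info_list\n",
+    "        while l_label is not None:\n",
+    "            try:\n",
+    "                label_info = pyds.NvDsLabelInfo.cast(l_label.data)\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "            # unique_component_id identifies which SGIE produced this label\n",
+    "            print(cls_meta.unique_component_id, label_info.result_label)\n",
+    "            try:\n",
+    "                l_label = l_label.next\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "        try:\n",
+    "            l_cls = l_cls.next\n",
+    "        except StopIteration:\n",
+    "            break"
+   ]
+  },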
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Lets add probe to get informed of the meta data generated, we add probe to the sink pad  \n",
+    "# of the osd element, since by that time, the buffer would have had got all the metadata.\n",
+    "\n",
+    "osdsinkpad = nvosd.get_static_pad(\"sink\")\n",
+    "if not osdsinkpad:\n",
+    "    sys.stderr.write(\" Unable to get sink pad of nvosd \\n\")\n",
+    "    \n",
+    "osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now with everything defined , we can start the playback and listen the events."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# start play back and listen to events\n",
+    "print(\"Starting pipeline \\n\")\n",
+    "start_time = time.time()\n",
+    "pipeline.set_state(Gst.State.PLAYING)\n",
+    "try:\n",
+    "    loop.run()\n",
+    "except:\n",
+    "    pass\n",
+    "# cleanup\n",
+    "pipeline.set_state(Gst.State.NULL)\n",
+    "print(\"--- %s seconds ---\" % (time.time() - start_time))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Convert video profile to be compatible with Jupyter notebook\n",
+    "!ffmpeg -loglevel panic -y -an -i ../source_code/N2/ds_out.mp4 -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 ../source_code/N2/output.mp4"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Display the Output\n",
+    "from IPython.display import HTML\n",
+    "HTML(\"\"\"\n",
+    " <video width=\"640\" height=\"480\" controls>\n",
+    " <source src=\"../source_code/N2/output.mp4\"\n",
+    " </video>\n",
+    "\"\"\".format())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the next notebook , we will build an multi-stream pipeline performing 4-class object detection.\n",
+    "\n",
+    "## Licensing\n",
+    "  \n",
+    "This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "[Previous Notebook](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;\n",
+    "[1](Introduction_to_Deepstream_and_Gstreamer.ipynb)\n",
+    "[2](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "[3]\n",
+    "[4](Multi-stream_pipeline.ipynb)\n",
+    "[5](Multi-stream_Multi_DNN.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "[Next Notebook](Multi-stream_pipeline.ipynb)\n",
+    "\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&ensp;\n",
+    "[Home Page](Start_Here.ipynb)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

+ 729 - 0
ai/DeepStream/English/python/jupyter_notebook/Multi-stream_Multi_DNN.ipynb

@@ -0,0 +1,729 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&ensp;\n",
+    "[Home Page](Start_Here.ipynb)\n",
+    "    \n",
+    "    \n",
+    "[Previous Notebook](Multi-stream_pipeline.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&ensp;\n",
+    "[1](Introduction_to_Deepstream_and_Gstreamer.ipynb)\n",
+    "[2](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "[3](Introduction_to_Multi-DNN_pipeline.ipynb)\n",
+    "[4](Multi-stream_pipeline.ipynb)\n",
+    "[5]\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Exercise : Multi-stream - Multi-DNN pipeline\n",
+    "\n",
+    "In this notebook, you will build an Multi-stream Multi-DNN pipeline using the concepts learned from the previous notebooks. \n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Building the pipeline\n",
+    "\n",
+    "We will the using batched on the Multi-DNN network from [Notebook 3](Introduction_to_Multi-DNN_pipeline.ipynb) and combine it with the knowledge learnt in [Notebook 4](Multi-stream_pipeline.ipynb). \n",
+    "\n",
+    "\n",
+    "Here are the illustrations of the Pipeline \n",
+    "![test2](images/test2.png)\n",
+    "![test3](images/test3.png)\n",
+    "\n",
+    "Let us get started with the Notebook , You will have to fill in the `TODO` parts of the code present in the Notebook to complete the pipeline. Feel free to refer to the previous notebooks for the commands."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import required libraries \n",
+    "import sys\n",
+    "sys.path.append('../source_code')\n",
+    "import gi\n",
+    "import configparser\n",
+    "gi.require_version('Gst', '1.0')\n",
+    "from gi.repository import GObject, Gst\n",
+    "from gi.repository import GLib\n",
+    "from ctypes import *\n",
+    "import time\n",
+    "import sys\n",
+    "import math\n",
+    "import platform\n",
+    "from common.bus_call import bus_call\n",
+    "from common.FPS import GETFPS\n",
+    "import pyds\n",
+    "\n",
+    "\n",
+    "# Define variables to be used later\n",
+    "fps_streams={}\n",
+    "\n",
+    "PGIE_CLASS_ID_VEHICLE = 0\n",
+    "PGIE_CLASS_ID_BICYCLE = 1\n",
+    "PGIE_CLASS_ID_PERSON = 2\n",
+    "PGIE_CLASS_ID_ROADSIGN = 3\n",
+    "\n",
+    "MUXER_OUTPUT_WIDTH=1920\n",
+    "MUXER_OUTPUT_HEIGHT=1080\n",
+    "\n",
+    "TILED_OUTPUT_WIDTH=1920\n",
+    "TILED_OUTPUT_HEIGHT=1080\n",
+    "OSD_PROCESS_MODE= 0\n",
+    "OSD_DISPLAY_TEXT= 0\n",
+    "pgie_classes_str= [\"Vehicle\", \"TwoWheeler\", \"Person\",\"RoadSign\"]\n",
+    "\n",
+    "################ Three Stream Pipeline ###########\n",
+    "# Define Input and output Stream information \n",
+    "num_sources = 3 \n",
+    "INPUT_VIDEO_1 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "INPUT_VIDEO_2 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "INPUT_VIDEO_3 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "OUTPUT_VIDEO_NAME = \"../source_code/N4/ds_out.mp4\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We define a function `make_elm_or_print_err()` to create our elements and report any errors if the creation fails.\n",
+    "\n",
+    "Elements are created using the `Gst.ElementFactory.make()` function as part of Gstreamer library."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Make Element or Print Error and any other detail\n",
+    "def make_elm_or_print_err(factoryname, name, printedname, detail=\"\"):\n",
+    "  print(\"Creating\", printedname)\n",
+    "  elm = Gst.ElementFactory.make(factoryname, name)\n",
+    "  if not elm:\n",
+    "     sys.stderr.write(\"Unable to create \" + printedname + \" \\n\")\n",
+    "  if detail:\n",
+    "     sys.stderr.write(detail)\n",
+    "  return elm"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Initialise GStreamer and Create an Empty Pipeline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for i in range(0,num_sources):\n",
+    "        fps_streams[\"stream{0}\".format(i)]=GETFPS(i)\n",
+    "\n",
+    "# Standard GStreamer initialization\n",
+    "GObject.threads_init()\n",
+    "Gst.init(None)\n",
+    "\n",
+    "# Create gstreamer elements */\n",
+    "# Create Pipeline element that will form a connection of other elements\n",
+    "print(\"Creating Pipeline \\n \")\n",
+    "pipeline = Gst.Pipeline()\n",
+    "\n",
+    "if not pipeline:\n",
+    "    sys.stderr.write(\" Unable to create Pipeline \\n\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Create Elements that are required for our pipeline\n",
+    "\n",
+    "Compared to the first notebook , we use a lot of queues in this notebook to buffer data when it moves from one plugin to another."
+   ]
+  },
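+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a hint for the `TODO` sections below, the element-queue-element linking pattern looks like the generic sketch below (the names are placeholders, not the ones required by this exercise):\n",
+    "\n",
+    "```python\n",
+    "# generic pattern: insert a queue between two elements\n",
+    "element_a.link(queue_n)\n",
+    "queue_n.link(element_b)\n",
+    "```"
+   ]
+  },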
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########### Create Elements required for the Pipeline ########### \n",
+    "\n",
+    "\n",
+    "#################### ~~~~~~~ TODO ~~~~~~~~ ################\n",
+    "\n",
+    "# Source element for reading from the file\n",
+    "# Since the data format in the input file is elementary h264 stream,we need a h264parser\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "\n",
+    "######### Defining Stream 1 \n",
+    "source1 = \n",
+    "h264parser1 = \n",
+    "decoder1 = \n",
+    "##########\n",
+    "\n",
+    "########## Defining Stream 2 \n",
+    "source2 = \n",
+    "h264parser2 = \n",
+    "decoder3 = \n",
+    "########### \n",
+    "\n",
+    "########## Defining Stream 3\n",
+    "source3 = \n",
+    "h264parser3 = \n",
+    "decoder3 = \n",
+    "########### \n",
+    "\n",
+    "#################### ~~~~~~~ END ~~~~~~~~ ################\n",
+    "\n",
+    "# Create nvstreammux instance to form batches from one or more sources.\n",
+    "streammux = make_elm_or_print_err(\"nvstreammux\", \"Stream-muxer\",\"Stream-muxer\") \n",
+    "\n",
+    "#################### ~~~~~~~ TODO ~~~~~~~~ ################\n",
+    "\n",
+    "# Use nvinfer to run inferencing on decoder's output, behaviour of inferencing is set through config file\n",
+    "pgie = \n",
+    "# Use nvtracker to give objects unique-ids\n",
+    "tracker = \n",
+    "# Seconday inference for Finding Car Color\n",
+    "sgie1 = \n",
+    "# Seconday inference for Finding Car Make\n",
+    "sgie2 = \n",
+    "# Seconday inference for Finding Car Type\n",
+    "sgie3 = \n",
+    "\n",
+    "\n",
+    "#################### ~~~~~~~ END ~~~~~~~~ ################\n",
+    "\n",
+    "# Creating Tiler to present more than one streams\n",
+    "tiler=make_elm_or_print_err(\"nvmultistreamtiler\", \"nvtiler\",\"nvtiler\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv = make_elm_or_print_err(\"nvvideoconvert\", \"convertor\",\"nvvidconv\")\n",
+    "# Create OSD to draw on the converted RGBA buffer\n",
+    "nvosd = make_elm_or_print_err(\"nvdsosd\", \"onscreendisplay\",\"nvosd\")\n",
+    "# Creating queue's to buffer incoming data from pgie\n",
+    "queue1=make_elm_or_print_err(\"queue\",\"queue1\",\"queue1\")\n",
+    "# Creating queue's to buffer incoming data from tiler\n",
+    "queue2=make_elm_or_print_err(\"queue\",\"queue2\",\"queue2\")\n",
+    "# Creating queue's to buffer incoming data from nvvidconv\n",
+    "queue3=make_elm_or_print_err(\"queue\",\"queue3\",\"queue3\")\n",
+    "# Creating queue's to buffer incoming data from nvosd\n",
+    "queue4=make_elm_or_print_err(\"queue\",\"queue4\",\"queue4\")\n",
+    "# Creating queue's to buffer incoming data from nvvidconv2\n",
+    "queue5=make_elm_or_print_err(\"queue\",\"queue5\",\"queue5\")\n",
+    "# Creating queue's to buffer incoming data from nvtracker\n",
+    "queue6=make_elm_or_print_err(\"queue\",\"queue6\",\"queue6\")\n",
+    "# Creating queue's to buffer incoming data from sgie1\n",
+    "queue7=make_elm_or_print_err(\"queue\",\"queue7\",\"queue7\")\n",
+    "# Creating queue's to buffer incoming data from sgie2\n",
+    "queue8=make_elm_or_print_err(\"queue\",\"queue8\",\"queue8\")\n",
+    "# Creating queue's to buffer incoming data from sgie3\n",
+    "queue9=make_elm_or_print_err(\"queue\",\"queue9\",\"queue9\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv2 = make_elm_or_print_err(\"nvvideoconvert\", \"convertor2\",\"nvvidconv2\")\n",
+    "# Place an encoder instead of OSD to save as video file\n",
+    "encoder = make_elm_or_print_err(\"avenc_mpeg4\", \"encoder\", \"Encoder\")\n",
+    "# Parse output from Encoder \n",
+    "codeparser = make_elm_or_print_err(\"mpeg4videoparse\", \"mpeg4-parser\", 'Code Parser')\n",
+    "# Create a container\n",
+    "container = make_elm_or_print_err(\"qtmux\", \"qtmux\", \"Container\")\n",
+    "# Create Sink for storing the output \n",
+    "sink = make_elm_or_print_err(\"filesink\", \"filesink\", \"Sink\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have created the elements ,we can now set various properties for out pipeline at this point. \n",
+    "\n",
+    "Configuration file : [pgie](N3/dstest3_pgie_config.txt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "############ Set properties for the Elements ############\n",
+    "\n",
+    "\n",
+    "# Set Input Video files \n",
+    "source1.set_property('location', INPUT_VIDEO_1)\n",
+    "source2.set_property('location', INPUT_VIDEO_2)\n",
+    "source3.set_property('location', INPUT_VIDEO_3)\n",
+    "\n",
+    "\n",
+    "# Set Input Width , Height and Batch Size \n",
+    "streammux.set_property('width', 1920)\n",
+    "streammux.set_property('height', 1080)\n",
+    "streammux.set_property('batch-size', num_sources)\n",
+    "# Timeout in microseconds to wait after the first buffer is available \n",
+    "# to push the batch even if a complete batch is not formed.\n",
+    "streammux.set_property('batched-push-timeout', 4000000)\n",
+    "\n",
+    "# Set configuration file for nvinfer \n",
+    "pgie.set_property('config-file-path', \"../source_code/N4/dstest4_pgie_config.txt\")\n",
+    "sgie1.set_property('config-file-path', \"../source_code/N4/dstest4_sgie1_config.txt\")\n",
+    "sgie2.set_property('config-file-path', \"../source_code/N4/dstest4_sgie2_config.txt\")\n",
+    "sgie3.set_property('config-file-path', \"../source_code/N4/dstest4_sgie3_config.txt\")\n",
+    "\n",
+    "#Set properties of tracker from tracker_config\n",
+    "config = configparser.ConfigParser()\n",
+    "config.read('../source_code/N4/dstest4_tracker_config.txt')\n",
+    "config.sections()\n",
+    "for key in config['tracker']:\n",
+    "    if key == 'tracker-width' :\n",
+    "        tracker_width = config.getint('tracker', key)\n",
+    "        tracker.set_property('tracker-width', tracker_width)\n",
+    "    if key == 'tracker-height' :\n",
+    "        tracker_height = config.getint('tracker', key)\n",
+    "        tracker.set_property('tracker-height', tracker_height)\n",
+    "    if key == 'gpu-id' :\n",
+    "        tracker_gpu_id = config.getint('tracker', key)\n",
+    "        tracker.set_property('gpu_id', tracker_gpu_id)\n",
+    "    if key == 'll-lib-file' :\n",
+    "        tracker_ll_lib_file = config.get('tracker', key)\n",
+    "        tracker.set_property('ll-lib-file', tracker_ll_lib_file)\n",
+    "    if key == 'll-config-file' :\n",
+    "        tracker_ll_config_file = config.get('tracker', key)\n",
+    "        tracker.set_property('ll-config-file', tracker_ll_config_file)\n",
+    "    if key == 'enable-batch-process' :\n",
+    "        tracker_enable_batch_process = config.getint('tracker', key)\n",
+    "        tracker.set_property('enable_batch_process', tracker_enable_batch_process)\n",
+    "        \n",
+    "## Set batch size \n",
+    "pgie_batch_size=pgie.get_property(\"batch-size\")\n",
+    "print(\"PGIE batch size :\",end='')\n",
+    "print(pgie_batch_size)\n",
+    "if(pgie_batch_size != num_sources):\n",
+    "    print(\"WARNING: Overriding infer-config batch-size\",pgie_batch_size,\" with number of sources \", num_sources,\" \\n\")\n",
+    "    pgie.set_property(\"batch-size\",num_sources)\n",
+    "    \n",
+    "## Set batch size \n",
+    "sgie1_batch_size=sgie1.get_property(\"batch-size\")\n",
+    "print(\"SGIE1 batch size :\",end='')\n",
+    "print(sgie1_batch_size)\n",
+    "if(sgie1_batch_size != num_sources):\n",
+    "    print(\"WARNING: Overriding infer-config batch-size\",sgie1_batch_size,\" with number of sources \", num_sources,\" \\n\")\n",
+    "    sgie1.set_property(\"batch-size\",num_sources)\n",
+    "    \n",
+    "## Set batch size \n",
+    "sgie2_batch_size=sgie2.get_property(\"batch-size\")\n",
+    "print(\"SGIE2 batch size :\",end='')\n",
+    "print(sgie2_batch_size)\n",
+    "if(sgie2_batch_size != num_sources):\n",
+    "    print(\"WARNING: Overriding infer-config batch-size\",sgie2_batch_size,\" with number of sources \", num_sources,\" \\n\")\n",
+    "    sgie2.set_property(\"batch-size\",num_sources)\n",
+    "\n",
+    "## Set batch size \n",
+    "sgie3_batch_size=sgie3.get_property(\"batch-size\")\n",
+    "print(\"SGIE3 batch size :\",end='')\n",
+    "print(sgie3_batch_size)\n",
+    "if(sgie3_batch_size != num_sources):\n",
+    "    print(\"WARNING: Overriding infer-config batch-size\",sgie3_batch_size,\" with number of sources \", num_sources,\" \\n\")\n",
+    "    sgie3.set_property(\"batch-size\",num_sources)\n",
+    "    \n",
+    "# Set display configurations for nvmultistreamtiler   \n",
+    "\n",
+    "tiler_rows=int(2)\n",
+    "tiler_columns=int(2)\n",
+    "tiler.set_property(\"rows\",tiler_rows)\n",
+    "tiler.set_property(\"columns\",tiler_columns)\n",
+    "tiler.set_property(\"width\", TILED_OUTPUT_WIDTH)\n",
+    "tiler.set_property(\"height\", TILED_OUTPUT_HEIGHT)\n",
+    "\n",
+    "# Set encoding properties and Sink configs\n",
+    "encoder.set_property(\"bitrate\", 2000000)\n",
+    "sink.set_property(\"location\", OUTPUT_VIDEO_NAME)\n",
+    "sink.set_property(\"sync\", 0)\n",
+    "sink.set_property(\"async\", 0)\n"
+   ]
+  },
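+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The 2x2 tiler grid above is hardcoded for 3 sources. A common alternative, used in the DeepStream reference apps, is to derive the grid from the number of sources; a sketch using the already-imported `math` module:\n",
+    "\n",
+    "```python\n",
+    "import math\n",
+    "tiler_rows = int(math.sqrt(num_sources))\n",
+    "tiler_columns = int(math.ceil((1.0 * num_sources) / tiler_rows))\n",
+    "tiler.set_property(\"rows\", tiler_rows)\n",
+    "tiler.set_property(\"columns\", tiler_columns)\n",
+    "```"
+   ]
+  },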
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now link all the elements in the order we prefer and create Gstreamer bus to feed all messages through it. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########## Add and Link ELements in the Pipeline ########## \n",
+    "\n",
+    "print(\"Adding elements to Pipeline \\n\")\n",
+    "\n",
+    "pipeline.add(source1)\n",
+    "pipeline.add(h264parser1)\n",
+    "pipeline.add(decoder1)\n",
+    "pipeline.add(source2)\n",
+    "pipeline.add(h264parser2)\n",
+    "pipeline.add(decoder2)\n",
+    "pipeline.add(source3)\n",
+    "pipeline.add(h264parser3)\n",
+    "pipeline.add(decoder3)\n",
+    "pipeline.add(streammux)\n",
+    "pipeline.add(pgie)\n",
+    "pipeline.add(tracker)\n",
+    "pipeline.add(sgie1)\n",
+    "pipeline.add(sgie2)\n",
+    "pipeline.add(sgie3)\n",
+    "pipeline.add(tiler)\n",
+    "pipeline.add(nvvidconv)\n",
+    "pipeline.add(nvosd)\n",
+    "pipeline.add(queue1)\n",
+    "pipeline.add(queue2)\n",
+    "pipeline.add(queue3)\n",
+    "pipeline.add(queue4)\n",
+    "pipeline.add(queue5)\n",
+    "pipeline.add(queue6)\n",
+    "pipeline.add(queue7)\n",
+    "pipeline.add(queue8)\n",
+    "pipeline.add(queue9)\n",
+    "pipeline.add(nvvidconv2)\n",
+    "pipeline.add(encoder)\n",
+    "pipeline.add(codeparser)\n",
+    "pipeline.add(container)\n",
+    "pipeline.add(sink)\n",
+    "\n",
+    "print(\"Linking elements in the Pipeline \\n\")\n",
+    "\n",
+    "source1.link(h264parser1)\n",
+    "h264parser1.link(decoder1)\n",
+    "\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "sinkpad1 = streammux.get_request_pad(\"sink_0\")\n",
+    "if not sinkpad1:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad1 = decoder1.get_static_pad(\"src\")\n",
+    "if not srcpad1:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad1.link(sinkpad1)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "source2.link(h264parser2)\n",
+    "h264parser2.link(decoder2)\n",
+    "\n",
+    "sinkpad2 = streammux.get_request_pad(\"sink_1\")\n",
+    "if not sinkpad2:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad2 = decoder2.get_static_pad(\"src\")\n",
+    "if not srcpad2:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad2.link(sinkpad2)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "source3.link(h264parser3)\n",
+    "h264parser3.link(decoder3)\n",
+    "\n",
+    "sinkpad3 = streammux.get_request_pad(\"sink_2\")\n",
+    "if not sinkpad2:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad3 = decoder3.get_static_pad(\"src\")\n",
+    "if not srcpad3:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad3.link(sinkpad3)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "\n",
+    "streammux.link(queue1)\n",
+    "#################### ~~~~~~~ TODO ~~~~~~~~ ################\n",
+    "# Link the Elements using from the pipeline illustration\n",
+    "# Remember to add queue after every element\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "#################### ~~~~~~~ END ~~~~~~~~ ################\n",
+    "queue8.link(nvosd)\n",
+    "nvosd.link(queue9)\n",
+    "queue9.link(nvvidconv2)\n",
+    "nvvidconv2.link(encoder)\n",
+    "encoder.link(codeparser)\n",
+    "codeparser.link(container)\n",
+    "container.link(sink)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# create an event loop and feed gstreamer bus mesages to it\n",
+    "loop = GObject.MainLoop()\n",
+    "bus = pipeline.get_bus()\n",
+    "bus.add_signal_watch()\n",
+    "bus.connect (\"message\", bus_call, loop)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This callback function is the same as used in the previous notebook."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# tiler_sink_pad_buffer_probe  will extract metadata received on OSD sink pad\n",
+    "# and update params for drawing rectangle, object information etc.\n",
+    "def tiler_src_pad_buffer_probe(pad,info,u_data):\n",
+    "    #Intiallizing object counter with 0.\n",
+    "    obj_counter = {\n",
+    "        PGIE_CLASS_ID_VEHICLE:0,\n",
+    "        PGIE_CLASS_ID_PERSON:0,\n",
+    "        PGIE_CLASS_ID_BICYCLE:0,\n",
+    "        PGIE_CLASS_ID_ROADSIGN:0\n",
+    "    }\n",
+    "    # Set frame_number & rectangles to draw as 0 \n",
+    "    frame_number=0\n",
+    "    num_rects=0\n",
+    "    \n",
+    "    gst_buffer = info.get_buffer()\n",
+    "    if not gst_buffer:\n",
+    "        print(\"Unable to get GstBuffer \")\n",
+    "        return\n",
+    "\n",
+    "    # Retrieve batch metadata from the gst_buffer\n",
+    "    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the\n",
+    "    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)\n",
+    "    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))\n",
+    "    l_frame = batch_meta.frame_meta_list\n",
+    "    while l_frame is not None:\n",
+    "        try:\n",
+    "            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta\n",
+    "            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "        \n",
+    "        # Get frame number , number of rectables to draw and object metadata\n",
+    "        frame_number=frame_meta.frame_num\n",
+    "        num_rects = frame_meta.num_obj_meta\n",
+    "        l_obj=frame_meta.obj_meta_list\n",
+    "        \n",
+    "        while l_obj is not None:\n",
+    "            try:\n",
+    "                # Casting l_obj.data to pyds.NvDsObjectMeta\n",
+    "                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "            # Increment Object class by 1 and Set Box border to Red color     \n",
+    "            obj_counter[obj_meta.class_id] += 1\n",
+    "            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)\n",
+    "            try: \n",
+    "                l_obj=l_obj.next\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "        ################## Setting Metadata Display configruation ############### \n",
+    "        # Acquiring a display meta object.\n",
+    "        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)\n",
+    "        display_meta.num_labels = 1\n",
+    "        py_nvosd_text_params = display_meta.text_params[0]\n",
+    "        # Setting display text to be shown on screen\n",
+    "        py_nvosd_text_params.display_text = \"Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}\".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])\n",
+    "        # Now set the offsets where the string should appear\n",
+    "        py_nvosd_text_params.x_offset = 10\n",
+    "        py_nvosd_text_params.y_offset = 12\n",
+    "        # Font , font-color and font-size\n",
+    "        py_nvosd_text_params.font_params.font_name = \"Serif\"\n",
+    "        py_nvosd_text_params.font_params.font_size = 10\n",
+    "        # Set(red, green, blue, alpha); Set to White\n",
+    "        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)\n",
+    "        # Text background color\n",
+    "        py_nvosd_text_params.set_bg_clr = 1\n",
+    "        # Set(red, green, blue, alpha); set to Black\n",
+    "        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)\n",
+    "        # Using pyds.get_string() to get display_text as string to print in notebook\n",
+    "        print(pyds.get_string(py_nvosd_text_params.display_text))\n",
+    "        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)\n",
+    "        \n",
+    "        ############################################################################\n",
+    "        # Get frame rate through this probe\n",
+    "        fps_streams[\"stream{0}\".format(frame_meta.pad_index)].get_fps()\n",
+    "        try:\n",
+    "            l_frame=l_frame.next\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "\n",
+    "    return Gst.PadProbeReturn.OK\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#################### ~~~~~~~ TODO ~~~~~~~~ ################\n",
+    "# Set Tiler source pad , Checkout Notebook 4 to understand where to attach this pad to.\n",
+    "tiler_src_pad=\n",
+    "\n",
+    "#################### ~~~~~~~ END ~~~~~~~~ ################\n",
+    "if not tiler_src_pad:\n",
+    "    sys.stderr.write(\" Unable to get src pad \\n\")\n",
+    "else:\n",
+    "    tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now with everything defined , we can start the playback and listen the events."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# List the sources\n",
+    "print(\"Now playing...\")\n",
+    "start_time = time.time()\n",
+    "print(\"Starting pipeline \\n\")\n",
+    "# start play back and listed to events\t\t\n",
+    "pipeline.set_state(Gst.State.PLAYING)\n",
+    "try:\n",
+    "    loop.run()\n",
+    "except:\n",
+    "    pass\n",
+    "# cleanup\n",
+    "print(\"Exiting app\\n\")\n",
+    "pipeline.set_state(Gst.State.NULL)\n",
+    "print(\"--- %s seconds ---\" % (time.time() - start_time))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Convert video profile to be compatible with Jupyter notebook\n",
+    "!ffmpeg -loglevel panic -y -an -i ../source_code/N4/ds_out.mp4 -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 ../source_code/N4/output.mp4"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Display the Output\n",
+    "from IPython.display import HTML\n",
+    "HTML(\"\"\"\n",
+    " <video width=\"960\" height=\"540\" controls>\n",
+    " <source src=\"../source_code/N4/output.mp4\"\n",
+    " </video>\n",
+    "\"\"\".format())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Licensing\n",
+    "  \n",
+    "This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).\n",
+    "\n",
+    "[Previous Notebook](Multi-stream_pipeline.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&ensp;\n",
+    "[1](Introduction_to_Deepstream_and_Gstreamer.ipynb)\n",
+    "[2](Getting_started_with_Deepstream_Pipeline.ipynb)\n",
+    "[3](Introduction_to_Multi-DNN_pipeline.ipynb)\n",
+    "[4](Multi-stream_pipeline.ipynb)\n",
+    "[5]\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&ensp;\n",
+    "[Home Page](Start_Here.ipynb)\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

+ 662 - 0
ai/DeepStream/English/python/jupyter_notebook/Multi-stream_Multi_DNN_Solution.ipynb

@@ -0,0 +1,662 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Hackathon Solution : Multi-stream - Multi-DNN pipeline\n",
+    "\n",
+    "In this notebook, you will build an Multi-stream Multi-DNN pipeline using the concepts learned from the previous notebooks. \n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Building the pipeline\n",
+    "\n",
+    "We will the using batched on the Multi-DNN network from [Notebook 3](Introduction_to_Multi-DNN_pipeline.ipynb) and combine it with the knowledge learnt in [Notebook 4](Multi-stream_pipeline.ipynb). \n",
+    "\n",
+    "\n",
+    "Here are the illustrations of the Pipeline \n",
+    "![test2](images/test2.png)\n",
+    "![test3](images/test3.png)\n",
+    "\n",
+    "Let us get started with the Notebook , You will have to fill in the `TODO` parts of the code present in the Notebook to complete the pipeline. Feel free to refer to the previous notebooks for the commands."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import required libraries \n",
+    "import sys\n",
+    "sys.path.append('../source_code')\n",
+    "import gi\n",
+    "import configparser\n",
+    "gi.require_version('Gst', '1.0')\n",
+    "from gi.repository import GObject, Gst\n",
+    "from gi.repository import GLib\n",
+    "from ctypes import *\n",
+    "import time\n",
+    "import sys\n",
+    "import math\n",
+    "import platform\n",
+    "from common.bus_call import bus_call\n",
+    "from common.FPS import GETFPS\n",
+    "import pyds\n",
+    "\n",
+    "\n",
+    "# Define variables to be used later\n",
+    "fps_streams={}\n",
+    "\n",
+    "PGIE_CLASS_ID_VEHICLE = 0\n",
+    "PGIE_CLASS_ID_BICYCLE = 1\n",
+    "PGIE_CLASS_ID_PERSON = 2\n",
+    "PGIE_CLASS_ID_ROADSIGN = 3\n",
+    "\n",
+    "MUXER_OUTPUT_WIDTH=1920\n",
+    "MUXER_OUTPUT_HEIGHT=1080\n",
+    "\n",
+    "TILED_OUTPUT_WIDTH=1920\n",
+    "TILED_OUTPUT_HEIGHT=1080\n",
+    "OSD_PROCESS_MODE= 0\n",
+    "OSD_DISPLAY_TEXT= 0\n",
+    "pgie_classes_str= [\"Vehicle\", \"TwoWheeler\", \"Person\",\"RoadSign\"]\n",
+    "\n",
+    "################ Three Stream Pipeline ###########\n",
+    "# Define Input and output Stream information \n",
+    "num_sources = 3 \n",
+    "INPUT_VIDEO_1 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "INPUT_VIDEO_2 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "INPUT_VIDEO_3 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "OUTPUT_VIDEO_NAME = \"../source_code/N4/ds_out.mp4\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We define a function `make_elm_or_print_err()` to create our elements and report any errors if the creation fails.\n",
+    "\n",
+    "Elements are created using the `Gst.ElementFactory.make()` function as part of Gstreamer library."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Make Element or Print Error and any other detail\n",
+    "def make_elm_or_print_err(factoryname, name, printedname, detail=\"\"):\n",
+    "  print(\"Creating\", printedname)\n",
+    "  elm = Gst.ElementFactory.make(factoryname, name)\n",
+    "  if not elm:\n",
+    "     sys.stderr.write(\"Unable to create \" + printedname + \" \\n\")\n",
+    "  if detail:\n",
+    "     sys.stderr.write(detail)\n",
+    "  return elm"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Initialise GStreamer and Create an Empty Pipeline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for i in range(0,num_sources):\n",
+    "        fps_streams[\"stream{0}\".format(i)]=GETFPS(i)\n",
+    "\n",
+    "# Standard GStreamer initialization\n",
+    "GObject.threads_init()\n",
+    "Gst.init(None)\n",
+    "\n",
+    "# Create gstreamer elements */\n",
+    "# Create Pipeline element that will form a connection of other elements\n",
+    "print(\"Creating Pipeline \\n \")\n",
+    "pipeline = Gst.Pipeline()\n",
+    "\n",
+    "if not pipeline:\n",
+    "    sys.stderr.write(\" Unable to create Pipeline \\n\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Create Elements that are required for our pipeline\n",
+    "\n",
+    "Compared to the first notebook , we use a lot of queues in this notebook to buffer data when it moves from one plugin to another."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########### Create Elements required for the Pipeline ########### \n",
+    "\n",
+    "######### Defining Stream 1 \n",
+    "# Source element for reading from the file\n",
+    "source1 = make_elm_or_print_err(\"filesrc\", \"file-source-1\",'file-source-1')\n",
+    "# Since the data format in the input file is elementary h264 stream,we need a h264parser\n",
+    "h264parser1 = make_elm_or_print_err(\"h264parse\", \"h264-parser-1\",\"h264-parser-1\")\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "decoder1 = make_elm_or_print_err(\"nvv4l2decoder\", \"nvv4l2-decoder-1\",\"nvv4l2-decoder-1\")\n",
+    "   \n",
+    "##########\n",
+    "\n",
+    "########## Defining Stream 2 \n",
+    "# Source element for reading from the file\n",
+    "source2 = make_elm_or_print_err(\"filesrc\", \"file-source-2\",\"file-source-2\")\n",
+    "# Since the data format in the input file is elementary h264 stream, we need a h264parser\n",
+    "h264parser2 = make_elm_or_print_err(\"h264parse\", \"h264-parser-2\", \"h264-parser-2\")\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "decoder2 = make_elm_or_print_err(\"nvv4l2decoder\", \"nvv4l2-decoder-2\",\"nvv4l2-decoder-2\")\n",
+    "########### \n",
+    "\n",
+    "########## Defining Stream 3\n",
+    "# Source element for reading from the file\n",
+    "source3 = make_elm_or_print_err(\"filesrc\", \"file-source-3\",\"file-source-3\")\n",
+    "# Since the data format in the input file is elementary h264 stream, we need a h264parser\n",
+    "h264parser3 = make_elm_or_print_err(\"h264parse\", \"h264-parser-3\", \"h264-parser-3\")\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "decoder3 = make_elm_or_print_err(\"nvv4l2decoder\", \"nvv4l2-decoder-3\",\"nvv4l2-decoder-3\")\n",
+    "########### \n",
+    "    \n",
+    "# Create nvstreammux instance to form batches from one or more sources.\n",
+    "streammux = make_elm_or_print_err(\"nvstreammux\", \"Stream-muxer\",\"Stream-muxer\") \n",
+    "# Use nvinfer to run inferencing on decoder's output, behaviour of inferencing is set through config file\n",
+    "pgie = make_elm_or_print_err(\"nvinfer\", \"primary-inference\" ,\"pgie\")\n",
+    "# Use nvtracker to give objects unique-ids\n",
+    "tracker = make_elm_or_print_err(\"nvtracker\", \"tracker\",'tracker')\n",
+    "# Seconday inference for Finding Car Color\n",
+    "sgie1 = make_elm_or_print_err(\"nvinfer\", \"secondary1-nvinference-engine\",'sgie1')\n",
+    "# Seconday inference for Finding Car Make\n",
+    "sgie2 = make_elm_or_print_err(\"nvinfer\", \"secondary2-nvinference-engine\",'sgie2')\n",
+    "# Seconday inference for Finding Car Type\n",
+    "sgie3 = make_elm_or_print_err(\"nvinfer\", \"secondary3-nvinference-engine\",'sgie3')\n",
+    "# Creating Tiler to present more than one streams\n",
+    "tiler=make_elm_or_print_err(\"nvmultistreamtiler\", \"nvtiler\",\"nvtiler\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv = make_elm_or_print_err(\"nvvideoconvert\", \"convertor\",\"nvvidconv\")\n",
+    "# Create OSD to draw on the converted RGBA buffer\n",
+    "nvosd = make_elm_or_print_err(\"nvdsosd\", \"onscreendisplay\",\"nvosd\")\n",
+    "# Creating queue's to buffer incoming data from pgie\n",
+    "queue1=make_elm_or_print_err(\"queue\",\"queue1\",\"queue1\")\n",
+    "# Creating queue's to buffer incoming data from tiler\n",
+    "queue2=make_elm_or_print_err(\"queue\",\"queue2\",\"queue2\")\n",
+    "# Creating queue's to buffer incoming data from nvvidconv\n",
+    "queue3=make_elm_or_print_err(\"queue\",\"queue3\",\"queue3\")\n",
+    "# Creating queue's to buffer incoming data from nvosd\n",
+    "queue4=make_elm_or_print_err(\"queue\",\"queue4\",\"queue4\")\n",
+    "# Creating queue's to buffer incoming data from nvvidconv2\n",
+    "queue5=make_elm_or_print_err(\"queue\",\"queue5\",\"queue5\")\n",
+    "# Creating queue's to buffer incoming data from nvtracker\n",
+    "queue6=make_elm_or_print_err(\"queue\",\"queue6\",\"queue6\")\n",
+    "# Creating queue's to buffer incoming data from sgie1\n",
+    "queue7=make_elm_or_print_err(\"queue\",\"queue7\",\"queue7\")\n",
+    "# Creating queue's to buffer incoming data from sgie2\n",
+    "queue8=make_elm_or_print_err(\"queue\",\"queue8\",\"queue8\")\n",
+    "# Creating queue's to buffer incoming data from sgie3\n",
+    "queue9=make_elm_or_print_err(\"queue\",\"queue9\",\"queue9\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv2 = make_elm_or_print_err(\"nvvideoconvert\", \"convertor2\",\"nvvidconv2\")\n",
+    "# Place an encoder instead of OSD to save as video file\n",
+    "encoder = make_elm_or_print_err(\"avenc_mpeg4\", \"encoder\", \"Encoder\")\n",
+    "# Parse output from Encoder \n",
+    "codeparser = make_elm_or_print_err(\"mpeg4videoparse\", \"mpeg4-parser\", 'Code Parser')\n",
+    "# Create a container\n",
+    "container = make_elm_or_print_err(\"qtmux\", \"qtmux\", \"Container\")\n",
+    "# Create Sink for storing the output \n",
+    "sink = make_elm_or_print_err(\"filesink\", \"filesink\", \"Sink\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have created the elements ,we can now set various properties for out pipeline at this point. The configuration files are the same as in [Multi-DNN Notebook](Introduction_to_Multi-DNN_pipeline.ipynb)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "############ Set properties for the Elements ############\n",
+    "# Set Input Video files \n",
+    "source1.set_property('location', INPUT_VIDEO_1)\n",
+    "source2.set_property('location', INPUT_VIDEO_2)\n",
+    "source3.set_property('location', INPUT_VIDEO_2)\n",
+    "# Set Input Width , Height and Batch Size \n",
+    "streammux.set_property('width', 1920)\n",
+    "streammux.set_property('height', 1080)\n",
+    "streammux.set_property('batch-size', num_sources)\n",
+    "# Timeout in microseconds to wait after the first buffer is available \n",
+    "# to push the batch even if a complete batch is not formed.\n",
+    "streammux.set_property('batched-push-timeout', 4000000)\n",
+    "# Set configuration file for nvinfer \n",
+    "# Set Congifuration file for nvinfer \n",
+    "pgie.set_property('config-file-path', \"../source_code/N4/dstest4_pgie_config.txt\")\n",
+    "sgie1.set_property('config-file-path', \"../source_code/N4/dstest4_sgie1_config.txt\")\n",
+    "sgie2.set_property('config-file-path', \"../source_code/N4/dstest4_sgie2_config.txt\")\n",
+    "sgie3.set_property('config-file-path', \"../source_code/N4/dstest4_sgie3_config.txt\")\n",
+    "#Set properties of tracker from tracker_config\n",
+    "config = configparser.ConfigParser()\n",
+    "config.read('../source_code/N4/dstest4_tracker_config.txt')\n",
+    "config.sections()\n",
+    "for key in config['tracker']:\n",
+    "    if key == 'tracker-width' :\n",
+    "        tracker_width = config.getint('tracker', key)\n",
+    "        tracker.set_property('tracker-width', tracker_width)\n",
+    "    if key == 'tracker-height' :\n",
+    "        tracker_height = config.getint('tracker', key)\n",
+    "        tracker.set_property('tracker-height', tracker_height)\n",
+    "    if key == 'gpu-id' :\n",
+    "        tracker_gpu_id = config.getint('tracker', key)\n",
+    "        tracker.set_property('gpu_id', tracker_gpu_id)\n",
+    "    if key == 'll-lib-file' :\n",
+    "        tracker_ll_lib_file = config.get('tracker', key)\n",
+    "        tracker.set_property('ll-lib-file', tracker_ll_lib_file)\n",
+    "    if key == 'll-config-file' :\n",
+    "        tracker_ll_config_file = config.get('tracker', key)\n",
+    "        tracker.set_property('ll-config-file', tracker_ll_config_file)\n",
+    "    if key == 'enable-batch-process' :\n",
+    "        tracker_enable_batch_process = config.getint('tracker', key)\n",
+    "        tracker.set_property('enable_batch_process', tracker_enable_batch_process)\n",
+    "        \n",
+    "## Set batch size \n",
+    "pgie_batch_size=pgie.get_property(\"batch-size\")\n",
+    "print(\"PGIE batch size :\",end='')\n",
+    "print(pgie_batch_size)\n",
+    "if(pgie_batch_size != num_sources):\n",
+    "    print(\"WARNING: Overriding infer-config batch-size\",pgie_batch_size,\" with number of sources \", num_sources,\" \\n\")\n",
+    "    pgie.set_property(\"batch-size\",num_sources)\n",
+    "    \n",
+    "## Set batch size \n",
+    "sgie1_batch_size=sgie1.get_property(\"batch-size\")\n",
+    "print(\"SGIE1 batch size :\",end='')\n",
+    "print(sgie1_batch_size)\n",
+    "if(sgie1_batch_size != num_sources):\n",
+    "    print(\"WARNING: Overriding infer-config batch-size\",sgie1_batch_size,\" with number of sources \", num_sources,\" \\n\")\n",
+    "    sgie1.set_property(\"batch-size\",num_sources)\n",
+    "    \n",
+    "## Set batch size \n",
+    "sgie2_batch_size=sgie2.get_property(\"batch-size\")\n",
+    "print(\"SGIE2 batch size :\",end='')\n",
+    "print(sgie2_batch_size)\n",
+    "if(sgie2_batch_size != num_sources):\n",
+    "    print(\"WARNING: Overriding infer-config batch-size\",sgie2_batch_size,\" with number of sources \", num_sources,\" \\n\")\n",
+    "    sgie2.set_property(\"batch-size\",num_sources)\n",
+    "\n",
+    "## Set batch size \n",
+    "sgie3_batch_size=sgie3.get_property(\"batch-size\")\n",
+    "print(\"SGIE3 batch size :\",end='')\n",
+    "print(sgie3_batch_size)\n",
+    "if(sgie3_batch_size != num_sources):\n",
+    "    print(\"WARNING: Overriding infer-config batch-size\",sgie3_batch_size,\" with number of sources \", num_sources,\" \\n\")\n",
+    "    sgie3.set_property(\"batch-size\",num_sources)\n",
+    "    \n",
+    "# Set display configurations for nvmultistreamtiler    \n",
+    "tiler_rows=int(2)\n",
+    "tiler_columns=int(2)\n",
+    "tiler.set_property(\"rows\",tiler_rows)\n",
+    "tiler.set_property(\"columns\",tiler_columns)\n",
+    "tiler.set_property(\"width\", TILED_OUTPUT_WIDTH)\n",
+    "tiler.set_property(\"height\", TILED_OUTPUT_HEIGHT)\n",
+    "\n",
+    "# Set encoding properties and Sink configs\n",
+    "encoder.set_property(\"bitrate\", 2000000)\n",
+    "sink.set_property(\"location\", OUTPUT_VIDEO_NAME)\n",
+    "sink.set_property(\"sync\", 0)\n",
+    "sink.set_property(\"async\", 0)\n"
+   ]
+  },
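+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The tiler grid above is hard-coded to 2x2, which suits the three streams here. For a variable number of sources, the grid can instead be derived from `num_sources`; a small sketch follows (the `math`-based near-square sizing is an illustrative convention, not something the tiler requires):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import math\n",
+    "# Derive a near-square tile grid from the number of input streams\n",
+    "rows = int(math.ceil(math.sqrt(num_sources)))\n",
+    "columns = int(math.ceil(num_sources / rows))\n",
+    "tiler.set_property(\"rows\", rows)\n",
+    "tiler.set_property(\"columns\", columns)"
+   ]
+  },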
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now link all the elements in the order we prefer and create Gstreamer bus to feed all messages through it. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########## Add and Link ELements in the Pipeline ########## \n",
+    "\n",
+    "print(\"Adding elements to Pipeline \\n\")\n",
+    "pipeline.add(source1)\n",
+    "pipeline.add(h264parser1)\n",
+    "pipeline.add(decoder1)\n",
+    "pipeline.add(source2)\n",
+    "pipeline.add(h264parser2)\n",
+    "pipeline.add(decoder2)\n",
+    "pipeline.add(source3)\n",
+    "pipeline.add(h264parser3)\n",
+    "pipeline.add(decoder3)\n",
+    "pipeline.add(streammux)\n",
+    "pipeline.add(pgie)\n",
+    "pipeline.add(tracker)\n",
+    "pipeline.add(sgie1)\n",
+    "pipeline.add(sgie2)\n",
+    "pipeline.add(sgie3)\n",
+    "pipeline.add(tiler)\n",
+    "pipeline.add(nvvidconv)\n",
+    "pipeline.add(nvosd)\n",
+    "pipeline.add(queue1)\n",
+    "pipeline.add(queue2)\n",
+    "pipeline.add(queue3)\n",
+    "pipeline.add(queue4)\n",
+    "pipeline.add(queue5)\n",
+    "pipeline.add(queue6)\n",
+    "pipeline.add(queue7)\n",
+    "pipeline.add(queue8)\n",
+    "pipeline.add(queue9)\n",
+    "pipeline.add(nvvidconv2)\n",
+    "pipeline.add(encoder)\n",
+    "pipeline.add(codeparser)\n",
+    "pipeline.add(container)\n",
+    "pipeline.add(sink)\n",
+    "\n",
+    "print(\"Linking elements in the Pipeline \\n\")\n",
+    "\n",
+    "source1.link(h264parser1)\n",
+    "h264parser1.link(decoder1)\n",
+    "\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "sinkpad1 = streammux.get_request_pad(\"sink_0\")\n",
+    "if not sinkpad1:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad1 = decoder1.get_static_pad(\"src\")\n",
+    "if not srcpad1:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad1.link(sinkpad1)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "source2.link(h264parser2)\n",
+    "h264parser2.link(decoder2)\n",
+    "\n",
+    "sinkpad2 = streammux.get_request_pad(\"sink_1\")\n",
+    "if not sinkpad2:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad2 = decoder2.get_static_pad(\"src\")\n",
+    "if not srcpad2:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad2.link(sinkpad2)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "source3.link(h264parser3)\n",
+    "h264parser3.link(decoder3)\n",
+    "\n",
+    "sinkpad3 = streammux.get_request_pad(\"sink_2\")\n",
+    "if not sinkpad2:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad3 = decoder3.get_static_pad(\"src\")\n",
+    "if not srcpad3:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad3.link(sinkpad3)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "\n",
+    "streammux.link(queue1)\n",
+    "queue1.link(pgie)\n",
+    "pgie.link(queue2)\n",
+    "queue2.link(tracker)\n",
+    "tracker.link(queue3)\n",
+    "queue3.link(sgie1)\n",
+    "sgie1.link(queue4)\n",
+    "queue4.link(sgie2)\n",
+    "sgie2.link(queue5)\n",
+    "queue5.link(sgie3)\n",
+    "sgie3.link(queue6)\n",
+    "queue6.link(tiler)\n",
+    "tiler.link(queue7)\n",
+    "queue7.link(nvvidconv)\n",
+    "nvvidconv.link(queue8)\n",
+    "queue8.link(nvosd)\n",
+    "nvosd.link(queue9)\n",
+    "queue9.link(nvvidconv2)\n",
+    "nvvidconv2.link(encoder)\n",
+    "encoder.link(codeparser)\n",
+    "codeparser.link(container)\n",
+    "container.link(sink)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# create an event loop and feed gstreamer bus mesages to it\n",
+    "loop = GObject.MainLoop()\n",
+    "bus = pipeline.get_bus()\n",
+    "bus.add_signal_watch()\n",
+    "bus.connect (\"message\", bus_call, loop)\n"
+   ]
+  },
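+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `bus_call` callback referenced above is assumed to be defined or imported earlier in this notebook. For reference, a minimal sketch of such a handler typically looks like the following; the exact message handling shown here is an illustrative assumption, not the notebook's verbatim definition."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Minimal sketch of a GStreamer bus callback (illustrative; the notebook's own\n",
+    "# bus_call defined earlier is what is actually connected above).\n",
+    "def bus_call_sketch(bus, message, loop):\n",
+    "    t = message.type\n",
+    "    if t == Gst.MessageType.EOS:\n",
+    "        # End-of-stream: quit the main loop so the pipeline can shut down\n",
+    "        sys.stdout.write(\"End-of-stream\\n\")\n",
+    "        loop.quit()\n",
+    "    elif t == Gst.MessageType.ERROR:\n",
+    "        err, debug = message.parse_error()\n",
+    "        sys.stderr.write(\"Error: %s: %s\\n\" % (err, debug))\n",
+    "        loop.quit()\n",
+    "    # Returning True keeps the watch installed on the bus\n",
+    "    return True"
+   ]
+  },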
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Our pipeline now carries the metadata forward but we have not done anything with it until now, but as mentoioned in the above pipeline diagram , we will now create a callback function to write relevant data on the frame once called and create a sink pad in the nvosd element to call the function. \n",
+    "\n",
+    "This callback function is the same as used in the previous notebook."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# tiler_sink_pad_buffer_probe  will extract metadata received on OSD sink pad\n",
+    "# and update params for drawing rectangle, object information etc.\n",
+    "def tiler_src_pad_buffer_probe(pad,info,u_data):\n",
+    "    #Intiallizing object counter with 0.\n",
+    "    obj_counter = {\n",
+    "        PGIE_CLASS_ID_VEHICLE:0,\n",
+    "        PGIE_CLASS_ID_PERSON:0,\n",
+    "        PGIE_CLASS_ID_BICYCLE:0,\n",
+    "        PGIE_CLASS_ID_ROADSIGN:0\n",
+    "    }\n",
+    "    # Set frame_number & rectangles to draw as 0 \n",
+    "    frame_number=0\n",
+    "    num_rects=0\n",
+    "    \n",
+    "    gst_buffer = info.get_buffer()\n",
+    "    if not gst_buffer:\n",
+    "        print(\"Unable to get GstBuffer \")\n",
+    "        return\n",
+    "\n",
+    "    # Retrieve batch metadata from the gst_buffer\n",
+    "    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the\n",
+    "    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)\n",
+    "    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))\n",
+    "    l_frame = batch_meta.frame_meta_list\n",
+    "    while l_frame is not None:\n",
+    "        try:\n",
+    "            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta\n",
+    "            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "        \n",
+    "        # Get frame number , number of rectables to draw and object metadata\n",
+    "        frame_number=frame_meta.frame_num\n",
+    "        num_rects = frame_meta.num_obj_meta\n",
+    "        l_obj=frame_meta.obj_meta_list\n",
+    "        \n",
+    "        while l_obj is not None:\n",
+    "            try:\n",
+    "                # Casting l_obj.data to pyds.NvDsObjectMeta\n",
+    "                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "            # Increment Object class by 1 and Set Box border to Red color     \n",
+    "            obj_counter[obj_meta.class_id] += 1\n",
+    "            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)\n",
+    "            try: \n",
+    "                l_obj=l_obj.next\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "        ################## Setting Metadata Display configruation ############### \n",
+    "        # Acquiring a display meta object.\n",
+    "        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)\n",
+    "        display_meta.num_labels = 1\n",
+    "        py_nvosd_text_params = display_meta.text_params[0]\n",
+    "        # Setting display text to be shown on screen\n",
+    "        py_nvosd_text_params.display_text = \"Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}\".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])\n",
+    "        # Now set the offsets where the string should appear\n",
+    "        py_nvosd_text_params.x_offset = 10\n",
+    "        py_nvosd_text_params.y_offset = 12\n",
+    "        # Font , font-color and font-size\n",
+    "        py_nvosd_text_params.font_params.font_name = \"Serif\"\n",
+    "        py_nvosd_text_params.font_params.font_size = 10\n",
+    "        # Set(red, green, blue, alpha); Set to White\n",
+    "        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)\n",
+    "        # Text background color\n",
+    "        py_nvosd_text_params.set_bg_clr = 1\n",
+    "        # Set(red, green, blue, alpha); set to Black\n",
+    "        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)\n",
+    "        # Using pyds.get_string() to get display_text as string to print in notebook\n",
+    "        print(pyds.get_string(py_nvosd_text_params.display_text))\n",
+    "        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)\n",
+    "        \n",
+    "        ############################################################################\n",
+    "        # Get frame rate through this probe\n",
+    "        fps_streams[\"stream{0}\".format(frame_meta.pad_index)].get_fps()\n",
+    "        try:\n",
+    "            l_frame=l_frame.next\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "\n",
+    "    return Gst.PadProbeReturn.OK\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tiler_src_pad=sgie3.get_static_pad(\"src\")\n",
+    "if not tiler_src_pad:\n",
+    "    sys.stderr.write(\" Unable to get src pad \\n\")\n",
+    "else:\n",
+    "    tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now with everything defined , we can start the playback and listen the events."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# List the sources\n",
+    "print(\"Now playing...\")\n",
+    "start_time = time.time()\n",
+    "print(\"Starting pipeline \\n\")\n",
+    "# start play back and listed to events\t\t\n",
+    "pipeline.set_state(Gst.State.PLAYING)\n",
+    "try:\n",
+    "    loop.run()\n",
+    "except:\n",
+    "    pass\n",
+    "# cleanup\n",
+    "print(\"Exiting app\\n\")\n",
+    "pipeline.set_state(Gst.State.NULL)\n",
+    "print(\"--- %s seconds ---\" % (time.time() - start_time))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Convert video profile to be compatible with Jupyter notebook\n",
+    "!ffmpeg -loglevel panic -y -an -i ../source_code/N4/ds_out.mp4 -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 ../source_code/N4/output.mp4"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Display the Output\n",
+    "from IPython.display import HTML\n",
+    "HTML(\"\"\"\n",
+    " <video width=\"960\" height=\"540\" controls>\n",
+    " <source src=\"../source_code/N4/output.mp4\"\n",
+    " </video>\n",
+    "\"\"\".format())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Licensing\n",
+    "  \n",
+    "This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0)."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

File diff suppressed because it is too large
+ 624 - 0
ai/DeepStream/English/python/jupyter_notebook/Multi-stream_pipeline.ipynb


BIN
ai/DeepStream/English/python/jupyter_notebook/images/DeepStream_Plugin_Manual.pdf


BIN
ai/DeepStream/English/python/jupyter_notebook/images/ds-perf.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/ds-python-api.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/ds-sample-pipeline.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/ds-scalability.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/ds-sdk.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/ds-workflow-split.jpeg


BIN
ai/DeepStream/English/python/jupyter_notebook/images/ds-workflow.jpg


BIN
ai/DeepStream/English/python/jupyter_notebook/images/nvinfer.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/nvmultistreamtiler.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/nvosd.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/nvstreammux.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/nvtracker.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/nvvidconv.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/pads.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/pipeline.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/test1.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/test2.png


BIN
ai/DeepStream/English/python/jupyter_notebook/images/test3.png


+ 33 - 0
ai/DeepStream/English/python/source_code/N1/config_infer_primary_peoplenet.txt

@@ -0,0 +1,33 @@
+# Copyright (c) 2020 NVIDIA Corporation.  All rights reserved.
+#
+# NVIDIA Corporation and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto.  Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA Corporation is strictly prohibited.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+tlt-model-key=tlt_encode
+tlt-encoded-model=../../models/tlt_pretrained_models/peoplenet/resnet34_peoplenet_pruned.etlt
+labelfile-path=labels_peoplenet.txt
+model-engine-file=../../models/tlt_pretrained_models/peoplenet/resnet34_peoplenet_pruned.etlt_b1_gpu0_fp16.engine
+input-dims=3;544;960;0
+uff-input-blob-name=input_1
+batch-size=1
+process-mode=1
+model-color-format=0
+## 0=FP32, 1=INT8, 2=FP16 mode
+network-mode=2
+num-detected-classes=3
+cluster-mode=1
+interval=0
+gie-unique-id=1
+output-blob-names=output_bbox/BiasAdd;output_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.4
+## Set eps=0.7 and minBoxes for cluster-mode=1(DBSCAN)
+eps=0.7
+minBoxes=1

+ 81 - 0
ai/DeepStream/English/python/source_code/N1/dstest1_pgie_config.txt

@@ -0,0 +1,81 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+network-mode=1
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=0
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1

+ 31 - 0
ai/DeepStream/English/python/source_code/N1/dstest1_pgie_config_trt.txt

@@ -0,0 +1,31 @@
+# Copyright (c) 2020 NVIDIA Corporation.  All rights reserved.
+#
+# NVIDIA Corporation and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto.  Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA Corporation is strictly prohibited.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+#model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+network-mode=1
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=0
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1
+

+ 3 - 0
ai/DeepStream/English/python/source_code/N1/labels_peoplenet.txt

@@ -0,0 +1,3 @@
+Person
+Bag
+Face

+ 81 - 0
ai/DeepStream/English/python/source_code/N2/dstest2_pgie_config.txt

@@ -0,0 +1,81 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+network-mode=1
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=0
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1

+ 87 - 0
ai/DeepStream/English/python/source_code/N2/dstest2_sgie1_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=16
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+process-mode=2
+model-color-format=1
+gpu-id=0
+gie-unique-id=2
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 87 - 0
ai/DeepStream/English/python/source_code/N2/dstest2_sgie2_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=16
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+process-mode=2
+model-color-format=1
+gpu-id=0
+gie-unique-id=3
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 87 - 0
ai/DeepStream/English/python/source_code/N2/dstest2_sgie3_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=16
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+model-color-format=1
+process-mode=2
+gpu-id=0
+gie-unique-id=4
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 37 - 0
ai/DeepStream/English/python/source_code/N2/dstest2_tracker_config.txt

@@ -0,0 +1,37 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Mandatory properties for the tracker:
+#   tracker-width
+#   tracker-height: needs to be multiple of 6 for NvDCF
+#   gpu-id
+#   ll-lib-file: path to low-level tracker lib
+#   ll-config-file: required for NvDCF, optional for KLT and IOU
+#
+[tracker]
+tracker-width=640
+tracker-height=384
+gpu-id=0
+ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_klt.so
+#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so
+#ll-config-file=tracker_config.yml
+enable-batch-process=1

+ 81 - 0
ai/DeepStream/English/python/source_code/N3/dstest3_pgie_config.txt

@@ -0,0 +1,81 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+# model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=4
+network-mode=1
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=0
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1

+ 81 - 0
ai/DeepStream/English/python/source_code/N4/dstest4_pgie_config.txt

@@ -0,0 +1,81 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+network-mode=1
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=0
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1

+ 87 - 0
ai/DeepStream/English/python/source_code/N4/dstest4_sgie1_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=16
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+process-mode=2
+model-color-format=1
+gpu-id=0
+gie-unique-id=2
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 87 - 0
ai/DeepStream/English/python/source_code/N4/dstest4_sgie2_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e. FP32),
+#   model-color-format(Default=0 i.e. RGB), model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=16
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+process-mode=2
+model-color-format=1
+gie-unique-id=3
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+#scaling-filter=0
+#scaling-compute-hw=0

+ 87 - 0
ai/DeepStream/English/python/source_code/N4/dstest4_sgie3_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e. FP32),
+#   model-color-format(Default=0 i.e. RGB), model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=16
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+model-color-format=1
+process-mode=2
+gie-unique-id=4
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+#scaling-filter=0
+#scaling-compute-hw=0

+ 37 - 0
ai/DeepStream/English/python/source_code/N4/dstest4_tracker_config.txt

@@ -0,0 +1,37 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Mandatory properties for the tracker:
+#   tracker-width
+#   tracker-height: needs to be multiple of 6 for NvDCF
+#   gpu-id
+#   ll-lib-file: path to low-level tracker lib
+#   ll-config-file: required for NvDCF, optional for KLT and IOU
+#
+[tracker]
+tracker-width=640
+tracker-height=384
+gpu-id=0
+ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_klt.so
+#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so
+#ll-config-file=tracker_config.yml
+enable-batch-process=1

+ 50 - 0
ai/DeepStream/English/python/source_code/common/FPS.py

@@ -0,0 +1,50 @@
+################################################################################
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+import time
+start_time = time.time()
+frame_count = 0
+
+class GETFPS:
+    """Track and periodically print the FPS of a single stream."""
+    def __init__(self, stream_id):
+        global start_time
+        self.start_time = start_time
+        self.is_first = True
+        global frame_count
+        self.frame_count = frame_count
+        self.stream_id = stream_id
+    def get_fps(self):
+        # Call once per frame; prints the average FPS over each ~5 second window.
+        end_time = time.time()
+        if self.is_first:
+            # Start the measurement window at the first frame of the stream.
+            self.start_time = end_time
+            self.is_first = False
+        if end_time - self.start_time > 5:
+            print("**********************FPS*****************************************")
+            print("Fps of stream", self.stream_id, "is ", float(self.frame_count) / 5.0)
+            self.frame_count = 0
+            self.start_time = end_time
+        else:
+            self.frame_count = self.frame_count + 1
+    def print_data(self):
+        print('frame_count=', self.frame_count)
+        print('start_time=', self.start_time)
+

+ 0 - 0
ai/DeepStream/English/python/source_code/common/__init__.py


BIN
ai/DeepStream/English/python/source_code/common/__pycache__/FPS.cpython-36.pyc


BIN
ai/DeepStream/English/python/source_code/common/__pycache__/__init__.cpython-36.pyc


BIN
ai/DeepStream/English/python/source_code/common/__pycache__/bus_call.cpython-36.pyc


BIN
ai/DeepStream/English/python/source_code/common/__pycache__/is_aarch_64.cpython-36.pyc


+ 39 - 0
ai/DeepStream/English/python/source_code/common/bus_call.py

@@ -0,0 +1,39 @@
+################################################################################
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+import gi
+import sys
+gi.require_version('Gst', '1.0')
+from gi.repository import GObject, Gst
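+
+# Standard GStreamer bus callback: quit the main loop on end-of-stream or error,
+# and log any warnings to stderr.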
+def bus_call(bus, message, loop):
+    t = message.type
+    if t == Gst.MessageType.EOS:
+        sys.stdout.write("End-of-stream\n")
+        loop.quit()
+    elif t == Gst.MessageType.WARNING:
+        err, debug = message.parse_warning()
+        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
+    elif t == Gst.MessageType.ERROR:
+        err, debug = message.parse_error()
+        sys.stderr.write("Error: %s: %s\n" % (err, debug))
+        loop.quit()
+    return True

+ 30 - 0
ai/DeepStream/English/python/source_code/common/is_aarch_64.py

@@ -0,0 +1,30 @@
+################################################################################
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+import platform
+import sys
+
+
+def is_aarch64():
+    return platform.uname()[4] == 'aarch64'
+
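+# Make the DeepStream Python bindings (pyds) importable when this module is loaded.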
+sys.path.append('/opt/nvidia/deepstream/deepstream/lib')

+ 29 - 0
ai/DeepStream/English/python/source_code/common/utils.py

@@ -0,0 +1,29 @@
+################################################################################
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+import ctypes
+import sys
+sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
+
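+# Truncate a Python integer to a signed 32-bit value (two's complement),
+# e.g. when handling values passed through the pyds bindings.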
+def long_to_int(l):
+    value = ctypes.c_int(l & 0xffffffff).value
+    return value

+ 50 - 0
ai/DeepStream/README.md

@@ -0,0 +1,50 @@
+
+# openacc-training-materials
+Training materials provided by OpenACC.org. The objective of this lab is to give an introduction to using the NVIDIA DeepStream framework in the Intelligent Video Analytics domain.
+
+## Prerequisites
+To run this tutorial you will need a machine with an NVIDIA GPU.
+
+- Install the latest [Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker) or [Singularity](https://sylabs.io/docs/).
+
+- The base containers required for the lab may require users to create an NGC account and generate an API key (https://docs.nvidia.com/ngc/ngc-catalog-user-guide/index.html#registering-activating-ngc-account)
+
+## Creating containers
+To start with, you will have to build a Docker or Singularity container.
+
+### Docker Container
+To build the Docker container, run:
+`sudo docker build --network=host -t <imagename>:<tagnumber> .`
+
+For instance:
+`sudo docker build -t myimage:1.0 .`
+
+and to run the container, run:
+`sudo docker run --rm -it --gpus=all --network=host -p 8888:8888 myimage:1.0`
+
+Once inside the container, launch the Jupyter notebook by typing the following command:
+`jupyter notebook --no-browser --allow-root --ip=0.0.0.0 --port=8888 --NotebookApp.token="" --notebook-dir=/opt/nvidia/deepstream/deepstream-5.0/python`
+
+Then, open the Jupyter notebook in your browser: http://localhost:8888
+Start working on the lab by clicking on the `Start_Here.ipynb` notebook.
+
+### Singularity Container
+
+To build the Singularity container, run:
+`sudo singularity build --sandbox <image_name>.simg Singularity`
+
+and copy the files to your local machine to make sure changes are stored locally:
+`singularity run --writable <image_name>.simg cp -rT /opt/nvidia/deepstream/deepstream-5.0/ ~/workspace`
+
+
+Then, run the container:
+`singularity run --nv --writable <image_name>.simg jupyter notebook --no-browser --allow-root --ip=0.0.0.0 --port=8888 --NotebookApp.token="" --notebook-dir=~/workspace/python`
+
+Then, open the Jupyter notebook in your browser: http://localhost:8888
+Start working on the lab by clicking on the `Start_Here.ipynb` notebook.
+
+## Troubleshooting
+
+Q. "ResourceExhaustedError" error is observed while running the labs
+A. Currently the batch size and network model is set to consume 16GB GPU memory. In order to use the labs without any modifications it is recommended to have GPU with minimum 16GB GPU memory. Else the users can play with batch size to reduce the memory footprint
+

+ 25 - 0
ai/DeepStream/Singularity

@@ -0,0 +1,25 @@
+Bootstrap: docker
+From: nvcr.io/nvidia/deepstream:5.0-20.07-triton
+
+%runscript
+ 
+    "$@"
+
+%post
+
+    apt-get -y update
+    apt-get -y install ffmpeg python3-gi
+    cd /opt/nvidia/deepstream/deepstream/lib
+    python3 /opt/nvidia/deepstream/deepstream/lib/setup.py install
+    pip3 install jupyterlab
+
+%files
+
+    English/* /opt/nvidia/deepstream/deepstream-5.0/
+
+%environment
+XDG_RUNTIME_DIR=
+
+%labels
+
+AUTHOR bharatk

+ 38 - 0
ai/DeepStream_Perf_Lab/Dockerfile

@@ -0,0 +1,38 @@
+# Select Base Image
+FROM nvcr.io/nvidia/deepstream:5.0.1-20.09-triton
+
+# NVIDIA Nsight Systems 2020.2.1
+RUN apt-get update -y && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+        apt-transport-https \
+        ca-certificates \
+        gnupg \
+        wget && \
+    rm -rf /var/lib/apt/lists/*
+RUN wget -qO - https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub | apt-key add - && \
+    echo "deb https://developer.download.nvidia.com/devtools/repo-deb/x86_64/ /" >> /etc/apt/sources.list.d/nsight.list && \
+    apt-get update -y && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+        nsight-systems-2020.2.1 && \
+    rm -rf /var/lib/apt/lists/* 
+# Update the repo
+RUN apt-get update
+# Install required dependencies
+RUN apt-get install ffmpeg python3-gi python3-dev python3-pip cmake unzip -y
+# Install pybind11
+RUN pip3 install pybind11
+# Install required python packages
+WORKDIR /opt/nvidia/deepstream/deepstream/lib
+RUN python3 setup.py install
+WORKDIR /opt/nvidia/deepstream/deepstream-5.0
+RUN pip3 install jupyterlab gdown
+COPY English /opt/nvidia/deepstream/deepstream-5.0
+WORKDIR /opt/nvidia/deepstream/deepstream-5.0/python/source_code/dataset/
+RUN python3 /opt/nvidia/deepstream/deepstream-5.0/python/source_code/dataset/download_dataset.py
+RUN ls -l
+#RUN mv datasetlihvsi4dtmp  deepstream_dataset.zip
+RUN unzip deepstream_dataset.zip
+WORKDIR /opt/nvidia/deepstream/deepstream-5.0
+## Uncomment this line to run Jupyter notebook by default
+CMD jupyter notebook --no-browser --allow-root --ip=0.0.0.0 --port=8889 --NotebookApp.token="" --notebook-dir=/opt/nvidia/deepstream/deepstream-5.0/python
+

+ 64 - 0
ai/DeepStream_Perf_Lab/English/python/Start_Here.ipynb

@@ -0,0 +1,64 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Welcome to Deepstream Performance Analysis Bootcamp\n",
+    "\n",
+    "The objective of this Bootcamp is to introduce you to the performance analysis and optimizations for Pipelines built using DeepStream SDK 5.0. The Bootcamp assumes familiarity with DeepStream SDK and if you are using DeepStream for first time please go through the DeepStream Bootcamp prior.  \n",
+    "\n",
+    "The following contents will be covered during the Bootcamp : \n",
+    "\n",
+    "\n",
+    "- [**Introduction to Performance analysis :**](jupyter_notebook/Introduction_to_Performance_analysis.ipynb)\n",
+    "    - [Latency, Throughput, and GPU Metrics](jupyter_notebook/Introduction_to_Performance_analysis.ipynb#Latency,-Throughput,-and-GPU-Metrics)\n",
+    "        - [Latency](jupyter_notebook/Introduction_to_Performance_analysis.ipynb#Latency)\n",
+    "        - [GPU Metrics](jupyter_notebook/Introduction_to_Performance_analysis.ipynb#GPU-Metrics)\n",
+    "        - [Throughput](jupyter_notebook/Introduction_to_Performance_analysis.ipynb#Throughput)\n",
+    "    - [Case 1 : Multi-stream cascaded network pipeline](jupyter_notebook/Introduction_to_Performance_analysis.ipynb#Case-1:-Multi-stream-cascaded-network-pipeline.)\n",
+    "        - [Bench-marking with GST Probes](jupyter_notebook/Introduction_to_Performance_analysis.ipynb#Benchmarking-with-GST-Probes)\n",
+    "        - [Effects on OSD,Tiler & Queues](jupyter_notebook/Introduction_to_Performance_analysis.ipynb#Effects-on-OSD,-Tiler,-and-Queues)\n",
+    "    - [Summary](jupyter_notebook/Introduction_to_Performance_analysis.ipynb#Summary)\n",
+    "\n",
+    "- [**Performance Analysis using NSight systems**](jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb)\n",
+    "    - [Using NSight Systems to generate a report and finding bottlenecks to solve](jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb#Using-NSight-Systems-to-generate-a-report-and-finding-bottlenecks-to-solve) \n",
+    "        - [Streammux parameters](jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb#Streammux-parameters)\n",
+    "        - [Batch size across cascaded networks](jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb#Batch-size-across-cascaded-networks)\n",
+    "        - [NVInfer](jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb#NVInfer)\n",
+    "        - [NVTracker](jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb#NVTracker)\n",
+    "    - [Summary](jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb#Summary)\n",
+    "    \n",
+    "- [**Performance Analysis using NSight systems - Continued**](jupyter_notebook/Performance_Analysis_using_NSight_systems_Continued.ipynb)\n",
+    "    - [Case 2:COVID-19 Social Distancing Application.](jupyter_notebook/Performance_Analysis_using_NSight_systems_Continued.ipynb#Case-2:-COVID-19-Social-Distancing-Application.)\n",
+    "        - [Finding distance between 2 people](jupyter_notebook/Performance_Analysis_using_NSight_systems_Continued.ipynb#Finding-distance-between-2-people)\n",
+    "    - [Solving the computational bottleneck](jupyter_notebook/Performance_Analysis_using_NSight_systems_Continued.ipynb#Solving-the-computational-bottleneck)\n",
+    "    - [Jetson specific optimizations](jupyter_notebook/Performance_Analysis_using_NSight_systems_Continued.ipynb#Jetson-specific-optimizations)\n",
+    "    - [Summary](jupyter_notebook/Performance_Analysis_using_NSight_systems_Continued.ipynb#Summary)\n",
+    "\n",
+    "**Disclaimer** : All the results mentioned in the notebooks were tested on a Tesla V100. The results would vary when using different hardware and would also depend on the PCI Express bandwidth and the thermal conditions of the hardware present in your machine."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

+ 847 - 0
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/Introduction_to_Performance_analysis.ipynb

@@ -0,0 +1,847 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&ensp;\n",
+    "[Home Page](Start_Here.ipynb)\n",
+    "    \n",
+    "    \n",
+    "\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&ensp;\n",
+    "[1]\n",
+    "[2](Performance_Analysis_using_NSight_systems.ipynb)\n",
+    "[3](Performance_Analysis_using_NSight_systems_Continued.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "[Next Notebook](Performance_Analysis_using_NSight_systems.ipynb)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Introduction to Performance analysis\n",
+    "\n",
+    "\n",
+    "In this notebook, we will get introduced to the various metrics used to measure the performance of a DeepStream pipeline and improve the performance of a DeepStream pipeline.\n",
+    "\n",
+    "- [Latency, Throughput, and GPU Metrics](#Latency,-Throughput,-and-GPU-Metrics)\n",
+    "    - [Latency](#Latency)\n",
+    "    - [GPU Metrics](#GPU-Metrics)\n",
+    "    - [Throughput](#Throughput)\n",
+    "- [Case 1 : Multi-stream cascaded network pipeline](#Case-1:-Multi-stream-cascaded-network-pipeline.)\n",
+    "    - [Bench-marking with GST Probes](#Benchmarking-with-GST-Probes)\n",
+    "    - [Effects on OSD,Tiler & Queues](#Effects-on-OSD,-Tiler,-and-Queues)\n",
+    "- [Summary](#Summary)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Latency, Throughput, and GPU Metrics\n",
+    "\n",
+    "\n",
+    "### Latency\n",
+    "\n",
+    "Latency is important for real-time pipelines that are time-critical. Latency in a DeepStream pipeline can be measured using GStreamer debugging capabilities. By setting the `GST-DEBUG` environment variable to `GST_SCHEDULING:7`, we get a trace log that contains details on when the buffers are modified from which we can obtain detailed information about our pipeline."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#To make sure that right paths to the NVidia Libraries are added run this cell first\n",
+    "!rm ~/.cache/gstreamer-1.0/registry.x86_64.bin\n",
+    "!export LD_LIBRARY_PATH=/opt/tensorrtserver/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/.singularity.d/libs:$LD_LIBRARY_PATH"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!GST_DEBUG=\"GST_SCHEDULING:7\" GST_DEBUG_FILE=../source_code/trace.log \\\n",
+    "python3 ../source_code/deepstream-app-1/deepstream_test_1.py '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `trace.log` file is huge, and here is a small portion of the file that highlights the time a buffer entered the decoder plugin and the time the buffer enters the next input.\n",
+    "\n",
+    "```txt\n",
+    "0:00:01.641136185 GST_SCHEDULING gstpad.c:4320:gst_pad_chain_data_unchecked:<nvv4l2-decoder:sink>\u001b[00m calling chainfunction &gst_video_decoder_chain with buffer buffer: 0x7ff010028d90, pts 99:99:99.999999999, dts 0:00:02.966666637, dur 0:00:00.033333333, size 30487, offset 947619, offset_end 1013155, flags 0x2000\n",
+    "\n",
+    "00:01.648137739 GST_SCHEDULING gstpad.c:4320:gst_pad_chain_data_unchecked:<Stream-muxer:sink_0>\u001b[00m calling chainfunction &gst_nvstreammux_chain with buffer buffer: 0x7ff01001c5f0, pts 0:00:02.966666637, dts 99:99:99.999999999, dur 0:00:00.033333333, size 64, offset none, offset_end none, flags 0x0\n",
+    "```\n",
+    "\n",
+    "Here latency can be calculated by looking at the time difference between the stream entering one element to the other in the pipeline. In the output shown above, it is ~7ms (00:01.648137739 - 0:00:01.641136185) , it is these timestamps that help us denote the latency. \n",
+    "\n",
+    "For more details, check [GStreamer's documentation on Latency](https://gstreamer.freedesktop.org/documentation/additional/design/latency.html?gi-language=c)\n",
+    "\n",
+    "### GPU Metrics\n",
+    "\n",
+    "We can use `nvidia-smi` to explore the GPU performance metrics while our application is running. GPU utilization is something we want to pay attention to, and we will discuss it below. Run the cell below to re-run the application while logging the results of `nvidia-smi`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!nvidia-smi dmon -i 0 -s ucmt -c 8 > ../source_code/smi.log & \\\n",
+    "python3 ../source_code/deepstream-app-1/deepstream_test_1.py '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can open the `smi.log` file to investigate our utilization metrics. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!cat ../source_code/smi.log"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Understanding nvidia-smi\n",
+    "The cell block above passed the following arguments to `nvidia-smi` :\n",
+    "\n",
+    "- `dmon -i 0` \n",
+    "\n",
+    "    - Reports default metrics (device monitoring) for the devices selected by comma-separated device list. In this case, we are reporting default metrics for GPU with index 0 since that is the GPU we are using.\n",
+    "- `-s ucmt` : \n",
+    "    - We can choose which metrics we want to display. In this case, we supplied ucmt to indicate we want metrics for\n",
+    "        - u: Utilization (SM, Memory, Encoder and Decoder Utilization in %) \n",
+    "        - c: Proc and Mem Clocks (in MHz)\n",
+    "        - m: Frame Buffer and Bar1 memory usage (in MB)\n",
+    "        - t: PCIe Rx and Tx Throughput in MB/s (Maxwell and above)\n",
+    "- `-c 8`\n",
+    "    - We can configure the number of iterations for which we are monitoring. In this case, we choose 8 iterations.\n",
+    "\n",
+    "Let's dive a bit deeper into a few of the metrics that we've selected since they are particularly useful to\n",
+    "monitor.\n",
+    "\n",
+    "Utilization metrics report how busy each GPU is over time and can be used to determine how much an application is using the GPUs in the system. In particular, the `sm` column tracks the percent of the time over the past sample period during which one or more kernels were executing on the GPU. `fb` reports the GPU's frame buffer memory usage.\n",
+    "\n",
+    "### Throughput \n",
+    "\n",
+    "The Throughput of the pipeline gives us an idea of the dataflow, which helps us understand how many Streams it can process concurrently at a required FPS. In this set of notebooks, we would mainly concentrate on increasing our pipelines' FPS using various optimizations.\n",
+    "\n",
+    "\n",
+    "## Case 1: Multi-stream cascaded network pipeline.\n",
+    "\n",
+    "In this section, we will optimize a Multi-stream network that was part of the problem statement in the Introduction to DeepStream notebooks.\n",
+    "\n",
+    "We will utilize our `deepstream-test-2-app` to include multi-stream functionalities using the `Streammux` plugin.\n",
+    "\n",
+    "\n",
+    "![Pipeline](images/app-2.png)\n",
+    "\n",
+    "\n",
+    "### Benchmarking with GST-Probes\n",
+    "\n",
+    "\n",
+    "Here we'll import the `GETFPS` Class and use the `get_fps()` method inside it to calculate the average FPS of our stream. This is part of [DeepStream Python Apps Github Repository](https://github.com/NVIDIA-AI-IOT/deepstream_python_apps), here we have modified the average FPS output time from 5s to 1s for benchmarking purposes.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import required libraries \n",
+    "import sys\n",
+    "sys.path.append('../source_code')\n",
+    "import gi\n",
+    "import configparser\n",
+    "gi.require_version('Gst', '1.0')\n",
+    "from gi.repository import GObject, Gst\n",
+    "from gi.repository import GLib\n",
+    "from ctypes import *\n",
+    "import time\n",
+    "import sys\n",
+    "import math\n",
+    "import platform\n",
+    "from common.bus_call import bus_call\n",
+    "from common.FPS import GETFPS\n",
+    "import pyds\n",
+    "\n",
+    "\n",
+    "# Define variables to be used later\n",
+    "fps_streams={}\n",
+    "\n",
+    "PGIE_CLASS_ID_VEHICLE = 0\n",
+    "PGIE_CLASS_ID_BICYCLE = 1\n",
+    "PGIE_CLASS_ID_PERSON = 2\n",
+    "PGIE_CLASS_ID_ROADSIGN = 3\n",
+    "\n",
+    "MUXER_OUTPUT_WIDTH=1920\n",
+    "MUXER_OUTPUT_HEIGHT=1080\n",
+    "\n",
+    "TILED_OUTPUT_WIDTH=1920\n",
+    "TILED_OUTPUT_HEIGHT=1080\n",
+    "OSD_PROCESS_MODE= 0\n",
+    "OSD_DISPLAY_TEXT= 0\n",
+    "pgie_classes_str= [\"Vehicle\", \"TwoWheeler\", \"Person\",\"RoadSign\"]\n",
+    "\n",
+    "################ Three Stream Pipeline ###########\n",
+    "# Define Input and output Stream information \n",
+    "num_sources = 3 \n",
+    "INPUT_VIDEO_1 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "INPUT_VIDEO_2 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "INPUT_VIDEO_3 = '/opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'\n",
+    "OUTPUT_VIDEO_NAME = \"../source_code/N1/ds_out.mp4\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We define a function `make_elm_or_print_err()` to create our elements and report any errors if the creation fails.\n",
+    "\n",
+    "Elements are created using the `Gst.ElementFactory.make()` function as part of Gstreamer library."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Make Element or Print Error and any other detail\n",
+    "def make_elm_or_print_err(factoryname, name, printedname, detail=\"\"):\n",
+    "  print(\"Creating\", printedname)\n",
+    "  elm = Gst.ElementFactory.make(factoryname, name)\n",
+    "  if not elm:\n",
+    "     sys.stderr.write(\"Unable to create \" + printedname + \" \\n\")\n",
+    "  if detail:\n",
+    "     sys.stderr.write(detail)\n",
+    "  return elm"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Initialise GStreamer and Create an Empty Pipeline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for i in range(0,num_sources):\n",
+    "        fps_streams[\"stream{0}\".format(i)]=GETFPS(i)\n",
+    "\n",
+    "# Standard GStreamer initialization\n",
+    "Gst.init(None)\n",
+    "\n",
+    "# Create gstreamer elements */\n",
+    "# Create Pipeline element that will form a connection of other elements\n",
+    "print(\"Creating Pipeline \\n \")\n",
+    "pipeline = Gst.Pipeline()\n",
+    "\n",
+    "if not pipeline:\n",
+    "    sys.stderr.write(\" Unable to create Pipeline \\n\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Create Elements that are required for our pipeline\n",
+    "\n",
+    "Compared to the first notebook , we use a lot of queues in this notebook to buffer data when it moves from one plugin to another."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########### Create Elements required for the Pipeline ########### \n",
+    "\n",
+    "######### Defining Stream 1 \n",
+    "# Source element for reading from the file\n",
+    "source1 = make_elm_or_print_err(\"filesrc\", \"file-source-1\",'file-source-1')\n",
+    "# Since the data format in the input file is elementary h264 stream,we need a h264parser\n",
+    "h264parser1 = make_elm_or_print_err(\"h264parse\", \"h264-parser-1\",\"h264-parser-1\")\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "decoder1 = make_elm_or_print_err(\"nvv4l2decoder\", \"nvv4l2-decoder-1\",\"nvv4l2-decoder-1\")\n",
+    "   \n",
+    "##########\n",
+    "\n",
+    "########## Defining Stream 2 \n",
+    "# Source element for reading from the file\n",
+    "source2 = make_elm_or_print_err(\"filesrc\", \"file-source-2\",\"file-source-2\")\n",
+    "# Since the data format in the input file is elementary h264 stream, we need a h264parser\n",
+    "h264parser2 = make_elm_or_print_err(\"h264parse\", \"h264-parser-2\", \"h264-parser-2\")\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "decoder2 = make_elm_or_print_err(\"nvv4l2decoder\", \"nvv4l2-decoder-2\",\"nvv4l2-decoder-2\")\n",
+    "########### \n",
+    "\n",
+    "########## Defining Stream 3\n",
+    "# Source element for reading from the file\n",
+    "source3 = make_elm_or_print_err(\"filesrc\", \"file-source-3\",\"file-source-3\")\n",
+    "# Since the data format in the input file is elementary h264 stream, we need a h264parser\n",
+    "h264parser3 = make_elm_or_print_err(\"h264parse\", \"h264-parser-3\", \"h264-parser-3\")\n",
+    "# Use nvdec_h264 for hardware accelerated decode on GPU\n",
+    "decoder3 = make_elm_or_print_err(\"nvv4l2decoder\", \"nvv4l2-decoder-3\",\"nvv4l2-decoder-3\")\n",
+    "########### \n",
+    "    \n",
+    "# Create nvstreammux instance to form batches from one or more sources.\n",
+    "streammux = make_elm_or_print_err(\"nvstreammux\", \"Stream-muxer\",\"Stream-muxer\") \n",
+    "# Use nvinfer to run inferencing on decoder's output, behaviour of inferencing is set through config file\n",
+    "pgie = make_elm_or_print_err(\"nvinfer\", \"primary-inference\" ,\"pgie\")\n",
+    "# Use nvtracker to give objects unique-ids\n",
+    "tracker = make_elm_or_print_err(\"nvtracker\", \"tracker\",'tracker')\n",
+    "# Seconday inference for Finding Car Color\n",
+    "sgie1 = make_elm_or_print_err(\"nvinfer\", \"secondary1-nvinference-engine\",'sgie1')\n",
+    "# Seconday inference for Finding Car Make\n",
+    "sgie2 = make_elm_or_print_err(\"nvinfer\", \"secondary2-nvinference-engine\",'sgie2')\n",
+    "# Seconday inference for Finding Car Type\n",
+    "sgie3 = make_elm_or_print_err(\"nvinfer\", \"secondary3-nvinference-engine\",'sgie3')\n",
+    "# Creating Tiler to present more than one streams\n",
+    "tiler=make_elm_or_print_err(\"nvmultistreamtiler\", \"nvtiler\",\"nvtiler\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv = make_elm_or_print_err(\"nvvideoconvert\", \"convertor\",\"nvvidconv\")\n",
+    "# Create OSD to draw on the converted RGBA buffer\n",
+    "nvosd = make_elm_or_print_err(\"nvdsosd\", \"onscreendisplay\",\"nvosd\")\n",
+    "# Use convertor to convert from NV12 to RGBA as required by nvosd\n",
+    "nvvidconv2 = make_elm_or_print_err(\"nvvideoconvert\", \"convertor2\",\"nvvidconv2\")\n",
+    "# Place an encoder instead of OSD to save as video file\n",
+    "encoder = make_elm_or_print_err(\"avenc_mpeg4\", \"encoder\", \"Encoder\")\n",
+    "# Parse output from Encoder \n",
+    "codeparser = make_elm_or_print_err(\"mpeg4videoparse\", \"mpeg4-parser\", 'Code Parser')\n",
+    "# Create a container\n",
+    "container = make_elm_or_print_err(\"qtmux\", \"qtmux\", \"Container\")\n",
+    "# Create Sink for storing the output \n",
+    "sink = make_elm_or_print_err(\"filesink\", \"filesink\", \"Sink\")\n",
+    "\n",
+    "# # Create Sink for storing the output \n",
+    "# fksink = make_elm_or_print_err(\"fakesink\", \"fakesink\", \"Sink\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have created the elements ,we can now set various properties for out pipeline at this point. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "############ Set properties for the Elements ############\n",
+    "# Set Input Video files \n",
+    "source1.set_property('location', INPUT_VIDEO_1)\n",
+    "source2.set_property('location', INPUT_VIDEO_2)\n",
+    "source3.set_property('location', INPUT_VIDEO_3)\n",
+    "# Set Input Width , Height and Batch Size \n",
+    "streammux.set_property('width', 1920)\n",
+    "streammux.set_property('height', 1080)\n",
+    "streammux.set_property('batch-size', 1)\n",
+    "# Timeout in microseconds to wait after the first buffer is available \n",
+    "# to push the batch even if a complete batch is not formed.\n",
+    "streammux.set_property('batched-push-timeout', 4000000)\n",
+    "# Set configuration file for nvinfer \n",
+    "# Set Congifuration file for nvinfer \n",
+    "pgie.set_property('config-file-path', \"../source_code/N1/dstest4_pgie_config.txt\")\n",
+    "sgie1.set_property('config-file-path', \"../source_code/N1/dstest4_sgie1_config.txt\")\n",
+    "sgie2.set_property('config-file-path', \"../source_code/N1/dstest4_sgie2_config.txt\")\n",
+    "sgie3.set_property('config-file-path', \"../source_code/N1/dstest4_sgie3_config.txt\")\n",
+    "#Set properties of tracker from tracker_config\n",
+    "config = configparser.ConfigParser()\n",
+    "config.read('../source_code/N1/dstest4_tracker_config.txt')\n",
+    "config.sections()\n",
+    "for key in config['tracker']:\n",
+    "    if key == 'tracker-width' :\n",
+    "        tracker_width = config.getint('tracker', key)\n",
+    "        tracker.set_property('tracker-width', tracker_width)\n",
+    "    if key == 'tracker-height' :\n",
+    "        tracker_height = config.getint('tracker', key)\n",
+    "        tracker.set_property('tracker-height', tracker_height)\n",
+    "    if key == 'gpu-id' :\n",
+    "        tracker_gpu_id = config.getint('tracker', key)\n",
+    "        tracker.set_property('gpu_id', tracker_gpu_id)\n",
+    "    if key == 'll-lib-file' :\n",
+    "        tracker_ll_lib_file = config.get('tracker', key)\n",
+    "        tracker.set_property('ll-lib-file', tracker_ll_lib_file)\n",
+    "    if key == 'll-config-file' :\n",
+    "        tracker_ll_config_file = config.get('tracker', key)\n",
+    "        tracker.set_property('ll-config-file', tracker_ll_config_file)\n",
+    "    if key == 'enable-batch-process' :\n",
+    "        tracker_enable_batch_process = config.getint('tracker', key)\n",
+    "        tracker.set_property('enable_batch_process', tracker_enable_batch_process)\n",
+    "        \n",
+    "    \n",
+    "# Set display configurations for nvmultistreamtiler    \n",
+    "tiler_rows=int(2)\n",
+    "tiler_columns=int(2)\n",
+    "tiler.set_property(\"rows\",tiler_rows)\n",
+    "tiler.set_property(\"columns\",tiler_columns)\n",
+    "tiler.set_property(\"width\", TILED_OUTPUT_WIDTH)\n",
+    "tiler.set_property(\"height\", TILED_OUTPUT_HEIGHT)\n",
+    "\n",
+    "# Set encoding properties and Sink configs\n",
+    "encoder.set_property(\"bitrate\", 2000000)\n",
+    "sink.set_property(\"location\", OUTPUT_VIDEO_NAME)\n",
+    "sink.set_property(\"sync\", 0)\n",
+    "sink.set_property(\"async\", 0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now link all the elements in the order we prefer and create Gstreamer bus to feed all messages through it. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "########## Add and Link ELements in the Pipeline ########## \n",
+    "\n",
+    "print(\"Adding elements to Pipeline \\n\")\n",
+    "pipeline.add(source1)\n",
+    "pipeline.add(h264parser1)\n",
+    "pipeline.add(decoder1)\n",
+    "pipeline.add(source2)\n",
+    "pipeline.add(h264parser2)\n",
+    "pipeline.add(decoder2)\n",
+    "pipeline.add(source3)\n",
+    "pipeline.add(h264parser3)\n",
+    "pipeline.add(decoder3)\n",
+    "pipeline.add(streammux)\n",
+    "pipeline.add(pgie)\n",
+    "pipeline.add(tracker)\n",
+    "pipeline.add(sgie1)\n",
+    "pipeline.add(sgie2)\n",
+    "pipeline.add(sgie3)\n",
+    "pipeline.add(tiler)\n",
+    "pipeline.add(nvvidconv)\n",
+    "pipeline.add(nvosd)\n",
+    "pipeline.add(nvvidconv2)\n",
+    "pipeline.add(encoder)\n",
+    "pipeline.add(codeparser)\n",
+    "pipeline.add(container)\n",
+    "pipeline.add(sink)\n",
+    "\n",
+    "\n",
+    "print(\"Linking elements in the Pipeline \\n\")\n",
+    "\n",
+    "source1.link(h264parser1)\n",
+    "h264parser1.link(decoder1)\n",
+    "\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "sinkpad1 = streammux.get_request_pad(\"sink_0\")\n",
+    "if not sinkpad1:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad1 = decoder1.get_static_pad(\"src\")\n",
+    "if not srcpad1:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad1.link(sinkpad1)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "source2.link(h264parser2)\n",
+    "h264parser2.link(decoder2)\n",
+    "\n",
+    "sinkpad2 = streammux.get_request_pad(\"sink_1\")\n",
+    "if not sinkpad2:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad2 = decoder2.get_static_pad(\"src\")\n",
+    "if not srcpad2:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad2.link(sinkpad2)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "###### Create Sink pad and connect to decoder's source pad \n",
+    "source3.link(h264parser3)\n",
+    "h264parser3.link(decoder3)\n",
+    "\n",
+    "sinkpad3 = streammux.get_request_pad(\"sink_2\")\n",
+    "if not sinkpad2:\n",
+    "    sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n",
+    "    \n",
+    "srcpad3 = decoder3.get_static_pad(\"src\")\n",
+    "if not srcpad3:\n",
+    "    sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n",
+    "    \n",
+    "srcpad3.link(sinkpad3)\n",
+    "\n",
+    "######\n",
+    "\n",
+    "\n",
+    "streammux.link(pgie)\n",
+    "pgie.link(tracker)\n",
+    "tracker.link(sgie1)\n",
+    "sgie1.link(sgie2)\n",
+    "sgie2.link(sgie3)\n",
+    "sgie3.link(tiler)\n",
+    "tiler.link(nvvidconv)\n",
+    "nvvidconv.link(nvosd)\n",
+    "nvosd.link(nvvidconv2)\n",
+    "nvvidconv2.link(encoder)\n",
+    "encoder.link(codeparser)\n",
+    "codeparser.link(container)\n",
+    "container.link(sink)\n",
+    "\n",
+    "# create an event loop and feed gstreamer bus mesages to it\n",
+    "loop = GLib.MainLoop()\n",
+    "bus = pipeline.get_bus()\n",
+    "bus.add_signal_watch()\n",
+    "bus.connect (\"message\", bus_call, loop)\n",
+    "\n",
+    "print(\"Added and Linked elements to pipeline\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Our pipeline now carries the metadata forward, but we have not done anything with it until now. And as mentioned in the above pipeline diagram, we will create a callback function to write relevant data on the frame once called and create a sink pad in the nvosd element to call the function."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# tiler_sink_pad_buffer_probe  will extract metadata received on OSD sink pad\n",
+    "# and update params for drawing rectangle, object information etc.\n",
+    "def tiler_src_pad_buffer_probe(pad,info,u_data):\n",
+    "    #Intiallizing object counter with 0.\n",
+    "    obj_counter = {\n",
+    "        PGIE_CLASS_ID_VEHICLE:0,\n",
+    "        PGIE_CLASS_ID_PERSON:0,\n",
+    "        PGIE_CLASS_ID_BICYCLE:0,\n",
+    "        PGIE_CLASS_ID_ROADSIGN:0\n",
+    "    }\n",
+    "    # Set frame_number & rectangles to draw as 0 \n",
+    "    frame_number=0\n",
+    "    num_rects=0\n",
+    "    \n",
+    "    gst_buffer = info.get_buffer()\n",
+    "    if not gst_buffer:\n",
+    "        print(\"Unable to get GstBuffer \")\n",
+    "        return\n",
+    "\n",
+    "    # Retrieve batch metadata from the gst_buffer\n",
+    "    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the\n",
+    "    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)\n",
+    "    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))\n",
+    "    l_frame = batch_meta.frame_meta_list\n",
+    "    while l_frame is not None:\n",
+    "        try:\n",
+    "            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta\n",
+    "            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "        \n",
+    "        # Get frame number , number of rectables to draw and object metadata\n",
+    "        frame_number=frame_meta.frame_num\n",
+    "        num_rects = frame_meta.num_obj_meta\n",
+    "        l_obj=frame_meta.obj_meta_list\n",
+    "        \n",
+    "        while l_obj is not None:\n",
+    "            try:\n",
+    "                # Casting l_obj.data to pyds.NvDsObjectMeta\n",
+    "                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "            # Increment Object class by 1 and Set Box border to Red color     \n",
+    "            obj_counter[obj_meta.class_id] += 1\n",
+    "            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)\n",
+    "            try: \n",
+    "                l_obj=l_obj.next\n",
+    "            except StopIteration:\n",
+    "                break\n",
+    "        ################## Setting Metadata Display configruation ############### \n",
+    "        # Acquiring a display meta object.\n",
+    "        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)\n",
+    "        display_meta.num_labels = 1\n",
+    "        py_nvosd_text_params = display_meta.text_params[0]\n",
+    "        # Setting display text to be shown on screen\n",
+    "        py_nvosd_text_params.display_text = \"Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}\".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])\n",
+    "        # Now set the offsets where the string should appear\n",
+    "        py_nvosd_text_params.x_offset = 10\n",
+    "        py_nvosd_text_params.y_offset = 12\n",
+    "        # Font , font-color and font-size\n",
+    "        py_nvosd_text_params.font_params.font_name = \"Serif\"\n",
+    "        py_nvosd_text_params.font_params.font_size = 10\n",
+    "        # Set(red, green, blue, alpha); Set to White\n",
+    "        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)\n",
+    "        # Text background color\n",
+    "        py_nvosd_text_params.set_bg_clr = 1\n",
+    "        # Set(red, green, blue, alpha); set to Black\n",
+    "        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)\n",
+    "        # Using pyds.get_string() to get display_text as string to print in notebook\n",
+    "        print(pyds.get_string(py_nvosd_text_params.display_text))\n",
+    "        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)\n",
+    "        \n",
+    "        ############################################################################\n",
+    "         # FPS Probe      \n",
+    "        fps_streams[\"stream{0}\".format(frame_meta.pad_index)].get_fps()\n",
+    "        try:\n",
+    "            l_frame=l_frame.next\n",
+    "        except StopIteration:\n",
+    "            break\n",
+    "\n",
+    "    return Gst.PadProbeReturn.OK\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tiler_src_pad=sgie3.get_static_pad(\"src\")\n",
+    "if not tiler_src_pad:\n",
+    "    sys.stderr.write(\" Unable to get src pad \\n\")\n",
+    "else:\n",
+    "    tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now with everything defined , we can start the playback and listen to the events."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# List the sources\n",
+    "print(\"Now playing...\")\n",
+    "print(\"Starting pipeline \\n\")\n",
+    "# start play back and listed to events\t\t\n",
+    "pipeline.set_state(Gst.State.PLAYING)\n",
+    "start_time = time.time()\n",
+    "try:\n",
+    "    loop.run()\n",
+    "except:\n",
+    "    pass\n",
+    "# cleanup\n",
+    "print(\"Exiting app\\n\")\n",
+    "pipeline.set_state(Gst.State.NULL)\n",
+    "Gst.Object.unref(pipeline)\n",
+    "Gst.Object.unref(bus)\n",
+    "print(\"--- %s seconds ---\" % (time.time() - start_time))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Convert video profile to be compatible with Jupyter notebook\n",
+    "!ffmpeg -loglevel panic -y -an -i ../source_code/N1/ds_out.mp4 -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 ../source_code/N1/output.mp4"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Display the Output\n",
+    "from IPython.display import HTML\n",
+    "HTML(\"\"\"\n",
+    " <video width=\"960\" height=\"540\" controls>\n",
+    " <source src=\"../source_code/N1/output.mp4\"\n",
+    " </video>\n",
+    "\"\"\".format())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let us now see how buffering can help us make the FPS higher by attaching the src_pad to a queue.\n",
+    "\n",
+    "#### Queues \n",
+    "\n",
+    "The queue element adds a thread boundary to the pipeline and enables support for buffering. The input side will add buffers into a queue, which is then emptied on the output side from another thread via properties set on the queue element.\n",
+    "\n",
+    "Let us now implement them in our pipeline and attach our callback function to the queue.\n",
+    "\n",
+    "More details on the queues can be found from the GStreamer documentation [here](https://gstreamer.freedesktop.org/documentation/coreelements/queue.html)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python3 ../source_code/utils/deepstream-osd-queue.py"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Effects on OSD, Tiler, and Queues\n",
+    "\n",
+    "In the above case, OSD ( On-screen display and Tiling ) can slow down the pipeline. We can design our pipeline such that we get the Inference metadata without the need for visual outputs. This is particularly useful when using Edge devices that only need to send real-time inference metadata to the cloud server for further processing.\n",
+    "\n",
+    "#### Disabling OSD & Tiler\n",
+    "\n",
+    "We will now design a pipeline that doesn't include the on-screen display element and tiler element. For simplicity we have bundled all the code in one python file. Open the file [here](../source_code/utils/deepstream-no-osd.py)."
+   ]
+  },
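+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a rough sketch of the idea (assuming the sources, stream muxer and inference elements are created as before, ending with `sgie3` as in this notebook), the pipeline can simply terminate in a `fakesink` after the last inference element, with the metadata probe attached upstream of the sink:\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch only -- see deepstream-no-osd.py for the complete version\n",
+    "# Instead of nvvidconv -> nvosd -> tiler -> sink, end the pipeline right after\n",
+    "# the last inference element with a fakesink that discards the buffers.\n",
+    "fakesink = Gst.ElementFactory.make(\"fakesink\", \"fakesink\")\n",
+    "if not fakesink:\n",
+    "    sys.stderr.write(\" Unable to create fakesink \\n\")\n",
+    "fakesink.set_property(\"sync\", 0)  # do not throttle playback to the clock\n",
+    "pipeline.add(fakesink)\n",
+    "sgie3.link(fakesink)\n",
+    "# The inference metadata is still available: attach the probe to sgie3's src pad\n",
+    "sgie3_src_pad = sgie3.get_static_pad(\"src\")\n",
+    "if sgie3_src_pad:\n",
+    "    sgie3_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)"
+   ]
+  },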
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python3 ../source_code/utils/deepstream-no-osd.py"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As you could observe in the total time printed we have improved the throughput of the pipeline using both Queues and removing OSD. Let us combine both methods and see if we can acheive any more performance gain. For simplicity we have bundled all the code in one python file. Open the file [here](.../source_code/utils/deepstream-no-osd-queue.py)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python3 ../source_code/utils/deepstream-no-osd-queue.py --num-sources 3"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary \n",
+    "\n",
+    "Let us summarise our above benchmarks using a table.\n",
+    "\n",
+    "|Pipeline|Relative Time|\n",
+    "|---|----|\n",
+    "|Default Pipeline|baseline|\n",
+    "|With Queues|~3x|\n",
+    "|Without OSD |~3.1x|\n",
+    "|With Queues and without OSD|~3.15x|\n",
+    "\n",
+    "\n",
+    "We can now move on to benchmark our code further using NSight systems in the upcoming notebook."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Licensing\n",
+    "  \n",
+    "This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).\n",
+    "\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&ensp;\n",
+    "[1]\n",
+    "[2](Performance_Analysis_using_NSight_systems.ipynb)\n",
+    "[3](Performance_Analysis_using_NSight_systems_Continued.ipynb)\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "[Next Notebook](Performance_Analysis_using_NSight_systems.ipynb)\n",
+    "\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&emsp;&emsp;&emsp;&emsp;\n",
+    "&emsp;&ensp;\n",
+    "[Home Page](Start_Here.ipynb)\n",
+    "    "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

File diff suppressed because it is too large
+ 512 - 0
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/Performance_Analysis_using_NSight_systems.ipynb


File diff suppressed because it is too large
+ 776 - 0
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/Performance_Analysis_using_NSight_systems_Continued.ipynb


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/Nsight Diagram.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/Optimization_Cycle.jpg


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/app-2.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/batch_size.PNG


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/batch_size_nvinfer.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/covid.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/cpu.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/cuda.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/inference.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/laplas3.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/nsight_open.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/nvstreamux-control.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/nvtracker.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/nvtx_domain.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/pinning_row.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/report4.PNG


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/report5.PNG


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/test1.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/test2.png


BIN
ai/DeepStream_Perf_Lab/English/python/jupyter_notebook/images/thread.png


+ 81 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest1_pgie_config.txt

@@ -0,0 +1,81 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+# int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+network-mode=0
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=0
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1

+ 81 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_pgie_config.txt

@@ -0,0 +1,81 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+# int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+network-mode=0
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=0
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1

+ 87 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_sgie1_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/labels.txt
+# int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+# 0=FP32 and 1=INT8 mode
+network-mode=0
+input-object-min-width=64
+input-object-min-height=64
+process-mode=2
+model-color-format=1
+gpu-id=0
+gie-unique-id=2
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 87 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_sgie2_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/labels.txt
+# int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+# 0=FP32 and 1=INT8 mode
+network-mode=0
+input-object-min-width=64
+input-object-min-height=64
+process-mode=2
+model-color-format=1
+gpu-id=0
+gie-unique-id=3
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 87 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_sgie3_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/labels.txt
+# int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+# 0=FP32 and 1=INT8 mode
+network-mode=0
+input-object-min-width=64
+input-object-min-height=64
+model-color-format=1
+process-mode=2
+gpu-id=0
+gie-unique-id=4
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 37 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N1/dstest4_tracker_config.txt

@@ -0,0 +1,37 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Mandatory properties for the tracker:
+#   tracker-width
+#   tracker-height: needs to be multiple of 6 for NvDCF
+#   gpu-id
+#   ll-lib-file: path to low-level tracker lib
+#   ll-config-file: required for NvDCF, optional for KLT and IOU
+#
+[tracker]
+tracker-width=640
+tracker-height=384
+gpu-id=0
+ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_klt.so
+#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so
+#ll-config-file=tracker_config.yml
+enable-batch-process=1

+ 81 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_pgie_config.txt

@@ -0,0 +1,81 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+network-mode=1
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=1
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1

+ 87 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_sgie1_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarColor/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+process-mode=2
+model-color-format=1
+gpu-id=0
+gie-unique-id=2
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 87 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_sgie2_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_CarMake/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+process-mode=2
+model-color-format=1
+gpu-id=0
+gie-unique-id=3
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 87 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_sgie3_config.txt

@@ -0,0 +1,87 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=1
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/resnet18.caffemodel_b16_gpu0_fp32.engine
+mean-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/mean.ppm
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/labels.txt
+int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Secondary_VehicleTypes/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+# 0=FP32 and 1=INT8 mode
+network-mode=1
+input-object-min-width=64
+input-object-min-height=64
+model-color-format=1
+process-mode=2
+gpu-id=0
+gie-unique-id=4
+operate-on-gie-id=1
+operate-on-class-ids=0
+is-classifier=1
+output-blob-names=predictions/Softmax
+classifier-async-mode=1
+classifier-threshold=0.51
+process-mode=2
+#scaling-filter=0
+#scaling-compute-hw=0

+ 37 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N2/dstest4_tracker_config.txt

@@ -0,0 +1,37 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Mandatory properties for the tracker:
+#   tracker-width
+#   tracker-height: needs to be multiple of 6 for NvDCF
+#   gpu-id
+#   ll-lib-file: path to low-level tracker lib
+#   ll-config-file: required for NvDCF, optional for KLT and IOU
+#
+[tracker]
+tracker-width=640
+tracker-height=384
+gpu-id=0
+ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_iou.so
+#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so
+#ll-config-file=tracker_config.yml
+enable-batch-process=1

+ 81 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/N3/dstest1_pgie_config.txt

@@ -0,0 +1,81 @@
+################################################################################
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+# Following properties are mandatory when engine files are not specified:
+#   int8-calib-file(Only in INT8)
+#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
+#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
+#   ONNX: onnx-file
+#
+# Mandatory properties for detectors:
+#   num-detected-classes
+#
+# Optional properties for detectors:
+#   cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
+#   custom-lib-path,
+#   parse-bbox-func-name
+#
+# Mandatory properties for classifiers:
+#   classifier-threshold, is-classifier
+#
+# Optional properties for classifiers:
+#   classifier-async-mode(Secondary mode only, Default=false)
+#
+# Optional properties in secondary mode:
+#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
+#   input-object-min-width, input-object-min-height, input-object-max-width,
+#   input-object-max-height
+#
+# Following properties are always recommended:
+#   batch-size(Default=1)
+#
+# Other optional properties:
+#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
+#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
+#   mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
+#   custom-lib-path, network-mode(Default=0 i.e FP32)
+#
+# The values in the config file are overridden by values set through GObject
+# properties.
+
+[property]
+gpu-id=0
+net-scale-factor=0.0039215697906911373
+model-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel
+proto-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.prototxt
+#model-engine-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp32.engine
+labelfile-path=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/labels.txt
+# int8-calib-file=/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/cal_trt.bin
+force-implicit-batch-dim=1
+batch-size=1
+network-mode=0
+process-mode=1
+model-color-format=0
+num-detected-classes=4
+interval=0
+gie-unique-id=1
+output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
+
+[class-attrs-all]
+pre-cluster-threshold=0.2
+eps=0.2
+group-threshold=1

+ 49 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/common/FPS.py

@@ -0,0 +1,49 @@
+################################################################################
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+import time
+start_time=time.time()
+frame_count=0
+
+class GETFPS:
+    def __init__(self,stream_id):
+        global start_time
+        self.start_time=start_time
+        self.is_first=True
+        global frame_count
+        self.frame_count=frame_count
+        self.stream_id=stream_id
+    def get_fps(self):
+        end_time=time.time()
+        # On the first frame, reset the timer so pipeline start-up time is not counted
+        if(self.is_first):
+            self.start_time=end_time
+            self.is_first=False
+        # Roughly once per second, report how many frames were seen and reset the counter
+        if(end_time-self.start_time>1):
+            print("**********************FPS*****************************************")
+            print("Fps of stream",self.stream_id,"is ", float(self.frame_count))
+            self.frame_count=0
+            self.start_time=end_time
+        else:
+            self.frame_count=self.frame_count+1
+    def print_data(self):
+        print('frame_count=',self.frame_count)
+        print('start_time=',self.start_time)
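+
+# Example usage (hypothetical stream id), e.g. inside a buffer-probe callback:
+#     fps = GETFPS(0)   # one GETFPS counter per stream
+#     fps.get_fps()     # call once per frame; prints the FPS roughly once per second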

+ 0 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/common/__init__.py


BIN
ai/DeepStream_Perf_Lab/English/python/source_code/common/__pycache__/FPS.cpython-36.pyc


BIN
ai/DeepStream_Perf_Lab/English/python/source_code/common/__pycache__/__init__.cpython-36.pyc


BIN
ai/DeepStream_Perf_Lab/English/python/source_code/common/__pycache__/bus_call.cpython-36.pyc


BIN
ai/DeepStream_Perf_Lab/English/python/source_code/common/__pycache__/is_aarch_64.cpython-36.pyc


+ 39 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/common/bus_call.py

@@ -0,0 +1,39 @@
+################################################################################
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+import gi
+import sys
+gi.require_version('Gst', '1.0')
+from gi.repository import GObject, Gst
+def bus_call(bus, message, loop):
+    t = message.type
+    if t == Gst.MessageType.EOS:
+        sys.stdout.write("End-of-stream\n")
+        loop.quit()
+    elif t==Gst.MessageType.WARNING:
+        err, debug = message.parse_warning()
+        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
+    elif t == Gst.MessageType.ERROR:
+        err, debug = message.parse_error()
+        sys.stderr.write("Error: %s: %s\n" % (err, debug))
+        loop.quit()
+    return True

+ 30 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/common/is_aarch_64.py

@@ -0,0 +1,30 @@
+################################################################################
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+import platform
+import sys
+
+
+def is_aarch64():
+    return platform.uname()[4] == 'aarch64'
+
+sys.path.append('/opt/nvidia/deepstream/deepstream/lib')

+ 29 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/common/utils.py

@@ -0,0 +1,29 @@
+################################################################################
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+################################################################################
+
+import ctypes
+import sys
+sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
+
+def long_to_int(l):
+    value = ctypes.c_int(l & 0xffffffff).value
+    return value

+ 0 - 0
ai/DeepStream_Perf_Lab/English/python/source_code/dataset/download_dataset.py


Some files were not shown because too many files changed in this diff