Browse Source

adding TAO to experimentals directory

Tosin Akinwale Adesuyi 2 years ago
parent
commit
5e3ea146d9

+ 1 - 0
experimental/README.md

@@ -2,6 +2,7 @@
 - TAO toolkit Architecture
 - NGC API KEY registration
 - Image classification Pretrained Models
+- YOLOv4 object detection
 - Training, Evaluation, Pruning, Retraining, Inferencing, and INT8 Optimization using resnet18 pretrained model
 
 The lab contains docker and singularity definition files. Follow the readme files inside each on how to build the container and run the labs inside it. 

+ 4 - 1
experimental/TAO_Transfer_Learning/English/data/README.txt

@@ -2,4 +2,7 @@ The Pascal VOC challenge is a very popular dataset however, the website frequent
 In case you are unable to download the VOCtrainval_11-May-2012.tar within the Dockerfile or Singularity file,
 please uncomment:
 `#url = 'https://drive.google.com/file/d/1YkakD_qJuKmwAZF_HwLGLNb8fcqZbtx7/view?usp=sharing'`  
-in /source_code/dataset.py code.
+in /source_code/dataset.py code.
+
+YOLOv4:
+Download the KITTI detection dataset (data_object_image_2.zip and data_object_label_2.zip); /source_code/dataset.py downloads both automatically when the container is built.

experimental/TAO_Transfer_Learning/English/image/ngc_key.PNG → experimental/TAO_Transfer_Learning/English/images/ngc_key.PNG


experimental/TAO_Transfer_Learning/English/image/ngc_setup_key.PNG → experimental/TAO_Transfer_Learning/English/images/ngc_setup_key.PNG


+ 7 - 0
experimental/TAO_Transfer_Learning/English/source_code/__init__.py

@@ -0,0 +1,7 @@
+# Copyright (c) 2017-2020, NVIDIA CORPORATION.  All rights reserved.
+
+"""TLT SSD example."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function

+ 17 - 6
experimental/TAO_Transfer_Learning/English/source_code/dataset.py

@@ -4,10 +4,21 @@ import gdown
 import os
 
 ## VOC dataset
-# uncomment the file
-url = 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar'
 
-# if the url above fails, uncomment the url below and rebiuld your container
-#url = 'https://drive.google.com/file/d/1YkakD_qJuKmwAZF_HwLGLNb8fcqZbtx7/view?usp=sharing'
-output_python = '/workspace/tlt-experiments/data/VOCtrainval_11-May-2012.tar'
-gdown.download(url, output_python, quiet=False, proxy=None)
+url0 = 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar'
+
+# if the url0 above fails, uncomment the alternative url0 below and rebuild your container
+#url0 = 'https://drive.google.com/file/d/1YkakD_qJuKmwAZF_HwLGLNb8fcqZbtx7/view?usp=sharing'
+
+output_python0 = '/workspace/tlt-experiments/data/VOCtrainval_11-May-2012.tar'
+
+## Kitti detection dataset for yolov4
+url1 = 'http://www.cvlibs.net/download.php?file=data_object_label_2.zip'
+url2 = 'http://www.cvlibs.net/download.php?file=data_object_image_2.zip'
+
+output_python1 = '/workspace/tlt-experiments/data/data_object_label_2.zip'
+output_python2 = '/workspace/tlt-experiments/data/data_object_image_2.zip'
+
+gdown.download(url0, output_python0, quiet=False, proxy=None)
+gdown.download(url1, output_python1, quiet=False, proxy=None)
+gdown.download(url2, output_python2, quiet=False, proxy=None)
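Because `dataset.py` is meant to run while the container is built, a quick integrity check of the fetched archives can save a full rebuild if one of the mirrors fails. A minimal sketch, assuming the container paths used above:

```python
# Sanity-check the archives fetched by dataset.py
# (paths are the container paths assumed in this repository).
import os

expected = [
    "/workspace/tlt-experiments/data/VOCtrainval_11-May-2012.tar",
    "/workspace/tlt-experiments/data/data_object_label_2.zip",
    "/workspace/tlt-experiments/data/data_object_image_2.zip",
]

for path in expected:
    if os.path.isfile(path) and os.path.getsize(path) > 0:
        print("OK      {} ({:.1f} MB)".format(path, os.path.getsize(path) / 1e6))
    else:
        print("MISSING {} -- re-run dataset.py or download it manually".format(path))
```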

+ 92 - 0
experimental/TAO_Transfer_Learning/English/source_code/generate_val_dataset.py

@@ -0,0 +1,92 @@
+# Copyright (c) 2017-2020, NVIDIA CORPORATION.  All rights reserved.
+
+"""Script to generate val dataset for SSD/DSSD tutorial."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import os
+
+
+def parse_args(args=None):
+    """parse the arguments."""
+    parser = argparse.ArgumentParser(description='Generate a val dataset from a KITTI-format training set')
+
+    parser.add_argument(
+        "--input_image_dir",
+        type=str,
+        required=True,
+        help="Input directory to KITTI training dataset images."
+    )
+
+    parser.add_argument(
+        "--input_label_dir",
+        type=str,
+        required=True,
+        help="Input directory to KITTI training dataset labels."
+    )
+
+    parser.add_argument(
+        "--output_dir",
+        type=str,
+        required=True,
+        help="Ouput directory to TLT val dataset."
+    )
+
+    parser.add_argument(
+        "--val_split",
+        type=int,
+        required=False,
+        default=10,
+        help="Percentage of training dataset for generating val dataset"
+    )
+
+    return parser.parse_args(args)
+
+
+def main(args=None):
+    """Main function for data preparation."""
+
+    args = parse_args(args)
+
+    img_files = []
+    for file_name in os.listdir(args.input_image_dir):
+        if file_name.split(".")[-1] == "png":
+            img_files.append(file_name)
+
+    total_cnt = len(img_files)
+    val_ratio = float(args.val_split) / 100.0
+    val_cnt = int(total_cnt * val_ratio)
+    train_cnt = total_cnt - val_cnt
+    val_img_list = img_files[0:val_cnt]
+
+    target_img_path = os.path.join(args.output_dir, "image")
+    target_label_path = os.path.join(args.output_dir, "label")
+
+    if not os.path.exists(target_img_path):
+        os.makedirs(target_img_path)
+    else:
+        print("This script will not run as output image path already exists.")
+        return
+
+    if not os.path.exists(target_label_path):
+        os.makedirs(target_label_path)
+    else:
+        print("This script will not run as output label path already exists.")
+        return
+
+    print("Total {} samples in KITTI training dataset".format(total_cnt))
+    print("{} for train and {} for val".format(train_cnt, val_cnt))
+
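+    # Move (not copy) the first val_cnt image/label pairs out of the training
+    # directories and into the val split directories created above.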
+    for img_name in val_img_list:
+        label_name = img_name.split(".")[0] + ".txt"
+        os.rename(os.path.join(args.input_image_dir, img_name),
+                  os.path.join(target_img_path, img_name))
+        os.rename(os.path.join(args.input_label_dir, label_name),
+                  os.path.join(target_label_path, label_name))
+
+
+if __name__ == "__main__":
+    main()
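Once the split script has run, the YOLOv4 spec files further down expect the hold-out set under `/workspace/tlt-experiments/data/val`. A small check, assuming that layout:

```python
# Verify the val split landed where validation_data_sources in the spec files points.
import os

val_images = "/workspace/tlt-experiments/data/val/image"
val_labels = "/workspace/tlt-experiments/data/val/label"

for path in (val_images, val_labels):
    count = len(os.listdir(path)) if os.path.isdir(path) else 0
    print("{}: {} files".format(path, count))
```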

+ 100 - 0
experimental/TAO_Transfer_Learning/English/specs/yolo_v4_retrain_resnet18_kitti.txt

@@ -0,0 +1,100 @@
+random_seed: 42
+yolov4_config {
+  big_anchor_shape: "[(114.94, 60.67), (159.06, 114.59), (297.59, 176.38)]"
+  mid_anchor_shape: "[(42.99, 31.91), (79.57, 31.75), (56.80, 56.93)]"
+  small_anchor_shape: "[(15.60, 13.88), (30.25, 20.25), (20.67, 49.63)]"
+  box_matching_iou: 0.25
+  arch: "resnet"
+  nlayers: 18
+  arch_conv_blocks: 2
+  loss_loc_weight: 0.8
+  loss_neg_obj_weights: 100.0
+  loss_class_weights: 0.5
+  label_smoothing: 0.0
+  big_grid_xy_extend: 0.05
+  mid_grid_xy_extend: 0.1
+  small_grid_xy_extend: 0.2
+  freeze_bn: false
+  #freeze_blocks: 0
+  force_relu: false
+}
+training_config {
+  batch_size_per_gpu: 8
+  num_epochs: 80
+  enable_qat: false
+  checkpoint_interval: 10
+  learning_rate {
+    soft_start_cosine_annealing_schedule {
+      min_learning_rate: 1e-7
+      max_learning_rate: 1e-4
+      soft_start: 0.3
+    }
+  }
+  regularizer {
+    type: NO_REG
+    weight: 3e-9
+  }
+  optimizer {
+    adam {
+      epsilon: 1e-7
+      beta1: 0.9
+      beta2: 0.999
+      amsgrad: false
+    }
+  }
+  pruned_model_path: "EXPERIMENT_DIR/experiment_dir_pruned/yolov4_resnet18_pruned.tlt"
+}
+eval_config {
+  average_precision_mode: SAMPLE
+  batch_size: 8
+  matching_iou_threshold: 0.5
+}
+nms_config {
+  confidence_threshold: 0.001
+  clustering_iou_threshold: 0.5
+  top_k: 200
+}
+augmentation_config {
+  hue: 0.1
+  saturation: 1.5
+  exposure: 1.5
+  vertical_flip: 0
+  horizontal_flip: 0.5
+  jitter: 0.3
+  output_width: 1248
+  output_height: 384
+  randomize_input_shape_period: 0
+  mosaic_prob: 0.5
+  mosaic_min_ratio: 0.2
+}
+dataset_config {
+  data_sources: {
+      label_directory_path: "/workspace/tlt-experiments/data/training/label_2"
+      image_directory_path: "/workspace/tlt-experiments/data/training/image_2"
+  }
+  include_difficult_in_training: true
+  target_class_mapping {
+      key: "car"
+      value: "car"
+  }
+  target_class_mapping {
+      key: "pedestrian"
+      value: "pedestrian"
+  }
+  target_class_mapping {
+      key: "cyclist"
+      value: "cyclist"
+  }
+  target_class_mapping {
+      key: "van"
+      value: "car"
+  }
+  target_class_mapping {
+      key: "person_sitting"
+      value: "pedestrian"
+  }
+  validation_data_sources: {
+      label_directory_path: "/workspace/tlt-experiments/data/val/label"
+      image_directory_path: "/workspace/tlt-experiments/data/val/image"
+  }
+}

+ 100 - 0
experimental/TAO_Transfer_Learning/English/specs/yolo_v4_train_resnet18_kitti.txt

@@ -0,0 +1,100 @@
+random_seed: 42
+yolov4_config {
+  big_anchor_shape: "[(114.94, 60.67), (159.06, 114.59), (297.59, 176.38)]"
+  mid_anchor_shape: "[(42.99, 31.91), (79.57, 31.75), (56.80, 56.93)]"
+  small_anchor_shape: "[(15.60, 13.88), (30.25, 20.25), (20.67, 49.63)]"
+  box_matching_iou: 0.25
+  arch: "resnet"
+  nlayers: 18
+  arch_conv_blocks: 2
+  loss_loc_weight: 0.8
+  loss_neg_obj_weights: 100.0
+  loss_class_weights: 0.5
+  label_smoothing: 0.0
+  big_grid_xy_extend: 0.05
+  mid_grid_xy_extend: 0.1
+  small_grid_xy_extend: 0.2
+  freeze_bn: false
+  #freeze_blocks: 0
+  force_relu: false
+}
+training_config {
+  batch_size_per_gpu: 8
+  num_epochs: 80
+  enable_qat: false
+  checkpoint_interval: 10
+  learning_rate {
+    soft_start_cosine_annealing_schedule {
+      min_learning_rate: 1e-7
+      max_learning_rate: 1e-4
+      soft_start: 0.3
+    }
+  }
+  regularizer {
+    type: L1
+    weight: 3e-5
+  }
+  optimizer {
+    adam {
+      epsilon: 1e-7
+      beta1: 0.9
+      beta2: 0.999
+      amsgrad: false
+    }
+  }
+  pretrain_model_path: "EXPERIMENT_DIR/pretrained_resnet18/tlt_pretrained_object_detection_vresnet18/resnet_18.hdf5"
+}
+eval_config {
+  average_precision_mode: SAMPLE
+  batch_size: 8
+  matching_iou_threshold: 0.5
+}
+nms_config {
+  confidence_threshold: 0.001
+  clustering_iou_threshold: 0.5
+  top_k: 200
+}
+augmentation_config {
+  hue: 0.1
+  saturation: 1.5
+  exposure: 1.5
+  vertical_flip: 0
+  horizontal_flip: 0.5
+  jitter: 0.3
+  output_width: 1248
+  output_height: 384
+  randomize_input_shape_period: 0
+  mosaic_prob: 0.5
+  mosaic_min_ratio: 0.2
+}
+dataset_config {
+  data_sources: {
+      label_directory_path: "/workspace/tlt-experiments/data/training/label_2"
+      image_directory_path: "/workspace/tlt-experiments/data/training/image_2"
+  }
+  include_difficult_in_training: true
+  target_class_mapping {
+      key: "car"
+      value: "car"
+  }
+  target_class_mapping {
+      key: "pedestrian"
+      value: "pedestrian"
+  }
+  target_class_mapping {
+      key: "cyclist"
+      value: "cyclist"
+  }
+  target_class_mapping {
+      key: "van"
+      value: "car"
+  }
+  target_class_mapping {
+      key: "person_sitting"
+      value: "pedestrian"
+  }
+  validation_data_sources: {
+      label_directory_path: "/workspace/tlt-experiments/data/val/label"
+      image_directory_path: "/workspace/tlt-experiments/data/val/image"
+  }
+}
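The train and retrain specs above are intentionally near-identical; only the regularizer and the model-path entry change. A quick sketch to confirm that, assuming the `SPECS_DIR` layout (`/workspace/examples/yolo_v4/specs`) used by the notebook below:

```python
# Diff the two spec files to see exactly what changes between training and
# retraining (expected: regularizer type/weight and pretrain_model_path vs
# pruned_model_path). SPECS_DIR path is an assumption from the notebook setup.
import difflib

specs_dir = "/workspace/examples/yolo_v4/specs"
with open(specs_dir + "/yolo_v4_train_resnet18_kitti.txt") as f:
    train = f.read().splitlines()
with open(specs_dir + "/yolo_v4_retrain_resnet18_kitti.txt") as f:
    retrain = f.read().splitlines()

for line in difflib.unified_diff(train, retrain, "train", "retrain", lineterm="", n=0):
    print(line)
```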

+ 4 - 4
experimental/TAO_Transfer_Learning/English/start_here.ipynb

@@ -62,8 +62,8 @@
     "- Next, you would find at the top right corner a `Generate API Key` button, click on this button. A dialog box would appear after the click, you must click on the `confirm` button on it.\n",
     "- Finally, copy your generated API Key and Username, and save it somewhere on your local system.\n",
     "\n",
-    "<img align=\"center\" src=\" image/ngc_setup_key.PNG\" width=\"600\"> \n",
-    "<img align=\"center\" src=\" image/ngc_key.PNG\" width=\"700\">\n",
+    "<img align=\"center\" src=\"images/ngc_setup_key.PNG\" width=\"600\"> \n",
+    "<img align=\"center\" src=\"images/ngc_key.PNG\" width=\"700\">\n",
     "\n",
     "## API Key\n",
     "\n",
@@ -489,7 +489,7 @@
    "source": [
     "## 6. Retrain pruned models <a class=\"anchor\" id=\"head-6\"></a>\n",
     "* Model needs to be re-trained to bring back accuracy after pruning\n",
-    "- Run the cell below to view the retrain spec configuration file. Your task would be to modify the hyper-parameters to achieve desirable accuracy result. You can access the `classification_retrain_spec.cfg` file in the `spec folder` seen at the top left-side of the jupyter lab. Please, remember to save the file with `ctl s` after modification and then rerun the cell below to see if your changes have reflected."
+    "- Run the cell below to view the retrain spec configuration file. Your task would be to modify the hyper-parameters to achieve desirable accuracy result. You can access the `classification_retrain_spec.cfg` file in the `specs folder` seen at the top left-side of the jupyter lab. Please, remember to save the file with `ctl s` after modification and then rerun the cell below to see if your changes have reflected."
    ]
   },
   {
@@ -735,7 +735,7 @@
     "---\n",
     "### Source\n",
     "\n",
-    "This Notebook was adapted from examples within NVIDIA TLT container.\n",
+    "This Notebook was adapted from examples within NVIDIA TLT/TAO Docker container pulled from ngc.nvidia.com\n",
     "\n",
     "### Licensing \n",
     "\n",

+ 800 - 0
experimental/TAO_Transfer_Learning/English/yolo_v4.ipynb

@@ -0,0 +1,800 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# TLT YOLO v4  Object Detection\n",
+    "---\n",
+    "## Learning Objectives\n",
+    "This notebook shows an example usecase of YOLO v4 object detection using Transfer Learning Toolkit. You will learn how to leverage the simplicity and convenience of TLT to:\n",
+    "\n",
+    "* Train model with resen using KITTI detection dataset\n",
+    "* Prune the finetuned model\n",
+    "* Retrain the pruned model to recover lost accuracy\n",
+    "* Export the pruned model\n",
+    "* Run Inference on the trained model\n",
+    "* Export the pruned and retrained model to a .etlt file for deployment to DeepStream\n",
+    "\n",
+    "### Table of Contents\n",
+    "\n",
+    "0. [Set up env variables](#head-0)\n",
+    "1. [Prepare dataset and pre-trained model](#head-1) <br>\n",
+    "    1.1 [Download pre-trained model](#head-1-1) <br>\n",
+    "2. [Provide training specification](#head-2)\n",
+    "3. [Run TLT training](#head-3)\n",
+    "4. [Evaluate trained models](#head-4)\n",
+    "5. [Prune trained models](#head-5)\n",
+    "6. [Retrain pruned models](#head-6)\n",
+    "7. [Evaluate retrained model](#head-7)\n",
+    "8. [Visualize inferences](#head-8)\n",
+    "9. [Deploy](#head-9)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Transfer Learning with TLT\n",
+    "​\n",
+    "Transfer learning is the process of transferring learned features from one application to another. It is a commonly used training technique where you use a model trained on one task and re-train to use it on a different task. \n",
+    "​\n",
+    "Transfer Learning Toolkit (TLT) is a simple and easy-to-use Python based AI toolkit for taking purpose-built AI models and customizing them with users' own data.\n",
+    "​\n",
+    "<img align=\"center\" src=\"https://developer.nvidia.com/sites/default/files/akamai/embedded-transfer-learning-toolkit-software-stack-1200x670px.png\" width=\"720\"> "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before TLT can be use, you need to register at ngc.nvidia.com and proceed to generate an API Key. A step-by-step process to achieving this is given below:\n",
+    "- From your browser visit `ngc.nvidia.com`\n",
+    "- Click on `Welcome Guest` and you would see a dropdown menu and then click on `Sign In/Sign Up`.  \n",
+    "- Click on `continue` button where `NVIDIA Account (use existing or create a new NVIDIA ac-)` is written.\n",
+    "- Click on `Create account` and get registered. Thereafter you may proceed to login with your new account credentials.\n",
+    "- At the top right corner, click on your `username`, you would see a dropdown menu, then click on `Setup`.\n",
+    "- proceed and click on `Get API Key` button.\n",
+    "- Next, you would find at the top right corner a `Generate API Key` button, click on this button. A dialog box would appear after the click, you must click on the `confirm` button on it.\n",
+    "- Finally, copy your generated API Key and Username, and save it somewhere on your local system.\n",
+    "\n",
+    "<img align=\"center\" src=\"images/ngc_setup_key.PNG\" width=\"600\"> \n",
+    "<img align=\"center\" src=\"images/ngc_key.PNG\" width=\"700\">\n",
+    "\n",
+    "## API Key\n",
+    "\n",
+    "- Your API key represents your credentials\n",
+    "  - Used for programmatic interaction (e.g., docker, REST API, etc.)\n",
+    "  - Uniquely identifies you (think “Username & Password”)\n",
+    "  - There can be only one (regenerating your API key invalidates the old one)\n",
+    "- Programmatic interface at `nvcr.io`: Use API Key"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 0. Set up env variables <a class=\"anchor\" id=\"head-0\"></a>\n",
+    "\n",
+    "Please copy your API Key from where you saved it and paste it within the empty single quote in front of `%env KEY=''`.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%set_env KEY='place your ngc api key here'\n",
+    "%set_env GPU_INDEX=0\n",
+    "%set_env USER_EXPERIMENT_DIR=/workspace/tlt-experiments/yolo_v4\n",
+    "%set_env DATA_DOWNLOAD_DIR=/workspace/tlt-experiments/data\n",
+    "%set_env SPECS_DIR=/workspace/examples/yolo_v4/specs\n",
+    "!mkdir -p $DATA_DOWNLOAD_DIR"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 1. Prepare dataset and pre-trained model <a class=\"anchor\" id=\"head-1\"></a>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " We will be using the KITTI detection dataset for the tutorial. To find more details please visit\n",
+    " http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=2d. If you intend to run this notebook on your local workstation without using a container, please download the KITTI detection images (http://www.cvlibs.net/download.php?file=data_object_image_2.zip) and labels (http://www.cvlibs.net/download.php?file=data_object_label_2.zip) to $DATA_DOWNLOAD_DIR or `workspace/tlt-experiments/data`.\n",
+    "\n",
+    "-  Check the dataset is present"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!mkdir -p $DATA_DOWNLOAD_DIR\n",
+    "!if [ ! -f $DATA_DOWNLOAD_DIR/data_object_image_2.zip ]; then echo 'Image zip file not found, please download.'; else echo 'Found Image zip file.';fi\n",
+    "!if [ ! -f $DATA_DOWNLOAD_DIR/data_object_label_2.zip ]; then echo 'Label zip file not found, please download.'; else echo 'Found Labels zip file.';fi"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Unpack the zip files and Verify"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!unzip -u $DATA_DOWNLOAD_DIR/data_object_image_2.zip -d $DATA_DOWNLOAD_DIR\n",
+    "!unzip -u $DATA_DOWNLOAD_DIR/data_object_label_2.zip -d $DATA_DOWNLOAD_DIR"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ls -l $DATA_DOWNLOAD_DIR/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Generate validation set out of training dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python /workspace/tlt-experiments/source_code/generate_val_dataset.py --input_image_dir=$DATA_DOWNLOAD_DIR/training/image_2 \\\n",
+    "                                       --input_label_dir=$DATA_DOWNLOAD_DIR/training/label_2 \\\n",
+    "                                       --output_dir=$DATA_DOWNLOAD_DIR/val"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Additionally, if you have your own dataset already in a volume (or folder), you can mount the volume on `DATA_DOWNLOAD_DIR` (or create a soft link). Below shows an example:\n",
+    "\n",
+    "```bash\n",
+    "# if your dataset is in /dev/sdc1\n",
+    "mount /dev/sdc1 $DATA_DOWNLOAD_DIR\n",
+    "\n",
+    "# if your dataset is in folder /var/dataset\n",
+    "ln -sf /var/dataset $DATA_DOWNLOAD_DIR\n",
+    "```\n",
+    "\n",
+    "- You will also need to run the cell below to generate the best anchor shape\n",
+    "- The anchor shape generated by this script is sorted. `Write the first 3 into small_anchor_shape in the config file`, `write middle 3 into mid_anchor_shape`, and `write last 3 into big_anchor_shape`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# !yolo_v4 kmeans -l $DATA_DOWNLOAD_DIR/training/label_2 \\\n",
+    "#                 -i $DATA_DOWNLOAD_DIR/training/image_2 \\\n",
+    "#                 -n 9 \\\n",
+    "#                 -x 1248 \\\n",
+    "#                 -y 384\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 1.1 Download pre-trained model <a class=\"anchor\" id=\"head-1-1\"></a>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will use NGC CLI to get the pre-trained models. For more details, go to [ngc.nvidia.com](ngc.nvidia.com) and click the SETUP on the navigation bar."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ngc registry model list nvidia/tlt_pretrained_object_detection:*"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!mkdir -p $USER_EXPERIMENT_DIR/pretrained_resnet18/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Pull pretrained model from NGC"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ngc registry model download-version nvidia/tlt_pretrained_object_detection:resnet18 --dest $USER_EXPERIMENT_DIR/pretrained_resnet18"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Check that model is downloaded into directory"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ls -l $USER_EXPERIMENT_DIR/pretrained_resnet18/tlt_pretrained_object_detection_vresnet18"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 2. Provide training specification <a class=\"anchor\" id=\"head-2\"></a>\n",
+    "* Augmentation parameters for on-the-fly:\n",
+    "    * training (hyper-)parameters such as batch size, number of epochs, learning rate etc.\n",
+    "* Training dataset\n",
+    "* Validation dataset\n",
+    "* Pre-trained models"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Provide pretrained model path on-the-fly by runing the cell below"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!sed -i 's,EXPERIMENT_DIR,'\"$USER_EXPERIMENT_DIR\"',' $SPECS_DIR/yolo_v4_train_resnet18_kitti.txt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- To enable QAT training on sample spec file, uncomment following lines in the cell below"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# !sed -i \"s/enable_qat: false/enable_qat: true/g\" $SPECS_DIR/yolo_v4_train_resnet18_kitti.txt\n",
+    "# !sed -i \"s/enable_qat: false/enable_qat: true/g\" $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- By default, the sample spec file `(yolo_v4_train_resnet18_kitti.txt)` disables QAT training. You can force non-QAT training by uncomment and run the cell below"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# !sed -i \"s/enable_qat: true/enable_qat: false/g\" $SPECS_DIR/yolo_v4_train_resnet18_kitti.txt\n",
+    "# !sed -i \"s/enable_qat: true/enable_qat: false/g\" $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Run the cell below to view the model spec configuration file. **Your task would be to modify the hyper-parameters to achieve desirable accuracy result**. You can access the `yolo_v4_train_resnet18_kitti.txt` file in the `spec folder` seen at the top left-side of the jupyter lab. Please, remember to save the file with `ctl s` after modification and then rerun the cell below to see if your changes have reflected.\n",
+    "- Note that in the spec file `arch` is set to `resnet` as the backbone for feature extraction. Others include `\"vgg\", \"darknet\", \"googlenet\", \"mobilenet_V1\", \"mobilenet_V2\", \"cspdarknet\", and \"squeeznet\"`.  "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!cat $SPECS_DIR/yolo_v4_train_resnet18_kitti.txt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 3. Run TLT training <a class=\"anchor\" id=\"head-3\"></a>\n",
+    "* Provide the sample spec file and the output directory location for models\n",
+    "* WARNING: training will take several hours or one day to complete  \n",
+    "- Please note some parameter definition: \n",
+    "     - -e: `spec file`; -k: `API key encoding`;  -r: `result directory`; --gpus: `number of GPU`\n",
+    "- To run with multigpu, please change `--gpus` based on the number of available GPUs in your machine"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!mkdir -p $USER_EXPERIMENT_DIR/experiment_dir_unpruned"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "!yolo_v4 train -e $SPECS_DIR/yolo_v4_train_resnet18_kitti.txt \\\n",
+    "               -r $USER_EXPERIMENT_DIR/experiment_dir_unpruned \\\n",
+    "               -k $KEY \\\n",
+    "               --gpus 2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- To resume from checkpoint, please change pretrain_model_path to resume_model_path in config file"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print('Model for each epoch:')\n",
+    "print('---------------------')\n",
+    "!ls -ltrh $USER_EXPERIMENT_DIR/experiment_dir_unpruned/weights"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Now check the evaluation stats in the `csv file` and pick the model with highest eval `accuracy`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!cat $USER_EXPERIMENT_DIR/experiment_dir_unpruned/yolov4_training_log_resnet18.csv\n",
+    "%set_env EPOCH=080"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 4. Evaluate trained models <a class=\"anchor\" id=\"head-4\"></a>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "!yolo_v4 evaluate -e $SPECS_DIR/yolo_v4_train_resnet18_kitti.txt \\\n",
+    "                  -m $USER_EXPERIMENT_DIR/experiment_dir_unpruned/weights/yolov4_resnet18_epoch_$EPOCH.tlt \\\n",
+    "                  -k $KEY"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 5. Prune trained models <a class=\"anchor\" id=\"head-5\"></a>\n",
+    "* Specify pre-trained model\n",
+    "* Equalization criterion (`Only for resnets as they have element wise operations or MobileNets.`)\n",
+    "* Threshold for pruning.\n",
+    "* A key to save and load the model\n",
+    "* Output directory to store the model\n",
+    "\n",
+    "Usually, you just need to adjust `-pth` (threshold) for accuracy and model size trade off. Higher `pth` gives you smaller model (and thus higher inference speed) but worse accuracy. The threshold value depends on the dataset and the model. `0.5` in the block below is just a start point. If the retrain accuracy is good, you can increase this value to get smaller models. Otherwise, lower this value to get better accuracy."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!mkdir -p $USER_EXPERIMENT_DIR/experiment_dir_pruned"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "!yolo_v4 prune -m $USER_EXPERIMENT_DIR/experiment_dir_unpruned/weights/yolov4_resnet18_epoch_$EPOCH.tlt \\\n",
+    "               -e $SPECS_DIR/yolo_v4_train_resnet18_kitti.txt \\\n",
+    "               -o $USER_EXPERIMENT_DIR/experiment_dir_pruned/yolov4_resnet18_pruned.tlt \\\n",
+    "               -eq intersection \\\n",
+    "               -pth 0.1 \\\n",
+    "               -k $KEY"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ls -rlt $USER_EXPERIMENT_DIR/experiment_dir_pruned/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 6. Retrain pruned models <a class=\"anchor\" id=\"head-6\"></a>\n",
+    "* Model needs to be re-trained to bring back accuracy after pruning.\n",
+    "- Run the cell below to view the retrain specification configuration file. You may need to modify the hyper-parameters to achieve desirable accuracy result. You can access the `yolo_v4_retrain_resnet18_kitti.txt` file in the `specs folder` seen at the top left-side of the jupyter lab. Please, remember to save the file with `ctl s` after modification and then rerun the cell below to see if your changes have reflected.\n",
+    "- WARNING: training will take several hours or one day to complete."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# Printing the retrain spec file. \n",
+    "# Here we have updated the spec file to include the newly pruned model as a pretrained weights.\n",
+    "!sed -i 's,EXPERIMENT_DIR,'\"$USER_EXPERIMENT_DIR\"',' $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt\n",
+    "!cat $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!mkdir -p $USER_EXPERIMENT_DIR/experiment_dir_retrain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Retraining using the pruned model as pretrained weights \n",
+    "!yolo_v4 train --gpus 2 \\\n",
+    "               -e $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt \\\n",
+    "               -r $USER_EXPERIMENT_DIR/experiment_dir_retrain \\\n",
+    "               -k $KEY"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Listing the newly retrained model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ls -rlt $USER_EXPERIMENT_DIR/experiment_dir_retrain/weights"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Now check the evaluation stats in the `csv file` and pick the model with highest eval `accuracy`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!cat $USER_EXPERIMENT_DIR/experiment_dir_retrain/yolov4_training_log_resnet18.csv\n",
+    "%set_env EPOCH=080"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 7. Evaluate retrained model <a class=\"anchor\" id=\"head-7\"></a>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!yolo_v4 evaluate -e $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt \\\n",
+    "                  -m $USER_EXPERIMENT_DIR/experiment_dir_retrain/weights/yolov4_resnet18_epoch_$EPOCH.tlt \\\n",
+    "                  -k $KEY"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 8. Visualize inferences <a class=\"anchor\" id=\"head-8\"></a>\n",
+    "- In this section, we run the tlt-infer tool to generate inferences on the trained models and visualize the results.\n",
+    "- Please note some parameter definition:\n",
+    "   - -m:`retrained model;` -e:`retrain spec file;` -k: `encoding key;` -b: `batch size;` -i: `test data dir`; -o: `output images`; -l: `frame by frame bbox labels output`  "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Copy some test images\n",
+    "!mkdir -p /workspace/examples/yolo_v4/test_samples\n",
+    "!cp $DATA_DOWNLOAD_DIR/testing/image_2/00000* /workspace/examples/yolo_v4/test_samples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Running inference for detection on n images\n",
+    "!yolo_v4 inference -i /workspace/examples/yolo_v4/test_samples \\\n",
+    "                   -o $USER_EXPERIMENT_DIR/yolo_infer_images \\\n",
+    "                   -e $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt \\\n",
+    "                   -m $USER_EXPERIMENT_DIR/experiment_dir_retrain/weights/yolov4_resnet18_epoch_$EPOCH.tlt \\\n",
+    "                   -l $USER_EXPERIMENT_DIR/yolo_infer_labels \\\n",
+    "                   -k $KEY"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `tlt-infer` tool produces two outputs. \n",
+    "1. Overlain images in `$USER_EXPERIMENT_DIR/yolo_infer_images`\n",
+    "2. Frame by frame bbox labels in kitti format located in `$USER_EXPERIMENT_DIR/yolo_infer_labels`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Simple grid visualizer\n",
+    "import matplotlib.pyplot as plt\n",
+    "import os\n",
+    "from math import ceil\n",
+    "valid_image_ext = ['.jpg', '.png', '.jpeg', '.ppm']\n",
+    "\n",
+    "def visualize_images(image_dir, num_cols=4, num_images=10):\n",
+    "    output_path = os.path.join(os.environ['USER_EXPERIMENT_DIR'], image_dir)\n",
+    "    num_rows = int(ceil(float(num_images) / float(num_cols)))\n",
+    "    f, axarr = plt.subplots(num_rows, num_cols, figsize=[80,30])\n",
+    "    f.tight_layout()\n",
+    "    a = [os.path.join(output_path, image) for image in os.listdir(output_path) \n",
+    "         if os.path.splitext(image)[1].lower() in valid_image_ext]\n",
+    "    for idx, img_path in enumerate(a[:num_images]):\n",
+    "        col_id = idx % num_cols\n",
+    "        row_id = idx // num_cols\n",
+    "        img = plt.imread(img_path)\n",
+    "        axarr[row_id, col_id].imshow(img) "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- Visualize the sample images."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "OUTPUT_PATH = 'yolo_infer_images' # relative path from $USER_EXPERIMENT_DIR.\n",
+    "COLS = 3 # number of columns in the visualizer grid.\n",
+    "IMAGES = 9 # number of images to visualize.\n",
+    "\n",
+    "visualize_images(OUTPUT_PATH, num_cols=COLS, num_images=IMAGES)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 9. Deploy! <a class=\"anchor\" id=\"head-9\"></a>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you trained a non-QAT model, you may export in `FP32`, `FP16` or `INT8` mode using the code block below. For `INT8`, you need to provide calibration image directory."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# tlt-export will fail if .etlt already exists. So we clear the export folder before tlt-export\n",
+    "!rm -rf $USER_EXPERIMENT_DIR/export\n",
+    "!mkdir -p $USER_EXPERIMENT_DIR/export\n",
+    "# Export in FP32 mode. Change --data_type to fp16 for FP16 mode\n",
+    "!yolo_v4 export -m $USER_EXPERIMENT_DIR/experiment_dir_retrain/weights/yolov4_resnet18_epoch_$EPOCH.tlt \\\n",
+    "                -k $KEY \\\n",
+    "                -o $USER_EXPERIMENT_DIR/export/yolov4_resnet18_epoch_$EPOCH.etlt \\\n",
+    "                -e $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt \\\n",
+    "                --batch_size 16 \\\n",
+    "                --data_type fp32\n",
+    "\n",
+    "# Uncomment to export in INT8 mode (generate calibration cache file). \n",
+    "# !yolo_v4 export -m $USER_EXPERIMENT_DIR/experiment_dir_retrain/weights/yolov4_resnet18_epoch_$EPOCH.tlt  \\\n",
+    "#                 -o $USER_EXPERIMENT_DIR/export/yolov4_resnet18_epoch_$EPOCH.etlt \\\n",
+    "#                 -e $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt \\\n",
+    "#                 -k $KEY \\\n",
+    "#                 --cal_image_dir  $USER_EXPERIMENT_DIR/data/testing/image_2 \\\n",
+    "#                 --data_type int8 \\\n",
+    "#                 --batch_size 16 \\\n",
+    "#                 --batches 10 \\\n",
+    "#                 --cal_cache_file $USER_EXPERIMENT_DIR/export/cal.bin  \\\n",
+    "#                 --cal_data_file $USER_EXPERIMENT_DIR/export/cal.tensorfile"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`Note:` In this example, for ease of execution we restrict the number of calibrating batches to 10. TLT recommends the use of at least 10% of the training dataset for int8 calibration.\n",
+    "\n",
+    "If you train a QAT model, you may only export in INT8 mode using following code block. This generates an etlt file and the corresponding calibration cache. You can throw away the calibration cache and just use the etlt file in tlt-converter or DeepStream for FP32 or FP16 mode. But please note this gives sub-optimal results. If you want to deploy in FP32 or FP16, you should disable QAT in training."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Uncomment to export QAT model in INT8 mode (generate calibration cache file).\n",
+    "# !rm -rf $USER_EXPERIMENT_DIR/export\n",
+    "# !mkdir -p $USER_EXPERIMENT_DIR/export\n",
+    "# !yolo_v4 export -m $USER_EXPERIMENT_DIR/experiment_dir_retrain/weights/yolov4_resnet18_epoch_$EPOCH.tlt  \\\n",
+    "#                 -o $USER_EXPERIMENT_DIR/export/yolov4_resnet18_epoch_$EPOCH.etlt \\\n",
+    "#                 -e $SPECS_DIR/yolo_v4_retrain_resnet18_kitti.txt \\\n",
+    "#                 -k $KEY \\\n",
+    "#                 --data_type int8 \\\n",
+    "#                 --cal_cache_file $USER_EXPERIMENT_DIR/export/cal.bin"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print('Exported model:')\n",
+    "print('------------')\n",
+    "!ls -lh $USER_EXPERIMENT_DIR/export"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Verify engine generation using the `tlt-converter` utility included with the docker.\n",
+    "\n",
+    "The `tlt-converter` produces optimized tensorrt engines for the platform that it resides on. Therefore, to get maximum performance, please instantiate this docker and execute the `tlt-converter` command, with the exported `.etlt` file and calibration cache (for int8 mode) on your target device. The converter utility included in this docker only works for x86 devices, with discrete NVIDIA GPU's. \n",
+    "\n",
+    "For the jetson devices, please download the converter for jetson from the dev zone link [here](https://developer.nvidia.com/tlt-converter). \n",
+    "\n",
+    "If you choose to integrate your model into deepstream directly, you may do so by simply copying the exported `.etlt` file along with the calibration cache to the target device and updating the spec file that configures the `gst-nvinfer` element to point to this newly exported model. Usually this file is called `config_infer_primary.txt` for detection models and `config_infer_secondary_*.txt` for classification models."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "### Source\n",
+    "\n",
+    "This Notebook was adapted from examples within NVIDIA TLT/TAO Docker container pulled from ngc.nvidia.com\n",
+    "\n",
+    "### Licensing \n",
+    "\n",
+    "This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0)."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
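Sections 3 and 6 of the notebook hard-code `EPOCH=080` after inspecting the training-log CSV by eye. Below is a hedged helper that picks the best checkpoint programmatically; the column names are an assumption, so check the CSV header produced by your TLT version before relying on it:

```python
# Pick the best checkpoint from the TLT training log instead of hard-coding
# EPOCH=080. Column names ("epoch", "mAP") are an assumption -- inspect the
# CSV header and adjust if your TLT version names them differently.
import csv

def best_epoch(log_path, metric="mAP"):
    best = None
    with open(log_path) as f:
        for row in csv.DictReader(f):
            value = row.get(metric, "").strip()
            if not value:
                continue
            score = float(value)
            if best is None or score > best[1]:
                best = (int(float(row["epoch"])), score)
    return best

log = "/workspace/tlt-experiments/yolo_v4/experiment_dir_unpruned/yolov4_training_log_resnet18.csv"
print(best_epoch(log))  # prints (epoch, metric value) of the best checkpoint
```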

+ 1 - 0
experimental/TAO_Transfer_Learning/English/yolo_v4/ignore.txt

@@ -0,0 +1 @@
+Yolov4 experimental folder

+ 2 - 2
experimental/TAO_Transfer_Learning/README.md

@@ -4,7 +4,7 @@ This folder contains contents for TLT learning bootcamp.
 
 - Transfer learning with NVIDIA TAO
 - Pretrained Model from NGC
-- Hands-on on Image classification that involes training, evaluation, pruning, retraining, inferencing, model export, and INT8 optimization
+- Hands-on image classification and YOLOv4 object detection that involves training, evaluation, pruning, retraining, inferencing, model export, and INT8 optimization
 
 
 ## Prerequisites
@@ -15,7 +15,7 @@ To run this tutorial you will need a machine with NVIDIA GPU.
 - The base containers required for the lab may require users to create a NGC account and generate an API key (https://docs.nvidia.com/ngc/ngc-catalog-user-guide/index.html#registering-activating-ngc-account)
 
 #Tutorial Duration
-The total bootcamp material would take approximately 2 hours.
+The total bootcamp material would take approximately 6 hours.
 
 ## Creating containers
 To start with, you will have to build a Docker or Singularity container.