
Merge pull request #58 from aswinkumar1999/master

Added AI/Distributed_Deep_Learning_Bootcamp
Aswinkumar 2 years ago
parent
commit
638011d4e8
60 changed files with 8142 additions and 0 deletions
  1. ai/Distributed_Deep_Learning/Dockerfile (+27, -0)
  2. ai/Distributed_Deep_Learning/English/python/Start_Here.ipynb (+129, -0)
  3. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/1.Introduction-to-Distributed-Deep-Learning.ipynb (+357, -0)
  4. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/2.1.System-Topology.ipynb (+678, -0)
  5. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/2.2.DLProf.ipynb (+272, -0)
  6. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/3.Hands-on-Multi-GPU.ipynb (+717, -0)
  7. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/4.Convergence.ipynb (+444, -0)
  8. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/References.ipynb (+93, -0)
  9. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/0_3.png (BIN)
  10. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/2_7.png (BIN)
  11. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/DlProf_layers.png (BIN)
  12. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/accuracy.png (BIN)
  13. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/ai-and-compute-modern-log.png (BIN)
  14. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/async.png (BIN)
  15. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dgx1_8x_tesla_v100_topo.png (BIN)
  16. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_expert.png (BIN)
  17. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_gpus.png (BIN)
  18. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_iter.png (BIN)
  19. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_iters.png (BIN)
  20. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ker_iter.png (BIN)
  21. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ker_ops.png (BIN)
  22. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_keynode.png (BIN)
  23. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ops.png (BIN)
  24. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ops_ker.png (BIN)
  25. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ops_n.png (BIN)
  26. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprofviewer.png (BIN)
  27. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/horovod_exascale_2.png (BIN)
  28. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/intra_node_topology_map.png (BIN)
  29. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/memcpy_host_staging.png (BIN)
  30. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/memcpy_p2p_overview.png (BIN)
  31. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/minima.png (BIN)
  32. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/model_parallelism.jpg (BIN)
  33. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/noise-summary-3.svg (+664, -0)
  34. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/nvidia_smi_p2p_gpu0.png (BIN)
  35. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/nvidia_smi_topo_output.png (BIN)
  36. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/open_terminal.png (BIN)
  37. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/open_terminal_session.png (BIN)
  38. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/open_terminal_session_jl.png (BIN)
  39. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/paper1.png (BIN)
  40. ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/sync.png (BIN)
  41. ai/Distributed_Deep_Learning/English/python/source_code/N1/GPT.py (+224, -0)
  42. ai/Distributed_Deep_Learning/English/python/source_code/N1/cnn_fmnist.py (+103, -0)
  43. ai/Distributed_Deep_Learning/English/python/source_code/N2/Common/exception.h (+151, -0)
  44. ai/Distributed_Deep_Learning/English/python/source_code/N2/Common/helper_cuda.h (+967, -0)
  45. ai/Distributed_Deep_Learning/English/python/source_code/N2/Common/helper_string.h (+368, -0)
  46. ai/Distributed_Deep_Learning/English/python/source_code/N2/Common/helper_timer.h (+465, -0)
  47. ai/Distributed_Deep_Learning/English/python/source_code/N2/LICENSE (+28, -0)
  48. ai/Distributed_Deep_Learning/English/python/source_code/N2/Samples/p2pBandwidthLatencyTest/Makefile (+337, -0)
  49. ai/Distributed_Deep_Learning/English/python/source_code/N2/Samples/p2pBandwidthLatencyTest/README.md (+70, -0)
  50. ai/Distributed_Deep_Learning/English/python/source_code/N2/Samples/p2pBandwidthLatencyTest/p2pBandwidthLatencyTest.cu (+695, -0)
  51. ai/Distributed_Deep_Learning/English/python/source_code/N2/cnn_fmnist.py (+103, -0)
  52. ai/Distributed_Deep_Learning/English/python/source_code/N3/cnn_fmnist.py (+103, -0)
  53. ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar.py (+178, -0)
  54. ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_base.py (+174, -0)
  55. ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_batch_norm.py (+177, -0)
  56. ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_lamb.py (+178, -0)
  57. ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_scalelr.py (+176, -0)
  58. ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_warmup.py (+177, -0)
  59. ai/Distributed_Deep_Learning/README.md (+58, -0)
  60. ai/Distributed_Deep_Learning/Singularity (+29, -0)

+ 27 - 0
ai/Distributed_Deep_Learning/Dockerfile

@@ -0,0 +1,27 @@
+# Copyright (c) 2020 NVIDIA Corporation.  All rights reserved.
+
+# To build the docker container, run: $ sudo docker build -t ai-multi-gpu:latest .
+# To run: $ sudo docker run --rm -it --gpus=all -p 8888:8888 -p 8000:8000 ai-multi-gpu:latest
+# Finally, open http://127.0.0.1:8888/
+
+# Select Base Image 
+FROM nvcr.io/nvidia/tensorflow:21.07-tf2-py3
+# Update the repo
+RUN apt-get update -y
+# Install required dependencies
+RUN apt-get install -y git nvidia-modprobe
+# Install required python packages
+RUN pip3 install ipywidgets
+
+##### TODO - Change this when copying from the final repo
+
+# Copy the course content into the workspace
+COPY English/ /workspace/
+
+# Download the WikiText-2 dataset
+RUN mkdir /workspace/python/source_code/Data
+RUN curl https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip -o /workspace/python/source_code/Data/wikitext-2-v1.zip
+RUN unzip /workspace/python/source_code/Data/wikitext-2-v1.zip -d /workspace/python/source_code/Data
+
+## Run JupyterLab by default
+CMD jupyter-lab --no-browser --allow-root --ip=0.0.0.0 --port=8888 --NotebookApp.token="" --notebook-dir=/workspace/python/

File diff suppressed because it is too large
+ 129 - 0
ai/Distributed_Deep_Learning/English/python/Start_Here.ipynb


File diff suppressed because it is too large
+ 357 - 0
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/1.Introduction-to-Distributed-Deep-Learning.ipynb


File diff suppressed because it is too large
+ 678 - 0
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/2.1.System-Topology.ipynb


+ 272 - 0
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/2.2.DLProf.ipynb

@@ -0,0 +1,272 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<div>\n",
+    "    <span style=\"float: left; width: 33%; text-align: left;\"><a href=\"2.1.System-Topology.ipynb\">Previous Notebook</a></span>\n",
+    "    <span style=\"float: left; width: 33%; text-align: center;\">\n",
+    "        <a href=\"../Start_Here.ipynb\">Home Page</a>\n",
+    "    </span>\n",
+    "</div>\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Getting started with DLProf\n",
+    "\n",
+    "**Contents of this notebook:**\n",
+    "\n",
+    "- [Introduction to DLProf](#Introduction-to-DLProf)\n",
+    "- [Profiling using DLProf](#Profiling-using-DLProf)\n",
+    "- [Visualising profiles using DLProfviewer](#Visualising-profiles-using-DLProfviewer)\n",
+    "- [Improving throughput using DLProf Expert system](#Improving-throughput-using-DLProf-Expert-system)\n",
+    "\n",
+    "\n",
+    "**By the End of this Notebook you will:**\n",
+    "\n",
+    "- Learn the basics of `dlProf`\n",
+    "- Learn how to profile using `dlprof`\n",
+    "- Learn to visualise profile output using `dlprofviewer`"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Introduction to DLProf\n",
+    "\n",
+    "NVIDIA Deep Learning Profiler is a tool built to help data scientists understand and improve the performance of their models. It is built on top of the NVIDIA NSight Systems tools to collect the profiling data and aggregate them into the format that can be best consumable by Deep Leaning researchers, data scientists and engineers.\n",
+    "\n",
+    "<center><img src=\"images/DlProf_layers.png\" width=\"400\"/></center>\n",
+    "\n",
+    "\n",
+    "\n",
+    "DLProf was created with the following in mind :\n",
+    "\n",
+    "- Deep learning profiling for correlating to model layer and iterations. \n",
+    "- Framework support built it.\n",
+    "- A method to quickly visualise the profile in an intuitive manner to understand different aspects of the deep learning system. \n",
+    "- Providing high-level information regarding different aspects of the DL workload.\n",
+    "\n",
+    "Let us now begin profiling using DLProf."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Profiling using DLProf\n",
+    "\n",
+    "Let us now begin by running the `dlprof` command , let us now profile the application we used in Introduction to Distributed Deep learning notebook.\n",
+    "\n",
+    "**Note** : For the first run , we need to run `dlprof ` for a shorter duration so that we can define iterations called as `key node` which we will later pass to `dlprof` as a parameter to define iterations in the training process for a complete run."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "!TF_CPP_MIN_LOG_LEVEL=3 dlprof --mode=tensorflow2 --reports=detail --delay=5 --duration=30 --output_path=\"Profile/Prof1\" horovodrun -np 1 python3 ../source_code/N1/cnn_fmnist.py --batch-size=2048"
+   ]
+  },
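For context, the script being profiled above, `../source_code/N1/cnn_fmnist.py`, is one of the files added by this PR (103 lines; its diff is not shown on this page). As a rough orientation, a minimal Horovod + TF2 Fashion-MNIST training script of this kind might look like the sketch below; the layer choices, optimizer, and epoch count are illustrative assumptions, not the actual contents of the file:

```python
# Illustrative sketch only -- not the actual cnn_fmnist.py added by this PR.
import argparse

import tensorflow as tf
import horovod.tensorflow.keras as hvd

parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=256)  # same flag as passed to dlprof above
args = parser.parse_args()

# One Horovod process per GPU: initialize, then pin this process to its local GPU.
hvd.init()
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    tf.config.set_visible_devices(gpus[hvd.local_rank()], "GPU")

# Fashion-MNIST: 28x28 grayscale images, 10 classes.
(x_train, y_train), _ = tf.keras.datasets.fashion_mnist.load_data()
x_train = x_train[..., None].astype("float32") / 255.0

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10),  # logits
])

# Scale the learning rate with the number of workers and wrap the optimizer
# so gradients are averaged across workers via allreduce.
opt = hvd.DistributedOptimizer(tf.keras.optimizers.Adam(1e-3 * hvd.size()))

# Sparse categorical crossentropy: this is the op family the notebook later
# selects as the key_node.
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=opt,
    metrics=["accuracy"],
)

# Keep all workers' initial weights in sync; log only from rank 0.
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
model.fit(
    x_train, y_train,
    batch_size=args.batch_size,
    epochs=4,
    callbacks=callbacks,
    verbose=1 if hvd.rank() == 0 else 0,
)
```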
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let us understand the parameters from above : \n",
+    "\n",
+    "- `--mode=tensorflow2` : This sets the target framework to generate detailed metrics and reports specific to the framework.\n",
+    "- `--reports=details` : This selects the aggregated report(s) to generate.We use `details` so that we can identify the operation to define an iteration.\n",
+    "- `--delay=15dlprofviewer -p 8000 /Profile/Prof1/dlprof_dldb.sqlite` : Collection start delay in seconds , we set this to a positive value considering dataset download and processing time.\n",
+    "- `--duration=30` : Collection duration in seconds. \n",
+    "- `--output_path=/Profile/Prof1` : Setting an output path to store the profile output.\n",
+    "\n",
+    "\n",
+    "With that let us now visualise the results of the profile. \n",
+    "\n",
+    "## Visualising profiles using DLProfviewer\n",
+    "\n",
+    "Launch a Terminal session by clicking on `File` $\\rightarrow$ `New` $\\rightarrow$ `Terminal` \n",
+    "\n",
+    "<center><img src=\"images/open_terminal.png\" /></center>\n",
+    "\n",
+    "and run the following command to launch the `dlprofviewer` server with the port `8000` . Kindly change it to a port that you will be using. \n",
+    "\n",
+    "```bash\n",
+    "dlprofviewer -b 0.0.0.0 -p 8000 /Profile/Prof1/dlprof_dldb.sqlite\n",
+    "```\n",
+    "\n",
+    "You should now have a `dlprofviewer` server running on the port specific. \n",
+    "\n",
+    "Open a new tab in your browser and navigate to `127.0.0.1:8000` to access the `dlprofviewer` application. You need to change the port number here to the one you specified while launching the server. \n",
+    "\n",
+    "You should be seeing the following page , this is called the DLProf Dashboard. The Dashboard view provides a high level summary of the performance results in a panelized view. This view serves as a starting point in analyzing the results and provides several key metrics.\n",
+    "\n",
+    "**Note** : If you are not able to access the DLProf dashboard , kindly verify if you have access to port and verify if the port number forwarded matches the port dlprofviewer server is running on.\n",
+    "\n",
+    "![dlprofviewer](images/dlprofviewer.png)\n",
+    "\n",
+    "Let us now focus on the Dashboard and understand what the differnet panels in the Dashboard are for.\n",
+    "\n",
+    "- **GPU Utilization Chart**: Shows the percentage of the wall clock time that the GPU is active. For multi-gpu, it is an average utilization across all GPUs\n",
+    "- **Op GPU Time Chart**: Splits all operations into 3 categories: Operations that used tensor cores, operations that were eligible to use tensor cores but didn't, and operations that were ineligible to use tensor cores\n",
+    "- **Kernel GPU Time Chart**: Breaks down all kernel time into 3 categories: Kernels that used tensor cores, memory kernels, and all other kernels\n",
+    "- **Tensor Core Kernel Efficiency Chart**: Gives a single number that measures what percentage of GPU time inside of TC-eligible ops are using tensor cores. \n",
+    "- **Performance summary**:  A straightforward panel that shows all of the key metrics from the run in one place\n",
+    "- **Iteration Summary**: A bar chart that shows the amount of time each iteration took during the run. The colored bars are the ones that were used to generate all of the statistics, while the gray bars are iterations that were outside the selected range.  Each colored bar shows the breakdown of iteration time into GPU using TC, GPU not using TC, and all other non-GPU time.\n",
+    "- **Top 10 GPU Ops**: Shows the top 10 operations in the run sorted by the amount of GPU time they took. This is a great starting point for trying to find potential for improvements \n",
+    "- **System Config**: Shows the system configuration for the run.\n",
+    "- **Expert Systems Recommendations**: Shows any potential problems that DLProf found and recommendations for how to fix them.\n",
+    "- **Guidance Panel**: Provides some helpful links to learn more about GPU utilization and performance improvements\n",
+    "\n",
+    "\n",
+    "Let us now look at some more details provided by the DLProf Viewer \n",
+    "\n",
+    "\n",
+    "**Op Type Summary** : This page contains tables that aggregates metrics over all op types and enables users to see the performance of all the ops in terms of its types, such as Convolutions, Matrix Multiplications, etc.\n",
+    "\n",
+    "![Op-type-summary](images/dlprof_ops_n.png)\n",
+    "\n",
+    "In the above image we can notice the tabular data is sorted by the time taken by the GPU for every operation. This allows us to understand the number of times an operation is called and the time taken by them , this will be used in the System Topology notebook to differentiate between the different types of GPU-GPU connectivity.\n",
+    "\n",
+    "**Ops and Kernels** : This view enables users to view, search, sort all ops and their corresponding kernels in the entire network.\n",
+    "\n",
+    "![Ops_kernels](images/dlprof_ops_ker.png)\n",
+    "\n",
+    "We will look into the remaining tabs in the following section.\n",
+    "\n",
+    "Let us now profile again with `key_node` parameter , remember the `key_node` parameters is used to define a iteration , so we need to look for an operation in the **Ops and Kernels Summary** tab that occurs at every iteration.\n",
+    "\n",
+    "Here , let us choose the loss function operation name as `key_node` as we are aware this is calculated at the end of every iteration.\n",
+    "\n",
+    "\n",
+    "![Keynode](images/dlprof_keynode.png)\n",
+    "\n",
+    "Let us now add this parameter to profile our deep learning workload.\n",
+    "\n",
+    "```bash\n",
+    "--key_node=sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits\n",
+    "```\n",
+    "\n"
+   ]
+  },
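As a side note, the `dlprof_dldb.sqlite` file that `dlprofviewer` serves is an ordinary SQLite database, so it can also be inspected programmatically. Below is a minimal sketch assuming only that the profile was written to the `Profile/Prof1` path used above; the database schema is not documented in this notebook, so the sketch merely lists whatever tables are present:

```python
# Peek inside the DLProf database that dlprofviewer serves.
# Assumes nothing about the schema beyond the file being valid SQLite.
import sqlite3

con = sqlite3.connect("Profile/Prof1/dlprof_dldb.sqlite")
tables = con.execute(
    "SELECT name FROM sqlite_master WHERE type = 'table' ORDER BY name"
).fetchall()
for (name,) in tables:
    # Print each table with its row count to get a feel for what was collected.
    count = con.execute(f'SELECT COUNT(*) FROM "{name}"').fetchone()[0]
    print(f"{name}: {count} rows")
con.close()
```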
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!TF_CPP_MIN_LOG_LEVEL=3 dlprof --mode=tensorflow2 --key_node=sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits --output_path=\"Profile/Prof2\" horovodrun -np 1 python3 ../source_code/N1/cnn_fmnist.py --batch-size=2048"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Close the already running `dlprofviewer` server and run it again with the latest profile. \n",
+    "\n",
+    "```bash\n",
+    "dlprofviewer -b 0.0.0.0 -p 8000 Profile/Prof2/dlprof_dldb.sqlite\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We again come across the Dashboard , but this time we will be having a different Dashboard compared the the previous one as we have added the `key_node` parameter thus defining an iteration. This allows us to compare multiple parameters between different iterations. \n",
+    "\n",
+    "Here's a short brief on the remaining tabs that utilise the `key_node` parameter to display information tagged with iterations : \n",
+    "\n",
+    "- **Kernels by Iteration** : The Kernels by Iterations view shows operations and their execution time for every iteration. At a glance, you can compare iterations with respect to time as well as Kernels executed.\n",
+    "\n",
+    "- **Kernels by Op** : The Kernels by Op view is a variation of the Kernels by Iterations view. It has the capability to filter the list of kernels by iterations and op.\n",
+    "\n",
+    "- **Iterations** : This view displays iterations visually. Users can quickly see how many iterations are in the model, the iterations that were aggregated/profiled, and the accumulated durations of tensor core kernels in each iteration.\n",
+    "\n",
+    "Here is an example of the iterations tab where we have access to information specific to each iteration of training : \n",
+    "\n",
+    "<center><img src=\"images/dlprof_iters.png\"/></center>\n",
+    "\n",
+    "\n",
+    "\n",
+    "The final tab give us the summary of GPU Utilisation :\n",
+    "\n",
+    "- **GPUs** : This view shows the utilization of all GPUs during the profile run.\n",
+    "\n",
+    "<center><img src=\"images/dlprof_gpus.png\"/></center>\n",
+    "\n",
+    "Now that we understand the types of information that DLProf provides us with , let us now take a look on how to improve our throughput using DLProf."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Improving throughput using DLProf Expert system\n",
+    "\n",
+    "Until now we understand the amount of information made available to us via DLProf , but for an user trying to optimize their model and make use of new techniques, this information would not be straighforward , in that case the Expert Systems Recommendations is very helpful to find potential problems and recommendations for how to fix them.\n",
+    "\n",
+    "Let us take a closer look from the above profile.\n",
+    "\n",
+    "<center><img src=\"images/dlprof_expert.png\"/></center>\n",
+    "\n",
+    "Now that we have learnt the basics of DLProf and how to improve throughput using the DLProf expert systems.Let us now go back to the System topology notebook to use DLProf to understand the difference in communication times taken in different cases."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "***\n",
+    "\n",
+    "## Licensing\n",
+    "\n",
+    "This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<div>\n",
+    "    <span style=\"float: left; width: 33%; text-align: left;\"><a href=\"2.1.System-Topology.ipynb\">Previous Notebook</a></span>\n",
+    "    <span style=\"float: left; width: 33%; text-align: center;\">\n",
+    "        <a href=\"../Start_Here.ipynb\">Home Page</a>\n",
+    "    </span>\n",
+    "</div>\n",
+    "\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

File diff suppressed because it is too large
+ 717 - 0
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/3.Hands-on-Multi-GPU.ipynb


File diff suppressed because it is too large
+ 444 - 0
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/4.Convergence.ipynb


+ 93 - 0
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/References.ipynb

@@ -0,0 +1,93 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<p> <center> <a href=\"../Start_Here.ipynb\">Home Page</a> </center> </p>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# References\n",
+    "\n",
+    "This notebook contains references used in the material."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- [**Introduction to Distributed deep learning**](jupyter_notebook/1.Introduction-to-Distributed-Deep-Learning.ipynb)\n",
+    "   - [AI and Compute](https://openai.com/blog/ai-and-compute/)\n",
+    "   - [Multiple GPUs Using Model Parallelism](https://mxnet.apache.org/versions/1.8.0/api/faq/model_parallel_lstm)\n",
+    "   - [Model Parallelism](https://docs.aws.amazon.com/sagemaker/latest/dg/model-parallel-intro.html#:~:text=Model%20parallelism%20is%20the%20process,devices%20to%20maximize%20GPU%20utilization)\n",
+    "   - [Data Parallelism](https://leimao.github.io/blog/Data-Parallelism-vs-Model-Paralelism/)\n",
+    "   - [Tensorflow-parallel computing](https://www.programmersought.com/article/26705294588/)\n",
+    "   - [Data parallel and model parallel distributed training with Tensorflow](http://kuozhangub.blogspot.com/2017/08/data-parallel-and-model-parallel.html)\n",
+    "   - [Exascale Deep Learning for Climate Analytics](https://arxiv.org/pdf/1810.01993.pdf)\n",
+    "\n",
+    "\n",
+    "- [**System Topology**](jupyter_notebook/2.1.System-Topology.ipynb)\n",
+    "   - [DLProf](https://docs.nvidia.com/deeplearning/frameworks/dlprof-user-guide/)\n",
+    "   - [CUDA Samples](https://github.com/NVIDIA/cuda-samples)\n",
+    "\n",
+    "\n",
+    "- [**Hands-on with Distributed training**](jupyter_notebook/3.Hands-on-Multi-GPU.ipynb)\n",
+    "   - [Distributed training with TensorFlow](https://www.tensorflow.org/guide/distributed_training#overview)\n",
+    "   - [Horovod](https://github.com/horovod/horovod)\n",
+    "\n",
+    "\n",
+    "- [**Challenges with convergence**](jupyter_notebook/4.Convergence.ipynb)\n",
+    "   - [Measuring the Effects of Data Parallelism\n",
+    "on Neural Network Training](https://arxiv.org/pdf/1811.03600.pdf)\n",
+    "   - [An Empirical Model of Large-Batch Training](https://arxiv.org/pdf/1812.06162)\n",
+    "   - [How AI Training Scales](https://openai.com/blog/science-of-ai/)\n",
+    "   - [ImageNet Training in Minutes](https://arxiv.org/pdf/1709.05011.pdf)\n",
+    "   - [On Large-Batch Training for Deep Learning:\n",
+    "Generalization Gap and Sharp Minima](https://arxiv.org/pdf/1609.04836.pdf)\n",
+    "   - [Train longer, generalize better: closing the\n",
+    "generalization gap in large batch training of neural\n",
+    "networks](https://arxiv.org/pdf/1705.08741.pdf)\n",
+    "   - [Accurate, Large Minibatch SGD:\n",
+    "Training ImageNet in 1 Hour](https://arxiv.org/pdf/1706.02677.pdf)\n",
+    "   - [Large Batch Optimization for Deep Learning:\n",
+    "Training BERT in 76 Minutes](https://arxiv.org/pdf/1904.00962.pdf)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "***\n",
+    "\n",
+    "## Licensing\n",
+    "\n",
+    "This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0)."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/0_3.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/2_7.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/DlProf_layers.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/accuracy.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/ai-and-compute-modern-log.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/async.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dgx1_8x_tesla_v100_topo.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_expert.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_gpus.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_iter.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_iters.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ker_iter.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ker_ops.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_keynode.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ops.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ops_ker.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprof_ops_n.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/dlprofviewer.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/horovod_exascale_2.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/intra_node_topology_map.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/memcpy_host_staging.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/memcpy_p2p_overview.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/minima.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/model_parallelism.jpg


+ 664 - 0
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/noise-summary-3.svg

@@ -0,0 +1,664 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1200 1040">
+  <defs>
+    <style>
+      .cls-1 {
+        fill: #f7fbfb;
+      }
+
+      .cls-10, .cls-11, .cls-12, .cls-2, .cls-4, .cls-5 {
+        fill: none;
+      }
+
+      .cls-2, .cls-4, .cls-5 {
+        stroke: #c8c8d3;
+      }
+
+      .cls-11, .cls-2, .cls-4, .cls-5 {
+        stroke-miterlimit: 10;
+      }
+
+      .cls-10, .cls-11, .cls-12, .cls-2, .cls-4 {
+        stroke-width: 1.25px;
+      }
+
+      .cls-2 {
+        opacity: 0.3;
+      }
+
+      .cls-3 {
+        fill: #6e6e80;
+      }
+
+      .cls-5 {
+        stroke-width: 1.25px;
+      }
+
+      .cls-6 {
+        fill: #27b5ea;
+      }
+
+      .cls-7 {
+        fill: #e86c09;
+      }
+
+      .cls-8 {
+        fill: #10a37f;
+      }
+
+      .cls-9 {
+        opacity: 0.5;
+      }
+
+      .cls-10 {
+        stroke: #6e6e80;
+        stroke-linecap: square;
+        stroke-linejoin: round;
+        opacity: 0.8;
+      }
+
+      .cls-11 {
+        stroke: #c5c5d2;
+      }
+
+      .cls-12 {
+        stroke: #10a37f;
+      }
+    </style>
+  </defs>
+  <title>compute_diagram-log copy</title>
+  <g id="Layer_1" data-name="Layer 1">
+    <rect class="cls-1" x="320" y="50" width="829.89" height="689.49"/>
+  </g>
+  <g id="New_Labels" data-name="New Labels">
+    <g>
+      <path class="cls-2" d="M357.73,739.7V50.22"/>
+      <path class="cls-2" d="M483.48,739.7V50.22"/>
+      <path class="cls-2" d="M609.24,739.7V50.22"/>
+      <path class="cls-2" d="M735,739.7V50.22"/>
+      <path class="cls-2" d="M860.75,739.7V50.22"/>
+      <path class="cls-2" d="M986.5,739.7V50.22"/>
+      <path class="cls-2" d="M1112.24,739.7V50.22"/>
+      <path class="cls-2" d="M320,651.89h829.88"/>
+      <path class="cls-2" d="M320,563.56h829.88"/>
+      <path class="cls-2" d="M320,475.22h829.88"/>
+      <path class="cls-2" d="M320,386.89h829.88"/>
+      <path class="cls-2" d="M320,298.55h829.88"/>
+      <path class="cls-2" d="M320,210.22h829.88"/>
+      <path class="cls-2" d="M320,121.88h829.88"/>
+    </g>
+    <g>
+      <path class="cls-3" d="M441.32,821.61v-2.7c0-4.83,2.56-8.48,8.26-8.48,4.78,0,7.72,3.24,7.91,7.1H454a4.17,4.17,0,0,0-4.4-4c-3.16,0-4.51,2.16-4.51,5.18v3.08c0,3,1.35,5.18,4.51,5.18a4.17,4.17,0,0,0,4.4-4h3.51c-.19,3.86-3.13,7.1-7.91,7.1C443.88,830.08,441.32,826.44,441.32,821.61Z"/>
+      <path class="cls-3" d="M462,815.67h3.48v2.72c.46-2.1,2.11-2.83,4.21-2.83h.43v3.64h-.75c-2.41,0-3.7,1.19-3.7,4v6.47H462Z"/>
+      <path class="cls-3" d="M476.09,809.86A2.14,2.14,0,1,1,474,812,2.08,2.08,0,0,1,476.09,809.86Zm-1.83,5.81h3.67v14h-3.67Z"/>
+      <path class="cls-3" d="M484.06,825.71v-7.07h-2v-3h2v-3.83h3.7v3.83h3v3h-3v6.8c0,.92.35,1.3,1.32,1.3h1.48v3H488C485.33,829.7,484.06,828.19,484.06,825.71Z"/>
+      <path class="cls-3" d="M496.94,809.86A2.14,2.14,0,1,1,494.8,812,2.07,2.07,0,0,1,496.94,809.86Zm-1.84,5.81h3.67v14H495.1Z"/>
+      <path class="cls-3" d="M503.36,823.68v-2c0-3.67,2.41-6.35,6.64-6.35,3.76,0,6.29,2.35,6.4,5.56h-3.35a2.75,2.75,0,0,0-3-2.59c-2.07,0-3.05,1.35-3.05,3.24v2.27c0,1.89,1,3.24,3.05,3.24a2.75,2.75,0,0,0,3-2.59h3.35c-.11,3.21-2.64,5.56-6.4,5.56C505.77,830,503.36,827.36,503.36,823.68Z"/>
+      <path class="cls-3" d="M520.16,825.66c0-3,2.29-4.46,5.67-4.46a5.64,5.64,0,0,1,3.56,1.19v-1.92a2.23,2.23,0,0,0-2.43-2.26,2.51,2.51,0,0,0-2.43,1.32h-3.4c.81-2.73,3-4.19,6-4.19,3.51,0,5.91,2.14,5.91,5.4v9h-3.54v-1.29c-.64.94-2.43,1.62-4,1.62C522.32,830,520.16,828.38,520.16,825.66Zm9.23-.14c0-1.24-1.22-2-2.86-2s-2.81.7-2.81,2,1.16,2.05,2.7,2.05S529.39,826.71,529.39,825.52Z"/>
+      <path class="cls-3" d="M538,810h3.67v19.7H538Z"/>
+      <path class="cls-3" d="M554.26,810.81h8.47c3.32,0,5.4,1.94,5.4,4.78a4.76,4.76,0,0,1-2.37,4.34,4.28,4.28,0,0,1,3.51,4.46c0,3.16-2.62,5.31-6.4,5.31h-8.61Zm7.64,7.83a2.31,2.31,0,0,0,2.45-2.51,2.24,2.24,0,0,0-2.48-2.35h-3.94v4.86Zm.78,8.1a2.48,2.48,0,0,0,2.81-2.6c0-1.56-1.06-2.56-2.84-2.56h-4.72v5.16Z"/>
+      <path class="cls-3" d="M572.64,825.66c0-3,2.3-4.46,5.67-4.46a5.65,5.65,0,0,1,3.57,1.19v-1.92a2.23,2.23,0,0,0-2.43-2.26,2.51,2.51,0,0,0-2.43,1.32h-3.4c.81-2.73,3-4.19,6-4.19,3.5,0,5.91,2.14,5.91,5.4v9H582v-1.29a5.33,5.33,0,0,1-4,1.62C574.8,830,572.64,828.38,572.64,825.66Zm9.24-.14c0-1.24-1.22-2-2.87-2s-2.8.7-2.8,2,1.16,2.05,2.7,2.05S581.88,826.71,581.88,825.52Z"/>
+      <path class="cls-3" d="M591,825.71v-7.07h-2v-3h2v-3.83h3.69v3.83h3v3h-3v6.8c0,.92.36,1.3,1.33,1.3h1.48v3H595C592.25,829.7,591,828.19,591,825.71Z"/>
+      <path class="cls-3" d="M601.16,823.68v-2c0-3.67,2.4-6.35,6.64-6.35,3.75,0,6.28,2.35,6.39,5.56h-3.34a2.76,2.76,0,0,0-3-2.59c-2.08,0-3,1.35-3,3.24v2.27c0,1.89,1,3.24,3,3.24a2.76,2.76,0,0,0,3-2.59h3.34c-.11,3.21-2.64,5.56-6.39,5.56C603.56,830,601.16,827.36,601.16,823.68Z"/>
+      <path class="cls-3" d="M618.54,810h3.67v7.34a4.48,4.48,0,0,1,4-2c3.05,0,5,2.06,5,5.38v9h-3.67v-8.39c0-1.62-.78-2.78-2.57-2.78a2.53,2.53,0,0,0-2.7,2.86v8.31h-3.67Z"/>
+      <path class="cls-3" d="M643,824.22v-.48h3.62v.4c0,1.76,1.38,3,3.91,3,2.24,0,3.7-1,3.7-2.76a2.05,2.05,0,0,0-1.78-2.13L647.46,821c-2.86-.73-4.21-2-4.21-4.88,0-3.57,2.7-5.7,6.94-5.7s6.82,2.32,6.82,5.64v.46h-3.48v-.38c0-1.59-1-2.8-3.34-2.8-2,0-3.3.81-3.3,2.45a1.89,1.89,0,0,0,1.62,2l5.18,1.38c2.78.73,4.13,2.37,4.13,4.94,0,3.59-3.21,6-7.47,6C645.89,830.08,643,827.92,643,824.22Z"/>
+      <path class="cls-3" d="M664.36,809.86a2.14,2.14,0,1,1-2.13,2.14A2.07,2.07,0,0,1,664.36,809.86Zm-1.83,5.81h3.67v14h-3.67Z"/>
+      <path class="cls-3" d="M670.82,827.06l6.93-8.56h-6.61v-2.83H682v2.64l-6.94,8.56h7.1v2.83H670.82Z"/>
+      <path class="cls-3" d="M685.64,823.77v-1.92c0-3.81,2.27-6.51,6.48-6.51s6.26,2.78,6.26,6.16V824H689.2v.36a2.77,2.77,0,0,0,3,3.07,2.87,2.87,0,0,0,2.89-1.78h3.21c-.49,2.57-3,4.43-6.1,4.43C688.2,830,685.64,827.71,685.64,823.77Zm3.56-2.41H695V821c0-1.75-.91-3.07-2.88-3.07s-2.92,1.4-2.92,3.29Z"/>
+      <path class="cls-3" d="M710.94,814.46a1,1,0,1,1,0,2,1,1,0,1,1,0-2Zm-.84,4h1.67V829.7H710.1Z"/>
+      <path class="cls-3" d="M715.47,826.43v-.11H717v.06c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.58,1.58,0,0,0-1.41-1.56l-3.34-.77c-1.68-.38-2.64-1.39-2.64-2.86,0-2.12,1.78-3.22,4.26-3.22s4.19,1.43,4.19,3.53v.11h-1.52v-.07c0-1.39-1-2.31-2.69-2.31s-2.7.66-2.7,1.91a1.37,1.37,0,0,0,1.18,1.46l3.46.81c1.74.42,2.77,1.28,2.77,2.9,0,2-1.93,3.46-4.58,3.46S715.47,828.6,715.47,826.43Z"/>
+      <path class="cls-3" d="M737.49,829.7c-2.16,0-3.06-1.07-3.06-3V819.8h-1.91v-1.36h1.91V815.2h1.68v3.24H739v1.36h-2.88v6.91c0,1.15.41,1.63,1.67,1.63h1.12v1.36Z"/>
+      <path class="cls-3" d="M742.26,813.64h1.68v6.54a3.89,3.89,0,0,1,3.67-2c2.53,0,4.14,1.72,4.14,4.43v7.1h-1.67v-6.86c0-1.92-.86-3.21-2.89-3.21-1.85,0-3.25,1.12-3.25,3.45v6.62h-1.68Z"/>
+      <path class="cls-3" d="M755.2,825.06v-1.8c0-3.06,1.74-5.09,4.91-5.09s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,2,1,3.54,3.36,3.54a3,3,0,0,0,3.11-2.18h1.49a4.44,4.44,0,0,1-4.6,3.46A4.65,4.65,0,0,1,755.2,825.06Zm1.61-1.58h6.51V823c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M773.88,818.44h1.58v1.91a3.72,3.72,0,0,1,3.72-2.18,3.79,3.79,0,0,1,3.59,2.21,4.46,4.46,0,0,1,4.09-2.21c2.46,0,4,1.76,4,4.43v7.1h-1.67v-6.86c0-1.92-1-3.21-2.8-3.21s-3.21,1.12-3.21,3.45v6.62h-1.67v-6.86c0-1.92-.95-3.21-2.8-3.21s-3.21,1.12-3.21,3.45v6.62h-1.67Z"/>
+      <path class="cls-3" d="M802.54,829.7v-1.4a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.5-4.27-3.54s1.79-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.4v-2.09a2.61,2.61,0,0,0-2.82-2.73,2.83,2.83,0,0,0-2.81,1.7h-1.54a4.26,4.26,0,0,1,4.37-3c2.8,0,4.47,1.83,4.47,4.05v7.48Zm-.06-3.3c0-1.4-1.67-2.39-3.37-2.39-1.87,0-3.12.88-3.12,2.44s1.3,2.44,3,2.44S802.48,827.86,802.48,826.4Z"/>
+      <path class="cls-3" d="M816.76,829.7H815l-3.06-4.48-3.06,4.48h-1.76l4-5.72-3.81-5.54H809l2.93,4.29,2.93-4.29h1.73L812.8,824Z"/>
+      <path class="cls-3" d="M820.8,814.46a1,1,0,1,1-1,1A1,1,0,0,1,820.8,814.46Zm-.83,4h1.67V829.7H820Z"/>
+      <path class="cls-3" d="M825.86,818.44h1.59v1.91a3.72,3.72,0,0,1,3.72-2.18,3.77,3.77,0,0,1,3.58,2.21,4.47,4.47,0,0,1,4.09-2.21c2.47,0,4,1.76,4,4.43v7.1h-1.67v-6.86c0-1.92-.94-3.21-2.79-3.21s-3.22,1.12-3.22,3.45v6.62h-1.67v-6.86c0-1.92-.94-3.21-2.79-3.21s-3.21,1.12-3.21,3.45v6.62h-1.68Z"/>
+      <path class="cls-3" d="M856.16,829.7h-1.59v-1.91a3.75,3.75,0,0,1-3.76,2.18c-2.46,0-4-1.72-4-4.42v-7.11h1.67v6.86c0,1.92.83,3.26,2.77,3.26s3.26-1.17,3.26-3.5v-6.62h1.67Z"/>
+      <path class="cls-3" d="M860.27,818.44h1.59v1.91a3.7,3.7,0,0,1,3.71-2.18,3.78,3.78,0,0,1,3.59,2.21,4.46,4.46,0,0,1,4.09-2.21c2.47,0,4,1.76,4,4.43v7.1h-1.67v-6.86c0-1.92-1-3.21-2.8-3.21s-3.21,1.12-3.21,3.45v6.62H868v-6.86c0-1.92-1-3.21-2.79-3.21s-3.22,1.12-3.22,3.45v6.62h-1.67Z"/>
+      <path class="cls-3" d="M888.37,813.64v6.47a4.22,4.22,0,0,1,3.76-1.94c2.84,0,4.66,2.12,4.66,5v1.72c0,2.82-1.74,5-4.71,5a4.17,4.17,0,0,1-3.8-2v1.76h-1.59V813.64Zm6.75,9.64c0-2.07-1.17-3.74-3.3-3.74a3.48,3.48,0,0,0-3.45,3.74v1.45c0,2.18,1.21,3.87,3.43,3.87s3.32-1.67,3.32-3.74Z"/>
+      <path class="cls-3" d="M908,829.7v-1.4a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.5-4.27-3.54s1.78-3.52,4.47-3.52a4.9,4.9,0,0,1,3.6,1.4v-2.09a2.61,2.61,0,0,0-2.81-2.73,2.85,2.85,0,0,0-2.82,1.7h-1.54a4.27,4.27,0,0,1,4.38-3c2.79,0,4.47,1.83,4.47,4.05v7.48Zm-.07-3.3c0-1.4-1.67-2.39-3.36-2.39-1.87,0-3.13.88-3.13,2.44s1.3,2.44,3,2.44S907.92,827.86,907.92,826.4Z"/>
+      <path class="cls-3" d="M917.41,829.7c-2.16,0-3.06-1.07-3.06-3V819.8h-1.92v-1.36h1.92V815.2H916v3.24h2.88v1.36H916v6.91c0,1.15.42,1.63,1.67,1.63h1.12v1.36Z"/>
+      <path class="cls-3" d="M921.52,825v-1.89a4.7,4.7,0,0,1,5-5,4.5,4.5,0,0,1,4.82,4.23h-1.49a3.13,3.13,0,0,0-3.3-2.91c-2.32,0-3.39,1.5-3.39,3.61V825c0,2.11,1.07,3.61,3.39,3.61a3.13,3.13,0,0,0,3.3-2.91h1.49a4.5,4.5,0,0,1-4.82,4.23A4.7,4.7,0,0,1,921.52,825Z"/>
+      <path class="cls-3" d="M934.94,813.64h1.67v6.54a3.9,3.9,0,0,1,3.68-2c2.53,0,4.13,1.72,4.13,4.43v7.1h-1.67v-6.86c0-1.92-.86-3.21-2.88-3.21a3.1,3.1,0,0,0-3.26,3.45v6.62h-1.67Z"/>
+      <path class="cls-3" d="M953.29,826.43v-.11h1.51v.06c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.58,1.58,0,0,0-1.41-1.56l-3.35-.77c-1.67-.38-2.64-1.39-2.64-2.86,0-2.12,1.78-3.22,4.27-3.22S962,819.6,962,821.7v.11H960.5v-.07c0-1.39-1-2.31-2.68-2.31s-2.71.66-2.71,1.91a1.38,1.38,0,0,0,1.19,1.46l3.45.81c1.74.42,2.78,1.28,2.78,2.9,0,2-1.94,3.46-4.58,3.46S953.29,828.6,953.29,826.43Z"/>
+      <path class="cls-3" d="M966.88,814.46a1,1,0,1,1-1,1A1,1,0,0,1,966.88,814.46Zm-.83,4h1.67V829.7h-1.67Z"/>
+      <path class="cls-3" d="M971.41,828.45l6.65-8.69h-6.38v-1.32h8.23v1.25l-6.65,8.69H980v1.32h-8.58Z"/>
+      <path class="cls-3" d="M982.72,825.06v-1.8c0-3.06,1.74-5.09,4.91-5.09s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,2,1,3.54,3.36,3.54a3,3,0,0,0,3.11-2.18h1.49a4.44,4.44,0,0,1-4.6,3.46A4.65,4.65,0,0,1,982.72,825.06Zm1.61-1.58h6.51V823c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M1009,829.7v-1.4a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.5-4.27-3.54s1.79-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.4v-2.09a2.61,2.61,0,0,0-2.82-2.73,2.83,2.83,0,0,0-2.81,1.7h-1.54a4.26,4.26,0,0,1,4.37-3c2.8,0,4.47,1.83,4.47,4.05v7.48Zm-.06-3.3c0-1.4-1.67-2.39-3.37-2.39-1.87,0-3.12.88-3.12,2.44s1.3,2.44,3,2.44S1009,827.86,1009,826.4Z"/>
+      <path class="cls-3" d="M1016.29,813.64v6.47a4.25,4.25,0,0,1,3.77-1.94c2.83,0,4.66,2.12,4.66,5v1.72c0,2.82-1.74,5-4.71,5a4.17,4.17,0,0,1-3.8-2v1.76h-1.59V813.64Zm6.76,9.64c0-2.07-1.17-3.74-3.3-3.74a3.49,3.49,0,0,0-3.46,3.74v1.45c0,2.18,1.21,3.87,3.44,3.87s3.32-1.67,3.32-3.74Z"/>
+      <path class="cls-3" d="M1027.73,825v-1.85a5,5,0,0,1,10.06,0V825a5,5,0,0,1-10.06,0Zm5-5.48c-2.22,0-3.41,1.51-3.41,3.65V825c0,2.14,1.19,3.66,3.41,3.66s3.41-1.52,3.41-3.66v-1.8C1036.18,821,1035,819.52,1032.77,819.52Z"/>
+      <path class="cls-3" d="M1045.77,829.7H1044l-4.13-11.26h1.65l3.36,9.59,3.37-9.59h1.65Z"/>
+      <path class="cls-3" d="M1052,825.06v-1.8c0-3.06,1.74-5.09,4.91-5.09s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,2,1,3.54,3.36,3.54a3,3,0,0,0,3.1-2.18h1.5A4.44,4.44,0,0,1,1057,830,4.65,4.65,0,0,1,1052,825.06Zm1.61-1.58h6.51V823c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M510.93,856.1h-1.59l-3-9.5-3,9.5h-1.59l-3.08-11.26h1.59l2.37,9.26,2.91-9.26h1.58l2.91,9.26,2.37-9.26H514Z"/>
+      <path class="cls-3" d="M517,840h1.67v6.54a3.9,3.9,0,0,1,3.68-2c2.53,0,4.13,1.72,4.13,4.43v7.1h-1.67v-6.86c0-1.92-.86-3.21-2.88-3.21a3.1,3.1,0,0,0-3.26,3.45v6.62H517Z"/>
+      <path class="cls-3" d="M531.47,840.86a1,1,0,1,1-1.05,1A1,1,0,0,1,531.47,840.86Zm-.83,4h1.67V856.1h-1.67Z"/>
+      <path class="cls-3" d="M536,851.42v-1.89a4.71,4.71,0,0,1,5-5,4.5,4.5,0,0,1,4.82,4.23h-1.5a3.13,3.13,0,0,0-3.3-2.91c-2.31,0-3.39,1.5-3.39,3.61v1.94c0,2.11,1.08,3.61,3.39,3.61a3.13,3.13,0,0,0,3.3-2.91h1.5a4.5,4.5,0,0,1-4.82,4.23A4.7,4.7,0,0,1,536,851.42Z"/>
+      <path class="cls-3" d="M549.4,840h1.67v6.54a3.9,3.9,0,0,1,3.68-2c2.53,0,4.14,1.72,4.14,4.43v7.1h-1.68v-6.86c0-1.92-.85-3.21-2.88-3.21a3.1,3.1,0,0,0-3.26,3.45v6.62H549.4Z"/>
+      <path class="cls-3" d="M567.75,852.83v-.11h1.52v.06c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.58,1.58,0,0,0-1.41-1.56l-3.34-.77c-1.68-.38-2.64-1.39-2.64-2.86,0-2.12,1.78-3.22,4.26-3.22s4.18,1.43,4.18,3.52v.12H575v-.07c0-1.39-1-2.31-2.69-2.31s-2.7.66-2.7,1.91a1.37,1.37,0,0,0,1.18,1.46l3.46.81c1.74.42,2.77,1.28,2.77,2.9,0,2-1.94,3.46-4.58,3.46S567.75,855,567.75,852.83Z"/>
+      <path class="cls-3" d="M579.85,851.42v-1.89a4.7,4.7,0,0,1,5-5,4.5,4.5,0,0,1,4.82,4.23h-1.49a3.13,3.13,0,0,0-3.3-2.91c-2.31,0-3.39,1.5-3.39,3.61v1.94c0,2.11,1.08,3.61,3.39,3.61a3.13,3.13,0,0,0,3.3-2.91h1.49a4.5,4.5,0,0,1-4.82,4.23A4.7,4.7,0,0,1,579.85,851.42Z"/>
+      <path class="cls-3" d="M600.9,856.1v-1.4a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.5-4.27-3.54s1.79-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.4v-2.09a2.61,2.61,0,0,0-2.82-2.73,2.83,2.83,0,0,0-2.81,1.7h-1.54a4.26,4.26,0,0,1,4.37-3c2.8,0,4.47,1.83,4.47,4v7.48Zm-.06-3.3c0-1.4-1.67-2.39-3.37-2.39-1.87,0-3.12.88-3.12,2.44s1.3,2.44,3,2.44S600.84,854.26,600.84,852.8Z"/>
+      <path class="cls-3" d="M606.6,840h1.67V856.1H606.6Z"/>
+      <path class="cls-3" d="M613.44,840.86a1,1,0,1,1-1.05,1A1,1,0,0,1,613.44,840.86Zm-.83,4h1.67V856.1h-1.67Z"/>
+      <path class="cls-3" d="M618.5,844.84h1.59v1.91a3.77,3.77,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.43v7.1h-1.68v-6.86c0-1.92-.85-3.21-2.88-3.21a3.1,3.1,0,0,0-3.26,3.45v6.62H618.5Z"/>
+      <path class="cls-3" d="M631.64,857.31h1.65a3.36,3.36,0,0,0,3.23,1.57c2,0,3.3-1.1,3.3-2.82v-2.55a4,4,0,0,1-3.78,2.13,4.39,4.39,0,0,1-4.6-4.6v-1.87a4.36,4.36,0,0,1,4.62-4.6,4.07,4.07,0,0,1,3.83,2.14v-1.87h1.6V856.1c0,2.34-2.11,4.1-5,4.1C634.28,860.2,632.25,859.16,631.64,857.31Zm8.18-6.44v-1.45a3.39,3.39,0,0,0-3.39-3.48,3,3,0,0,0-3.34,3.37v1.6c0,2.11,1.12,3.37,3.28,3.37A3.28,3.28,0,0,0,639.82,850.87Z"/>
+      <path class="cls-3" d="M650.45,851.46v-1.8c0-3.06,1.73-5.09,4.9-5.09s4.78,2.14,4.78,4.91v1.67h-8.08v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18H660a4.45,4.45,0,0,1-4.6,3.46A4.65,4.65,0,0,1,650.45,851.46Zm1.6-1.58h6.52v-.53c0-2-1-3.5-3.22-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M664.42,846.2h-1.65v-1.36h1.65v-1.5c0-2.18,1-3.3,3.25-3.3h1.59v1.37H667.8c-1.18,0-1.71.55-1.71,1.8v1.63h2.77v1.36h-2.77v9.9h-1.67Z"/>
+      <path class="cls-3" d="M672.62,846.2H671v-1.36h1.65v-1.5c0-2.18,1-3.3,3.26-3.3h1.58v1.37H676c-1.19,0-1.72.55-1.72,1.8v1.63h2.78v1.36h-2.78v9.9h-1.67Zm7.9-5.34a1,1,0,1,1-1.06,1A1,1,0,0,1,680.52,840.86Zm-.83,4h1.67V856.1h-1.67Z"/>
+      <path class="cls-3" d="M685,851.42v-1.89a4.7,4.7,0,0,1,5-5,4.5,4.5,0,0,1,4.82,4.23h-1.49a3.13,3.13,0,0,0-3.3-2.91c-2.31,0-3.39,1.5-3.39,3.61v1.94c0,2.11,1.08,3.61,3.39,3.61a3.13,3.13,0,0,0,3.3-2.91h1.49a4.5,4.5,0,0,1-4.82,4.23A4.7,4.7,0,0,1,685,851.42Z"/>
+      <path class="cls-3" d="M699.4,840.86a1,1,0,1,1-1.06,1A1,1,0,0,1,699.4,840.86Zm-.84,4h1.67V856.1h-1.67Z"/>
+      <path class="cls-3" d="M703.91,851.46v-1.8c0-3.06,1.73-5.09,4.9-5.09s4.78,2.14,4.78,4.91v1.67h-8.08v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.5a4.45,4.45,0,0,1-4.6,3.46A4.65,4.65,0,0,1,703.91,851.46Zm1.6-1.58H712v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M717.19,844.84h1.59v1.91a3.77,3.77,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.43v7.1H725v-6.86c0-1.92-.85-3.21-2.88-3.21a3.1,3.1,0,0,0-3.26,3.45v6.62h-1.67Z"/>
+      <path class="cls-3" d="M730.13,851.42v-1.89a4.7,4.7,0,0,1,5-5,4.5,4.5,0,0,1,4.82,4.23h-1.49a3.13,3.13,0,0,0-3.3-2.91c-2.32,0-3.39,1.5-3.39,3.61v1.94c0,2.11,1.07,3.61,3.39,3.61a3.13,3.13,0,0,0,3.3-2.91h1.49a4.5,4.5,0,0,1-4.82,4.23A4.7,4.7,0,0,1,730.13,851.42Z"/>
+      <path class="cls-3" d="M744.94,859.93l1.43-3.83-4.21-11.26h1.65l3.44,9.57,3.3-9.57h1.65L748,856.1l-1.48,3.83Z"/>
+      <path class="cls-3" d="M770,840V856.1h-1.59v-1.76a4.17,4.17,0,0,1-3.8,2c-3,0-4.71-2.22-4.71-5v-1.72c0-2.92,1.82-5,4.66-5a4.25,4.25,0,0,1,3.77,1.94V840Zm-8.43,11.22c0,2.07,1,3.74,3.32,3.74s3.44-1.69,3.44-3.87v-1.45a3.49,3.49,0,0,0-3.46-3.74c-2.13,0-3.3,1.67-3.3,3.74Z"/>
+      <path class="cls-3" d="M773.6,851.46v-1.8c0-3.06,1.74-5.09,4.91-5.09s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,2,1,3.54,3.36,3.54a3,3,0,0,0,3.11-2.18h1.49a4.44,4.44,0,0,1-4.6,3.46A4.65,4.65,0,0,1,773.6,851.46Zm1.61-1.58h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M786.34,851.42v-1.89a4.7,4.7,0,0,1,5-5,4.5,4.5,0,0,1,4.82,4.23h-1.49a3.13,3.13,0,0,0-3.31-2.91c-2.31,0-3.38,1.5-3.38,3.61v1.94c0,2.11,1.07,3.61,3.38,3.61a3.13,3.13,0,0,0,3.31-2.91h1.49a4.5,4.5,0,0,1-4.82,4.23A4.7,4.7,0,0,1,786.34,851.42Z"/>
+      <path class="cls-3" d="M799.76,844.84h1.56v2.59c.42-1.73,1.63-2.66,3.67-2.66h.27v1.56h-.33c-2.14,0-3.5,1.41-3.5,4.25v5.52h-1.67Z"/>
+      <path class="cls-3" d="M807.35,851.46v-1.8c0-3.06,1.74-5.09,4.9-5.09s4.78,2.14,4.78,4.91v1.67H809v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.5a4.45,4.45,0,0,1-4.6,3.46A4.65,4.65,0,0,1,807.35,851.46Zm1.6-1.58h6.52v-.53c0-2-1-3.5-3.22-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M828.2,856.1v-1.4a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.5-4.27-3.54s1.79-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.4v-2.09a2.61,2.61,0,0,0-2.82-2.73,2.83,2.83,0,0,0-2.81,1.7H821a4.26,4.26,0,0,1,4.37-3c2.8,0,4.47,1.83,4.47,4v7.48Zm-.06-3.3c0-1.4-1.67-2.39-3.37-2.39-1.87,0-3.12.88-3.12,2.44s1.3,2.44,3,2.44S828.14,854.26,828.14,852.8Z"/>
+      <path class="cls-3" d="M833.26,852.83v-.11h1.52v.06c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.57,1.57,0,0,0-1.41-1.56l-3.34-.77c-1.67-.38-2.64-1.39-2.64-2.86,0-2.12,1.78-3.22,4.27-3.22S842,846,842,848.09v.12h-1.52v-.07c0-1.39-1-2.31-2.68-2.31s-2.71.66-2.71,1.91a1.37,1.37,0,0,0,1.19,1.46l3.45.81c1.74.42,2.77,1.28,2.77,2.9,0,2-1.93,3.46-4.57,3.46S833.26,855,833.26,852.83Z"/>
+      <path class="cls-3" d="M845.36,851.46v-1.8c0-3.06,1.74-5.09,4.91-5.09s4.77,2.14,4.77,4.91v1.67H847v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.49a4.44,4.44,0,0,1-4.59,3.46A4.65,4.65,0,0,1,845.36,851.46Zm1.61-1.58h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M858.12,852.83v-.11h1.52v.06c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.58,1.58,0,0,0-1.41-1.56l-3.34-.77c-1.67-.38-2.64-1.39-2.64-2.86,0-2.12,1.78-3.22,4.27-3.22s4.18,1.43,4.18,3.52v.12h-1.52v-.07c0-1.39-1-2.31-2.68-2.31s-2.71.66-2.71,1.91a1.37,1.37,0,0,0,1.19,1.46l3.45.81c1.74.42,2.77,1.28,2.77,2.9,0,2-1.93,3.46-4.57,3.46S858.12,855,858.12,852.83Z"/>
+      <path class="cls-3" d="M875.63,852.83v-.11h1.52v.06c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.57,1.57,0,0,0-1.4-1.56l-3.35-.77c-1.67-.38-2.64-1.39-2.64-2.86,0-2.12,1.78-3.22,4.27-3.22s4.18,1.43,4.18,3.52v.12h-1.52v-.07c0-1.39-1-2.31-2.68-2.31s-2.71.66-2.71,1.91a1.37,1.37,0,0,0,1.19,1.46l3.45.81c1.74.42,2.78,1.28,2.78,2.9,0,2-1.94,3.46-4.58,3.46S875.63,855,875.63,852.83Z"/>
+      <path class="cls-3" d="M889.23,840.86a1,1,0,1,1-1.06,1A1,1,0,0,1,889.23,840.86Zm-.84,4h1.68V856.1h-1.68Z"/>
+      <path class="cls-3" d="M893.94,857.31h1.65a3.36,3.36,0,0,0,3.23,1.57c2,0,3.3-1.1,3.3-2.82v-2.55a4,4,0,0,1-3.78,2.13,4.39,4.39,0,0,1-4.6-4.6v-1.87a4.36,4.36,0,0,1,4.62-4.6,4.07,4.07,0,0,1,3.83,2.14v-1.87h1.61V856.1c0,2.34-2.12,4.1-5,4.1C896.58,860.2,894.55,859.16,893.94,857.31Zm8.18-6.44v-1.45a3.39,3.39,0,0,0-3.39-3.48,3,3,0,0,0-3.34,3.37v1.6c0,2.11,1.12,3.37,3.28,3.37A3.28,3.28,0,0,0,902.12,850.87Z"/>
+      <path class="cls-3" d="M907.91,844.84h1.58v1.91a3.79,3.79,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.43v7.1h-1.67v-6.86c0-1.92-.86-3.21-2.88-3.21a3.1,3.1,0,0,0-3.26,3.45v6.62h-1.67Z"/>
+      <path class="cls-3" d="M922.34,840.86a1,1,0,1,1-1.06,1A1,1,0,0,1,922.34,840.86Zm-.84,4h1.68V856.1H921.5Z"/>
+      <path class="cls-3" d="M928.19,846.2h-1.65v-1.36h1.65v-1.5c0-2.18,1-3.3,3.26-3.3H933v1.37h-1.45c-1.19,0-1.72.55-1.72,1.8v1.63h2.78v1.36h-2.78v9.9h-1.67Zm7.9-5.34a1,1,0,1,1-1.06,1A1,1,0,0,1,936.09,840.86Zm-.84,4h1.68V856.1h-1.68Z"/>
+      <path class="cls-3" d="M940.6,851.42v-1.89a4.7,4.7,0,0,1,5-5,4.5,4.5,0,0,1,4.82,4.23h-1.49a3.13,3.13,0,0,0-3.3-2.91c-2.32,0-3.39,1.5-3.39,3.61v1.94c0,2.11,1.07,3.61,3.39,3.61a3.13,3.13,0,0,0,3.3-2.91h1.49a4.5,4.5,0,0,1-4.82,4.23A4.7,4.7,0,0,1,940.6,851.42Z"/>
+      <path class="cls-3" d="M961.65,856.1v-1.4a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.5-4.27-3.54s1.78-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.4v-2.09a2.61,2.61,0,0,0-2.82-2.73,2.83,2.83,0,0,0-2.81,1.7h-1.54a4.26,4.26,0,0,1,4.37-3c2.8,0,4.47,1.83,4.47,4v7.48Zm-.06-3.3c0-1.4-1.67-2.39-3.37-2.39-1.87,0-3.12.88-3.12,2.44s1.29,2.44,3,2.44S961.59,854.26,961.59,852.8Z"/>
+      <path class="cls-3" d="M967.24,844.84h1.58v1.91a3.79,3.79,0,0,1,3.77-2.18c2.53,0,4.13,1.72,4.13,4.43v7.1h-1.67v-6.86c0-1.92-.86-3.21-2.88-3.21a3.1,3.1,0,0,0-3.26,3.45v6.62h-1.67Z"/>
+      <path class="cls-3" d="M984.6,856.1c-2.16,0-3.06-1.07-3.06-3V846.2h-1.91v-1.36h1.91V841.6h1.67v3.24h2.89v1.36h-2.89v6.91c0,1.15.42,1.63,1.68,1.63H986v1.36Z"/>
+      <path class="cls-3" d="M989.48,840h1.67V856.1h-1.67Z"/>
+      <path class="cls-3" d="M996.94,859.93l1.43-3.83-4.2-11.26h1.65l3.43,9.57,3.3-9.57h1.65L1000,856.1l-1.47,3.83Z"/>
+    </g>
+    <path id="m728421d6d4" class="cls-4" d="M300,298.55h20"/>
+    <path class="cls-4" d="M320,50.23V740h829.89"/>
+    <g>
+      <path class="cls-3" d="M263.22,292.39,260,293.91v-1.54l3.5-1.72h1.41v15.4h-1.69Z"/>
+      <path class="cls-3" d="M269.12,301.23v-5.76c0-2.93,2-5.13,5.39-5.13s5.39,2.2,5.39,5.13v5.76c0,2.93-2,5.13-5.39,5.13S269.12,304.16,269.12,301.23Zm9.09.07v-5.9a3.7,3.7,0,0,0-7.4,0v5.9a3.7,3.7,0,0,0,7.4,0Z"/>
+      <path class="cls-3" d="M282.93,296.52h.94a2.06,2.06,0,0,0,2.1,1.58,2.13,2.13,0,0,0,2.27-2.21,2.1,2.1,0,0,0-2.14-2.18,2.46,2.46,0,0,0-2,1.05h-.94l.85-5h4.48v.81h-3.7l-.56,3.22a2.79,2.79,0,0,1,2.1-.87,2.87,2.87,0,0,1,2.91,3,3.07,3.07,0,0,1-3.23,3.05A2.94,2.94,0,0,1,282.93,296.52Z"/>
+    </g>
+    <path id="m728421d6d4-2" data-name="m728421d6d4" class="cls-4" d="M300,210.22h20"/>
+    <g>
+      <path class="cls-3" d="M263.52,204.05l-3.21,1.52V204l3.5-1.71h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M269.42,212.9v-5.77c0-2.92,2-5.12,5.4-5.12s5.39,2.2,5.39,5.12v5.77c0,2.93-2,5.13-5.39,5.13S269.42,215.83,269.42,212.9Zm9.1.07v-5.9a3.7,3.7,0,0,0-7.4,0V213a3.7,3.7,0,0,0,7.4,0Z"/>
+      <path class="cls-3" d="M287.29,201.41l-2.6,4v0a2.21,2.21,0,0,1,1.81-.8,2.8,2.8,0,0,1,2.82,2.95,3.06,3.06,0,0,1-3.13,3,3,3,0,0,1-3.13-3,4.94,4.94,0,0,1,.95-2.64l2.23-3.53Zm-1.11,8.38a2.12,2.12,0,0,0,2.18-2.22,2.07,2.07,0,0,0-2.13-2.22,2.17,2.17,0,0,0-2.21,2.24A2.07,2.07,0,0,0,286.18,209.79Z"/>
+    </g>
+    <path id="m728421d6d4-3" data-name="m728421d6d4" class="cls-4" d="M300,121.88h20"/>
+    <g>
+      <path class="cls-3" d="M264.05,115.72l-3.21,1.52V115.7l3.5-1.72h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M270,124.56V118.8c0-2.93,2-5.13,5.39-5.13s5.4,2.2,5.4,5.13v5.76c0,2.93-2,5.13-5.4,5.13S270,127.49,270,124.56Zm9.09.07v-5.9a3.7,3.7,0,0,0-7.39,0v5.9a3.7,3.7,0,0,0,7.39,0Z"/>
+      <path class="cls-3" d="M288.28,113.88h-4.62v-.81h5.63v.7l-4,8.29h-1Z"/>
+    </g>
+    <path id="m728421d6d4-4" data-name="m728421d6d4" class="cls-4" d="M300,386.89h20"/>
+    <g>
+      <path class="cls-3" d="M263.37,380.72l-3.21,1.52V380.7l3.5-1.71h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M269.27,389.57V383.8c0-2.92,2-5.12,5.39-5.12s5.4,2.2,5.4,5.12v5.77c0,2.93-2,5.13-5.4,5.13S269.27,392.5,269.27,389.57Zm9.09.07v-5.9a3.7,3.7,0,0,0-7.4,0v5.9a3.7,3.7,0,0,0,7.4,0Z"/>
+      <path class="cls-3" d="M287.19,384.92h-4.26v-.73l3-6.11h.9l-2.93,6.09h3.3V382h.91v2.13h1.25v.75H288.1v2.14h-.91Z"/>
+    </g>
+    <path id="m728421d6d4-5" data-name="m728421d6d4" class="cls-4" d="M300,475.22h20"/>
+    <g>
+      <path class="cls-3" d="M263.42,469.06l-3.21,1.52V469l3.5-1.72h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M269.32,477.9v-5.76c0-2.93,2-5.13,5.4-5.13s5.39,2.2,5.39,5.13v5.76c0,2.93-2,5.13-5.39,5.13S269.32,480.83,269.32,477.9Zm9.09.07v-5.9a3.7,3.7,0,0,0-7.39,0V478a3.7,3.7,0,0,0,7.39,0Z"/>
+      <path class="cls-3" d="M283.61,466.41h5.23v.81L286,470.37h.35a2.55,2.55,0,0,1,2.82,2.52,2.88,2.88,0,0,1-3.12,2.69,2.81,2.81,0,0,1-3-2.63h.91a2,2,0,0,0,2.13,1.83,1.93,1.93,0,0,0,2.15-1.86,1.84,1.84,0,0,0-2.08-1.82h-1.22v-.77l2.77-3.12h-4Z"/>
+    </g>
+    <path id="m728421d6d4-6" data-name="m728421d6d4" class="cls-4" d="M300,563.56h20"/>
+    <g>
+      <path class="cls-3" d="M263.56,557.39l-3.21,1.52v-1.54l3.5-1.71h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M269.46,566.24v-5.77c0-2.92,2-5.12,5.4-5.12s5.39,2.2,5.39,5.12v5.77c0,2.93-2,5.13-5.39,5.13S269.46,569.17,269.46,566.24Zm9.1.07v-5.9a3.7,3.7,0,0,0-7.4,0v5.9a3.7,3.7,0,0,0,7.4,0Z"/>
+      <path class="cls-3" d="M283.24,563l3.8-4a2.86,2.86,0,0,0,1-1.94,1.68,1.68,0,0,0-1.84-1.65,1.71,1.71,0,0,0-1.89,1.81v.2h-.93v-.24A2.84,2.84,0,0,1,289,557a4,4,0,0,1-1.47,2.66l-3.17,3.23H289v.81h-5.8Z"/>
+    </g>
+    <path id="m728421d6d4-7" data-name="m728421d6d4" class="cls-4" d="M300,651.89h20"/>
+    <g>
+      <path class="cls-3" d="M266.73,645.73l-3.21,1.52v-1.54L267,644h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M272.63,654.57v-5.76c0-2.93,2-5.13,5.4-5.13s5.39,2.2,5.39,5.13v5.76c0,2.93-2,5.13-5.39,5.13S272.63,657.5,272.63,654.57Zm9.1.07v-5.9a3.7,3.7,0,0,0-7.4,0v5.9a3.7,3.7,0,0,0,7.4,0Z"/>
+      <path class="cls-3" d="M287.78,644.1l-1.87.88v-.89l2-1h.82v9h-1Z"/>
+    </g>
+    <path id="m728421d6d4-8" data-name="m728421d6d4" class="cls-4" d="M300,740h20"/>
+    <g>
+      <path class="cls-3" d="M263.14,733.84l-3.22,1.51v-1.54l3.51-1.71h1.4v15.4h-1.69Z"/>
+      <path class="cls-3" d="M269,742.68v-5.76c0-2.93,2-5.13,5.39-5.13s5.4,2.2,5.4,5.13v5.76c0,2.93-2,5.13-5.4,5.13S269,745.61,269,742.68Zm9.09.07v-5.9a3.7,3.7,0,0,0-7.4,0v5.9a3.7,3.7,0,0,0,7.4,0Z"/>
+      <path class="cls-3" d="M282.81,737.36V734a3.15,3.15,0,0,1,6.29,0v3.36a3.15,3.15,0,0,1-6.29,0Zm5.3,0V734a2.16,2.16,0,0,0-4.31,0v3.44a2.16,2.16,0,0,0,4.31,0Z"/>
+    </g>
+    <g>
+      <path class="cls-3" d="M348.05,776l-3.22,1.52V776l3.5-1.72h1.41v15.4h-1.69Z"/>
+      <path class="cls-3" d="M353.94,784.89v-5.77c0-2.93,2-5.13,5.39-5.13s5.39,2.2,5.39,5.13v5.77c0,2.92-2,5.12-5.39,5.12S353.94,787.81,353.94,784.89ZM363,785v-5.9a3.7,3.7,0,0,0-7.39,0V785a3.7,3.7,0,0,0,7.39,0Z"/>
+      <path class="cls-3" d="M369.08,774.41l-1.87.88v-.89l2-1h.82v9h-1Z"/>
+    </g>
+    <g>
+      <path class="cls-3" d="M472.22,776,469,777.56V776l3.49-1.72h1.41v15.4h-1.69Z"/>
+      <path class="cls-3" d="M478.11,784.89v-5.77c0-2.93,2-5.13,5.39-5.13s5.4,2.2,5.4,5.13v5.77c0,2.92-2,5.12-5.4,5.12S478.11,787.81,478.11,784.89Zm9.09.06v-5.9a3.7,3.7,0,0,0-7.39,0V785a3.7,3.7,0,0,0,7.39,0Z"/>
+      <path class="cls-3" d="M491.88,781.61l3.8-4a2.85,2.85,0,0,0,1-1.94,1.68,1.68,0,0,0-1.83-1.66,1.71,1.71,0,0,0-1.89,1.81V776H492v-.24a2.84,2.84,0,0,1,5.65-.12,4,4,0,0,1-1.48,2.66L493,781.57h4.66v.81h-5.8Z"/>
+    </g>
+    <g>
+      <path class="cls-3" d="M597.9,776l-3.21,1.52V776l3.5-1.72h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M603.8,784.89v-5.77c0-2.93,2-5.13,5.39-5.13s5.39,2.2,5.39,5.13v5.77c0,2.92-2,5.12-5.39,5.12S603.8,787.81,603.8,784.89Zm9.08.06v-5.9a3.7,3.7,0,0,0-7.39,0V785a3.7,3.7,0,0,0,7.39,0Z"/>
+      <path class="cls-3" d="M618.08,773.4h5.23v.8l-2.87,3.15h.35a2.55,2.55,0,0,1,2.82,2.53,2.88,2.88,0,0,1-3.12,2.68,2.81,2.81,0,0,1-3-2.63h.91a2,2,0,0,0,2.13,1.83,1.93,1.93,0,0,0,2.15-1.86,1.84,1.84,0,0,0-2.08-1.82h-1.22v-.77l2.77-3.12h-4Z"/>
+    </g>
+    <g>
+      <path class="cls-3" d="M723.63,776l-3.21,1.52V776l3.5-1.72h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M729.53,784.89v-5.77c0-2.93,2-5.13,5.39-5.13s5.39,2.2,5.39,5.13v5.77c0,2.92-2,5.12-5.39,5.12S729.53,787.81,729.53,784.89Zm9.08.06v-5.9a3.7,3.7,0,0,0-7.39,0V785a3.7,3.7,0,0,0,7.39,0Z"/>
+      <path class="cls-3" d="M747.44,780.24h-4.26v-.74l3-6.1h.9l-2.93,6.09h3.3v-2.13h.91v2.13h1.24v.75h-1.24v2.14h-.91Z"/>
+    </g>
+    <g>
+      <path class="cls-3" d="M849.31,776l-3.21,1.52V776l3.49-1.72H851v15.4h-1.69Z"/>
+      <path class="cls-3" d="M855.2,784.89v-5.77c0-2.93,2-5.13,5.39-5.13s5.4,2.2,5.4,5.13v5.77c0,2.92-2,5.12-5.4,5.12S855.2,787.81,855.2,784.89Zm9.09.06v-5.9a3.7,3.7,0,0,0-7.39,0V785a3.7,3.7,0,0,0,7.39,0Z"/>
+      <path class="cls-3" d="M869,780.17H870a2.06,2.06,0,0,0,2.1,1.58,2.13,2.13,0,0,0,2.27-2.21,2.11,2.11,0,0,0-2.14-2.18,2.49,2.49,0,0,0-2,1h-.94l.85-5h4.48v.8h-3.7l-.56,3.22a2.79,2.79,0,0,1,2.1-.87,2.87,2.87,0,0,1,2.92,3,3.07,3.07,0,0,1-3.24,3A2.93,2.93,0,0,1,869,780.17Z"/>
+    </g>
+    <g>
+      <path class="cls-3" d="M975.22,776,972,777.56V776l3.5-1.72h1.41v15.4h-1.69Z"/>
+      <path class="cls-3" d="M981.11,784.89v-5.77c0-2.93,2-5.13,5.39-5.13s5.39,2.2,5.39,5.13v5.77c0,2.92-2,5.12-5.39,5.12S981.11,787.81,981.11,784.89Zm9.09.06v-5.9a3.7,3.7,0,0,0-7.39,0V785a3.7,3.7,0,0,0,7.39,0Z"/>
+      <path class="cls-3" d="M999,773.4l-2.6,4h0a2.23,2.23,0,0,1,1.81-.79,2.8,2.8,0,0,1,2.82,2.95,3.06,3.06,0,0,1-3.13,3,3,3,0,0,1-3.13-3,4.94,4.94,0,0,1,1-2.65l2.23-3.52Zm-1.11,8.38a2.12,2.12,0,0,0,2.18-2.23,2.08,2.08,0,0,0-2.13-2.22,2.18,2.18,0,0,0-2.21,2.25A2.07,2.07,0,0,0,997.86,781.78Z"/>
+    </g>
+    <g>
+      <path class="cls-3" d="M1101.23,776l-3.21,1.52V776l3.5-1.72h1.41v15.4h-1.7Z"/>
+      <path class="cls-3" d="M1107.13,784.89v-5.77c0-2.93,2-5.13,5.39-5.13s5.39,2.2,5.39,5.13v5.77c0,2.92-2,5.12-5.39,5.12S1107.13,787.81,1107.13,784.89Zm9.09.06v-5.9a3.7,3.7,0,0,0-7.4,0V785a3.7,3.7,0,0,0,7.4,0Z"/>
+      <path class="cls-3" d="M1125.45,774.2h-4.62v-.8h5.64v.69l-4,8.29h-1Z"/>
+    </g>
+    <path id="m93b0483c22" class="cls-4" d="M357.73,759.7v-20"/>
+    <path id="m93b0483c22-2" data-name="m93b0483c22" class="cls-4" d="M483.48,759.7v-20"/>
+    <path id="m93b0483c22-3" data-name="m93b0483c22" class="cls-4" d="M609.24,759.7v-20"/>
+    <path id="m93b0483c22-4" data-name="m93b0483c22" class="cls-4" d="M735,759.7v-20"/>
+    <path id="m93b0483c22-5" data-name="m93b0483c22" class="cls-4" d="M860.75,759.7v-20"/>
+    <path id="m93b0483c22-6" data-name="m93b0483c22" class="cls-4" d="M986.5,759.7v-20"/>
+    <path id="m93b0483c22-7" data-name="m93b0483c22" class="cls-4" d="M1112.25,759.7v-20"/>
+    <line class="cls-5" x1="320" y1="900" x2="1150" y2="900"/>
+    <g>
+      <path class="cls-3" d="M360.07,936.68c-.06-2.16-1.65-3.94-4.42-3.94-3.17,0-4.6,2.18-4.6,5.26v2.55c0,3,1.37,5.39,4.71,5.39,2.75,0,4.45-1.56,4.45-4.53V941h-4.32v-1.43h6v1.63c0,3.54-2.17,6.12-6.11,6.12-4.29,0-6.41-3.06-6.41-6.82v-2.44c0-3.94,2.23-6.76,6.3-6.76,3.76,0,6,2.33,6.07,5.37Z"/>
+      <path class="cls-3" d="M365.18,942.38v-1.81c0-3.06,1.73-5.08,4.9-5.08s4.78,2.14,4.78,4.91v1.67h-8.08v.4c0,1.95,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.5a4.46,4.46,0,0,1-4.6,3.45A4.64,4.64,0,0,1,365.18,942.38Zm1.6-1.59h6.51v-.52c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+      <path class="cls-3" d="M378.46,935.76h1.59v1.91a3.76,3.76,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.42V947h-1.68v-6.86c0-1.92-.85-3.22-2.88-3.22a3.1,3.1,0,0,0-3.26,3.46V947h-1.67Z"/>
+      <path class="cls-3" d="M391.4,942.38v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.14,4.77,4.91v1.67H393v.4c0,1.95,1,3.54,3.36,3.54a3,3,0,0,0,3.1-2.18H401a4.44,4.44,0,0,1-4.6,3.45A4.65,4.65,0,0,1,391.4,942.38Zm1.61-1.59h6.51v-.52c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+      <path class="cls-3" d="M404.69,935.76h1.56v2.59c.42-1.74,1.63-2.66,3.67-2.66h.27v1.56h-.33c-2.14,0-3.5,1.41-3.5,4.25V947h-1.67Z"/>
+      <path class="cls-3" d="M420,947v-1.41a4.77,4.77,0,0,1-3.87,1.67c-2.67,0-4.27-1.49-4.27-3.54s1.78-3.52,4.46-3.52a4.92,4.92,0,0,1,3.61,1.41v-2.09a2.61,2.61,0,0,0-2.81-2.73,2.85,2.85,0,0,0-2.82,1.7h-1.54a4.26,4.26,0,0,1,4.38-3c2.79,0,4.46,1.83,4.46,4V947Zm-.07-3.3c0-1.41-1.67-2.4-3.36-2.4-1.88,0-3.13.88-3.13,2.44s1.3,2.45,3,2.45S419.91,945.17,419.91,943.72Z"/>
+      <path class="cls-3" d="M429.39,947c-2.15,0-3.06-1.08-3.06-3v-6.91h-1.91v-1.36h1.91v-3.24H428v3.24h2.88v1.36H428V944c0,1.14.41,1.63,1.67,1.63h1.12V947Z"/>
+      <path class="cls-3" d="M435.11,931.77a1,1,0,1,1-1.05,1A1,1,0,0,1,435.11,931.77Zm-.83,4H436V947h-1.67Z"/>
+      <path class="cls-3" d="M444.86,947H443.1L439,935.76h1.65l3.37,9.59,3.36-9.59H449Z"/>
+      <path class="cls-3" d="M451.08,942.38v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,1.95,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.5a4.46,4.46,0,0,1-4.6,3.45A4.65,4.65,0,0,1,451.08,942.38Zm1.61-1.59h6.51v-.52c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+      <path class="cls-3" d="M483.36,947h-1.63v-12.1l-4.25,8.38h-1.62l-4.25-8.38V947H470v-15.4h1.61l5.08,10.16,5.08-10.16h1.61Z"/>
+      <path class="cls-3" d="M487.14,942.31v-1.85a5,5,0,0,1,10.06,0v1.85a5,5,0,0,1-10.06,0Zm5-5.48c-2.22,0-3.41,1.52-3.41,3.66v1.8c0,2.13,1.19,3.65,3.41,3.65s3.41-1.52,3.41-3.65v-1.8C495.59,938.35,494.4,936.83,492.18,936.83Z"/>
+      <path class="cls-3" d="M510.31,931V947h-1.59v-1.76a4.19,4.19,0,0,1-3.8,2c-3,0-4.71-2.22-4.71-5v-1.72c0-2.93,1.83-5,4.66-5a4.23,4.23,0,0,1,3.77,1.94V931Zm-8.43,11.22c0,2.07,1,3.74,3.32,3.74s3.44-1.69,3.44-3.87V940.6a3.49,3.49,0,0,0-3.46-3.74c-2.13,0-3.3,1.67-3.3,3.74Z"/>
+      <path class="cls-3" d="M513.87,942.38v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,1.95,1,3.54,3.36,3.54a3,3,0,0,0,3.11-2.18h1.49a4.44,4.44,0,0,1-4.6,3.45A4.65,4.65,0,0,1,513.87,942.38Zm1.61-1.59H522v-.52c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+      <path class="cls-3" d="M527.27,931h1.67V947h-1.67Z"/>
+      <path class="cls-3" d="M532.64,943.74v-.11h1.51v.07c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.58,1.58,0,0,0-1.41-1.57l-3.35-.77c-1.67-.37-2.64-1.38-2.64-2.86,0-2.11,1.78-3.21,4.27-3.21s4.18,1.43,4.18,3.52v.11h-1.52v-.06c0-1.39-1-2.31-2.68-2.31s-2.71.66-2.71,1.91a1.37,1.37,0,0,0,1.19,1.45l3.45.82c1.74.41,2.78,1.27,2.78,2.9,0,2-1.94,3.45-4.58,3.45S532.64,945.92,532.64,943.74Z"/>
+    </g>
+    <circle class="cls-6" cx="330" cy="939.56" r="10"/>
+    <g>
+      <g>
+        <path class="cls-3" d="M780.76,947V945.7h3.8V932.94H780.8v-1.32H790v1.32h-3.76V945.7h3.81V947Z"/>
+        <path class="cls-3" d="M794,935.76h1.59v1.91a3.69,3.69,0,0,1,3.71-2.18,3.79,3.79,0,0,1,3.59,2.2,4.48,4.48,0,0,1,4.09-2.2c2.47,0,4,1.76,4,4.42V947h-1.67v-6.86c0-1.92-1-3.22-2.79-3.22s-3.22,1.13-3.22,3.46V947h-1.67v-6.86c0-1.92-.94-3.22-2.79-3.22s-3.22,1.13-3.22,3.46V947H794Z"/>
+        <path class="cls-3" d="M822.67,947v-1.41a4.76,4.76,0,0,1-3.87,1.67c-2.67,0-4.27-1.49-4.27-3.54s1.78-3.52,4.47-3.52a4.92,4.92,0,0,1,3.6,1.41v-2.09a2.61,2.61,0,0,0-2.81-2.73,2.85,2.85,0,0,0-2.82,1.7h-1.54a4.26,4.26,0,0,1,4.38-3c2.79,0,4.47,1.83,4.47,4V947Zm-.07-3.3c0-1.41-1.67-2.4-3.36-2.4-1.87,0-3.13.88-3.13,2.44s1.3,2.45,3,2.45S822.6,945.17,822.6,943.72Z"/>
+        <path class="cls-3" d="M827.9,948.23h1.65a3.38,3.38,0,0,0,3.24,1.56c2,0,3.3-1.1,3.3-2.81v-2.56a4,4,0,0,1-3.79,2.14,4.38,4.38,0,0,1-4.59-4.6v-1.87a4.36,4.36,0,0,1,4.62-4.6,4.06,4.06,0,0,1,3.83,2.14v-1.87h1.6V947c0,2.33-2.11,4.09-5,4.09C830.54,951.11,828.52,950.08,827.9,948.23Zm8.19-6.45v-1.45a3.39,3.39,0,0,0-3.39-3.47,3,3,0,0,0-3.34,3.36v1.61c0,2.11,1.12,3.36,3.27,3.36A3.28,3.28,0,0,0,836.09,941.78Z"/>
+        <path class="cls-3" d="M841.32,942.38v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,1.95,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.49a4.44,4.44,0,0,1-4.59,3.45A4.65,4.65,0,0,1,841.32,942.38Zm1.61-1.59h6.51v-.52c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+        <path class="cls-3" d="M859.76,940.62V938c0-3.83,2.16-6.71,6.4-6.71a5.83,5.83,0,0,1,6.07,5.39h-1.65a4.23,4.23,0,0,0-4.44-4c-3.15,0-4.66,2.2-4.66,5.22v2.72c0,3,1.51,5.22,4.66,5.22a4.23,4.23,0,0,0,4.44-4h1.65a5.83,5.83,0,0,1-6.07,5.39C861.92,947.33,859.76,944.45,859.76,940.62Z"/>
+        <path class="cls-3" d="M876.08,931h1.67V947h-1.67Z"/>
+        <path class="cls-3" d="M889.61,947v-1.41a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.49-4.27-3.54s1.78-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.41v-2.09a2.61,2.61,0,0,0-2.82-2.73,2.83,2.83,0,0,0-2.81,1.7h-1.54a4.25,4.25,0,0,1,4.37-3c2.8,0,4.47,1.83,4.47,4V947Zm-.06-3.3c0-1.41-1.67-2.4-3.37-2.4-1.87,0-3.12.88-3.12,2.44s1.29,2.45,3,2.45S889.55,945.17,889.55,943.72Z"/>
+        <path class="cls-3" d="M894.67,943.74v-.11h1.52v.07c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.58,1.58,0,0,0-1.41-1.57l-3.34-.77c-1.67-.37-2.64-1.38-2.64-2.86,0-2.11,1.78-3.21,4.27-3.21s4.18,1.43,4.18,3.52v.11h-1.52v-.06c0-1.39-1-2.31-2.68-2.31s-2.71.66-2.71,1.91a1.36,1.36,0,0,0,1.19,1.45l3.45.82c1.74.41,2.77,1.27,2.77,2.9,0,2-1.93,3.45-4.57,3.45S894.67,945.92,894.67,943.74Z"/>
+        <path class="cls-3" d="M906.79,943.74v-.11h1.52v.07c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.57,1.57,0,0,0-1.4-1.57l-3.35-.77c-1.67-.37-2.64-1.38-2.64-2.86,0-2.11,1.78-3.21,4.27-3.21s4.18,1.43,4.18,3.52v.11H914v-.06c0-1.39-1-2.31-2.68-2.31s-2.71.66-2.71,1.91a1.36,1.36,0,0,0,1.19,1.45l3.45.82c1.74.41,2.78,1.27,2.78,2.9,0,2-1.94,3.45-4.58,3.45S906.79,945.92,906.79,943.74Z"/>
+        <path class="cls-3" d="M920.39,931.77a1,1,0,1,1-1.06,1A1,1,0,0,1,920.39,931.77Zm-.84,4h1.68V947h-1.68Z"/>
+        <path class="cls-3" d="M926.24,937.12h-1.65v-1.36h1.65v-1.5c0-2.18,1-3.3,3.26-3.3h1.58v1.36h-1.45c-1.19,0-1.72.55-1.72,1.81v1.63h2.78v1.36h-2.78V947h-1.67Zm7.9-5.35a1,1,0,1,1-1.06,1A1,1,0,0,1,934.14,931.77Zm-.84,4H935V947H933.3Z"/>
+        <path class="cls-3" d="M938.65,942.38v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,1.95,1,3.54,3.36,3.54a3,3,0,0,0,3.1-2.18h1.5a4.44,4.44,0,0,1-4.6,3.45A4.65,4.65,0,0,1,938.65,942.38Zm1.61-1.59h6.51v-.52c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+        <path class="cls-3" d="M951.94,935.76h1.56v2.59c.42-1.74,1.63-2.66,3.67-2.66h.27v1.56h-.33c-2.14,0-3.5,1.41-3.5,4.25V947h-1.67Z"/>
+        <path class="cls-3" d="M959.61,943.74v-.11h1.52v.07c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13a1.57,1.57,0,0,0-1.4-1.57l-3.35-.77c-1.67-.37-2.64-1.38-2.64-2.86,0-2.11,1.78-3.21,4.27-3.21s4.18,1.43,4.18,3.52v.11h-1.52v-.06c0-1.39-1-2.31-2.68-2.31s-2.71.66-2.71,1.91a1.36,1.36,0,0,0,1.19,1.45l3.45.82c1.74.41,2.78,1.27,2.78,2.9,0,2-1.94,3.45-4.58,3.45S959.61,945.92,959.61,943.74Z"/>
+      </g>
+      <circle class="cls-7" cx="761.38" cy="939.56" r="10"/>
+    </g>
+    <g>
+      <g>
+        <path class="cls-3" d="M349.8,972.06h6.36c3.38,0,5.17,2.18,5.17,4.91a4.69,4.69,0,0,1-3.33,4.6l3.35,5.89h-2l-3.12-5.63h-4.76v5.63H349.8Zm6.14,8.41a3.31,3.31,0,0,0,3.67-3.5,3.35,3.35,0,0,0-3.67-3.52h-4.43v7Z"/>
+        <path class="cls-3" d="M364.27,982.82V981c0-3,1.74-5.08,4.91-5.08s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.49a4.44,4.44,0,0,1-4.59,3.46A4.65,4.65,0,0,1,364.27,982.82Zm1.61-1.58h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+        <path class="cls-3" d="M378.51,972.21a1,1,0,1,1-1.06,1A1,1,0,0,1,378.51,972.21Zm-.84,4h1.67v11.26h-1.67Z"/>
+        <path class="cls-3" d="M383.57,976.2h1.58v1.91a3.77,3.77,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.42v7.11h-1.67V980.6c0-1.92-.86-3.22-2.88-3.22a3.1,3.1,0,0,0-3.26,3.46v6.62h-1.67Z"/>
+        <path class="cls-3" d="M397.84,977.56h-1.65V976.2h1.65v-1.5c0-2.18,1-3.3,3.26-3.3h1.59v1.36h-1.46c-1.19,0-1.71.55-1.71,1.81v1.63h2.77v1.36h-2.77v9.9h-1.68Z"/>
+        <path class="cls-3" d="M404.18,982.75v-1.84a5,5,0,0,1,10.06,0v1.84a5,5,0,0,1-10.06,0Zm5-5.48c-2.22,0-3.41,1.52-3.41,3.66v1.8c0,2.14,1.19,3.65,3.41,3.65s3.41-1.51,3.41-3.65v-1.8C412.63,978.79,411.44,977.27,409.22,977.27Z"/>
+        <path class="cls-3" d="M417.8,976.2h1.56v2.59c.42-1.74,1.63-2.66,3.67-2.66h.27v1.56H423c-2.14,0-3.5,1.41-3.5,4.25v5.52H417.8Z"/>
+        <path class="cls-3" d="M425.39,982.78v-1.9a4.7,4.7,0,0,1,5-5,4.5,4.5,0,0,1,4.82,4.23h-1.49a3.13,3.13,0,0,0-3.31-2.91c-2.31,0-3.38,1.5-3.38,3.61v1.94c0,2.11,1.07,3.61,3.38,3.61a3.13,3.13,0,0,0,3.31-2.91h1.49a4.5,4.5,0,0,1-4.82,4.23A4.7,4.7,0,0,1,425.39,982.78Z"/>
+        <path class="cls-3" d="M438.26,982.82V981c0-3,1.74-5.08,4.9-5.08s4.78,2.14,4.78,4.91v1.67h-8.08v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.5a4.45,4.45,0,0,1-4.6,3.46A4.65,4.65,0,0,1,438.26,982.82Zm1.6-1.58h6.52v-.53c0-2-1-3.5-3.22-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+        <path class="cls-3" d="M451.55,976.2h1.58v1.91a3.71,3.71,0,0,1,3.72-2.18,3.8,3.8,0,0,1,3.59,2.2,4.47,4.47,0,0,1,4.09-2.2c2.46,0,4.05,1.76,4.05,4.42v7.11H466.9V980.6c0-1.92-.94-3.22-2.79-3.22s-3.21,1.13-3.21,3.46v6.62h-1.68V980.6c0-1.92-.94-3.22-2.79-3.22s-3.21,1.13-3.21,3.46v6.62h-1.67Z"/>
+        <path class="cls-3" d="M472,982.82V981c0-3,1.74-5.08,4.9-5.08s4.78,2.14,4.78,4.91v1.67h-8.08v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.5a4.45,4.45,0,0,1-4.6,3.46A4.65,4.65,0,0,1,472,982.82Zm1.6-1.58h6.52v-.53c0-2-1-3.5-3.22-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+        <path class="cls-3" d="M485.32,976.2h1.58v1.91a3.76,3.76,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.42v7.11h-1.67V980.6c0-1.92-.86-3.22-2.89-3.22a3.1,3.1,0,0,0-3.25,3.46v6.62h-1.67Z"/>
+        <path class="cls-3" d="M502.67,987.46c-2.15,0-3.05-1.08-3.05-3v-6.91H497.7V976.2h1.92V973h1.67v3.24h2.88v1.36h-2.88v6.91c0,1.14.42,1.63,1.67,1.63h1.12v1.36Z"/>
+        <path class="cls-3" d="M513.06,972.06h1.71v14h7.9v1.43h-9.61Z"/>
+        <path class="cls-3" d="M524.94,982.82V981c0-3,1.73-5.08,4.9-5.08s4.78,2.14,4.78,4.91v1.67h-8.08v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.5a4.45,4.45,0,0,1-4.6,3.46A4.65,4.65,0,0,1,524.94,982.82Zm1.6-1.58h6.52v-.53c0-2-1-3.5-3.22-3.5-2.42,0-3.3,1.6-3.3,3.67Z"/>
+        <path class="cls-3" d="M545.79,987.46v-1.41a4.73,4.73,0,0,1-3.87,1.68c-2.66,0-4.27-1.5-4.27-3.55s1.78-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.41V980a2.62,2.62,0,0,0-2.82-2.73,2.86,2.86,0,0,0-2.82,1.7h-1.54a4.28,4.28,0,0,1,4.38-3c2.8,0,4.47,1.83,4.47,4.05v7.48Zm-.06-3.3c0-1.41-1.68-2.4-3.37-2.4-1.87,0-3.12.88-3.12,2.45s1.29,2.44,3,2.44S545.73,985.61,545.73,984.16Z"/>
+        <path class="cls-3" d="M551.38,976.2h1.56v2.59c.42-1.74,1.63-2.66,3.68-2.66h.26v1.56h-.33c-2.13,0-3.5,1.41-3.5,4.25v5.52h-1.67Z"/>
+        <path class="cls-3" d="M559.7,976.2h1.58v1.91a3.76,3.76,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.42v7.11h-1.67V980.6c0-1.92-.86-3.22-2.89-3.22a3.1,3.1,0,0,0-3.25,3.46v6.62H559.7Z"/>
+        <path class="cls-3" d="M574.13,972.21a1,1,0,1,1-1.06,1A1,1,0,0,1,574.13,972.21Zm-.84,4H575v11.26h-1.67Z"/>
+        <path class="cls-3" d="M579.19,976.2h1.58v1.91a3.77,3.77,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.42v7.11H587V980.6c0-1.92-.86-3.22-2.88-3.22a3.1,3.1,0,0,0-3.26,3.46v6.62h-1.67Z"/>
+        <path class="cls-3" d="M592.32,988.67H594a3.38,3.38,0,0,0,3.24,1.56c2,0,3.3-1.1,3.3-2.81v-2.55a4.06,4.06,0,0,1-3.79,2.13,4.39,4.39,0,0,1-4.6-4.6v-1.87a4.36,4.36,0,0,1,4.62-4.6,4.06,4.06,0,0,1,3.83,2.14V976.2h1.61v11.26c0,2.33-2.11,4.09-5,4.09C595,991.55,592.94,990.52,592.32,988.67Zm8.19-6.44v-1.46a3.4,3.4,0,0,0-3.39-3.47,3,3,0,0,0-3.35,3.36v1.61c0,2.11,1.13,3.37,3.28,3.37A3.28,3.28,0,0,0,600.51,982.23Z"/>
+      </g>
+      <circle class="cls-8" cx="330" cy="980" r="10"/>
+    </g>
+  </g>
+  <g id="Layer_7" data-name="Layer 7">
+    <g class="cls-9">
+      <g>
+        <polyline class="cls-10" points="674.22 429.58 681.1 424.75 696.49 413.93 711.89 403.11 727.29 392.3 742.69 381.48 758.09 370.66 773.49 359.85 788.88 349.03 804.28 338.21 819.68 327.4 835.08 316.58 850.48 305.76 865.88 294.95 881.28 284.13 896.67 273.31 912.07 262.5 927.47 251.68 942.87 240.86 958.27 230.05 973.67 219.23 989.07 208.41 1004.47 197.6 1019.86 186.78 1035.26 175.97 1050.66 165.15 1066.06 154.33 1081.46 143.51 1096.86 132.7 1112.25 121.88"/>
+        <polyline class="cls-10" points="357.73 651.89 373.13 641.08 388.52 630.26 403.92 619.44 419.32 608.63 434.72 597.81 450.12 586.99 465.52 576.18 480.92 565.36 496.31 554.54 511.71 543.73 527.11 532.91 542.51 522.09 557.91 511.28 573.31 500.46 588.71 489.64 604.1 478.83 619.5 468.01 634.9 457.19"/>
+      </g>
+    </g>
+    <g>
+      <line class="cls-11" x1="675.31" y1="391.45" x2="524.4" y2="374.45"/>
+      <line class="cls-11" x1="778.68" y1="386.95" x2="684.77" y2="374.95"/>
+      <line class="cls-11" x1="776.76" y1="416.93" x2="693.59" y2="391.93"/>
+      <line class="cls-11" x1="777.19" y1="445.93" x2="691.63" y2="407.45"/>
+      <g>
+        <g>
+          <path class="cls-6" d="M537,657h-3.48l-5.1-15.4h3.14l3.7,12,3.69-12h3.15Z"/>
+          <path class="cls-6" d="M555.87,657h-3.06l-1-3.23h-6l-1,3.23H541.7l5.52-15.4h3.13Zm-9.31-5.67H551l-2.2-7Z"/>
+          <path class="cls-6" d="M558.62,641.59H569.4v2.55h-7.7v3.77h7v2.57h-7v4h7.72V657h-10.8Z"/>
+          <path class="cls-6" d="M583.46,639.79c-1.66,3.85-2.51,6.84-2.51,10.07s.85,6.23,2.51,10.08h-2.89a20.28,20.28,0,0,1,0-20.15Z"/>
+          <path class="cls-6" d="M585.15,652.53v-.4h3v.33c0,1.43,1.12,2.46,3.19,2.46,1.82,0,3-.81,3-2.24a1.68,1.68,0,0,0-1.45-1.74l-4-1c-2.33-.6-3.43-1.67-3.43-4,0-2.91,2.2-4.65,5.65-4.65s5.57,1.89,5.57,4.6v.38h-2.84V646c0-1.3-.83-2.29-2.73-2.29-1.62,0-2.68.66-2.68,2a1.54,1.54,0,0,0,1.32,1.63l4.23,1.12c2.26.6,3.36,1.94,3.36,4,0,2.92-2.62,4.86-6.09,4.86C587.55,657.3,585.15,655.54,585.15,652.53Z"/>
+          <path class="cls-6" d="M607.15,657h-3.48l-5.1-15.4h3.14l3.7,12,3.7-12h3.14Z"/>
+          <path class="cls-6" d="M615,641.59h3.08v6.32h6.32v-6.32h3.08V657h-3.08v-6.36h-6.32V657H615Z"/>
+          <path class="cls-6" d="M631.39,641.59h2.93L641,651.84V641.59h3V657h-2.92l-6.71-10.25V657h-3Z"/>
+          <path class="cls-6" d="M649.5,639.79a20.28,20.28,0,0,1,0,20.15h-2.88c1.65-3.85,2.5-6.84,2.5-10.08s-.85-6.22-2.5-10.07Z"/>
+        </g>
+        <path id="m5552310466" class="cls-6" d="M518.27,654.68a5,5,0,1,0-3.53-1.46A5,5,0,0,0,518.27,654.68Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-8" d="M1061.41,251.76c4.13,0,6.31,2.55,6.31,6.05v3.3c0,3.5-2.18,6.05-6.31,6.05h-6.25v-15.4Zm3.25,6c0-2.25-1.3-3.53-3.78-3.53h-2.66v10.39h2.66c2.48,0,3.78-1.28,3.78-3.52Z"/>
+          <path class="cls-8" d="M1070.45,262.25v-1.63c0-2.92,2-5.17,5.39-5.17s5.39,2.25,5.39,5.17v1.63c0,2.93-2,5.17-5.39,5.17S1070.45,265.18,1070.45,262.25Zm7.87-1.82a2.49,2.49,0,1,0-5,0v2a2.49,2.49,0,1,0,5,0Z"/>
+          <path class="cls-8" d="M1084.81,263.9v-5.76h-1.63v-2.42h1.63v-3.13h3v3.13h2.44v2.42h-2.44v5.54c0,.75.28,1.06,1.08,1.06h1.21v2.42h-2.07A2.91,2.91,0,0,1,1084.81,263.9Z"/>
+          <path class="cls-8" d="M1092.53,263.86c0-2.49,1.87-3.63,4.62-3.63a4.61,4.61,0,0,1,2.91,1v-1.57a1.81,1.81,0,0,0-2-1.84,2.05,2.05,0,0,0-2,1.07h-2.77a4.7,4.7,0,0,1,4.9-3.41c2.86,0,4.82,1.74,4.82,4.4v7.31h-2.88V266.1a4.33,4.33,0,0,1-3.3,1.32C1094.29,267.42,1092.53,266.08,1092.53,263.86Zm7.53-.11c0-1-1-1.61-2.33-1.61s-2.29.57-2.29,1.63.94,1.67,2.2,1.67S1100.06,264.72,1100.06,263.75Z"/>
+          <path class="cls-8" d="M1114.18,254.84l-3.28,1.21v-2.69l3.81-1.6h2.51v15.4h-3Z"/>
+          <path class="cls-8" d="M1126.83,267.16h-3.12l-3.9-11.44h3l2.46,8.38,2.47-8.38h3Z"/>
+          <path class="cls-8" d="M1135.35,254.84l-3.28,1.21v-2.69l3.8-1.6h2.51v15.4h-3Z"/>
+        </g>
+        <path id="m5552310466-2" data-name="m5552310466" class="cls-8" d="M1043.77,264.85a5,5,0,1,0-3.54-1.46A5,5,0,0,0,1043.77,264.85Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-8" d="M556.55,451.79H563c3.55,0,5.72,2.09,5.72,5.17s-2.26,5.26-5.72,5.26h-3.32v5h-3.08Zm6.2,7.9a2.56,2.56,0,0,0,2.84-2.73c0-1.52-.88-2.64-2.84-2.64h-3.12v5.37Z"/>
+          <path class="cls-8" d="M570.65,462.29v-1.63c0-2.93,2-5.17,5.39-5.17s5.39,2.24,5.39,5.17v1.63c0,2.92-2,5.17-5.39,5.17S570.65,465.21,570.65,462.29Zm7.88-1.83a2.49,2.49,0,1,0-5,0v2a2.49,2.49,0,1,0,5,0Z"/>
+          <path class="cls-8" d="M584.42,455.75h2.86v1.37a3.55,3.55,0,0,1,3.37-1.63c2.49,0,4.05,1.63,4.05,4.38v7.32h-3v-6.84a2,2,0,0,0-2.09-2.27,2.07,2.07,0,0,0-2.2,2.34v6.77h-3Z"/>
+          <path class="cls-8" d="M597.71,467.88h2.91a2.52,2.52,0,0,0,2.46,1.14c1.52,0,2.49-.84,2.49-2.11V465.1a4,4,0,0,1-3.28,1.57,4.52,4.52,0,0,1-4.71-4.76v-1.67a4.48,4.48,0,0,1,4.73-4.75,3.82,3.82,0,0,1,3.39,1.65v-1.39h2.86V467c0,2.44-2.29,4.34-5.48,4.34C600.4,471.29,598.17,470,597.71,467.88Zm7.86-6.12v-1.32a2.41,2.41,0,0,0-2.49-2.55,2.31,2.31,0,0,0-2.53,2.55v1.32a2.28,2.28,0,0,0,2.44,2.51A2.42,2.42,0,0,0,605.57,461.76Z"/>
+        </g>
+        <path id="m5552310466-3" data-name="m5552310466" class="cls-8" d="M545.16,464.88a5,5,0,1,0-3.54-1.46A5,5,0,0,0,545.16,464.88Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-8" d="M781.88,437.86h6.91c2.71,0,4.4,1.59,4.4,3.9a3.88,3.88,0,0,1-1.94,3.54,3.48,3.48,0,0,1,2.87,3.63c0,2.58-2.14,4.34-5.22,4.34h-7Zm6.23,6.39a1.88,1.88,0,0,0,2-2.05,1.82,1.82,0,0,0-2-1.92h-3.22v4Zm.64,6.6a2,2,0,0,0,2.29-2.12,2,2,0,0,0-2.32-2.09h-3.85v4.21Z"/>
+          <path class="cls-8" d="M796.8,441.82h2.84v2.23c.37-1.72,1.71-2.31,3.43-2.31h.35v3h-.62c-1.95,0-3,1-3,3.28v5.28h-3Z"/>
+          <path class="cls-8" d="M805.29,448.43v-1.57c0-3.1,1.85-5.3,5.28-5.3a4.74,4.74,0,0,1,5.1,5v2h-7.48v.29a2.26,2.26,0,0,0,2.42,2.5,2.35,2.35,0,0,0,2.36-1.45h2.62a4.83,4.83,0,0,1-5,3.61C807.38,453.53,805.29,451.64,805.29,448.43Zm2.9-2h4.73v-.29a2.23,2.23,0,0,0-2.35-2.51c-1.72,0-2.38,1.15-2.38,2.69Z"/>
+          <path class="cls-8" d="M818.14,450c0-2.49,1.87-3.63,4.62-3.63a4.6,4.6,0,0,1,2.9,1v-1.56a1.82,1.82,0,0,0-2-1.85,2.07,2.07,0,0,0-2,1.08h-2.77a4.7,4.7,0,0,1,4.91-3.41c2.86,0,4.82,1.74,4.82,4.4v7.31h-2.89v-1.06a4.32,4.32,0,0,1-3.3,1.32C819.9,453.53,818.14,452.19,818.14,450Zm7.52-.11c0-1-1-1.61-2.33-1.61s-2.29.57-2.29,1.63,1,1.67,2.2,1.67S825.66,450.82,825.66,449.86Z"/>
+          <path class="cls-8" d="M832,437.2h3v9.95l3.64-5.33h3.41l-3.94,5.59,4.07,5.86H838.8L835,447.68v5.59h-3Z"/>
+          <path class="cls-8" d="M843.61,448.36v-1.63c0-2.93,2-5.17,5.39-5.17s5.4,2.24,5.4,5.17v1.63c0,2.93-2,5.17-5.4,5.17S843.61,451.29,843.61,448.36Zm7.88-1.83a2.49,2.49,0,1,0-5,0v2a2.49,2.49,0,1,0,5,0Z"/>
+          <path class="cls-8" d="M867.46,453.27H864.6V451.9a3.77,3.77,0,0,1-3.32,1.63c-2.57,0-4-1.78-4-4.38v-7.33h3v6.91c0,1.46.68,2.29,2,2.29a2.06,2.06,0,0,0,2.18-2.31v-6.89h3Z"/>
+          <path class="cls-8" d="M871.71,450v-5.76h-1.63v-2.43h1.63V438.7h3v3.12h2.44v2.43h-2.44v5.54c0,.75.29,1.06,1.08,1.06H877v2.42h-2.07A2.91,2.91,0,0,1,871.71,450Z"/>
+        </g>
+        <path id="m5552310466-4" data-name="m5552310466" class="cls-8" d="M691.63,412.45a5,5,0,1,0-3.54-1.47A5,5,0,0,0,691.63,412.45Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-8" d="M794.72,417.35a6.83,6.83,0,0,1-2.2,5.37l1.15,1.71-2.16,1.41-1.25-1.91a9.44,9.44,0,0,1-2.4.3c-4.51,0-6.86-3-6.86-6.88V415.1c0-3.91,2.35-6.88,6.86-6.88s6.86,3,6.86,6.88Zm-6,4.27-1.21-1.87,2.13-1.33,1.24,1.85a4.76,4.76,0,0,0,.74-2.77V415c0-2.44-1.21-4.18-3.78-4.18s-3.78,1.74-3.78,4.18v2.55c0,2.44,1.21,4.18,3.78,4.18A6,6,0,0,0,788.74,421.62Z"/>
+          <path class="cls-8" d="M800.84,422.56v1.37H798V407.86h3v5.9a4,4,0,0,1,3.28-1.54c3.08,0,4.8,2.29,4.8,5v1.89a4.72,4.72,0,0,1-4.82,5A3.77,3.77,0,0,1,800.84,422.56Zm5.21-3.54v-1.63a2.55,2.55,0,1,0-5.08,0V419a2.52,2.52,0,0,0,2.53,2.75A2.46,2.46,0,0,0,806.05,419Z"/>
+          <path class="cls-8" d="M811.53,419.09v-1.57c0-3.1,1.85-5.3,5.28-5.3a4.75,4.75,0,0,1,5.11,5v2h-7.48v.29a2.26,2.26,0,0,0,2.42,2.5,2.34,2.34,0,0,0,2.35-1.45h2.62a4.81,4.81,0,0,1-5,3.61C813.62,424.19,811.53,422.3,811.53,419.09Zm2.91-2h4.73v-.29a2.24,2.24,0,0,0-2.36-2.51c-1.72,0-2.37,1.15-2.37,2.69Z"/>
+          <path class="cls-8" d="M824.91,412.48h2.84v2.23c.37-1.72,1.71-2.31,3.43-2.31h.35v3h-.62c-1.95,0-3,1-3,3.28v5.28h-3Z"/>
+          <path class="cls-8" d="M834.87,420.67V414.9h-1.63v-2.42h1.63v-3.12h3v3.12h2.44v2.42h-2.44v5.55c0,.75.28,1.06,1.07,1.06h1.22v2.42h-2.07A2.92,2.92,0,0,1,834.87,420.67Z"/>
+        </g>
+        <path id="m5552310466-5" data-name="m5552310466" class="cls-8" d="M693.59,396.93a5,5,0,1,0-3.54-1.47A5,5,0,0,0,693.59,396.93Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-8" d="M783.37,378.54h6.91c2.7,0,4.4,1.59,4.4,3.9a3.88,3.88,0,0,1-1.94,3.54,3.47,3.47,0,0,1,2.86,3.63c0,2.58-2.13,4.34-5.21,4.34h-7Zm6.23,6.38a1.88,1.88,0,0,0,2-2,1.83,1.83,0,0,0-2-1.92h-3.21v4Zm.63,6.61a2,2,0,0,0,2.29-2.12,2,2,0,0,0-2.31-2.09h-3.85v4.21Z"/>
+          <path class="cls-8" d="M797.78,389.11v-1.57c0-3.1,1.85-5.3,5.28-5.3a4.74,4.74,0,0,1,5.1,5v2h-7.48v.29a2.26,2.26,0,0,0,2.42,2.5,2.35,2.35,0,0,0,2.36-1.45h2.62a4.83,4.83,0,0,1-5,3.61C799.87,394.21,797.78,392.32,797.78,389.11Zm2.9-2h4.73v-.29a2.23,2.23,0,0,0-2.35-2.51c-1.72,0-2.38,1.15-2.38,2.69Z"/>
+          <path class="cls-8" d="M810.63,390.65c0-2.49,1.87-3.64,4.62-3.64a4.6,4.6,0,0,1,2.9,1v-1.56a1.82,1.82,0,0,0-2-1.85,2.07,2.07,0,0,0-2,1.08h-2.77a4.7,4.7,0,0,1,4.91-3.41c2.86,0,4.81,1.74,4.81,4.4V394h-2.88v-1.06a4.33,4.33,0,0,1-3.3,1.32C812.39,394.21,810.63,392.87,810.63,390.65Zm7.52-.11c0-1-1-1.61-2.33-1.61s-2.29.57-2.29,1.63,1,1.67,2.2,1.67S818.15,391.5,818.15,390.54Z"/>
+          <path class="cls-8" d="M824.53,382.5h2.86v1.37a3.42,3.42,0,0,1,3.3-1.63,3.74,3.74,0,0,1,3.3,1.63,4.52,4.52,0,0,1,3.79-1.63c2.39,0,4.09,1.63,4.09,4.38V394h-3V387.1c0-1.32-.6-2.26-2-2.26s-2.13.79-2.13,2.33V394h-3V387.1c0-1.32-.59-2.26-2-2.26s-2.14.79-2.14,2.33V394h-3Z"/>
+          <path class="cls-8" d="M845.25,382.5h2.84v2.23c.38-1.72,1.72-2.31,3.44-2.31h.35v3h-.62c-2,0-3,1-3,3.28V394h-3Z"/>
+          <path class="cls-8" d="M856.17,377.77a1.74,1.74,0,1,1-1.74,1.74A1.68,1.68,0,0,1,856.17,377.77Zm-1.5,4.73h3V394h-3Z"/>
+          <path class="cls-8" d="M860.85,389.17v-1.89c0-2.75,1.72-5,4.8-5a4,4,0,0,1,3.28,1.5v-5.86h3V394h-2.86v-1.37a3.93,3.93,0,0,1-3.39,1.63A4.72,4.72,0,0,1,860.85,389.17Zm8.08-.13v-1.67a2.55,2.55,0,1,0-5.09,0V389a2.47,2.47,0,0,0,2.56,2.75A2.52,2.52,0,0,0,868.93,389Z"/>
+          <path class="cls-8" d="M874.91,389.11v-1.57c0-3.1,1.85-5.3,5.28-5.3a4.75,4.75,0,0,1,5.11,5v2h-7.49v.29a2.26,2.26,0,0,0,2.43,2.5,2.34,2.34,0,0,0,2.35-1.45h2.62a4.81,4.81,0,0,1-5,3.61C877,394.21,874.91,392.32,874.91,389.11Zm2.9-2h4.74v-.29a2.24,2.24,0,0,0-2.36-2.51c-1.72,0-2.38,1.15-2.38,2.69Z"/>
+          <path class="cls-8" d="M888.29,382.5h2.83v2.23c.38-1.72,1.72-2.31,3.44-2.31h.35v3h-.62c-2,0-3,1-3,3.28V394h-3Z"/>
+        </g>
+        <path id="m5552310466-6" data-name="m5552310466" class="cls-8" d="M684.77,380a5,5,0,1,0-3.54-1.47A5,5,0,0,0,684.77,380Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-7" d="M614.36,444.77v-2.2c0-3.94,2.09-6.91,6.73-6.91a6.07,6.07,0,0,1,6.45,5.79h-2.86a3.41,3.41,0,0,0-3.59-3.26c-2.57,0-3.67,1.76-3.67,4.23v2.5c0,2.47,1.1,4.23,3.67,4.23a3.41,3.41,0,0,0,3.59-3.26h2.86a6.07,6.07,0,0,1-6.45,5.79C616.45,451.68,614.36,448.71,614.36,444.77Z"/>
+          <path class="cls-7" d="M630.46,451.37V449h3.21V438.39h-3.12V436h9.33v2.42h-3.13V449H640v2.42Z"/>
+          <path class="cls-7" d="M643.46,436H654v2.55h-7.46v4.07h6.8v2.55h-6.8v6.23h-3.08Z"/>
+          <path class="cls-7" d="M668.7,451.37h-3.06l-1-3.23h-6l-1,3.23h-3.06l5.52-15.4h3.13Zm-9.31-5.68h4.42l-2.2-6.95Z"/>
+          <path class="cls-7" d="M677.76,446.44h-3.23v4.93h-3.08V436h6.95c3.63,0,5.57,2.24,5.57,5.23a5,5,0,0,1-3,4.76l3.21,5.41h-3.56ZM678,444a2.61,2.61,0,0,0,2.86-2.78,2.54,2.54,0,0,0-2.86-2.7h-3.5V444Z"/>
+          <path class="cls-7" d="M689.49,439.05l-3.28,1.21v-2.69L690,436h2.51v15.4h-3Z"/>
+          <path class="cls-7" d="M696.11,446.31V441c0-3.15,2.26-5.37,5.81-5.37s5.81,2.22,5.81,5.37v5.28c0,3.15-2.27,5.37-5.81,5.37S696.11,449.46,696.11,446.31Zm8.58-5.32a2.78,2.78,0,1,0-5.55,0v5.36a2.78,2.78,0,1,0,5.55,0Z"/>
+        </g>
+        <path id="m5552310466-7" data-name="m5552310466" class="cls-7" d="M603.43,449.06a5,5,0,0,0,5-5,5,5,0,1,0-8.54,3.54A5,5,0,0,0,603.43,449.06Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-6" d="M457.81,721h-3.06l-1-3.23h-6l-1,3.23h-3.06l5.53-15.4h3.12Zm-9.31-5.67h4.43l-2.2-7Z"/>
+          <path class="cls-6" d="M470,721h-2.86v-1.36a3.77,3.77,0,0,1-3.32,1.63c-2.58,0-4-1.79-4-4.38v-7.33h3v6.91c0,1.45.68,2.29,2,2.29A2.06,2.06,0,0,0,467,716.4v-6.89h3Z"/>
+          <path class="cls-6" d="M474.27,717.7v-5.77h-1.63v-2.42h1.63v-3.12h3v3.12h2.44v2.42h-2.44v5.55c0,.74.29,1,1.08,1h1.21V721H477.5A2.9,2.9,0,0,1,474.27,717.7Z"/>
+          <path class="cls-6" d="M482,716.05v-1.63c0-2.93,2-5.17,5.39-5.17s5.39,2.24,5.39,5.17v1.63c0,2.92-2,5.17-5.39,5.17S482,719,482,716.05Zm7.88-1.83a2.49,2.49,0,1,0-5,0v2a2.49,2.49,0,1,0,5,0Z"/>
+          <path class="cls-6" d="M495.27,716.11v-1.56c0-3.1,1.85-5.3,5.29-5.3a4.74,4.74,0,0,1,5.1,5v2h-7.48v.28a2.26,2.26,0,0,0,2.42,2.51,2.32,2.32,0,0,0,2.35-1.45h2.62a4.81,4.81,0,0,1-5,3.61C497.37,721.22,495.27,719.32,495.27,716.11Zm2.91-2h4.73v-.28a2.24,2.24,0,0,0-2.35-2.51c-1.72,0-2.38,1.14-2.38,2.68Z"/>
+          <path class="cls-6" d="M508.65,709.51h2.86v1.36a3.55,3.55,0,0,1,3.37-1.62c2.48,0,4,1.62,4,4.37V721h-3v-6.84c0-1.32-.63-2.27-2.09-2.27a2.06,2.06,0,0,0-2.2,2.33V721h-3Z"/>
+          <path class="cls-6" d="M521.81,716.05v-1.63c0-3,2-5.17,5.41-5.17,3.06,0,5.13,1.91,5.21,4.53h-2.72a2.25,2.25,0,0,0-2.42-2.11,2.34,2.34,0,0,0-2.49,2.64v1.84a2.35,2.35,0,0,0,2.49,2.65,2.25,2.25,0,0,0,2.42-2.12h2.72c-.08,2.62-2.15,4.54-5.21,4.54C523.77,721.22,521.81,719,521.81,716.05Z"/>
+          <path class="cls-6" d="M534.92,716.05v-1.63c0-2.93,2-5.17,5.39-5.17s5.39,2.24,5.39,5.17v1.63c0,2.92-2,5.17-5.39,5.17S534.92,719,534.92,716.05Zm7.88-1.83a2.49,2.49,0,1,0-5,0v2a2.49,2.49,0,1,0,5,0Z"/>
+          <path class="cls-6" d="M548.18,716.18v-1.9c0-2.75,1.72-5,4.8-5a4,4,0,0,1,3.28,1.49v-5.85h3V721h-2.86v-1.36a3.93,3.93,0,0,1-3.39,1.63A4.73,4.73,0,0,1,548.18,716.18Zm8.08-.13v-1.68a2.55,2.55,0,1,0-5.08,0v1.63a2.47,2.47,0,0,0,2.55,2.75A2.53,2.53,0,0,0,556.26,716.05Z"/>
+          <path class="cls-6" d="M562.24,716.11v-1.56c0-3.1,1.85-5.3,5.28-5.3a4.74,4.74,0,0,1,5.11,5v2h-7.48v.28a2.26,2.26,0,0,0,2.42,2.51,2.33,2.33,0,0,0,2.35-1.45h2.62a4.81,4.81,0,0,1-5,3.61C564.33,721.22,562.24,719.32,562.24,716.11Zm2.91-2h4.73v-.28a2.24,2.24,0,0,0-2.36-2.51c-1.71,0-2.37,1.14-2.37,2.68Z"/>
+          <path class="cls-6" d="M575.62,709.51h2.84v2.22c.37-1.71,1.71-2.31,3.43-2.31h.35v3h-.61c-2,0-3,1-3,3.28V721h-3Z"/>
+          <path class="cls-6" d="M595.75,703.75c-1.65,3.85-2.51,6.84-2.51,10.07s.86,6.23,2.51,10.08h-2.88a20.21,20.21,0,0,1,0-20.15Z"/>
+          <path class="cls-6" d="M597.44,716.49v-.4h2.95v.33c0,1.43,1.12,2.46,3.19,2.46,1.83,0,3-.81,3-2.24a1.68,1.68,0,0,0-1.46-1.74l-4-1c-2.33-.6-3.44-1.68-3.44-4,0-2.9,2.2-4.64,5.66-4.64s5.57,1.89,5.57,4.6v.37h-2.84v-.3c0-1.3-.84-2.29-2.73-2.29-1.63,0-2.68.66-2.68,2a1.54,1.54,0,0,0,1.32,1.63l4.22,1.12c2.27.59,3.37,1.94,3.37,4,0,2.92-2.62,4.86-6.1,4.86S597.44,719.5,597.44,716.49Z"/>
+          <path class="cls-6" d="M619.44,721H616l-5.11-15.4H614l3.7,12,3.69-12h3.15Z"/>
+          <path class="cls-6" d="M627.27,705.55h3.08v6.31h6.32v-6.31h3.08V721h-3.08v-6.36h-6.32V721h-3.08Z"/>
+          <path class="cls-6" d="M643.69,705.55h2.92l6.71,10.25V705.55h3V721h-2.93L646.7,710.7V721h-3Z"/>
+          <path class="cls-6" d="M661.79,703.75a20.21,20.21,0,0,1,0,20.15h-2.88c1.65-3.85,2.51-6.84,2.51-10.08s-.86-6.22-2.51-10.07Z"/>
+        </g>
+        <path id="m5552310466-8" data-name="m5552310466" class="cls-6" d="M433.44,718.64a5,5,0,1,0-3.54-1.46A5,5,0,0,0,433.44,718.64Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-6" d="M653.34,276.62h6.91c2.71,0,4.4,1.59,4.4,3.9a3.89,3.89,0,0,1-1.93,3.54,3.48,3.48,0,0,1,2.86,3.63c0,2.58-2.14,4.34-5.22,4.34h-7Zm6.23,6.39a1.88,1.88,0,0,0,2-2.05,1.81,1.81,0,0,0-2-1.91h-3.21v4Zm.64,6.6a2.11,2.11,0,1,0,0-4.21h-3.85v4.21Z"/>
+          <path class="cls-6" d="M670,275.85a1.74,1.74,0,1,1-1.73,1.74A1.69,1.69,0,0,1,670,275.85Zm-1.49,4.74h3V292h-3Z"/>
+          <path class="cls-6" d="M675.21,276h3V292h-3Z"/>
+          <path class="cls-6" d="M681.83,276h3V292h-3Z"/>
+          <path class="cls-6" d="M690.08,275.85a1.74,1.74,0,1,1-1.74,1.74A1.69,1.69,0,0,1,690.08,275.85Zm-1.49,4.74h3V292h-3Z"/>
+          <path class="cls-6" d="M694.77,287.12v-1.63c0-2.92,2-5.17,5.39-5.17s5.39,2.25,5.39,5.17v1.63c0,2.93-2,5.17-5.39,5.17S694.77,290.05,694.77,287.12Zm7.88-1.83a2.49,2.49,0,1,0-5,0v2a2.49,2.49,0,1,0,5,0Z"/>
+          <path class="cls-6" d="M708.54,280.59h2.86V282a3.54,3.54,0,0,1,3.37-1.63c2.48,0,4.05,1.63,4.05,4.38V292h-3v-6.85c0-1.32-.63-2.26-2.09-2.26a2.06,2.06,0,0,0-2.2,2.33V292h-3Z"/>
+          <path class="cls-6" d="M741.17,287.76l2.22-11.14h3L742.8,292h-3l-3.37-11.86L733.09,292h-3l-3.63-15.41h3l2.22,11.14L735,276.62h3Z"/>
+          <path class="cls-6" d="M747.83,287.12v-1.63c0-2.92,2-5.17,5.39-5.17s5.39,2.25,5.39,5.17v1.63c0,2.93-2,5.17-5.39,5.17S747.83,290.05,747.83,287.12Zm7.88-1.83a2.49,2.49,0,1,0-5,0v2a2.49,2.49,0,1,0,5,0Z"/>
+          <path class="cls-6" d="M761.6,280.59h2.84v2.22c.38-1.72,1.72-2.31,3.44-2.31h.35v3h-.62c-2,0-3,1-3,3.28V292h-3Z"/>
+          <path class="cls-6" d="M770.1,287.25v-1.89c0-2.75,1.71-5,4.79-5a4,4,0,0,1,3.28,1.5V276h3V292H778.3v-1.37a3.91,3.91,0,0,1-3.38,1.63A4.72,4.72,0,0,1,770.1,287.25Zm8.07-.13v-1.67a2.55,2.55,0,1,0-5.08,0v1.63a2.46,2.46,0,0,0,2.55,2.75A2.52,2.52,0,0,0,778.17,287.12Z"/>
+          <path class="cls-6" d="M790.27,276.62h3.08v12.85h7.08V292H790.27Z"/>
+          <path class="cls-6" d="M802.81,287.56v-.4h3v.33c0,1.43,1.12,2.47,3.19,2.47,1.83,0,3-.82,3-2.25a1.67,1.67,0,0,0-1.45-1.73l-4-1c-2.33-.59-3.43-1.67-3.43-4,0-2.91,2.2-4.64,5.66-4.64s5.56,1.89,5.56,4.6v.37h-2.84V281c0-1.3-.83-2.29-2.72-2.29-1.63,0-2.69.66-2.69,2a1.53,1.53,0,0,0,1.32,1.62l4.23,1.13c2.26.59,3.36,1.93,3.36,4,0,2.93-2.61,4.87-6.09,4.87S802.81,290.57,802.81,287.56Z"/>
+          <path class="cls-6" d="M820.87,279.18h-4.35v-2.56h11.79v2.56H824V292h-3.08Z"/>
+          <path class="cls-6" d="M845.51,292h-2.94v-9.46l-2.89,5.72H837l-2.89-5.72V292h-3V276.62H834l4.36,8.81,4.36-8.81h2.83Z"/>
+        </g>
+        <path id="m5552310466-9" data-name="m5552310466" class="cls-6" d="M858,289.72a5,5,0,0,0,0-10,5,5,0,0,0-5,5,5,5,0,0,0,5,5Z"/>
+      </g>
+      <g>
+        <path class="cls-7" d="M651.1,336.3v-2.42h3.22V323.32h-3.13V320.9h9.33v2.42H657.4v10.56h3.21v2.42Z"/>
+        <path class="cls-7" d="M663.88,324.86h2.87v1.36a3.43,3.43,0,0,1,3.29-1.63,3.79,3.79,0,0,1,3.31,1.63,4.5,4.5,0,0,1,3.78-1.63c2.4,0,4.09,1.63,4.09,4.38v7.33h-3v-6.84c0-1.32-.59-2.27-2.05-2.27s-2.13.79-2.13,2.33v6.78h-3v-6.84c0-1.32-.6-2.27-2-2.27s-2.13.79-2.13,2.33v6.78h-3Z"/>
+        <path class="cls-7" d="M684.12,333c0-2.49,1.87-3.63,4.62-3.63a4.61,4.61,0,0,1,2.91,1v-1.57a1.82,1.82,0,0,0-2-1.85,2.06,2.06,0,0,0-2,1.08h-2.77a4.7,4.7,0,0,1,4.9-3.41c2.86,0,4.82,1.74,4.82,4.4v7.31h-2.88v-1.06a4.33,4.33,0,0,1-3.3,1.32C685.88,336.56,684.12,335.22,684.12,333Zm7.53-.11c0-1-1-1.61-2.33-1.61s-2.29.57-2.29,1.63.94,1.67,2.2,1.67S691.65,333.86,691.65,332.89Z"/>
+        <path class="cls-7" d="M697.65,337h2.91a2.53,2.53,0,0,0,2.46,1.15c1.52,0,2.49-.84,2.49-2.12v-1.8a4,4,0,0,1-3.28,1.56,4.51,4.51,0,0,1-4.71-4.75v-1.67a4.48,4.48,0,0,1,4.73-4.76,3.81,3.81,0,0,1,3.39,1.65v-1.38h2.86v11.2c0,2.44-2.29,4.33-5.48,4.33C700.34,340.39,698.12,339.14,697.65,337Zm7.86-6.12v-1.32A2.41,2.41,0,0,0,703,327a2.31,2.31,0,0,0-2.53,2.55v1.32a2.28,2.28,0,0,0,2.44,2.51A2.42,2.42,0,0,0,705.51,330.86Z"/>
+        <path class="cls-7" d="M711.49,331.46V329.9c0-3.11,1.85-5.31,5.28-5.31a4.75,4.75,0,0,1,5.11,5v2H714.4v.29a2.26,2.26,0,0,0,2.42,2.51,2.34,2.34,0,0,0,2.35-1.46h2.62a4.81,4.81,0,0,1-5,3.61C713.58,336.56,711.49,334.67,711.49,331.46Zm2.91-2h4.73v-.29a2.23,2.23,0,0,0-2.36-2.5c-1.71,0-2.37,1.14-2.37,2.68Z"/>
+        <path class="cls-7" d="M725.09,320.9H728l6.72,10.25V320.9h3v15.4h-2.93l-6.71-10.25V336.3h-3Z"/>
+        <path class="cls-7" d="M741,331.46V329.9c0-3.11,1.85-5.31,5.28-5.31a4.74,4.74,0,0,1,5.1,5v2h-7.48v.29a2.27,2.27,0,0,0,2.42,2.51,2.34,2.34,0,0,0,2.36-1.46h2.62a4.83,4.83,0,0,1-5,3.61C743,336.56,741,334.67,741,331.46Zm2.9-2h4.73v-.29a2.23,2.23,0,0,0-2.35-2.5c-1.72,0-2.38,1.14-2.38,2.68Z"/>
+        <path class="cls-7" d="M755,333v-5.76h-1.63v-2.42H755v-3.13h3v3.13h2.44v2.42H758v5.54c0,.75.29,1.06,1.08,1.06h1.21v2.42h-2.07C756,336.3,755,335.07,755,333Z"/>
+      </g>
+      <path id="m5552310466-10" data-name="m5552310466" class="cls-7" d="M756.2,353.43a5,5,0,1,0-3.54-1.46A5,5,0,0,0,756.2,353.43Z"/>
+      <g>
+        <g>
+          <path class="cls-8" d="M542.17,362.23v-.39h3v.33c0,1.43,1.12,2.46,3.19,2.46,1.82,0,3-.81,3-2.24a1.67,1.67,0,0,0-1.45-1.74l-4-1c-2.33-.6-3.43-1.68-3.43-4,0-2.9,2.2-4.64,5.66-4.64s5.56,1.89,5.56,4.6V356h-2.84v-.3c0-1.3-.83-2.29-2.72-2.29-1.63,0-2.69.66-2.69,2A1.54,1.54,0,0,0,546.7,357l4.23,1.12c2.26.59,3.36,1.94,3.36,4,0,2.92-2.62,4.86-6.09,4.86C544.57,367,542.17,365.25,542.17,362.23Z"/>
+          <path class="cls-8" d="M568.44,360v1.9c0,2.75-1.72,5-4.8,5a3.86,3.86,0,0,1-3.28-1.5v5.06h-3V355.26h2.86v1.36a3.93,3.93,0,0,1,3.39-1.62C566.77,355,568.44,357.33,568.44,360Zm-8.08.14v1.67a2.55,2.55,0,1,0,5.09,0v-1.62a2.47,2.47,0,0,0-2.56-2.75A2.53,2.53,0,0,0,560.36,360.17Z"/>
+          <path class="cls-8" d="M571,363.4c0-2.49,1.87-3.63,4.62-3.63a4.6,4.6,0,0,1,2.9,1v-1.56a1.82,1.82,0,0,0-2-1.85,2.05,2.05,0,0,0-2,1.08h-2.77a4.69,4.69,0,0,1,4.9-3.41c2.86,0,4.82,1.73,4.82,4.4v7.3h-2.88v-1.05a4.33,4.33,0,0,1-3.3,1.32C572.71,367,571,365.62,571,363.4Zm7.52-.11c0-1-1-1.61-2.33-1.61s-2.29.58-2.29,1.63.95,1.68,2.2,1.68S578.47,364.26,578.47,363.29Z"/>
+          <path class="cls-8" d="M584.34,361.79v-1.62c0-3,2-5.17,5.42-5.17,3,0,5.12,1.91,5.21,4.53h-2.73a2.23,2.23,0,0,0-2.42-2.11,2.34,2.34,0,0,0-2.48,2.64v1.84a2.34,2.34,0,0,0,2.48,2.65,2.24,2.24,0,0,0,2.42-2.12H595c-.09,2.62-2.16,4.54-5.21,4.54C586.3,367,584.34,364.79,584.34,361.79Z"/>
+          <path class="cls-8" d="M597.45,361.86V360.3c0-3.1,1.85-5.3,5.29-5.3a4.74,4.74,0,0,1,5.1,5v2h-7.48v.29a2.26,2.26,0,0,0,2.42,2.51,2.32,2.32,0,0,0,2.35-1.45h2.62a4.81,4.81,0,0,1-5,3.61C599.54,367,597.45,365.07,597.45,361.86Zm2.91-2h4.73v-.28a2.24,2.24,0,0,0-2.35-2.51c-1.72,0-2.38,1.14-2.38,2.68Z"/>
+          <path class="cls-8" d="M616,366.7v-2.42h3.21V353.72h-3.12V351.3h9.33v2.42h-3.13v10.56h3.22v2.42Z"/>
+          <path class="cls-8" d="M628.78,355.26h2.86v1.36A3.55,3.55,0,0,1,635,355c2.49,0,4,1.62,4,4.37v7.33h-3v-6.84c0-1.32-.63-2.27-2.09-2.27a2.06,2.06,0,0,0-2.2,2.33v6.78h-3Z"/>
+          <path class="cls-8" d="M648.14,366.7H645l-3.9-11.44h3l2.46,8.38,2.46-8.38h3Z"/>
+          <path class="cls-8" d="M653.18,363.4c0-2.49,1.87-3.63,4.62-3.63a4.63,4.63,0,0,1,2.91,1v-1.56a1.82,1.82,0,0,0-2-1.85,2.05,2.05,0,0,0-2,1.08H654a4.69,4.69,0,0,1,4.91-3.41c2.86,0,4.82,1.73,4.82,4.4v7.3h-2.89v-1.05a4.32,4.32,0,0,1-3.3,1.32C654.94,367,653.18,365.62,653.18,363.4Zm7.53-.11c0-1-1-1.61-2.34-1.61s-2.29.58-2.29,1.63.95,1.68,2.2,1.68S660.71,364.26,660.71,363.29Z"/>
+          <path class="cls-8" d="M666.58,361.93V360c0-2.75,1.71-5,4.79-5a4,4,0,0,1,3.28,1.49v-5.85h3V366.7h-2.86v-1.36A3.94,3.94,0,0,1,671.4,367,4.73,4.73,0,0,1,666.58,361.93Zm8.07-.14v-1.67a2.55,2.55,0,1,0-5.08.05v1.62a2.47,2.47,0,0,0,2.55,2.76A2.53,2.53,0,0,0,674.65,361.79Z"/>
+          <path class="cls-8" d="M680.64,361.86V360.3c0-3.1,1.84-5.3,5.28-5.3a4.73,4.73,0,0,1,5.1,5v2h-7.48v.29a2.26,2.26,0,0,0,2.42,2.51,2.35,2.35,0,0,0,2.36-1.45h2.61a4.81,4.81,0,0,1-5,3.61C682.73,367,680.64,365.07,680.64,361.86Zm2.9-2h4.73v-.28a2.24,2.24,0,0,0-2.35-2.51c-1.72,0-2.38,1.14-2.38,2.68Z"/>
+          <path class="cls-8" d="M694,355.26h2.84v2.22c.37-1.71,1.72-2.31,3.43-2.31h.35v3H700c-2,0-3,1-3,3.28v5.28h-3Z"/>
+          <path class="cls-8" d="M702.68,363.31v-.2h2.64v.16c0,1,.9,1.58,2.31,1.58,1.25,0,2.18-.5,2.18-1.38,0-.55-.29-.88-.95-1l-3.39-.77c-1.71-.39-2.61-1.27-2.61-3,0-2.36,1.91-3.65,4.62-3.65s4.53,1.38,4.53,3.65v.17h-2.62v-.19c0-.88-.55-1.5-1.87-1.5s-1.87.44-1.87,1.25c0,.58.29.84.88,1l3.48.86c1.69.42,2.55,1.32,2.55,2.93,0,2.22-2.11,3.83-4.95,3.83S702.68,365.76,702.68,363.31Z"/>
+        </g>
+        <path id="m5552310466-11" data-name="m5552310466" class="cls-8" d="M724.17,364.39a5,5,0,1,0-3.53-1.46A5,5,0,0,0,724.17,364.39Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-8" d="M509.43,403.51v-.4h2.95v.33c0,1.43,1.12,2.47,3.19,2.47,1.83,0,3-.82,3-2.25a1.66,1.66,0,0,0-1.45-1.73l-4-1c-2.33-.59-3.43-1.67-3.43-4,0-2.9,2.2-4.64,5.66-4.64s5.56,1.89,5.56,4.6v.37h-2.84v-.31c0-1.3-.83-2.29-2.72-2.29-1.63,0-2.69.66-2.69,2a1.53,1.53,0,0,0,1.32,1.62l4.23,1.13c2.26.59,3.36,1.93,3.36,4,0,2.93-2.61,4.87-6.09,4.87C511.83,408.29,509.43,406.53,509.43,403.51Z"/>
+          <path class="cls-8" d="M524.12,403.14v-1.57c0-3.1,1.85-5.3,5.29-5.3a4.75,4.75,0,0,1,5.1,5v2H527v.29a2.26,2.26,0,0,0,2.42,2.51,2.33,2.33,0,0,0,2.35-1.46h2.62a4.81,4.81,0,0,1-5,3.61C526.22,408.24,524.12,406.35,524.12,403.14Zm2.91-2h4.73v-.29a2.24,2.24,0,0,0-2.35-2.51c-1.72,0-2.38,1.15-2.38,2.69Z"/>
+          <path class="cls-8" d="M537,404.68c0-2.49,1.87-3.63,4.62-3.63a4.61,4.61,0,0,1,2.91,1v-1.56a1.82,1.82,0,0,0-2-1.85,2.06,2.06,0,0,0-2,1.08h-2.77a4.7,4.7,0,0,1,4.9-3.41c2.86,0,4.82,1.74,4.82,4.4V408h-2.88v-1.06a4.33,4.33,0,0,1-3.3,1.32C538.73,408.24,537,406.9,537,404.68Zm7.53-.11c0-1-1-1.61-2.33-1.61s-2.29.57-2.29,1.63.94,1.67,2.2,1.67S544.5,405.53,544.5,404.57Z"/>
+          <path class="cls-8" d="M558.58,397.9v-1.36h2.86v15.27h-3v-5a4,4,0,0,1-3.28,1.47c-3.08,0-4.8-2.29-4.8-5v-1.89a4.71,4.71,0,0,1,4.82-5A3.79,3.79,0,0,1,558.58,397.9Zm-5.22,3.54v1.63a2.55,2.55,0,1,0,5.09,0v-1.67a2.53,2.53,0,0,0-2.53-2.75A2.47,2.47,0,0,0,553.36,401.44Z"/>
+          <path class="cls-8" d="M575,408h-2.86v-1.37a3.77,3.77,0,0,1-3.32,1.63c-2.58,0-4-1.78-4-4.38v-7.32h3v6.9c0,1.46.68,2.29,2,2.29a2.06,2.06,0,0,0,2.18-2.31v-6.88h3Z"/>
+          <path class="cls-8" d="M578,403.14v-1.57c0-3.1,1.85-5.3,5.28-5.3a4.75,4.75,0,0,1,5.11,5v2h-7.48v.29a2.26,2.26,0,0,0,2.42,2.51,2.34,2.34,0,0,0,2.35-1.46h2.62a4.81,4.81,0,0,1-5,3.61C580.09,408.24,578,406.35,578,403.14Zm2.91-2h4.73v-.29a2.24,2.24,0,0,0-2.36-2.51c-1.71,0-2.37,1.15-2.37,2.69Z"/>
+          <path class="cls-8" d="M590.9,404.59v-.2h2.64v.15c0,1,.9,1.59,2.31,1.59,1.25,0,2.17-.51,2.17-1.39,0-.55-.28-.88-.94-1l-3.39-.77c-1.72-.4-2.62-1.28-2.62-3,0-2.35,1.92-3.65,4.62-3.65s4.53,1.39,4.53,3.65v.18h-2.61v-.2c0-.88-.55-1.49-1.87-1.49s-1.87.44-1.87,1.25c0,.57.28.84.88,1l3.47.86c1.7.41,2.55,1.32,2.55,2.92,0,2.22-2.11,3.83-4.95,3.83S590.9,407,590.9,404.59Z"/>
+          <path class="cls-8" d="M604.14,404.72V399h-1.63v-2.42h1.63v-3.13h3v3.13h2.45V399h-2.45v5.54c0,.75.29,1.06,1.08,1.06h1.21V408h-2.07C605.17,408,604.14,406.75,604.14,404.72Z"/>
+        </g>
+        <path id="m5552310466-12" data-name="m5552310466" class="cls-8" d="M621.17,405.67a5,5,0,1,0-3.54-1.47A5,5,0,0,0,621.17,405.67Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-7" d="M500.75,426.83v-.4h2.95v.33c0,1.43,1.12,2.47,3.19,2.47,1.83,0,3-.82,3-2.25a1.68,1.68,0,0,0-1.46-1.74l-4-1c-2.34-.6-3.44-1.67-3.44-4,0-2.91,2.2-4.65,5.66-4.65s5.57,1.9,5.57,4.6v.38h-2.84v-.31c0-1.3-.84-2.29-2.73-2.29-1.63,0-2.69.66-2.69,2a1.55,1.55,0,0,0,1.33,1.63l4.22,1.12c2.27.6,3.37,1.94,3.37,4,0,2.93-2.62,4.86-6.1,4.86S500.75,429.84,500.75,426.83Z"/>
+          <path class="cls-7" d="M522.75,431.29h-3.47l-5.11-15.4h3.15l3.69,12,3.7-12h3.15Z"/>
+          <path class="cls-7" d="M530.58,415.89h3.08v6.32H540v-6.32h3.08v15.4H540v-6.36h-6.32v6.36h-3.08Z"/>
+          <path class="cls-7" d="M547,415.89h2.92l6.71,10.25V415.89h3v15.4h-2.93L550,421v10.25h-3Z"/>
+        </g>
+        <path id="m5552310466-13" data-name="m5552310466" class="cls-7" d="M572.14,429a5,5,0,1,0-3.53-1.47A5,5,0,0,0,572.14,429Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-8" d="M480.86,381.45H477.8l-1-3.24h-6l-1,3.24h-3.06L472.21,366h3.13Zm-9.31-5.68H476l-2.2-6.95Z"/>
+          <path class="cls-8" d="M483.46,365.38h3v16.07h-3Z"/>
+          <path class="cls-8" d="M491.71,365.27A1.74,1.74,0,1,1,490,367,1.68,1.68,0,0,1,491.71,365.27Zm-1.5,4.73h3v11.45h-3Z"/>
+          <path class="cls-8" d="M496.39,376.6V375c0-3.1,1.85-5.3,5.28-5.3a4.75,4.75,0,0,1,5.11,5v2H499.3V377a2.26,2.26,0,0,0,2.42,2.51,2.34,2.34,0,0,0,2.35-1.45h2.62a4.81,4.81,0,0,1-5,3.61C498.48,381.71,496.39,379.82,496.39,376.6Zm2.91-2H504v-.29a2.24,2.24,0,0,0-2.36-2.51c-1.71,0-2.37,1.15-2.37,2.69Z"/>
+          <path class="cls-8" d="M509.77,370h2.86v1.37a3.53,3.53,0,0,1,3.36-1.63c2.49,0,4,1.63,4,4.38v7.33h-3V374.6a2,2,0,0,0-2.09-2.26,2.06,2.06,0,0,0-2.2,2.33v6.78h-3Z"/>
+        </g>
+        <path id="m5552310466-14" data-name="m5552310466" class="cls-8" d="M675.31,396.45a5,5,0,1,0-3.54-1.47A5,5,0,0,0,675.31,396.45Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-7" d="M439.68,487.58h-2.94v-9.46l-2.89,5.72h-2.72l-2.89-5.72v9.46h-2.95v-15.4h2.84l4.36,8.8,4.36-8.8h2.83Z"/>
+          <path class="cls-7" d="M443.62,472.18h2.93l6.71,10.25V472.18h3v15.4h-2.92l-6.71-10.25v10.25h-3Z"/>
+          <path class="cls-7" d="M459.77,487.58v-2.42H463V474.6h-3.12v-2.42h9.33v2.42h-3.13v10.56h3.21v2.42Z"/>
+          <path class="cls-7" d="M472.29,483.11v-.39h2.94v.33c0,1.43,1.13,2.46,3.19,2.46,1.83,0,3-.81,3-2.24a1.67,1.67,0,0,0-1.45-1.74l-4-1c-2.33-.59-3.43-1.67-3.43-4,0-2.9,2.2-4.64,5.65-4.64s5.57,1.89,5.57,4.6v.37h-2.84v-.31c0-1.29-.83-2.28-2.73-2.28-1.63,0-2.68.66-2.68,2a1.54,1.54,0,0,0,1.32,1.63L481,479c2.27.59,3.37,1.93,3.37,4,0,2.93-2.62,4.87-6.09,4.87C474.68,487.89,472.29,486.13,472.29,483.11Z"/>
+          <path class="cls-7" d="M490.35,474.73H486v-2.55h11.8v2.55h-4.36v12.85h-3.08Z"/>
+        </g>
+        <path id="m5552310466-15" data-name="m5552310466" class="cls-7" d="M509.16,485.27a5,5,0,1,0-3.54-1.46A5,5,0,0,0,509.16,485.27Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-8" d="M1000.37,80.34c4.13,0,6.31,2.55,6.31,6v3.3c0,3.5-2.18,6-6.31,6h-6.25V80.34Zm3.25,6c0-2.25-1.3-3.52-3.78-3.52h-2.67V93.23h2.67c2.48,0,3.78-1.27,3.78-3.52Z"/>
+          <path class="cls-8" d="M1009.41,90.84V89.21c0-2.93,2-5.17,5.39-5.17s5.39,2.24,5.39,5.17v1.63c0,2.92-2,5.17-5.39,5.17S1009.41,93.76,1009.41,90.84Zm7.87-1.83a2.49,2.49,0,1,0-5,0v2a2.49,2.49,0,1,0,5,0Z"/>
+          <path class="cls-8" d="M1023.77,92.49V86.72h-1.63V84.3h1.63V81.18h3V84.3h2.44v2.42h-2.44v5.54c0,.75.28,1.06,1.07,1.06h1.22v2.42H1027A2.91,2.91,0,0,1,1023.77,92.49Z"/>
+          <path class="cls-8" d="M1031.49,92.44c0-2.48,1.87-3.63,4.62-3.63a4.61,4.61,0,0,1,2.91,1V88.22a1.82,1.82,0,0,0-2-1.85,2,2,0,0,0-2,1.08h-2.77a4.69,4.69,0,0,1,4.9-3.41c2.86,0,4.82,1.73,4.82,4.4v7.3h-2.88v-1a4.33,4.33,0,0,1-3.3,1.32C1033.25,96,1031.49,94.66,1031.49,92.44Zm7.53-.11c0-1-1-1.61-2.33-1.61s-2.29.58-2.29,1.63.94,1.68,2.2,1.68S1039,93.3,1039,92.33Z"/>
+          <path class="cls-8" d="M1050.54,91.65h2.89a2.52,2.52,0,0,0,2.64,1.94,2.73,2.73,0,0,0,2.9-2.87,2.68,2.68,0,0,0-2.81-2.79,2.82,2.82,0,0,0-2.56,1.34h-2.79l1.45-8.93h8.25v2.42h-5.85l-.64,3.7a4.23,4.23,0,0,1,2.91-1,4.85,4.85,0,0,1,5,5.15c0,3-2.42,5.41-5.87,5.41C1053.36,96.05,1050.94,94.58,1050.54,91.65Z"/>
+          <path class="cls-8" d="M1070.85,95.74h-3.12l-3.9-11.44h3l2.46,8.38,2.46-8.38h3Z"/>
+          <path class="cls-8" d="M1076.77,91.65h2.88a2.52,2.52,0,0,0,2.64,1.94,2.73,2.73,0,0,0,2.91-2.87,2.68,2.68,0,0,0-2.82-2.79,2.8,2.8,0,0,0-2.55,1.34H1077l1.45-8.93h8.26v2.42h-5.86l-.63,3.7a4.21,4.21,0,0,1,2.9-1,4.85,4.85,0,0,1,5,5.15c0,3-2.42,5.41-5.88,5.41C1079.58,96.05,1077.16,94.58,1076.77,91.65Z"/>
+        </g>
+        <g>
+          <path class="cls-8" d="M976.91,100.36a20.68,20.68,0,0,0,0,20.15h-1.63a20.41,20.41,0,0,1,0-20.15Z"/>
+          <path class="cls-8" d="M980.3,102.16H982v14h7.9v1.43H980.3Z"/>
+          <path class="cls-8" d="M992.18,112.85V111a5,5,0,0,1,10.05,0v1.84a5,5,0,0,1-10.05,0Zm5-5.47c-2.22,0-3.41,1.51-3.41,3.65v1.8c0,2.14,1.19,3.66,3.41,3.66s3.42-1.52,3.42-3.66V111C1000.63,108.89,999.44,107.38,997.21,107.38Z"/>
+          <path class="cls-8" d="M1016.77,117.56h-1.58l-3-9.5-3,9.5h-1.59l-3.08-11.26h1.58l2.38,9.26,2.91-9.26H1013l2.9,9.26,2.38-9.26h1.59Z"/>
+          <path class="cls-8" d="M1022.16,112.92v-1.8c0-3.06,1.74-5.09,4.91-5.09s4.77,2.14,4.77,4.91v1.67h-8.07v.4c0,2,1,3.54,3.37,3.54a3,3,0,0,0,3.1-2.18h1.49a4.44,4.44,0,0,1-4.59,3.46A4.65,4.65,0,0,1,1022.16,112.92Zm1.61-1.58h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.67Z"/>
+          <path class="cls-8" d="M1035.45,106.3H1037v2.59c.42-1.73,1.63-2.66,3.68-2.66h.26v1.56h-.33c-2.13,0-3.5,1.41-3.5,4.25v5.52h-1.67Z"/>
+          <path class="cls-8" d="M1049.38,102.16h6.36c2.72,0,4.2,1.61,4.2,3.83a3.79,3.79,0,0,1-2.27,3.61,3.7,3.7,0,0,1,3.17,3.78c0,2.58-1.91,4.18-4.95,4.18h-6.51Zm6,6.87a2.71,2.71,0,0,0,2.91-2.82,2.63,2.63,0,0,0-2.91-2.71H1051V109Zm.4,7.19c2.2,0,3.41-1.06,3.41-3,0-1.71-1.26-2.9-3.39-2.9H1051v5.85Z"/>
+          <path class="cls-8" d="M1063.74,112.85V111a5,5,0,0,1,10.06,0v1.84a5,5,0,0,1-10.06,0Zm5-5.47c-2.22,0-3.41,1.51-3.41,3.65v1.8c0,2.14,1.19,3.66,3.41,3.66s3.41-1.52,3.41-3.66V111C1072.19,108.89,1071,107.38,1068.78,107.38Z"/>
+          <path class="cls-8" d="M1086.62,117.56H1085v-1.91a3.79,3.79,0,0,1-3.76,2.18c-2.47,0-4-1.72-4-4.43v-7.1h1.67v6.86c0,1.92.84,3.26,2.77,3.26s3.26-1.17,3.26-3.5V106.3h1.67Z"/>
+          <path class="cls-8" d="M1090.74,106.3h1.58v1.91a3.78,3.78,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.43v7.1h-1.67V110.7c0-1.92-.86-3.21-2.89-3.21a3.09,3.09,0,0,0-3.25,3.45v6.62h-1.67Z"/>
+          <path class="cls-8" d="M1113.77,101.5v16.06h-1.58V115.8a4.2,4.2,0,0,1-3.81,2c-3,0-4.71-2.23-4.71-5v-1.72c0-2.92,1.83-5,4.67-5a4.22,4.22,0,0,1,3.76,1.94V101.5Zm-8.43,11.22c0,2.07,1,3.74,3.33,3.74s3.43-1.69,3.43-3.87v-1.45a3.49,3.49,0,0,0-3.46-3.74c-2.13,0-3.3,1.67-3.3,3.74Z"/>
+          <path class="cls-8" d="M1118.57,100.36a20.41,20.41,0,0,1,0,20.15h-1.63a20.75,20.75,0,0,0,0-20.15Z"/>
+        </g>
+        <path id="m5552310466-16" data-name="m5552310466" class="cls-8" d="M1100.07,93.43a5,5,0,1,0-3.54-1.46A5,5,0,0,0,1100.07,93.43Z"/>
+      </g>
+    </g>
+    <g>
+      <line class="cls-12" x1="1100.07" y1="88.43" x2="1127.73" y2="88.43"/>
+      <polygon class="cls-8" points="1125.26 92.06 1138.81 88.43 1125.26 84.8 1125.26 92.06"/>
+    </g>
+  </g>
+  <g id="Layer_4" data-name="Layer 4">
+    <g>
+      <path class="cls-3" d="M54,308.8a3.85,3.85,0,0,0-4.21-3.7c-3.08,0-4.54,2-4.54,5.18v3c0,3.16,1.3,5.37,4.57,5.37,2.75,0,4.26-1.7,4.26-3.78v-.27H49.85V311.5h7.94V314c0,4.54-3.16,7.7-8,7.7-5.61,0-8.26-3.78-8.26-8.48v-2.75c0-5.08,3-8.42,8.18-8.42,4.89,0,7.8,2.88,7.91,6.8Z"/>
+      <path class="cls-3" d="M62.46,307.23H66V310c.45-2.11,2.1-2.84,4.21-2.84h.43v3.65h-.76c-2.4,0-3.7,1.19-3.7,4v6.48H62.46Z"/>
+      <path class="cls-3" d="M73.26,317.22c0-3.05,2.3-4.45,5.67-4.45A5.66,5.66,0,0,1,82.5,314V312a2.23,2.23,0,0,0-2.43-2.27,2.52,2.52,0,0,0-2.43,1.32h-3.4c.8-2.72,3-4.18,6-4.18,3.51,0,5.92,2.13,5.92,5.4v9H82.63V320a5.3,5.3,0,0,1-4,1.62C75.42,321.59,73.26,320,73.26,317.22Zm9.24-.13c0-1.25-1.22-2-2.87-2s-2.8.71-2.8,2,1.16,2.05,2.7,2.05S82.5,318.27,82.5,317.09Z"/>
+      <path class="cls-3" d="M90.38,315.41v-2.32c0-3.37,2.11-6.18,5.89-6.18a4.86,4.86,0,0,1,4,1.83v-7.18H104v19.71h-3.51v-1.68a4.82,4.82,0,0,1-4.16,2C92.51,321.59,90.38,318.73,90.38,315.41Zm9.91-.16V313.2a3.05,3.05,0,0,0-3.08-3.32c-2.08,0-3.16,1.4-3.16,3.37v2c0,1.86,1,3.37,3.13,3.37A3.09,3.09,0,0,0,100.29,315.25Z"/>
+      <path class="cls-3" d="M111,301.43a2.13,2.13,0,1,1-2.13,2.13A2.07,2.07,0,0,1,111,301.43Zm-1.84,5.8h3.67v14h-3.67Z"/>
+      <path class="cls-3" d="M117.44,315.33v-1.92c0-3.8,2.26-6.5,6.47-6.5s6.27,2.78,6.27,6.15v2.46H121v.35a2.78,2.78,0,0,0,3,3.08,2.88,2.88,0,0,0,2.89-1.78h3.21c-.49,2.56-3,4.42-6.1,4.42C120,321.59,117.44,319.27,117.44,315.33Zm3.56-2.4h5.8v-.35c0-1.76-.92-3.08-2.89-3.08s-2.91,1.4-2.91,3.29Z"/>
+      <path class="cls-3" d="M134.53,307.23H138v1.68c.7-1.3,2.13-2,4.13-2,3.05,0,5,2,5,5.37v9h-3.67v-8.4c0-1.62-.78-2.78-2.56-2.78a2.54,2.54,0,0,0-2.7,2.87v8.31h-3.67Z"/>
+      <path class="cls-3" d="M152.75,317.27V310.2h-2v-3h2V303.4h3.7v3.83h3v3h-3V317c0,.92.35,1.3,1.32,1.3h1.49v3h-2.54C154,321.27,152.75,319.76,152.75,317.27Z"/>
+      <path class="cls-3" d="M42.08,334.77h3.59l8.23,12.58V334.77h3.7v18.9H54l-8.23-12.58v12.58h-3.7Z"/>
+      <path class="cls-3" d="M62.22,347.65v-2c0-3.59,2.4-6.34,6.61-6.34s6.62,2.75,6.62,6.34v2c0,3.59-2.41,6.34-6.62,6.34S62.22,351.24,62.22,347.65Zm9.66-2.24c0-1.84-1-3.21-3-3.21s-3,1.37-3,3.21v2.48c0,1.84,1,3.21,3,3.21s3-1.37,3-3.21Z"/>
+      <path class="cls-3" d="M81.88,333.83A2.13,2.13,0,1,1,79.74,336,2.07,2.07,0,0,1,81.88,333.83ZM80,339.63h3.67v14H80Z"/>
+      <path class="cls-3" d="M88.33,349.51v-.24h3.24v.19c0,1.21,1.1,1.94,2.83,1.94,1.54,0,2.67-.62,2.67-1.7,0-.67-.35-1.08-1.16-1.27l-4.15-.94c-2.11-.49-3.22-1.57-3.22-3.7,0-2.89,2.35-4.48,5.67-4.48s5.56,1.7,5.56,4.48V344H96.56v-.25c0-1.08-.67-1.83-2.29-1.83s-2.3.54-2.3,1.54c0,.7.35,1,1.08,1.18l4.27,1.06c2.08.51,3.13,1.62,3.13,3.59,0,2.72-2.59,4.69-6.07,4.69S88.33,352.51,88.33,349.51Z"/>
+      <path class="cls-3" d="M104,347.73v-1.92c0-3.8,2.27-6.5,6.48-6.5s6.26,2.78,6.26,6.15v2.46h-9.18v.35a2.78,2.78,0,0,0,3,3.08,2.87,2.87,0,0,0,2.89-1.78h3.21c-.49,2.56-3,4.42-6.1,4.42C106.55,354,104,351.67,104,347.73Zm3.56-2.4h5.8V345c0-1.76-.91-3.08-2.88-3.08s-2.92,1.4-2.92,3.29Z"/>
+      <path class="cls-3" d="M128,348.19v-.49h3.62v.41c0,1.75,1.38,3,3.92,3,2.24,0,3.69-1,3.69-2.75a2,2,0,0,0-1.78-2.13L132.55,345c-2.86-.73-4.21-2.05-4.21-4.89,0-3.56,2.7-5.69,6.94-5.69s6.83,2.32,6.83,5.64v.46h-3.48v-.38c0-1.59-1-2.81-3.35-2.81-2,0-3.29.81-3.29,2.46a1.88,1.88,0,0,0,1.62,2l5.18,1.38c2.78.73,4.13,2.38,4.13,4.94,0,3.59-3.21,6-7.48,6C131,354.05,128,351.89,128,348.19Z"/>
+      <path class="cls-3" d="M146.76,347.65v-2c0-3.67,2.4-6.34,6.64-6.34,3.75,0,6.29,2.35,6.39,5.56h-3.34a2.75,2.75,0,0,0-3-2.59c-2.08,0-3,1.35-3,3.24v2.26c0,1.89,1,3.24,3,3.24a2.75,2.75,0,0,0,3-2.59h3.34c-.1,3.21-2.64,5.56-6.39,5.56C149.16,354,146.76,351.32,146.76,347.65Z"/>
+      <path class="cls-3" d="M163.55,349.62c0-3.05,2.29-4.45,5.67-4.45a5.65,5.65,0,0,1,3.56,1.18v-1.91a2.23,2.23,0,0,0-2.43-2.27,2.53,2.53,0,0,0-2.43,1.32h-3.4c.81-2.72,3-4.18,6-4.18,3.51,0,5.91,2.13,5.91,5.4v9h-3.53v-1.3a5.3,5.3,0,0,1-4,1.62C165.71,354,163.55,352.35,163.55,349.62Zm9.23-.13c0-1.25-1.21-2-2.86-2s-2.81.71-2.81,2,1.16,2.05,2.7,2.05S172.78,350.67,172.78,349.49Z"/>
+      <path class="cls-3" d="M181.37,334H185v19.71h-3.67Z"/>
+      <path class="cls-3" d="M189.47,347.73v-1.92c0-3.8,2.27-6.5,6.48-6.5s6.26,2.78,6.26,6.15v2.46H193v.35a2.78,2.78,0,0,0,3,3.08,2.87,2.87,0,0,0,2.89-1.78h3.21c-.48,2.56-3,4.42-6.1,4.42C192,354,189.47,351.67,189.47,347.73Zm3.56-2.4h5.8V345c0-1.76-.91-3.08-2.88-3.08s-2.92,1.4-2.92,3.29Z"/>
+      <path class="cls-3" d="M41.78,368.8h1.59v1.92a3.69,3.69,0,0,1,3.71-2.18,3.79,3.79,0,0,1,3.59,2.2,4.48,4.48,0,0,1,4.09-2.2c2.47,0,4.05,1.76,4.05,4.42v7.11H57.14V373.2c0-1.91-1-3.21-2.79-3.21s-3.22,1.12-3.22,3.46v6.62H49.46V373.2c0-1.91-.94-3.21-2.79-3.21s-3.22,1.12-3.22,3.46v6.62H41.78Z"/>
+      <path class="cls-3" d="M62.26,375.43v-1.81c0-3.06,1.74-5.08,4.91-5.08S72,370.67,72,373.45v1.67H63.87v.39c0,2,1,3.55,3.37,3.55a3,3,0,0,0,3.1-2.18h1.5a4.46,4.46,0,0,1-4.6,3.45A4.65,4.65,0,0,1,62.26,375.43Zm1.61-1.59h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M83.12,380.07v-1.41a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.49-4.27-3.54s1.78-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.41v-2.09a2.62,2.62,0,0,0-2.82-2.73,2.86,2.86,0,0,0-2.82,1.69H75.88a4.26,4.26,0,0,1,4.38-3c2.8,0,4.47,1.83,4.47,4v7.48Zm-.06-3.3c0-1.41-1.68-2.4-3.37-2.4-1.87,0-3.13.88-3.13,2.44s1.3,2.44,3,2.44S83.06,378.22,83.06,376.77Z"/>
+      <path class="cls-3" d="M88.18,376.79v-.11H89.7v.07c0,1.4,1.32,2.33,3.17,2.33s3-.68,3-2.14a1.58,1.58,0,0,0-1.41-1.56l-3.34-.77c-1.68-.37-2.64-1.38-2.64-2.86,0-2.11,1.78-3.21,4.26-3.21s4.19,1.43,4.19,3.52v.11H95.4v-.07c0-1.38-1-2.31-2.69-2.31s-2.7.66-2.7,1.92a1.36,1.36,0,0,0,1.18,1.45l3.46.81c1.74.42,2.77,1.28,2.77,2.91,0,2-1.93,3.45-4.58,3.45S88.18,379,88.18,376.79Z"/>
+      <path class="cls-3" d="M110.09,380.07h-1.58v-1.92a3.77,3.77,0,0,1-3.76,2.18c-2.47,0-4-1.71-4-4.42V368.8h1.67v6.87c0,1.91.84,3.25,2.77,3.25s3.26-1.16,3.26-3.49V368.8h1.67Z"/>
+      <path class="cls-3" d="M114.21,368.8h1.56v2.6c.42-1.74,1.63-2.66,3.67-2.66h.27v1.56h-.33c-2.14,0-3.5,1.41-3.5,4.25v5.52h-1.67Z"/>
+      <path class="cls-3" d="M121.8,375.43v-1.81c0-3.06,1.73-5.08,4.9-5.08s4.78,2.13,4.78,4.91v1.67H123.4v.39c0,2,1,3.55,3.37,3.55a3,3,0,0,0,3.1-2.18h1.5a4.46,4.46,0,0,1-4.6,3.45A4.64,4.64,0,0,1,121.8,375.43Zm1.6-1.59h6.52v-.53c0-2-1-3.5-3.22-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M134.55,376.79v-.11h1.52v.07c0,1.4,1.32,2.33,3.17,2.33s3-.68,3-2.14a1.57,1.57,0,0,0-1.4-1.56l-3.35-.77c-1.67-.37-2.64-1.38-2.64-2.86,0-2.11,1.78-3.21,4.27-3.21s4.18,1.43,4.18,3.52v.11h-1.52v-.07c0-1.38-1-2.31-2.68-2.31s-2.71.66-2.71,1.92a1.36,1.36,0,0,0,1.19,1.45L141,374c1.74.42,2.78,1.28,2.78,2.91,0,2-1.94,3.45-4.58,3.45S134.55,379,134.55,376.79Z"/>
+      <path class="cls-3" d="M156.58,380.07c-2.16,0-3.06-1.08-3.06-3v-6.91H151.6V368.8h1.92v-3.23h1.67v3.23h2.88v1.37h-2.88v6.91c0,1.14.42,1.62,1.67,1.62H158v1.37Z"/>
+      <path class="cls-3" d="M161.35,364H163v6.53a3.91,3.91,0,0,1,3.68-2c2.53,0,4.13,1.72,4.13,4.42v7.11h-1.67V373.2c0-1.91-.86-3.21-2.88-3.21-1.85,0-3.26,1.12-3.26,3.46v6.62h-1.67Z"/>
+      <path class="cls-3" d="M174.29,375.43v-1.81c0-3.06,1.73-5.08,4.9-5.08s4.78,2.13,4.78,4.91v1.67h-8.08v.39c0,2,1,3.55,3.37,3.55a3,3,0,0,0,3.1-2.18h1.5a4.46,4.46,0,0,1-4.6,3.45A4.64,4.64,0,0,1,174.29,375.43Zm1.6-1.59h6.52v-.53c0-2-1-3.5-3.22-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M46.47,406.47H44.71L40.57,395.2h1.65l3.37,9.6L49,395.2h1.65Z"/>
+      <path class="cls-3" d="M60.44,406.47v-1.41a4.76,4.76,0,0,1-3.87,1.67c-2.67,0-4.27-1.49-4.27-3.54s1.78-3.52,4.46-3.52a4.92,4.92,0,0,1,3.61,1.41V399a2.61,2.61,0,0,0-2.81-2.73A2.86,2.86,0,0,0,54.74,398H53.2a4.26,4.26,0,0,1,4.38-3c2.79,0,4.46,1.83,4.46,4.05v7.48Zm-.07-3.3c0-1.41-1.67-2.4-3.36-2.4-1.87,0-3.13.88-3.13,2.44s1.3,2.44,3,2.44S60.37,404.62,60.37,403.17Z"/>
+      <path class="cls-3" d="M66,395.2h1.56v2.6c.42-1.74,1.63-2.66,3.67-2.66h.27v1.56H71.2c-2.14,0-3.5,1.41-3.5,4.25v5.52H66Z"/>
+      <path class="cls-3" d="M75.29,391.22a1,1,0,1,1,0,2,1,1,0,1,1,0-2Zm-.84,4h1.67v11.27H74.45Z"/>
+      <path class="cls-3" d="M88,406.47v-1.41a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.49-4.27-3.54s1.78-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.41V399a2.62,2.62,0,0,0-2.82-2.73A2.87,2.87,0,0,0,82.28,398H80.74a4.27,4.27,0,0,1,4.38-3c2.8,0,4.47,1.83,4.47,4.05v7.48Zm-.06-3.3c0-1.41-1.68-2.4-3.37-2.4-1.87,0-3.12.88-3.12,2.44s1.29,2.44,3,2.44S87.92,404.62,87.92,403.17Z"/>
+      <path class="cls-3" d="M97.4,406.47c-2.16,0-3.06-1.08-3.06-3v-6.91H92.43V395.2h1.91V392H96v3.23h2.88v1.37H96v6.91c0,1.14.42,1.62,1.67,1.62h1.13v1.37Z"/>
+      <path class="cls-3" d="M103.12,391.22a1,1,0,1,1-1.06,1A1,1,0,0,1,103.12,391.22Zm-.84,4H104v11.27h-1.67Z"/>
+      <path class="cls-3" d="M107.63,401.76v-1.85a5,5,0,0,1,10.05,0v1.85a5,5,0,0,1-10.05,0Zm5-5.48c-2.23,0-3.42,1.52-3.42,3.65v1.81c0,2.13,1.19,3.65,3.42,3.65s3.41-1.52,3.41-3.65v-1.81C116.08,397.8,114.89,396.28,112.67,396.28Z"/>
+      <path class="cls-3" d="M121.24,395.2h1.59v1.92a3.76,3.76,0,0,1,3.76-2.18c2.53,0,4.14,1.72,4.14,4.42v7.11h-1.67V399.6c0-1.91-.86-3.21-2.89-3.21-1.85,0-3.25,1.12-3.25,3.46v6.62h-1.68Z"/>
+      <path class="cls-3" d="M139.57,401.76v-1.85a5,5,0,0,1,10.06,0v1.85a5,5,0,0,1-10.06,0Zm5-5.48c-2.22,0-3.41,1.52-3.41,3.65v1.81c0,2.13,1.19,3.65,3.41,3.65s3.41-1.52,3.41-3.65v-1.81C148,397.8,146.83,396.28,144.61,396.28Z"/>
+      <path class="cls-3" d="M153.87,396.57h-1.65V395.2h1.65v-1.49c0-2.18,1-3.3,3.26-3.3h1.58v1.36h-1.45c-1.19,0-1.72.55-1.72,1.81v1.62h2.77v1.37h-2.77v9.9h-1.67Z"/>
+      <path class="cls-3" d="M170.74,406.47c-2.15,0-3.05-1.08-3.05-3v-6.91h-1.92V395.2h1.92V392h1.67v3.23h2.88v1.37h-2.88v6.91c0,1.14.42,1.62,1.67,1.62h1.12v1.37Z"/>
+      <path class="cls-3" d="M175.52,390.41h1.67v6.53a3.9,3.9,0,0,1,3.67-2c2.53,0,4.14,1.72,4.14,4.42v7.11h-1.67V399.6c0-1.91-.86-3.21-2.88-3.21-1.85,0-3.26,1.12-3.26,3.46v6.62h-1.67Z"/>
+      <path class="cls-3" d="M188.45,401.83V400c0-3.06,1.74-5.08,4.91-5.08s4.77,2.13,4.77,4.91v1.67h-8.07v.39c0,2,1,3.55,3.37,3.55a3,3,0,0,0,3.1-2.18H198a4.44,4.44,0,0,1-4.59,3.45A4.65,4.65,0,0,1,188.45,401.83Zm1.61-1.59h6.51v-.53c0-2-1-3.49-3.21-3.49-2.42,0-3.3,1.6-3.3,3.67Z"/>
+      <path class="cls-3" d="M41.43,434.08h1.65a3.37,3.37,0,0,0,3.23,1.56c2,0,3.31-1.1,3.31-2.82v-2.55a4.06,4.06,0,0,1-3.79,2.14,4.39,4.39,0,0,1-4.6-4.6v-1.87a4.35,4.35,0,0,1,4.62-4.6,4.06,4.06,0,0,1,3.83,2.13V421.6h1.61v11.27c0,2.33-2.11,4.09-5,4.09C44.07,437,42.05,435.93,41.43,434.08Zm8.19-6.45v-1.45a3.4,3.4,0,0,0-3.39-3.48,3,3,0,0,0-3.35,3.37v1.61c0,2.11,1.12,3.36,3.28,3.36A3.28,3.28,0,0,0,49.62,427.63Z"/>
+      <path class="cls-3" d="M55.4,421.6H57v2.6c.42-1.74,1.63-2.66,3.68-2.66h.26v1.56h-.33c-2.13,0-3.5,1.41-3.5,4.25v5.52H55.4Z"/>
+      <path class="cls-3" d="M70.69,432.87v-1.41a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.49-4.27-3.54s1.78-3.52,4.47-3.52a4.9,4.9,0,0,1,3.6,1.41v-2.09a2.61,2.61,0,0,0-2.81-2.73A2.86,2.86,0,0,0,65,424.35H63.45a4.26,4.26,0,0,1,4.38-3c2.79,0,4.47,1.83,4.47,4.05v7.48Zm-.07-3.3c0-1.41-1.67-2.4-3.36-2.4-1.87,0-3.13.88-3.13,2.44s1.3,2.44,3,2.44S70.62,431,70.62,429.57Z"/>
+      <path class="cls-3" d="M85.83,416.81v16.06H84.24v-1.76a4.19,4.19,0,0,1-3.8,2c-3,0-4.71-2.22-4.71-5v-1.71c0-2.93,1.82-5,4.66-5a4.21,4.21,0,0,1,3.76,1.93v-6.46ZM77.4,428c0,2.07,1,3.74,3.32,3.74s3.43-1.7,3.43-3.87v-1.46a3.48,3.48,0,0,0-3.45-3.74c-2.13,0-3.3,1.68-3.3,3.74Z"/>
+      <path class="cls-3" d="M90.89,417.62a1,1,0,1,1-1.06,1A1,1,0,0,1,90.89,417.62Zm-.84,4h1.67v11.27H90.05Z"/>
+      <path class="cls-3" d="M95.4,428.23v-1.81c0-3.06,1.73-5.08,4.9-5.08s4.78,2.13,4.78,4.91v1.67H97v.39c0,2,1,3.55,3.37,3.55a3,3,0,0,0,3.1-2.18H105a4.46,4.46,0,0,1-4.6,3.45A4.64,4.64,0,0,1,95.4,428.23Zm1.6-1.59h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M108.68,421.6h1.59v1.92a3.75,3.75,0,0,1,3.76-2.18c2.53,0,4.14,1.71,4.14,4.42v7.11h-1.68V426c0-1.91-.85-3.21-2.88-3.21-1.85,0-3.26,1.12-3.26,3.46v6.62h-1.67Z"/>
+      <path class="cls-3" d="M126,432.87c-2.15,0-3.06-1.08-3.06-3V423h-1.91V421.6H123v-3.23h1.68v3.23h2.88V423h-2.88v6.91c0,1.14.41,1.62,1.67,1.62h1.12v1.37Z"/>
+      <path class="cls-3" d="M130.22,429.59v-.11h1.52v.07c0,1.4,1.32,2.33,3.17,2.33s3-.68,3-2.14a1.58,1.58,0,0,0-1.41-1.56l-3.34-.77c-1.67-.37-2.64-1.38-2.64-2.86,0-2.11,1.78-3.21,4.27-3.21s4.18,1.43,4.18,3.52V425h-1.52v-.07c0-1.38-1-2.31-2.69-2.31s-2.7.66-2.7,1.92a1.36,1.36,0,0,0,1.19,1.45l3.45.81c1.74.42,2.77,1.28,2.77,2.91,0,2-1.93,3.45-4.57,3.45S130.22,431.77,130.22,429.59Z"/>
+      <path class="cls-3" d="M149.93,416.81v6.46a4.24,4.24,0,0,1,3.77-1.93c2.83,0,4.66,2.11,4.66,5v1.71c0,2.82-1.74,5-4.71,5a4.22,4.22,0,0,1-3.81-2v1.76h-1.58V416.81Zm6.76,9.63c0-2.06-1.17-3.74-3.3-3.74a3.49,3.49,0,0,0-3.46,3.74v1.46c0,2.17,1.21,3.87,3.44,3.87s3.32-1.67,3.32-3.74Z"/>
+      <path class="cls-3" d="M161.37,428.23v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.13,4.77,4.91v1.67H163v.39c0,2,1,3.55,3.36,3.55a3,3,0,0,0,3.11-2.18h1.49a4.44,4.44,0,0,1-4.6,3.45A4.65,4.65,0,0,1,161.37,428.23Zm1.61-1.59h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M178.58,432.87c-2.16,0-3.06-1.08-3.06-3V423H173.6V421.6h1.92v-3.23h1.67v3.23h2.88V423h-2.88v6.91c0,1.14.42,1.62,1.67,1.62H180v1.37Z"/>
+      <path class="cls-3" d="M194.62,432.87H193l-3-9.51-3,9.51h-1.59l-3.08-11.27H184l2.37,9.27,2.91-9.27h1.58l2.91,9.27,2.37-9.27h1.59Z"/>
+      <path class="cls-3" d="M200,428.23v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.13,4.77,4.91v1.67h-8.07v.39c0,2,1,3.55,3.37,3.55a3,3,0,0,0,3.1-2.18h1.5a4.46,4.46,0,0,1-4.6,3.45A4.65,4.65,0,0,1,200,428.23Zm1.61-1.59h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M212.74,428.23v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.13,4.77,4.91v1.67h-8.07v.39c0,2,1,3.55,3.36,3.55a3,3,0,0,0,3.11-2.18h1.49a4.44,4.44,0,0,1-4.6,3.45A4.65,4.65,0,0,1,212.74,428.23Zm1.61-1.59h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M226,421.6h1.58v1.92a3.78,3.78,0,0,1,3.77-2.18c2.53,0,4.13,1.71,4.13,4.42v7.11h-1.67V426c0-1.91-.86-3.21-2.88-3.21-1.85,0-3.26,1.12-3.26,3.46v6.62H226Z"/>
+      <path class="cls-3" d="M51.33,443.21v16.06H49.75v-1.76a4.22,4.22,0,0,1-3.81,2c-3,0-4.71-2.22-4.71-5v-1.71c0-2.93,1.83-5,4.67-5a4.21,4.21,0,0,1,3.76,1.93v-6.46ZM42.9,454.43c0,2.07,1,3.74,3.33,3.74s3.43-1.7,3.43-3.87v-1.46a3.48,3.48,0,0,0-3.45-3.74c-2.14,0-3.31,1.68-3.31,3.74Z"/>
+      <path class="cls-3" d="M56.39,444a1,1,0,1,1-1.06,1A1,1,0,0,1,56.39,444Zm-.84,4h1.68v11.27H55.55Z"/>
+      <path class="cls-3" d="M62.24,449.37H60.59V448h1.65v-1.49c0-2.18,1-3.3,3.26-3.3h1.58v1.36H65.63c-1.19,0-1.71.55-1.71,1.81V448h2.77v1.37H63.92v9.9H62.24Z"/>
+      <path class="cls-3" d="M70.45,449.37H68.8V448h1.65v-1.49c0-2.18,1-3.3,3.25-3.3h1.59v1.36H73.84c-1.19,0-1.72.55-1.72,1.81V448h2.77v1.37H72.12v9.9H70.45Z"/>
+      <path class="cls-3" d="M76.78,454.63v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.13,4.77,4.91v1.67H78.39v.39c0,2,1,3.55,3.37,3.55a3,3,0,0,0,3.1-2.18h1.49a4.44,4.44,0,0,1-4.59,3.45A4.65,4.65,0,0,1,76.78,454.63ZM78.39,453H84.9v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M90.07,448h1.56v2.6c.42-1.74,1.63-2.66,3.68-2.66h.26v1.56h-.33c-2.13,0-3.5,1.41-3.5,4.25v5.52H90.07Z"/>
+      <path class="cls-3" d="M97.66,454.63v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.13,4.77,4.91v1.67H99.27v.39c0,2,1,3.55,3.36,3.55a3,3,0,0,0,3.11-2.18h1.49a4.44,4.44,0,0,1-4.6,3.45A4.65,4.65,0,0,1,97.66,454.63ZM99.27,453h6.51v-.53c0-2-1-3.5-3.21-3.5-2.42,0-3.3,1.61-3.3,3.68Z"/>
+      <path class="cls-3" d="M111,448h1.58v1.92a3.78,3.78,0,0,1,3.77-2.18c2.53,0,4.13,1.72,4.13,4.42v7.11h-1.67V452.4c0-1.91-.86-3.21-2.88-3.21-1.85,0-3.26,1.12-3.26,3.46v6.62H111Z"/>
+      <path class="cls-3" d="M128.31,459.27c-2.16,0-3.06-1.08-3.06-3v-6.91h-1.92V448h1.92v-3.23h1.67V448h2.88v1.37h-2.88v6.91c0,1.14.42,1.62,1.67,1.62h1.13v1.37Z"/>
+      <path class="cls-3" d="M142.45,459.27c-2.15,0-3.06-1.08-3.06-3v-6.91h-1.91V448h1.91v-3.23h1.68V448H144v1.37h-2.88v6.91c0,1.14.42,1.62,1.67,1.62h1.12v1.37Z"/>
+      <path class="cls-3" d="M147.23,448h1.56v2.6c.42-1.74,1.63-2.66,3.67-2.66h.27v1.56h-.33c-2.14,0-3.5,1.41-3.5,4.25v5.52h-1.67Z"/>
+      <path class="cls-3" d="M162.52,459.27v-1.41a4.8,4.8,0,0,1-3.88,1.67c-2.66,0-4.26-1.49-4.26-3.54s1.78-3.52,4.46-3.52a4.94,4.94,0,0,1,3.61,1.41v-2.09a2.61,2.61,0,0,0-2.82-2.73,2.84,2.84,0,0,0-2.81,1.69h-1.54a4.25,4.25,0,0,1,4.38-3c2.79,0,4.46,1.83,4.46,4.05v7.48Zm-.07-3.3c0-1.41-1.67-2.4-3.37-2.4-1.87,0-3.12.88-3.12,2.44s1.3,2.44,3,2.44S162.45,457.42,162.45,456Z"/>
+      <path class="cls-3" d="M169.05,444a1,1,0,1,1-1.06,1A1,1,0,0,1,169.05,444Zm-.84,4h1.68v11.27h-1.68Z"/>
+      <path class="cls-3" d="M174.11,448h1.58v1.92a3.78,3.78,0,0,1,3.77-2.18c2.53,0,4.13,1.72,4.13,4.42v7.11h-1.67V452.4c0-1.91-.86-3.21-2.88-3.21-1.85,0-3.26,1.12-3.26,3.46v6.62h-1.67Z"/>
+      <path class="cls-3" d="M188.54,444a1,1,0,1,1-1,1A1,1,0,0,1,188.54,444Zm-.83,4h1.67v11.27h-1.67Z"/>
+      <path class="cls-3" d="M193.6,448h1.58v1.92a3.78,3.78,0,0,1,3.77-2.18c2.53,0,4.13,1.72,4.13,4.42v7.11h-1.67V452.4c0-1.91-.86-3.21-2.88-3.21-1.85,0-3.26,1.12-3.26,3.46v6.62H193.6Z"/>
+      <path class="cls-3" d="M206.74,460.48h1.64a3.38,3.38,0,0,0,3.24,1.56c2,0,3.3-1.1,3.3-2.82v-2.55a4,4,0,0,1-3.78,2.14,4.39,4.39,0,0,1-4.6-4.6v-1.87a4.36,4.36,0,0,1,4.62-4.6,4.08,4.08,0,0,1,3.83,2.13V448h1.6v11.27c0,2.33-2.11,4.09-5,4.09C209.38,463.36,207.35,462.33,206.74,460.48Zm8.18-6.45v-1.45a3.4,3.4,0,0,0-3.39-3.48,3,3,0,0,0-3.34,3.37v1.61c0,2.11,1.12,3.36,3.28,3.36A3.27,3.27,0,0,0,214.92,454Z"/>
+      <path class="cls-3" d="M41.23,481v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.13,4.77,4.91v1.67H42.84v.39c0,2,1,3.55,3.37,3.55a3,3,0,0,0,3.1-2.18H50.8a4.44,4.44,0,0,1-4.59,3.45A4.65,4.65,0,0,1,41.23,481Zm1.61-1.59h6.51v-.53c0-2-1-3.49-3.21-3.49-2.42,0-3.3,1.6-3.3,3.67Z"/>
+      <path class="cls-3" d="M62.92,485.67H61.17l-3.06-4.49-3.06,4.49H53.29l4-5.72-3.81-5.55h1.74l2.93,4.29L61,474.4h1.74L59,480Z"/>
+      <path class="cls-3" d="M73.26,485.67v-1.41a4.76,4.76,0,0,1-3.87,1.67c-2.66,0-4.27-1.49-4.27-3.54s1.79-3.52,4.47-3.52a4.94,4.94,0,0,1,3.61,1.41v-2.09a2.61,2.61,0,0,0-2.82-2.73,2.84,2.84,0,0,0-2.81,1.69H66a4.25,4.25,0,0,1,4.37-3c2.8,0,4.47,1.83,4.47,4.05v7.48Zm-.06-3.3c0-1.41-1.67-2.4-3.37-2.4-1.87,0-3.12.88-3.12,2.44s1.3,2.44,3,2.44S73.2,483.82,73.2,482.37Z"/>
+      <path class="cls-3" d="M78.85,474.4h1.59v1.92a3.69,3.69,0,0,1,3.71-2.18,3.79,3.79,0,0,1,3.59,2.2,4.48,4.48,0,0,1,4.09-2.2c2.47,0,4,1.76,4,4.42v7.11H94.21V478.8c0-1.91-.95-3.21-2.79-3.21s-3.22,1.12-3.22,3.46v6.62H86.53V478.8c0-1.91-1-3.21-2.79-3.21s-3.22,1.12-3.22,3.46v6.62H78.85Z"/>
+      <path class="cls-3" d="M99.88,489.5V474.4h1.59v1.76a4.19,4.19,0,0,1,3.8-2c3,0,4.71,2.18,4.71,5v1.71c0,2.93-1.82,5-4.66,5a4.21,4.21,0,0,1-3.76-1.93v5.5Zm8.43-10.26c0-2.06-1-3.74-3.32-3.74s-3.43,1.7-3.43,3.88v1.45a3.48,3.48,0,0,0,3.45,3.74c2.13,0,3.3-1.67,3.3-3.74Z"/>
+      <path class="cls-3" d="M113.66,469.61h1.67v16.06h-1.67Z"/>
+      <path class="cls-3" d="M119,481v-1.81c0-3.06,1.74-5.08,4.91-5.08s4.77,2.13,4.77,4.91v1.67h-8.07v.39c0,2,1,3.55,3.36,3.55a3,3,0,0,0,3.11-2.18h1.49a4.44,4.44,0,0,1-4.6,3.45A4.65,4.65,0,0,1,119,481Zm1.61-1.59h6.51v-.53c0-2-1-3.49-3.21-3.49-2.42,0-3.3,1.6-3.3,3.67Z"/>
+      <path class="cls-3" d="M131.76,482.39v-.11h1.52v.07c0,1.41,1.32,2.33,3.17,2.33s3-.68,3-2.13A1.58,1.58,0,0,0,138,481l-3.34-.77c-1.67-.37-2.64-1.38-2.64-2.86,0-2.11,1.78-3.21,4.27-3.21s4.18,1.43,4.18,3.52v.11H139v-.07c0-1.38-1-2.31-2.69-2.31s-2.7.66-2.7,1.92a1.36,1.36,0,0,0,1.19,1.45l3.45.81c1.74.42,2.77,1.28,2.77,2.91,0,2-1.93,3.45-4.57,3.45S131.76,484.57,131.76,482.39Z"/>
+      <path class="cls-3" d="M145.25,483.64a1.08,1.08,0,1,1,0,2.16,1.08,1.08,0,1,1,0-2.16Z"/>
+    </g>
+  </g>
+</svg>

BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/nvidia_smi_p2p_gpu0.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/nvidia_smi_topo_output.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/open_terminal.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/open_terminal_session.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/open_terminal_session_jl.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/paper1.png


BIN
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/images/sync.png


+ 224 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N1/GPT.py

@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+
+# ## Introduction
+# 
+# This example demonstrates how to implement an autoregressive language model
+# using a miniature version of the GPT model.
+# The model consists of a single Transformer block with causal masking
+# in its attention layer.
+# 
+# 
+# **References:**
+# 
+# - [GPT](https://www.semanticscholar.org/paper/Improving-Language-Understanding-by-Generative-Radford/cd18800a0fe0b668a1cc19f2ec95b5003d0a5035)
+# - [GPT-2](https://www.semanticscholar.org/paper/Language-Models-are-Unsupervised-Multitask-Learners-Radford-Wu/9405cc0d6169988371b2755e573cc28650d14dfe)
+# - [GPT-3](https://arxiv.org/abs/2005.14165)
+
+# ## Setup
+import argparse
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
+import numpy as np
+import re
+import string
+import random
+import os
+import sys
+import time
+# Suppress TensorFlow warning and info log messages.
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+### Prepare the data for word-level language modelling
+
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+# ## Implement a Transformer block as a layer
+def causal_attention_mask(batch_size, n_dest, n_src, dtype):
+    """
+    Mask the upper half of the dot-product matrix in self-attention. This
+    prevents the flow of information from future tokens to the current token.
+    The mask has 1's in the lower triangle, counting from the lower right corner.
+    """
+    i = tf.range(n_dest)[:, None]
+    j = tf.range(n_src)
+    m = i >= j - n_src + n_dest
+    mask = tf.cast(m, dtype)
+    mask = tf.reshape(mask, [1, n_dest, n_src])
+    mult = tf.concat(
+        [tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)], 0
+    )
+    return tf.tile(mask, mult)
+
+
+class TransformerBlock(layers.Layer):
+    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
+        super(TransformerBlock, self).__init__()
+        self.att = layers.MultiHeadAttention(num_heads, embed_dim)
+        self.ffn = keras.Sequential(
+            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),]
+        )
+        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
+        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
+        self.dropout1 = layers.Dropout(rate)
+        self.dropout2 = layers.Dropout(rate)
+
+    def call(self, inputs):
+        input_shape = tf.shape(inputs)
+        batch_size = input_shape[0]
+        seq_len = input_shape[1]
+        causal_mask = causal_attention_mask(batch_size, seq_len, seq_len, tf.bool)
+        attention_output = self.att(inputs, inputs, attention_mask=causal_mask)
+        attention_output = self.dropout1(attention_output)
+        out1 = self.layernorm1(inputs + attention_output)
+        ffn_output = self.ffn(out1)
+        ffn_output = self.dropout2(ffn_output)
+        return self.layernorm2(out1 + ffn_output)
+
+
+# ## Implement an embedding layer
+# 
+# Create two separate embedding layers: one for tokens and one for token index
+# (positions).
+
+class TokenAndPositionEmbedding(layers.Layer):
+    def __init__(self, maxlen, vocab_size, embed_dim):
+        super(TokenAndPositionEmbedding, self).__init__()
+        self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
+        self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
+
+    def call(self, x):
+        maxlen = tf.shape(x)[-1]
+        positions = tf.range(start=0, limit=maxlen, delta=1)
+        positions = self.pos_emb(positions)
+        x = self.token_emb(x)
+        return x + positions
+
+
+
+
+
+def create_model():
+    inputs = layers.Input(shape=(maxlen,), dtype=tf.int32)
+    embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
+    x = embedding_layer(inputs)
+    transformer_block = TransformerBlock(embed_dim, num_heads, feed_forward_dim)
+    x = transformer_block(x)
+    outputs = layers.Dense(vocab_size)(x)
+    
+    model = keras.Model(inputs=inputs, outputs=[outputs, x])
+    
+    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+    
+    model.compile(
+        "adam", loss=[loss_fn, None],
+    )  # No loss is attached to the second output (the word embeddings from the transformer block)
+    return model
+
+
+def main():
+    args = parse_args()
+    global g_args
+    g_args = args
+    batch_size = args.batch_size
+    print("Batch size: "+str(batch_size))
+    ### Implement the miniature GPT model
+
+    global vocab_size
+    vocab_size = 20000  # Only consider the top 20k words
+    global maxlen
+    maxlen = 80  # Max sequence size
+    global embed_dim
+    embed_dim = 256  # Embedding size for each token
+    global num_heads
+    num_heads = 2  # Number of attention heads
+    global feed_forward_dim
+    feed_forward_dim = 256  # Hidden layer size in feed forward network inside transformer
+
+    # The dataset is stored as plain-text files in the directory below.
+    # Create a list of all the files.
+    filenames = []
+    directories = [
+        "/workspace/python/source_code/Data/wikitext-2"
+    ]
+    for directory in directories:
+        for f in os.listdir(directory):
+            filenames.append(os.path.join(directory, f))
+
+    # print(f"{len(filenames)} files")
+
+    # Create a dataset from text files
+    random.shuffle(filenames)
+    text_ds = tf.data.TextLineDataset(filenames)
+    text_ds = text_ds.shuffle(buffer_size=256)
+    text_ds = text_ds.batch(batch_size)
+
+
+    def custom_standardization(input_string):
+        """ Remove html line-break tags and handle punctuation """
+        lowercased = tf.strings.lower(input_string)
+        stripped_html = tf.strings.regex_replace(lowercased, "<br />", " ")
+        return tf.strings.regex_replace(stripped_html, f"([{string.punctuation}])", r" \1")
+
+
+    # Create a vectorization layer and adapt it to the text
+    vectorize_layer = TextVectorization(
+        standardize=custom_standardization,
+        max_tokens=vocab_size - 1,
+        output_mode="int",
+        output_sequence_length=maxlen + 1,
+    )
+    vectorize_layer.adapt(text_ds)
+    vocab = vectorize_layer.get_vocabulary()  # To get words back from token indices
+
+
+    def prepare_lm_inputs_labels(text):
+        """
+        Shift word sequences by 1 position so that the target for position (i) is
+        the word at position (i+1). The model will use all words up to position (i)
+        to predict the next word.
+        """
+        text = tf.expand_dims(text, -1)
+        tokenized_sentences = vectorize_layer(text)
+        x = tokenized_sentences[:, :-1]
+        y = tokenized_sentences[:, 1:]
+        return x, y
+
+
+    text_ds = text_ds.map(prepare_lm_inputs_labels)
+    text_ds = text_ds.prefetch(tf.data.experimental.AUTOTUNE)
+
+
+    # ## Implement a Keras callback for reporting epoch time and throughput
+
+    # %%
+    class PrintLR(tf.keras.callbacks.Callback):
+        def __init__(self, total_images=0):
+            super(PrintLR, self).__init__()
+            self.total_images = total_images
+        def on_epoch_begin(self, epoch, logs=None):
+            self.epoch_start_time = time.time()
+        def on_epoch_end(self, epoch, logs=None):
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Units/sec: {}'.format(images_per_sec))
+
+
+    model = create_model()
+    model.fit(text_ds, verbose=1, epochs=3, callbacks=[PrintLR(total_images=44880)])
+
+
+if __name__ == "__main__":
+    main()
+
+
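As a quick sanity check on the causal mask used above, here is a minimal, self-contained sketch (assuming TensorFlow 2.x; the batch size of 1 and sequence length of 4 are illustrative only, not part of the diff) that re-declares causal_attention_mask and prints the mask for one short sequence:

    import tensorflow as tf

    def causal_attention_mask(batch_size, n_dest, n_src, dtype):
        # 1 where a query token may attend (itself and earlier tokens), 0 elsewhere.
        i = tf.range(n_dest)[:, None]
        j = tf.range(n_src)
        mask = tf.cast(i >= j - n_src + n_dest, dtype)
        mask = tf.reshape(mask, [1, n_dest, n_src])
        mult = tf.concat([tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)], 0)
        return tf.tile(mask, mult)

    print(causal_attention_mask(1, 4, 4, tf.int32)[0].numpy())
    # [[1 0 0 0]
    #  [1 1 0 0]
    #  [1 1 1 0]
    #  [1 1 1 1]]

With n_dest == n_src, the condition i >= j - n_src + n_dest reduces to i >= j, i.e. a lower-triangular mask including the diagonal, so each position attends only to itself and earlier tokens.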

+ 103 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N1/cnn_fmnist.py

@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
+
+(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % hvd.rank())
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
+             tf.cast(mnist_labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
+
+mnist_model = tf.keras.Sequential([
+    tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),
+    tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),
+    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
+    tf.keras.layers.Dropout(0.25),
+    tf.keras.layers.Flatten(),
+    tf.keras.layers.Dense(128, activation='relu'),
+    tf.keras.layers.Dropout(0.5),
+    tf.keras.layers.Dense(10, activation='softmax')
+])
+
+# Horovod: the learning rate is commonly scaled by hvd.size(); here a fixed 0.001 is used.
+opt = tf.optimizers.Adam(0.001)
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(opt, backward_passes_per_step=1, average_aggregated_gradients=True)
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+mnist_model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+                    optimizer=opt,
+                    metrics=['accuracy'],
+                    experimental_run_tf_function=False)
+
+class PrintLR(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        super(PrintLR, self).__init__()
+        self.total_images = total_images
+    def on_train_begin(self, logs=None):
+        global seconds1
+        seconds1 = time.time()
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0:
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    # Throughput calculator
+    PrintLR(total_images=len(mnist_labels)),
+
+]
+
+
+# Horovod: write logs on worker 0.
+verbose = 2 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+mnist_model.fit(dataset, steps_per_epoch=len(mnist_labels) // (batch_size*hvd.size()), callbacks=callbacks, epochs=4, verbose=verbose)
+
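For context, a hedged sketch of how a Horovod script like this is typically launched, and how the per-epoch step count scales with the number of workers (the horovodrun invocation and the worker count of 2 are illustrative assumptions, not part of the diff):

    # Launch one process per GPU, e.g. on a machine with two visible GPUs:
    #   horovodrun -np 2 python cnn_fmnist.py --batch-size 256

    # Each worker consumes batch_size samples per step, so the per-epoch
    # step count shrinks by the world size, as in the fit() call above.
    num_images = 60000                  # MNIST training-set size (len(mnist_labels))
    batch_size, world_size = 256, 2     # per-worker batch size and hvd.size()
    steps_per_epoch = num_images // (batch_size * world_size)
    print(steps_per_epoch)              # 117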

+ 151 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/Common/exception.h

@@ -0,0 +1,151 @@
+/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of NVIDIA CORPORATION nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* CUDA Utility Library */
+#ifndef COMMON_EXCEPTION_H_
+#define COMMON_EXCEPTION_H_
+
+// includes, system
+#include <stdlib.h>
+#include <exception>
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+//! Exception wrapper.
+//! @param Std_Exception Exception out of namespace std for easy typing.
+template <class Std_Exception>
+class Exception : public Std_Exception {
+ public:
+  //! @brief Static construction interface
+  //! @return Always throws (Located_Exception<Exception>)
+  //! @param file file in which the Exception occurs
+  //! @param line line in which the Exception occurs
+  //! @param detailed details on the code fragment causing the Exception
+  static void throw_it(const char *file, const int line,
+                       const char *detailed = "-");
+
+  //! Static construction interface
+  //! @return Always throws (Located_Exception<Exception>)
+  //! @param file file in which the Exception occurs
+  //! @param line line in which the Exception occurs
+  //! @param detailed details on the code fragment causing the Exception
+  static void throw_it(const char *file, const int line,
+                       const std::string &detailed);
+
+  //! Destructor
+  virtual ~Exception() throw();
+
+ private:
+  //! Constructor, default (private)
+  Exception();
+
+  //! Constructor, standard
+  //! @param str string returned by what()
+  explicit Exception(const std::string &str);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+//! Exception handler function for arbitrary exceptions
+//! @param ex exception to handle
+////////////////////////////////////////////////////////////////////////////////
+template <class Exception_Typ>
+inline void handleException(const Exception_Typ &ex) {
+  std::cerr << ex.what() << std::endl;
+
+  exit(EXIT_FAILURE);
+}
+
+//! Convenience macros
+
+//! Exception caused by dynamic program behavior, e.g. file does not exist
+#define RUNTIME_EXCEPTION(msg) \
+  Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
+
+//! Logic exception in program, e.g. an assert failed
+#define LOGIC_EXCEPTION(msg) \
+  Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
+
+//! Out of range exception
+#define RANGE_EXCEPTION(msg) \
+  Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
+
+////////////////////////////////////////////////////////////////////////////////
+//! Implementation
+
+// includes, system
+#include <sstream>
+
+////////////////////////////////////////////////////////////////////////////////
+//! Static construction interface.
+//! @param  Exception causing code fragment (file and line) and detailed infos.
+////////////////////////////////////////////////////////////////////////////////
+/*static*/ template <class Std_Exception>
+void Exception<Std_Exception>::throw_it(const char *file, const int line,
+                                        const char *detailed) {
+  std::stringstream s;
+
+  // Quite heavyweight, but exceptions are not meant for
+  // performance-critical / release builds
+  s << "Exception in file '" << file << "' in line " << line << "\n"
+    << "Detailed description: " << detailed << "\n";
+
+  throw Exception(s.str());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Static construction interface.
+//! @param  Exception causing code fragment (file and line) and detailed infos.
+////////////////////////////////////////////////////////////////////////////////
+/*static*/ template <class Std_Exception>
+void Exception<Std_Exception>::throw_it(const char *file, const int line,
+                                        const std::string &msg) {
+  throw_it(file, line, msg.c_str());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Constructor, default (private).
+////////////////////////////////////////////////////////////////////////////////
+template <class Std_Exception>
+Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Constructor, standard (private).
+//! String returned by what().
+////////////////////////////////////////////////////////////////////////////////
+template <class Std_Exception>
+Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Destructor
+////////////////////////////////////////////////////////////////////////////////
+template <class Std_Exception>
+Exception<Std_Exception>::~Exception() throw() {}
+
+  // functions, exported
+
+#endif  // COMMON_EXCEPTION_H_

+ 967 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/Common/helper_cuda.h

@@ -0,0 +1,967 @@
+/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of NVIDIA CORPORATION nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+////////////////////////////////////////////////////////////////////////////////
+// These are CUDA Helper functions for initialization and error checking
+
+#ifndef COMMON_HELPER_CUDA_H_
+#define COMMON_HELPER_CUDA_H_
+
+#pragma once
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <helper_string.h>
+
+#ifndef EXIT_WAIVED
+#define EXIT_WAIVED 2
+#endif
+
+// Note: your SDK sample is required to include the proper header files;
+// please refer to the CUDA examples for the needed CUDA headers, which may
+// change depending on which CUDA functions are used.
+
+// CUDA Runtime error messages
+#ifdef __DRIVER_TYPES_H__
+static const char *_cudaGetErrorEnum(cudaError_t error) {
+  return cudaGetErrorName(error);
+}
+#endif
+
+#ifdef CUDA_DRIVER_API
+// CUDA Driver API errors
+static const char *_cudaGetErrorEnum(CUresult error) {
+  static char unknown[] = "<unknown>";
+  const char *ret = NULL;
+  cuGetErrorName(error, &ret);
+  return ret ? ret : unknown;
+}
+#endif
+
+#ifdef CUBLAS_API_H_
+// cuBLAS API errors
+static const char *_cudaGetErrorEnum(cublasStatus_t error) {
+  switch (error) {
+    case CUBLAS_STATUS_SUCCESS:
+      return "CUBLAS_STATUS_SUCCESS";
+
+    case CUBLAS_STATUS_NOT_INITIALIZED:
+      return "CUBLAS_STATUS_NOT_INITIALIZED";
+
+    case CUBLAS_STATUS_ALLOC_FAILED:
+      return "CUBLAS_STATUS_ALLOC_FAILED";
+
+    case CUBLAS_STATUS_INVALID_VALUE:
+      return "CUBLAS_STATUS_INVALID_VALUE";
+
+    case CUBLAS_STATUS_ARCH_MISMATCH:
+      return "CUBLAS_STATUS_ARCH_MISMATCH";
+
+    case CUBLAS_STATUS_MAPPING_ERROR:
+      return "CUBLAS_STATUS_MAPPING_ERROR";
+
+    case CUBLAS_STATUS_EXECUTION_FAILED:
+      return "CUBLAS_STATUS_EXECUTION_FAILED";
+
+    case CUBLAS_STATUS_INTERNAL_ERROR:
+      return "CUBLAS_STATUS_INTERNAL_ERROR";
+
+    case CUBLAS_STATUS_NOT_SUPPORTED:
+      return "CUBLAS_STATUS_NOT_SUPPORTED";
+
+    case CUBLAS_STATUS_LICENSE_ERROR:
+      return "CUBLAS_STATUS_LICENSE_ERROR";
+  }
+
+  return "<unknown>";
+}
+#endif
+
+#ifdef _CUFFT_H_
+// cuFFT API errors
+static const char *_cudaGetErrorEnum(cufftResult error) {
+  switch (error) {
+    case CUFFT_SUCCESS:
+      return "CUFFT_SUCCESS";
+
+    case CUFFT_INVALID_PLAN:
+      return "CUFFT_INVALID_PLAN";
+
+    case CUFFT_ALLOC_FAILED:
+      return "CUFFT_ALLOC_FAILED";
+
+    case CUFFT_INVALID_TYPE:
+      return "CUFFT_INVALID_TYPE";
+
+    case CUFFT_INVALID_VALUE:
+      return "CUFFT_INVALID_VALUE";
+
+    case CUFFT_INTERNAL_ERROR:
+      return "CUFFT_INTERNAL_ERROR";
+
+    case CUFFT_EXEC_FAILED:
+      return "CUFFT_EXEC_FAILED";
+
+    case CUFFT_SETUP_FAILED:
+      return "CUFFT_SETUP_FAILED";
+
+    case CUFFT_INVALID_SIZE:
+      return "CUFFT_INVALID_SIZE";
+
+    case CUFFT_UNALIGNED_DATA:
+      return "CUFFT_UNALIGNED_DATA";
+
+    case CUFFT_INCOMPLETE_PARAMETER_LIST:
+      return "CUFFT_INCOMPLETE_PARAMETER_LIST";
+
+    case CUFFT_INVALID_DEVICE:
+      return "CUFFT_INVALID_DEVICE";
+
+    case CUFFT_PARSE_ERROR:
+      return "CUFFT_PARSE_ERROR";
+
+    case CUFFT_NO_WORKSPACE:
+      return "CUFFT_NO_WORKSPACE";
+
+    case CUFFT_NOT_IMPLEMENTED:
+      return "CUFFT_NOT_IMPLEMENTED";
+
+    case CUFFT_LICENSE_ERROR:
+      return "CUFFT_LICENSE_ERROR";
+
+    case CUFFT_NOT_SUPPORTED:
+      return "CUFFT_NOT_SUPPORTED";
+  }
+
+  return "<unknown>";
+}
+#endif
+
+#ifdef CUSPARSEAPI
+// cuSPARSE API errors
+static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
+  switch (error) {
+    case CUSPARSE_STATUS_SUCCESS:
+      return "CUSPARSE_STATUS_SUCCESS";
+
+    case CUSPARSE_STATUS_NOT_INITIALIZED:
+      return "CUSPARSE_STATUS_NOT_INITIALIZED";
+
+    case CUSPARSE_STATUS_ALLOC_FAILED:
+      return "CUSPARSE_STATUS_ALLOC_FAILED";
+
+    case CUSPARSE_STATUS_INVALID_VALUE:
+      return "CUSPARSE_STATUS_INVALID_VALUE";
+
+    case CUSPARSE_STATUS_ARCH_MISMATCH:
+      return "CUSPARSE_STATUS_ARCH_MISMATCH";
+
+    case CUSPARSE_STATUS_MAPPING_ERROR:
+      return "CUSPARSE_STATUS_MAPPING_ERROR";
+
+    case CUSPARSE_STATUS_EXECUTION_FAILED:
+      return "CUSPARSE_STATUS_EXECUTION_FAILED";
+
+    case CUSPARSE_STATUS_INTERNAL_ERROR:
+      return "CUSPARSE_STATUS_INTERNAL_ERROR";
+
+    case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
+      return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
+  }
+
+  return "<unknown>";
+}
+#endif
+
+#ifdef CUSOLVER_COMMON_H_
+// cuSOLVER API errors
+static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
+  switch (error) {
+    case CUSOLVER_STATUS_SUCCESS:
+      return "CUSOLVER_STATUS_SUCCESS";
+    case CUSOLVER_STATUS_NOT_INITIALIZED:
+      return "CUSOLVER_STATUS_NOT_INITIALIZED";
+    case CUSOLVER_STATUS_ALLOC_FAILED:
+      return "CUSOLVER_STATUS_ALLOC_FAILED";
+    case CUSOLVER_STATUS_INVALID_VALUE:
+      return "CUSOLVER_STATUS_INVALID_VALUE";
+    case CUSOLVER_STATUS_ARCH_MISMATCH:
+      return "CUSOLVER_STATUS_ARCH_MISMATCH";
+    case CUSOLVER_STATUS_MAPPING_ERROR:
+      return "CUSOLVER_STATUS_MAPPING_ERROR";
+    case CUSOLVER_STATUS_EXECUTION_FAILED:
+      return "CUSOLVER_STATUS_EXECUTION_FAILED";
+    case CUSOLVER_STATUS_INTERNAL_ERROR:
+      return "CUSOLVER_STATUS_INTERNAL_ERROR";
+    case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
+      return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
+    case CUSOLVER_STATUS_NOT_SUPPORTED:
+      return "CUSOLVER_STATUS_NOT_SUPPORTED ";
+    case CUSOLVER_STATUS_ZERO_PIVOT:
+      return "CUSOLVER_STATUS_ZERO_PIVOT";
+    case CUSOLVER_STATUS_INVALID_LICENSE:
+      return "CUSOLVER_STATUS_INVALID_LICENSE";
+  }
+
+  return "<unknown>";
+}
+#endif
+
+#ifdef CURAND_H_
+// cuRAND API errors
+static const char *_cudaGetErrorEnum(curandStatus_t error) {
+  switch (error) {
+    case CURAND_STATUS_SUCCESS:
+      return "CURAND_STATUS_SUCCESS";
+
+    case CURAND_STATUS_VERSION_MISMATCH:
+      return "CURAND_STATUS_VERSION_MISMATCH";
+
+    case CURAND_STATUS_NOT_INITIALIZED:
+      return "CURAND_STATUS_NOT_INITIALIZED";
+
+    case CURAND_STATUS_ALLOCATION_FAILED:
+      return "CURAND_STATUS_ALLOCATION_FAILED";
+
+    case CURAND_STATUS_TYPE_ERROR:
+      return "CURAND_STATUS_TYPE_ERROR";
+
+    case CURAND_STATUS_OUT_OF_RANGE:
+      return "CURAND_STATUS_OUT_OF_RANGE";
+
+    case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
+      return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
+
+    case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
+      return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
+
+    case CURAND_STATUS_LAUNCH_FAILURE:
+      return "CURAND_STATUS_LAUNCH_FAILURE";
+
+    case CURAND_STATUS_PREEXISTING_FAILURE:
+      return "CURAND_STATUS_PREEXISTING_FAILURE";
+
+    case CURAND_STATUS_INITIALIZATION_FAILED:
+      return "CURAND_STATUS_INITIALIZATION_FAILED";
+
+    case CURAND_STATUS_ARCH_MISMATCH:
+      return "CURAND_STATUS_ARCH_MISMATCH";
+
+    case CURAND_STATUS_INTERNAL_ERROR:
+      return "CURAND_STATUS_INTERNAL_ERROR";
+  }
+
+  return "<unknown>";
+}
+#endif
+
+#ifdef NVJPEGAPI
+// nvJPEG API errors
+static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
+  switch (error) {
+    case NVJPEG_STATUS_SUCCESS:
+      return "NVJPEG_STATUS_SUCCESS";
+
+    case NVJPEG_STATUS_NOT_INITIALIZED:
+      return "NVJPEG_STATUS_NOT_INITIALIZED";
+
+    case NVJPEG_STATUS_INVALID_PARAMETER:
+      return "NVJPEG_STATUS_INVALID_PARAMETER";
+
+    case NVJPEG_STATUS_BAD_JPEG:
+      return "NVJPEG_STATUS_BAD_JPEG";
+
+    case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
+      return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
+
+    case NVJPEG_STATUS_ALLOCATOR_FAILURE:
+      return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
+
+    case NVJPEG_STATUS_EXECUTION_FAILED:
+      return "NVJPEG_STATUS_EXECUTION_FAILED";
+
+    case NVJPEG_STATUS_ARCH_MISMATCH:
+      return "NVJPEG_STATUS_ARCH_MISMATCH";
+
+    case NVJPEG_STATUS_INTERNAL_ERROR:
+      return "NVJPEG_STATUS_INTERNAL_ERROR";
+  }
+
+  return "<unknown>";
+}
+#endif
+
+#ifdef NV_NPPIDEFS_H
+// NPP API errors
+static const char *_cudaGetErrorEnum(NppStatus error) {
+  switch (error) {
+    case NPP_NOT_SUPPORTED_MODE_ERROR:
+      return "NPP_NOT_SUPPORTED_MODE_ERROR";
+
+    case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
+      return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
+
+    case NPP_RESIZE_NO_OPERATION_ERROR:
+      return "NPP_RESIZE_NO_OPERATION_ERROR";
+
+    case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
+      return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
+
+#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
+
+    case NPP_BAD_ARG_ERROR:
+      return "NPP_BAD_ARGUMENT_ERROR";
+
+    case NPP_COEFF_ERROR:
+      return "NPP_COEFFICIENT_ERROR";
+
+    case NPP_RECT_ERROR:
+      return "NPP_RECTANGLE_ERROR";
+
+    case NPP_QUAD_ERROR:
+      return "NPP_QUADRANGLE_ERROR";
+
+    case NPP_MEM_ALLOC_ERR:
+      return "NPP_MEMORY_ALLOCATION_ERROR";
+
+    case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
+      return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
+
+    case NPP_INVALID_INPUT:
+      return "NPP_INVALID_INPUT";
+
+    case NPP_POINTER_ERROR:
+      return "NPP_POINTER_ERROR";
+
+    case NPP_WARNING:
+      return "NPP_WARNING";
+
+    case NPP_ODD_ROI_WARNING:
+      return "NPP_ODD_ROI_WARNING";
+#else
+
+    // These are for CUDA 5.5 or higher
+    case NPP_BAD_ARGUMENT_ERROR:
+      return "NPP_BAD_ARGUMENT_ERROR";
+
+    case NPP_COEFFICIENT_ERROR:
+      return "NPP_COEFFICIENT_ERROR";
+
+    case NPP_RECTANGLE_ERROR:
+      return "NPP_RECTANGLE_ERROR";
+
+    case NPP_QUADRANGLE_ERROR:
+      return "NPP_QUADRANGLE_ERROR";
+
+    case NPP_MEMORY_ALLOCATION_ERR:
+      return "NPP_MEMORY_ALLOCATION_ERROR";
+
+    case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
+      return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
+
+    case NPP_INVALID_HOST_POINTER_ERROR:
+      return "NPP_INVALID_HOST_POINTER_ERROR";
+
+    case NPP_INVALID_DEVICE_POINTER_ERROR:
+      return "NPP_INVALID_DEVICE_POINTER_ERROR";
+#endif
+
+    case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
+      return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
+
+    case NPP_TEXTURE_BIND_ERROR:
+      return "NPP_TEXTURE_BIND_ERROR";
+
+    case NPP_WRONG_INTERSECTION_ROI_ERROR:
+      return "NPP_WRONG_INTERSECTION_ROI_ERROR";
+
+    case NPP_NOT_EVEN_STEP_ERROR:
+      return "NPP_NOT_EVEN_STEP_ERROR";
+
+    case NPP_INTERPOLATION_ERROR:
+      return "NPP_INTERPOLATION_ERROR";
+
+    case NPP_RESIZE_FACTOR_ERROR:
+      return "NPP_RESIZE_FACTOR_ERROR";
+
+    case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
+      return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
+
+#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
+
+    case NPP_MEMFREE_ERR:
+      return "NPP_MEMFREE_ERR";
+
+    case NPP_MEMSET_ERR:
+      return "NPP_MEMSET_ERR";
+
+    case NPP_MEMCPY_ERR:
+      return "NPP_MEMCPY_ERROR";
+
+    case NPP_MIRROR_FLIP_ERR:
+      return "NPP_MIRROR_FLIP_ERR";
+#else
+
+    case NPP_MEMFREE_ERROR:
+      return "NPP_MEMFREE_ERROR";
+
+    case NPP_MEMSET_ERROR:
+      return "NPP_MEMSET_ERROR";
+
+    case NPP_MEMCPY_ERROR:
+      return "NPP_MEMCPY_ERROR";
+
+    case NPP_MIRROR_FLIP_ERROR:
+      return "NPP_MIRROR_FLIP_ERROR";
+#endif
+
+    case NPP_ALIGNMENT_ERROR:
+      return "NPP_ALIGNMENT_ERROR";
+
+    case NPP_STEP_ERROR:
+      return "NPP_STEP_ERROR";
+
+    case NPP_SIZE_ERROR:
+      return "NPP_SIZE_ERROR";
+
+    case NPP_NULL_POINTER_ERROR:
+      return "NPP_NULL_POINTER_ERROR";
+
+    case NPP_CUDA_KERNEL_EXECUTION_ERROR:
+      return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
+
+    case NPP_NOT_IMPLEMENTED_ERROR:
+      return "NPP_NOT_IMPLEMENTED_ERROR";
+
+    case NPP_ERROR:
+      return "NPP_ERROR";
+
+    case NPP_SUCCESS:
+      return "NPP_SUCCESS";
+
+    case NPP_WRONG_INTERSECTION_QUAD_WARNING:
+      return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
+
+    case NPP_MISALIGNED_DST_ROI_WARNING:
+      return "NPP_MISALIGNED_DST_ROI_WARNING";
+
+    case NPP_AFFINE_QUAD_INCORRECT_WARNING:
+      return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
+
+    case NPP_DOUBLE_SIZE_WARNING:
+      return "NPP_DOUBLE_SIZE_WARNING";
+
+    case NPP_WRONG_INTERSECTION_ROI_WARNING:
+      return "NPP_WRONG_INTERSECTION_ROI_WARNING";
+
+#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
+    /* These are 6.0 or higher */
+    case NPP_LUT_PALETTE_BITSIZE_ERROR:
+      return "NPP_LUT_PALETTE_BITSIZE_ERROR";
+
+    case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
+      return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
+
+    case NPP_QUALITY_INDEX_ERROR:
+      return "NPP_QUALITY_INDEX_ERROR";
+
+    case NPP_CHANNEL_ORDER_ERROR:
+      return "NPP_CHANNEL_ORDER_ERROR";
+
+    case NPP_ZERO_MASK_VALUE_ERROR:
+      return "NPP_ZERO_MASK_VALUE_ERROR";
+
+    case NPP_NUMBER_OF_CHANNELS_ERROR:
+      return "NPP_NUMBER_OF_CHANNELS_ERROR";
+
+    case NPP_COI_ERROR:
+      return "NPP_COI_ERROR";
+
+    case NPP_DIVISOR_ERROR:
+      return "NPP_DIVISOR_ERROR";
+
+    case NPP_CHANNEL_ERROR:
+      return "NPP_CHANNEL_ERROR";
+
+    case NPP_STRIDE_ERROR:
+      return "NPP_STRIDE_ERROR";
+
+    case NPP_ANCHOR_ERROR:
+      return "NPP_ANCHOR_ERROR";
+
+    case NPP_MASK_SIZE_ERROR:
+      return "NPP_MASK_SIZE_ERROR";
+
+    case NPP_MOMENT_00_ZERO_ERROR:
+      return "NPP_MOMENT_00_ZERO_ERROR";
+
+    case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
+      return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
+
+    case NPP_THRESHOLD_ERROR:
+      return "NPP_THRESHOLD_ERROR";
+
+    case NPP_CONTEXT_MATCH_ERROR:
+      return "NPP_CONTEXT_MATCH_ERROR";
+
+    case NPP_FFT_FLAG_ERROR:
+      return "NPP_FFT_FLAG_ERROR";
+
+    case NPP_FFT_ORDER_ERROR:
+      return "NPP_FFT_ORDER_ERROR";
+
+    case NPP_SCALE_RANGE_ERROR:
+      return "NPP_SCALE_RANGE_ERROR";
+
+    case NPP_DATA_TYPE_ERROR:
+      return "NPP_DATA_TYPE_ERROR";
+
+    case NPP_OUT_OFF_RANGE_ERROR:
+      return "NPP_OUT_OFF_RANGE_ERROR";
+
+    case NPP_DIVIDE_BY_ZERO_ERROR:
+      return "NPP_DIVIDE_BY_ZERO_ERROR";
+
+    case NPP_RANGE_ERROR:
+      return "NPP_RANGE_ERROR";
+
+    case NPP_NO_MEMORY_ERROR:
+      return "NPP_NO_MEMORY_ERROR";
+
+    case NPP_ERROR_RESERVED:
+      return "NPP_ERROR_RESERVED";
+
+    case NPP_NO_OPERATION_WARNING:
+      return "NPP_NO_OPERATION_WARNING";
+
+    case NPP_DIVIDE_BY_ZERO_WARNING:
+      return "NPP_DIVIDE_BY_ZERO_WARNING";
+#endif
+
+#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
+    /* These are 7.0 or higher */
+    case NPP_OVERFLOW_ERROR:
+      return "NPP_OVERFLOW_ERROR";
+
+    case NPP_CORRUPTED_DATA_ERROR:
+      return "NPP_CORRUPTED_DATA_ERROR";
+#endif
+  }
+
+  return "<unknown>";
+}
+#endif
+
+template <typename T>
+void check(T result, char const *const func, const char *const file,
+           int const line) {
+  if (result) {
+    fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
+            static_cast<unsigned int>(result), _cudaGetErrorEnum(result), func);
+    exit(EXIT_FAILURE);
+  }
+}
+
+#ifdef __DRIVER_TYPES_H__
+// This will output the proper CUDA error strings in the event
+// that a CUDA host call returns an error
+#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
+
+// This will output the proper error string when calling cudaGetLastError
+#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
+
+inline void __getLastCudaError(const char *errorMessage, const char *file,
+                               const int line) {
+  cudaError_t err = cudaGetLastError();
+
+  if (cudaSuccess != err) {
+    fprintf(stderr,
+            "%s(%i) : getLastCudaError() CUDA error :"
+            " %s : (%d) %s.\n",
+            file, line, errorMessage, static_cast<int>(err),
+            cudaGetErrorString(err));
+    exit(EXIT_FAILURE);
+  }
+}
+
+// This will only print the proper error string when calling cudaGetLastError,
+// but will not exit the program in case an error is detected.
+#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
+
+inline void __printLastCudaError(const char *errorMessage, const char *file,
+                                 const int line) {
+  cudaError_t err = cudaGetLastError();
+
+  if (cudaSuccess != err) {
+    fprintf(stderr,
+            "%s(%i) : getLastCudaError() CUDA error :"
+            " %s : (%d) %s.\n",
+            file, line, errorMessage, static_cast<int>(err),
+            cudaGetErrorString(err));
+  }
+}
+#endif
+
+#ifndef MAX
+#define MAX(a, b) (a > b ? a : b)
+#endif
+
+// Float To Int conversion
+inline int ftoi(float value) {
+  return (value >= 0 ? static_cast<int>(value + 0.5)
+                     : static_cast<int>(value - 0.5));
+}
+
+// Beginning of GPU Architecture definitions
+inline int _ConvertSMVer2Cores(int major, int minor) {
+  // Defines for GPU Architecture types (using the SM version to determine
+  // the # of cores per SM)
+  typedef struct {
+    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
+    // and m = SM minor version
+    int Cores;
+  } sSMtoCores;
+
+  sSMtoCores nGpuArchCoresPerSM[] = {
+      {0x30, 192},
+      {0x32, 192},
+      {0x35, 192},
+      {0x37, 192},
+      {0x50, 128},
+      {0x52, 128},
+      {0x53, 128},
+      {0x60,  64},
+      {0x61, 128},
+      {0x62, 128},
+      {0x70,  64},
+      {0x72,  64},
+      {0x75,  64},
+      {0x80,  64},
+      {0x86, 128},
+      {-1, -1}};
+
+  int index = 0;
+
+  while (nGpuArchCoresPerSM[index].SM != -1) {
+    if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
+      return nGpuArchCoresPerSM[index].Cores;
+    }
+
+    index++;
+  }
+
+  // If we don't find the value, default to the last known configuration
+  // so the sample can still run
+  printf(
+      "MapSMtoCores for SM %d.%d is undefined."
+      "  Default to use %d Cores/SM\n",
+      major, minor, nGpuArchCoresPerSM[index - 1].Cores);
+  return nGpuArchCoresPerSM[index - 1].Cores;
+}
+
+inline const char* _ConvertSMVer2ArchName(int major, int minor) {
+  // Defines for GPU Architecture types (using the SM version to determine
+  // the GPU Arch name)
+  typedef struct {
+    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
+    // and m = SM minor version
+    const char* name;
+  } sSMtoArchName;
+
+  sSMtoArchName nGpuArchNameSM[] = {
+      {0x30, "Kepler"},
+      {0x32, "Kepler"},
+      {0x35, "Kepler"},
+      {0x37, "Kepler"},
+      {0x50, "Maxwell"},
+      {0x52, "Maxwell"},
+      {0x53, "Maxwell"},
+      {0x60, "Pascal"},
+      {0x61, "Pascal"},
+      {0x62, "Pascal"},
+      {0x70, "Volta"},
+      {0x72, "Xavier"},
+      {0x75, "Turing"},
+      {0x80, "Ampere"},
+      {0x86, "Ampere"},
+      {-1, "Graphics Device"}};
+
+  int index = 0;
+
+  while (nGpuArchNameSM[index].SM != -1) {
+    if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
+      return nGpuArchNameSM[index].name;
+    }
+
+    index++;
+  }
+
+  // If we don't find the value, default to the last known configuration
+  // so the sample can still run
+  printf(
+      "MapSMtoArchName for SM %d.%d is undefined."
+      "  Default to use %s\n",
+      major, minor, nGpuArchNameSM[index - 1].name);
+  return nGpuArchNameSM[index - 1].name;
+}
+  // end of GPU Architecture definitions
+
+#ifdef __CUDA_RUNTIME_H__
+// General GPU Device CUDA Initialization
+inline int gpuDeviceInit(int devID) {
+  int device_count;
+  checkCudaErrors(cudaGetDeviceCount(&device_count));
+
+  if (device_count == 0) {
+    fprintf(stderr,
+            "gpuDeviceInit() CUDA error: "
+            "no devices supporting CUDA.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  if (devID < 0) {
+    devID = 0;
+  }
+
+  if (devID > device_count - 1) {
+    fprintf(stderr, "\n");
+    fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
+            device_count);
+    fprintf(stderr,
+            ">> gpuDeviceInit (-device=%d) is not a valid"
+            " GPU device. <<\n",
+            devID);
+    fprintf(stderr, "\n");
+    return -devID;
+  }
+
+  int computeMode = -1, major = 0, minor = 0;
+  checkCudaErrors(cudaDeviceGetAttribute(&computeMode, cudaDevAttrComputeMode, devID));
+  checkCudaErrors(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, devID));
+  checkCudaErrors(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, devID));
+  if (computeMode == cudaComputeModeProhibited) {
+    fprintf(stderr,
+            "Error: device is running in <Compute Mode "
+            "Prohibited>, no threads can use cudaSetDevice().\n");
+    return -1;
+  }
+
+  if (major < 1) {
+    fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  checkCudaErrors(cudaSetDevice(devID));
+  printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
+
+  return devID;
+}
+
+// This function returns the best GPU (with maximum GFLOPS)
+inline int gpuGetMaxGflopsDeviceId() {
+  int current_device = 0, sm_per_multiproc = 0;
+  int max_perf_device = 0;
+  int device_count = 0;
+  int devices_prohibited = 0;
+
+  uint64_t max_compute_perf = 0;
+  checkCudaErrors(cudaGetDeviceCount(&device_count));
+
+  if (device_count == 0) {
+    fprintf(stderr,
+            "gpuGetMaxGflopsDeviceId() CUDA error:"
+            " no devices supporting CUDA.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  // Find the best CUDA capable GPU device
+  current_device = 0;
+
+  while (current_device < device_count) {
+    int computeMode = -1, major = 0, minor = 0;
+    checkCudaErrors(cudaDeviceGetAttribute(&computeMode, cudaDevAttrComputeMode, current_device));
+    checkCudaErrors(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, current_device));
+    checkCudaErrors(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, current_device));
+
+    // If this GPU is not running on Compute Mode prohibited,
+    // then we can add it to the list
+    if (computeMode != cudaComputeModeProhibited) {
+      if (major == 9999 && minor == 9999) {
+        sm_per_multiproc = 1;
+      } else {
+        sm_per_multiproc =
+            _ConvertSMVer2Cores(major,  minor);
+      }
+      int multiProcessorCount = 0, clockRate = 0;
+      checkCudaErrors(cudaDeviceGetAttribute(&multiProcessorCount, cudaDevAttrMultiProcessorCount, current_device));
+      cudaError_t result = cudaDeviceGetAttribute(&clockRate, cudaDevAttrClockRate, current_device);
+      if (result != cudaSuccess) {
+        // If the cudaDevAttrClockRate attribute is not supported, set
+        // clockRate to 1 so the GPU with the most SMs and CUDA cores is chosen.
+        if(result == cudaErrorInvalidValue) {
+          clockRate = 1;
+        }
+        else {
+          fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \n", __FILE__, __LINE__,
+            static_cast<unsigned int>(result), _cudaGetErrorEnum(result));
+          exit(EXIT_FAILURE);
+        }
+      }
+      uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
+
+      if (compute_perf > max_compute_perf) {
+        max_compute_perf = compute_perf;
+        max_perf_device = current_device;
+      }
+    } else {
+      devices_prohibited++;
+    }
+
+    ++current_device;
+  }
+
+  if (devices_prohibited == device_count) {
+    fprintf(stderr,
+            "gpuGetMaxGflopsDeviceId() CUDA error:"
+            " all devices have compute mode prohibited.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  return max_perf_device;
+}
+
+// Initialization code to find the best CUDA Device
+inline int findCudaDevice(int argc, const char **argv) {
+  int devID = 0;
+
+  // If the command-line has a device number specified, use it
+  if (checkCmdLineFlag(argc, argv, "device")) {
+    devID = getCmdLineArgumentInt(argc, argv, "device=");
+
+    if (devID < 0) {
+      printf("Invalid command line parameter\n ");
+      exit(EXIT_FAILURE);
+    } else {
+      devID = gpuDeviceInit(devID);
+
+      if (devID < 0) {
+        printf("exiting...\n");
+        exit(EXIT_FAILURE);
+      }
+    }
+  } else {
+    // Otherwise pick the device with highest Gflops/s
+    devID = gpuGetMaxGflopsDeviceId();
+    checkCudaErrors(cudaSetDevice(devID));
+    int major = 0, minor = 0;
+    checkCudaErrors(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, devID));
+    checkCudaErrors(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, devID));
+    printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
+           devID, _ConvertSMVer2ArchName(major, minor), major, minor);
+
+  }
+
+  return devID;
+}
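+// Usage sketch (hypothetical main(), not part of the sample): a program
+// typically defers all device selection to this helper, which honors an
+// optional "--device=N" flag and otherwise picks the max-GFLOPS GPU:
+//
+//   int devID = findCudaDevice(argc, (const char **)argv);
+//   // ... run kernels on devID ...
+//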
+
+inline int findIntegratedGPU() {
+  int current_device = 0;
+  int device_count = 0;
+  int devices_prohibited = 0;
+
+  checkCudaErrors(cudaGetDeviceCount(&device_count));
+
+  if (device_count == 0) {
+    fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  // Find the integrated GPU which is compute capable
+  while (current_device < device_count) {
+    int computeMode = -1, integrated = -1;
+    checkCudaErrors(cudaDeviceGetAttribute(&computeMode, cudaDevAttrComputeMode, current_device));
+    checkCudaErrors(cudaDeviceGetAttribute(&integrated, cudaDevAttrIntegrated, current_device));
+    // If GPU is integrated and is not running on Compute Mode prohibited,
+    // then cuda can map to GLES resource
+    if (integrated && (computeMode != cudaComputeModeProhibited)) {
+      checkCudaErrors(cudaSetDevice(current_device));
+
+      int major = 0, minor = 0;
+      checkCudaErrors(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, current_device));
+      checkCudaErrors(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, current_device));
+      printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
+             current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
+
+      return current_device;
+    } else {
+      devices_prohibited++;
+    }
+
+    current_device++;
+  }
+
+  if (devices_prohibited == device_count) {
+    fprintf(stderr,
+            "CUDA error:"
+            " No GLES-CUDA Interop capable GPU found.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  return -1;
+}
+
+// General check for CUDA GPU SM Capabilities
+inline bool checkCudaCapabilities(int major_version, int minor_version) {
+  int dev;
+  int major = 0, minor = 0;
+
+  checkCudaErrors(cudaGetDevice(&dev));
+  checkCudaErrors(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, dev));
+  checkCudaErrors(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, dev));
+
+  if ((major > major_version) ||
+      (major == major_version &&
+       minor >= minor_version)) {
+    printf("  Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
+           _ConvertSMVer2ArchName(major, minor), major, minor);
+    return true;
+  } else {
+    printf(
+        "  No GPU device was found that can support "
+        "CUDA compute capability %d.%d.\n",
+        major_version, minor_version);
+    return false;
+  }
+}
+#endif
+
+// end of CUDA Helper Functions
+
+#endif  // COMMON_HELPER_CUDA_H_
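A minimal sketch of how the capability check above combines with device selection. The SM 7.0 threshold and the `main` wrapper are illustrative assumptions, not part of this diff; `EXIT_WAIVED` (2) comes from the helper headers, which are assumed to be on the include path:

    #include <helper_cuda.h>

    int main(int argc, char **argv) {
      // Pick a device ("--device=N" if given, max-GFLOPS fallback otherwise).
      findCudaDevice(argc, (const char **)argv);
      // Waive the sample on GPUs below the assumed SM 7.0 requirement.
      if (!checkCudaCapabilities(7, 0)) {
        return EXIT_WAIVED;
      }
      return 0;
    }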

+ 368 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/Common/helper_string.h

@@ -0,0 +1,368 @@
+/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of NVIDIA CORPORATION nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// These are helper functions for the SDK samples (string parsing, timers, etc)
+#ifndef COMMON_HELPER_STRING_H_
+#define COMMON_HELPER_STRING_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fstream>
+#include <string>
+
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+#ifndef _CRT_SECURE_NO_DEPRECATE
+#define _CRT_SECURE_NO_DEPRECATE
+#endif
+#ifndef STRCASECMP
+#define STRCASECMP _stricmp
+#endif
+#ifndef STRNCASECMP
+#define STRNCASECMP _strnicmp
+#endif
+#ifndef STRCPY
+#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
+#endif
+
+#ifndef FOPEN
+#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
+#endif
+#ifndef FOPEN_FAIL
+#define FOPEN_FAIL(result) (result != 0)
+#endif
+#ifndef SSCANF
+#define SSCANF sscanf_s
+#endif
+#ifndef SPRINTF
+#define SPRINTF sprintf_s
+#endif
+#else  // Linux Includes
+#include <string.h>
+#include <strings.h>
+
+#ifndef STRCASECMP
+#define STRCASECMP strcasecmp
+#endif
+#ifndef STRNCASECMP
+#define STRNCASECMP strncasecmp
+#endif
+#ifndef STRCPY
+#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
+#endif
+
+#ifndef FOPEN
+#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
+#endif
+#ifndef FOPEN_FAIL
+#define FOPEN_FAIL(result) (result == NULL)
+#endif
+#ifndef SSCANF
+#define SSCANF sscanf
+#endif
+#ifndef SPRINTF
+#define SPRINTF sprintf
+#endif
+#endif
+
+#ifndef EXIT_WAIVED
+#define EXIT_WAIVED 2
+#endif
+
+// CUDA Utility Helper Functions
+inline int stringRemoveDelimiter(char delimiter, const char *string) {
+  int string_start = 0;
+
+  while (string[string_start] == delimiter) {
+    string_start++;
+  }
+
+  if (string_start >= static_cast<int>(strlen(string) - 1)) {
+    return 0;
+  }
+
+  return string_start;
+}
+
+inline int getFileExtension(char *filename, char **extension) {
+  int string_length = static_cast<int>(strlen(filename));
+
+  while (filename[string_length--] != '.') {
+    if (string_length == 0) break;
+  }
+
+  if (string_length > 0) string_length += 2;
+
+  if (string_length == 0)
+    *extension = NULL;
+  else
+    *extension = &filename[string_length];
+
+  return string_length;
+}
+
+inline bool checkCmdLineFlag(const int argc, const char **argv,
+                             const char *string_ref) {
+  bool bFound = false;
+
+  if (argc >= 1) {
+    for (int i = 1; i < argc; i++) {
+      int string_start = stringRemoveDelimiter('-', argv[i]);
+      const char *string_argv = &argv[i][string_start];
+
+      const char *equal_pos = strchr(string_argv, '=');
+      int argv_length = static_cast<int>(
+          equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
+
+      int length = static_cast<int>(strlen(string_ref));
+
+      if (length == argv_length &&
+          !STRNCASECMP(string_argv, string_ref, length)) {
+        bFound = true;
+        continue;
+      }
+    }
+  }
+
+  return bFound;
+}
+
+// Parses an integer-valued option of the form "<flag>=<value>" from the
+// command line and stores it (cast to T) into *value
+template <class T>
+inline bool getCmdLineArgumentValue(const int argc, const char **argv,
+                                    const char *string_ref, T *value) {
+  bool bFound = false;
+
+  if (argc >= 1) {
+    for (int i = 1; i < argc; i++) {
+      int string_start = stringRemoveDelimiter('-', argv[i]);
+      const char *string_argv = &argv[i][string_start];
+      int length = static_cast<int>(strlen(string_ref));
+
+      if (!STRNCASECMP(string_argv, string_ref, length)) {
+        if (length + 1 <= static_cast<int>(strlen(string_argv))) {
+          int auto_inc = (string_argv[length] == '=') ? 1 : 0;
+          *value = (T)atoi(&string_argv[length + auto_inc]);
+        }
+
+        bFound = true;
+        i = argc;
+      }
+    }
+  }
+
+  return bFound;
+}
+
+inline int getCmdLineArgumentInt(const int argc, const char **argv,
+                                 const char *string_ref) {
+  bool bFound = false;
+  int value = -1;
+
+  if (argc >= 1) {
+    for (int i = 1; i < argc; i++) {
+      int string_start = stringRemoveDelimiter('-', argv[i]);
+      const char *string_argv = &argv[i][string_start];
+      int length = static_cast<int>(strlen(string_ref));
+
+      if (!STRNCASECMP(string_argv, string_ref, length)) {
+        if (length + 1 <= static_cast<int>(strlen(string_argv))) {
+          int auto_inc = (string_argv[length] == '=') ? 1 : 0;
+          value = atoi(&string_argv[length + auto_inc]);
+        } else {
+          value = 0;
+        }
+
+        bFound = true;
+        continue;
+      }
+    }
+  }
+
+  if (bFound) {
+    return value;
+  } else {
+    return 0;
+  }
+}
+
+inline float getCmdLineArgumentFloat(const int argc, const char **argv,
+                                     const char *string_ref) {
+  bool bFound = false;
+  float value = -1;
+
+  if (argc >= 1) {
+    for (int i = 1; i < argc; i++) {
+      int string_start = stringRemoveDelimiter('-', argv[i]);
+      const char *string_argv = &argv[i][string_start];
+      int length = static_cast<int>(strlen(string_ref));
+
+      if (!STRNCASECMP(string_argv, string_ref, length)) {
+        if (length + 1 <= static_cast<int>(strlen(string_argv))) {
+          int auto_inc = (string_argv[length] == '=') ? 1 : 0;
+          value = static_cast<float>(atof(&string_argv[length + auto_inc]));
+        } else {
+          value = 0.f;
+        }
+
+        bFound = true;
+        continue;
+      }
+    }
+  }
+
+  if (bFound) {
+    return value;
+  } else {
+    return 0;
+  }
+}
+
+inline bool getCmdLineArgumentString(const int argc, const char **argv,
+                                     const char *string_ref,
+                                     char **string_retval) {
+  bool bFound = false;
+
+  if (argc >= 1) {
+    for (int i = 1; i < argc; i++) {
+      int string_start = stringRemoveDelimiter('-', argv[i]);
+      char *string_argv = const_cast<char *>(&argv[i][string_start]);
+      int length = static_cast<int>(strlen(string_ref));
+
+      if (!STRNCASECMP(string_argv, string_ref, length)) {
+        *string_retval = &string_argv[length + 1];
+        bFound = true;
+        continue;
+      }
+    }
+  }
+
+  if (!bFound) {
+    *string_retval = NULL;
+  }
+
+  return bFound;
+}
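+// Parsing sketch (hypothetical argv, not from the sample): with
+//   argv = { "prog", "--numElems=1000", "--sm_copy" }
+// the helpers above behave as follows:
+//   checkCmdLineFlag(argc, argv, "sm_copy")        -> true
+//   getCmdLineArgumentInt(argc, argv, "numElems=") -> 1000
+//   getCmdLineArgumentFloat(argc, argv, "scale=")  -> 0 (flag absent)
+//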
+
+//////////////////////////////////////////////////////////////////////////////
+//! Find the path for a file assuming that
+//! files are found in the searchPath.
+//!
+//! @return the path if succeeded, otherwise 0
+//! @param filename         name of the file
+//! @param executable_path  optional absolute path of the executable
+//////////////////////////////////////////////////////////////////////////////
+inline char *sdkFindFilePath(const char *filename,
+                             const char *executable_path) {
+  // <executable_name> defines a variable that is replaced with the name of the
+  // executable
+
+  // Typical relative search paths to locate needed companion files (e.g. sample
+  // input data, or JIT source files). The origin for the relative search may be
+  // the .exe file, a .bat file launching an .exe, a browser .exe launching the
+  // .exe or .bat, etc
+  const char *searchPath[] = {
+      "./",                                          // same dir
+      "./data/",                                      // same dir
+      "../../../../Samples/<executable_name>/",       // up 4 in tree
+      "../../../Samples/<executable_name>/",          // up 3 in tree
+      "../../Samples/<executable_name>/",             // up 2 in tree
+      "../../../../Samples/<executable_name>/data/",  // up 4 in tree
+      "../../../Samples/<executable_name>/data/",     // up 3 in tree
+      "../../Samples/<executable_name>/data/",        // up 2 in tree
+      "../../../../Common/data/",                     // up 4 in tree
+      "../../../Common/data/",                        // up 3 in tree
+      "../../Common/data/"                            // up 2 in tree
+  };
+
+  // Extract the executable name
+  std::string executable_name;
+
+  if (executable_path != 0) {
+    executable_name = std::string(executable_path);
+
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+    // Windows path delimiter
+    size_t delimiter_pos = executable_name.find_last_of('\\');
+    executable_name.erase(0, delimiter_pos + 1);
+
+    if (executable_name.rfind(".exe") != std::string::npos) {
+      // we strip .exe, only if the .exe is found
+      executable_name.resize(executable_name.size() - 4);
+    }
+
+#else
+    // Linux & OSX path delimiter
+    size_t delimiter_pos = executable_name.find_last_of('/');
+    executable_name.erase(0, delimiter_pos + 1);
+#endif
+  }
+
+  // Loop over all search paths and return the first hit
+  for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
+    std::string path(searchPath[i]);
+    size_t executable_name_pos = path.find("<executable_name>");
+
+    // If there is executable_name variable in the searchPath
+    // replace it with the value
+    if (executable_name_pos != std::string::npos) {
+      if (executable_path != 0) {
+        path.replace(executable_name_pos, strlen("<executable_name>"),
+                     executable_name);
+      } else {
+        // Skip this path entry if no executable argument is given
+        continue;
+      }
+    }
+
+#ifdef _DEBUG
+    printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
+#endif
+
+    // Test if the file exists
+    path.append(filename);
+    FILE *fp;
+    FOPEN(fp, path.c_str(), "rb");
+
+    if (fp != NULL) {
+      fclose(fp);
+      // File found
+      // returning an allocated array here for backwards compatibility reasons
+      char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
+      STRCPY(file_path, path.length() + 1, path.c_str());
+      return file_path;
+    }
+
+    if (fp) {
+      fclose(fp);
+    }
+  }
+
+  // File not found
+  return 0;
+}
+
+#endif  // COMMON_HELPER_STRING_H_
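A short sketch of the intended call pattern for sdkFindFilePath; the file name "ref_data.bin" is a hypothetical example, and the header is assumed to be on the include path:

    #include <cstdio>
    #include <cstdlib>
    #include <helper_string.h>

    int main(int argc, char **argv) {
      char *path = sdkFindFilePath("ref_data.bin", argv[0]);
      if (path != NULL) {
        FILE *fp = fopen(path, "rb");
        // ... consume the file ...
        if (fp) fclose(fp);
        free(path);  // the helper returns a malloc'd string
      }
      return 0;
    }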

+ 465 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/Common/helper_timer.h

@@ -0,0 +1,465 @@
+/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of NVIDIA CORPORATION nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Helper Timing Functions
+#ifndef COMMON_HELPER_TIMER_H_
+#define COMMON_HELPER_TIMER_H_
+
+#ifndef EXIT_WAIVED
+#define EXIT_WAIVED 2
+#endif
+
+// includes, system
+#include <vector>
+
+// includes, project
+#include <exception.h>
+
+// Definition of the StopWatch Interface; this is used if we don't want to use
+// the CUT functions, but rather a self-contained class interface
+class StopWatchInterface {
+ public:
+  StopWatchInterface() {}
+  virtual ~StopWatchInterface() {}
+
+ public:
+  //! Start time measurement
+  virtual void start() = 0;
+
+  //! Stop time measurement
+  virtual void stop() = 0;
+
+  //! Reset time counters to zero
+  virtual void reset() = 0;
+
+  //! Time in msec. after start. If the stop watch is still running (i.e. there
+  //! was no call to stop()) then the elapsed time is returned, otherwise the
+  //! time between the last start() and stop call is returned
+  virtual float getTime() = 0;
+
+  //! Mean time to date based on the number of times the stopwatch has been
+  //! _stopped_ (ie finished sessions) and the current total time
+  virtual float getAverageTime() = 0;
+};
+
+//////////////////////////////////////////////////////////////////
+// Begin Stopwatch timer class definitions for all OS platforms //
+//////////////////////////////////////////////////////////////////
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+// includes, system
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef min
+#undef max
+
+//! Windows specific implementation of StopWatch
+class StopWatchWin : public StopWatchInterface {
+ public:
+  //! Constructor, default
+  StopWatchWin()
+      : start_time(),
+        end_time(),
+        diff_time(0.0f),
+        total_time(0.0f),
+        running(false),
+        clock_sessions(0),
+        freq(0),
+        freq_set(false) {
+    if (!freq_set) {
+      // helper variable
+      LARGE_INTEGER temp;
+
+      // get the tick frequency from the OS
+      QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
+
+      // convert to type in which it is needed
+      freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
+
+      // remember query
+      freq_set = true;
+    }
+  }
+
+  // Destructor
+  ~StopWatchWin() {}
+
+ public:
+  //! Start time measurement
+  inline void start();
+
+  //! Stop time measurement
+  inline void stop();
+
+  //! Reset time counters to zero
+  inline void reset();
+
+  //! Time in msec. after start. If the stop watch is still running (i.e. there
+  //! was no call to stop()) then the elapsed time is returned, otherwise the
+  //! time between the last start() and stop call is returned
+  inline float getTime();
+
+  //! Mean time to date based on the number of times the stopwatch has been
+  //! _stopped_ (ie finished sessions) and the current total time
+  inline float getAverageTime();
+
+ private:
+  // member variables
+
+  //! Start of measurement
+  LARGE_INTEGER start_time;
+  //! End of measurement
+  LARGE_INTEGER end_time;
+
+  //! Time difference between the last start and stop
+  float diff_time;
+
+  //! TOTAL time difference between starts and stops
+  float total_time;
+
+  //! flag if the stop watch is running
+  bool running;
+
+  //! Number of times clock has been started
+  //! and stopped to allow averaging
+  int clock_sessions;
+
+  //! tick frequency
+  double freq;
+
+  //! flag if the frequency has been set
+  bool freq_set;
+};
+
+// functions, inlined
+
+////////////////////////////////////////////////////////////////////////////////
+//! Start time measurement
+////////////////////////////////////////////////////////////////////////////////
+inline void StopWatchWin::start() {
+  QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
+  running = true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Stop time measurement and add it to the current diff_time summation
+//! variable. Also increment the number of times this clock has been run.
+////////////////////////////////////////////////////////////////////////////////
+inline void StopWatchWin::stop() {
+  QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
+  diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
+                                   static_cast<double>(start_time.QuadPart)) /
+                                  freq));
+
+  total_time += diff_time;
+  clock_sessions++;
+  running = false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Reset the timer to 0. Does not change the timer running state but does
+//! recapture this point in time as the current start time if it is running.
+////////////////////////////////////////////////////////////////////////////////
+inline void StopWatchWin::reset() {
+  diff_time = 0;
+  total_time = 0;
+  clock_sessions = 0;
+
+  if (running) {
+    QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
+  }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Time in msec. after start. If the stop watch is still running (i.e. there
+//! was no call to stop()) then the elapsed time is returned added to the
+//! current diff_time sum, otherwise the current summed time difference alone
+//! is returned.
+////////////////////////////////////////////////////////////////////////////////
+inline float StopWatchWin::getTime() {
+  // Return the TOTAL time to date
+  float retval = total_time;
+
+  if (running) {
+    LARGE_INTEGER temp;
+    QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
+    retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
+                                   static_cast<double>(start_time.QuadPart)) /
+                                  freq));
+  }
+
+  return retval;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Time in msec. for a single run based on the total number of COMPLETED runs
+//! and the total time.
+////////////////////////////////////////////////////////////////////////////////
+inline float StopWatchWin::getAverageTime() {
+  return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
+}
+#else
+// Declarations for Stopwatch on Linux and Mac OSX
+// includes, system
+#include <sys/time.h>
+#include <ctime>
+
+//! Linux/Mac OSX specific implementation of StopWatch
+class StopWatchLinux : public StopWatchInterface {
+ public:
+  //! Constructor, default
+  StopWatchLinux()
+      : start_time(),
+        diff_time(0.0),
+        total_time(0.0),
+        running(false),
+        clock_sessions(0) {}
+
+  // Destructor
+  virtual ~StopWatchLinux() {}
+
+ public:
+  //! Start time measurement
+  inline void start();
+
+  //! Stop time measurement
+  inline void stop();
+
+  //! Reset time counters to zero
+  inline void reset();
+
+  //! Time in msec. after start. If the stop watch is still running (i.e. there
+  //! was no call to stop()) then the elapsed time is returned, otherwise the
+  //! time between the last start() and stop call is returned
+  inline float getTime();
+
+  //! Mean time to date based on the number of times the stopwatch has been
+  //! _stopped_ (ie finished sessions) and the current total time
+  inline float getAverageTime();
+
+ private:
+  // helper functions
+
+  //! Get difference between start time and current time
+  inline float getDiffTime();
+
+ private:
+  // member variables
+
+  //! Start of measurement
+  struct timeval start_time;
+
+  //! Time difference between the last start and stop
+  float diff_time;
+
+  //! TOTAL time difference between starts and stops
+  float total_time;
+
+  //! flag if the stop watch is running
+  bool running;
+
+  //! Number of times clock has been started
+  //! and stopped to allow averaging
+  int clock_sessions;
+};
+
+// functions, inlined
+
+////////////////////////////////////////////////////////////////////////////////
+//! Start time measurement
+////////////////////////////////////////////////////////////////////////////////
+inline void StopWatchLinux::start() {
+  gettimeofday(&start_time, 0);
+  running = true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Stop time measurement and add it to the current diff_time summation
+//! variable. Also increment the number of times this clock has been run.
+////////////////////////////////////////////////////////////////////////////////
+inline void StopWatchLinux::stop() {
+  diff_time = getDiffTime();
+  total_time += diff_time;
+  running = false;
+  clock_sessions++;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Reset the timer to 0. Does not change the timer running state but does
+//! recapture this point in time as the current start time if it is running.
+////////////////////////////////////////////////////////////////////////////////
+inline void StopWatchLinux::reset() {
+  diff_time = 0;
+  total_time = 0;
+  clock_sessions = 0;
+
+  if (running) {
+    gettimeofday(&start_time, 0);
+  }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Time in msec. after start. If the stop watch is still running (i.e. there
+//! was no call to stop()) then the elapsed time is returned added to the
+//! current diff_time sum, otherwise the current summed time difference alone
+//! is returned.
+////////////////////////////////////////////////////////////////////////////////
+inline float StopWatchLinux::getTime() {
+  // Return the TOTAL time to date
+  float retval = total_time;
+
+  if (running) {
+    retval += getDiffTime();
+  }
+
+  return retval;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Time in msec. for a single run based on the total number of COMPLETED runs
+//! and the total time.
+////////////////////////////////////////////////////////////////////////////////
+inline float StopWatchLinux::getAverageTime() {
+  return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
+}
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+inline float StopWatchLinux::getDiffTime() {
+  struct timeval t_time;
+  gettimeofday(&t_time, 0);
+
+  // time difference in milli-seconds
+  return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
+                            (0.001 * (t_time.tv_usec - start_time.tv_usec)));
+}
+#endif  // WIN32
+
+////////////////////////////////////////////////////////////////////////////////
+//! Timer functionality exported
+
+////////////////////////////////////////////////////////////////////////////////
+//! Create a new timer
+//! @return true if a time has been created, otherwise false
+//! @param  name of the new timer, 0 if the creation failed
+////////////////////////////////////////////////////////////////////////////////
+inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
+// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+  *timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
+#else
+  *timer_interface =
+      reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
+#endif
+  return (*timer_interface != NULL) ? true : false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Delete a timer
+//! @return true if a timer has been deleted, otherwise false
+//! @param  name of the timer to delete
+////////////////////////////////////////////////////////////////////////////////
+inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
+  // printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
+  if (*timer_interface) {
+    delete *timer_interface;
+    *timer_interface = NULL;
+  }
+
+  return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Start the timer with name \a name
+//! @param name  name of the timer to start
+////////////////////////////////////////////////////////////////////////////////
+inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
+  // printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
+  if (*timer_interface) {
+    (*timer_interface)->start();
+  }
+
+  return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Stop the timer with name \a name. Does not reset.
+//! @param name  name of the timer to stop
+////////////////////////////////////////////////////////////////////////////////
+inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
+  // printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
+  if (*timer_interface) {
+    (*timer_interface)->stop();
+  }
+
+  return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Resets the timer's counter.
+//! @param name  name of the timer to reset.
+////////////////////////////////////////////////////////////////////////////////
+inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
+  // printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
+  if (*timer_interface) {
+    (*timer_interface)->reset();
+  }
+
+  return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Return the average time for timer execution as the total time
+//! for the timer divided by the number of completed (stopped) runs the timer
+//! has made.
+//! Excludes the current running time if the timer is currently running.
+//! @param name  name of the timer to return the time of
+////////////////////////////////////////////////////////////////////////////////
+inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
+  //  printf("sdkGetAverageTimerValue called object %08x\n", (void
+  //  *)*timer_interface);
+  if (*timer_interface) {
+    return (*timer_interface)->getAverageTime();
+  } else {
+    return 0.0f;
+  }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//! Total execution time for the timer over all runs since the last reset
+//! or timer creation.
+//! @param name  name of the timer to obtain the value of.
+////////////////////////////////////////////////////////////////////////////////
+inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
+  // printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
+  if (*timer_interface) {
+    return (*timer_interface)->getTime();
+  } else {
+    return 0.0f;
+  }
+}
+
+#endif  // COMMON_HELPER_TIMER_H_
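A minimal sketch of the exported timer API in use; the printf format and the `main` wrapper are illustrative, and the header (with its project-local `exception.h` dependency) is assumed to be on the include path:

    #include <cstdio>
    #include <helper_timer.h>

    int main() {
      StopWatchInterface *timer = NULL;
      sdkCreateTimer(&timer);
      sdkStartTimer(&timer);
      // ... work to be measured ...
      sdkStopTimer(&timer);
      printf("elapsed %.3f ms, average %.3f ms per completed run\n",
             sdkGetTimerValue(&timer), sdkGetAverageTimerValue(&timer));
      sdkDeleteTimer(&timer);
      return 0;
    }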

+ 28 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/LICENSE

@@ -0,0 +1,28 @@
+Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+ * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+ * Neither the name of NVIDIA CORPORATION nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+For additional information on the license terms, see the CUDA EULA at
+https://docs.nvidia.com/cuda/eula/index.html

+ 337 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/Samples/p2pBandwidthLatencyTest/Makefile

@@ -0,0 +1,337 @@
+################################################################################
+# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#  * Neither the name of NVIDIA CORPORATION nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+################################################################################
+#
+# Makefile project only supported on Mac OS X and Linux platforms
+#
+################################################################################
+
+# Location of the CUDA Toolkit
+CUDA_PATH ?= /usr/local/cuda
+
+##############################
+# start deprecated interface #
+##############################
+ifeq ($(x86_64),1)
+    $(info WARNING - x86_64 variable has been deprecated)
+    $(info WARNING - please use TARGET_ARCH=x86_64 instead)
+    TARGET_ARCH ?= x86_64
+endif
+ifeq ($(ARMv7),1)
+    $(info WARNING - ARMv7 variable has been deprecated)
+    $(info WARNING - please use TARGET_ARCH=armv7l instead)
+    TARGET_ARCH ?= armv7l
+endif
+ifeq ($(aarch64),1)
+    $(info WARNING - aarch64 variable has been deprecated)
+    $(info WARNING - please use TARGET_ARCH=aarch64 instead)
+    TARGET_ARCH ?= aarch64
+endif
+ifeq ($(ppc64le),1)
+    $(info WARNING - ppc64le variable has been deprecated)
+    $(info WARNING - please use TARGET_ARCH=ppc64le instead)
+    TARGET_ARCH ?= ppc64le
+endif
+ifneq ($(GCC),)
+    $(info WARNING - GCC variable has been deprecated)
+    $(info WARNING - please use HOST_COMPILER=$(GCC) instead)
+    HOST_COMPILER ?= $(GCC)
+endif
+ifneq ($(abi),)
+    $(error ERROR - abi variable has been removed)
+endif
+############################
+# end deprecated interface #
+############################
+
+# architecture
+HOST_ARCH   := $(shell uname -m)
+TARGET_ARCH ?= $(HOST_ARCH)
+ifneq (,$(filter $(TARGET_ARCH),x86_64 aarch64 sbsa ppc64le armv7l))
+    ifneq ($(TARGET_ARCH),$(HOST_ARCH))
+        ifneq (,$(filter $(TARGET_ARCH),x86_64 aarch64 sbsa ppc64le))
+            TARGET_SIZE := 64
+        else ifneq (,$(filter $(TARGET_ARCH),armv7l))
+            TARGET_SIZE := 32
+        endif
+    else
+        TARGET_SIZE := $(shell getconf LONG_BIT)
+    endif
+else
+    $(error ERROR - unsupported value $(TARGET_ARCH) for TARGET_ARCH!)
+endif
+
+# sbsa and aarch64 systems look similar. Need to differentiate them at host level for now.
+ifeq ($(HOST_ARCH),aarch64)
+    ifeq ($(CUDA_PATH)/targets/sbsa-linux,$(shell ls -1d $(CUDA_PATH)/targets/sbsa-linux 2>/dev/null))
+        HOST_ARCH := sbsa
+        TARGET_ARCH := sbsa
+    endif
+endif
+
+ifneq ($(TARGET_ARCH),$(HOST_ARCH))
+    ifeq (,$(filter $(HOST_ARCH)-$(TARGET_ARCH),aarch64-armv7l x86_64-armv7l x86_64-aarch64 x86_64-sbsa x86_64-ppc64le))
+        $(error ERROR - cross compiling from $(HOST_ARCH) to $(TARGET_ARCH) is not supported!)
+    endif
+endif
+
+# When on native aarch64 system with userspace of 32-bit, change TARGET_ARCH to armv7l
+ifeq ($(HOST_ARCH)-$(TARGET_ARCH)-$(TARGET_SIZE),aarch64-aarch64-32)
+    TARGET_ARCH = armv7l
+endif
+
+# operating system
+HOST_OS   := $(shell uname -s 2>/dev/null | tr "[:upper:]" "[:lower:]")
+TARGET_OS ?= $(HOST_OS)
+ifeq (,$(filter $(TARGET_OS),linux darwin qnx android))
+    $(error ERROR - unsupported value $(TARGET_OS) for TARGET_OS!)
+endif
+
+# host compiler
+ifeq ($(TARGET_OS),darwin)
+    ifeq ($(shell expr `xcodebuild -version | grep -i xcode | awk '{print $$2}' | cut -d'.' -f1` \>= 5),1)
+        HOST_COMPILER ?= clang++
+    endif
+else ifneq ($(TARGET_ARCH),$(HOST_ARCH))
+    ifeq ($(HOST_ARCH)-$(TARGET_ARCH),x86_64-armv7l)
+        ifeq ($(TARGET_OS),linux)
+            HOST_COMPILER ?= arm-linux-gnueabihf-g++
+        else ifeq ($(TARGET_OS),qnx)
+            ifeq ($(QNX_HOST),)
+                $(error ERROR - QNX_HOST must be passed to the QNX host toolchain)
+            endif
+            ifeq ($(QNX_TARGET),)
+                $(error ERROR - QNX_TARGET must be passed to the QNX target toolchain)
+            endif
+            export QNX_HOST
+            export QNX_TARGET
+            HOST_COMPILER ?= $(QNX_HOST)/usr/bin/arm-unknown-nto-qnx6.6.0eabi-g++
+        else ifeq ($(TARGET_OS),android)
+            HOST_COMPILER ?= arm-linux-androideabi-g++
+        endif
+    else ifeq ($(TARGET_ARCH),aarch64)
+        ifeq ($(TARGET_OS), linux)
+            HOST_COMPILER ?= aarch64-linux-gnu-g++
+        else ifeq ($(TARGET_OS),qnx)
+            ifeq ($(QNX_HOST),)
+                $(error ERROR - QNX_HOST must be passed to the QNX host toolchain)
+            endif
+            ifeq ($(QNX_TARGET),)
+                $(error ERROR - QNX_TARGET must be passed to the QNX target toolchain)
+            endif
+            export QNX_HOST
+            export QNX_TARGET
+            HOST_COMPILER ?= $(QNX_HOST)/usr/bin/q++
+        else ifeq ($(TARGET_OS), android)
+            HOST_COMPILER ?= aarch64-linux-android-clang++
+        endif
+    else ifeq ($(TARGET_ARCH),sbsa)
+        HOST_COMPILER ?= aarch64-linux-gnu-g++
+    else ifeq ($(TARGET_ARCH),ppc64le)
+        HOST_COMPILER ?= powerpc64le-linux-gnu-g++
+    endif
+endif
+HOST_COMPILER ?= g++
+NVCC          := $(CUDA_PATH)/bin/nvcc -ccbin $(HOST_COMPILER)
+
+# internal flags
+NVCCFLAGS   := -m${TARGET_SIZE}
+CCFLAGS     :=
+LDFLAGS     :=
+
+# build flags
+ifeq ($(TARGET_OS),darwin)
+    LDFLAGS += -rpath $(CUDA_PATH)/lib
+    CCFLAGS += -arch $(HOST_ARCH)
+else ifeq ($(HOST_ARCH)-$(TARGET_ARCH)-$(TARGET_OS),x86_64-armv7l-linux)
+    LDFLAGS += --dynamic-linker=/lib/ld-linux-armhf.so.3
+    CCFLAGS += -mfloat-abi=hard
+else ifeq ($(TARGET_OS),android)
+    LDFLAGS += -pie
+    CCFLAGS += -fpie -fpic -fexceptions
+endif
+
+ifneq ($(TARGET_ARCH),$(HOST_ARCH))
+    ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-linux)
+        ifneq ($(TARGET_FS),)
+            GCCVERSIONLTEQ46 := $(shell expr `$(HOST_COMPILER) -dumpversion` \<= 4.6)
+            ifeq ($(GCCVERSIONLTEQ46),1)
+                CCFLAGS += --sysroot=$(TARGET_FS)
+            endif
+            LDFLAGS += --sysroot=$(TARGET_FS)
+            LDFLAGS += -rpath-link=$(TARGET_FS)/lib
+            LDFLAGS += -rpath-link=$(TARGET_FS)/usr/lib
+            LDFLAGS += -rpath-link=$(TARGET_FS)/usr/lib/arm-linux-gnueabihf
+        endif
+    endif
+    ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-linux)
+        ifneq ($(TARGET_FS),)
+            GCCVERSIONLTEQ46 := $(shell expr `$(HOST_COMPILER) -dumpversion` \<= 4.6)
+            ifeq ($(GCCVERSIONLTEQ46),1)
+                CCFLAGS += --sysroot=$(TARGET_FS)
+            endif
+            LDFLAGS += --sysroot=$(TARGET_FS)
+            LDFLAGS += -rpath-link=$(TARGET_FS)/lib -L$(TARGET_FS)/lib
+            LDFLAGS += -rpath-link=$(TARGET_FS)/lib/aarch64-linux-gnu -L$(TARGET_FS)/lib/aarch64-linux-gnu
+            LDFLAGS += -rpath-link=$(TARGET_FS)/usr/lib -L$(TARGET_FS)/usr/lib
+            LDFLAGS += -rpath-link=$(TARGET_FS)/usr/lib/aarch64-linux-gnu -L$(TARGET_FS)/usr/lib/aarch64-linux-gnu
+            LDFLAGS += --unresolved-symbols=ignore-in-shared-libs
+            CCFLAGS += -isystem=$(TARGET_FS)/usr/include -I$(TARGET_FS)/usr/include -I$(TARGET_FS)/usr/include/libdrm
+            CCFLAGS += -isystem=$(TARGET_FS)/usr/include/aarch64-linux-gnu -I$(TARGET_FS)/usr/include/aarch64-linux-gnu
+        endif
+    endif
+    ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-qnx)
+        NVCCFLAGS += --qpp-config 5.4.0,gcc_ntoaarch64le
+        CCFLAGS += -DWIN_INTERFACE_CUSTOM -I/usr/include/aarch64-qnx-gnu
+        LDFLAGS += -lsocket
+        LDFLAGS += -L/usr/lib/aarch64-qnx-gnu
+        CCFLAGS += "-Wl\,-rpath-link\,/usr/lib/aarch64-qnx-gnu"
+        ifdef TARGET_OVERRIDE
+            LDFLAGS += -lslog2
+        endif
+
+        ifneq ($(TARGET_FS),)
+            LDFLAGS += -L$(TARGET_FS)/usr/lib
+            CCFLAGS += "-Wl\,-rpath-link\,$(TARGET_FS)/usr/lib"
+            LDFLAGS += -L$(TARGET_FS)/usr/libnvidia
+            CCFLAGS += "-Wl\,-rpath-link\,$(TARGET_FS)/usr/libnvidia"
+            CCFLAGS += -I$(TARGET_FS)/../include
+        endif
+    endif
+endif
+
+ifdef TARGET_OVERRIDE # cuda toolkit targets override
+    NVCCFLAGS += -target-dir $(TARGET_OVERRIDE)
+endif
+
+# Install directory of different arch
+CUDA_INSTALL_TARGET_DIR :=
+ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-linux)
+    CUDA_INSTALL_TARGET_DIR = targets/armv7-linux-gnueabihf/
+else ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-linux)
+    CUDA_INSTALL_TARGET_DIR = targets/aarch64-linux/
+else ifeq ($(TARGET_ARCH)-$(TARGET_OS),sbsa-linux)
+    CUDA_INSTALL_TARGET_DIR = targets/sbsa-linux/
+else ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-android)
+    CUDA_INSTALL_TARGET_DIR = targets/armv7-linux-androideabi/
+else ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-android)
+    CUDA_INSTALL_TARGET_DIR = targets/aarch64-linux-androideabi/
+else ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-qnx)
+    CUDA_INSTALL_TARGET_DIR = targets/ARMv7-linux-QNX/
+else ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-qnx)
+    CUDA_INSTALL_TARGET_DIR = targets/aarch64-qnx/
+else ifeq ($(TARGET_ARCH),ppc64le)
+    CUDA_INSTALL_TARGET_DIR = targets/ppc64le-linux/
+endif
+
+# Debug build flags
+ifeq ($(dbg),1)
+      NVCCFLAGS += -g -G
+      BUILD_TYPE := debug
+else
+      BUILD_TYPE := release
+endif
+
+ALL_CCFLAGS :=
+ALL_CCFLAGS += $(NVCCFLAGS)
+ALL_CCFLAGS += $(EXTRA_NVCCFLAGS)
+ALL_CCFLAGS += $(addprefix -Xcompiler ,$(CCFLAGS))
+ALL_CCFLAGS += $(addprefix -Xcompiler ,$(EXTRA_CCFLAGS))
+
+SAMPLE_ENABLED := 1
+
+ALL_LDFLAGS :=
+ALL_LDFLAGS += $(ALL_CCFLAGS)
+ALL_LDFLAGS += $(addprefix -Xlinker ,$(LDFLAGS))
+ALL_LDFLAGS += $(addprefix -Xlinker ,$(EXTRA_LDFLAGS))
+
+# Common includes and paths for CUDA
+INCLUDES  := -I../../Common
+LIBRARIES :=
+
+################################################################################
+
+# Gencode arguments
+ifeq ($(TARGET_ARCH),$(filter $(TARGET_ARCH),armv7l aarch64))
+SMS ?= 35 37 50 52 60 61 70 72 75
+else
+SMS ?= 35 37 50 52 60 61 70 75
+endif
+
+ifeq ($(SMS),)
+$(info >>> WARNING - no SM architectures have been specified - waiving sample <<<)
+SAMPLE_ENABLED := 0
+endif
+
+ifeq ($(GENCODE_FLAGS),)
+# Generate SASS code for each SM architecture listed in $(SMS)
+$(foreach sm,$(SMS),$(eval GENCODE_FLAGS += -gencode arch=compute_$(sm),code=sm_$(sm)))
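+# For example (illustrative, not part of the original Makefile), SMS="70 75"
+# expands the loop above to:
+#   -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75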
+
+# Generate PTX code from the highest SM architecture in $(SMS) to guarantee forward-compatibility
+HIGHEST_SM := $(lastword $(sort $(SMS)))
+ifneq ($(HIGHEST_SM),)
+GENCODE_FLAGS += -gencode arch=compute_$(HIGHEST_SM),code=compute_$(HIGHEST_SM)
+endif
+endif
+
+#ALL_CCFLAGS += --threads 0
+
+ifeq ($(SAMPLE_ENABLED),0)
+EXEC ?= @echo "[@]"
+endif
+
+################################################################################
+
+# Target rules
+all: build
+
+build: p2pBandwidthLatencyTest
+
+check.deps:
+ifeq ($(SAMPLE_ENABLED),0)
+	@echo "Sample will be waived due to the above missing dependencies"
+else
+	@echo "Sample is ready - all dependencies have been met"
+endif
+
+p2pBandwidthLatencyTest.o:p2pBandwidthLatencyTest.cu
+	$(EXEC) $(NVCC) $(INCLUDES) $(ALL_CCFLAGS) $(GENCODE_FLAGS) -o $@ -c $<
+
+p2pBandwidthLatencyTest: p2pBandwidthLatencyTest.o
+	$(EXEC) $(NVCC) $(ALL_LDFLAGS) $(GENCODE_FLAGS) -o $@ $+ $(LIBRARIES)
+	$(EXEC) mkdir -p ../../bin/$(TARGET_ARCH)/$(TARGET_OS)/$(BUILD_TYPE)
+	$(EXEC) cp $@ ../../bin/$(TARGET_ARCH)/$(TARGET_OS)/$(BUILD_TYPE)
+
+run: build
+	$(EXEC) ./p2pBandwidthLatencyTest
+
+clean:
+	rm -f p2pBandwidthLatencyTest p2pBandwidthLatencyTest.o
+	rm -rf ../../bin/$(TARGET_ARCH)/$(TARGET_OS)/$(BUILD_TYPE)/p2pBandwidthLatencyTest
+
+clobber: clean

File diff suppressed because it is too large
+ 70 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/Samples/p2pBandwidthLatencyTest/README.md


+ 695 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/Samples/p2pBandwidthLatencyTest/p2pBandwidthLatencyTest.cu

@@ -0,0 +1,695 @@
+/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of NVIDIA CORPORATION nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cstdio>
+#include <vector>
+
+#include <helper_cuda.h>
+#include <helper_timer.h>
+
+using namespace std;
+
+const char *sSampleName = "P2P (Peer-to-Peer) GPU Bandwidth Latency Test";
+
+typedef enum {
+  P2P_WRITE = 0,
+  P2P_READ = 1,
+} P2PDataTransfer;
+
+typedef enum {
+  CE = 0,
+  SM = 1,
+} P2PEngine;
+
+P2PEngine p2p_mechanism = CE;  // By default use Copy Engine
+
+// Macro for checking cuda errors following a cuda launch or api call
+#define cudaCheckError()                                       \
+  {                                                            \
+    cudaError_t e = cudaGetLastError();                        \
+    if (e != cudaSuccess) {                                    \
+      printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, \
+             cudaGetErrorString(e));                           \
+      exit(EXIT_FAILURE);                                      \
+    }                                                          \
+  }
+__global__ void delay(volatile int *flag,
+                      unsigned long long timeout_clocks = 10000000) {
+  // Wait until the application notifies us that it has completed queuing up the
+  // experiment, or time out and exit, allowing the application to make progress
+  long long int start_clock, sample_clock;
+  start_clock = clock64();
+
+  while (!*flag) {
+    sample_clock = clock64();
+
+    if (sample_clock - start_clock > timeout_clocks) {
+      break;
+    }
+  }
+}
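+// Host-side pattern this kernel enables (sketch; the measurement loops below
+// follow it):
+//   *flag = 0;
+//   delay<<<1, 1, 0, stream>>>(flag);  // the stream now spins on the GPU
+//   /* queue the work to be timed on the stream */
+//   *flag = 1;                         // release the GPU
+//   cudaStreamSynchronize(stream);
+//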
+
+// This kernel is for demonstration purposes only, not a performant kernel for
+// p2p transfers.
+__global__ void copyp2p(int4 *__restrict__ dest, int4 const *__restrict__ src,
+                        size_t num_elems) {
+  size_t globalId = blockIdx.x * blockDim.x + threadIdx.x;
+  size_t gridSize = blockDim.x * gridDim.x;
+
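+  // Grid-stride loop over int4 (16-byte) vectors: each thread copies every
+  // gridSize-th element, so any launch configuration covers the whole buffer.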
+#pragma unroll(5)
+  for (size_t i = globalId; i < num_elems; i += gridSize) {
+    dest[i] = src[i];
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Print help screen
+///////////////////////////////////////////////////////////////////////////
+void printHelp(void) {
+  printf("Usage:  p2pBandwidthLatencyTest [OPTION]...\n");
+  printf("Tests bandwidth/latency of GPU pairs using P2P and without P2P\n");
+  printf("\n");
+
+  printf("Options:\n");
+  printf("--help\t\tDisplay this help menu\n");
+  printf(
+      "--p2p_read\tUse P2P reads for data transfers between GPU pairs and show "
+      "corresponding results.\n \t\tDefault used is P2P write operation.\n");
+  printf("--sm_copy                      Use SM intiated p2p transfers instead of Copy Engine\n");
+  printf("--numElems=<NUM_OF_INT_ELEMS>  Number of integer elements to be used in p2p copy.\n");
+}
+
+void checkP2Paccess(int numGPUs) {
+  for (int i = 0; i < numGPUs; i++) {
+    cudaSetDevice(i);
+    cudaCheckError();
+
+    for (int j = 0; j < numGPUs; j++) {
+      int access;
+      if (i != j) {
+        cudaDeviceCanAccessPeer(&access, i, j);
+        cudaCheckError();
+        printf("Device=%d %s Access Peer Device=%d\n", i,
+               access ? "CAN" : "CANNOT", j);
+      }
+    }
+  }
+  printf(
+      "\n***NOTE: In case a device doesn't have P2P access to other one, it "
+      "falls back to normal memcopy procedure.\nSo you can see lesser "
+      "Bandwidth (GB/s) and unstable Latency (us) in those cases.\n\n");
+}
+
+void performP2PCopy(int *dest, int destDevice, int *src, int srcDevice,
+                    int num_elems, int repeat, bool p2paccess,
+                    cudaStream_t streamToRun) {
+  int blockSize = 0;
+  int numBlocks = 0;
+
+  cudaOccupancyMaxPotentialBlockSize(&numBlocks, &blockSize, copyp2p);
+  cudaCheckError();
+
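+  // Two transfer paths: SM-initiated loads/stores through the copyp2p kernel
+  // when peer access is available, otherwise the Copy Engine via
+  // cudaMemcpyPeerAsync (which stages through host memory when P2P is off).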
+  if (p2p_mechanism == SM && p2paccess) {
+    for (int r = 0; r < repeat; r++) {
+      copyp2p<<<numBlocks, blockSize, 0, streamToRun>>>(
+          (int4 *)dest, (int4 *)src, num_elems / 4);
+    }
+  } else {
+    for (int r = 0; r < repeat; r++) {
+      cudaMemcpyPeerAsync(dest, destDevice, src, srcDevice,
+                          sizeof(int) * num_elems, streamToRun);
+    }
+  }
+}
+
+void outputBandwidthMatrix(int numElems, int numGPUs, bool p2p, P2PDataTransfer p2p_method) {
+  int repeat = 5;
+  volatile int *flag = NULL;
+  vector<int *> buffers(numGPUs);
+  vector<int *> buffersD2D(numGPUs);  // buffer for D2D, that is, intra-GPU copy
+  vector<cudaEvent_t> start(numGPUs);
+  vector<cudaEvent_t> stop(numGPUs);
+  vector<cudaStream_t> stream(numGPUs);
+
+  cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
+  cudaCheckError();
+
+  for (int d = 0; d < numGPUs; d++) {
+    cudaSetDevice(d);
+    cudaStreamCreateWithFlags(&stream[d], cudaStreamNonBlocking);
+    cudaMalloc(&buffers[d], numElems * sizeof(int));
+    cudaCheckError();
+    cudaMemset(buffers[d], 0, numElems * sizeof(int));
+    cudaCheckError();
+    cudaMalloc(&buffersD2D[d], numElems * sizeof(int));
+    cudaCheckError();
+    cudaMemset(buffersD2D[d], 0, numElems * sizeof(int));
+    cudaCheckError();
+    cudaEventCreate(&start[d]);
+    cudaCheckError();
+    cudaEventCreate(&stop[d]);
+    cudaCheckError();
+  }
+
+  vector<double> bandwidthMatrix(numGPUs * numGPUs);
+
+  for (int i = 0; i < numGPUs; i++) {
+    cudaSetDevice(i);
+
+    for (int j = 0; j < numGPUs; j++) {
+      int access = 0;
+      if (p2p) {
+        cudaDeviceCanAccessPeer(&access, i, j);
+        if (access) {
+          cudaDeviceEnablePeerAccess(j, 0);
+          cudaCheckError();
+          cudaSetDevice(j);
+          cudaCheckError();
+          cudaDeviceEnablePeerAccess(i, 0);
+          cudaCheckError();
+          cudaSetDevice(i);
+          cudaCheckError();
+        }
+      }
+
+      cudaStreamSynchronize(stream[i]);
+      cudaCheckError();
+
+      // Block the stream until all the work is queued up
+      // DANGER! - cudaMemcpy*Async may infinitely block waiting for
+      // room to push the operation, so keep the number of repetitions
+      // relatively low.  Higher repetition counts will cause the delay kernel
+      // to timeout and lead to unstable results.
+      *flag = 0;
+      delay<<<1, 1, 0, stream[i]>>>(flag);
+      cudaCheckError();
+      cudaEventRecord(start[i], stream[i]);
+      cudaCheckError();
+
+      if (i == j) {
+        // Perform intra-GPU, D2D copies
+        performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
+                       access, stream[i]);
+
+      } else {
+        if (p2p_method == P2P_WRITE) {
+          performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
+                         stream[i]);
+        } else {
+          performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
+                         stream[i]);
+        }
+      }
+
+      cudaEventRecord(stop[i], stream[i]);
+      cudaCheckError();
+
+      // Release the queued events
+      *flag = 1;
+      cudaStreamSynchronize(stream[i]);
+      cudaCheckError();
+
+      float time_ms;
+      cudaEventElapsedTime(&time_ms, start[i], stop[i]);
+      double time_s = time_ms / 1e3;
+
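+      // Effective bandwidth: total bytes moved across all repetitions,
+      // divided by the event-measured wall time of the whole batch.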
+      double gb = numElems * sizeof(int) * repeat / (double)1e9;
+      if (i == j) {
+        gb *= 2;  // must count both the read and the write here
+      }
+      bandwidthMatrix[i * numGPUs + j] = gb / time_s;
+      if (p2p && access) {
+        cudaDeviceDisablePeerAccess(j);
+        cudaSetDevice(j);
+        cudaDeviceDisablePeerAccess(i);
+        cudaSetDevice(i);
+        cudaCheckError();
+      }
+    }
+  }
+
+  printf("   D\\D");
+
+  for (int j = 0; j < numGPUs; j++) {
+    printf("%6d ", j);
+  }
+
+  printf("\n");
+
+  for (int i = 0; i < numGPUs; i++) {
+    printf("%6d ", i);
+
+    for (int j = 0; j < numGPUs; j++) {
+      printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
+    }
+
+    printf("\n");
+  }
+
+  for (int d = 0; d < numGPUs; d++) {
+    cudaSetDevice(d);
+    cudaFree(buffers[d]);
+    cudaFree(buffersD2D[d]);
+    cudaCheckError();
+    cudaEventDestroy(start[d]);
+    cudaCheckError();
+    cudaEventDestroy(stop[d]);
+    cudaCheckError();
+    cudaStreamDestroy(stream[d]);
+    cudaCheckError();
+  }
+
+  cudaFreeHost((void *)flag);
+  cudaCheckError();
+}
+
+void outputBidirectionalBandwidthMatrix(int numElems, int numGPUs, bool p2p) {
+  int repeat = 5;
+  volatile int *flag = NULL;
+  vector<int *> buffers(numGPUs);
+  vector<int *> buffersD2D(numGPUs);
+  vector<cudaEvent_t> start(numGPUs);
+  vector<cudaEvent_t> stop(numGPUs);
+  vector<cudaStream_t> stream0(numGPUs);
+  vector<cudaStream_t> stream1(numGPUs);
+
+  cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
+  cudaCheckError();
+
+  for (int d = 0; d < numGPUs; d++) {
+    cudaSetDevice(d);
+    cudaMalloc(&buffers[d], numElems * sizeof(int));
+    cudaMemset(buffers[d], 0, numElems * sizeof(int));
+    cudaMalloc(&buffersD2D[d], numElems * sizeof(int));
+    cudaMemset(buffersD2D[d], 0, numElems * sizeof(int));
+    cudaCheckError();
+    cudaEventCreate(&start[d]);
+    cudaCheckError();
+    cudaEventCreate(&stop[d]);
+    cudaCheckError();
+    cudaStreamCreateWithFlags(&stream0[d], cudaStreamNonBlocking);
+    cudaCheckError();
+    cudaStreamCreateWithFlags(&stream1[d], cudaStreamNonBlocking);
+    cudaCheckError();
+  }
+
+  vector<double> bandwidthMatrix(numGPUs * numGPUs);
+
+  for (int i = 0; i < numGPUs; i++) {
+    cudaSetDevice(i);
+
+    for (int j = 0; j < numGPUs; j++) {
+      int access = 0;
+      if (p2p) {
+        cudaDeviceCanAccessPeer(&access, i, j);
+        if (access) {
+          cudaSetDevice(i);
+          cudaDeviceEnablePeerAccess(j, 0);
+          cudaCheckError();
+          cudaSetDevice(j);
+          cudaDeviceEnablePeerAccess(i, 0);
+          cudaCheckError();
+        }
+      }
+
+      cudaSetDevice(i);
+      cudaStreamSynchronize(stream0[i]);
+      cudaStreamSynchronize(stream1[j]);
+      cudaCheckError();
+
+      // Block the stream until all the work is queued up
+      // DANGER! - cudaMemcpy*Async may infinitely block waiting for
+      // room to push the operation, so keep the number of repetitions
+      // relatively low.  Higher repetition counts will cause the delay kernel
+      // to time out and lead to unstable results.
+      *flag = 0;
+      cudaSetDevice(i);
+      // No need to block stream1 since it'll be blocked on stream0's event
+      delay<<<1, 1, 0, stream0[i]>>>(flag);
+      cudaCheckError();
+
+      // Force stream1 not to start until stream0 does, in order to ensure
+      // the events on stream0 fully encompass the time needed for all
+      // operations
+      cudaEventRecord(start[i], stream0[i]);
+      cudaStreamWaitEvent(stream1[j], start[i], 0);
+
+      if (i == j) {
+        // For intra-GPU perform 2 memcopies buffersD2D <-> buffers
+        performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
+                       access, stream0[i]);
+        performP2PCopy(buffersD2D[i], i, buffers[i], i, numElems, repeat,
+                       access, stream1[i]);
+      } else {
+        if (access && p2p_mechanism == SM) {
+          cudaSetDevice(j);
+        }
+        performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
+                       stream1[j]);
+        if (access && p2p_mechanism == SM) {
+          cudaSetDevice(i);
+        }
+        performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
+                       stream0[i]);
+      }
+
+      // Notify stream0 that stream1 is complete and record the time of
+      // the total transaction
+      cudaEventRecord(stop[j], stream1[j]);
+      cudaStreamWaitEvent(stream0[i], stop[j], 0);
+      cudaEventRecord(stop[i], stream0[i]);
+
+      // Release the queued operations
+      *flag = 1;
+      cudaStreamSynchronize(stream0[i]);
+      cudaStreamSynchronize(stream1[j]);
+      cudaCheckError();
+
+      float time_ms;
+      cudaEventElapsedTime(&time_ms, start[i], stop[i]);
+      double time_s = time_ms / 1e3;
+
+      double gb = 2.0 * numElems * sizeof(int) * repeat / (double)1e9;
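+      // The factor of 2.0 accounts for the two concurrent copies, one per
+      // direction. With the default numElems of 40,000,000 ints (160 MB) and
+      // repeat = 5, each direction moves roughly 0.8 GB per measurement.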
+      if (i == j) {
+        gb *= 2;  // must count both the read and the write here
+      }
+      bandwidthMatrix[i * numGPUs + j] = gb / time_s;
+      if (p2p && access) {
+        cudaSetDevice(i);
+        cudaDeviceDisablePeerAccess(j);
+        cudaSetDevice(j);
+        cudaDeviceDisablePeerAccess(i);
+      }
+    }
+  }
+
+  printf("   D\\D");
+
+  for (int j = 0; j < numGPUs; j++) {
+    printf("%6d ", j);
+  }
+
+  printf("\n");
+
+  for (int i = 0; i < numGPUs; i++) {
+    printf("%6d ", i);
+
+    for (int j = 0; j < numGPUs; j++) {
+      printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
+    }
+
+    printf("\n");
+  }
+
+  for (int d = 0; d < numGPUs; d++) {
+    cudaSetDevice(d);
+    cudaFree(buffers[d]);
+    cudaFree(buffersD2D[d]);
+    cudaCheckError();
+    cudaEventDestroy(start[d]);
+    cudaCheckError();
+    cudaEventDestroy(stop[d]);
+    cudaCheckError();
+    cudaStreamDestroy(stream0[d]);
+    cudaCheckError();
+    cudaStreamDestroy(stream1[d]);
+    cudaCheckError();
+  }
+
+  cudaFreeHost((void *)flag);
+  cudaCheckError();
+}
+
+void outputLatencyMatrix(int numGPUs, bool p2p, P2PDataTransfer p2p_method) {
+  int repeat = 100;
+  int numElems = 4;  // perform 1-int4 transfer.
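+  // A 16-byte transfer is small enough that the measured time is dominated
+  // by per-operation latency rather than by link bandwidth.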
+  volatile int *flag = NULL;
+  StopWatchInterface *stopWatch = NULL;
+  vector<int *> buffers(numGPUs);
+  vector<int *> buffersD2D(numGPUs);  // buffer for D2D, that is, intra-GPU copy
+  vector<cudaStream_t> stream(numGPUs);
+  vector<cudaEvent_t> start(numGPUs);
+  vector<cudaEvent_t> stop(numGPUs);
+
+  cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
+  cudaCheckError();
+
+  if (!sdkCreateTimer(&stopWatch)) {
+    printf("Failed to create stop watch\n");
+    exit(EXIT_FAILURE);
+  }
+  sdkStartTimer(&stopWatch);
+
+  for (int d = 0; d < numGPUs; d++) {
+    cudaSetDevice(d);
+    cudaStreamCreateWithFlags(&stream[d], cudaStreamNonBlocking);
+    cudaMalloc(&buffers[d], sizeof(int) * numElems);
+    cudaMemset(buffers[d], 0, sizeof(int) * numElems);
+    cudaMalloc(&buffersD2D[d], sizeof(int) * numElems);
+    cudaMemset(buffersD2D[d], 0, sizeof(int) * numElems);
+    cudaCheckError();
+    cudaEventCreate(&start[d]);
+    cudaCheckError();
+    cudaEventCreate(&stop[d]);
+    cudaCheckError();
+  }
+
+  vector<double> gpuLatencyMatrix(numGPUs * numGPUs);
+  vector<double> cpuLatencyMatrix(numGPUs * numGPUs);
+
+  for (int i = 0; i < numGPUs; i++) {
+    cudaSetDevice(i);
+
+    for (int j = 0; j < numGPUs; j++) {
+      int access = 0;
+      if (p2p) {
+        cudaDeviceCanAccessPeer(&access, i, j);
+        if (access) {
+          cudaDeviceEnablePeerAccess(j, 0);
+          cudaCheckError();
+          cudaSetDevice(j);
+          cudaDeviceEnablePeerAccess(i, 0);
+          cudaSetDevice(i);
+          cudaCheckError();
+        }
+      }
+      cudaStreamSynchronize(stream[i]);
+      cudaCheckError();
+
+      // Block the stream until all the work is queued up
+      // DANGER! - cudaMemcpy*Async may infinitely block waiting for
+      // room to push the operation, so keep the number of repetitions
+      // relatively low.  Higher repetition counts will cause the delay kernel
+      // to time out and lead to unstable results.
+      *flag = 0;
+      delay<<<1, 1, 0, stream[i]>>>(flag);
+      cudaCheckError();
+      cudaEventRecord(start[i], stream[i]);
+
+      sdkResetTimer(&stopWatch);
+      if (i == j) {
+        // Perform intra-GPU, D2D copies
+        performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
+                       access, stream[i]);
+      } else {
+        if (p2p_method == P2P_WRITE) {
+          performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
+                         stream[i]);
+        } else {
+          performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
+                         stream[i]);
+        }
+      }
+      float cpu_time_ms = sdkGetTimerValue(&stopWatch);
+
+      cudaEventRecord(stop[i], stream[i]);
+      // Now that the work has been queued up, release the stream
+      *flag = 1;
+      cudaStreamSynchronize(stream[i]);
+      cudaCheckError();
+
+      float gpu_time_ms;
+      cudaEventElapsedTime(&gpu_time_ms, start[i], stop[i]);
+
+      gpuLatencyMatrix[i * numGPUs + j] = gpu_time_ms * 1e3 / repeat;
+      cpuLatencyMatrix[i * numGPUs + j] = cpu_time_ms * 1e3 / repeat;
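+      // Both timers report milliseconds; multiplying by 1e3 and dividing by
+      // repeat converts to microseconds per individual transfer.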
+      if (p2p && access) {
+        cudaDeviceDisablePeerAccess(j);
+        cudaSetDevice(j);
+        cudaDeviceDisablePeerAccess(i);
+        cudaSetDevice(i);
+        cudaCheckError();
+      }
+    }
+  }
+
+  printf("   GPU");
+
+  for (int j = 0; j < numGPUs; j++) {
+    printf("%6d ", j);
+  }
+
+  printf("\n");
+
+  for (int i = 0; i < numGPUs; i++) {
+    printf("%6d ", i);
+
+    for (int j = 0; j < numGPUs; j++) {
+      printf("%6.02f ", gpuLatencyMatrix[i * numGPUs + j]);
+    }
+
+    printf("\n");
+  }
+
+  printf("\n   CPU");
+
+  for (int j = 0; j < numGPUs; j++) {
+    printf("%6d ", j);
+  }
+
+  printf("\n");
+
+  for (int i = 0; i < numGPUs; i++) {
+    printf("%6d ", i);
+
+    for (int j = 0; j < numGPUs; j++) {
+      printf("%6.02f ", cpuLatencyMatrix[i * numGPUs + j]);
+    }
+
+    printf("\n");
+  }
+
+  for (int d = 0; d < numGPUs; d++) {
+    cudaSetDevice(d);
+    cudaFree(buffers[d]);
+    cudaFree(buffersD2D[d]);
+    cudaCheckError();
+    cudaEventDestroy(start[d]);
+    cudaCheckError();
+    cudaEventDestroy(stop[d]);
+    cudaCheckError();
+    cudaStreamDestroy(stream[d]);
+    cudaCheckError();
+  }
+
+  sdkDeleteTimer(&stopWatch);
+
+  cudaFreeHost((void *)flag);
+  cudaCheckError();
+}
+
+int main(int argc, char **argv) {
+  int numGPUs, numElems = 40000000;
+  P2PDataTransfer p2p_method = P2P_WRITE;
+
+  cudaGetDeviceCount(&numGPUs);
+  cudaCheckError();
+
+  // process command line args
+  if (checkCmdLineFlag(argc, (const char **)argv, "help")) {
+    printHelp();
+    return 0;
+  }
+
+  if (checkCmdLineFlag(argc, (const char **)argv, "p2p_read")) {
+    p2p_method = P2P_READ;
+  }
+
+  if (checkCmdLineFlag(argc, (const char **)argv, "sm_copy")) {
+    p2p_mechanism = SM;
+  }
+
+  // number of elements of int to be used in copy.
+  if (checkCmdLineFlag(argc, (const char **)argv, "numElems")) {
+    numElems = getCmdLineArgumentInt(argc, (const char **)argv, "numElems");
+  }
+
+  printf("[%s]\n", sSampleName);
+
+  // output devices
+  for (int i = 0; i < numGPUs; i++) {
+    cudaDeviceProp prop;
+    cudaGetDeviceProperties(&prop, i);
+    cudaCheckError();
+    printf("Device: %d, %s, pciBusID: %x, pciDeviceID: %x, pciDomainID:%x\n", i,
+           prop.name, prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
+  }
+
+  checkP2Paccess(numGPUs);
+
+  // Check peer-to-peer connectivity
+  printf("P2P Connectivity Matrix\n");
+  printf("     D\\D");
+
+  for (int j = 0; j < numGPUs; j++) {
+    printf("%6d", j);
+  }
+  printf("\n");
+
+  for (int i = 0; i < numGPUs; i++) {
+    printf("%6d\t", i);
+    for (int j = 0; j < numGPUs; j++) {
+      if (i != j) {
+        int access;
+        cudaDeviceCanAccessPeer(&access, i, j);
+        cudaCheckError();
+        printf("%6d", (access) ? 1 : 0);
+      } else {
+        printf("%6d", 1);
+      }
+    }
+    printf("\n");
+  }
+
+  printf("Unidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
+  outputBandwidthMatrix(numElems, numGPUs, false, P2P_WRITE);
+  printf("Unidirectional P2P=Enabled Bandwidth (P2P Writes) Matrix (GB/s)\n");
+  outputBandwidthMatrix(numElems, numGPUs, true, P2P_WRITE);
+  if (p2p_method == P2P_READ) {
+    printf("Unidirectional P2P=Enabled Bandwidth (P2P Reads) Matrix (GB/s)\n");
+    outputBandwidthMatrix(numElems, numGPUs, true, p2p_method);
+  }
+  printf("Bidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
+  outputBidirectionalBandwidthMatrix(numElems, numGPUs, false);
+  printf("Bidirectional P2P=Enabled Bandwidth Matrix (GB/s)\n");
+  outputBidirectionalBandwidthMatrix(numElems, numGPUs, true);
+
+  printf("P2P=Disabled Latency Matrix (us)\n");
+  outputLatencyMatrix(numGPUs, false, P2P_WRITE);
+  printf("P2P=Enabled Latency (P2P Writes) Matrix (us)\n");
+  outputLatencyMatrix(numGPUs, true, P2P_WRITE);
+  if (p2p_method == P2P_READ) {
+    printf("P2P=Enabled Latency (P2P Reads) Matrix (us)\n");
+    outputLatencyMatrix(numGPUs, true, p2p_method);
+  }
+
+  printf(
+      "\nNOTE: The CUDA Samples are not meant for performance measurements. "
+      "Results may vary when GPU Boost is enabled.\n");
+
+  exit(EXIT_SUCCESS);
+}
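+// Typical build and run for this sample (a sketch: the file name and include
+// path are placeholders, assuming the CUDA samples' common helpers are used):
+//   nvcc -I<cuda-samples>/Common p2pBandwidthLatencyTest.cu -o p2pBandwidthLatencyTest
+//   ./p2pBandwidthLatencyTest --p2p_read --sm_copy --numElems=40000000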

+ 103 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N2/cnn_fmnist.py

@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
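+# Each process now sees exactly one GPU, chosen by its node-local rank.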
+
+(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % hvd.rank())
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
+             tf.cast(mnist_labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
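+# repeat() makes the dataset infinite, so epoch length is set by the
+# steps_per_epoch argument passed to model.fit() below.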
+
+mnist_model = tf.keras.Sequential([
+    tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),
+    tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),
+    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
+    tf.keras.layers.Dropout(0.25),
+    tf.keras.layers.Flatten(),
+    tf.keras.layers.Dense(128, activation='relu'),
+    tf.keras.layers.Dropout(0.5),
+    tf.keras.layers.Dense(10, activation='softmax')
+])
+
+# Note: the learning rate is left at the Adam default of 0.001 here; it is
+# not scaled by the number of GPUs.
+opt = tf.optimizers.Adam(0.001)
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(opt, backward_passes_per_step=1, average_aggregated_gradients=True)
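+# The wrapper allreduce-averages gradients across all ranks before each
+# weight update, so every worker applies the same update.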
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+mnist_model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+                    optimizer=opt,
+                    metrics=['accuracy'],
+                    experimental_run_tf_function=False)
+
+class PrintLR(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        self.total_images = total_images
+    def on_train_begin(self, epoch, logs=None):
+        global seconds1
+        seconds1 = time.time()  # record the training start time (not used further in this script)
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0 :
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    # Throughput calculator
+    PrintLR(total_images=len(mnist_labels)),
+
+]
+
+
+# Horovod: write logs on worker 0.
+verbose = 2 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+mnist_model.fit(dataset, steps_per_epoch=len(mnist_labels) // (batch_size*hvd.size()), callbacks=callbacks, epochs=6, verbose=verbose)
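+# Typical launch (a sketch, assuming two local GPUs):
+#   horovodrun -np 2 python cnn_fmnist.py --batch-size 256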
+

+ 103 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N3/cnn_fmnist.py

@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
+
+(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % hvd.rank())
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
+             tf.cast(mnist_labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
+
+mnist_model = tf.keras.Sequential([
+    tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),
+    tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),
+    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
+    tf.keras.layers.Dropout(0.25),
+    tf.keras.layers.Flatten(),
+    tf.keras.layers.Dense(128, activation='relu'),
+    tf.keras.layers.Dropout(0.5),
+    tf.keras.layers.Dense(10, activation='softmax')
+])
+
+# Note: the learning rate is left at the Adam default of 0.001 here; it is
+# not scaled by the number of GPUs.
+opt = tf.optimizers.Adam(0.001)
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(opt, backward_passes_per_step=1, average_aggregated_gradients=True)
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+mnist_model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+                    optimizer=opt,
+                    metrics=['accuracy'],
+                    experimental_run_tf_function=False)
+
+class PrintLR(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        self.total_images = total_images
+    def on_train_begin(self, epoch, logs=None):
+        global seconds1
+        seconds1 = time.time()  # record the training start time (not used further in this script)
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0 :
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    # Throughput calculator
+    PrintLR(total_images=len(mnist_labels)),
+
+]
+
+
+# Horovod: write logs on worker 0.
+verbose = 2 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+mnist_model.fit(dataset, steps_per_epoch=len(mnist_labels) // (batch_size*hvd.size()), callbacks=callbacks, epochs=8, verbose=verbose)
+

+ 178 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar.py

@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+from tensorflow.keras.datasets import cifar10
+from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.imagenet_utils import preprocess_input
+from tensorflow.keras import backend as K
+from tensorflow.keras.initializers import glorot_uniform
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
+
+(images, labels), _ = tf.keras.datasets.cifar10.load_data()
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(images[...] / 255.0, tf.float32),
+             tf.cast(labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
+
+def convolutional_block(X, f, filters, stage, block, s=2):
+
+    # Defining name basis
+    conv_name_base = 'res' + str(stage) + block + '_branch'
+    bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+    # Retrieve Filters
+    F1, F2, F3 = filters
+
+    # Save the input value
+    X_shortcut = X
+
+    ##### MAIN PATH #####
+    # First component of main path 
+    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
+    X = Activation('relu')(X)
+
+    # Second component of main path
+    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
+    X = Activation('relu')(X)
+
+    # Third component of main path
+    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
+
+    ##### SHORTCUT PATH #### 
+    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
+    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
+
+    # Final step: Add shortcut value to main path, and pass it through a RELU activation
+    X = Add()([X, X_shortcut])
+    X = Activation('relu')(X)
+
+    return X
+
+def ResNet(input_shape = (28, 28, 1), classes = 10):
+    
+    # Define the input as a tensor with shape input_shape
+    X_input = Input(shape=input_shape)
+
+    
+    # Zero-Padding
+    X = ZeroPadding2D((3, 3))(X_input)
+    
+    # Stage 1
+    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
+    X = Activation('relu')(X)
+    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
+
+    # Stage 2
+    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
+
+    # Stage 3
+    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
+
+    # AVGPOOL
+    X = AveragePooling2D(pool_size=(2,2), padding='same')(X)
+
+    # Output layer
+    X = Flatten()(X)
+    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
+    
+    
+    # Create model
+    model = Model(inputs = X_input, outputs = X, name='ResNet')
+
+    return model
+
+model = ResNet(input_shape = (32, 32, 3), classes = 10)
+
+# %%
+# Horovod: adjust learning rate based on number of GPUs.
+scaled_lr = 0.001 * hvd.size()
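+# Linear scaling rule: the effective global batch grows with hvd.size(), so
+# the base learning rate is scaled by the same factor.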
+# opt = tf.optimizers.Adam(scaled_lr)
+from tensorflow_addons.optimizers import LAMB
+
+# Replace the Adam optimizer with LAMB:
+opt = LAMB(learning_rate=scaled_lr)
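+# LAMB applies layer-wise adaptive learning rates on top of Adam-style
+# moments, which helps keep large-batch training stable.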
+
+
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(
+    opt, backward_passes_per_step=1, average_aggregated_gradients=True)
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+                    optimizer=opt,
+                    metrics=['accuracy'],
+                    experimental_run_tf_function=False)
+
+class PrintLR(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        self.total_images = total_images
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0 :
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+            
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    PrintLR(total_images=len(labels)),
+    hvd.callbacks.LearningRateWarmupCallback(initial_lr=scaled_lr, warmup_epochs=3, verbose=1),
+
+]
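+# The warmup callback above ramps the learning rate up to scaled_lr over the
+# first 3 epochs, avoiding an overly large step size right after random init.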
+
+# model.summary()
+
+# Horovod: write logs on worker 0.
+verbose = 1 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+model.fit(dataset, steps_per_epoch=len(labels) // (batch_size*hvd.size()), callbacks=callbacks, epochs=20, verbose=verbose)

+ 174 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_base.py

@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+from tensorflow.keras.datasets import cifar10
+from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.imagenet_utils import preprocess_input
+from tensorflow.keras import backend as K
+from tensorflow.keras.initializers import glorot_uniform
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+tf.random.set_seed(1330)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
+
+(images, labels), _ = tf.keras.datasets.cifar10.load_data()
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(images[...] / 255.0, tf.float32),
+             tf.cast(labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
+
+def convolutional_block(X, f, filters, stage, block, s=2):
+
+    # Defining name basis
+    conv_name_base = 'res' + str(stage) + block + '_branch'
+    bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+    # Retrieve Filters
+    F1, F2, F3 = filters
+
+    # Save the input value
+    X_shortcut = X
+
+    ##### MAIN PATH #####
+    # First component of main path 
+    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
+    #X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
+    X = Activation('relu')(X)
+
+    # Second component of main path
+    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
+    #X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
+    X = Activation('relu')(X)
+
+    # Third component of main path
+    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
+    #X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
+
+    ##### SHORTCUT PATH #### 
+    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
+    #X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
+
+    # Final step: Add shortcut value to main path, and pass it through a RELU activation
+    X = Add()([X, X_shortcut])
+    X = Activation('relu')(X)
+
+    return X
+
+def ResNet(input_shape = (28, 28, 1), classes = 10):
+    
+    # Define the input as a tensor with shape input_shape
+    X_input = Input(shape=input_shape)
+
+    
+    # Zero-Padding
+    X = ZeroPadding2D((3, 3))(X_input)
+    
+    # Stage 1
+    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
+    #X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
+    X = Activation('relu')(X)
+    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
+
+    # Stage 2
+    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
+
+    # Stage 3
+    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
+
+    # AVGPOOL
+    X = AveragePooling2D(pool_size=(2,2), padding='same')(X)
+
+    # Output layer
+    X = Flatten()(X)
+    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
+    
+    
+    # Create model
+    model = Model(inputs = X_input, outputs = X, name='ResNet')
+
+    return model
+
+model = ResNet(input_shape = (32, 32, 3), classes = 10)
+
+# %%
+# Baseline: the learning rate is left at the Adam default and is not scaled
+# by the number of GPUs.
+opt = tf.optimizers.Adam()
+
+
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(
+    opt, backward_passes_per_step=1, average_aggregated_gradients=True)
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+                    optimizer=opt,
+                    metrics=['accuracy'],
+                    experimental_run_tf_function=False)
+
+class PrintLR(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        self.total_images = total_images
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0 :
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+            
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    PrintLR(total_images=len(labels))
+
+]
+
+# model.summary()
+
+# Horovod: write logs on worker 0.
+verbose = 1 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+model.fit(dataset, steps_per_epoch=len(labels) // (batch_size*hvd.size()), callbacks=callbacks, epochs=12, verbose=verbose)

+ 177 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_batch_norm.py

@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+from tensorflow.keras.datasets import cifar10
+from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.imagenet_utils import preprocess_input
+from tensorflow.keras import backend as K
+from tensorflow.keras.initializers import glorot_uniform
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+
+tf.random.set_seed(1337)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
+
+(images, labels), _ = tf.keras.datasets.cifar10.load_data()
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(images[...] / 255.0, tf.float32),
+             tf.cast(labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
+
+def convolutional_block(X, f, filters, stage, block, s=2):
+
+    # Defining name basis
+    conv_name_base = 'res' + str(stage) + block + '_branch'
+    bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+    # Retrieve Filters
+    F1, F2, F3 = filters
+
+    # Save the input value
+    X_shortcut = X
+
+    ##### MAIN PATH #####
+    # First component of main path 
+    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
+    X = Activation('relu')(X)
+
+    # Second component of main path
+    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
+    X = Activation('relu')(X)
+
+    # Third component of main path
+    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
+
+    ##### SHORTCUT PATH #### 
+    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
+    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
+
+    # Final step: Add shortcut value to main path, and pass it through a RELU activation
+    X = Add()([X, X_shortcut])
+    X = Activation('relu')(X)
+
+    return X
+
+def ResNet(input_shape = (28, 28, 1), classes = 10):
+    
+    # Define the input as a tensor with shape input_shape
+    X_input = Input(shape=input_shape)
+
+    
+    # Zero-Padding
+    X = ZeroPadding2D((3, 3))(X_input)
+    
+    # Stage 1
+    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
+    X = Activation('relu')(X)
+    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
+
+    # Stage 2
+    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
+
+    # Stage 3
+    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
+
+    # AVGPOOL
+    X = AveragePooling2D(pool_size=(2,2), padding='same')(X)
+
+    # Output layer
+    X = Flatten()(X)
+    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
+    
+    
+    # Create model
+    model = Model(inputs = X_input, outputs = X, name='ResNet')
+
+    return model
+
+model = ResNet(input_shape = (32, 32, 3), classes = 10)
+
+# %%
+# Horovod: adjust learning rate based on number of GPUs.
+scaled_lr = 0.001 * hvd.size()
+opt = tf.optimizers.Adam(scaled_lr)
+
+
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(
+    opt, backward_passes_per_step=1, average_aggregated_gradients=True)
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+                    optimizer=opt,
+                    metrics=['accuracy'],
+                    experimental_run_tf_function=False)
+
+class PrintLR(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        self.total_images = total_images
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0 :
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+            
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    PrintLR(total_images=len(labels)),
+    hvd.callbacks.LearningRateWarmupCallback(initial_lr=scaled_lr, warmup_epochs=3, verbose=1),
+
+]
+
+# model.summary()
+
+# Horovod: write logs on worker 0.
+verbose = 1 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+model.fit(dataset, steps_per_epoch=len(labels) // (batch_size*hvd.size()), callbacks=callbacks, epochs=12, verbose=verbose)

+ 178 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_lamb.py

@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+from tensorflow.keras.datasets import cifar10
+from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.imagenet_utils import preprocess_input
+from tensorflow.keras import backend as K
+from tensorflow.keras.initializers import glorot_uniform
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
+
+(images, labels), _ = tf.keras.datasets.cifar10.load_data()
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(images[...] / 255.0, tf.float32),
+             tf.cast(labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
+
+def convolutional_block(X, f, filters, stage, block, s=2):
+
+    # Defining name basis
+    conv_name_base = 'res' + str(stage) + block + '_branch'
+    bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+    # Retrieve Filters
+    F1, F2, F3 = filters
+
+    # Save the input value
+    X_shortcut = X
+
+    ##### MAIN PATH #####
+    # First component of main path 
+    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
+    X = Activation('relu')(X)
+
+    # Second component of main path
+    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
+    X = Activation('relu')(X)
+
+    # Third component of main path
+    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
+
+    ##### SHORTCUT PATH #### 
+    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
+    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
+
+    # Final step: Add shortcut value to main path, and pass it through a RELU activation
+    X = Add()([X, X_shortcut])
+    X = Activation('relu')(X)
+
+    return X
+
+def ResNet(input_shape = (28, 28, 1), classes = 10):
+    
+    # Define the input as a tensor with shape input_shape
+    X_input = Input(shape=input_shape)
+
+    
+    # Zero-Padding
+    X = ZeroPadding2D((3, 3))(X_input)
+    
+    # Stage 1
+    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
+    X = Activation('relu')(X)
+    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
+
+    # Stage 2
+    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
+
+    # Stage 3
+    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
+
+    # AVGPOOL
+    X = AveragePooling2D(pool_size=(2,2), padding='same')(X)
+
+    # Output layer
+    X = Flatten()(X)
+    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
+    
+    
+    # Create model
+    model = Model(inputs = X_input, outputs = X, name='ResNet')
+
+    return model
+
+model = ResNet(input_shape = (32, 32, 3), classes = 10)
+
+# %%
+# Horovod: adjust learning rate based on number of GPUs.
+scaled_lr = 0.001 * hvd.size()
+# opt = tf.optimizers.Adam(scaled_lr)
+from tensorflow_addons.optimizers import LAMB
+
+# Replace the Adam optimizer with LAMB:
+opt = LAMB(learning_rate=scaled_lr)
+
+
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(
+    opt, backward_passes_per_step=1, average_aggregated_gradients=True)
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+                    optimizer=opt,
+                    metrics=['accuracy'],
+                    experimental_run_tf_function=False)
+
+class PrintLR(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        self.total_images = total_images
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0 :
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+            
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    PrintLR(total_images=len(labels)),
+    hvd.callbacks.LearningRateWarmupCallback(initial_lr=scaled_lr, warmup_epochs=3, verbose=1),
+
+]
+
+# model.summary()
+
+# Horovod: write logs on worker 0.
+verbose = 1 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+model.fit(dataset, steps_per_epoch=len(labels) // (batch_size*hvd.size()), callbacks=callbacks, epochs=12, verbose=verbose)

+ 176 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_scalelr.py

@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+from tensorflow.keras.datasets import cifar10
+from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.imagenet_utils import preprocess_input
+from tensorflow.keras import backend as K
+from tensorflow.keras.initializers import glorot_uniform
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+
+tf.random.set_seed(1337)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
+
+(images, labels), _ = tf.keras.datasets.cifar10.load_data()
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(images[...] / 255.0, tf.float32),
+             tf.cast(labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
+
+def convolutional_block(X, f, filters, stage, block, s=2):
+
+    # Defining name basis
+    conv_name_base = 'res' + str(stage) + block + '_branch'
+    bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+    # Retrieve Filters
+    F1, F2, F3 = filters
+
+    # Save the input value
+    X_shortcut = X
+
+    ##### MAIN PATH #####
+    # First component of main path 
+    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
+    X = Activation('relu')(X)
+
+    # Second component of main path
+    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
+    X = Activation('relu')(X)
+
+    # Third component of main path
+    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
+
+    ##### SHORTCUT PATH #### 
+    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
+    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
+
+    # Final step: Add shortcut value to main path, and pass it through a RELU activation
+    X = Add()([X, X_shortcut])
+    X = Activation('relu')(X)
+
+    return X
+
+def ResNet(input_shape = (28, 28, 1), classes = 10):
+    
+    # Define the input as a tensor with shape input_shape
+    X_input = Input(shape=input_shape)
+
+    
+    # Zero-Padding
+    X = ZeroPadding2D((3, 3))(X_input)
+    
+    # Stage 1
+    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
+    X = Activation('relu')(X)
+    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
+
+    # Stage 2
+    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
+
+    # Stage 3
+    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
+
+    # AVGPOOL
+    X = AveragePooling2D(pool_size=(2,2), padding='same')(X)
+
+    # Output layer
+    X = Flatten()(X)
+    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
+    
+    
+    # Create model
+    model = Model(inputs = X_input, outputs = X, name='ResNet')
+
+    return model
+
+model = ResNet(input_shape = (32, 32, 3), classes = 10)
+
+# %%
+# Horovod: adjust learning rate based on number of GPUs.
+scaled_lr = 0.001 * hvd.size()
+opt = tf.optimizers.Adam(scaled_lr)
+
+
+
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(
+    opt, backward_passes_per_step=1, average_aggregated_gradients=True)
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+                    optimizer=opt,
+                    metrics=['accuracy'],
+                    experimental_run_tf_function=False)
+
+class PrintLR(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        self.total_images = total_images
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0 :
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time : {}'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+            
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    PrintLR(total_images=len(labels))
+]
+
+# model.summary()
+
+# Horovod: write logs on worker 0.
+verbose = 1 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+model.fit(dataset, steps_per_epoch=len(labels) // (batch_size*hvd.size()), callbacks=callbacks, epochs=12, verbose=verbose)

+ 177 - 0
ai/Distributed_Deep_Learning/English/python/source_code/N4/cifar_warmup.py

@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# coding: utf-8
+# %%
+import argparse
+import tensorflow as tf
+from tensorflow.keras.datasets import cifar10
+from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.imagenet_utils import preprocess_input
+from tensorflow.keras import backend as K
+from tensorflow.keras.initializers import glorot_uniform
+import horovod.tensorflow.keras as hvd
+import sys
+import time
+
+tf.random.set_seed(1337)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
+    args = parser.parse_args()
+
+    return args
+
+args = parse_args()
+global g_args
+g_args = args
+batch_size = args.batch_size
+
+# Horovod: initialize Horovod.
+hvd.init()
+
+# Horovod: pin GPU to be used to process local rank (one GPU per process)
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+if gpus:
+    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
+
+(images, labels), _ = tf.keras.datasets.cifar10.load_data()
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (tf.cast(images[...] / 255.0, tf.float32),
+             tf.cast(labels, tf.int64))
+)
+dataset = dataset.repeat().shuffle(10000).batch(batch_size)
+
+def convolutional_block(X, f, filters, stage, block, s=2):
+
+    # Defining name basis
+    conv_name_base = 'res' + str(stage) + block + '_branch'
+    bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+    # Retrieve Filters
+    F1, F2, F3 = filters
+
+    # Save the input value
+    X_shortcut = X
+
+    ##### MAIN PATH #####
+    # First component of main path 
+    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
+    X = Activation('relu')(X)
+
+    # Second component of main path
+    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
+    X = Activation('relu')(X)
+
+    # Third component of main path
+    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
+
+    ##### SHORTCUT PATH #### 
+    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
+    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
+
+    # Final step: Add shortcut value to main path, and pass it through a RELU activation
+    X = Add()([X, X_shortcut])
+    X = Activation('relu')(X)
+
+    return X
+
+def ResNet(input_shape = (28, 28, 1), classes = 10):
+    
+    # Define the input as a tensor with shape input_shape
+    X_input = Input(shape=input_shape)
+
+    
+    # Zero-Padding
+    X = ZeroPadding2D((3, 3))(X_input)
+    
+    # Stage 1
+    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
+    X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
+    X = Activation('relu')(X)
+    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
+
+    # Stage 2
+    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
+
+    # Stage 3
+    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
+
+    # AVGPOOL
+    X = AveragePooling2D(pool_size=(2,2), padding='same')(X)
+
+    # Output layer
+    X = Flatten()(X)
+    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
+
+    # Create model
+    model = Model(inputs = X_input, outputs = X, name='ResNet')
+
+    return model
+
+model = ResNet(input_shape=(32, 32, 3), classes=10)
+
+# Horovod: adjust learning rate based on number of GPUs.
+scaled_lr = 0.001 * hvd.size()
+opt = tf.optimizers.Adam(scaled_lr)
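+# Linear-scaling heuristic: the effective global batch is
+# batch_size * hvd.size(), so the base learning rate is multiplied by the
+# worker count; the warmup callback below ramps up to this rate gradually.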
+
+# Horovod: add Horovod DistributedOptimizer.
+opt = hvd.DistributedOptimizer(
+    opt, backward_passes_per_step=1, average_aggregated_gradients=True)
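+# The wrapper averages gradients across all ranks with an allreduce before
+# each weight update, so every replica applies the same update and the model
+# copies stay in sync.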
+
+# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
+# uses hvd.DistributedOptimizer() to compute gradients.
+model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
+              optimizer=opt,
+              metrics=['accuracy'],
+              experimental_run_tf_function=False)
+
+# Callback that reports per-epoch wall time and throughput on rank 0 only.
+class PrintThroughput(tf.keras.callbacks.Callback):
+    def __init__(self, total_images=0):
+        super().__init__()
+        self.total_images = total_images
+
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_start_time = time.time()
+
+    def on_epoch_end(self, epoch, logs=None):
+        if hvd.rank() == 0:
+            epoch_time = time.time() - self.epoch_start_time
+            print('Epoch time: {:.2f}s'.format(epoch_time))
+            images_per_sec = round(self.total_images / epoch_time, 2)
+            print('Images/sec: {}'.format(images_per_sec))
+
+callbacks = [
+    # Horovod: broadcast initial variable states from rank 0 to all other processes.
+    # This is necessary to ensure consistent initialization of all workers when
+    # training is started with random weights or restored from a checkpoint.
+    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
+
+    # Horovod: average metrics among workers at the end of every epoch.
+    #
+    # Note: This callback must be in the list before the ReduceLROnPlateau,
+    # TensorBoard or other metrics-based callbacks.
+    hvd.callbacks.MetricAverageCallback(),
+    PrintThroughput(total_images=len(labels)),
+    hvd.callbacks.LearningRateWarmupCallback(initial_lr=scaled_lr, warmup_epochs=3, verbose=1),
+]
+
+# model.summary()
+
+# Horovod: write logs on worker 0.
+verbose = 1 if hvd.rank() == 0 else 0
+
+# Train the model.
+# Horovod: adjust number of steps based on number of GPUs.
+model.fit(dataset, steps_per_epoch=len(labels) // (batch_size * hvd.size()), callbacks=callbacks, epochs=12, verbose=verbose)
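+# Example launch, one process per GPU (the script name here is illustrative):
+#   horovodrun -np 4 python train_resnet_cifar10.py --batch-size 256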

File diff suppressed because it is too large
+ 58 - 0
ai/Distributed_Deep_Learning/README.md


+ 29 - 0
ai/Distributed_Deep_Learning/Singularity

@@ -0,0 +1,29 @@
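+# Example usage (assumes a Singularity 3.x install; the image name is
+# illustrative):
+#   sudo singularity build ddl.simg Singularity
+#   singularity run --nv ddl.simg jupyter lab --ip=0.0.0.0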
+Bootstrap: docker
+From: nvcr.io/nvidia/tensorflow:21.07-tf2-py3
+
+%runscript
+
+    "$@"
+
+%post
+
+    apt-get -y update
+    apt-get -y install git nvidia-modprobe
+    pip3 install jupyterlab
+    pip3 install ipywidgets
+    mkdir /workspace/python/source_code/Data
+    curl https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip -o /workspace/python/source_code/Data/wikitext-2-v1.zip
+    unzip /workspace/python/source_code/Data/wikitext-2-v1.zip -d /workspace/python/source_code/Data
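+    # The WikiText-2 corpus is baked into the image at build time so the
+    # exercises do not need network access when the container runs.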
+
+%files
+
+    English/* /workspace/
+
+%environment
+XDG_RUNTIME_DIR=
+
+%labels
+
+AUTHOR bharatk