Browse Source

Issue #173: Kubernetes Playbook changes and testing framework for Kubernetes

Signed-off-by: blesson-james <blesson_james@dellteam.com>
John Lockman 4 years ago
parent
commit
6456c8abdd
67 changed files with 1612 additions and 200 deletions
  1. omnia.yml (+5 -5)
  2. roles/common/handlers/main.yml (+0 -35)
  3. roles/common/tasks/ntp.yml (+0 -56)
  4. roles/common/templates/chrony.conf.j2 (+0 -42)
  5. roles/common/templates/ntp.conf.j2 (+0 -16)
  6. roles/compute_gpu/tasks/main.yml (+2 -2)
  7. roles/k8s_common/files/k8s.conf (+0 -0)
  8. roles/k8s_common/files/kubernetes.repo (+0 -0)
  9. roles/k8s_common/handlers/main.yml (+28 -0)
  10. roles/common/tasks/main.yml (+1 -5)
  11. roles/common/vars/main.yml (+1 -19)
  12. roles/firewalld/tasks/main.yml (+2 -2)
  13. roles/firewalld/vars/main.yml (+1 -2)
  14. roles/k8s_manager/tasks/main.yml (+0 -0)
  15. roles/k8s_manager/vars/main.yml (+0 -0)
  16. roles/k8s_nfs_client_setup/tasks/main.yml (+40 -0)
  17. roles/k8s_nfs_client_setup/vars/main.yml (+20 -0)
  18. roles/k8s_nfs_server_setup/tasks/main.yml (+84 -0)
  19. roles/k8s_nfs_server_setup/vars/main.yml (+25 -0)
  20. roles/k8s_start_manager/files/create_admin_user.yaml (+0 -0)
  21. roles/k8s_start_manager/files/create_clusterRoleBinding.yaml (+0 -0)
  22. roles/startmanager/files/data-pv.yaml (+0 -0)
  23. roles/startmanager/files/data2-pv.yaml (+0 -0)
  24. roles/startmanager/files/data3-pv.yaml (+0 -0)
  25. roles/startmanager/files/data4-pv.yaml (+0 -0)
  26. roles/startmanager/files/flannel_net.sh (+0 -0)
  27. roles/startmanager/files/katib-pv.yaml (+0 -0)
  28. roles/k8s_start_manager/files/kube-flannel.yaml (+0 -0)
  29. roles/k8s_start_manager/files/kubeflow_persistent_volumes.yaml (+0 -0)
  30. roles/startmanager/files/minio-pvc.yaml (+0 -0)
  31. roles/startmanager/files/mysql-pv.yaml (+0 -0)
  32. roles/k8s_start_manager/files/nfs-class.yaml (+0 -0)
  33. roles/k8s_start_manager/files/nfs-deployment.yaml (+0 -0)
  34. roles/k8s_start_manager/files/nfs-serviceaccount.yaml (+0 -0)
  35. roles/k8s_start_manager/files/nfs_clusterrole.yaml (+0 -0)
  36. roles/k8s_start_manager/files/nfs_clusterrolebinding.yaml (+0 -0)
  37. roles/startmanager/files/notebook-pv.yaml (+0 -0)
  38. roles/startmanager/files/persistent_volumes.yaml (+0 -0)
  39. roles/k8s_start_manager/files/pvc.yaml (+0 -0)
  40. roles/startmanager/files/tiller_config.sh (+0 -0)
  41. roles/startmanager/tasks/main.yml (+1 -1)
  42. roles/startmanager/vars/main.yml (+1 -1)
  43. roles/k8s_start_services/files/metal-config.yaml (+0 -0)
  44. roles/k8s_start_services/files/metallb.yaml (+0 -0)
  45. roles/k8s_start_services/tasks/main.yml (+0 -0)
  46. roles/startservices/vars/main.yml (+2 -3)
  47. roles/k8s_start_workers/tasks/main.yml (+0 -0)
  48. roles/k8s_start_workers/vars/main.yml (+0 -0)
  49. roles/manager/files/k8s.conf (+0 -3)
  50. roles/manager/files/kubernetes.repo (+0 -8)
  51. test/test_compute_gpu.yml (+156 -0)
  52. test/test_jupyterhub.yml (+113 -0)
  53. test/test_k8s_common.yml (+177 -0)
  54. test/test_k8s_firewalld.yml (+226 -0)
  55. test/test_k8s_manager.yml (+70 -0)
  56. test/test_k8s_start_manager_workers.yml (+144 -0)
  57. test/test_k8s_start_services.yml (+97 -0)
  58. test/test_k8s_start_workers.yml (+50 -0)
  59. test/test_kubeflow.yml (+123 -0)
  60. test/test_vars/test_compute_gpu_vars.yml (+30 -0)
  61. test/test_vars/test_jupyterhub_vars.yml (+22 -0)
  62. test/test_vars/test_k8s_common_vars.yml (+38 -0)
  63. test/test_vars/test_k8s_firewalld_vars.yml (+38 -0)
  64. test/test_vars/test_k8s_manager_vars.yml (+17 -0)
  65. test/test_vars/test_k8s_start_manager_workers_vars.yml (+38 -0)
  66. test/test_vars/test_k8s_start_services_vars.yml (+38 -0)
  67. test/test_vars/test_kubeflow_vars.yml (+22 -0)

+ 5 - 5
omnia.yml

@@ -24,11 +24,11 @@
   roles:
     - common
  
-- name: Apply GPU node config
-  hosts: gpus
-  gather_facts: false
-  roles:
-    - compute_gpu
+#- name: Apply GPU node config
+#  hosts: gpus
+#  gather_facts: false
+#  roles:
+#    - compute_gpu
 
 - name: Apply K8s manager config
   hosts: manager

+ 0 - 35
roles/common/handlers/main.yml

@@ -1,35 +0,0 @@
----
-
-- name: Start and Enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
-  #tags: install
-
-- name: Start and Enable Kubernetes - kubelet
-  service:
-    name: kubelet
-    state: started
-    enabled: yes
-  #tags: install
-
-- name: Restart chrony
-  service:
-    name: chronyd
-    state: restarted
-    enabled: yes
-
-- name: Sync tp clocks
-  command: ntpdc -np
-  register: ntp_clock
-  until:  ntp_clock.stdout.find('*') > -1
-  retries: "{{ retry_count_one }}"
-  delay: "{{ delay_count_one }}"
-
-- name: Sync chrony sources
-  command: chronyc sources
-  register: chrony_src
-  until:  chrony_src.stdout.find('^*') > -1
-  retries: "{{ retry_count }}"
-  delay: "{{ delay_count }}"

+ 0 - 56
roles/common/tasks/ntp.yml

@@ -1,56 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
----
-
-#- name: Deploy ntp servers
-#block:
-#- name: Deploy ntpd
-#package:
-#name: ntp
-#state: present
-#- name: Deploy ntpdate
-#package:
-#name: ntpdate
-#state: present
-#- name: Update ntp servers
-#template:
-#src: ntp.conf.j2
-#dest: "{{ ntp_path }}"
-#owner: root
-#group: root
-#mode: "{{ ntp_mode }}"
-          #backup: yes
-          #notify:
-          #- restart ntpd
-            #- sync ntp clocks
-            #when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  < os_higher_version
-
-  - name: Deploy chrony server
-    block:
-      - name: Deploy chrony
-        package:
-            name: chrony
-            state: present
-      - name: Update ntp servers
-        template:
-          src: chrony.conf.j2
-          dest: "{{ chrony_path }}"
-          owner: root
-          group: root
-          mode: "{{ ntp_mode }}"
-          backup: yes
-        notify:
-          - restart chrony
-          - sync chrony sources
-    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  > os_version

+ 0 - 42
roles/common/templates/chrony.conf.j2

@@ -1,42 +0,0 @@
-# Use public servers from the pool.ntp.org project.
-# Please consider joining the pool (http://www.pool.ntp.org/join.html).
-{% for item in chrony_servers %}
-pool {{ item }} iburst
-{% endfor %}
-
-
-# Record the rate at which the system clock gains/losses time.
-driftfile /var/lib/chrony/drift
-
-# Allow the system clock to be stepped in the first three updates
-# if its offset is larger than 1 second.
-makestep 1.0 3
-
-# Enable kernel synchronization of the real-time clock (RTC).
-rtcsync
-
-# Enable hardware timestamping on all interfaces that support it.
-#hwtimestamp *
-
-# Increase the minimum number of selectable sources required to adjust
-# the system clock.
-#minsources 2
-
-# Allow NTP client access from local network.
-#allow 192.168.0.0/16
-
-# Serve time even if not synchronized to a time source.
-#local stratum 10
-
-# Specify file containing keys for NTP authentication.
-keyfile /etc/chrony.keys
-
-# Get TAI-UTC offset and leap seconds from the system tz database.
-leapsectz right/UTC
-
-# Specify directory for log files.
-logdir /var/log/chrony
-
-# Select which information is logged.
-#log measurements statistics tracking
-

+ 0 - 16
roles/common/templates/ntp.conf.j2

@@ -1,16 +0,0 @@
-driftfile /var/lib/ntp/drift
-
-restrict default nomodify notrap nopeer noquery
-
-restrict 127.0.0.1
-restrict ::1
-
-{% for item in ntp_servers %}
-server  {{ item }} iburst
-{% endfor %}
-
-includefile /etc/ntp/crypto/pw
-
-keys /etc/ntp/keys
-
-

+ 2 - 2
roles/compute_gpu/tasks/main.yml

@@ -28,7 +28,7 @@
 - name: Install nvidia driver and nvidia-docker2
   package:
     name: "{{ nvidia_packages }}"
-    enablerepo: libnvidia-container,nvidia-docker
+    enablerepo: libnvidia-container, nvidia-docker
     state: present
   tags: install
 
@@ -58,4 +58,4 @@
     name: kubelet
     state: restarted
     enabled: yes
-  tags: install
+  tags: install
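Review note: the only change to `enablerepo` here is a space after the comma. Ansible coerces the comma-separated string into a list, and it is not obvious that every version strips the resulting leading whitespace from " nvidia-docker"; an explicit YAML list sidesteps the question. A minimal sketch of the same task in list form (not what this commit does):

    - name: Install nvidia driver and nvidia-docker2
      package:
        name: "{{ nvidia_packages }}"
        enablerepo:
          - libnvidia-container
          - nvidia-docker
        state: present
      tags: install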

roles/common/files/k8s.conf → roles/k8s_common/files/k8s.conf


roles/common/files/kubernetes.repo → roles/k8s_common/files/kubernetes.repo


+ 28 - 0
roles/k8s_common/handlers/main.yml

@@ -0,0 +1,28 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Start and Enable docker service
+  service:
+    name: docker
+    state: restarted
+    enabled: yes
+  #tags: install
+
+- name: Start and Enable Kubernetes - kubelet
+  service:
+    name: kubelet
+    state: started
+    enabled: yes
+  #tags: install
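Review note: handlers run only when a task notifies them by name, so the commented-out `#tags: install` lines are inert either way. A minimal sketch (hypothetical task, not part of this commit) of how a role task would trigger the docker handler:

    - name: Install docker
      package:
        name: docker-ce
        state: present
      notify:
        - Start and Enable docker service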

+ 1 - 5
roles/common/tasks/main.yml

@@ -102,8 +102,4 @@
   service:
     name: kubelet
     state: restarted
-    enabled: yes
-
-- name: Deploy time ntp/chrony
-  include_tasks: ntp.yml
-  tags: install
+    enabled: yes

+ 1 - 19
roles/common/vars/main.yml

@@ -22,7 +22,6 @@ common_packages:
   - docker-ce
   - bash-completion
   - nvidia-detect
-  - chrony
 
 k8s_packages:
   - kubelet-1.16.7
@@ -43,21 +42,4 @@ k8s_conf_dest: /etc/sysctl.d/
 
 k8s_repo_file_mode: 0644
 
-k8s_conf_file_mode: 0644
-
-chrony_path: "/etc/chrony.conf"
-ntp_path: "/etc/ntp.conf"
-ntp_mode: "0644"
-os_higher_version: "8"
-os_version: "7"
-retry_count_one: "10"
-delay_count_one: "60"
-retry_count: "6"
-delay_count: "10"
-
-ntp_servers: 
-  - 0.centos.pool.ntp.org
-  - 1.centos.pool.ntp.org
-  - 2.centos.pool.ntp.org
-chrony_servers:
-  - 2.centos.pool.ntp.org
+k8s_conf_file_mode: 0644

+ 2 - 2
roles/firewalld/tasks/main.yml

@@ -40,7 +40,7 @@
     port: "{{ item }}/tcp"
     permanent: yes
     state: enabled
-  with_items: '{{ k8s_worker_ports }}'
+  with_items: '{{ k8s_compute_ports }}'
   when: "'compute' in group_names"
   tags: firewalld
 
@@ -81,4 +81,4 @@
     name: firewalld
     state: stopped
     enabled: no
-  tags: firewalld
+  tags: firewalld

+ 1 - 2
roles/firewalld/vars/main.yml

@@ -25,7 +25,7 @@ k8s_master_ports:
   - 10252
 
 # Worker nodes firewall ports
-k8s_worker_ports:
+k8s_compute_ports:
   - 10250
   - 30000-32767
 
@@ -35,7 +35,6 @@ calico_udp_ports:
 calico_tcp_ports:
   - 5473
   - 179
-  - 5473
 
 # Flannel CNI firewall ports
 flannel_udp_ports:

roles/manager/tasks/main.yml → roles/k8s_manager/tasks/main.yml


roles/manager/vars/main.yml → roles/k8s_manager/vars/main.yml


+ 40 - 0
roles/k8s_nfs_client_setup/tasks/main.yml

@@ -0,0 +1,40 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Install nfs-utils
+  package:
+    name: nfs-utils
+    state: present
+  tags: nfs_client
+
+- name: Creating directory to mount NFS Share
+  file:
+    path: "{{ nfs_mnt_dir }}"
+    state: directory
+    mode: "{{ nfs_mnt_dir_mode }}"
+  tags: nfs_client
+
+- name: Mounting NFS Share
+  command: "mount {{ groups['manager'] }}:{{ nfs_mnt_dir }} {{ nfs_mnt_dir }}"
+  changed_when: true
+  args:
+    warn: false
+  tags: nfs_client
+
+- name: Configuring Automount NFS Shares on reboot
+  lineinfile:
+    path: "{{ fstab_file_path }}"
+    line: "{{ groups['manager'] }}:{{ nfs_mnt_dir }}     {{ nfs_mnt_dir }}  nfs     nosuid,rw,sync,hard,intr 0 0"
+  tags: nfs_client
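Review note: `groups['manager']` is a list of hostnames, so the templated mount source renders as something like `['node001']:/home/k8snfs`, which mount will reject; the fstab `lineinfile` entry has the same problem. Indexing the first manager host is presumably what is intended; a sketch of the corrected mount task:

    - name: Mounting NFS Share
      command: "mount {{ groups['manager'][0] }}:{{ nfs_mnt_dir }} {{ nfs_mnt_dir }}"
      changed_when: true
      args:
        warn: false
      tags: nfs_client

The `ansible.posix.mount` module would also cover both the mount and the fstab entry in a single idempotent task.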

+ 20 - 0
roles/k8s_nfs_client_setup/vars/main.yml

@@ -0,0 +1,20 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+nfs_mnt_dir: /home/k8snfs
+
+nfs_mnt_dir_mode: 0755
+
+fstab_file_path: /etc/fstab

+ 84 - 0
roles/k8s_nfs_server_setup/tasks/main.yml

@@ -0,0 +1,84 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Install nfs-utils
+  package:
+    name: nfs-utils
+    state: present
+  tags: nfs_server
+
+- name: Install firewalld
+  package:
+    name: firewalld
+    state: present
+  tags: firewalld
+
+- name: Start and enable firewalld
+  service:
+    name: firewalld
+    state: started
+    enabled: yes
+  tags: firewalld
+
+- name: Start and enable rpcbind and nfs-server service
+  service:
+    name: "{{ item }}"
+    state: restarted
+    enabled: yes
+  with_items:
+    - rpcbind
+    - nfs-server
+  tags: nfs_server
+
+- name: Creating NFS share directory
+  file:
+    path: "{{ nfs_share_dir }}"
+    state: directory
+    mode: "{{ nfs_share_dir_mode }}"
+  tags: nfs_server
+
+- name: Adding NFS share entries in /etc/exports
+  lineinfile:
+    path: "{{ exports_file_path }}"
+    line: "{{ nfs_share_dir }} {{ item }}(rw,sync,no_root_squash)"
+  with_items:
+    - "{{ groups['compute'] }}"
+  tags: nfs_server
+
+- name: Exporting the shared directories
+  command: exportfs -r
+  changed_when: true
+  tags: nfs_server
+
+- name: Configuring firewall
+  firewalld:
+    service: "{{ item }}"
+    permanent: true
+    state: enabled
+  with_items:
+    - "{{ nfs_services }}"
+  tags: nfs_server
+
+- name: Reload firewalld
+  command: firewall-cmd --reload
+  changed_when: true
+  tags: nfs_server
+
+- name: Stop and disable firewalld
+  service:
+    name: firewalld
+    state: stopped
+    enabled: no
+  tags: firewalld
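Review note: this role opens the NFS services in firewalld, reloads it, and then stops and disables firewalld in its final task, so the firewall configuration it just wrote is never enforced at runtime; presumably one of the two behaviors is unintended (the existing firewalld role ends the same way, so it may be a deliberate pattern). Also worth flagging that the export combines a world-writable share (mode 0777 in the vars below) with `no_root_squash`, which is convenient for shared scratch space but very permissive. A hypothetical verification task in the style of this commit's test playbooks:

    - name: Checking exported NFS shares
      command: showmount -e localhost
      register: nfs_exports
      changed_when: false

    - name: Validating that the share directory is exported
      assert:
        that:
          - nfs_share_dir in nfs_exports.stdout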

+ 25 - 0
roles/k8s_nfs_server_setup/vars/main.yml

@@ -0,0 +1,25 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+nfs_share_dir: /home/k8snfs
+
+nfs_share_dir_mode: 0777
+
+exports_file_path: /etc/exports
+
+nfs_services:
+  - mountd
+  - rpc-bind
+  - nfs

roles/startmanager/files/create_admin_user.yaml → roles/k8s_start_manager/files/create_admin_user.yaml


roles/startmanager/files/create_clusterRoleBinding.yaml → roles/k8s_start_manager/files/create_clusterRoleBinding.yaml


+ 0 - 0
roles/startmanager/files/data-pv.yaml


+ 0 - 0
roles/startmanager/files/data2-pv.yaml


+ 0 - 0
roles/startmanager/files/data3-pv.yaml


+ 0 - 0
roles/startmanager/files/data4-pv.yaml


+ 0 - 0
roles/startmanager/files/flannel_net.sh


+ 0 - 0
roles/startmanager/files/katib-pv.yaml


roles/startmanager/files/kube-flannel.yaml → roles/k8s_start_manager/files/kube-flannel.yaml


roles/startmanager/files/kubeflow_persistent_volumes.yaml → roles/k8s_start_manager/files/kubeflow_persistent_volumes.yaml


+ 0 - 0
roles/startmanager/files/minio-pvc.yaml


+ 0 - 0
roles/startmanager/files/mysql-pv.yaml


roles/startmanager/files/nfs-class.yaml → roles/k8s_start_manager/files/nfs-class.yaml


roles/startmanager/files/nfs-deployment.yaml → roles/k8s_start_manager/files/nfs-deployment.yaml


roles/startmanager/files/nfs-serviceaccount.yaml → roles/k8s_start_manager/files/nfs-serviceaccount.yaml


roles/startmanager/files/nfs_clusterrole.yaml → roles/k8s_start_manager/files/nfs_clusterrole.yaml


roles/startmanager/files/nfs_clusterrolebinding.yaml → roles/k8s_start_manager/files/nfs_clusterrolebinding.yaml


+ 0 - 0
roles/startmanager/files/notebook-pv.yaml


+ 0 - 0
roles/startmanager/files/persistent_volumes.yaml


roles/startmanager/files/pvc.yaml → roles/k8s_start_manager/files/pvc.yaml


+ 0 - 0
roles/startmanager/files/tiller_config.sh


+ 1 - 1
roles/startmanager/tasks/main.yml

@@ -149,4 +149,4 @@
 - name: Edge / Workstation Install allows pods to scheudle on manager
   command: kubectl taint nodes --all node-role.kubernetes.io/master-
   when: single_node
-  tags: init
+  tags: init

+ 1 - 1
roles/startmanager/vars/main.yml

@@ -47,4 +47,4 @@ k8s_clusterRoleBinding_file_mode: 0655
 
 calico_yml_url: https://docs.projectcalico.org/manifests/calico.yaml
 
-flannel_yml_url: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+flannel_yml_url: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

roles/startservices/files/metal-config.yaml → roles/k8s_start_services/files/metal-config.yaml


roles/startservices/files/metallb.yaml → roles/k8s_start_services/files/metallb.yaml


roles/startservices/tasks/main.yml → roles/k8s_start_services/tasks/main.yml


+ 2 - 3
roles/startservices/vars/main.yml

@@ -27,8 +27,7 @@ k8s_dashboard_yaml_url: https://raw.githubusercontent.com/kubernetes/dashboard/v
 
 helm_stable_repo_url: https://charts.helm.sh/stable
 
-#nfs_server: "{{ ansible_host }}"
-nfs_server: 10.0.0.1
+nfs_server: {{ ansible_host }}
 
 nfs_path: /home/k8snfs
 
@@ -44,4 +43,4 @@ mig_strategy: none
 
 gpu_feature_discovery_version: 0.2.0
 
-fpga_device_plugin_yaml_url: https://raw.githubusercontent.com/Xilinx/FPGA_as_a_Service/master/k8s-fpga-device-plugin/fpga-device-plugin.yml
+fpga_device_plugin_yaml_url: https://raw.githubusercontent.com/Xilinx/FPGA_as_a_Service/master/k8s-fpga-device-plugin/fpga-device-plugin.yml
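Review note: a YAML scalar that starts with `{{` must be quoted, so the new `nfs_server: {{ ansible_host }}` line will fail to parse and Ansible will refuse to load this vars file. The quoted form restores the templating that the removed comment line already used:

    nfs_server: "{{ ansible_host }}"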

roles/startworkers/tasks/main.yml → roles/k8s_start_workers/tasks/main.yml


roles/startworkers/vars/main.yml → roles/k8s_start_workers/vars/main.yml


+ 0 - 3
roles/manager/files/k8s.conf

@@ -1,3 +0,0 @@
-net.bridge.bridge-nf-call-ip6tables = 1
-net.bridge.bridge-nf-call-iptables = 1
-

+ 0 - 8
roles/manager/files/kubernetes.repo

@@ -1,8 +0,0 @@
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-

+ 156 - 0
test/test_compute_gpu.yml

@@ -0,0 +1,156 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_003
+# Execute compute_gpu role in compute nodes with os installed centos 7.8
+- name: OMNIA_UKP_US_VFKP_TC_003
+  hosts: gpus
+  vars_files:
+    - test_vars/test_compute_gpu_vars.yml
+  tasks:
+    - block:
+        - name: Call compute_gpu role
+          include_role:
+            name: ../roles/compute_gpu
+      tags: TC_003
+
+    - name: Checking Nvidia drivers installation
+      command: whereis nvidia
+      register: nvidia_drivers
+      changed_when: false
+      ignore_errors: True
+      tags: TC_003, VERIFY_003
+
+    - name: Checking nvidia-docker2 installation status
+      command: nvidia-docker version
+      register: nvidia_docker_version
+      changed_when: false
+      ignore_errors: True
+      tags: TC_003, VERIFY_003
+
+    - name: Checking docker service status
+      systemd:
+        name: docker
+      register: docker_service
+      tags: TC_003, VERIFY_003
+
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: TC_003, VERIFY_003
+
+    - name: Validating Nvidia drivers
+      assert:
+        that:
+          - "'/usr/lib/nvidia' in nvidia_drivers.stdout"
+        fail_msg: "{{ nvidia_drivers_fail_msg }}"
+        success_msg: "{{ nvidia_drivers_success_msg }}"
+      tags: TC_003, VERIFY_003
+
+    - name: Validating nvidia-docker2 installation
+      assert:
+        that:
+          - "'command not found' not in nvidia_docker_version.stdout"
+        fail_msg: "{{ nvidia_docker_fail_msg }}"
+        success_msg: "{{ nvidia_docker_success_msg }}"
+      tags: TC_003, VERIFY_003
+
+    - name: Validating docker service status
+      assert:
+        that:
+          - docker_service.status.ActiveState == 'active'
+        fail_msg: "{{ docker_service_fail_msg }}"
+        success_msg: "{{ docker_service_success_msg }}"
+      tags: TC_003, VERIFY_003
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: TC_003, VERIFY_003
+
+# OMNIA_UKP_US_VFKP_TC_004
+# Execute compute_gpu role in compute nodes with NVIDIA kmod/docker drivers already installed
+- name: OMNIA_UKP_US_VFKP_TC_004
+  hosts: gpus
+  vars_files:
+    - test_vars/test_compute_gpu_vars.yml
+  tasks:
+    - block:
+        - name: Call compute_gpu role
+          include_role:
+            name: ../roles/compute_gpu
+      tags: TC_004
+
+    - name: Checking Nvidia drivers installation
+      command: whereis nvidia
+      register: nvidia_drivers
+      changed_when: false
+      ignore_errors: True
+      tags: TC_004, VERIFY_004
+
+    - name: Checking nvidia-docker2 installation status
+      command: nvidia-docker version
+      register: nvidia_docker_version
+      changed_when: false
+      ignore_errors: True
+      tags: TC_004, VERIFY_004
+
+    - name: Checking docker service status
+      systemd:
+        name: docker
+      register: docker_service
+      tags: TC_004, VERIFY_004
+
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: TC_004, VERIFY_004
+
+    - name: Validating Nvidia drivers
+      assert:
+        that:
+          - "'/usr/lib/nvidia' in nvidia_drivers.stdout"
+        fail_msg: "{{ nvidia_drivers_fail_msg }}"
+        success_msg: "{{ nvidia_drivers_success_msg }}"
+      tags: TC_004, VERIFY_004
+
+    - name: Validating nvidia-docker2 installation
+      assert:
+        that:
+          - "'command not found' not in nvidia_docker_version.stdout"
+        fail_msg: "{{ nvidia_docker_fail_msg }}"
+        success_msg: "{{ nvidia_docker_success_msg }}"
+      tags: TC_004, VERIFY_004
+
+    - name: Validating docker service status
+      assert:
+        that:
+          - docker_service.status.ActiveState == 'active'
+        fail_msg: "{{ docker_service_fail_msg }}"
+        success_msg: "{{ docker_service_success_msg }}"
+      tags: TC_004, VERIFY_004
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: TC_004, VERIFY_004
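Review note: when a binary is missing, the `command` task fails before producing any output, so the 'command not found' text never reaches `stdout`; with `ignore_errors: True` the play continues with an empty string and the nvidia-docker assertion passes vacuously. Checking the return code is tighter; a sketch under that assumption:

    - name: Checking nvidia-docker2 installation status
      command: nvidia-docker version
      register: nvidia_docker_version
      changed_when: false
      failed_when: false

    - name: Validating nvidia-docker2 installation
      assert:
        that:
          - nvidia_docker_version.rc == 0
        fail_msg: "{{ nvidia_docker_fail_msg }}"
        success_msg: "{{ nvidia_docker_success_msg }}"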

+ 113 - 0
test/test_jupyterhub.yml

@@ -0,0 +1,113 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_014
+# Execute jupyterhub role in manager nodes with os installed centos 7.9
+- name: OMNIA_UKP_US_VFKP_TC_014
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_jupyterhub_vars.yml
+  tasks:
+    - block:
+        - name: Call jupyterhub role
+          include_role:
+            name: ../platforms/roles/jupyterhub
+      tags: TC_014
+
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 5
+      tags: TC_014
+
+    - name: Checking all running pods under default namespace
+      command: kubectl get pods --namespace default --field-selector=status.phase=Running
+      register: namesapce_default_running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_014, VERIFY_014
+
+    - name: Checking K8s services
+      command: kubectl get services
+      register: k8s_services
+      changed_when: false
+      ignore_errors: True
+      tags: TC_014, VERIFY_014
+
+    - name: Validating JupyterHub pods
+      assert:
+        that:
+          - "'hub' in namesapce_default_running_pods.stdout"
+          - "'proxy' in namesapce_default_running_pods.stdout"
+        fail_msg: "{{ jupyterhub_pods_fail_msg }}"
+        success_msg: "{{ jupyterhub_pods_success_msg }}"
+      tags: TC_014, VERIFY_014
+
+    - name: Validating JupyterHub services
+      assert:
+        that:
+          - "'hub' in k8s_services.stdout"
+          - "'proxy-public' in k8s_services.stdout"
+          - "'proxy-api' in k8s_services.stdout"
+        fail_msg: "{{ jupyterhub_services_fail_msg }}"
+        success_msg: "{{ jupyterhub_services_success_msg }}"
+      tags: TC_014, VERIFY_014
+
+# OMNIA_UKP_US_VFKP_TC_015
+# Execute jupyterhub role in manager nodes with JupyterHub already deployed
+- name: OMNIA_UKP_US_VFKP_TC_015
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_jupyterhub_vars.yml
+  tasks:
+    - block:
+        - name: Call jupyterhub role
+          include_role:
+            name: ../platforms/roles/jupyterhub
+      tags: TC_015, VERIFY_015
+
+    - name: Checking all running pods under default namespace
+      command: kubectl get pods --namespace default --field-selector=status.phase=Running
+      register: namesapce_default_running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_015, VERIFY_015
+
+    - name: Checking K8s services
+      command: kubectl get services
+      register: k8s_services
+      changed_when: false
+      ignore_errors: True
+      tags: TC_015, VERIFY_015
+
+    - name: Validating JupyterHub pods
+      assert:
+        that:
+          - "'hub' in namesapce_default_running_pods.stdout"
+          - "'proxy' in namesapce_default_running_pods.stdout"
+        fail_msg: "{{ jupyterhub_pods_fail_msg }}"
+        success_msg: "{{ jupyterhub_pods_success_msg }}"
+      tags: TC_015, VERIFY_015
+
+    - name: Validating JupyterHub services
+      assert:
+        that:
+          - "'hub' in k8s_services.stdout"
+          - "'proxy-public' in k8s_services.stdout"
+          - "'proxy-api' in k8s_services.stdout"
+        fail_msg: "{{ jupyterhub_services_fail_msg }}"
+        success_msg: "{{ jupyterhub_services_success_msg }}"
+      tags: TC_015, VERIFY_015
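Review note: the fixed five-minute `pause` makes TC_014 both slow and racy. Polling until the pods actually report Running is more reliable; a sketch reusing the play's own register name (retry counts are illustrative):

    - name: Waiting for hub and proxy pods to reach Running
      command: kubectl get pods --namespace default --field-selector=status.phase=Running
      register: namesapce_default_running_pods
      until: "'hub' in namesapce_default_running_pods.stdout and 'proxy' in namesapce_default_running_pods.stdout"
      retries: 30
      delay: 10
      changed_when: false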

+ 177 - 0
test/test_k8s_common.yml

@@ -0,0 +1,177 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_UKP_US_VFKP_TC_001
+# Execute k8s_common role in manager & compute nodes with os installed centos 7.8
+- name: OMNIA_UKP_US_VFKP_TC_001
+  hosts: manager, compute
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+  tasks:
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/k8s_common
+      tags: TC_001
+
+    - name: Checking common packages installation status
+      command: "'{{ item }}' --version"
+      with_items:
+        - "{{ common_packages }}"
+      register: common_packages_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_001, VERIFY_001
+
+    - name: Checking K8s packages installation status
+      command: "'{{ item }}' version"
+      with_items:
+        - "{{ k8_packages }}"
+      register: k8s_packages_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_001, VERIFY_001
+
+    - name: Checking nfs-utils and versionlock status
+      command: "{{ item }}"
+      with_items:
+        - "yum versionlock"
+        - "nfsstat"
+      register: nfs_versionlock_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_001, VERIFY_001
+
+    - name: Checking docker service status
+      systemd:
+        name: docker
+      register: docker_service
+      tags: TC_001, VERIFY_001
+
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: TC_001, VERIFY_001
+
+    - name: Validate common & K8s packages status
+      assert:
+        that:
+          - "'command not found' not in {{ item }}"
+        fail_msg: "{{ packages_status_fail_msg }}"
+        success_msg: "{{ packages_status_success_msg }}"
+      with_items:
+        - "{{ common_packages_status.results }}"
+        - "{{ k8s_packages_status.results }}"
+        - "{{ nfs_versionlock_status.results }}"
+      tags: TC_001, VERIFY_001
+
+    - name: Validating docker service status
+      assert:
+        that:
+          - docker_service.status.ActiveState == 'active'
+        fail_msg: "{{ docker_service_fail_msg }}"
+        success_msg: "{{ docker_service_success_msg }}"
+      tags: TC_001, VERIFY_001
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: TC_001, VERIFY_001
+
+# Testcase OMNIA_UKP_US_VFKP_TC_002
+# Execute k8s_common role in manager & compute nodes with common and K8s packages already installed
+- name: OMNIA_UKP_US_VFKP_TC_002
+  hosts: manager, compute
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+  tasks:
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/k8s_common
+      tags: TC_002, VERIFY_002
+
+    - name: Checking common packages installation status
+      command: "'{{ item }}' --version"
+      with_items:
+        - "{{ common_packages }}"
+      register: common_packages_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_002, VERIFY_002
+
+    - name: Checking K8s packages installation status
+      command: "'{{ item }}' version"
+      with_items:
+        - "{{ k8_packages }}"
+      register: k8s_packages_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_002, VERIFY_002
+
+    - name: Checking nfs-utils and versionlock status
+      command: "{{ item }}"
+      with_items:
+        - "yum versionlock"
+        - "nfsstat"
+      register: nfs_versionlock_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_002, VERIFY_002
+
+    - name: Checking docker service status
+      systemd:
+        name: docker
+      register: docker_service
+      tags: TC_002, VERIFY_002
+
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: TC_002, VERIFY_002
+
+    - name: Validate common & K8s packages status
+      assert:
+        that:
+          - "'command not found' not in {{ item }}"
+        fail_msg: "{{ packages_status_fail_msg }}"
+        success_msg: "{{ packages_status_success_msg }}"
+        quiet: true
+      with_items:
+        - "{{ common_packages_status.results }}"
+        - "{{ k8s_packages_status.results }}"
+        - "{{ nfs_versionlock_status.results }}"
+      tags: TC_002, VERIFY_002
+
+    - name: Validating docker service status
+      assert:
+        that:
+          - docker_service.status.ActiveState == 'active'
+        fail_msg: "{{ docker_service_fail_msg }}"
+        success_msg: "{{ docker_service_success_msg }}"
+      tags: TC_002, VERIFY_002
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: TC_002, VERIFY_002
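Review note: two things to double-check here. First, the plays loop over `k8_packages` while the role vars define `k8s_packages`; unless the test vars file deliberately exposes the former name, the loop variable is undefined. Second, `'{{ item }}' --version` treats each package id as an executable, but entries such as `kubelet-1.16.7` are package names, not binaries. Querying rpm avoids both issues; a sketch assuming the test vars expose the same package lists as the role:

    - name: Checking K8s packages installation status
      command: "rpm -q {{ item }}"
      with_items:
        - "{{ k8s_packages }}"
      register: k8s_packages_status
      changed_when: false
      failed_when: false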

+ 226 - 0
test/test_k8s_firewalld.yml

@@ -0,0 +1,226 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_007
+# Execute k8s_firewalld role in manager and compute nodes with os installed centos 7.9
+- name: OMNIA_UKP_US_VFKP_TC_007
+  hosts: manager, compute
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_firewalld_vars.yml
+    - ../roles/k8s_firewalld/vars/main.yml
+  tasks:
+    - block:
+        - name: Call k8s_firewalld role
+          include_role:
+            name: ../roles/k8s_firewalld
+      tags: TC_007
+
+    - name: Start and enable firewalld
+      service:
+        name: firewalld
+        state: started
+        enabled: yes
+      tags: TC_007, VERIFY_007
+
+    - name: Checking firewalld open ports on manager node
+      command: firewall-cmd --list-ports
+      register: manager_firewalld_ports
+      when: "'manager' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Checking firewalld open ports on compute node
+      command: firewall-cmd --list-ports
+      register: compute_firewalld_ports
+      when: "'compute' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating K8s port on manager node
+      assert:
+        that:
+          - "'6443' in manager_firewalld_ports.stdout"
+          - "'2379-2380' in manager_firewalld_ports.stdout"
+          - "'10250' in manager_firewalld_ports.stdout"
+          - "'10251' in manager_firewalld_ports.stdout"
+          - "'10252' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ manager_k8s_ports_status_fail_msg }}"
+        success_msg: "{{ manager_k8s_ports_status_success_msg }}"
+      when: "'manager' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating K8s port on compute node
+      assert:
+        that:
+          - "'10250' in compute_firewalld_ports.stdout"
+          - "'30000-32767' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ compute_k8s_ports_status_fail_msg }}"
+        success_msg: "{{ compute_k8s_ports_status_success_msg }}"
+      when: "'compute' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating Calico udp/tcp ports on manager nodes
+      assert:
+        that:
+          - "'4789' in manager_firewalld_ports.stdout"
+          - "'5473' in manager_firewalld_ports.stdout"
+          - "'179' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ calico_ports_manager_fail_msg }}"
+        success_msg: "{{ calico_ports_manager_success_msg }}"
+      when: "k8s_cni == 'calico' and 'manager' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating Calico udp/tcp ports on compute nodes
+      assert:
+        that:
+          - "'4789' in compute_firewalld_ports.stdout"
+          - "'5473' in compute_firewalld_ports.stdout"
+          - "'179' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ calico_ports_compute_fail_msg }}"
+        success_msg: "{{ calico_ports_compute_success_msg }}"
+      when: "k8s_cni == 'calico' and 'compute' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating Flannel ports on manager nodes
+      assert:
+        that:
+          - "'8285' in manager_firewalld_ports.stdout"
+          - "'8472' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ flannel_ports_manager_fail_msg }}"
+        success_msg: "{{ flannel_ports_manager_success_msg }}"
+      when: "k8s_cni == 'flannel' and 'manager' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating Flannel ports on compute nodes
+      assert:
+        that:
+          - "'8285' in compute_firewalld_ports.stdout"
+          - "'8472' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ flannel_ports_compute_fail_msg }}"
+        success_msg: "{{ flannel_ports_compute_success_msg }}"
+      when: "k8s_cni == 'flannel' and 'compute' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Stop and disable firewalld
+      service:
+        name: firewalld
+        state: stopped
+        enabled: no
+      tags: TC_007, VERIFY_007
+
+# OMNIA_UKP_US_VFKP_TC_008
+# Execute k8s_firewalld role in manager and compute nodes with K8s ports already opened
+- name: OMNIA_UKP_US_VFKP_TC_008
+  hosts: manager, compute
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_firewalld_vars.yml
+    - ../roles/k8s_firewalld/vars/main.yml
+  tasks:
+    - block:
+        - name: Call k8s_firewalld role
+          include_role:
+            name: ../roles/k8s_firewalld
+      tags: TC_008
+
+    - name: Start and enable firewalld
+      service:
+        name: firewalld
+        state: started
+        enabled: yes
+      tags: TC_008, VERIFY_008
+
+    - name: Checking firewalld open ports on manager node
+      command: firewall-cmd --list-ports
+      register: manager_firewalld_ports
+      when: "'manager' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Checking firewalld open ports on compute node
+      command: firewall-cmd --list-ports
+      register: compute_firewalld_ports
+      when: "'compute' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating K8s port on manager node
+      assert:
+        that:
+          - "'6443' in manager_firewalld_ports.stdout"
+          - "'2379-2380' in manager_firewalld_ports.stdout"
+          - "'10250' in manager_firewalld_ports.stdout"
+          - "'10251' in manager_firewalld_ports.stdout"
+          - "'10252' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ manager_k8s_ports_status_fail_msg }}"
+        success_msg: "{{ manager_k8s_ports_status_success_msg }}"
+      when: "'manager' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating K8s port on compute node
+      assert:
+        that:
+          - "'10250' in compute_firewalld_ports.stdout"
+          - "'30000-32767' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ compute_k8s_ports_status_fail_msg }}"
+        success_msg: "{{ compute_k8s_ports_status_success_msg }}"
+      when: "'compute' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating Calico udp/tcp ports on manager nodes
+      assert:
+        that:
+          - "'4789' in manager_firewalld_ports.stdout"
+          - "'5473' in manager_firewalld_ports.stdout"
+          - "'179' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ calico_ports_manager_fail_msg }}"
+        success_msg: "{{ calico_ports_manager_success_msg }}"
+      when: "k8s_cni == 'calico' and 'manager' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating Calico udp/tcp ports on compute nodes
+      assert:
+        that:
+          - "'4789' in compute_firewalld_ports.stdout"
+          - "'5473' in compute_firewalld_ports.stdout"
+          - "'179' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ calico_ports_compute_fail_msg }}"
+        success_msg: "{{ calico_ports_compute_success_msg }}"
+      when: "k8s_cni == 'calico' and 'compute' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating Flannel ports on manager nodes
+      assert:
+        that:
+          - "'8285' in manager_firewalld_ports.stdout"
+          - "'8472' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ flannel_ports_manager_fail_msg }}"
+        success_msg: "{{ flannel_ports_manager_success_msg }}"
+      when: "k8s_cni == 'flannel' and 'manager' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating Flannel ports on compute nodes
+      assert:
+        that:
+          - "'8285' in compute_firewalld_ports.stdout"
+          - "'8472' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ flannel_ports_compute_fail_msg }}"
+        success_msg: "{{ flannel_ports_compute_success_msg }}"
+      when: "k8s_cni == 'flannel' and 'compute' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Stop and disable firewalld
+      service:
+        name: firewalld
+        state: stopped
+        enabled: no
+      tags: TC_008, VERIFY_008
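Review note: as rendered in this diff the role directory is still `roles/firewalld`, so the `../roles/k8s_firewalld` paths in these plays may not resolve unless the rename happens elsewhere. Separately, substring checks against `firewall-cmd --list-ports` can collide (for example, '179' would also match a hypothetical '10179'); querying each expected port individually is unambiguous. A sketch reusing the role's port list:

    - name: Checking each expected K8s port on the manager node
      command: "firewall-cmd --query-port={{ item }}/tcp"
      with_items:
        - "{{ k8s_master_ports }}"
      register: manager_port_query
      changed_when: false
      failed_when: false
      when: "'manager' in group_names"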

+ 70 - 0
test/test_k8s_manager.yml

@@ -0,0 +1,70 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_005
+# Execute k8s_manager role in manager nodes with os installed centos 7.8
+- name: OMNIA_UKP_US_VFKP_TC_005
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_manager_vars.yml
+  tasks:
+    - block:
+        - name: Call manager role
+          include_role:
+            name: ../roles/k8s_manager
+      tags: TC_005
+
+    - name: Checking helm installation status
+      command: helm version
+      register: helm_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_005, VERIFY_005
+
+    - name: Validating helm installation status
+      assert:
+        that:
+          - "'version.BuildInfo' in helm_status.stdout"
+        fail_msg: "{{ helm_status_fail_msg }}"
+        success_msg: "{{ helm_status_success_msg }}"
+      tags: TC_005, VERIFY_005
+
+# OMNIA_UKP_US_VFKP_TC_006
+# Execute k8s_manager role in manager nodes with helm already installed
+- name: OMNIA_UKP_US_VFKP_TC_006
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_manager_vars.yml
+  tasks:
+    - block:
+        - name: Call manager role
+          include_role:
+            name: ../roles/k8s_manager
+      tags: TC_006, VERIFY_006
+
+    - name: Checking helm installation status
+      command: helm version
+      register: helm_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_006, VERIFY_006
+
+    - name: Validating helm installation status
+      assert:
+        that:
+          - "'command not found' not in helm_status.stdout"
+        fail_msg: "{{ helm_status_fail_msg }}"
+        success_msg: "{{ helm_status_success_msg }}"
+      tags: TC_006, VERIFY_006
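Review note: in TC_006 the assertion `'command not found' not in helm_status.stdout` passes vacuously when helm is absent, because the failed `command` task leaves `stdout` empty and `ignore_errors` masks the failure. Asserting on the return code together with the version banner is stricter; a sketch:

    - name: Validating helm installation status
      assert:
        that:
          - helm_status.rc == 0
          - "'version.BuildInfo' in helm_status.stdout"
        fail_msg: "{{ helm_status_fail_msg }}"
        success_msg: "{{ helm_status_success_msg }}"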

+ 144 - 0
test/test_k8s_start_manager_workers.yml

@@ -0,0 +1,144 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_009, OMNIA_UKP_US_VFKP_TC_010
+# Execute k8s_start_manager role in manager nodes with os installed centos 7.8 and swap enabled
+- name: OMNIA_UKP_US_VFKP_TC_009, OMNIA_UKP_US_VFKP_TC_010
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_start_manager_workers_vars.yml
+  tasks:
+    - name: Enable Swap
+      command: /usr/sbin/swapon -a
+      changed_when: true
+      tags: TC_009, TC_010
+
+    - block:
+        - name: Call k8s_start_manager role
+          include_role:
+            name: ../roles/k8s_start_manager
+      tags: TC_009, TC_010
+
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 10
+      tags: TC_009, TC_010
+
+    - name: Checking master node
+      command: kubectl get nodes
+      register: master_node_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Checking kube-system pods
+      command: kubectl get pods --namespace kube-system --field-selector=status.phase=Running
+      register: kube_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Checking calico/flannel SDN network status
+      command: ip address
+      register: calico_flannel_status
+      changed_when: false
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Checking K8s service account and token
+      command: kubectl get secrets
+      register: service_account_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating master node status
+      assert:
+        that:
+          - "'master' in master_node_status.stdout"
+        fail_msg: "{{ master_node_status_fail_msg }}"
+        success_msg: "{{ master_node_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating controller-manager and scheduler and coreDNS pods status
+      assert:
+        that:
+          - "'kube-scheduler' in kube_system_pods.stdout"
+          - "'kube-controller' in kube_system_pods.stdout"
+        fail_msg: "{{ controller_scheduler_status_fail_msg }}"
+        success_msg: "{{ controller_scheduler_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating coreDNS pods status
+      assert:
+        that:
+          - "'coredns' in kube_system_pods.stdout"
+        fail_msg: "{{ coredns_status_fail_msg }}"
+        success_msg: "{{ coredns_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating calico/flannel SDN network status
+      assert:
+        that:
+          - "'calico' in kube_system_pods.stdout or 'flannel' in kube_system_pods.stdout"
+        fail_msg: "{{ calico_flannel_status_fail_msg }}"
+        success_msg: "{{ calico_flannel_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating K8s service account and token status
+      assert:
+        that:
+          - "'kubernetes.io/service-account-token' in service_account_status.stdout"
+        fail_msg: "{{ k8s_service_account_status_fail_msg }}"
+        success_msg: "{{ k8s_service_account_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+# OMNIA_UKP_US_VFKP_TC_011, OMNIA_UKP_US_VFKP_TC_012
+# Execute k8s_start_workers role in compute nodes with os installed centos 7.8 and swap enabled
+- name: OMNIA_UKP_US_VFKP_TC_011, OMNIA_UKP_US_VFKP_TC_012
+  hosts: compute
+  gather_facts: false
+  tasks:
+    - name: Enable Swap
+      command: /usr/sbin/swapon -a
+      changed_when: true
+      tags: TC_011, TC_012
+
+    - block:
+        - name: Call k8s_start_workers role
+          include_role:
+            name: ../roles/k8s_start_workers.yml
+      tags: TC_011, TC_012
+
+- name: OMNIA_UKP_US_VFKP_TC_011, OMNIA_UKP_US_VFKP_TC_012
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_start_manager_workers_vars.yml
+  tasks:
+    - name: Check worker nodes status
+      command: kubectl get node --selector='!node-role.kubernetes.io/master'
+      register: worker_nodes_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_011, TC_012, VERIFY_011
+
+    - name: Validating worker nodes status
+      assert:
+        that:
+          - "'Ready' in worker_nodes_status.stdout"
+        fail_msg: "{{ worker_nodes_status_fail_msg }}"
+        success_msg: "{{ worker_nodes_status_success_msg }}"
+      tags: TC_011, TC_012, VERIFY_011
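Review note: `include_role` takes a role name or path, not a tasks file, so `name: ../roles/k8s_start_workers.yml` will fail to resolve; the same stray `.yml` suffix appears again in test_k8s_start_workers.yml below. The corrected block, matching how every other play in this commit includes roles:

    - block:
        - name: Call k8s_start_workers role
          include_role:
            name: ../roles/k8s_start_workers
      tags: TC_011, TC_012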

+ 97 - 0
test/test_k8s_start_services.yml

@@ -0,0 +1,97 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_013
+# Execute k8s_start_services role in manager nodes with os installed centos 7.9
+- name: OMNIA_UKP_US_VFKP_TC_013
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_start_services_vars.yml
+  tasks:
+    - block:
+        - name: Call k8s_start_services role
+          include_role:
+            name: ../roles/k8s_start_services
+      tags: TC_013
+
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 10
+      tags: TC_013
+
+    - name: Checking all running pods
+      command: kubectl get pods --all-namespaces --field-selector=status.phase=Running
+      register: running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_013, VERIFY_013
+
+    - name: Checking default storage class
+      command: kubectl get sc
+      register: default_storage_class
+      changed_when: false
+      ignore_errors: True
+      tags: TC_013, VERIFY_013
+
+    - name: Validating Metallb, Prometheus and MPI pods
+      assert:
+        that:
+          - "'metallb' in running_pods.stdout"
+          - "'prometheus' in running_pods.stdout"
+          - "'mpi-operator' in running_pods.stdout"
+        fail_msg: "{{ metallb_prometheus_mpi_pods_fail_msg }}"
+        success_msg: "{{ metallb_prometheus_mpi_pods_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating K8s dashboard
+      assert:
+        that:
+          - "'kubernetes-dashboard' in running_pods.stdout"
+        fail_msg: "{{ kubernetes_dashboard_fail_msg }}"
+        success_msg: "{{ kubernetes_dashboard_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating NFS Client Provisioner pods
+      assert:
+        that:
+          - "'nfs-client-provisioner' in running_pods.stdout"
+        fail_msg: "{{ nfs_client_provisioner_pods_fail_msg }}"
+        success_msg: "{{ nfs_client_provisioner_pods_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating default storage class
+      assert:
+        that:
+          - "'nfs-client' in default_storage_class.stdout"
+        fail_msg: "{{ default_storage_class_fail_msg }}"
+        success_msg: "{{ default_storage_class_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating Node Feature Discovery pods
+      assert:
+        that:
+          - "'node-feature-discovery' in running_pods.stdout"
+        fail_msg: "{{ node_feature_discovery_pods_fail_msg }}"
+        success_msg: "{{ node_feature_discovery_pods_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating Nvidia device plugin pods
+      assert:
+        that:
+          - "'nvidia-device-plugin' in running_pods.stdout"
+        fail_msg: "{{ nvidia_device_plugin_pods_fail_msg }}"
+        success_msg: "{{ nvidia_device_plugin_pods_success_msg }}"
+      tags: TC_013, VERIFY_013

+ 50 - 0
test/test_k8s_start_workers.yml

@@ -0,0 +1,50 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_013, OMNIA_UKP_US_VFKP_TC_014
+# Execute startworkers role in compute nodes with os installed centos 7.8 and swap enabled
+- name: OMNIA_UKP_US_VFKP_TC_013, OMNIA_UKP_US_VFKP_TC_014
+  hosts: compute
+  vars_files:
+    - test_vars/test_k8s_start_workers_vars.yml
+  tasks:
+    - name: Enable Swap
+      command: /usr/sbin/swapon -a
+      changed_when: true
+      tags: TC_013, TC_014
+
+    - block:
+        - name: Call k8s_start_workers role
+          include_role:
+            name: ../roles/k8s_start_workers.yml
+      tags: TC_013, TC_014
+
+- name: OMNIA_UKP_US_VFKP_TC_013, OMNIA_UKP_US_VFKP_TC_014
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_start_workers_vars.yml
+  tasks:
+    - name: Check worker nodes status
+      command: kubectl get node --selector='!node-role.kubernetes.io/master'
+      register: worker_nodes_status
+      changed_when: false
+      tags: TC_013, TC_014, VERIFY_013
+
+    - name: Validating worker nodes status
+      assert:
+        that:
+          - "'Ready' in worker_nodes_status.stdout"
+        fail_msg: "{{ worker_nodes_status_fail_msg }}"
+        success_msg: "{{ worker_nodes_status_success_msg }}"
+      tags: TC_013, TC_014, VERIFY_013
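
For reference, the selector above excludes the master, so a healthy cluster
lists only worker nodes in Ready state; illustrative output (names, ages and
versions are made up):

    NAME        STATUS   ROLES    AGE     VERSION
    compute01   Ready    <none>   5m12s   v1.16.7
    compute02   Ready    <none>   5m02s   v1.16.7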

+ 123 - 0
test/test_kubeflow.yml

@@ -0,0 +1,123 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_016
+# Execute the kubeflow role on manager nodes running CentOS 7.9
+- name: OMNIA_UKP_US_VFKP_TC_016
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_kubeflow_vars.yml
+  tasks:
+    - block:
+        - name: Call kubeflow role
+          include_role:
+            name: ../platforms/roles/kubeflow
+      tags: TC_016
+
+    - name: Waiting for the pods to be deployed
+      pause:
+        minutes: 5
+      tags: TC_016
+
+    - name: Checking installed Kubeflow version
+      command: kfctl version
+      register: kfctl_version
+      changed_when: false
+      ignore_errors: True
+      tags: TC_016, VERIFY_016
+
+    - name: Checking pods under kubeflow namespace
+      command: kubectl get pods --namespace kubeflow
+      register: kubeflow_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_016, VERIFY_016
+
+    - name: Checking pods under istio-system namespace
+      command: kubectl get pods --namespace istio-system
+      register: istio_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_016, VERIFY_016
+
+    - name: Validating Kubeflow Installation
+      assert:
+        that:
+          - "'command not found' not in kfctl_version.stdout"
+        fail_msg: "{{ kubeflow_install_fail_msg }}"
+        success_msg: "{{ kubeflow_install_success_msg }}"
+      tags: TC_016, VERIFY_016
+
+    - name: Validating Kubeflow pods deployment
+      assert:
+        that:
+          - "'Running' in kubeflow_pods.stdout or 'ContainerCreating' in kubeflow_pods.stdout"
+          - "'Running' in istio_system_pods.stdout or 'ContainerCreating' in istio_system_pods.stdout"
+        fail_msg: "{{ kubeflow_pods_deployment_fail_msg }}"
+        success_msg: "{{ kubeflow_pods_deployment_success_msg }}"
+      tags: TC_016, VERIFY_016
+
+# OMNIA_UKP_US_VFKP_TC_017
+# Execute the kubeflow role on manager nodes with Kubeflow already deployed
+- name: OMNIA_UKP_US_VFKP_TC_017
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_kubeflow_vars.yml
+  tasks:
+    - block:
+        - name: Call kubeflow role
+          include_role:
+            name: ../platforms/roles/kubeflow
+      tags: TC_017, VERIFY_017
+
+    - name: Checking installed Kubeflow version
+      command: kfctl version
+      register: kfctl_version
+      changed_when: false
+      ignore_errors: True
+      tags: TC_017, VERIFY_017
+
+    - name: Checking pods under kubeflow namespace
+      command: kubectl get pods --namespace kubeflow
+      register: kubeflow_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_017, VERIFY_017
+
+    - name: Checking pods under istio-system namespace
+      command: kubectl get pods --namespace istio-system
+      register: istio_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_017, VERIFY_017
+
+    - name: Validating Kubeflow Installation
+      assert:
+        that:
+          - "'command not found' not in kfctl_version.stdout"
+        fail_msg: "{{ kubeflow_install_fail_msg }}"
+        success_msg: "{{ kubeflow_install_success_msg }}"
+      tags: TC_017, VERIFY_017
+
+    - name: Validating Kubeflow pods deployment
+      assert:
+        that:
+          - "'Running' in kubeflow_pods.stdout or 'ContainerCreating' in kubeflow_pods.stdout"
+          - "'Running' in istio_system_pods.stdout or 'ContainerCreating' in istio_system_pods.stdout"
+        fail_msg: "{{ kubeflow_pods_deployment_fail_msg }}"
+        success_msg: "{{ kubeflow_pods_deployment_success_msg }}"
+      tags: TC_017, VERIFY_017
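
Note: the fixed five-minute pause used in TC_016 is a judgment call; a polling
alternative is sketched below. The retry counts are arbitrary and the task is
an assumption, not part of this changeset:

    - name: Wait until kubeflow pods leave the creating state
      command: kubectl get pods --namespace kubeflow --no-headers
      register: kubeflow_pods
      until: "'ContainerCreating' not in kubeflow_pods.stdout and 'Pending' not in kubeflow_pods.stdout"
      retries: 30
      delay: 10
      changed_when: false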

+ 30 - 0
test/test_vars/test_compute_gpu_vars.yml

@@ -0,0 +1,30 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+nvidia_drivers_fail_msg: "Nvidia drivers are not installed"
+
+nvidia_drivers_success_msg: "Nvidia drivers are installed"
+
+nvidia_docker_fail_msg: "nvidia-docker2 package is not installed"
+
+nvidia_docker_success_msg: "nvidia-docker2 package is installed"
+
+docker_service_fail_msg: "Docker service is not running"
+
+docker_service_success_msg: "Docker service is running"
+
+kubelet_service_fail_msg: "Kubelet service is not running"
+
+kubelet_service_success_msg: "Kubelet service is running"
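
These messages pair with checks in test_compute_gpu.yml (earlier in this
changeset); a sketch of the kind of task/assert pairing they support, with
module choices assumed rather than copied from that file:

    - name: Check nvidia-docker2 package
      command: rpm -q nvidia-docker2
      register: nvidia_docker
      changed_when: false
      ignore_errors: true

    - name: Validating nvidia-docker2 package
      assert:
        that:
          - "nvidia_docker.rc == 0"
        fail_msg: "{{ nvidia_docker_fail_msg }}"
        success_msg: "{{ nvidia_docker_success_msg }}"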

+ 22 - 0
test/test_vars/test_jupyterhub_vars.yml

@@ -0,0 +1,22 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+jupyterhub_pods_fail_msg: "JupyterHub pods are not deployed"
+
+jupyterhub_pods_success_msg: "JupyterHub pods are deployed and running"
+
+jupyterhub_services_fail_msg: "JupyterHub services are not running"
+
+jupyterhub_services_success_msg: "JupyterHub services are running"
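
A sketch of how these messages might be consumed; since the JupyterHub
namespace is not pinned down here, this sketch greps across all namespaces
(an assumption):

    - name: Check JupyterHub pods
      shell: kubectl get pods --all-namespaces | grep jupyterhub
      register: jupyterhub_pods
      changed_when: false
      ignore_errors: true

    - name: Validating JupyterHub pods
      assert:
        that:
          - "'Running' in jupyterhub_pods.stdout"
        fail_msg: "{{ jupyterhub_pods_fail_msg }}"
        success_msg: "{{ jupyterhub_pods_success_msg }}"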

+ 38 - 0
test/test_vars/test_k8s_common_vars.yml

@@ -0,0 +1,38 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+common_packages:
+  - gcc
+  - python3
+  - pip3
+  - docker
+  - nvidia-detect
+  - kubelet
+
+k8_packages:
+  - kubeadm
+  - kubectl
+
+packages_status_success_msg: "Common & K8s packages are installed"
+
+packages_status_fail_msg: "Common & K8s packages are not installed"
+
+docker_service_fail_msg: "Docker service is not running"
+
+docker_service_success_msg: "Docker service is running"
+
+kubelet_service_fail_msg: "Kubelet service is not running"
+
+kubelet_service_success_msg: "Kubelet service is running"
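
The two package lists above are presumably iterated by test_k8s_common.yml; a
minimal sketch of such a loop (the consuming task is an assumption):

    - name: Check package installation
      command: rpm -q "{{ item }}"
      register: package_status
      loop: "{{ common_packages + k8_packages }}"
      changed_when: false
      ignore_errors: true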

+ 38 - 0
test/test_vars/test_k8s_firewalld_vars.yml

@@ -0,0 +1,38 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+manager_k8s_ports_status_fail_msg: "Kubernetes ports are not open on the manager node"
+
+manager_k8s_ports_status_success_msg: "Kubernetes ports are open on the manager node"
+
+compute_k8s_ports_status_fail_msg: "Kubernetes ports are not open on the compute nodes"
+
+compute_k8s_ports_status_success_msg: "Kubernetes ports are open on the compute nodes"
+
+calico_ports_manager_fail_msg: "Calico ports are not open on the manager node"
+
+calico_ports_manager_success_msg: "Calico ports are open on the manager node"
+
+calico_ports_compute_fail_msg: "Calico ports are not open on the compute nodes"
+
+calico_ports_compute_success_msg: "Calico ports are open on the compute nodes"
+
+flannel_ports_manager_fail_msg: "Flannel ports are not open on the manager node"
+
+flannel_ports_manager_success_msg: "Flannel ports are open on the manager node"
+
+flannel_ports_compute_fail_msg: "Flannel ports are not open on the compute nodes"
+
+flannel_ports_compute_success_msg: "Flannel ports are open on the compute nodes"
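
A sketch of the firewalld check these messages pair with; the port asserted
here (6443/tcp, the API server) is illustrative, and the full port set lives
in test_k8s_firewalld.yml:

    - name: Fetch open ports
      command: firewall-cmd --list-ports
      register: open_ports
      changed_when: false

    - name: Validating K8s ports on manager node
      assert:
        that:
          - "'6443/tcp' in open_ports.stdout"
        fail_msg: "{{ manager_k8s_ports_status_fail_msg }}"
        success_msg: "{{ manager_k8s_ports_status_success_msg }}"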

+ 17 - 0
test/test_vars/test_k8s_manager_vars.yml

@@ -0,0 +1,17 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+helm_status_fail_msg: "Helm is not installed"
+
+helm_status_success_msg: "Helm is installed"
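
A sketch of the helm check these two messages support (assumes helm is on PATH
on the manager node; not copied from test_k8s_manager.yml):

    - name: Check helm installation
      command: helm version
      register: helm_status
      changed_when: false
      ignore_errors: true

    - name: Validating helm
      assert:
        that:
          - "helm_status.rc == 0"
        fail_msg: "{{ helm_status_fail_msg }}"
        success_msg: "{{ helm_status_success_msg }}"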

+ 38 - 0
test/test_vars/test_k8s_start_manager_workers_vars.yml

@@ -0,0 +1,38 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+master_node_status_fail_msg: "Master Node is not configured"
+
+master_node_status_success_msg: "Master Node is configured and initialized successfully"
+
+controller_scheduler_status_fail_msg: "Static Pod manifests (controller-manager and scheduler) are not deployed"
+
+controller_scheduler_status_success_msg: "Static Pod manifests (controller-manager and scheduler) are deployed and running"
+
+coredns_status_fail_msg: "CoreDNS pods are not deployed"
+
+coredns_status_success_msg: "CoreDNS pods are deployed and running"
+
+calico_flannel_status_fail_msg: "Calico/Flannel SDN network is not deployed"
+
+calico_flannel_status_success_msg: "Calico/Flannel SDN network is deployed and running"
+
+k8s_service_account_status_fail_msg: "Kubernetes dashboard service account and token are not created"
+
+k8s_service_account_status_success_msg: "Kubernetes dashboard service account and token are created"
+
+worker_nodes_status_fail_msg: "Worker Nodes are not initialized"
+
+worker_nodes_status_success_msg: "Worker Nodes are initialized and joined to the cluster"
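
For reference, the control-plane messages above correspond to pods visible via
kubectl get pods --namespace kube-system; illustrative output (hashes, node
suffixes and ages are made up):

    NAME                                READY   STATUS    RESTARTS   AGE
    coredns-5644d7b6d9-abcde            1/1     Running   0          12m
    kube-controller-manager-manager01   1/1     Running   0          12m
    kube-scheduler-manager01            1/1     Running   0          12m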

+ 38 - 0
test/test_vars/test_k8s_start_services_vars.yml

@@ -0,0 +1,38 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+metallb_prometheus_mpi_pods_fail_msg: "MetalLB, Prometheus and MPI pods are not deployed or not running"
+
+metallb_prometheus_mpi_pods_success_msg: "MetalLB, Prometheus and MPI pods are deployed and running"
+
+kubernetes_dashboard_fail_msg: "Kubernetes Dashboard is not deployed"
+
+kubernetes_dashboard_success_msg: "Kubernetes Dashboard is deployed"
+
+nfs_client_provisioner_pods_fail_msg: "NFS Client Provisioner pod is not deployed"
+
+nfs_client_provisioner_pods_success_msg: "NFS Client Provisioner pod is deployed and running"
+
+node_feature_discovery_pods_fail_msg: "Node Feature Discovery pods are not deployed"
+
+node_feature_discovery_pods_success_msg: "Node Feature Discovery pods are deployed and running"
+
+nvidia_device_plugin_pods_fail_msg: "Nvidia Device Plugin pod is not deployed or not running"
+
+nvidia_device_plugin_pods_success_msg: "Nvidia Device Plugin pod is deployed and running"
+
+default_storage_class_fail_msg: "NFS Client Provisioner is not configured as default storage class"
+
+default_storage_class_success_msg: "NFS Client Provisioner is configured as default storage class"
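
The default-storage-class messages correspond to the standard
storageclass.kubernetes.io/is-default-class annotation; for reference, the
usual way to mark nfs-client as default (shown as background, not as part of
this changeset):

    kubectl patch storageclass nfs-client -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'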

+ 22 - 0
test/test_vars/test_kubeflow_vars.yml

@@ -0,0 +1,22 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+kubeflow_install_fail_msg: "Kubeflow (kfctl) is not installed"
+
+kubeflow_install_success_msg: "Kubeflow (kfctl) is installed"
+
+kubeflow_pods_deployment_fail_msg: "Kubeflow pods are not deployed"
+
+kubeflow_pods_deployment_success_msg: "Kubeflow pods are deployed"
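
For reference, a successful kfctl version call prints a single version string,
along the lines of (version illustrative):

    kfctl v1.0.2-0-ga476281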