# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
- block:
    # Gather installed-package facts so presence can be asserted below.
    - name: Fetch Package info
      package_facts:
        manager: auto

    # python-docker is skipped here (its package name varies by distro);
    # failures are tolerated so the remaining validations still run.
    - name: Verify all packages are installed
      assert:
        # Bare expression — Jinja delimiters inside `that` are deprecated.
        that: item in ansible_facts.packages
        success_msg: "{{ install_package_success_msg }}"
        fail_msg: "{{ install_package_fail_msg }}"
      when: "'python-docker' not in item"
      with_items: "{{ common_packages }}"
      ignore_errors: true

- - name: Check login_vars is encrypted
- command: cat {{ login_vars_filename }}
- changed_when: false
- register: config_content
-
- - name: Validate login file is encypted or not
- assert:
- that: "'$ANSIBLE_VAULT;' in config_content.stdout"
- fail_msg: "{{ login_vars_fail_msg }}"
- success_msg: "{{ login_vars_success_msg }}"
-
- # Installing a required package : JQ
- - name: Installing jq (JSON Query)
- package:
- name: "{{ test_package }}"
- state: present
-
- # Checking if all the required pods are working
- - name: Get pods info
- shell: kubectl get pods --all-namespaces
- register: all_pods_info
-
- - name: Check the count of pods
- set_fact:
- count: "{{ all_pods_info.stdout_lines|length - 1 }}"
-
- - name: Check if all the pods are running
- assert:
- that:
- - "'Running' in all_pods_info.stdout_lines[{{ item }}]"
- fail_msg: "{{ check_pods_fail_msg }}"
- success_msg: "{{ check_pods_success_msg }}"
- with_sequence: start=1 end={{ count }}
-
- # Checking if NFS Server is running and Custom ISO is created
- - name: Get NFS Stat
- shell: systemctl status nfs-idmapd
- register: nfstat_info
-
- - name: Verify NFS Stat is running
- assert:
- that:
- - "'Active: active (running)' in nfstat_info.stdout"
- success_msg: "{{ nfs_share_success_msg }}"
- fail_msg: "{{ nfs_share_fail_msg }}"
-
- - name: Check nfs mount point
- stat:
- path: "{{ nfs_mount_Path }}"
- register: nfs_mount_info
-
- - name: Verify nfs share is mounted
- assert:
- that:
- - "{{ nfs_mount_info.stat.exists }}"
- success_msg: "{{ nfs_mount_success_msg }}"
- fail_msg: "{{ nfs_mount_fail_msg }}"
-
- - name: Check Custom ISO
- stat:
- path: "{{ check_iso_path }}"
- register: check_iso_info
-
- - name: Verify Custom ISO is created in the NFS repo
- assert:
- that:
- - "{{ check_iso_info.stat.exists }}"
- success_msg: "{{ check_iso_success_msg }}"
- fail_msg: "{{ check_iso_fail_msg }}"
-
- # Checking if network-config container is running
-
- - name: Get Pod info for network-config
- shell: |
- crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id) \(.metadata.name) \(.state)"'
- register: network_config_pod_info
-
- - name: Get Pod Status for network-config
- assert:
- that:
- - network_config_pod_info.stdout_lines | regex_search( "{{ container_info }}")
- success_msg: "{{ network_config_pod_success_msg }}"
- fail_msg: "{{ network_config_pod_fail_msg }}"
-
- - name: Get Pod facts
- shell: |
- crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id)"'
- register: network_config_pod_fact
-
- - name: Parse container id for the pods
- set_fact:
- container_id: "{{ network_config_pod_fact.stdout[1:-1] }}"
-
- - name: Check dhcpd,xinetd service is running
- command: crictl exec {{ container_id }} systemctl is-active {{ item }}
- changed_when: false
- ignore_errors: yes
- register: pod_service_check
- with_items:
- - dhcpd
- - xinetd
-
- - name: Verify dhcpd, xinetd service is running
- assert:
- that:
- - "'active' in pod_service_check.results[{{ item }}].stdout"
- - "'inactive' not in pod_service_check.results[{{ item }}].stdout"
- - "'unknown' not in pod_service_check.results[{{ item }}].stdout"
- fail_msg: "{{ pod_service_check_fail_msg }}"
- success_msg: "{{ pod_service_check_success_msg }}"
- with_sequence: start=0 end={{ pod_service_check.results|length - 1 }}
-
- # Checking if cobbler-container is running
- - name: Get Pod info for cobbler
- shell: |
- crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id) \(.metadata.name) \(.state)"'
- register: network_config_pod_info
-
- - name: Get Pod Status for cobbler
- assert:
- that:
- - network_config_pod_info.stdout_lines | regex_search( "{{ container_info }}")
- success_msg: "{{ cobbler_pod_success_msg }}"
- fail_msg: "{{ cobbler_pod_fail_msg }}"
-
- - name: Get Pod facts for cobbler
- shell: |
- crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id)"'
- register: network_config_pod_fact
-
- - name: Extract cobbler pod id
- set_fact:
- cobbler_id: "{{ network_config_pod_fact.stdout[1:-1] }}"
-
- - name: Check tftp,dhcpd,xinetd,cobblerd service is running
- command: crictl exec {{ cobbler_id }} systemctl is-active {{ item }}
- changed_when: false
- ignore_errors: yes
- register: pod_service_check
- with_items:
- - dhcpd
- - tftp
- - xinetd
- - cobblerd
-
- - name: Verify tftp,dhcpd,xinetd,cobblerd service is running
- assert:
- that:
- - "'active' in pod_service_check.results[{{ item }}].stdout"
- - "'inactive' not in pod_service_check.results[{{ item }}].stdout"
- - "'unknown' not in pod_service_check.results[{{ item }}].stdout"
- fail_msg: "{{pod_service_check_fail_msg}}"
- success_msg: "{{pod_service_check_success_msg}}"
- with_sequence: start=0 end=3
- # Checking Cron-Jobs
- - name: Check crontab list
- command: crictl exec {{ cobbler_id }} crontab -l
- changed_when: false
- register: crontab_list
- - name: Verify crontab list
- assert:
- that:
- - "'* * * * * /usr/bin/ansible-playbook /root/tftp.yml' in crontab_list.stdout"
- - "'*/5 * * * * /usr/bin/ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
- fail_msg: "{{cron_jobs_fail_msg}}"
- success_msg: "{{cron_jobs_success_msg}}"
- # Checking subnet-manger pod is running and open sm is running
- # Comment if infiniband is not connected
- - name: Fetch subnet-manager stats
- shell: kubectl get pods -n subnet-manager
- register: sm_manager_info
- - name: Verify subnet_manager container is running
- assert:
- that:
- - "'Running' in sm_manager_info.stdout_lines[1]"
- fail_msg: "{{subnet_manager_fail_msg}}"
- success_msg: "{{subnet_manager_success_msg}}"
- # Checking awx pod is running
- - name: Get Pod info for awx
- shell: |
- crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "awx") | "\(.id) \(.metadata.name) \(.state)"'
- register: awx_config_pod_info
-
- - name: Get Pod Status for awx
- assert:
- that:
- - network_config_pod_info.stdout_lines[{{ item }}] | regex_search( "{{ container_info }}")
- success_msg: "{{ awx_pod_success_msg }}"
- fail_msg: "{{ awx_pod_fail_msg }}"
- ignore_errors: yes
- with_sequence: start=0 end={{ network_config_pod_info.stdout_lines |length - 1 }}
-
- - name: Get pvc stats
- shell: |
- kubectl get pvc -n awx -o json |jq '.items[] | "\(.status.phase)"'
- register: pvc_stats_info
-
- - name: Verify if pvc stats is running
- assert:
- that:
- - "'Bound' in pvc_stats_info.stdout"
- fail_msg: "{{ pvc_stat_fail_msg }}"
- success_msg: "{{ pvc_stat_success_msg }}"
- with_sequence: start=0 end={{ pvc_stats_info.stdout_lines |length|int - 1 }}
-
- - name: Get svc stats
- shell: kubectl get svc -n awx awx-service -o json
- register: svc_stats_info
-
- - name: Verify if svc is up and running
- assert:
- that:
- - "'Error from server (NotFound):' not in svc_stats_info.stdout"
- success_msg: "{{ svc_stat_success_msg }}"
- fail_msg: "{{ svc_stat_fail_msg }}"
-
- - name: Fetch Cluster IP from svc
- shell: |
- kubectl get svc -n awx -o json | jq '.items[] | select(.metadata.name == "awx-service") | "\(.spec.clusterIP)"'
- register: cluster_ip_info
-
- - name: Check if connection to svc Cluster IP is enabled
- uri:
- url: http://{{ cluster_ip_info.stdout[1:-1] }}
- follow_redirects: none
- method: GET
- ignore_errors: yes
- register: cluster_ip_conn
-
- - name: Verify connection to svc cluster is working
- assert:
- that:
- - cluster_ip_conn.status == 200
- success_msg: "{{ svc_conn_success_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"
- fail_msg: "{{ svc_conn_fail_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"
|