#  Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
---  

- block:

    - name: Fetch Package info
      package_facts:
        manager: auto
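#  package_facts stores the installed packages in ansible_facts.packages as a
#  dictionary keyed by package name, so the membership test below is enough to
#  verify that each required package is present.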
      
    - name: Verify all packages are installed
      assert:
        that: "'{{ item }}' in ansible_facts.packages"
        success_msg: "{{ install_package_success_msg }}"
        fail_msg: "{{ install_package_fail_msg }}"
      when: "'python-docker' not in item"
      with_items: "{{ common_packages }}"
      ignore_errors: true
      
    - name: Check login_vars is encrypted
      command: cat {{ login_vars_filename }}
      changed_when: false
      register: config_content
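#  An Ansible Vault encrypted file begins with a header such as
#  '$ANSIBLE_VAULT;1.1;AES256', so checking for '$ANSIBLE_VAULT;' in the file
#  content is enough to confirm the file is encrypted.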
       
    - name: Validate that the login file is encrypted
      assert:
        that: "'$ANSIBLE_VAULT;' in config_content.stdout"
        fail_msg: "{{ login_vars_fail_msg }}"
        success_msg: "{{ login_vars_success_msg }}"
            
#  Installing a required package: jq
    - name: Installing jq (command-line JSON processor)
      package:
        name: "{{ test_package }}"
        state: present
           
#  Checking if all the required pods are working
    - name: Get pods info
      shell: kubectl get pods --all-namespaces
      register: all_pods_info
          
    - name: Check the count of pods
      set_fact:
         count: "{{ all_pods_info.stdout_lines|length - 1 }}"
          
    - name: Check if all the pods are running
      assert:
        that:
          - "'Running' in all_pods_info.stdout_lines[{{ item }}]"
        fail_msg: "{{ check_pods_fail_msg }}"
        success_msg: "{{ check_pods_success_msg }}"
      with_sequence: start=1 end={{ count }}
      
#  Checking if NFS Server is running and Custom ISO is created
    - name: Get NFS service status
      shell: systemctl status nfs-idmapd
      register: nfstat_info
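#  systemctl reports 'Active: active (running)' for a healthy unit, which is
#  the string asserted on below.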
       
    - name: Verify NFS service is running
      assert:
        that:
          - "'Active: active (running)' in nfstat_info.stdout"
        success_msg: "{{ nfs_share_success_msg }}"
        fail_msg: "{{ nfs_share_fail_msg }}"
        
    - name: Check nfs mount point
      stat:
        path: "{{ nfs_mount_Path }}"
      register: nfs_mount_info
          
    - name: Verify nfs share is mounted
      assert:
        that:
          - "{{ nfs_mount_info.stat.exists }}"
        success_msg: "{{ nfs_mount_success_msg }}"
        fail_msg: "{{ nfs_mount_fail_msg }}"
           
    - name: Check Custom ISO
      stat:
        path: "{{ check_iso_path }}"
      register: check_iso_info
          
    - name: Verify Custom ISO is created in the NFS repo
      assert:
        that:
          - "{{ check_iso_info.stat.exists }}"
        success_msg: "{{ check_iso_success_msg }}"
        fail_msg: "{{ check_iso_fail_msg }}"
      
#  Checking if network-config container is running
    
    - name: Get Pod info for network-config
      shell: |
         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id) \(.metadata.name) \(.state)"'
      register: network_config_pod_info
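#  The jq filter above selects the mngmnt-network-container in the
#  network-config namespace and prints a quoted '<id> <name> <state>' string,
#  which is matched against the expected container_info pattern below.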
          
    - name: Get Pod Status for network-config
      assert:
        that:
          - network_config_pod_info.stdout | regex_search(container_info)
        success_msg: "{{ network_config_pod_success_msg }}"
        fail_msg: "{{ network_config_pod_fail_msg }}"
         
    - name: Get Pod facts
      shell: |
            crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id)"'
      register: network_config_pod_fact
         
    - name: Parse container id for the pods
      set_fact: 
        container_id: "{{ network_config_pod_fact.stdout[1:-1] }}"   
          
    - name: Check dhcpd and xinetd services are running
      command: crictl exec {{ container_id }} systemctl is-active {{ item }}
      changed_when: false
      ignore_errors: yes
      register: pod_service_check
      with_items:
        - dhcpd
        - xinetd
            
    - name: Verify dhcpd and xinetd services are running
      assert:
        that:
          - "'active' in pod_service_check.results[item | int].stdout"
          - "'inactive' not in pod_service_check.results[item | int].stdout"
          - "'unknown' not in pod_service_check.results[item | int].stdout"
        fail_msg: "{{ pod_service_check_fail_msg }}"
        success_msg: "{{ pod_service_check_success_msg }}"
      with_sequence: start=0 end={{ pod_service_check.results|length - 1 }}
         
# Checking if cobbler-container is running
    - name: Get Pod info for cobbler
      shell: |
         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id) \(.metadata.name) \(.state)"'
      register: cobbler_pod_info
      
    - name: Get Pod Status for cobbler
      assert:
        that:
          - cobbler_pod_info.stdout | regex_search(container_info)
        success_msg: "{{ cobbler_pod_success_msg }}"
        fail_msg: "{{ cobbler_pod_fail_msg }}"
      
    - name: Get Pod facts for cobbler
      shell: |
            crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id)"'
      register: cobbler_pod_fact
      
    - name: Extract cobbler pod id
      set_fact: 
        cobbler_id: "{{ network_config_pod_fact.stdout[1:-1] }}"   
      
    - name: Check tftp, dhcpd, xinetd, and cobblerd services are running
      command: crictl exec {{ cobbler_id }} systemctl is-active {{ item }}
      changed_when: false
      ignore_errors: yes
      register: pod_service_check
      with_items:
        - dhcpd
        - tftp
        - xinetd
        - cobblerd
        
    - name: Verify tftp, dhcpd, xinetd, and cobblerd services are running
      assert:
        that:
          - "'active' in pod_service_check.results[item | int].stdout"
          - "'inactive' not in pod_service_check.results[item | int].stdout"
          - "'unknown' not in pod_service_check.results[item | int].stdout"
        fail_msg: "{{ pod_service_check_fail_msg }}"
        success_msg: "{{ pod_service_check_success_msg }}"
      with_sequence: start=0 end=3

# Checking Cron-Jobs
    - name: Check crontab list
      command: crictl exec {{ cobbler_id }} crontab -l
      changed_when: false
      register: crontab_list
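#  Two cron entries are expected inside the cobbler container: tftp.yml runs
#  every minute (* * * * *) and inventory_creation.yml every five minutes
#  (*/5 * * * *).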

    - name: Verify crontab list
      assert:
        that:
          - "'* * * * * /usr/bin/ansible-playbook /root/tftp.yml' in crontab_list.stdout"
          - "'*/5 * * * * /usr/bin/ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
        fail_msg: "{{cron_jobs_fail_msg}}"
        success_msg: "{{cron_jobs_success_msg}}"

#  Checking if the subnet-manager pod is running and OpenSM is running
#  Comment out this section if InfiniBand is not connected
    - name: Fetch subnet-manager stats
      shell: kubectl get pods -n subnet-manager 
      register: sm_manager_info
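#  Line 0 of the kubectl output is the header row, so stdout_lines[1] below is
#  the first (and expected only) subnet-manager pod entry.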

    - name: Verify subnet_manager container is running
      assert:
        that:
          - "'Running' in sm_manager_info.stdout_lines[1]"
        fail_msg: "{{subnet_manager_fail_msg}}"
        success_msg: "{{subnet_manager_success_msg}}"

# Checking awx pod is running

    - name: Get Pod info for awx
      shell: |
         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "awx") | "\(.id) \(.metadata.name) \(.state)"'
      register: awx_config_pod_info
           
    - name: Get Pod Status for awx
      assert:
        that:
          - awx_config_pod_info.stdout_lines[item | int] | regex_search(container_info)
        success_msg: "{{ awx_pod_success_msg }}"
        fail_msg: "{{ awx_pod_fail_msg }}"
      ignore_errors: yes
      with_sequence: start=0 end={{ awx_config_pod_info.stdout_lines | length - 1 }}
          
    - name: Get pvc stats
      shell: |
          kubectl get pvc -n awx -o json |jq '.items[] | "\(.status.phase)"'
      register: pvc_stats_info
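#  jq prints one quoted status phase per PVC (a healthy PVC reports "Bound"),
#  so each output line is validated individually below.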
            
    - name: Verify awx pvc status is Bound
      assert:
        that:
          - "'Bound' in pvc_stats_info.stdout_lines[item | int]"
        fail_msg: "{{ pvc_stat_fail_msg }}"
        success_msg: "{{ pvc_stat_success_msg }}"
      with_sequence: start=0 end={{ pvc_stats_info.stdout_lines | length - 1 }}
            
    - name: Get svc stats
      shell: kubectl get svc -n awx awx-service -o json
      register: svc_stats_info
           
    - name: Verify if svc is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in svc_stats_info.stdout"
        success_msg: "{{ svc_stat_success_msg }}"
        fail_msg: "{{ svc_stat_fail_msg }}"
             
    - name: Fetch Cluster IP from svc
      shell: |
          kubectl get svc -n awx -o json | jq '.items[] | select(.metadata.name == "awx-service") | "\(.spec.clusterIP)"'
      register: cluster_ip_info
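#  The cluster IP comes back quoted from jq; [1:-1] strips the quotes before
#  building the URL, and the AWX web service is expected to answer with
#  HTTP 200.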
           
    - name: Check if connection to svc Cluster IP is enabled
      uri:
        url: http://{{ cluster_ip_info.stdout[1:-1] }}
        follow_redirects: none
        method: GET
      ignore_errors: yes
      register: cluster_ip_conn
           
    - name: Verify connection to svc cluster is working
      assert:
        that:
          - cluster_ip_conn.status == 200
        success_msg: "{{ svc_conn_success_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"
        fail_msg: "{{ svc_conn_fail_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"

# OMNIA_1.2_Grafana_TC_001
# Validate Grafana k8s Loki pod and namespaces is running or not

    - name: Get Pod info for Grafana k8s Loki
      shell: |
         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "grafana") | "\(.id) \(.metadata.name) \(.state)"'
      register: grafana_config_pod_info

    - name: Get Pod Status for Grafana k8s Loki
      assert:
        that:
          - grafana_config_pod_info.stdout_lines[item | int] | regex_search(container_info)
        success_msg: "{{ grafana_pod_success_msg }}"
        fail_msg: "{{ grafana_pod_fail_msg }}"
      ignore_errors: yes
      with_sequence: start=0 end={{ grafana_config_pod_info.stdout_lines | length - 1 }}

# OMNIA_1.2_Grafana_TC_002
# Validate Grafana k8s Loki  pvc , svc and cluster IP

    - name: Get grafana pvc stats
      shell: |
          kubectl get pvc -n grafana -o json |jq '.items[] | "\(.status.phase)"'
      register: grafana_pvc_stats_info

    - name: Verify grafana pvc status is Bound
      assert:
        that:
          - "'Bound' in grafana_pvc_stats_info.stdout_lines[item | int]"
        fail_msg: "{{ grafana_pvc_stat_fail_msg }}"
        success_msg: "{{ grafana_pvc_stat_success_msg }}"
      with_sequence: start=0 end={{ grafana_pvc_stats_info.stdout_lines | length - 1 }}

    - name: Get grafana svc stats
      shell: kubectl get svc -n grafana grafana -o json
      register: grafana_svc_stats_info

    - name: Verify if grafana svc is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in grafana_svc_stats_info.stdout"
        success_msg: "{{ grafana_svc_stat_success_msg }}"
        fail_msg: "{{ grafana_svc_stat_fail_msg }}"

    - name: Get grafana loki svc stats
      shell: kubectl get svc -n grafana loki -o json
      register: grafana_loki_svc_stats_info

    - name: Verify if grafana loki svc is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in grafana_loki_svc_stats_info.stdout"
        success_msg: "{{ grafana_loki_svc_stat_success_msg }}"
        fail_msg: "{{ grafana_loki_svc_stat_fail_msg }}"

# OMNIA_1.2_Grafana_TC_003
# Validate Grafana Loki Host IP connection

    - name: Fetch Grafana Loki Cluster IP from svc
      shell: |
          kubectl get svc -n grafana -o json | jq '.items[] | select(.metadata.name == "loki") | "\(.spec.clusterIP)"'
      register: grafana_loki_cluster_ip_info

    - name: Check if connection to Grafana Loki svc Cluster IP is enabled
      command: ping -c1 {{ grafana_loki_cluster_ip_info.stdout[1:-1] }}
      register: validate_grafana_loki
      changed_when: false
      failed_when: false

    - name: Verify connection to Grafana Loki svc cluster is working
      assert:
        that:
          - "'ping' in validate_grafana_loki.stdout"
        success_msg: "{{ grafana_svc_conn_success_msg }} : {{ grafana_loki_cluster_ip_info.stdout[1:-1] }}"
        fail_msg: "{{ grafana_svc_conn_fail_msg }} : {{ grafana_loki_cluster_ip_info.stdout[1:-1] }}"

    - name: Fetch Grafana Cluster IP from svc
      shell: |
        kubectl get svc -n grafana -o json | jq '.items[] | select(.metadata.name == "grafana") | "\(.spec.clusterIP)"'
      register: grafana_cluster_ip_info

    - name: Ping the grafana to validate connectivity
      command: ping -c1 {{ grafana_cluster_ip_info.stdout[1:-1] }}
      register: validate_grafana
      changed_when: false
      failed_when: false

    - name: Verify connection to Grafana svc cluster is working
      assert:
        that:
          - "'ping' in validate_grafana.stdout"
        success_msg: "{{ grafana_svc_conn_success_msg }} : {{ grafana_cluster_ip_info.stdout[1:-1] }}"
        fail_msg: "{{ grafana_svc_conn_fail_msg }} : {{ grafana_cluster_ip_info.stdout[1:-1] }}"


# OMNIA_1.2_Grafana_TC_017
# Validate Prometheus pod , pvc , svc and cluster IP

    - name: Get monitoring Pod info for Prometheus alertmanager
      shell: |
         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "monitoring" and .labels."io.kubernetes.container.name" == "alertmanager") | "\(.id) \(.metadata.name) \(.state)"'
      register: monitoring_alertmanager_pod_info

    - name: Get monitoring Pod Status for Prometheus alertmanager
      assert:
        that:
          - monitoring_alertmanager_pod_info.stdout | regex_search(container_info)
        success_msg: "{{ prometheus_alertmanager_pod_success_msg }}"
        fail_msg: "{{ prometheus_alertmanager_pod_fail_msg }}"

    - name: Get monitoring Pod info for Prometheus node-exporter
      shell: |
         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "monitoring" and .labels."io.kubernetes.container.name" == "node-exporter") | "\(.id) \(.metadata.name) \(.state)"'
      register: monitoring_node_exporter_pod_info

    - name: Get monitoring Pod Status for Prometheus node-exporter
      assert:
        that:
          - monitoring_node_exporter_pod_info.stdout | regex_search(container_info)
        success_msg: "{{ prometheus_node_exporter_pod_success_msg }}"
        fail_msg: "{{ prometheus_node_exporter_pod_fail_msg }}"

    - name: Get Prometheus alertmanager svc stats
      shell: kubectl get svc -n monitoring monitoring-kube-prometheus-alertmanager -o json
      register: prometheus_alertmanager_svc_stats_info

    - name: Verify if Prometheus alertmanager is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in prometheus_alertmanager_svc_stats_info.stdout"
        success_msg: "{{ prometheus_alertmanager_svc_stat_success_msg }}"
        fail_msg: "{{ prometheus_alertmanager_svc_stat_fail_msg }}"

    - name: Get Prometheus node-exporter svc stats
      shell: kubectl get svc -n monitoring monitoring-prometheus-node-exporter -o json
      register: prometheus_node_exporter_svc_stats_info

    - name: Verify if Prometheus node-exporter svc is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in prometheus_node_exporter_svc_stats_info.stdout"
        success_msg: "{{ prometheus_node_exporter_svc_stat_success_msg }}"
        fail_msg: "{{ prometheus_node_exporter_svc_stat_fail_msg }}"

    - name: Get Prometheus monitoring svc stats
      shell: kubectl get svc -n monitoring {{ item }} -o json
      changed_when: false
      ignore_errors: yes
      register: monitoring_pod_svc_check
      with_items:
        - monitoring-prometheus-node-exporter
        - monitoring-kube-prometheus-alertmanager
        - monitoring-kube-prometheus-operator
        - monitoring-kube-state-metrics
        - monitoring-kube-prometheus-prometheus

# Testcase OMNIA_1.2_AppArmor_TC_001
# Test case to find out if AppArmor is enabled (returns Y if true)

    - name: Fetch AppArmor enabled status
      shell: cat /sys/module/apparmor/parameters/enabled
      register: apparmor_enabled
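#  /sys/module/apparmor/parameters/enabled contains 'Y' when AppArmor is
#  compiled into the kernel and enabled, which is what the apparmor_true
#  pattern is expected to match.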

    - name: Find out if AppArmor is enabled (returns Y if true)
      assert:
        that:
          - apparmor_enabled.stdout | regex_search( "{{ apparmor_true }}" )
        success_msg: "{{ apparmor_enabled_success_msg }}"
        fail_msg: "{{ apparmor_enabled_fail_msg }}"

# Testcase OMNIA_1.2_AppArmor_TC_002
# Test case to list all loaded AppArmor profiles for applications and processes and detail their status (enforce, complain, unconfined)

    - name: List all loaded AppArmor profiles and their status
      shell: aa-status
      register: apparmor_status
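#  aa-status normally reports 'apparmor module is loaded.' followed by the
#  loaded profiles in enforce or complain mode; the apparmor_module pattern is
#  expected to match part of that output.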

    - name: Verify the apparmor module output lists the loaded profiles
      assert:
        that:
          - apparmor_status.stdout | regex_search( "{{ apparmor_module }}" )
        success_msg: "{{ apparmor_status_success_msg }}"
        fail_msg: "{{ apparmor_status_fail_msg }}"

# Testcase OMNIA_1.2_AppArmor_TC_003
# Test case to validate available profiles in /extra-profiles/ path

    - name: Check available profiles in the /extra-profiles/ path
      shell: ls /usr/share/apparmor/extra-profiles/ | grep 'usr.bin.passwd'
      register: apparmor_profile

    - name: Verify the usr.bin.passwd profile is available in the /extra-profiles/ path
      assert:
        that:
          - apparmor_profile.stdout | regex_search( "{{ apparmor_passwd_profile }}" )
        success_msg: "{{ apparmor_profile_success_msg }}"
        fail_msg: "{{ apparmor_profile_fail_msg }}"

# Testcase OMNIA_1.2_AppArmor_TC_004
# Test case to list running executables which are currently confined by an AppArmor profile

    - name: List running executables currently confined by an AppArmor profile
      shell: ps auxZ | grep -v '^unconfined' | grep 'nscd'
      register: apparmor_not_unconfined

    - name: Verify nscd is running confined by an AppArmor profile
      assert:
        that:
          - apparmor_not_unconfined.stdout | regex_search( "{{ apparmor_nscd }}" )
        success_msg: "{{ apparmor_not_unconfined_success_msg }}"
        fail_msg: "{{ apparmor_not_unconfined_fail_msg }}"

# Testcase OMNIA_1.2_AppArmor_TC_005
# Test case to list processes with TCP or UDP ports that do not have AppArmor profiles loaded

    - name: List processes with TCP or UDP ports that do not have AppArmor profiles loaded
      shell: aa-unconfined --paranoid | grep '/usr/sbin/auditd'
      register: apparmor_unconfined

    - name: Verify auditd is running unconfined (no AppArmor profile loaded)
      assert:
        that:
          - apparmor_unconfined.stdout | regex_search( "{{ apparmor_auditd }}" )
        success_msg: "{{ apparmor_unconfined_success_msg }}"
        fail_msg: "{{ apparmor_unconfined_fail_msg }}"