test_control_plane_validation.yml

# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
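# Note: this file is a task list (it starts with a block rather than a play), so it is
# presumably included from a test play, with an accompanying vars file supplying
# common_packages, the *_msg strings, and the path variables used below; that wiring
# is an assumption inferred from the variable usage in this file.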
- block:
    - name: Fetch Package info
      package_facts:
        manager: auto
    - name: Verify all packages are installed
      assert:
        that: "'{{ item }}' in ansible_facts.packages"
        success_msg: "{{ install_package_success_msg }}"
        fail_msg: "{{ install_package_fail_msg }}"
      when: "'python-docker' not in item"
      with_items: "{{ common_packages }}"
      ignore_errors: true
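    # package_facts populates ansible_facts.packages as a dict keyed by package name,
    # so the membership test above verifies installation; python-docker is deliberately
    # skipped by the when condition.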
    - name: Check login_vars is encrypted
      command: cat {{ login_vars_filename }}
      changed_when: false
      register: config_content
    - name: Validate whether login file is encrypted
      assert:
        that: "'$ANSIBLE_VAULT;' in config_content.stdout"
        fail_msg: "{{ login_vars_fail_msg }}"
        success_msg: "{{ login_vars_success_msg }}"
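    # Files encrypted with ansible-vault start with a "$ANSIBLE_VAULT;" header line,
    # which is what the assert above keys on.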
    # Installing a required package: jq (JSON Query)
    - name: Installing jq (JSON Query)
      package:
        name: "{{ test_package }}"
        state: present
    # Checking if all the required pods are working
    - name: Get pods info
      shell: kubectl get pods --all-namespaces
      register: all_pods_info
    - name: Check the count of pods
      set_fact:
        count: "{{ all_pods_info.stdout_lines | length - 1 }}"
    - name: Check if all the pods are running
      assert:
        that:
          - "'Running' in all_pods_info.stdout_lines[{{ item }}]"
        fail_msg: "{{ check_pods_fail_msg }}"
        success_msg: "{{ check_pods_success_msg }}"
      with_sequence: start=1 end={{ count }}
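    # The first stdout line is the kubectl header row, so the pod count is the line
    # count minus one and the sequence starts at index 1.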
    # Checking if NFS Server is running and Custom ISO is created
    - name: Get NFS Stat
      shell: systemctl status nfs-idmapd
      register: nfstat_info
    - name: Verify NFS Stat is running
      assert:
        that:
          - "'Active: active (running)' in nfstat_info.stdout"
        success_msg: "{{ nfs_share_success_msg }}"
        fail_msg: "{{ nfs_share_fail_msg }}"
    - name: Check nfs mount point
      stat:
        path: "{{ nfs_mount_Path }}"
      register: nfs_mount_info
    - name: Verify nfs share is mounted
      assert:
        that:
          - nfs_mount_info.stat.exists
        success_msg: "{{ nfs_mount_success_msg }}"
        fail_msg: "{{ nfs_mount_fail_msg }}"
    - name: Check Custom ISO
      stat:
        path: "{{ check_iso_path }}"
      register: check_iso_info
    - name: Verify Custom ISO is created in the NFS repo
      assert:
        that:
          - check_iso_info.stat.exists
        success_msg: "{{ check_iso_success_msg }}"
        fail_msg: "{{ check_iso_fail_msg }}"
    # Checking if network-config container is running
    - name: Get Pod info for network-config
      shell: |
        crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id) \(.metadata.name) \(.state)"'
      register: network_config_pod_info
    - name: Get Pod Status for network-config
      assert:
        that:
          - network_config_pod_info.stdout_lines | regex_search("{{ container_info }}")
        success_msg: "{{ network_config_pod_success_msg }}"
        fail_msg: "{{ network_config_pod_fail_msg }}"
    - name: Get Pod facts
      shell: |
        crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id)"'
      register: network_config_pod_fact
    - name: Parse container id for the pods
      set_fact:
        container_id: "{{ network_config_pod_fact.stdout[1:-1] }}"
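    # jq prints the container id wrapped in double quotes, so the [1:-1] slice strips
    # them before the id is passed to crictl exec.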
    - name: Check dhcpd,xinetd service is running
      command: crictl exec {{ container_id }} systemctl is-active {{ item }}
      changed_when: false
      ignore_errors: yes
      register: pod_service_check
      with_items:
        - dhcpd
        - xinetd
    - name: Verify dhcpd, xinetd service is running
      assert:
        that:
          - "'active' in pod_service_check.results[{{ item }}].stdout"
          - "'inactive' not in pod_service_check.results[{{ item }}].stdout"
          - "'unknown' not in pod_service_check.results[{{ item }}].stdout"
        fail_msg: "{{ pod_service_check_fail_msg }}"
        success_msg: "{{ pod_service_check_success_msg }}"
      with_sequence: start=0 end={{ pod_service_check.results | length - 1 }}
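    # The extra 'inactive'/'unknown' checks above are needed because 'active' is a
    # substring of 'inactive', so the first condition alone would not be sufficient.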
    # Checking if cobbler-container is running
    - name: Get Pod info for cobbler
      shell: |
        crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id) \(.metadata.name) \(.state)"'
      register: network_config_pod_info
    - name: Get Pod Status for cobbler
      assert:
        that:
          - network_config_pod_info.stdout_lines | regex_search("{{ container_info }}")
        success_msg: "{{ cobbler_pod_success_msg }}"
        fail_msg: "{{ cobbler_pod_fail_msg }}"
    - name: Get Pod facts for cobbler
      shell: |
        crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id)"'
      register: network_config_pod_fact
    - name: Extract cobbler pod id
      set_fact:
        cobbler_id: "{{ network_config_pod_fact.stdout[1:-1] }}"
    - name: Check tftp,dhcpd,xinetd,cobblerd service is running
      command: crictl exec {{ cobbler_id }} systemctl is-active {{ item }}
      changed_when: false
      ignore_errors: yes
      register: pod_service_check
      with_items:
        - dhcpd
        - tftp
        - xinetd
        - cobblerd
    - name: Verify tftp,dhcpd,xinetd,cobblerd service is running
      assert:
        that:
          - "'active' in pod_service_check.results[{{ item }}].stdout"
          - "'inactive' not in pod_service_check.results[{{ item }}].stdout"
          - "'unknown' not in pod_service_check.results[{{ item }}].stdout"
        fail_msg: "{{ pod_service_check_fail_msg }}"
        success_msg: "{{ pod_service_check_success_msg }}"
      with_sequence: start=0 end=3
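    # The hard-coded end=3 iterates indexes 0-3, matching the four services
    # (dhcpd, tftp, xinetd, cobblerd) queried above.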
    # Checking Cron-Jobs
    - name: Check crontab list
      command: crictl exec {{ cobbler_id }} crontab -l
      changed_when: false
      register: crontab_list
    - name: Verify crontab list
      assert:
        that:
          - "'* * * * * /usr/bin/ansible-playbook /root/tftp.yml' in crontab_list.stdout"
          - "'*/5 * * * * /usr/bin/ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
        fail_msg: "{{ cron_jobs_fail_msg }}"
        success_msg: "{{ cron_jobs_success_msg }}"
    # Checking if the subnet-manager pod is running and opensm is active
    # Comment out if InfiniBand is not connected
    - name: Fetch subnet-manager stats
      shell: kubectl get pods -n subnet-manager
      register: sm_manager_info
    - name: Verify subnet_manager container is running
      assert:
        that:
          - "'Running' in sm_manager_info.stdout_lines[1]"
        fail_msg: "{{ subnet_manager_fail_msg }}"
        success_msg: "{{ subnet_manager_success_msg }}"
    # Checking if the awx pod is running
    - name: Get Pod info for awx
      shell: |
        crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "awx") | "\(.id) \(.metadata.name) \(.state)"'
      register: awx_config_pod_info
    - name: Get Pod Status for awx
      assert:
        that:
          - awx_config_pod_info.stdout_lines[{{ item }}] | regex_search("{{ container_info }}")
        success_msg: "{{ awx_pod_success_msg }}"
        fail_msg: "{{ awx_pod_fail_msg }}"
      ignore_errors: yes
      with_sequence: start=0 end={{ awx_config_pod_info.stdout_lines | length - 1 }}
    - name: Get pvc stats
      shell: |
        kubectl get pvc -n awx -o json | jq '.items[] | "\(.status.phase)"'
      register: pvc_stats_info
    - name: Verify if pvc stats is running
      assert:
        that:
          - "'Bound' in pvc_stats_info.stdout"
        fail_msg: "{{ pvc_stat_fail_msg }}"
        success_msg: "{{ pvc_stat_success_msg }}"
      with_sequence: start=0 end={{ pvc_stats_info.stdout_lines | length | int - 1 }}
    - name: Get svc stats
      shell: kubectl get svc -n awx awx-service -o json
      register: svc_stats_info
    - name: Verify if svc is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in svc_stats_info.stdout"
        success_msg: "{{ svc_stat_success_msg }}"
        fail_msg: "{{ svc_stat_fail_msg }}"
    - name: Fetch Cluster IP from svc
      shell: |
        kubectl get svc -n awx -o json | jq '.items[] | select(.metadata.name == "awx-service") | "\(.spec.clusterIP)"'
      register: cluster_ip_info
    - name: Check if connection to svc Cluster IP is enabled
      uri:
        url: http://{{ cluster_ip_info.stdout[1:-1] }}
        follow_redirects: none
        method: GET
      ignore_errors: yes
      register: cluster_ip_conn
    - name: Verify connection to svc cluster is working
      assert:
        that:
          - cluster_ip_conn.status == 200
        success_msg: "{{ svc_conn_success_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"
        fail_msg: "{{ svc_conn_fail_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"
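    # A 200 response from the ClusterIP confirms the AWX web service is reachable from
    # this node; follow_redirects is set to none, so any redirect would surface as a
    # non-200 status and fail the assert.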
    # OMNIA_1.2_Grafana_TC_001
    # Validate that the Grafana k8s and Loki pods and namespace are running
    - name: Get Pod info for Grafana k8s Loki
      shell: |
        crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "grafana") | "\(.id) \(.metadata.name) \(.state)"'
      register: grafana_config_pod_info
    - name: Get Pod Status for Grafana k8s Loki
      assert:
        that:
          - grafana_config_pod_info.stdout_lines[{{ item }}] | regex_search("{{ container_info }}")
        success_msg: "{{ grafana_pod_success_msg }}"
        fail_msg: "{{ grafana_pod_fail_msg }}"
      ignore_errors: yes
      with_sequence: start=0 end={{ grafana_config_pod_info.stdout_lines | length - 1 }}
    # OMNIA_1.2_Grafana_TC_002
    # Validate Grafana k8s Loki pvc, svc and cluster IP
    - name: Get grafana pvc stats
      shell: |
        kubectl get pvc -n grafana -o json | jq '.items[] | "\(.status.phase)"'
      register: grafana_pvc_stats_info
    - name: Verify if grafana pvc stats is running
      assert:
        that:
          - "'Bound' in grafana_pvc_stats_info.stdout"
        fail_msg: "{{ grafana_pvc_stat_fail_msg }}"
        success_msg: "{{ grafana_pvc_stat_success_msg }}"
      with_sequence: start=0 end={{ grafana_pvc_stats_info.stdout_lines | length | int - 1 }}
    - name: Get grafana svc stats
      shell: kubectl get svc -n grafana grafana -o json
      register: grafana_svc_stats_info
    - name: Verify if grafana svc is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in grafana_svc_stats_info.stdout"
        success_msg: "{{ grafana_svc_stat_success_msg }}"
        fail_msg: "{{ grafana_svc_stat_fail_msg }}"
    - name: Get grafana loki svc stats
      shell: kubectl get svc -n grafana loki -o json
      register: grafana_loki_svc_stats_info
    - name: Verify if grafana loki svc is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in grafana_loki_svc_stats_info.stdout"
        success_msg: "{{ grafana_loki_svc_stat_success_msg }}"
        fail_msg: "{{ grafana_loki_svc_stat_fail_msg }}"
    # OMNIA_1.2_Grafana_TC_003
    # Validate Grafana Loki Host IP connection
    - name: Fetch Grafana Loki Cluster IP from svc
      shell: |
        kubectl get svc -n grafana -o json | jq '.items[] | select(.metadata.name == "loki") | "\(.spec.clusterIP)"'
      register: grafana_loki_cluster_ip_info
    - name: Check if connection to Grafana Loki svc Cluster IP is enabled
      command: ping -c1 {{ grafana_loki_cluster_ip_info.stdout[1:-1] }}
      register: validate_grafana_loki
      changed_when: false
      failed_when: false
    - name: Verify connection to Grafana Loki svc cluster is working
      assert:
        that:
          - "'ping' in validate_grafana_loki.stdout"
        success_msg: "{{ grafana_svc_conn_success_msg }} : {{ grafana_loki_cluster_ip_info.stdout[1:-1] }}"
        fail_msg: "{{ grafana_svc_conn_fail_msg }} : {{ grafana_loki_cluster_ip_info.stdout[1:-1] }}"
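    # ping runs with failed_when disabled, so reachability is judged solely by the
    # assert above; the 'ping' substring check passes whenever the command produced
    # its usual statistics output.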
    - name: Fetch Grafana Cluster IP from svc
      shell: |
        kubectl get svc -n grafana -o json | jq '.items[] | select(.metadata.name == "grafana") | "\(.spec.clusterIP)"'
      register: grafana_cluster_ip_info
    - name: Ping the grafana to validate connectivity
      command: ping -c1 {{ grafana_cluster_ip_info.stdout[1:-1] }}
      register: validate_grafana
      changed_when: false
      failed_when: false
    - name: Verify connection to Grafana svc cluster is working
      assert:
        that:
          - "'ping' in validate_grafana.stdout"
        success_msg: "{{ grafana_svc_conn_success_msg }} : {{ grafana_cluster_ip_info.stdout[1:-1] }}"
        fail_msg: "{{ grafana_svc_conn_fail_msg }} : {{ grafana_cluster_ip_info.stdout[1:-1] }}"
    # OMNIA_1.2_Grafana_TC_017
    # Validate Prometheus pod, pvc, svc and cluster IP
    - name: Get monitoring Pod info for Prometheus alertmanager
      shell: |
        crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "monitoring" and .labels."io.kubernetes.container.name" == "alertmanager") | "\(.id) \(.metadata.name) \(.state)"'
      register: monitoring_alertmanager_pod_info
    - name: Get monitoring Pod Status for Prometheus alertmanager
      assert:
        that:
          - monitoring_alertmanager_pod_info.stdout_lines | regex_search("{{ container_info }}")
        success_msg: "{{ prometheus_alertmanager_pod_success_msg }}"
        fail_msg: "{{ prometheus_alertmanager_pod_fail_msg }}"
    - name: Get monitoring Pod info for Prometheus node-exporter
      shell: |
        crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "monitoring" and .labels."io.kubernetes.container.name" == "node-exporter") | "\(.id) \(.metadata.name) \(.state)"'
      register: monitoring_node_exporter_pod_info
    - name: Get monitoring Pod Status for Prometheus node-exporter
      assert:
        that:
          - monitoring_node_exporter_pod_info.stdout_lines | regex_search("{{ container_info }}")
        success_msg: "{{ prometheus_node_exporter_pod_success_msg }}"
        fail_msg: "{{ prometheus_node_exporter_pod_fail_msg }}"
    - name: Get Prometheus alertmanager svc stats
      shell: kubectl get svc -n monitoring monitoring-kube-prometheus-alertmanager -o json
      register: prometheus_alertmanager_svc_stats_info
    - name: Verify if Prometheus alertmanager is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in prometheus_alertmanager_svc_stats_info.stdout"
        success_msg: "{{ prometheus_alertmanager_svc_stat_success_msg }}"
        fail_msg: "{{ prometheus_alertmanager_svc_stat_fail_msg }}"
    - name: Get Prometheus node-exporter svc stats
      shell: kubectl get svc -n monitoring monitoring-prometheus-node-exporter -o json
      register: prometheus_node_exporter_svc_stats_info
    - name: Verify if Prometheus node-exporter svc is up and running
      assert:
        that:
          - "'Error from server (NotFound):' not in prometheus_node_exporter_svc_stats_info.stdout"
        success_msg: "{{ prometheus_node_exporter_svc_stat_success_msg }}"
        fail_msg: "{{ prometheus_node_exporter_svc_stat_fail_msg }}"
    - name: Get Prometheus monitoring svc stats
      shell: kubectl get svc -n monitoring {{ item }} -o json
      changed_when: false
      ignore_errors: yes
      register: monitoring_pod_svc_check
      with_items:
        - monitoring-prometheus-node-exporter
        - monitoring-kube-prometheus-alertmanager
        - monitoring-kube-prometheus-operator
        - monitoring-kube-state-metrics
        - monitoring-kube-prometheus-prometheus
    # Testcase OMNIA_1.2_AppArmor_TC_001
    # Test case to find out if AppArmor is enabled (returns Y if true)
    - name: Validate that AppArmor is enabled
      shell: cat /sys/module/apparmor/parameters/enabled
      register: apparmor_enabled
    - name: Find out if AppArmor is enabled (returns Y if true)
      assert:
        that:
          - apparmor_enabled.stdout | regex_search("{{ apparmor_true }}")
        success_msg: "{{ apparmor_enabled_success_msg }}"
        fail_msg: "{{ apparmor_enabled_fail_msg }}"
    # Testcase OMNIA_1.2_AppArmor_TC_002
    # Test case to list all loaded AppArmor profiles for applications and processes and detail their status (enforced, complain, unconfined)
    - name: List all loaded AppArmor profiles and their status
      shell: aa-status
      register: apparmor_status
    - name: Verify that the apparmor module reports the loaded profiles
      assert:
        that:
          - apparmor_status.stdout | regex_search("{{ apparmor_module }}")
        success_msg: "{{ apparmor_status_success_msg }}"
        fail_msg: "{{ apparmor_status_fail_msg }}"
    # Testcase OMNIA_1.2_AppArmor_TC_003
    # Test case to validate available profiles in the /extra-profiles/ path
    - name: Check available AppArmor profiles in the /extra-profiles/ path
      shell: ls /usr/share/apparmor/extra-profiles/ | grep 'usr.bin.passwd'
      register: apparmor_profile
    - name: Verify the usr.bin.passwd profile is present in the /extra-profiles/ path
      assert:
        that:
          - apparmor_profile.stdout | regex_search("{{ apparmor_passwd_profile }}")
        success_msg: "{{ apparmor_profile_success_msg }}"
        fail_msg: "{{ apparmor_profile_fail_msg }}"
    # Testcase OMNIA_1.2_AppArmor_TC_004
    # Test case to list running executables which are currently confined by an AppArmor profile
    - name: List running executables which are currently confined by an AppArmor profile
      shell: ps auxZ | grep -v '^unconfined' | grep 'nscd'
      register: apparmor_not_unconfined
    - name: Verify the confined AppArmor processes include nscd
      assert:
        that:
          - apparmor_not_unconfined.stdout | regex_search("{{ apparmor_nscd }}")
        success_msg: "{{ apparmor_not_unconfined_success_msg }}"
        fail_msg: "{{ apparmor_not_unconfined_fail_msg }}"
    # Testcase OMNIA_1.2_AppArmor_TC_005
    # Test case to list processes with tcp or udp ports that do not have AppArmor profiles loaded
    - name: List processes with tcp or udp ports that do not have AppArmor profiles loaded
      shell: aa-unconfined --paranoid | grep '/usr/sbin/auditd'
      register: apparmor_unconfined
    - name: Verify the unconfined AppArmor processes include auditd
      assert:
        that:
          - apparmor_unconfined.stdout | regex_search("{{ apparmor_auditd }}")
        success_msg: "{{ apparmor_unconfined_success_msg }}"
        fail_msg: "{{ apparmor_unconfined_fail_msg }}"