Merge branch 'devel' into patch-1

John Lockman · 3 years ago · commit af0733c584

+ 15 - 4
.all-contributorsrc

@@ -2,7 +2,7 @@
   "files": [
   "files": [
     "README.md"
     "README.md"
   ],
   ],
-  "imageSize": 100,
+  "imageSize": 75,
   "commit": false,
   "commit": false,
   "badgeTemplate": "<!-- DO NOT ADD A BADGE -->",
   "badgeTemplate": "<!-- DO NOT ADD A BADGE -->",
   "contributors": [
   "contributors": [
@@ -46,7 +46,8 @@
       "avatar_url": "https://avatars.githubusercontent.com/u/73213880?v=4",
       "avatar_url": "https://avatars.githubusercontent.com/u/73213880?v=4",
       "profile": "https://github.com/DeepikaKrishnaiah",
       "profile": "https://github.com/DeepikaKrishnaiah",
       "contributions": [
       "contributions": [
-        "code"
+        "code",
+        "test"
       ]
       ]
     },
     },
     {
     {
@@ -135,7 +136,8 @@
       "avatar_url": "https://avatars.githubusercontent.com/u/72869337?v=4",
       "avatar_url": "https://avatars.githubusercontent.com/u/72869337?v=4",
       "profile": "https://github.com/Shubhangi-dell",
       "profile": "https://github.com/Shubhangi-dell",
       "contributions": [
       "contributions": [
-        "code"
+        "code",
+        "maintenance"
       ]
       ]
     },
     },
     {
     {
@@ -288,9 +290,18 @@
         "test",
         "test",
         "code"
         "code"
       ]
       ]
+    },
+    {
+      "login": "ptrinesh",
+      "name": "ptrinesh",
+      "avatar_url": "https://avatars.githubusercontent.com/u/73214211?v=4",
+      "profile": "https://github.com/ptrinesh",
+      "contributions": [
+        "code"
+      ]
    }
  ],
-  "contributorsPerLine": 7,
+  "contributorsPerLine": 10,
  "projectName": "omnia",
  "projectOwner": "dellhpc",
  "repoType": "github",

File diff suppressed because it is too large
+ 5 - 2
README.md


+ 2 - 2
control_plane/roles/control_plane_common/tasks/validate_host_mapping_file.yml

@@ -162,12 +162,12 @@
  - name: Validation to check number of login nodes defined
    fail:
      msg: "{{ fail_mapping_file_login_role }}"
-    when: not ( count_of_login|int == 1)
+    when: (not ( count_of_login|int == 1) and login_node_required) or ((count_of_login|int == 1) and not login_node_required)

  - name: Validation to check number of nfs nodes defined
    fail:
      msg: "{{ fail_mapping_file_nfs_role }}"
-    when: powervault_support and not (count_of_nfs_node|int == 1)
+    when: (not (count_of_nfs_node|int == 1) and powervault_support) or ((count_of_nfs_node|int == 1) and not powervault_support)
  tags: install

  rescue:
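
Reading the two new conditions: each one fails the play whenever the node count and its controlling flag disagree, i.e. an exclusive-or. A minimal sketch of the same login check written more compactly (assuming login_node_required is a plain boolean, as the hunk suggests):

- name: Validation to check number of login nodes defined
  fail:
    msg: "{{ fail_mapping_file_login_role }}"
  # Fail when "exactly one login node defined" and login_node_required disagree (XOR)
  when: (count_of_login | int == 1) != login_node_required

The nfs check follows the same pattern with count_of_nfs_node and powervault_support.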

+ 2 - 2
control_plane/roles/control_plane_common/vars/main.yml

@@ -170,8 +170,8 @@ fail_mapping_file_roles_error: "Failed. Define correct Component Roles for each
                                 {{ group_name_login }}, {{ group_name_nfs }}"
 fail_mapping_file_manager_role: "Exactly 1 manager node must be defined"
 fail_mapping_file_compute_role: "Atleast 1 compute node must be defined"
-fail_mapping_file_login_role: "Exactly 1 login node must be defined"
-fail_mapping_file_nfs_role: "Exactly 1 nfs node must be defined"
+fail_mapping_file_login_role: "Exactly 1 login node must be defined or login_node_required must be true in omnia_config.yml"
+fail_mapping_file_nfs_role: "Exactly 1 nfs node must be defined or powervault_support must be true in base_vars.yml"
 count_of_roles_defined: "Component Roles defined: Manager Node: {{ count_of_manager }},
                         Compute Nodes: {{ count_of_compute }}, Login Node: {{ count_of_login }},
                         Nfs Node: {{ count_of_nfs_node }}, Total Nodes: {{ count_total_items }} "
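
The two flags named in the new messages live in separate input files. A hedged sketch of the relevant excerpts (file names taken from the messages, values shown are placeholders):

# omnia_config.yml (excerpt, placeholder value)
login_node_required: true

# base_vars.yml (excerpt, placeholder value)
powervault_support: false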

+ 2 - 1
control_plane/roles/control_plane_device/tasks/mapping_file.yml

@@ -67,6 +67,7 @@
  copy:
    src: "{{ temp_mgmt_mapping_file }}"
    dest: "{{ role_path }}/files/backup_mngmnt_mapping_file.csv"
+    mode: 0644

 - name: Get mngmnt container pod name
   command: 'kubectl get pod -n network-config -l app=mngmnt-network -o jsonpath="{.items[0].metadata.name}"'
@@ -82,4 +83,4 @@

 - name: Restart dhcpd
   command: 'kubectl exec --stdin --tty -n network-config {{ mngmnt_pod_name.stdout }} -- systemctl restart dhcpd'
-  when:  (  mngmnt_network_container_status ) and ( new_mngmnt_node_status )
+  when:  (  mngmnt_network_container_status ) and ( new_mngmnt_node_status )
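
Both hunks in this file are hygiene fixes: the copy task gains an explicit file mode, and the final when: line only loses trailing whitespace. If the mode is revisited later, quoting it is the usual recommendation so YAML does not parse the leading-zero literal as a number; a minimal sketch (the task name here is illustrative):

- name: Copy the backup management mapping file
  copy:
    src: "{{ temp_mgmt_mapping_file }}"
    dest: "{{ role_path }}/files/backup_mngmnt_mapping_file.csv"
    mode: '0644'   # quoted so the mode stays the octal string 0644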

+ 16 - 2
control_plane/roles/deploy_job_templates/tasks/group_inventory.yml

@@ -20,13 +20,27 @@
  no_log: true
  register: hosts_list

-- name: Add the host to the group in node_inventory if present
+- name: Add the host to compute group in node_inventory if it exists
   awx.awx.tower_group:
     name: "{{ item.split(',')[3] }}"
     inventory: "{{ node_inventory }}"
+    preserve_existing_hosts: true
     hosts:
       - "{{ item.split(',')[2] }}"
     tower_config_file: "{{ tower_config_file }}"
   when:
     - item.split(',')[2] != "IP"
-    - item.split(',')[2] in hosts_list.stdout
+    - item.split(',')[2] in hosts_list.stdout 
+    - item.split(',')[3] == "compute"
+
+- name: Add the host to other groups in node_inventory if it exists
+  awx.awx.tower_group:
+    name: "{{ item.split(',')[3] }}"
+    inventory: "{{ node_inventory }}"
+    hosts:
+      - "{{ item.split(',')[2] }}"
+    tower_config_file: "{{ tower_config_file }}"
+  when:
+    - item.split(',')[2] != "IP"
+    - item.split(',')[2] in hosts_list.stdout
+    - item.split(',')[3] != "compute"
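
The split above lets the compute group accumulate hosts across loop iterations via preserve_existing_hosts: true, while the single-host groups (manager, login, nfs) keep the default overwrite behaviour. Purely as an alternative sketch, not part of this commit, the two tasks could be collapsed by templating the flag; this assumes awx.awx.tower_group accepts a templated boolean, which Ansible normally coerces from the rendered "True"/"False" string:

- name: Add the host to its group in node_inventory if it exists
  awx.awx.tower_group:
    name: "{{ item.split(',')[3] }}"
    inventory: "{{ node_inventory }}"
    # Only the compute group keeps hosts added in earlier iterations
    preserve_existing_hosts: "{{ item.split(',')[3] == 'compute' }}"
    hosts:
      - "{{ item.split(',')[2] }}"
    tower_config_file: "{{ tower_config_file }}"
  when:
    - item.split(',')[2] != "IP"
    - item.split(',')[2] in hosts_list.stdout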

+ 6 - 0
docs/README.md

@@ -192,6 +192,12 @@ If hosts are listed, then an IP address has been assigned to them by DHCP. Howev
 	2. For connecting to the internet (Management purposes)
 	3. For connecting to PowerVault (Data Connection)  
 	
+	
+* **Issue**: Hosts are not automatically deleted from the awx UI when redeploying the cluster.  
+	**Resolution**: Before redeploying the cluster, ensure that all hosts are manually deleted from the awx UI.
+	
+* **Issue**: Decommissioned compute nodes are not deleted automatically from the awx UI.
+	**Resolution**: Once a node is decommissioned, ensure that the decommissioned host is manually deleted from the awx UI.
 
 # [Frequently asked questions](FAQ.md)
 

File diff suppressed because it is too large
+ 3 - 3
docs/control_plane/device_templates/CONFIGURE_INFINIBAND_SWITCHES.md


File diff suppressed because it is too large
+ 2 - 2
docs/control_plane/input_parameters/INFINIBAND_SWITCHES.md


+ 1 - 1
roles/powervault_me4_nfs/tasks/nfs_node_configure.yml

@@ -15,7 +15,7 @@
 ---

 - name: Include
-  include_tasks: "{{ playbook_dir }}/../../../control_plane/roles/powervault_me4/tasks/ports.yml"
+  include_tasks: "{{ pv_ports_file }}"

 - name: Refresh ssh keys
   command: ssh-keygen -R {{ groups['powervault_me4'][0] }}

+ 2 - 1
roles/powervault_me4_nfs/vars/main.yml

@@ -35,4 +35,5 @@ pv_nic_ip: 192.168.25.3
 pv_nic_gateway: 192.168.25.1
 pv_port_ip: 192.168.25.5
 pv_nfs_file: "{{ role_path }}/../../control_plane/input_params/powervault_me4_vars.yml"
-nic_path: "/etc/sysconfig/network-scripts/ifcfg-{{ powervault_me4_server_nic }}"    
+nic_path: "/etc/sysconfig/network-scripts/ifcfg-{{ powervault_me4_server_nic }}"   
+pv_ports_file: "{{ playbook_dir }}/control_plane/roles/powervault_me4/tasks/ports.yml"
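
For context, the last two files work as a pair: the hard-coded relative include path in nfs_node_configure.yml is replaced by the pv_ports_file variable defined here, which resolves against playbook_dir. Condensed, the pair reads:

# roles/powervault_me4_nfs/vars/main.yml
pv_ports_file: "{{ playbook_dir }}/control_plane/roles/powervault_me4/tasks/ports.yml"

# roles/powervault_me4_nfs/tasks/nfs_node_configure.yml
- name: Include
  include_tasks: "{{ pv_ports_file }}"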