
Issue#259: adding new nodes using mapping file

Signed-off-by: shubhangi_srivastava <shubhangi_srivastava@dell.com>
John Lockman 4 years ago
parent
commit
bf2e13a042

+ 11 - 4
appliance/appliance_config.yml

@@ -32,11 +32,18 @@ hpc_nic: "em1"
 # Default value of nic is em2
 public_nic: "em2"
 
+# This is the path where the user has kept the ISO image to be provisioned on the target nodes.
+# The ISO file should be the CentOS-7-2009 minimal edition.
+# Other ISO files are not supported.
+iso_file_path: ""
+
 # The mapping file consists of the MAC address and its respective IP address and hostname.
-# If user wants to provide a mapping file, set this value to "true"
 # The format of mapping file should be MAC,hostname,IP and must be a CSV file.
-mapping_file_exists: ""
+# A template for the mapping file exists in omnia/examples and is named mapping_file.csv.
+# This is the path where the user has kept the mapping file for DHCP configuration.
+mapping_file_path: ""
 
-# The dhcp range for assigning the IP address to the baremetal nodes.
+# The dhcp range for assigning the IPv4 address to the baremetal nodes.
+# Example: 10.1.23.1
 dhcp_start_ip_range: ""
-dhcp_end_ip_range: ""
+dhcp_end_ip_range: ""
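
For illustration, a filled-in version of these new variables might look like this (the paths and addresses below are hypothetical placeholders, not values from the commit; the ISO name matches the iso_name already used by the provision role):

    iso_file_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
    mapping_file_path: "/root/omnia/examples/mapping_file.csv"
    dhcp_start_ip_range: "10.1.23.1"
    dhcp_end_ip_range: "10.1.23.254"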

+ 37 - 4
appliance/roles/common/tasks/password_config.yml

@@ -38,6 +38,7 @@
       awx_password | length < 1 or
       hpc_nic | length < 1 or
       public_nic | length < 1 or
+      iso_file_path | length < 1 or
       dhcp_start_ip_range | length < 1 or
       dhcp_end_ip_range | length < 1
 
@@ -47,9 +48,11 @@
     admin_password: "{{ awx_password }}"
     nic:  "{{ hpc_nic }}"
     internet_nic: "{{ public_nic }}"
+    path_for_iso_file: "{{ iso_file_path }}"
     dhcp_start_ip: "{{ dhcp_start_ip_range | ipv4 }}"
     dhcp_end_ip: "{{ dhcp_end_ip_range | ipv4 }}"
-    mapping_file: "{{ mapping_file_exists }}"
+    mapping_file: false
+    path_for_mapping_file: "{{ mapping_file_path }}"
   no_log: true
 
 - name: Get the system hpc ip
@@ -153,10 +156,40 @@
 - name: Assert mapping_file_exists
   assert:
     that:
-      - "( mapping_file == true) or ( mapping_file == false)"
+      - "( mapping_file == true ) or ( mapping_file == false )"
     success_msg: "{{ success_mapping_file }}"
     fail_msg: "{{ fail_mapping_file }}"
-  register: mapping_file_check
+
+- name: Set the mapping file value
+  set_fact:
+    mapping_file: true
+  when: path_for_mapping_file != ""
+
+- name: Check that mapping_file_path exists
+  stat:
+    path: "{{ path_for_mapping_file }}"
+  when: mapping_file == true
+  register: result_path_mapping_file
+
+- name: Fail if mapping_file_path is invalid
+  fail:
+    msg: "{{ invalid_mapping_file_path }}"
+  when: ( mapping_file == true ) and ( result_path_mapping_file.stat.exists == false )
+
+- name: Check that iso_file_path exists
+  stat:
+    path: "{{ path_for_iso_file }}"
+  register: result_path_iso_file
+
+- name: Fail if iso_file_path is invalid
+  fail:
+    msg: "{{ invalid_iso_file_path }}"
+  when: result_path_iso_file.stat.exists == false
+
+- name: Fail when iso path is valid but the image is not an iso file
+  fail:
+    msg: "{{ invalid_iso_file_path }}"
+  when: ( result_path_iso_file.stat.exists == true ) and ( ".iso" not in path_for_iso_file )
 
 - name: Check the subnet of dhcp start range
   shell: |
@@ -301,4 +334,4 @@
   command: >-
     ansible-vault encrypt {{ role_path }}/../../../{{ config_filename }}
     --vault-password-file {{ role_path }}/../../../{{ config_vaultname }}
-  changed_when: false
+  changed_when: false
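
Note: the two ISO failure checks above could be collapsed into a single assert. A minimal sketch, assuming the same result_path_iso_file register (not part of this commit):

    - name: Assert valid iso_file_path
      assert:
        that:
          - result_path_iso_file.stat.exists
          - '".iso" in path_for_iso_file'
        fail_msg: "{{ invalid_iso_file_path }}"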

+ 3 - 0
appliance/roles/common/vars/main.yml

@@ -76,6 +76,9 @@ success_dhcp_range: "Dhcp_range validated"
 fail_dhcp_range: "Failed. Incorrect range assigned for dhcp"
 success_hpc_ip: "IP validated"
 fail_hpc_ip: "Failed. Nic should be configured"
+fail_mapping_file_path: "Failed. mapping_file_path is empty in appliance_config.yml. Provide the path to a valid mapping file."
+invalid_mapping_file_path: "Incorrect mapping_file_path provided in appliance_config.yml."
+invalid_iso_file_path: "Incorrect iso_file_path provided in appliance_config.yml."
 min_length: 8
 max_length: 30
 nic_min_length: 3

+ 16 - 3
appliance/roles/inventory/files/create_inventory.yml

@@ -88,14 +88,18 @@
     - name: Set the hostname from mapping file
       hostname:
         name: "{{ host_name.stdout }}"
-      register: result_host_name
       when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and  (mapping_file | bool == true )
       ignore_errors: true
+
+    - name: Set the hostname if hostname not present in mapping file
+      hostname:
+        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+      when: ('localhost' in hostname_check.stdout) and (file_present.rc != 0) and (mapping_file | bool == true )
+      ignore_errors: true
 
     - name: Set the system hostname
       hostname:
         name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
-      register: result_name
       when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false)
       ignore_errors: true
 
@@ -108,11 +112,20 @@
       when: ('localhost' in hostname_check.stdout) and ( mapping_file_present != "" ) and ( mapping_file | bool == true )
       ignore_errors: true
 
+    - name: Add new hostname to /etc/hosts if hostname not present in mapping file
+      lineinfile:
+        dest: /etc/hosts
+        regexp: '^127\.0\.0\.1[ \t]+localhost'
+        line: "127.0.0.1 localhost compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        state: present
+      when: ('localhost' in hostname_check.stdout) and ( file_present.rc != 0 ) and ( mapping_file | bool == true )
+      ignore_errors: true
+
     - name: Add new hostname to /etc/hosts
       lineinfile:
         dest: /etc/hosts
         regexp: '^127\.0\.0\.1[ \t]+localhost'
-        line: "127.0.0.1 localhost 'compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}'"
+        line: "127.0.0.1 localhost compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
         state: present
       when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false )
       ignore_errors: true
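
For reference, the compute hostname is derived from the last two octets of the node's IP address: with inventory_hostname 10.1.23.4, split('.')[-2] is "23" and split('.')[-1] is "4", so the node is named compute23-4. A hypothetical debug task showing the expression in isolation (illustration only, not in the commit):

    - name: Show derived hostname
      debug:
        msg: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"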

+ 11 - 6
appliance/roles/inventory/tasks/main.yml

@@ -49,9 +49,15 @@
     - name: Save input variables from file
       set_fact:
         cobbler_password: "{{ provision_password }}"
-        mapping_file: "{{ mapping_file_exists }}"
+        mapping_file: false
+        path_mapping_file: "{{ mapping_file_path }}"
       no_log: True
 
+    - name: Set the mapping file status
+      set_fact:
+        mapping_file: true
+      when: path_mapping_file != ""
+
     - name: Encrypt input config file
       command: >-
         ansible-vault encrypt {{ input_config_filename }}
@@ -59,11 +65,11 @@
       changed_when: false
 
-    - name: Check if inventory file already exists
+    - name: Remove existing inventory file
-      stat:
+      file:
         path: "/root/inventory"
-      register: stat_result
+        state: absent
 
-    - name: Create inventory file if doesnt exist
+    - name: Create empty inventory file
       copy:
         dest:  "/root/inventory"
         content: |
@@ -72,7 +78,6 @@
             hosts:
         owner: root
         mode: 0775
-      when: not stat_result.stat.exists
 
     - name: Add inventory playbook
       block:

+ 20 - 14
appliance/roles/provision/tasks/check_prerequisites.yml

@@ -13,26 +13,32 @@
 # limitations under the License.
 ---
 
-- name: Check availability of iso file
-  stat:
-    path: "{{ role_path }}/files/{{ iso_name }}"
-  register: iso_status
-  tags: install
-
-- name: Iso file not present
-  fail:
-    msg: "{{ iso_fail }}"
-  when: iso_status.stat.exists == false
-  register: iso_file_check
-  tags: install
-
 - name: Initialize variables
   set_fact:
     cobbler_container_status: false
     cobbler_image_status: false
     cobbler_config_status: false
+    backup_map_status: false
+    new_node_status: false
   tags: install
 
+- name: Check if any backup file exists
+  block:
+  - name: Check status of backup file
+    stat:
+      path: "{{ role_path }}/files/backup_mapping_file.csv"
+    register: backup_map
+
+  - name: Set status for backup file
+    set_fact:
+      backup_map_status: true
+    when: backup_map.stat.exists == true
+  rescue:
+  - name: Note that no backup mapping file exists
+    debug:
+      msg: "All nodes are new"
+      verbosity: 2
+
 - name: Inspect the cobbler image
   docker_image_info:
     name: cobbler
@@ -78,4 +84,4 @@
     - cobbler_container_status == true
     - "'CentOS' in cobbler_profile_list.stdout"
     - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
-    - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+    - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"

+ 1 - 1
appliance/roles/provision/tasks/configure_cobbler.yml

@@ -53,4 +53,4 @@
     - "{{ role_path }}/files/dhcp.template"
     - "{{ role_path }}/files/settings"
     - "{{ role_path }}/files/centos7.ks"
-    - "{{ role_path }}/files/new_mapping_file.csv.bak"
+    - "{{ role_path }}/files/new_mapping_file.csv.bak"

+ 2 - 2
appliance/roles/provision/tasks/main.yml

@@ -40,11 +40,11 @@
 
 - name: Dhcp Configuration
   import_tasks: dhcp_configure.yml
-  when: not cobbler_image_status
+  when: (not cobbler_image_status) or (backup_map_status == true)
 
 - name: Mapping file validation
   import_tasks: mapping_file.yml
-  when: (not cobbler_image_status) and (mapping_file == true)
+  when: ( (not cobbler_image_status) and (mapping_file == true) ) or ( backup_map_status == true )
 
 - name: Cobbler image creation
   import_tasks: cobbler_image.yml

+ 96 - 13
appliance/roles/provision/tasks/mapping_file.yml

@@ -9,25 +9,25 @@
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
 
-- name: Check availability of mapping file
-  stat:
-    path: "{{ role_path }}/files/{{ mapping_file_name }}"
-  register: mapping_file_status
+- name: Check if file is comma separated
+  shell: awk -F\, '{print NF-1}' {{ path_for_mapping_file }}
+  register: comma_separated
+  changed_when: false
   tags: install
 
-- name: Mapping file not present
+- name: Fail if not comma separated
   fail:
-    msg: "{{ mapping_file_fail }}"
-  when: mapping_file_status.stat.exists == false
-  register: mapping_file_check
+    msg: "{{ not_comma_separated }}"
+  when: item != "2"
+  with_items: "{{ comma_separated.stdout_lines }}"
   tags: install
 
 - name: Remove blank lines
-  shell:  awk -F, 'length>NF+1' {{ role_path }}/files/{{ mapping_file_name }} > {{ role_path }}/files/new_mapping_file.csv
+  shell:  awk -F, 'length>NF+1' {{ path_for_mapping_file }} > {{ role_path }}/files/new_mapping_file.csv
   changed_when: false
   tags: install
 
@@ -38,22 +37,69 @@
   changed_when: false
   tags: install
 
-- name: Count the rows
+- name: Check if header present
+  shell:  awk 'NR==1 { print $1}' {{ role_path }}/files/new_mapping_file.csv
+  register: header
+  changed_when: false
+  tags: install
+
+- name: Fail if header not present
+  fail:
+    msg: "{{ header_fail }}"
+  when: header.stdout != valid_header
+
+- name: Count the hostnames
   shell: awk -F',' '{print $2}' {{ role_path }}/files/new_mapping_file.csv | wc -l
-  register: total_count
+  register: total_hostname
+  changed_when: false
+  tags: install
+
+- name: Count the IPs
+  shell: awk -F',' '{print $3}' {{ role_path }}/files/new_mapping_file.csv | wc -l
+  register: total_ip
+  changed_when: false
+  tags: install
+
+- name: Count the MACs
+  shell: awk -F',' '{print $1}' {{ role_path }}/files/new_mapping_file.csv | wc -l
+  register: total_mac
   changed_when: false
   tags: install
 
 - name: Check for duplicate hostname
   shell: awk -F',' '{print $2}' {{ role_path }}/files/new_mapping_file.csv | uniq | wc -l
-  register: count_host
+  register: uniq_hostname
+  changed_when: false
+  tags: install
+
+- name: Check for duplicate IPs
+  shell: awk -F',' '{print $3}' {{ role_path }}/files/new_mapping_file.csv | sort | uniq | wc -l
+  register: uniq_ip
+  changed_when: false
+  tags: install
+
+- name: Check for duplicate MACs
+  shell: awk -F',' '{print $1}' {{ role_path }}/files/new_mapping_file.csv | sort | uniq | wc -l
+  register: uniq_mac
   changed_when: false
   tags: install
 
 - name: Fail if duplicate hosts exist
   fail:
     msg: "{{ fail_hostname_duplicate }}"
-  when:  total_count.stdout >  count_host.stdout
+  when: total_hostname.stdout|int > uniq_hostname.stdout|int
+  tags: install
+
+- name: Fail if duplicate IPs exist
+  fail:
+    msg: "{{ fail_ip_duplicate }}"
+  when: total_ip.stdout|int > uniq_ip.stdout|int
+  tags: install
+
+- name: Fail if duplicate MACs exist
+  fail:
+    msg: "{{ fail_mac_duplicate }}"
+  when: total_mac.stdout|int > uniq_mac.stdout|int
   tags: install
 
 - name: Check if _ or . or space present in hostname
@@ -69,6 +115,23 @@
   when: hostname_result.stdout != ""
   tags: install
 
+- name: Compare the file for new nodes
+  block:
+  - name: Find difference from backup mapping file
+    shell: diff {{ role_path }}/files/new_mapping_file.csv {{ role_path }}/files/backup_mapping_file.csv | tr -d \>|tr -d \<| grep -E -- ', & :| '
+    register: diff_output
+    when: backup_map_status == true
+
+  - name: Set status of new nodes
+    set_fact:
+      new_node_status: true
+    when: diff_output.stdout != ""
+  rescue:
+  - name: No new nodes
+    debug:
+      msg: "No new nodes to add"
+      verbosity: 2
+
 - name: Fetch input
   blockinfile:
     path: "{{ role_path }}/files/dhcp.template"
@@ -81,4 +144,23 @@
     marker: "# {mark} DHCP BLOCK OF {{ item.split(',')[0] }}"
   with_lines: "{{ remove_header }}"
   ignore_errors: true
+  when: (not cobbler_image_status) or (new_node_status == true)
   tags: install
+
+- name: Create a backup file
+  copy:
+    src: "{{ role_path }}/files/new_mapping_file.csv"
+    dest: "{{ role_path }}/files/backup_mapping_file.csv"
+
+- name: Copy the dhcp.template inside container
+  command: docker exec cobbler cp /root/omnia/appliance/roles/provision/files/dhcp.template /etc/cobbler/dhcp.template
+  when: ( cobbler_container_status == true ) and ( new_node_status == true )
+
+- name: Cobbler sync for adding new nodes
+  command: docker exec cobbler cobbler sync
+  when: ( cobbler_container_status == true ) and ( new_node_status == true )
+
+- name: Restart dhcpd
+  command: docker exec cobbler systemctl restart dhcpd
+  when: ( cobbler_container_status == true ) and ( new_node_status == true )
+
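
New-node detection hinges on comparing the freshly generated new_mapping_file.csv against the backup from the previous run. As a sketch of the idea (assuming both files exist; diff prefixes lines unique to the first file with "<", so this counts rows present only in the new file):

    - name: Count rows present only in the new mapping file
      shell: diff {{ role_path }}/files/new_mapping_file.csv {{ role_path }}/files/backup_mapping_file.csv | grep -c '^<'
      register: new_rows
      changed_when: false
      failed_when: false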

+ 1 - 1
appliance/roles/provision/tasks/mount_iso.yml

@@ -36,7 +36,7 @@
   tags: install
 
 - name: Mount the iso file
-  command: mount -o loop {{ role_path }}/files/{{ iso_name }} /mnt/{{ iso_path }}
+  command: mount -o loop {{ path_for_iso_file }} /mnt/{{ iso_path }}
   changed_when: false
   args:
     warn: no

+ 2 - 0
appliance/roles/provision/tasks/provision_password.yml

@@ -58,11 +58,13 @@
   changed_when: false
   register: prompt_random_phrase
   tags: install
+  no_log: true
 
 - name: Set random phrase
   set_fact:
     random_phrase: "{{ prompt_random_phrase.stdout }}"
   tags: install
+  no_log: true
 
 - name: Login password
   command: openssl passwd -1 -salt {{ random_phrase }} {{ cobbler_password }}

+ 6 - 3
appliance/roles/provision/vars/main.yml

@@ -16,10 +16,13 @@
 # vars file for provision
 
 #Usage: mapping_file.yml
-mapping_file_name: mapping_file.csv
-mapping_file_fail: "Mapping file not found. Copy the mapping_file.csv to omnia/appliance/roles/provision/files"
-fail_hostname_duplicate:  "Duplicate hostname exists. Please verify mapping file again."
+fail_hostname_duplicate:  "Failed: Duplicate hostname exists. Please verify mapping file again."
 remove_header: awk 'NR > 1 { print }' {{ role_path }}/files/new_mapping_file.csv
+fail_ip_duplicate:  "Failed: Duplicate IP exists. Please verify mapping file again."
+fail_mac_duplicate:  "Failed: Duplicate MAC exists. Please verify mapping file again."
+header_fail: "Failed: Header (MAC,Hostname,IP) should be present in the mapping file."
+valid_header: MAC,Hostname,IP
+not_comma_separated: "Failed: Mapping file should be comma separated."
 
 #Usage: check_prerequisite.yml
 iso_name: CentOS-7-x86_64-Minimal-2009.iso

+ 2 - 2
appliance/test/appliance_config_empty.yml

@@ -14,12 +14,12 @@
 ---
 
 # Password used while deploying OS on bare metal servers and for Cobbler UI.
-# The Length of the password should be atleast 8.
+# The length of the password should be at least 8.
 # The password must not contain -,\, ',"
 provision_password: ""
 
 # Password used for the AWX UI.
-# The Length of the password should be atleast 8.
+# The length of the password should be at least 8.
 # The password must not contain -,\, ',"
 awx_password: ""
 

+ 2 - 2
appliance/test/appliance_config_test.yml

@@ -14,12 +14,12 @@
 ---
 
 # Password used while deploying OS on bare metal servers and for Cobbler UI.
-# The Length of the password should be atleast 8.
+# The length of the password should be at least 8.
 # The password must not contain -,\, ',"
 provision_password: "omnia@123"
 
 # Password used for the AWX UI.
-# The Length of the password should be atleast 8.
+# The length of the password should be at least 8.
 # The password must not contain -,\, ',"
 awx_password: "omnia@123"
 

+ 2 - 0
examples/mapping_file.csv

@@ -0,0 +1,2 @@
+MAC,Hostname,IP
+xx:yy:zz:aa:bb:cc,server,1.2.3.4
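
A slightly larger illustration of the same MAC,Hostname,IP format (all values are placeholders):

    MAC,Hostname,IP
    aa:bb:cc:dd:ee:01,compute01,10.1.23.101
    aa:bb:cc:dd:ee:02,compute02,10.1.23.102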

+ 2 - 1
roles/slurm_exporter/tasks/start_services.yml

@@ -23,4 +23,5 @@
 - name: Start services
   systemd:
     name: prometheus-slurm-exporter
-    state: restarted
+    state: restarted
+    enabled: yes

+ 4 - 2
roles/slurm_start_services/tasks/main.yml

@@ -36,15 +36,17 @@
     - "{{ groups['compute'] }}"
 
 - name: Enable slurmdbd on manager
-  service:
+  systemd:
     name: slurmdbd
     state: restarted
+    enabled: yes
   tags: install
 
 - name: Start slurmctld on manager
-  service:
+  systemd:
     name: slurmctld
     state: restarted
+    enabled: yes
   tags: install
 
 - name: Show cluster if exists

+ 1 - 0
roles/slurm_workers/tasks/main.yml

@@ -147,4 +147,5 @@
   systemd:
     name: slurmd.service
     state: started
+    enabled: yes
   tags: install