
Issue#153: TA - Cobbler Configuration and OS Deployment

Signed-off-by: abhishek-s-a <a_sa@dellteam.com>
Lucas A. Wilson 4 years ago
parent
commit
c194e58319

+ 3 - 0
appliance/test/cobbler_inventory

@@ -0,0 +1,3 @@
+[cobbler_servers]
+172.17.0.10
+100.98.24.231
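
This inventory defines the cobbler_servers group and the two node IPs that the NDOD test cases below target. As a usage sketch only (this play is not part of the commit), the group could be exercised directly with a minimal playbook:

# Hypothetical usage sketch, not part of this commit: ping every host in the
# cobbler_servers group defined by this inventory file.
- name: Verify reachability of cobbler-provisioned servers
  hosts: cobbler_servers
  gather_facts: false
  tasks:
    - name: Ping provisioned nodes
      ping: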

+ 1 - 1
appliance/test/test_common.yml

@@ -222,4 +222,4 @@
     - name: Disable selinux
       selinux:
         state: disabled
-      tags: TC_006
+      tags: TC_006

+ 363 - 0
appliance/test/test_provision_cc.yml

@@ -0,0 +1,363 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_DIO_US_CC_TC_010
+# Execute provision role in management station and verify cobbler configuration
+- name: OMNIA_DIO_US_CC_TC_010
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_010
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_010
+
+    - block:
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+          vars:
+            no_prompt: true
+            admin_password: "{{ boundary_password }}"
+            admin_password_confirm: "{{ boundary_password }}"
+      tags: TC_010
+
+    - name: Check that the connection to the cobbler UI returns status 200
+      uri:
+        url: https://localhost/cobbler_web
+        status_code: 200
+        return_content: yes
+        validate_certs: no
+      tags: TC_010
+
+    - name: Fetch cobbler version in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler version
+      changed_when: false
+      register: cobbler_version
+      tags: TC_010
+
+    - name: Verify cobbler version
+      assert:
+        that:
+          - "'Cobbler' in cobbler_version.stdout"
+          - "'Error' not in cobbler_version.stdout"
+        fail_msg: "{{ cobbler_version_fail_msg }}"
+        success_msg: "{{ cobbler_version_success_msg }}"
+      tags: TC_010
+
+    - name: Run cobbler check command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler check
+      changed_when: false
+      register: cobbler_check
+      tags: TC_010
+
+    - name: Verify cobbler check command output
+      assert:
+        that:
+          - "'The following are potential configuration items that you may want to fix' not in cobbler_check.stdout"
+          - "'Error' not in cobbler_check.stdout"
+        fail_msg: "{{ cobbler_check_fail_msg }}"
+        success_msg: "{{ cobbler_check_success_msg }}"
+      ignore_errors: yes
+      tags: TC_010
+
+    - name: Run cobbler sync command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler sync
+      changed_when: false
+      register: cobbler_sync
+      tags: TC_010
+
+    - name: Verify cobbler sync command output
+      assert:
+        that:
+          - "'TASK COMPLETE' in cobbler_sync.stdout"
+          - "'Fail' not in cobbler_sync.stdout"
+          - "'Error' not in cobbler_sync.stdout"
+        fail_msg: "{{ cobbler_sync_fail_msg }}"
+        success_msg: "{{ cobbler_sync_success_msg }}"
+      tags: TC_010
+
+    - name: Fetch cobbler distro list
+      command: docker exec {{ docker_container_name }} cobbler distro list
+      changed_when: false
+      register: cobbler_distro_list
+      tags: TC_010
+
+    - name: Verify cobbler distro list
+      assert:
+        that:
+          - "'CentOS' in cobbler_distro_list.stdout"
+        fail_msg: "{{ cobbler_distro_list_fail_msg }}"
+        success_msg: "{{ cobbler_distro_list_success_msg }}"
+      tags: TC_010
+
+    - name: Fetch cobbler profile list
+      command: docker exec cobbler cobbler profile list
+      changed_when: false
+      register: cobbler_profile_list
+      tags: TC_010
+
+    - name: Verify cobbler profile list
+      assert:
+        that:
+          - "'CentOS' in cobbler_profile_list.stdout"
+        fail_msg: "{{ cobbler_profile_list_fail_msg }}"
+        success_msg: "{{ cobbler_profile_list_success_msg }}"
+      tags: TC_010
+
+    - name: Check kickstart file
+      shell: |
+        docker exec {{ docker_container_name }} [ -f /var/lib/cobbler/kickstarts/{{ kickstart_filename }} ] && echo "File exists" || echo "File does not exist"
+      changed_when: false
+      register: kickstart_file_status
+      tags: TC_010
+
+    - name: Verify kickstart file present
+      assert:
+        that:
+          - "'File exist' in kickstart_file_status.stdout"
+        fail_msg: "{{ kickstart_file_fail_msg }}"
+        success_msg: "{{ kickstart_file_success_msg }}"
+      tags: TC_010
+
+# Testcase OMNIA_DIO_US_CC_TC_011
+# Execute provision role in management station where one container is already present
+- name: OMNIA_DIO_US_CC_TC_011
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_011
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_011
+
+    - name: Create docker image
+      docker_image:
+        name: ubuntu
+        tag: latest
+        source: pull
+      tags: TC_011
+
+    - name: Create docker container
+      command: docker run -dit ubuntu
+      register: create_docker_container
+      changed_when: true
+      args:
+        warn: false
+      tags: TC_011
+
+    - block:
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+          vars:
+            no_prompt: true
+            admin_password: "{{ boundary_password }}"
+            admin_password_confirm: "{{ boundary_password }}"
+      tags: TC_011
+
+    - name: Check that the connection to the cobbler UI returns status 200
+      uri:
+        url: https://localhost/cobbler_web
+        status_code: 200
+        return_content: yes
+        validate_certs: no
+      tags: TC_011
+
+    - name: Fetch cobbler version in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler version
+      changed_when: false
+      register: cobbler_version
+      tags: TC_011
+
+    - name: Verify cobbler version
+      assert:
+        that:
+          - "'Cobbler' in cobbler_version.stdout"
+          - "'Error' not in cobbler_version.stdout"
+        fail_msg: "{{ cobbler_version_fail_msg }}"
+        success_msg: "{{ cobbler_version_success_msg }}"
+      tags: TC_011
+
+    - name: Run cobbler check command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler check
+      changed_when: false
+      register: cobbler_check
+      tags: TC_011
+
+    - name: Verify cobbler check command output
+      assert:
+        that:
+          - "'The following are potential configuration items that you may want to fix' not in cobbler_check.stdout"
+          - "'Error' not in cobbler_check.stdout"
+        fail_msg: "{{ cobbler_check_fail_msg }}"
+        success_msg: "{{ cobbler_check_success_msg }}"
+      ignore_errors: yes
+      tags: TC_011
+
+    - name: Run cobbler sync command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler sync
+      changed_when: false
+      register: cobbler_sync
+      tags: TC_011
+
+    - name: Verify cobbler sync command output
+      assert:
+        that:
+          - "'TASK COMPLETE' in cobbler_sync.stdout"
+          - "'Fail' not in cobbler_sync.stdout"
+          - "'Error' not in cobbler_sync.stdout"
+        fail_msg: "{{ cobbler_sync_fail_msg }}"
+        success_msg: "{{ cobbler_sync_success_msg }}"
+      tags: TC_011
+
+    - name: Fetch cobbler distro list
+      command: docker exec {{ docker_container_name }} cobbler distro list
+      changed_when: false
+      register: cobbler_distro_list
+      tags: TC_011
+
+    - name: Verify cobbler distro list
+      assert:
+        that:
+          - "'CentOS' in cobbler_distro_list.stdout"
+        fail_msg: "{{ cobbler_distro_list_fail_msg }}"
+        success_msg: "{{ cobbler_distro_list_success_msg }}"
+      tags: TC_011
+
+    - name: Fetch cobbler profile list
+      command: docker exec cobbler cobbler profile list
+      changed_when: false
+      register: cobbler_profile_list
+      tags: TC_011
+
+    - name: Verify cobbler profile list
+      assert:
+        that:
+          - "'CentOS' in cobbler_profile_list.stdout"
+        fail_msg: "{{ cobbler_profile_list_fail_msg }}"
+        success_msg: "{{ cobbler_profile_list_success_msg }}"
+      tags: TC_011
+
+    - name: Check kickstart file
+      shell: |
+        docker exec {{ docker_container_name }} [ -f /var/lib/cobbler/kickstarts/{{ kickstart_filename }} ] && echo "File exists" || echo "File does not exist"
+      changed_when: false
+      register: kickstart_file_status
+      tags: TC_011
+
+    - name: Verify kickstart file present
+      assert:
+        that:
+          - "'File exist' in kickstart_file_status.stdout"
+        fail_msg: "{{ kickstart_file_fail_msg }}"
+        success_msg: "{{ kickstart_file_success_msg }}"
+      tags: TC_011
+
+    - name: Delete the ubuntu container
+      docker_container:
+        name: "{{ create_docker_container.stdout }}"
+        state: absent
+      tags: TC_011
+
+    - name: Delete the ubuntu image
+      docker_image:
+        name: ubuntu
+        state: absent
+      tags: TC_011
+
+# Testcase OMNIA_DIO_US_CC_TC_012
+# Execute provision role in management station and reboot management station
+- name: OMNIA_DIO_US_CC_TC_012
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Check last uptime of the server
+      shell: |
+        current_time=$(date +"%Y-%m-%d %H")
+        uptime -s | grep "$current_time"
+      register: uptime_status
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_012
+
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      when: uptime_status.stdout|length < 1
+      tags: TC_012
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      when: uptime_status.stdout|length < 1
+      tags: TC_012
+
+    - block:
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+          vars:
+            no_prompt: true
+            admin_password: "{{ boundary_password }}"
+            admin_password_confirm: "{{ boundary_password }}"
+      when: uptime_status.stdout|length < 1
+      tags: TC_012
+
+    - name: Reboot localhost
+      command: reboot
+      when: uptime_status.stdout|length < 1
+      tags: TC_012
+
+    - name: Inspect cobbler container
+      docker_container_info:
+        name: "{{ docker_container_name }}"
+      register: cobbler_cnt_status
+      tags: TC_012
+
+    - name: Verify cobbler container is running after reboot
+      assert:
+        that: "'running' in cobbler_cnt_status.container.State.Status"
+        fail_msg: "{{ cobbler_reboot_fail_msg }}"
+        success_msg: "{{ cobbler_reboot_success_msg }}"
+      tags: TC_012
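
TC_012 asserts that the cobbler container is running again after the management station reboots, which presumes the provision role starts it with a restart policy. A minimal, non-authoritative sketch of such a task (image name and parameters are illustrative, not the role's actual code):

# Illustrative only: a container started with restart_policy: always comes
# back up after a host reboot, which is what TC_012 verifies.
- name: Start cobbler container with a restart policy (hypothetical)
  docker_container:
    name: cobbler
    image: cobbler:latest
    state: started
    restart_policy: always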

+ 458 - 0
appliance/test/test_provision_cdip.yml

@@ -0,0 +1,458 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_DIO_US_CDIP_TC_001
+# Execute provision role in management station with an empty cobbler password
+- name: OMNIA_DIO_US_CDIP_TC_001
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_001
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_001
+
+    - block:
+        - name: Test cobbler password with empty string
+          include_role:
+            name: ../roles/provision
+            tasks_from: "{{ item }}"
+          with_items:
+           - "{{ cobbler_image_files }}"
+          vars:
+            no_prompt: true
+            admin_password: "{{ empty_password }}"
+            admin_password_confirm: "{{ empty_password }}"
+      rescue:
+        - name: Validate failure message
+          assert:
+            that: fail_msg_pwd_format in msg_pwd_format.msg
+            success_msg: "{{ validate_password_success_msg }}"
+            fail_msg: "{{ validate_password_fail_msg }}"
+      tags: TC_001
+
+# Testcase OMNIA_DIO_US_CDIP_TC_002
+# Execute provision role in management station with a cobbler password of length 8 characters
+- name: OMNIA_DIO_US_CDIP_TC_002
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_002
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_002
+
+    - block:
+        - name: Test cobbler password with 8 characters
+          include_role:
+            name: ../roles/provision
+            tasks_from: "{{ item }}"
+          with_items:
+           - "{{ cobbler_image_files }}"
+          vars:
+            no_prompt: true
+            admin_password: "{{ boundary_password }}"
+            admin_password_confirm: "{{ boundary_password }}"
+      always:
+        - name: Validate success message
+          assert:
+            that:  success_msg_pwd_format in msg_pwd_format.msg
+            success_msg: "{{ validate_password_success_msg }}"
+            fail_msg: "{{ validate_password_fail_msg }}"
+      tags: TC_002
+
+# Testcase OMNIA_DIO_US_CDIP_TC_003
+# Execute provision role in management station with a cobbler password longer than 15 characters
+- name: OMNIA_DIO_US_CDIP_TC_003
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_003
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_003
+
+    - block:
+        - name: Test cobbler password with lengthy string
+          include_role:
+             name: ../roles/provision
+             tasks_from: "{{ item }}"
+          with_items:
+           - "{{ cobbler_image_files }}"
+          vars:
+            no_prompt: true
+            admin_password: "{{ lengthy_password }}"
+            admin_password_confirm: "{{ lengthy_password }}"
+      always:
+        - name: Validate success message
+          assert:
+            that:  success_msg_pwd_format in msg_pwd_format.msg
+            success_msg: "{{ validate_password_success_msg }}"
+            fail_msg: "{{ validate_password_fail_msg }}"
+      tags: TC_003
+
+# Testcase OMNIA_DIO_US_CDIP_TC_004
+# Execute provision role in management station with a cobbler password that contains white spaces
+- name: OMNIA_DIO_US_CDIP_TC_004
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_004
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_004
+
+    - block:
+        - name: Test cobbler password with a string containing white space
+          include_role:
+            name: ../roles/provision
+            tasks_from: "{{ item }}"
+          with_items:
+           - "{{ cobbler_image_files }}"
+          vars:
+            no_prompt: true
+            admin_password: "{{ whitespace_password }}"
+            admin_password_confirm: "{{ whitespace_password }}"
+      always:
+        - name: Validate success message
+          assert:
+            that:  success_msg_pwd_format in msg_pwd_format.msg
+            success_msg: "{{ validate_password_success_msg }}"
+            fail_msg: "{{ validate_password_fail_msg }}"
+      tags: TC_004
+
+# Testcase OMNIA_DIO_US_CDIP_TC_005
+# Execute provision role in management station with a cobbler password containing special characters
+- name: OMNIA_DIO_US_CDIP_TC_005
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_005
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_005
+
+    - block:
+        - name: Test cobbler password with a string containing special characters
+          include_role:
+            name: ../roles/provision
+            tasks_from: "{{ item }}"
+          with_items:
+           - "{{ cobbler_image_files }}"
+          vars:
+            no_prompt: true
+            admin_password: "{{ special_character_password }}"
+            admin_password_confirm: "{{ special_character_password }}"
+      always:
+        - name: Validate success message
+          assert:
+            that:  success_msg_pwd_format in msg_pwd_format.msg
+            success_msg: "{{ validate_password_success_msg }}"
+            fail_msg: "{{ validate_password_fail_msg }}"
+      tags: TC_005
+
+# Testcase OMNIA_DIO_US_CDIP_TC_006
+# Execute provision role in management station with cobbler password and cobbler password confirm having unequal values
+- name: OMNIA_DIO_US_CDIP_TC_006
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_006
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_006
+
+    - block:
+        - name: Test cobbler password with unequal values
+          include_role:
+            name: ../roles/provision
+            tasks_from: "{{ item }}"
+          with_items:
+           - "{{ cobbler_image_files }}"
+          vars:
+            no_prompt: true
+            admin_password: "{{ boundary_password }}"
+            admin_password_confirm: "{{ lengthy_password }}"
+      rescue:
+        - name: Validate failure message
+          assert:
+            that:  fail_msg_pwd_confirm in msg_pwd_confirm.msg
+            success_msg: "{{ validate_password_success_msg }}"
+            fail_msg: "{{ validate_password_fail_msg }}"
+      tags: TC_006
+
+# Testcase OMNIA_DIO_US_CDIP_TC_007
+# Execute provision role in management station where the docker service is not running
+- name: OMNIA_DIO_US_CDIP_TC_007
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_007
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_007
+
+    - name: Stop docker service
+      service:
+        name: docker
+        state: stopped
+      tags: TC_007
+
+    - block:
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+          vars:
+            no_prompt: true
+            admin_password: "{{ boundary_password }}"
+            admin_password_confirm: "{{ boundary_password }}"
+
+        - name: Docker service stopped usecase fail message
+          fail:
+            msg: "{{ docker_check_fail_msg }}"
+      rescue:
+        - name: Docker service stopped usecase success message
+          debug:
+            msg: "{{ docker_check_success_msg }}"
+      always:
+        - name: Start docker service
+          service:
+            name: docker
+            state: started
+      tags: TC_007
+
+# Testcase OMNIA_DIO_US_CDIP_TC_008
+# Execute provision role in management station with CentOS 8.2 installed
+- name: OMNIA_DIO_US_CDIP_TC_008
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_008
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_008
+
+    - block:
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+            tasks_from: "{{ item }}"
+          with_items:
+           - "{{ cobbler_image_files }}"
+          vars:
+            no_prompt: true
+            admin_password: "{{ boundary_password }}"
+            admin_password_confirm: "{{ boundary_password }}"
+      tags: TC_008
+
+    - name: Inspect cobbler docker image
+      docker_image_info:
+        name: "{{ docker_image_name }}"
+      register: cobbler_image_status
+      tags: TC_008
+
+    - name: Validate cobbler docker image
+      assert:
+        that:
+          - cobbler_image_status.images
+        fail_msg: "{{ cobbler_img_fail_msg }}"
+        success_msg: "{{ cobbler_img_success_msg }}"
+      tags: TC_008
+
+    - name: Inspect cobbler container
+      docker_container_info:
+        name: "{{ docker_container_name }}"
+      register: cobbler_cnt_status
+      tags: TC_008
+
+    - name: Validate cobbler docker container
+      assert:
+        that:
+          - cobbler_cnt_status.exists
+        fail_msg: "{{ cobbler_cnt_fail_msg }}"
+        success_msg: "{{ cobbler_cnt_success_msg }}"
+      tags: TC_008
+
+    - name: Validate first NIC is not assigned to public internet
+      shell: |
+        set -o pipefail
+        ip route get 8.8.8.8 | awk '{print $5}'
+      register: nic_output
+      args:
+        executable: /bin/bash
+      failed_when: first_nic in nic_output.stdout
+      changed_when: false
+      tags: TC_008
+
+    - name: "Validate NIC-1 is assigned to IP {{ nic1_ip_address }}"
+      assert:
+        that: "'{{ nic1_ip_address }}' in ansible_eno1.ipv4.address"
+        fail_msg: "{{ nic_check_fail_msg }}"
+        success_msg: "{{ nic_check_success_msg }}"
+      tags: TC_008
+
+# Testcase OMNIA_DIO_US_CDIP_TC_009
+# Execute provision role in management station where the cobbler container and image are already created
+- name: OMNIA_DIO_US_CDIP_TC_009
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - block:
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+          vars:
+            no_prompt: true
+            username: "{{ cobbler_username }}"
+            admin_password: "{{ boundary_password }}"
+            admin_password_confirm: "{{ boundary_password }}"
+      tags: TC_009
+
+    - name: Inspect cobbler docker image
+      docker_image_info:
+        name: "{{ docker_image_name }}"
+      register: cobbler_image_status
+      tags: TC_009
+
+    - name: Validate cobbler docker image
+      assert:
+        that:
+          - cobbler_image_status.images
+        fail_msg: "{{ cobbler_img_fail_msg }}"
+        success_msg: "{{ cobbler_img_success_msg }}"
+      tags: TC_009
+
+    - name: Inspect cobbler container
+      docker_container_info:
+        name: "{{ docker_container_name }}"
+      register: cobbler_cnt_status
+      tags: TC_009
+
+    - name: Validate cobbler docker container
+      assert:
+        that:
+          - cobbler_cnt_status.exists
+        fail_msg: "{{ cobbler_cnt_fail_msg }}"
+        success_msg: "{{ cobbler_cnt_success_msg }}"
+      tags: TC_009
+
+    - name: Validate first NIC is not assigned to public internet
+      shell: |
+        set -o pipefail
+        ip route get 8.8.8.8 | awk '{print $5}'
+      register: nic_output
+      args:
+        executable: /bin/bash
+      failed_when: first_nic in nic_output.stdout
+      changed_when: false
+      tags: TC_009
+
+    - name: "Validate NIC-1 is assigned to IP {{ nic1_ip_address }}"
+      assert:
+        that: "'{{ nic1_ip_address }}' in ansible_eno1.ipv4.address"
+        fail_msg: "{{ nic_check_fail_msg }}"
+        success_msg: "{{ nic_check_success_msg }}"
+      tags: TC_009
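
TC_001 through TC_006 expect the provision role to register its password checks in msg_pwd_format and msg_pwd_confirm. The role's own tasks are not shown in this diff; a hedged sketch of what such checks could look like (names and the minimum length are assumptions inferred from the test vars, not the role's code):

# Assumed shape of the password checks exercised by TC_001 - TC_006;
# the real provision role may differ.
- name: Validate admin password format
  assert:
    that:
      - admin_password | length >= 8
    success_msg: "Password format validated"
    fail_msg: "Password format validation failed"
  register: msg_pwd_format

- name: Validate that both password entries match
  assert:
    that:
      - admin_password == admin_password_confirm
    success_msg: "Password confirmation validated"
    fail_msg: "Password and confirm password do not match"
  register: msg_pwd_confirm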

+ 245 - 0
appliance/test/test_provision_ndod.yml

@@ -0,0 +1,245 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_DIO_US_NDOD_TC_013
+# Execute provision role in management station and PXE boot one compute node
+- name: OMNIA_DIO_US_NDOD_TC_013
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_provision_vars.yml
+  tasks:
+    - name: Set ip address of the compute node
+      set_fact:
+        single_node_ip_address: "{{ groups[cobbler_groupname][0] }}"
+      tags: TC_013
+
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      tags: TC_013
+
+    - name: Create inventory file
+      lineinfile:
+        path: inventory
+        line: "{{ single_node_ip_address }} ansible_user=root ansible_password={{ boundary_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+        create: yes
+        mode: '{{ file_permission }}'
+      tags: TC_013
+
+    - meta: refresh_inventory
+      tags: TC_013
+
+    - name: Validate authentication of username and password
+      command: ansible {{ single_node_ip_address }} -m ping -i inventory
+      register: validate_login
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_013
+
+    - name: Validate the authentication output
+      assert:
+        that:
+          - "'pong' in validate_login.stdout"
+          - "'SUCCESS' in validate_login.stdout"
+          - "'UNREACHABLE' not in validate_login.stdout"
+        fail_msg: "{{ authentication_fail_msg }}"
+        success_msg: "{{ authentication_success_msg }}"
+      tags: TC_013
+
+    - name: Check hostname
+      command: ansible {{ single_node_ip_address }} -m shell -a hostname -i inventory
+      register: validate_hostname
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_013
+
+    - name: Validate the hostname
+      assert:
+        that: "'localhost' not in validate_hostname.stdout"
+        fail_msg: "{{ hostname_fail_msg }}"
+        success_msg: "{{ hostname_success_msg }}"
+      tags: TC_013
+
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      tags: TC_013
+
+# OMNIA_DIO_US_NDOD_TC_014
+# Execute provision role in management station and PXE boot two compute nodes
+- name: OMNIA_DIO_US_NDOD_TC_014
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      tags: TC_014
+
+    - name: Create inventory file
+      lineinfile:
+        path: inventory
+        line: "[nodes]"
+        create: yes
+        mode: '{{ file_permission }}'
+      tags: TC_014
+
+    - name: Edit inventory file
+      lineinfile:
+        path: inventory
+        line: "{{ item }} ansible_user=root ansible_password={{ boundary_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+      with_items:
+        - "{{ groups[cobbler_groupname] }}"
+      tags: TC_014
+
+    - meta: refresh_inventory
+      tags: TC_014
+
+    - name: Validate ip address is different for both servers
+      assert:
+        that: groups[cobbler_groupname][0] != groups[cobbler_groupname][1]
+        fail_msg: "{{ ip_address_fail_msg }}"
+        success_msg: "{{ ip_address_success_msg }}"
+      delegate_to: localhost
+      run_once: yes
+      tags: TC_014
+
+    - name: Check hostname of both servers
+      command: ansible nodes -m shell -a hostname -i inventory
+      register: node_hostname
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_014
+
+    - name: Validate hostname is different for both servers
+      assert:
+        that:
+          - node_hostname.stdout_lines[1] != node_hostname.stdout_lines[3]
+          - "'localhost' not in node_hostname.stdout_lines[1]"
+          - "'localhost' not in node_hostname.stdout_lines[3]"
+        fail_msg: "{{ hostname_fail_msg }}"
+        success_msg: "{{ hostname_success_msg }}"
+      delegate_to: localhost
+      run_once: yes
+      tags: TC_014
+
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      delegate_to: localhost
+      run_once: yes
+      tags: TC_014
+
+# OMNIA_DIO_US_NDOD_TC_015
+# Validate whether passwordless SSH connection is established with the compute nodes
+- name: OMNIA_DIO_US_NDOD_TC_015
+  hosts: localhost
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Validate authentication of username and password
+      command: "ansible {{ cobbler_groupname }} -m ping -i cobbler_inventory"
+      register: validate_login
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_015
+
+    - name: Validate the passwordless SSH connection
+      assert:
+        that:
+          - "'pong' in validate_login.stdout"
+          - "'SUCCESS' in validate_login.stdout"
+          - "'UNREACHABLE' not in validate_login.stdout"
+        success_msg: "{{ authentication_success_msg }}"
+        fail_msg: "{{ authentication_fail_msg }}"
+      tags: TC_015
+
+# OMNIA_DIO_US_NDOD_TC_016
+# Execute provision role in management station and reboot the compute node after OS provisioning
+- name: OMNIA_DIO_US_NDOD_TC_016
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_provision_vars.yml
+  tasks:
+    - name: Set ip address of the compute node
+      set_fact:
+        single_node_ip_address: "{{ groups[cobbler_groupname][0] }}"
+      tags: TC_016
+
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      tags: TC_016
+
+    - name: Create inventory file
+      lineinfile:
+        path: inventory
+        line: "[nodes]"
+        create: yes
+        mode: '{{ file_permission }}'
+      tags: TC_016
+
+    - name: Edit inventory file
+      lineinfile:
+        path: inventory
+        line: "{{ single_node_ip_address }} ansible_user=root ansible_password={{ boundary_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+      tags: TC_016
+
+    - meta: refresh_inventory
+      tags: TC_016
+
+    - name: Reboot servers
+      command: ansible nodes -m command -a reboot -i inventory
+      ignore_errors: yes
+      changed_when: true
+      tags: TC_016
+
+    - name: Wait for 10 minutes
+      pause:
+        minutes: 10
+      tags: TC_016
+
+    - name: Check ip address of servers
+      command: ansible nodes -m command -a 'ip a' -i inventory
+      ignore_errors: yes
+      changed_when: false
+      register: ip_address_after_reboot
+      tags: TC_016
+
+    - name: Validate ip address is same after reboot
+      assert:
+        that: "'{{ single_node_ip_address }}' in ip_address_after_reboot.stdout"
+        fail_msg: "{{ ip_address_fail_msg }}"
+        success_msg: "{{ ip_address_success_msg }}"
+      tags: TC_016
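
TC_016 above waits a fixed 10 minutes for the rebooted node to return. As an alternative sketch only (not what this commit does), the wait could instead poll SSH on the node with an upper bound:

# Sketch of an alternative to the fixed pause: poll port 22 on the rebooted
# node for up to 10 minutes. single_node_ip_address is set earlier in the play.
- name: Wait for the compute node to come back after reboot
  wait_for:
    host: "{{ single_node_ip_address }}"
    port: 22
    delay: 30
    timeout: 600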

+ 72 - 0
appliance/test/test_vars/test_provision_vars.yml

@@ -0,0 +1,72 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Usage: test_provision_cdip.yml
+empty_password: ""
+lengthy_password: "a1b2c3d4e5f6g7h8i9j10k11"
+whitespace_password: "hello world 123"
+special_character_password: "hello@123#%"
+first_nic: "eno1"
+nic1_ip_address: 172.17.0.1
+validate_password_success_msg: "Password validation successful"
+validate_password_fail_msg: "Password validation failed"
+cobbler_img_fail_msg: "Docker image cobbler does not exist"
+cobbler_img_success_msg: "Docker image cobbler exists"
+cobbler_cnt_fail_msg: "Docker container cobbler does not exist"
+cobbler_cnt_success_msg: "Docker container cobbler exists"
+nic_check_fail_msg: "NIC-1 ip address validation failed"
+nic_check_success_msg: "NIC-1 ip address validation successful"
+cobbler_image_files:
+ - configure_nic
+ - check_prerequisites
+ - mount_iso
+ - firewall_settings
+ - provision_password
+ - cobbler_image
+
+# Usage: test_provision_cc.yml
+docker_check_success_msg: "Docker service stopped usecase validation successful"
+docker_check_fail_msg: "Docker service stopped usecase validation failed"
+docker_ip_fail_msg: "Docker IP validation failed"
+docker_ip_success_msg: "Docker IP validation successful"
+cobbler_version_fail_msg: "Cobbler version validation failed"
+cobbler_version_success_msg: "Cobbler version validation successful"
+cobbler_check_fail_msg: "Cobbler check validation failed"
+cobbler_check_success_msg: "Cobbler check validation successful"
+cobbler_sync_fail_msg: "Cobbler sync validation failed"
+cobbler_sync_success_msg: "Cobbler sync validation successful"
+cobbler_distro_list_fail_msg: "Cobbler distro list validation failed"
+cobbler_distro_list_success_msg: "Cobbler distro list validation successful"
+cobbler_profile_list_fail_msg: "Cobbler profile list validation failed"
+cobbler_profile_list_success_msg: "Cobbler profile list validation successful"
+kickstart_file_fail_msg: "Kickstart file validation failed"
+kickstart_file_success_msg: "Kickstart file validation successful"
+cobbler_reboot_fail_msg: "Cobbler container failed to start after reboot"
+cobbler_reboot_success_msg: "Cobbler container started successfully after reboot"
+kickstart_filename: "centos8.ks"
+
+# Usage: test_provision_cdip.yml, test_provision_cc.yml, test_provision_ndod.yml
+docker_container_name: "cobbler"
+boundary_password: "testpass"
+
+# Usage: test_provision_ndod.yml
+hostname_fail_msg: "Hostname validation failed"
+hostname_success_msg: "Hostname validation successful"
+authentication_fail_msg: "Server authentication validation failed"
+authentication_success_msg: "Server authentication validation successful"
+ip_address_fail_msg: "IP address validation failed"
+ip_address_success_msg: "IP address validation successful"
+cobbler_groupname: "cobbler_servers"
+file_permission: "0644"

+ 28 - 14
slurm/roles/common/handlers/main.yml

@@ -1,26 +1,40 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
 ---
-- name: restart ntpd
+
+- name: Restart ntpd
   service:
-    name=ntpd
-    state=restarted
-    enabled=yes
+    name: ntpd
+    state: restarted
+    enabled: yes
 
-- name: restart chrony
+- name: Restart chrony
   service:
-    name=chronyd
-    state=restarted
-    enabled=yes
+    name: chronyd
+    state: restarted
+    enabled: yes
 
-- name: sync ntp clocks
+- name: Sync ntp clocks
   command: ntpdc -np
   register: ntp_clock
   until:  ntp_clock.stdout.find('*') > -1
-  retries: 10
-  delay: 60
+  retries: "{{ retry_count_one }}"
+  delay: "{{ delay_count_one }}"
 
-- name: sync chrony sources
+- name: Sync chrony sources
   command: chronyc sources
   register: chrony_src
   until:  chrony_src.stdout.find('^*') > -1
-  retries: 6
-  delay: 10
+  retries: "{{ retry_count }}"
+  delay: "{{ delay_count }}"
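
Ansible resolves notify entries against handler names (or listen topics) by exact string match, so after this rename the notify lists in slurm/roles/common/tasks/ntp.yml, which still use the old lowercase names, need to reference the new names for these handlers to fire. A sketch of a matching pair, assuming the renamed handlers above:

# Sketch only: a task whose notify entries match the renamed handlers.
- name: Update ntp servers
  template:
    src: ntp.conf.j2
    dest: /etc/ntp.conf
  notify:
    - Restart ntpd
    - Sync ntp clocks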

+ 17 - 2
slurm/roles/common/tasks/main.yml

@@ -1,2 +1,17 @@
-- name: deploy time ntp/chrony
-  include_tasks: ntp.yml
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Deploy time ntp/chrony
+  include_tasks: ntp.yml

+ 27 - 13
slurm/roles/common/tasks/ntp.yml

@@ -1,42 +1,56 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
 ---
 
-  - name: deploy ntp servers
+  - name: Deploy ntp servers
     block:
-      - name: deploy ntpd
+      - name: Deploy ntpd
         package:
           name: ntp
           state: present
-      - name: deploy ntpdate
+      - name: Deploy ntpdate
         package:
           name: ntpdate
           state: present
-      - name: update ntp servers
+      - name: Update ntp servers
         template:
           src: ntp.conf.j2
-          dest: /etc/ntp.conf
+          dest: "{{ ntp_path }}"
           owner: root
           group: root
-          mode: u=rw,g=r,o=r
+          mode: "{{ ntp_mode }}"
           backup: yes
         notify:
           - restart ntpd
           - sync ntp clocks
-    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version | int < 8
-  - name:   deploy chrony server
+    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  < os_higher_version
+
+  - name: Deploy chrony server
     block:
-      - name: deploy chrony
+      - name: Deploy chrony
         package:
             name: chrony
             state: present
-      - name: update ntp servers
+      - name: Update ntp servers
         template:
           src: chrony.conf.j2
-          dest: /etc/chrony.conf
+          dest: "{{ chrony_path }}"
           owner: root
           group: root
-          mode: u=rw,g=r,o=r
+          mode: "{{ ntp_mode }}"
           backup: yes
         notify:
           - restart chrony
           - sync chrony sources
-    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version | int > 7
+    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  > os_version

+ 26 - 1
slurm/roles/common/vars/main.yml

@@ -1,6 +1,31 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+chrony_path: "/etc/chrony.conf"
+ntp_path: "/etc/ntp.conf"
+ntp_mode: "0644"
+os_higher_version: "8"
+os_version: "7"
+retry_count_one: "10"
+delay_count_one: "60"
+retry_count: "6"
+delay_count: "10"
+
 ntp_servers: 
   - 0.centos.pool.ntp.org
   - 1.centos.pool.ntp.org
   - 2.centos.pool.ntp.org
 chrony_servers:
-  - 2.centos.pool.ntp.org 
+  - 2.centos.pool.ntp.org

+ 0 - 104
slurm/roles/slurm-common/tasks/main.yml

@@ -1,104 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
----
-
-- name: install packages for slurm
-  package:
-    name:
-      - munge
-      - mariadb
-      - mariadb-devel
-      - python3
-    state: present
-  tags: install
-
-- name: create munge key
-  command: /usr/sbin/create-munge-key -f
-  tags: install
-
-- name: Copy munge key
-  copy:
-    src: munge.key
-    dest: /etc/munge
-    owner: munge
-    group: munge
-    mode: 0400
-  tags: install
-
-- name: Copy example Slurm Configuration - slurm.conf
-  copy:
-    src: slurm.conf
-    dest: /etc/slurm/
-    mode: 0644
-  tags: install
-
-
-- name: create SLURM Group
-  group:
-    name: slurm
-    state: present
-  tags: install
-
-- name: Add the user 'slurm' with uid 6001 and a primary group of 'slurm'
-  user:
-    name: slurm
-    comment: Slurm User Account
-    uid: 6001
-    group: slurm
-  tags: install
-
-- name: create SLURM log directory
-  file:
-    path: /var/log/slurm
-    state: directory
-    owner: slurm
-    group: slurm
-    mode: 0755
-    recurse: yes
-  tags: install
-
-- name: give slurm user permission to spool
-  file:
-    path: /var/spool/slurm
-    owner: slurm
-    group: slurm
-    state: directory
-    mode: 0755
-    recurse: yes
-
-- name: give slurm user permission to slurmctld
-  file:
-    path: /var/run/slurmctld.pid
-    owner: slurm
-    group: slurm
-    mode: 0755
-    state: touch
-
-- name: give slurm user permission to slurmd
-  file:
-    path: /var/run/slurmd.pid
-    owner: slurm
-    group: slurm
-    mode: 0755
-    state: touch
-
-- name: start munge service
-  service:
-    name: munge
-    state: restarted
-    enabled: yes
-  tags: install
-
-
-

+ 0 - 118
slurm/roles/slurm-manager/tasks/main.yml

@@ -1,118 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
----
-- name: create download folder
-  file:
-    path: /root/Downloads
-    state: directory
-    mode: '0755'
-- name: Download Slurm source
-  get_url:
-    url: "{{ slurm_url }}"
-    dest: /root/Downloads/
-    checksum: "{{ slurm_md5 }}"
-    validate_certs: no    
-  tags: install
-
-- name: Build SLURM RPMs
-  command: rpmbuild -ta /root/Downloads/slurm-20.02.0.tar.bz2
-  tags: install
-
-- name: Copy RPMs to NFS share
-  copy:
-    src: "{{ item }}"
-    dest: /home/rpms/
-    mode: '0755'
-  with_fileglob:
-    - /root/rpmbuild/RPMS/x86_64/slurm*20*.rpm
-  tags: install
-
-- name: Install SLURM RPMs on Manager
-  yum:
-    name: "{{ item }}"
-    #name: "{{ query('fileglob', ['/home/rpms/slurm*20*.rpm']) }}" <-- how it should work to avoid loop
-  with_fileglob:
-    - /home/rpms/slurm*20*.rpm
-  tags: install
-
-- name: Firewall Rule slurm allow 6817/tcp
-  command: firewall-cmd  --zone=internal --add-port=6817/tcp --permanent
-  tags: install
-
-- name: Firewall Rule slurm allow 6818/tcp
-  command: firewall-cmd  --zone=internal --add-port=6818/tcp --permanent
-  tags: install
-
-- name: Firewall Rule slurm allow 6819/tcp
-  command: firewall-cmd  --zone=internal --add-port=6819/tcp --permanent
-  tags: install
-
-- name: Firewall Rule slurm allow all incoming traffic on internal network
-  command: firewall-cmd --permanent --zone=internal --add-rich-rule='rule family="ipv4" source address="192.168.1.0/24" accept'
-  tags: install
-
-- name: Firewall Reload
-  command: firewall-cmd  --reload
-  tags: install
-
-
-- name: Start MariaDB
-  service:
-    name: mariadb
-    state: restarted
-    enabled: yes
-  tags: install
-
-- name: Grant Permissions for SLURM DB
-  command: mysql -u root -e "GRANT ALL ON slurm_acct_db.* TO 'slurm'@'localhost' identified by 'password' with grant option;"
-  tags: install
-
-- name: Create slurmdbd.conf file
-  copy:
-    src: /etc/slurm/slurmdbd.conf.example
-    dest: /etc/slurm/slurmdbd.conf
-    mode: 0600
-  tags: install
-
-- name: Populate Accounting Database
-  command: slurmdbd
-  tags: install
-
-- name: Create Slurm Cluster
-  command: sacctmgr -i add cluster {{ inventory_hostname }}
-  tags: install
-
-- name: Create Default Slurm Group
-  command: sacctmgr -i add account defaultgroup Cluster={{inventory_hostname}} Description="Default Account" Organization="Default Org"
-  tags: install
-
-- name: Add root to the Default Account
-  command: sacctmgr -i add user root DefaultAccount=defaultgroup
-  tags: install
-
-- name: Start slurmctld on Manager
-  service:
-    name: slurmctld
-    state: restarted
-    enabled: yes
-  tags: install
-
-- name: Enable Slurmdbd on Manager
-  service:
-    name: slurmdbd
-    state: restarted
-    enabled: yes
-  tags: install
-

slurm/roles/slurm-common/files/munge.key → slurm/roles/slurm_common/files/munge.key


+ 14 - 14
slurm/roles/slurm-common/files/slurm.conf

@@ -8,25 +8,25 @@
 #
 # See the slurm.conf man page for more information.
 #
-ClusterName=friday
-ControlMachine=friday
-ControlAddr=10.0.0.1
+ClusterName=
+ControlMachine=
+#ControlAddr=
 #BackupController=
 #BackupAddr=
 #
-SlurmUser=slurm
+SlurmUser=
 #SlurmdUser=root
-SlurmctldPort=6817
-SlurmdPort=6818
+SlurmctldPort=
+SlurmdPort=
 AuthType=auth/munge
 #JobCredentialPrivateKey=
 #JobCredentialPublicCertificate=
-StateSaveLocation=/var/spool/slurm/ctld
-SlurmdSpoolDir=/var/spool/slurm/
+#StateSaveLocation=/var/spool/
+SlurmdSpoolDir=
 SwitchType=switch/none
 MpiDefault=none
-SlurmctldPidFile=/var/run/slurmctld.pid
-SlurmdPidFile=/var/run/slurmd.pid
+SlurmctldPidFile=
+SlurmdPidFile=
 ProctrackType=proctrack/pgid
 #PluginDir=
 #FirstJobId=
@@ -72,9 +72,9 @@ PriorityMaxAge=14-0
 #
 # LOGGING
 SlurmctldDebug=3
-SlurmctldLogFile=/var/log/slurm/slurmctld.log
+SlurmctldLogFile=
 SlurmdDebug=1
-SlurmdLogFile=/var/log/slurm/slurmd.log
+SlurmdLogFile=
 JobCompType=jobcomp/none
 #JobCompLoc=
 #
@@ -91,7 +91,7 @@ AccountingStorageType=accounting_storage/slurmdbd
 # COMPUTE NODES
 #NodeName=linux[1-32] Procs=1 State=UNKNOWN
 #NodeName=DEFAULT Sockets=2 CoresPerSocket=20 State=UNKNOWN
-NodeName=compute000 Sockets=2 CoresPerSocket=8
-NodeName=compute[002-005] CoresPerSocket=20
+NodeName= Sockets= CoresPerSocket=
+#NodeName=compute[002-005] CoresPerSocket=20
 PartitionName=normal Nodes=ALL Default=YES MaxTime=INFINITE State=UP
 #PartitionName=debug Nodes=ALL Default=YES MaxTime=INFINITE State=UP

+ 164 - 0
slurm/roles/slurm_common/tasks/main.yml

@@ -0,0 +1,164 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Install epel repository
+  package:
+    name: epel-release
+    state: present
+  tags: install
+
+- name: Munge installation
+  package:
+    name: munge-devel
+    enablerepo: PowerTools
+    state: present
+
+- name: Install packages for slurm
+  package:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - "{{ common_packages }}"
+  tags: install
+
+- name: Create munge key
+  command: "{{ munge_cmd }}"
+  changed_when: true
+
+- name: Copy munge key
+  copy:
+    src: munge.key
+    dest: "{{ munge_dest }}"
+    owner: munge
+    group: munge
+    mode: "{{ munge_mode }}"
+  tags: install
+
+- name: Slurm configuration - slurm.conf
+  copy:
+    src: slurm.conf
+    dest: "{{ slurm_dest }}"
+    mode: "{{ slurm_mode }}"
+  tags: install
+
+- name: Add cluster name
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "ClusterName="
+    line: "ClusterName={{ cluster_name }}"
+
+- name: Add slurm user name
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "SlurmUser="
+    line: "SlurmUser={{ slurm_user }}"
+
+- name: Add slurmctld port no
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "SlurmctldPort="
+    line: "SlurmctldPort={{ slurmctld_port }}"
+
+- name: Add slurmd port no
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "SlurmdPort="
+    line: "SlurmdPort={{ slurmd_port }}"
+
+- name: Add spool path
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "SlurmdSpoolDir="
+    line: "SlurmdSpoolDir={{ spool_pth }}"
+
+- name: Add slurmctld pid file path
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "SlurmctldPidFile="
+    line: "SlurmctldPidFile={{ slurmctld_pid }}"
+
+- name: Add slurmd pid file path
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "SlurmdPidFile="
+    line: "SlurmdPidFile={{ slurmd_pid }}"
+
+- name: Add slurmctld log file path
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "SlurmctldLogFile="
+    line: "SlurmctldLogFile={{ slurmctld_log }}"
+
+- name: Add slurmd log file path
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "SlurmdLogFile="
+    line: "SlurmdLogFile={{ slurmd_log }}"
+
+- name: Create slurm group
+  group:
+    name: slurm
+    state: present
+  tags: install
+
+- name: Add the user 'slurm' with uid 6001 and a primary group of 'slurm'
+  user:
+    name: slurm
+    comment: Slurm User Account
+    uid: "{{ slurm_uid }}"
+    group: slurm
+  tags: install
+
+- name: Create slurm log directory
+  file:
+    path: "{{ slurm_logpth }}"
+    state: directory
+    owner: slurm
+    group: slurm
+    mode: "{{ gen_mode }}"
+    recurse: yes
+  tags: install
+
+- name: Give slurm user permission to spool
+  file:
+    path: "{{ spool_pth }}"
+    owner: slurm
+    group: slurm
+    state: directory
+    mode: "{{ gen_mode }}"
+    recurse: yes
+
+- name: Give slurm user permission to slurmctld
+  file:
+    path: "{{ slurmctld_pid }}"
+    owner: slurm
+    group: slurm
+    mode: "{{ gen_mode }}"
+    state: touch
+
+- name: Give slurm user permission to slurmd
+  file:
+    path: "{{ slurmd_pid }}"
+    owner: slurm
+    group: slurm
+    mode: "{{ gen_mode }}"
+    state: touch
+
+- name: Start munge service
+  service:
+    name: munge
+    state: restarted
+    enabled: yes
+  tags: install
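
The slurm.conf keys above are each filled in by a dedicated lineinfile task. As a design-choice sketch only (the commit keeps the explicit per-key tasks), the same result could be produced by one looped task:

# Alternative sketch, not part of this commit: populate several slurm.conf
# keys with a single looped lineinfile task.
- name: Populate slurm.conf values
  lineinfile:
    path: "{{ slurm_confpth }}"
    regexp: "{{ item.key }}="
    line: "{{ item.key }}={{ item.value }}"
  loop:
    - { key: "ClusterName", value: "{{ cluster_name }}" }
    - { key: "SlurmUser", value: "{{ slurm_user }}" }
    - { key: "SlurmctldPort", value: "{{ slurmctld_port }}" }
    - { key: "SlurmdPort", value: "{{ slurmd_port }}" }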

+ 42 - 0
slurm/roles/slurm_common/vars/main.yml

@@ -0,0 +1,42 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+epel_url: https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
+
+common_packages:
+   - munge
+   - munge-libs
+   - mariadb-server
+   - mariadb-devel
+   - python3
+
+munge_dest: "/etc/munge/"
+munge_cmd: "/usr/sbin/create-munge-key -f"
+munge_mode: "0400"
+slurm_mode: "0644"
+slurm_dest: "/etc/slurm/"
+slurm_confpth: "/etc/slurm/slurm.conf"
+slurm_user: "slurm"
+slurmctld_port: "6817"
+slurmd_port: "6818"
+slurm_uid: "6001"
+slurm_logpth: "/var/log/slurm/"
+gen_mode: "0755"
+spool_pth: "/var/spool/slurm/"
+slurmctld_pid: "/var/run/slurmctld.pid"
+slurmd_pid: "/var/run/slurmd.pid"
+cluster_name: "manager,compute"
+slurmctld_log: "/var/log/slurm/slurmctld.log"
+slurmd_log: "/var/log/slurm/slurmd.log"

+ 38 - 0
slurm/roles/slurm_manager/files/slurmdbd.conf

@@ -0,0 +1,38 @@
+#
+# Example slurmdbd.conf file.
+#
+# See the slurmdbd.conf man page for more information.
+#
+# Archive info
+#ArchiveJobs=yes
+#ArchiveDir="/tmp"
+#ArchiveSteps=yes
+#ArchiveScript=
+#JobPurge=12
+#StepPurge=1
+#
+# Authentication info
+AuthType=auth/munge
+#AuthInfo=/var/run/munge/munge.socket.2
+#
+# slurmDBD info
+DbdAddr=
+DbdHost=
+#DbdPort=7031
+SlurmUser=
+#MessageTimeout=300
+DebugLevel=verbose
+#DefaultQOS=normal,standby
+LogFile=
+PidFile=
+#PluginDir=/usr/lib/slurm
+#PrivateData=accounts,users,usage,jobs
+#TrackWCKey=yes
+#
+# Database info
+StorageType=accounting_storage/mysql
+#StorageHost=
+#StoragePort=
+#StoragePass=
+#StorageUser=
+#StorageLoc=

+ 174 - 0
slurm/roles/slurm_manager/tasks/main.yml

@@ -0,0 +1,174 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Install packages for slurm
+  package:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - "{{ slurm_packages }}"
+  tags: install
+
+- name: Install development tools
+  package:
+    name: "{{ item }}"
+    enablerepo: PowerTools
+    state: present
+  with_items:
+    - "{{ dev_tools }}"
+  tags: install
+
+- name: Create temporary download folder for slurm
+  file:
+    path: "{{ tmp_path }}"
+    owner: slurm
+    group: slurm
+    mode: "{{ tmp_mode }}"
+    state: directory
+
+- name: Download slurm source
+  get_url:
+    url: "{{ slurm_url }}"
+    dest: "{{ tmp_path }}"
+    checksum: "{{ slurm_md5 }}"
+    validate_certs: no
+  tags: install
+
+- name: Build slurm rpms
+  command: rpmbuild -ta "{{ rpmbuild_path }}"
+  changed_when: false
+  args:
+    warn: no
+
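+# The next two tasks install the freshly built RPMs only when Slurm is not
+# already present; querying the base package name "slurm" is an assumption
+# about what rpmbuild produced above.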
+- name: Check whether slurm is already installed
+  command: rpm -q slurm
+  ignore_errors: true
+  register: verify_result
+  changed_when: no
+  failed_when: no
+  args:
+    warn: no
+
+- name: Install rpms
+  shell: rpm -Uvh ~{{ rpm_loop }}
+  args:
+    chdir: "{{ rpm_path }}"
+    warn: no
+  when: verify_result.rc != 0
+
+- name: Add control machine name
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "ControlMachine="
+    line: "ControlMachine={{ group_names[0] }}"
+
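+# Open the Slurm daemon ports defined in vars (slurmctld/slurmd plus the
+# additional TCP/UDP ports) in the internal firewalld zone.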
+- name: Firewall rule for slurm - tcp/ip,udp
+  firewalld:
+    zone: internal
+    port: "{{ item }}"
+    permanent: true
+    state: enabled
+  with_items:
+    - "{{ tcp_port1 }}"
+    - "{{ tcp_port2 }}"
+    - "{{ tcp_port3 }}"
+    - "{{ tcp_port4 }}"
+    - "{{ udp_port1 }}"
+    - "{{ udp_port2 }}"
+  tags: install
+
+- name: Get network address/subnet mask through ipaddr
+  set_fact:
+    network_address: "{{ (ansible_default_ipv4.network + '/' + ansible_default_ipv4.netmask) | ipaddr('network/prefix') }}"
+
+- name: Firewall rule slurm - allow all incoming traffic on internal network
+  firewalld:
+    zone: internal
+    rich_rule: 'rule family="{{ family }}" source address="{{ network_address }}" accept'
+    permanent: true
+    state: enabled
+  tags: install
+
+- name: Firewall reload
+  systemd:
+    name: firewalld
+    state: reloaded
+  tags: install
+
+- name: Start mariadb
+  service:
+    name: mariadb
+    state: restarted
+    enabled: yes
+  tags: install
+
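+# The accounting database grant below uses a placeholder password; replace it
+# with a site-specific secret before production use.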
+- name: Grant permissions for slurm db
+  command: mysql -u root -e "GRANT ALL ON slurm_acct_db.* TO '{{ db_user }}'@'{{ db_host }}' IDENTIFIED BY 'password' WITH GRANT OPTION;"
+  tags: install
+  changed_when: true
+
+- name: Create slurmdbd.conf file
+  copy:
+    src: slurmdbd.conf
+    dest: "{{ slurmdbd_path }}"
+    mode: "{{ slurmdbd_mode }}"
+  tags: install
+
+- name: Add slurm user name
+  lineinfile:
+    path: "{{ slurmdbd_path }}"
+    regexp: "SlurmUser="
+    line: "SlurmUser={{ slurm_user }}"
+
+- name: Add db address
+  lineinfile:
+    path: "{{ slurmdbd_path }}"
+    regexp: "DbdAddr="
+    line: "DbdAddr={{ DbdAddr }}"
+
+- name: Add db host
+  lineinfile:
+    path: "{{ slurmdbd_path }}"
+    regexp: "DbdHost="
+    line: "DbdHost={{ DbdHost }}"
+
+- name: Add log file path
+  lineinfile:
+    path: "{{ slurmdbd_path }}"
+    regexp: "LogFile="
+    line: "LogFile={{ logfile }}"
+
+- name: Add pid file path
+  lineinfile:
+    path: "{{ slurmdbd_path }}"
+    regexp: "PidFile="
+    line: "PidFile={{ pidfile }}"
+
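+# Invoking slurmdbd once creates the accounting tables in slurm_acct_db using
+# the configuration written above.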
+- name: Populate accounting database
+  command: slurmdbd
+  tags: install
+  changed_when: true
+
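+# Fetch the manager's slurm.conf to buffer_path on the control node so later
+# plays can distribute the same file to the compute nodes.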
+- name: Save slurm conf file in buffer
+  fetch:
+    src: "{{ slurm_confpth }}"
+    dest: "{{ buffer_path }}"
+    flat: true

+ 62 - 0
slurm/roles/slurm_manager/vars/main.yml

@@ -0,0 +1,62 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+slurm_packages:
+   - python3
+   - gcc
+   - openssl
+   - openssl-devel
+   - numactl
+   - numactl-devel
+   - hwloc
+   - lua
+   - readline
+   - readline-devel
+   - pam-devel
+   - perl-ExtUtils-MakeMaker
+   - cpanm*
+   - rpm-build
+
+dev_tools:
+   - rrdtool-devel
+   - lua-devel
+   - hwloc-devel
+
+tmp_path: "/root/slurm-tmp"
+tmp_mode: "0755"
+slurm_url: https://download.schedmd.com/slurm/slurm-20.02.3.tar.bz2
+slurm_md5: "md5:c71a300d6c5d33ef8ca60e52a203bb1e"
+rpmbuild_path: "/root/slurm-tmp/slurm-20.02.3.tar.bz2"
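+# rpm_loop is expanded relative to ~ (the build user's home directory) when
+# the built RPMs are installed.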
+rpm_loop: "/rpmbuild/RPMS/x86_64/*.rpm"
+tcp_port1: "6817/tcp"
+tcp_port2: "6818/tcp"
+tcp_port3: "6819/tcp"
+tcp_port4: "7321/tcp"
+udp_port1: "6817/udp"
+udp_port2: "7321/udp"
+family: "ipv4"
+db_user: "slurm"
+db_host: "localhost"
+slurmdbd_path: "/etc/slurm/slurmdbd.conf"
+slurmdbd_mode: "0600"
+slurm_confpth: "/etc/slurm/slurm.conf"
+slurm_user: "slurm"
+DbdAddr: "localhost"
+DbdHost: "localhost"
+logfile: "/var/log/slurm/slurmdbd.log"
+pidfile: "/var/run/slurm/slurmdbd.pid"
+buffer_path: "/tmp/slurm.conf"
+rpm_path: "/root/rpmbuild/RPMS/x86_64/"
+slurm_mode: "0644"

+ 64 - 0
slurm/roles/slurm_start_services/tasks/main.yml

@@ -0,0 +1,64 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Include common variables
+  include_vars: ../../slurm_manager/vars/main.yml
+
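+# Copy back the slurm.conf that the worker play appended NodeName entries to
+# (fetched into buffer_path), so the manager has the final node definitions.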
+- name: Copy slurm conf from buffer
+  copy:
+    src: "{{ buffer_path }}"
+    dest: "{{ slurm_confpth }}"
+    mode: "{{ slurm_mode }}"
+
+- name: Start slurmctld on manager
+  service:
+    name: slurmctld
+    state: restarted
+    enabled: yes
+  tags: install
+
+- name: Enable slurmdbd on manager
+  service:
+    name: slurmdbd
+    enabled: yes
+  tags: install
+
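+# Register the cluster, a default account, and the root user in the
+# accounting database only when they are not already present.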
+- name: Check if cluster exists
+  command: sacctmgr -n show cluster {{ inventory_hostname }}
+  register: slurm_clusterlist
+  changed_when: false
+
+- name: Create slurm cluster
+  command: sacctmgr -i add cluster {{ inventory_hostname }}
+  when: slurm_clusterlist.stdout.find(inventory_hostname) == -1
+
+- name: Check if account exists
+  command: sacctmgr show account
+  register: account_added
+  changed_when: false
+
+- name: Create default slurm group
+  command: sacctmgr -i add account defaultgroup Cluster={{ inventory_hostname }} Description="Default Account" Organization="Default Org"
+  when: account_added.stdout.find('defaultgroup') == -1
+  tags: install
+
+- name: Check if user exists
+  command: sacctmgr show user
+  register: user_added
+  changed_when: false
+
+- name: Add root to the default account
+  command: sacctmgr -i add user root DefaultAccount=defaultgroup
+  when: user_added.stdout.find('root') == -1
+  tags: install

+ 0 - 22
slurm/roles/start-slurm-workers/tasks/main.yml

@@ -1,22 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
----
-- name: Install SLURM RPMs on compute
-  yum:
-    name: "{{ item }}"
-    #name: "{{ query('fileglob', ['/home/rpms/slurm*20*.rpm']) }}" <-- how it should work to avoid loop
-  with_fileglob:
-    - /home/rpms/slurm*20*.rpm
-  tags: install
-

+ 97 - 0
slurm/roles/start_slurm_workers/tasks/main.yml

@@ -0,0 +1,97 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Include common variables
+  include_vars: ../../slurm_manager/vars/main.yml
+
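+# slurm.conf was fetched from the manager into buffer_path on the control
+# node; copy it onto each compute node so all nodes share one configuration.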
+- name: Copy slurm conf from buffer
+  copy:
+    src: "{{ buffer_path }}"
+    dest: "{{ slurm_confpth }}"
+    mode: "{{ slurm_mode }}"
+
+- name: Install packages for slurm
+  package:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - "{{ slurm_packages }}"
+  tags: install
+
+- name: Install development tools
+  package:
+    name: "{{ item }}"
+    enablerepo: PowerTools
+    state: present
+  with_items:
+    - "{{ dev_tools }}"
+  tags: install
+
+- name: Create temporary download folder for slurm
+  file:
+    path: "{{ tmp_path }}"
+    owner: slurm
+    group: slurm
+    mode: "{{ tmp_mode }}"
+    state: directory
+
+- name: Download slurm source
+  get_url:
+    url: "{{ slurm_url }}"
+    dest: "{{ tmp_path }}"
+    checksum: "{{ slurm_md5 }}"
+    validate_certs: no
+  tags: install
+
+- name: Build slurm rpms
+  command: rpmbuild -ta "{{ rpmbuild_path }}"
+  changed_when: false
+  args:
+    warn: no
+
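+# As on the manager, install the built RPMs only when the base "slurm"
+# package (an assumed package name) is not already installed.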
+- name: Check whether slurm is already installed
+  command: rpm -q slurm
+  ignore_errors: true
+  register: verify_result
+  changed_when: no
+  failed_when: no
+  args:
+    warn: no
+
+- name: Install rpms
+  shell: rpm -Uvh ~{{ rpm_loop }}
+  args:
+    chdir: "{{ rpm_path }}"
+    warn: no
+  when: verify_result.rc != 0
+
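+# Append a NodeName entry with this node's socket/core counts from the
+# gathered facts; note that NodeName is taken from the host's first inventory
+# group.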
+- name: Add socket and core info
+  lineinfile:
+    path: "{{ slurm_confpth }}"
+    regexp: "NodeName= Sockets= CoresPerSocket="
+    line: "NodeName={{ group_names[0] }} Sockets={{ hostvars[inventory_hostname]['ansible_facts']['processor_count'] }}
+      CoresPerSocket={{ hostvars[inventory_hostname]['ansible_facts']['processor_cores'] }}"
+
+- name: Save slurm conf in buffer
+  fetch:
+    src: "{{ slurm_confpth }}"
+    dest: "{{ buffer_path }}"
+    flat: true
+
+- name: Start slurmd on compute nodes
+  service:
+    name: slurmd
+    state: restarted
+    enabled: yes
+  tags: install

+ 16 - 9
slurm/slurm.yml

@@ -12,26 +12,33 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
+
 #Playbook for installing Slurm on a cluster
 
 #collect info from everything
 - hosts: all
 
-# Apply Common Installation and Config
-- hosts: cluster
+- name: Apply common installation and config
+  hosts: manager,compute
   gather_facts: false
   roles:
+    - slurm_common
     - common
-    - slurm-common
 
-# Apply Manager Config, start services
-- hosts: manager
+- name: Apply manager config
+  hosts: manager
   gather_facts: false
   roles:
-    - slurm-manager
+    - slurm_manager
+
+- name: Start slurm workers
+  hosts: compute
+  gather_facts: true
+  roles:
+    - start_slurm_workers
 
-# Start SLURM workers
-- hosts: compute
+- name: Start services
+  hosts: manager
   gather_facts: false
   roles:
-    - start-slurm-workers
+    - slurm_start_services