Jelajahi Sumber

Issue #874 Updating Docs

Signed-off-by: cgoveas <cassandra.goveas@dell.com>
cgoveas 3 tahun lalu
induk
melakukan
6ca65745c4
31 mengubah file dengan 507 tambahan dan 171 penghapusan
  1. 2 2
      control_plane/input_params/idrac_vars.yml
  2. 8 7
      control_plane/input_params/security_vars.yml
  3. 11 11
      control_plane/roles/control_plane_common/tasks/verify_omnia_params.yml
  4. 2 4
      control_plane/roles/control_plane_common/vars/main.yml
  5. 4 4
      control_plane/roles/control_plane_k8s/tasks/k8s_init.yml
  6. 1 1
      control_plane/roles/control_plane_k8s/tasks/k8s_services.yml
  7. 6 5
      control_plane/roles/control_plane_security/tasks/install_389ds.yml
  8. 1 1
      docs/INSTALL_OMNIA.md
  9. 37 32
      docs/INSTALL_OMNIA_CONTROL_PLANE.md
  10. 6 6
      docs/README.md
  11. 14 2
      docs/Security/Enable_Security_LoginNode.md
  12. 4 1
      docs/control_plane/device_templates/PROVISION_SERVERS.md
  13. 4 4
      omnia.yml
  14. 4 3
      omnia_config.yml
  15. 7 6
      omnia_security_config.yml
  16. 13 11
      roles/cluster_validation/tasks/fetch_omnia_inputs.yml
  17. 6 8
      roles/cluster_validation/vars/main.yml
  18. 3 3
      roles/common/tasks/main.yml
  19. 10 40
      roles/hostname_validation/tasks/main.yml
  20. 54 0
      roles/hostname_validation/tasks/validate_hostname.yml
  21. 1 0
      roles/hostname_validation/vars/main.yml
  22. 2 1
      roles/login_common/tasks/main.yml
  23. 7 0
      roles/login_node/files/temp_dsrc
  24. 23 0
      roles/login_node/files/temp_krb5.conf
  25. 11 0
      roles/login_node/files/temp_ldap1.inf
  26. 222 0
      roles/login_node/tasks/install_389ds.yml
  27. 1 4
      roles/login_node/tasks/install_ipa_client.yml
  28. 9 0
      roles/login_node/tasks/main.yml
  29. 23 0
      roles/login_node/vars/main.yml
  30. 3 3
      roles/login_server/tasks/install_ipa_server.yml
  31. 8 12
      roles/login_server/tasks/main.yml

+ 2 - 2
control_plane/input_params/idrac_vars.yml

@@ -22,8 +22,8 @@ idrac_system_profile: "Performance"
 
 # Boolean value indicating whether OMNIA should perform firmware update or not
 # Accepted values: true, false
-# Default value: true
-firmware_update_required: true
+# Default value: false
+firmware_update_required: false
 
 # This is the list of poweredge server models
 # The firmware updates will be downloaded only for the below list of models

+ 8 - 7
control_plane/input_params/security_vars.yml

@@ -25,17 +25,17 @@ realm_name: "OMNIA.TEST"
 
 # Maximum number of consecutive failures before lockout
 # The default value of this variable can't be changed
-# Default: 3
+# Default value: 3
 max_failures: 3
 
 # Period (in seconds) after which the number of failed login attempts is reset
-# Default: 60
+# Default value: 60
 # Min: 30
 # Max: 60
 failure_reset_interval: 60
 
 # Period (in seconds) for which users are locked out 
-# Default: 10
+# Default value: 10
 # Min: 5
 # Max: 10
 lockout_duration: 10
@@ -48,7 +48,7 @@ session_timeout: 180
 
 # Email address used for sending alerts in case of authentication failure
 # If this variable is left blank, authentication failure alerts will be disabled.
-# Required value
+# Currently, only one email ID is accepted in this field
 alert_email_address: ""
 
 # This variable mentions the users to whom the access will be provided
@@ -57,12 +57,13 @@ alert_email_address: ""
 user: ''
 
 # This variable provides the type of access
-# Accepted values 'Allow' or 'Deny' by default 'Allow'
-allow_deny: 'Allow'
+# Accepted values: "Allow" or "Deny"
+# Default value: "Allow"
+allow_deny: "Allow"
 
 # This variable is used to disable services.
 # Accepted values: "true" or "false". 
-# Default values are: true  
+# Default values: false  
 # Root access is needed.
 restrict_program_support: false
 

+ 11 - 11
control_plane/roles/control_plane_common/tasks/verify_omnia_params.yml

@@ -74,7 +74,7 @@
     - ( domain_name | length < 1 or
       realm_name | length < 1 or
       directory_manager_password | length < 1 or
-      ipa_admin_password | length < 1 ) and
+      kerberos_admin_password | length < 1 ) and
       ( login_node_required and
       host_mapping_file  and
       not enable_security_support)
@@ -174,17 +174,17 @@
     - login_node_required
     - not enable_security_support
 
-- name: Assert ipa_admin_password
+- name: Assert kerberos_admin_password
   assert:
     that:
-      - ipa_admin_password | length > min_length | int - 1
-      - ipa_admin_password | length < max_length | int + 1
-      - '"-" not in ipa_admin_password '
-      - '"\\" not in ipa_admin_password '
-      - '"\"" not in ipa_admin_password '
-      - " \"'\" not in ipa_admin_password "
-    success_msg: "{{ success_msg_ipa_admin_password }}"
-    fail_msg: "{{ fail_msg_ipa_admin_password }}"
+      - kerberos_admin_password | length > min_length | int - 1
+      - kerberos_admin_password | length < max_length | int + 1
+      - '"-" not in kerberos_admin_password '
+      - '"\\" not in kerberos_admin_password '
+      - '"\"" not in kerberos_admin_password '
+      - " \"'\" not in kerberos_admin_password "
+    success_msg: "{{ success_msg_kerberos_admin_password }}"
+    fail_msg: "{{ fail_msg_kerberos_admin_password }}"
   tags: [ validate, templates ]
   when:
     - host_mapping_file
@@ -202,4 +202,4 @@
   file:
     path: "{{ role_path }}/../../../{{ config_filename }}"
     mode: "{{ vault_file_perm }}"
-  tags: init
+  tags: init

+ 2 - 4
control_plane/roles/control_plane_common/vars/main.yml

@@ -110,8 +110,8 @@ realm_name_success_msg: "realm_name successfully validated"
 realm_name_fail_msg: "Failed. Incorrect realm_name formate in omnia_config.yml"
 success_msg_directory_manager_password: "directory_manager_password successfully validated"
 fail_msg_directory_manager_password: "Failed. Incorrect format provided for directory_manager_password"
-success_msg_ipa_admin_password: "ipa_admin_password successfully validated"
-fail_msg_ipa_admin_password: "Failed. Incorrect format provided for ipa_admin_password"
+success_msg_kerberos_admin_password: "kerberos_admin_password successfully validated"
+fail_msg_kerberos_admin_password: "Failed. Incorrect format provided for kerberos_admin_password"
 omnia_input_config_failure_msg: "Failed. Please provide all the required parameters in omnia_config.yml for for login_node"
 login_node_required_success_msg: "login_node_required successfully validated"
 login_node_required_fail_msg: "Failed. login_node_required should be either true or false"
@@ -277,8 +277,6 @@ alert_email_fail_msg: "Failed. Incorrect alert_email_address value in security_v
 alert_email_warning_msg: "[WARNING] alert_email_address is empty. Authentication failure alerts won't be configured."
 email_max_length: 320
 email_search_key: "@"
-login_node_security_success_msg: "Login node security successfully validated"
-login_node_security_fail_msg: "Failed. Incorrect Login node security format in security_vars.yml"
 user_success_msg: "user successfully validated"
 user_fail_msg: "Failed. Incorrect user format in security_vars.yml"
 allow_deny_success_msg: "Access successfully validated"

+ 4 - 4
control_plane/roles/control_plane_k8s/tasks/k8s_init.yml

@@ -52,7 +52,7 @@
   block:
     - name: Initialize kubeadm (This process may take 5-10min)
       command: "kubeadm init --pod-network-cidr='{{ appliance_k8s_pod_net_cidr }}' \
-        --apiserver-advertise-address='{{ ansible_default_ipv4.address }}'"
+        --apiserver-advertise-address='{{ public_ip }}'"
       changed_when: true
       register: init_output
   rescue:
@@ -62,7 +62,7 @@
 
     - name: Initialize kubeadm (This process may take 5-10min)
       command: "kubeadm init --pod-network-cidr='{{ appliance_k8s_pod_net_cidr }}' \
-          --apiserver-advertise-address='{{ ansible_default_ipv4.address }}'"
+          --apiserver-advertise-address='{{ public_ip }}'"
       changed_when: true
       register: init_output
 
@@ -113,7 +113,7 @@
     name:   "K8S_TOKEN_HOLDER"
     token:  "{{ k8s_token.stdout }}"
     hash:   "{{ k8s_manager_ca_hash.stdout }}"
-    ip:     "{{ ansible_default_ipv4.address }}"
+    ip:     "{{ public_ip }}"
 
 - name: Create yaml repo for setup
   file:
@@ -135,4 +135,4 @@
 - name: Edge / Workstation Install allows pods to schedule on manager
   command: kubectl taint nodes --all node-role.kubernetes.io/master-
   changed_when: true
-  failed_when: false
+  failed_when: false

+ 1 - 1
control_plane/roles/control_plane_k8s/tasks/k8s_services.yml

@@ -108,7 +108,7 @@
   changed_when: true
 
 - name: Start NFS Client Provisioner
-  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ ansible_default_ipv4.address }}' --set nfs.path='{{ nfs_path }}' --generate-name"
+  command: "helm install nfs-control-plane stable/nfs-client-provisioner --set nfs.server='{{ public_ip }}' --set nfs.path='{{ nfs_path }}'"
   changed_when: true
   when: "'nfs-client-provisioner' not in k8s_pods.stdout"
 

+ 6 - 5
control_plane/roles/control_plane_security/tasks/install_389ds.yml

@@ -69,7 +69,8 @@
       lineinfile:
         path: "{{ ldap1_config_path }}"
         regexp: "^root_password = password"
-        line: "root_password = {{ ms_directory_manager_password }}"  
+        line: "root_password = {{ ms_directory_manager_password }}"
+      no_log: true
 
     - name: Check ldap instance is running or not
       command: dsctl {{ ldap_instance }} status
@@ -112,10 +113,9 @@
         state: enabled
       with_items: "{{ ldap_services }}"
 
-    - name: Reload firewalld service
-      systemd:
-        name: firewalld
-        state: reloaded
+    - name: Reload firewalld
+      command: firewall-cmd --reload
+      changed_when: true
 
     - name: Install kerberos packages
       zypper:
@@ -194,5 +194,6 @@
     
     - name: Configure password policy in 389-ds
       command: dsconf -w {{ ms_directory_manager_password }} -D "cn=Directory Manager" ldap://{{ server_hostname_ms }} pwpolicy set --pwdlockoutduration {{ lockout_duration }} --pwdmaxfailures {{ max_failures }} --pwdresetfailcount {{ failure_reset_interval }}
+      no_log: true
       changed_when: true
   when: not ds389_status

+ 1 - 1
docs/INSTALL_OMNIA.md

@@ -90,7 +90,7 @@ __Note:__ After the Omnia repository is cloned, a folder named __omnia__ is crea
 | domain_name                | omnia.test    | Sets the intended domain name                                                                                                                                                                                                                        |
 | realm_name                 | OMNIA.TEST    | Sets the intended realm name                                                                                                                                                                                                                         |
 | directory_manager_password |               | Password authenticating admin level access to the Directory for system   management tasks. It will be added to the instance of directory server   created for IPA. <br> Required Length: 8 characters. <br> The   password must not contain -,\, '," |
-| ipa_admin_password         |               | IPA server admin password                                                                                                                                                                                                                            |
+| kerberos_admin_password         |               | "admin" user password for the IPA server on RockyOS. If LeapOS is in use, it is used as the "kerberos admin" user password for 389-ds <br> This field is not relevant to Management Stations running `LeapOS`                                                                                                                                                                                                                            |
 | enable_secure_login_node   |  **false**, true             | Boolean value deciding whether security features are enabled on the Login Node. For more information, see [here](docs/Security/Enable_Security_LoginNode.md).                                                                                                                                                                                                                           |
 	
 	

File diff ditekan karena terlalu besar
+ 37 - 32
docs/INSTALL_OMNIA_CONTROL_PLANE.md


+ 6 - 6
docs/README.md

@@ -51,8 +51,8 @@ The following table lists the software and operating system requirements on the
 
 Requirements  |   Version
 ----------------------------------  |   -------
-OS pre-installed on the management station  |  Rocky 8.5/ Leap 15.3
-OS deployed by Omnia on bare-metal Dell EMC PowerEdge Servers | Rocky 8.5 Minimal Edition/ Leap 15.3
+OS pre-installed on the management station  |  Rocky 8.x/ Leap 15.x
+OS deployed by Omnia on bare-metal Dell EMC PowerEdge Servers | Rocky 8.x Minimal Edition/ Leap 15.x
 Cobbler  |  3.2.2
 Ansible AWX  |  19.4.0
 Slurm Workload Manager  |  20.11.2
@@ -80,10 +80,10 @@ The following table lists the software and its compatible version managed by Omn
 
 Software	|	License	|	Compatible Version	|	Description
 -----------	|	-------	|	----------------	|	-----------------
-LeapOS 15.3	|	-	|	15.3|	Operating system on entire cluster
+LeapOS 15.3	|	-	|	15.x|	Operating system on entire cluster
 CentOS Linux release 7.9.2009 (Core)	|	-	|	7.9	|	Operating system on entire cluster except for management station
-Rocky 8.5	|	-	|	8.5	|	Operating system on entire cluster except for management station
-Rocky 8.5	|	-	|	8.5	|	Operating system on the management station
+Rocky 8.x	|	-	|	8.x	|	Operating system on entire cluster except for management station
+Rocky 8.x	|	-	|	8.x	|	Operating system on the management station
 MariaDB	|	GPL 2.0	|	5.5.68	|	Relational database used by Slurm
 Slurm	|	GNU General Public	|	20.11.7	|	HPC Workload Manager
 Docker CE	|	Apache-2.0	|	20.10.2	|	Docker Service
@@ -110,7 +110,7 @@ AWX	|	Apache-2.0	|	19.4.0	|	Web-based User Interface
 AWX.AWX	|	Apache-2.0	|	19.4.0	|	Galaxy collection to perform awx configuration
 AWXkit	|	Apache-2.0	|	to be updated	|	To perform configuration through CLI commands
 Cri-o	|	Apache-2.0	|	1.21	|	Container Service
-Buildah	|	Apache-2.0	|	1.21.4	|	Tool to build and run container
+Buildah	|	Apache-2.0	|	1.22.4	|	Tool to build and run containers
 PostgreSQL	|	Copyright (c) 1996-2020, PostgreSQL Global Development Group	|	10.15	|	Database Management System
 Redis	|	BSD-3-Clause License	|	6.0.10	|	In-memory database
 NGINX	|	BSD-2-Clause License	|	1.14	|	-

+ 14 - 2
docs/Security/Enable_Security_LoginNode.md

@@ -1,4 +1,4 @@
-# Enabling Security on the Login Node (RockyOS)
+# Enabling Security on the Login Node 
 
 * Ensure that `enable_secure_login_node` is set to **true** in `omnia_config.yml`
 * Set the following parameters in `omnia_security_config.yml`
@@ -9,7 +9,19 @@
 | failure_reset_interval | 60              | Period (in seconds) after which the number of failed login attempts is   reset <br> Accepted Values: 30-60                                                       |
 | lockout_duration       | 10              | Period (in seconds) for which users are locked out. <br> Accepted   Values: 5-10                                                                                 |
 | session_timeout        | 180             | Period (in seconds) after which idle users get logged out automatically   <br> Accepted Values: 30-90                                                            |
-| alert_email_address    |                 | Email address used for sending alerts in case of authentication failure   <br> If this variable is left blank, authentication failure alerts will   be disabled. |
+| alert_email_address    |                 | Email address used for sending alerts in case of authentication failure. Currently, only one email ID is accepted in this field.   <br> If this variable is left blank, authentication failure alerts will   be disabled. |
 | allow_deny             | Allow           | This variable sets whether the user list is Allowed or Denied. <br>   Accepted Values: Allow, Deny                                                               |
 | user                   |                 | Array of users that are allowed or denied based on the `allow_deny`   value. Multiple users must be separated by a space.                                        |
 
+* Set the following parameters in `control_plane/input_params/security_vars.yml`
+
+|  Parameter Name        |  Default Value  |  Additional Information                                                                                                                                          |
+|------------------------|-----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| allow_deny             | Allow           | This variable sets whether the user list is Allowed or Denied. <br>   Accepted Values: Allow, Deny                                                               |
+| user                   |                 | Array of users that are allowed or denied based on the `allow_deny`   value. Multiple users must be separated by a space.                                        |
+
+
+## Kernel Lockdown
+
+* RockyOS has Kernel Lockdown mode (Integrity) enabled by default
+* SUSE/Leap allows users to set Kernel Lockdown mode to Confidentiality or Integrity.

+ 4 - 1
docs/control_plane/device_templates/PROVISION_SERVERS.md

@@ -63,7 +63,7 @@ If you want to reprovision all the servers in the cluster or any of the faulty s
 
 Omnia role used: *provision_cobbler*  
 Ports used by Cobbler:  
-* TCP ports: 80,443,69
+* TCP ports: 69,8000, 8008
 * UDP ports: 69,4011
 
 To create the Cobbler image, Omnia configures the following:
@@ -81,6 +81,9 @@ To access the Cobbler dashboard, enter `https://<IP>/cobbler_web` where `<IP>` i
 
 >> __Note__: With the addition of Multiple profiles, the cobbler container dynamically updates the mount point based on the value of `provision_os` in `base_vars.yml`.
 
+### DHCP routing using Cobbler
+Omnia now supports DHCP routing via Cobbler. To enable routing, update the `primary_dns` and `secondary_dns` in `base_vars` with the appropriate IPs (hostnames are currently not supported). For compute nodes that are not directly connected to the internet (ie only host network is configured), this configuration allows for internet connectivity.
+
 ## Security enhancements  
 Omnia provides the following options to enhance security on the provisioned PowerEdge servers:
 * **System lockdown mode**: To enable the system lockdown mode on iDRAC, set the *system_lockdown* variable to "enabled" in the `idrac_vars.yml` file.

+ 4 - 4
omnia.yml

@@ -20,6 +20,9 @@
   roles:
     - cluster_validation
 
+- name: Gather facts from all the nodes
+  hosts: manager, compute, login_node, nfs_node
+
 - name: Validate hostname
   hosts: manager, login_node
   gather_facts: false
@@ -38,9 +41,6 @@
         tasks_from: validate_nfs_config.yml
       when: hostvars['127.0.0.1']['powervault_status']
 
-- name: Gather facts from all the nodes
-  hosts: manager, compute, login_node, nfs_node
-
 - name: Apply common installation and config
   hosts: manager, compute, login_node
   gather_facts: false
@@ -231,4 +231,4 @@
 
 - name: Passwordless SSH between manager and compute nodes
   include: control_plane/tools/passwordless_ssh.yml
-  when: hostvars['127.0.0.1']['control_plane_status']
+  when: hostvars['127.0.0.1']['control_plane_status']

+ 4 - 3
omnia_config.yml

@@ -70,11 +70,12 @@ realm_name: "OMNIA.TEST"
 # The password must not contain -,\, ',"
 directory_manager_password: ""
 
-# The IPA server requires an administrative user, named 'admin'. 
+# kerberos_admin_password used by IPA admin user in Rocky OS and used by 389-ds for kerberos admin password in leap OS
+# The IPA server requires an administrative user, named 'admin'.
 # This user is a regular system account used for IPA server administration
-ipa_admin_password: ""
+kerberos_admin_password: ""
 
 # Boolean indicating whether login node is requires security features or not
 # It can be set to true or false
 # By default it is set to false indicating security features will not be configured
-enable_secure_login_node: false
+enable_secure_login_node: false

+ 7 - 6
omnia_security_config.yml

@@ -15,17 +15,17 @@
 
 # Maximum number of consecutive failures before lockout
 # The default value of this variable can't be changed
-# Default: 3
+# Default value: 3
 max_failures: 3
 
 # Period (in seconds) after which the number of failed login attempts is reset
-# Default: 60
+# Default value: 60
 # Min: 30
 # Max: 60
 failure_reset_interval: 60
 
 # Period (in seconds) for which users are locked out 
-# Default: 10
+# Default value: 10
 # Min: 5
 # Max: 10
 lockout_duration: 10
@@ -38,7 +38,7 @@ session_timeout: 180
 
 # Email address used for sending alerts in case of authentication failure
 # If this variable is left blank, authentication failure alerts will be disabled.
-# Required value
+# Currently, only one email ID is accepted in this field
 alert_email_address: ""
 
 # This variable mentions the users to whom the access will be provided
@@ -47,5 +47,6 @@ alert_email_address: ""
 user: ''
 
 # This variable provides the type of access
-# Accepted values 'Allow' or 'Deny' by default 'Allow'
-allow_deny: 'Allow'
+# Accepted values: "Allow" or "Deny"
+# Default value: "Allow"
+allow_deny: "Allow"

+ 13 - 11
roles/cluster_validation/tasks/fetch_omnia_inputs.yml

@@ -137,7 +137,7 @@
     - ( domain_name | length < 1 or
       realm_name | length < 1 or
       directory_manager_password | length < 1 or
-      ipa_admin_password | length < 1 ) 
+      kerberos_admin_password | length < 1 ) 
     - login_node_required
     - not ipa_server_ms
 
@@ -193,17 +193,17 @@
     - login_node_required
     - not ipa_server_ms
 
-- name: Assert ipa_admin_password
+- name: Assert kerberos_admin_password
   assert:
     that:
-      - ipa_admin_password | length > min_length | int - 1
-      - ipa_admin_password | length < max_length | int + 1
-      - '"-" not in ipa_admin_password '
-      - '"\\" not in ipa_admin_password '
-      - '"\"" not in ipa_admin_password '
-      - " \"'\" not in ipa_admin_password "
-    success_msg: "{{ success_msg_ipa_admin_password }}"
-    fail_msg: "{{ fail_msg_ipa_admin_password }}"
+      - kerberos_admin_password | length > min_length | int - 1
+      - kerberos_admin_password | length < max_length | int + 1
+      - '"-" not in kerberos_admin_password '
+      - '"\\" not in kerberos_admin_password '
+      - '"\"" not in kerberos_admin_password '
+      - " \"'\" not in kerberos_admin_password "
+    success_msg: "{{ success_msg_kerberos_admin_password }}"
+    fail_msg: "{{ fail_msg_kerberos_admin_password }}"
   when:
     - login_node_required
     - not ipa_server_ms
@@ -216,4 +216,6 @@
 
 - name: Fetch security inputs
   include_tasks: fetch_security_inputs.yml
-  when: enable_secure_login_node
+  when: 
+    - login_node_required
+    - enable_secure_login_node

+ 6 - 8
roles/cluster_validation/vars/main.yml

@@ -16,7 +16,7 @@
 # Usage: main.yml
 awx_search_key: "-job-"
 
-# Usage: fetch_password.yml
+# Usage: fetch_omnia_inputs.yml
 config_filename: "omnia_config.yml"
 config_vaultname: .omnia_vault_key
 min_length: 8
@@ -37,13 +37,14 @@ realm_name_success_msg: "realm_name successfully validated"
 realm_name_fail_msg: "Failed. Incorrect realm_name formate in omnia_config.yml"
 success_msg_directory_manager_password: "directory_manager_password successfully validated"
 fail_msg_directory_manager_password: "Failed. Incorrect format provided for directory_manager_password"
-success_msg_ipa_admin_password: "ipa_admin_password successfully validated"
-fail_msg_ipa_admin_password: "Failed. Incorrect format provided for ipa_admin_password"
+success_msg_kerberos_admin_password: "kerberos_admin_password successfully validated"
+fail_msg_kerberos_admin_password: "Failed. Incorrect format provided for kerberos_admin_password"
 input_config_failure_msg: "Input parameters cannot be empty"
 login_node_required_success_msg: "login_node_required successfully validated"
 login_node_required_fail_msg: "Failed. login_node_required should be either true or false"
 secure_login_node_success_msg: "enable_secure_login_node successfully validated"
 secure_login_node_fail_msg: "Failed. enable_secure_login_node should be either true or false"
+ipa_secret_file: "{{ playbook_dir }}/control_plane/roles/control_plane_security/files/.ipavars.yml"
 
 # Usage: validations.yml
 skip_tag_fail_msg: "Can't skip both slurm and kubernetes"
@@ -64,11 +65,10 @@ tower_vault_path: "{{ playbook_dir }}/control_plane/roles/webui_awx/files/.tower
 powervault_inventory_name: "powervault_me4_inventory"
 powervault_group: "powervault_me4"
 
-# Usage: fetch_ipa_password.yml
+# Usage: fetch_control_plane_credentials.yml
 login_vars_filename: input_params/login_vars.yml
 vault_filename: input_params/.login_vault_key
 vault_file_perm: '0644'
-ipa_secret_file: "{{ playbook_dir }}/control_plane/roles/control_plane_security/files/.ipavars.yml"
 
 # Usage: fetch_security_inputs.yml
 security_vars_filename: "{{ playbook_dir }}/omnia_security_config.yml"
@@ -92,9 +92,7 @@ alert_email_fail_msg: "Failed. Incorrect alert_email_address value in security_v
 alert_email_warning_msg: "[WARNING] alert_email_address is empty. Authentication failure alerts won't be configured."
 email_max_length: 320
 email_search_key: "@"
-login_node_security_success_msg: "Login node security successfully validated"
-login_node_security_fail_msg: "Failed. Incorrect Login node security format in security_vars.yml"
 user_success_msg: "user successfully validated"
 user_fail_msg: "Failed. Incorrect user format in security_vars.yml"
 allow_deny_success_msg: "Access successfully validated"
-allow_deny_fail_msg: "Failed. Incorrect Access format in security_vars.yml"
+allow_deny_fail_msg: "Failed. Incorrect Access format in security_vars.yml"

+ 3 - 3
roles/common/tasks/main.yml

@@ -72,7 +72,7 @@
         name: "swap"
         fstype: swap
         state: absent
-        
+
     - name: Disable selinux
       selinux:
         state: disabled
@@ -124,9 +124,9 @@
         autorefresh: yes
       tags: install
 
-    - name: Install nvidia
+    - name: Install nvidia(This might take 10-15 minutes)
       command: zypper --gpg-auto-import-keys install -l -y x11-video-nvidiaG06
-      changed_when: false
+      changed_when: true
       tags: install
 
     - name: Add docker community edition repository

+ 10 - 40
roles/hostname_validation/tasks/main.yml

@@ -13,47 +13,17 @@
 #  limitations under the License.
 ---
 
-- name: Verify domain name in hostname
-  block:
-    - name: Fetch the hostname
-      command: hostname -s
-      register: machine_hostname
-      changed_when: false
-
-    - name: Verify the hostname is not blank in hostname
-      fail:
-        msg: " {{ hostname_blank_msg }}"
-      when: machine_hostname.stdout | length < 1
-
-    - name: Validate the host name
-      assert:
-        that:
-          - machine_hostname.stdout is regex(("^(([a-z]|[a-z][a-z0-9\-]*[a-z0-9])\.)*([a-z]|[a-z][a-z0-9\-]*[a-z0-9])$"))
-          - machine_hostname.stdout != "localhost"
-        success_msg: "{{ server_hostname_success }}"
-        fail_msg: "{{ server_hostname_fail }}"
-
-    - name: Fetch the domain name
-      command: hostname -d
-      register: domain_name_set
-      changed_when: false
-
-    - name: Verify the domain name is not blank in hostname
-      fail:
-        msg: " {{ domain_name_blank_msg }}"
-      when: domain_name_set.stdout | length < 1
-
-    - name: Set fact for the domain name in hostname
-      set_fact:
-        ms_domain_name: "{{ domain_name_set.stdout }}"
-
-    - name: Validate the domain name set on the host
-      assert:
-        that:
-          - hostvars['127.0.0.1']['domain_name'] == ms_domain_name
-        success_msg: "{{ server_domain_name_success }}"
-        fail_msg: "{{ server_domain_name_fail }}"
+- name: Verify domain name in hostname - rocky and centos
+  include_tasks: validate_hostname.yml
   when:
+    - os_leap not in ansible_distribution | lower
     - hostvars['127.0.0.1']['login_node_required']
     - hostvars['127.0.0.1']['ipa_server_ms'] and login_node_group in group_names or
       not hostvars['127.0.0.1']['ipa_server_ms']
+
+- name: Verify domain name in hostname - leap
+  include_tasks: validate_hostname.yml
+  when:
+    - os_leap in ansible_distribution | lower
+    - hostvars['127.0.0.1']['login_node_required']
+    - login_node_group in group_names

+ 54 - 0
roles/hostname_validation/tasks/validate_hostname.yml

@@ -0,0 +1,54 @@
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Fetch the hostname
+  command: hostname -s
+  register: machine_hostname
+  changed_when: false
+
+- name: Verify the hostname is not blank in hostname
+  fail:
+    msg: " {{ hostname_blank_msg }}"
+  when: machine_hostname.stdout | length < 1
+
+- name: Validate the host name
+  assert:
+    that:
+      - machine_hostname.stdout is regex(("^(([a-z]|[a-z][a-z0-9\-]*[a-z0-9])\.)*([a-z]|[a-z][a-z0-9\-]*[a-z0-9])$"))
+      - machine_hostname.stdout != "localhost"
+    success_msg: "{{ server_hostname_success }}"
+    fail_msg: "{{ server_hostname_fail }}"
+
+- name: Fetch the domain name
+  command: hostname -d
+  register: domain_name_set
+  changed_when: false
+  failed_when: false
+
+- name: Verify the domain name is not blank in hostname
+  fail:
+    msg: " {{ domain_name_blank_msg }}"
+  when: domain_name_set.stdout | length < 1
+
+- name: Set fact for the domain name in hostname
+  set_fact:
+    ms_domain_name: "{{ domain_name_set.stdout }}"
+
+- name: Validate the domain name set on the host
+  assert:
+    that:
+      - hostvars['127.0.0.1']['domain_name'] == ms_domain_name
+    success_msg: "{{ server_domain_name_success }}"
+    fail_msg: "{{ server_domain_name_fail }}"

+ 1 - 0
roles/hostname_validation/vars/main.yml

@@ -20,3 +20,4 @@ hostname_blank_msg: "Failed. Domain name is not set in hostname It should have h
 server_hostname_success: "Hostname in server hostname validated"
 server_hostname_fail: "Failed. Hostname set is not valid"
 login_node_group: login_node
+os_leap: leap

+ 2 - 1
roles/login_common/tasks/main.yml

@@ -23,6 +23,7 @@
     - name: Update Packages
       include_tasks: update_package.yml
   when:
+    - os_leap not in ansible_distribution | lower
     - hostvars['127.0.0.1']['login_node_required']
     - hostvars['127.0.0.1']['ipa_server_ms'] and login_node_group in group_names or
-      not hostvars['127.0.0.1']['ipa_server_ms']
+      not hostvars['127.0.0.1']['ipa_server_ms']

+ 7 - 0
roles/login_node/files/temp_dsrc

@@ -0,0 +1,7 @@
+# /root/.dsrc file for administering the ldap1 instance
+
+[ldap1]
+
+uri = ldapi://%%2fvar%%2frun%%2fslapd-ldap1.socket
+basedn = dc=omnia,dc=test
+binddn = cn=Directory Manager

+ 23 - 0
roles/login_node/files/temp_krb5.conf

@@ -0,0 +1,23 @@
+[libdefaults]
+dns_canonicalize_hostname = false
+rdns = false
+default_realm = OMNIA.TEST
+ticket_lifetime = 24h
+renew_lifetime = 7d
+
+[realms]
+OMNIA.TEST = {
+kdc = hostname.omnia.test:88
+admin_server = hostname.omnia.test
+default_domain = omnia.test
+}
+
+[logging]
+kdc = FILE:/var/log/krb5kdc.log
+admin_server = FILE:/var/log/kadmind.log
+default = SYSLOG:NOTICE:DAEMON
+
+[domain_realm]
+.omnia.test = omnia.test
+omnia.test = omnia.test
+hostname.omnia.test = OMNIA.TEST

+ 11 - 0
roles/login_node/files/temp_ldap1.inf

@@ -0,0 +1,11 @@
+[general]
+config_version = 2
+
+[slapd]
+root_password = password
+self_sign_cert = True
+instance_name = ldap1
+
+[backend-userroot]
+sample_entries = yes
+suffix = dc=omnia,dc=test

+ 222 - 0
roles/login_node/tasks/install_389ds.yml

@@ -0,0 +1,222 @@
+#  Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Initialize ds389_status
+  set_fact:
+    ds389_status: false
+
+- name: Fetch hostname
+  command: hostname
+  register: new_serv_hostname
+  changed_when: false
+
+- name: Fetch the short hostname
+  command: hostname -s
+  register: short_hostname
+  changed_when: false
+
+- name: Set fact for server hostname
+  set_fact:
+    server_hostname_fqdn: "{{ new_serv_hostname.stdout }}"
+    server_hostname_short: "{{ short_hostname.stdout }}"
+    directory_manager_password: "{{ hostvars['127.0.0.1']['directory_manager_password'] }}"
+    kerberos_admin_password: "{{ hostvars['127.0.0.1']['kerberos_admin_password'] }}"
+    domain_name: "{{ hostvars['127.0.0.1']['domain_name'] }}"
+    realm_name: "{{ hostvars['127.0.0.1']['realm_name'] }}"
+  no_log: true
+
+- name: Check ldap instance is running or not
+  command: dsctl {{ ldap_instance }} status
+  changed_when: false
+  failed_when: false
+  register: ldap1_status
+
+- name: Check ds389_status admin authentication
+  shell: set -o pipefail && echo {{ kerberos_admin_password }} | kinit {{ ipa_admin_username }}
+  changed_when: false
+  failed_when: false
+  no_log: true
+  register: ds389_status_authentication
+
+- name: Modify ds389_status
+  set_fact:
+    ds389_status: true
+  when:
+    - ds389_status_authentication.rc == 0
+    - ldap1_install_search_key in ldap1_status.stdout.split(' ')[3]
+
+- block:
+    - name: Install 389-ds
+      zypper:
+        name: "{{ ds389_packages }}"
+        state: present
+
+    - name: Create the ldap1.inf file
+      copy:
+        src: "{{ role_path }}/files/temp_ldap1.inf"
+        dest: "{{ ldap1_config_path }}"
+        mode: "{{ file_mode }}"
+
+    - name: Configure ldap1.inf with domain name
+      lineinfile:
+        path: "{{ ldap1_config_path }}"
+        regexp: "^suffix = dc=omnia,dc=test"
+        line: "suffix = dc={{ domain_name.split('.')[0] }},dc={{ domain_name.split('.')[1] }}"
+
+    - name: Configure ldap1.inf with directory manager password
+      lineinfile:
+        path: "{{ ldap1_config_path }}"
+        regexp: "^root_password = password"
+        line: "root_password = {{ directory_manager_password }}"
+      no_log: true
+
+    - name: Check ldap instance is running or not
+      command: dsctl {{ ldap_instance }} status
+      changed_when: false
+      failed_when: false
+      register: ldap1_status
+
+    - name: Creating 389 directory server instance
+      shell: dscreate -v from-file {{ ldap1_config_path }} | tee {{ ldap1_output_path }}
+      changed_when: true
+      when: ldap1_search_key in ldap1_status.stdout
+
+    - name: Remove the ldap1.inf
+      file:
+        path: "{{ ldap1_config_path }}"
+        state: absent
+
+    - name: Start dirsrv service
+      systemd:
+        name: "dirsrv@{{ ldap_instance }}.service"
+        state: started
+        enabled: yes
+
+    - name: Create the dsrc file
+      copy:
+        src: "{{ role_path }}/files/temp_dsrc"
+        dest: "{{ dsrc_path }}"
+        mode: "{{ file_mode }}"
+
+    - name: Configure dsrc file with domain name
+      lineinfile:
+        path: "{{ dsrc_path }}"
+        regexp: "^basedn = dc=omnia,dc=test"
+        line: "basedn = dc={{ domain_name.split('.')[0] }},dc={{ domain_name.split('.')[1] }}"
+
+    - name: Start and enable firewalld
+      service:
+        name: firewalld
+        state: started
+        enabled: yes
+
+    - name: Permit traffic in default zone for ldap and ldaps service
+      firewalld:
+        service: "{{ item }}"
+        permanent: yes
+        state: enabled
+      with_items: "{{ ldap_services }}"
+
+    - name: Reload firewalld
+      command: firewall-cmd --reload
+      changed_when: true
+
+    - name: Stop and disable firewalld
+      service:
+        name: firewalld
+        state: stopped
+        enabled: no
+
+    - name: Install kerberos packages
+      zypper:
+        name: "{{ kerberos_packages }}"
+        state: present
+
+    - name: Check kerberos principal is created or not
+      stat:
+        path: "{{ kerberos_principal_path }}"
+      register: principal_status
+
+    - name: Create the kerberos conf file
+      copy:
+        src: "{{ role_path }}/files/temp_krb5.conf"
+        dest: "{{ kerberos_conf_path }}"
+        mode: "{{ file_mode }}"
+
+    - name: Configure kerberos conf file with domain name
+      replace:
+        path: "{{ kerberos_conf_path }}"
+        regexp: "omnia.test"
+        replace: "{{ domain_name }}"
+
+    - name: Configure kerberos conf file with realm name
+      replace:
+        path: "{{ kerberos_conf_path }}"
+        regexp: "OMNIA.TEST"
+        replace: "{{ realm_name }}"
+
+    - name: Configure kerberos conf file with hostname
+      replace:
+        path: "{{ kerberos_conf_path }}"
+        regexp: "hostname"
+        replace: "{{ server_hostname_short }}"
+
+    - block:
+        - name: Setting up the kerberos database
+          command: "kdb5_util -r {{ realm_name }} -P {{ directory_manager_password }} create -s"
+          no_log: true
+          changed_when: true
+          register: setting_database
+          environment:
+            PATH: "{{ ansible_env.PATH }}:{{ kerberos_env_path }}"
+          when: not principal_status.stat.exists
+      rescue:
+        - name: Setting up the kerberos database failed
+          fail:
+            msg: "Error: {{ setting_database.stderr }}"
+
+    - name: Start krb5kdc and kadmind services
+      systemd:
+        name: "{{ item }}"
+        state: started
+        enabled: yes
+      with_items:
+        - krb5kdc
+        - kadmind
+
+    - block:
+        - name: Create admin principal
+          command: kadmin.local -q "ank -pw {{ kerberos_admin_password }} {{ ipa_admin_username }}"
+          no_log: true
+          changed_when: true
+          register: create_admin_principal
+          environment:
+            PATH: "{{ ansible_env.PATH }}:{{ kerberos_env_path }}"
+      rescue:
+        - name: Create admin principal failed
+          fail:
+            msg: "Error: {{ create_admin_principal.stderr }}"
+
+    - name: Authenticate as admin
+      shell: set -o pipefail && echo {{ kerberos_admin_password }} | kinit {{ ipa_admin_username }}
+      no_log: true
+      changed_when: false
+  when: not ds389_status
+
+- name: Configure password policy in 389-ds
+  command: dsconf -w {{ directory_manager_password }} -D "cn=Directory Manager" ldap://{{ server_hostname_fqdn }} pwpolicy set --pwdlockoutduration {{ hostvars['127.0.0.1']['lockout_duration'] }} --pwdmaxfailures {{ hostvars['127.0.0.1']['max_failures'] }} --pwdresetfailcount {{ hostvars['127.0.0.1']['failure_reset_interval'] }}
+  changed_when: true
+  no_log: true
+  when: hostvars['127.0.0.1']['enable_secure_login_node']

+ 1 - 4
roles/login_node/tasks/install_ipa_client.yml

@@ -13,9 +13,6 @@
 #  limitations under the License.
 ---
 
-- name: Include ipa server variables
-  include_vars: ../../login_common/vars/main.yml
-
 - name: Fetch hostname
   command: hostname
   register: loginnode_hostname
@@ -29,7 +26,7 @@
 
 - name: Set hostname of ipa server when manager node has ipa server installed
   set_fact:
-    required_ipa_admin_pwd: "{{ hostvars['127.0.0.1']['ipa_admin_password'] }}"
+    required_ipa_admin_pwd: "{{ hostvars['127.0.0.1']['kerberos_admin_password'] }}"
     required_server_hostname: "{{ hostvars[groups['manager'][0]]['server_hostname'] }}"
     required_domain_name: "{{ hostvars['127.0.0.1']['domain_name'] }}"
   when: not hostvars['127.0.0.1']['ipa_server_ms']

+ 9 - 0
roles/login_node/tasks/main.yml

@@ -13,16 +13,25 @@
 #  limitations under the License.
 ---
 
+- name: Include ipa server variables
+  include_vars: ../../login_common/vars/main.yml
+
 - block:
     - name: Add ports to firewall to run slurm jobs
       include_tasks: firewall_settings.yml
     
     - name: Install ipa client
       include_tasks: install_ipa_client.yml
+      when: os_leap not in ansible_distribution | lower
+
+    - name: Install 389 directory server
+      include_tasks: install_389ds.yml
+      when: os_leap in ansible_distribution | lower
 
     - block:
         - name: FreeIPA configuration
           include_tasks: ipa_configuration.yml
+          when: os_leap not in ansible_distribution | lower
 
         - name: Install Apparmor on Leap
           include_tasks: install_apparmor.yml

+ 23 - 0
roles/login_node/vars/main.yml

@@ -63,3 +63,26 @@ snoopy_path: /var/lib
 # Usage: user_monitor.yml
 psacct: psacct
 acct: acct
+
+# Usage: install_389ds.yml
+ds389_packages:
+  - 389-ds
+  - db48-utils
+  - python3-argcomplete
+ldap1_search_key: "No such instance"
+ds389_pwpolicy_search_key: "passwordlockoutduration: {{ lockout_duration }}"
+ldap1_install_search_key: running
+ldap1_config_path: "/root/ldap1.inf"
+ldap_instance: ldap1
+ldap1_output_path: /var/log/ldap1_output.txt
+ldap_services:
+  - ldap
+  - ldaps
+dsrc_path: /root/.dsrc
+kerberos_packages:
+  - krb5
+  - krb5-server
+  - krb5-client
+kerberos_principal_path: /var/lib/kerberos/krb5kdc/principal
+kerberos_conf_path: /etc/krb5.conf
+kerberos_env_path: /usr/lib/mit/sbin/

+ 3 - 3
roles/login_server/tasks/install_ipa_server.yml

@@ -32,7 +32,7 @@
 
 - name: Install ipa server in CentOS 7.9
   command: >-
-    ipa-server-install -n '{{ hostvars['127.0.0.1']['domain_name'] }}' --hostname='{{ server_hostname }}' -a '{{ hostvars['127.0.0.1']['ipa_admin_password'] }}'
+    ipa-server-install -n '{{ hostvars['127.0.0.1']['domain_name'] }}' --hostname='{{ server_hostname }}' -a '{{ hostvars['127.0.0.1']['kerberos_admin_password'] }}'
     -p '{{ hostvars['127.0.0.1']['directory_manager_password'] }}' -r '{{ hostvars['127.0.0.1']['realm_name'] }}' --setup-dns --auto-forwarders --auto-reverse -U
   changed_when: true
   no_log: true
@@ -42,7 +42,7 @@
 
 - name: Install ipa server in CentOS > 8 or Rocky 8.4
   command: >-
-    ipa-server-install -n '{{ hostvars['127.0.0.1']['domain_name'] }}' --hostname='{{ server_hostname }}' -a '{{ hostvars['127.0.0.1']['ipa_admin_password'] }}'
+    ipa-server-install -n '{{ hostvars['127.0.0.1']['domain_name'] }}' --hostname='{{ server_hostname }}' -a '{{ hostvars['127.0.0.1']['kerberos_admin_password'] }}'
     -p '{{ hostvars['127.0.0.1']['directory_manager_password'] }}' -r '{{ hostvars['127.0.0.1']['realm_name'] }}' --setup-dns --no-forwarders --no-reverse --no-ntp -U
   changed_when: true
   no_log: true
@@ -52,7 +52,7 @@
     - ( ansible_distribution_version >= os_version )
 
 - name: Authenticate as admin
-  shell: set -o pipefail && echo $'{{ hostvars['127.0.0.1']['ipa_admin_password'] }}' | kinit admin
+  shell: set -o pipefail && echo $'{{ hostvars['127.0.0.1']['kerberos_admin_password'] }}' | kinit admin
   no_log: true
   changed_when: false
 

+ 8 - 12
roles/login_server/tasks/main.yml

@@ -13,20 +13,16 @@
 #  limitations under the License.
 ---
 
-- name: Include variables
-  include_vars: ../../login_common/vars/main.yml
-  when:
-    - hostvars['127.0.0.1']['login_node_required']
-    - not hostvars['127.0.0.1']['ipa_server_ms']
+- name: Include login_common variables
+  include_vars: "{{ playbook_dir }}/roles/login_common/vars/main.yml"
 
-- name: Install required packages
-  include_tasks: install_packages.yml
-  when:
-    - hostvars['127.0.0.1']['login_node_required']
-    - not hostvars['127.0.0.1']['ipa_server_ms']
+- block:
+    - name: Install required packages
+      include_tasks: install_packages.yml
 
-- name: Install free-ipa server
-  include_tasks: install_ipa_server.yml
+    - name: Install free-ipa server
+      include_tasks: install_ipa_server.yml
   when:
+    - os_leap not in ansible_distribution | lower
     - hostvars['127.0.0.1']['login_node_required']
     - not hostvars['127.0.0.1']['ipa_server_ms']