
removed all instances of `master` from scripts and playbooks

Signed-off-by: John Lockman <jlockman3@gmail.com>
John Lockman · 4 years ago · commit 44fce1ea2b
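A quick way to sanity-check a sweep like this (illustrative only, not part of the commit) is a recursive grep; note that the Helm download URL and the `node-role.kubernetes.io/master-` taint key in the diffs below legitimately keep the word:

```
# illustrative check: list any occurrences of "master" left in playbooks and scripts
grep -rn --include='*.yml' --include='*.yaml' --include='*.sh' 'master' .
```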
41 changed files with 46 additions and 46 deletions
1. CONTRIBUTING.md (+2 -2)
2. docs/INSTALL.md (+3 -3)
3. docs/PREINSTALL.md (+2 -2)
4. docs/README.md (+1 -1)
5. examples/README.md (+1 -1)
6. kubernetes/host_inventory_file (+3 -3)
7. kubernetes/jupyterhub.yaml (+1 -1)
8. kubernetes/kubeflow.yaml (+1 -1)
9. kubernetes/kubernetes.yml (+7 -7)
10. kubernetes/roles/manager/files/k8s.conf (+0 -0)
11. kubernetes/roles/manager/files/kubernetes.repo (+0 -0)
12. kubernetes/roles/manager/files/nvidia (+0 -0)
13. kubernetes/roles/master/tasks/main.yml (+5 -5)
14. kubernetes/roles/startmanager/files/create_admin_user.yaml (+0 -0)
15. kubernetes/roles/startmanager/files/create_clusterRoleBinding.yaml (+0 -0)
16. kubernetes/roles/startmanager/files/data-pv.yaml (+0 -0)
17. kubernetes/roles/startmanager/files/data2-pv.yaml (+0 -0)
18. kubernetes/roles/startmanager/files/data3-pv.yaml (+0 -0)
19. kubernetes/roles/startmanager/files/data4-pv.yaml (+0 -0)
20. kubernetes/roles/startmanager/files/enable_gpu_k8s.sh (+0 -0)
21. kubernetes/roles/startmanager/files/flannel_net.sh (+0 -0)
22. kubernetes/roles/startmanager/files/katib-pv.yaml (+0 -0)
23. kubernetes/roles/startmanager/files/kube-flannel.yaml (+0 -0)
24. kubernetes/roles/startmanager/files/kubeflow_persistent_volumes.yaml (+0 -0)
25. kubernetes/roles/startmanager/files/minio-pvc.yaml (+0 -0)
26. kubernetes/roles/startmanager/files/mysql-pv.yaml (+0 -0)
27. kubernetes/roles/startmanager/files/nfs-class.yaml (+0 -0)
28. kubernetes/roles/startmanager/files/nfs-deployment.yaml (+0 -0)
29. kubernetes/roles/startmanager/files/nfs-serviceaccount.yaml (+0 -0)
30. kubernetes/roles/startmanager/files/nfs_clusterrole.yaml (+0 -0)
31. kubernetes/roles/startmanager/files/nfs_clusterrolebinding.yaml (+0 -0)
32. kubernetes/roles/startmanager/files/notebook-pv.yaml (+0 -0)
33. kubernetes/roles/startmanager/files/persistent_volumes.yaml (+0 -0)
34. kubernetes/roles/startmanager/files/pvc.yaml (+0 -0)
35. kubernetes/roles/startmanager/files/tiller_config.sh (+0 -0)
36. kubernetes/roles/startmaster/tasks/main.yml (+9 -9)
37. kubernetes/roles/startworkers/tasks/main.yml (+1 -1)
38. slurm/roles/slurm-master/tasks/main.yaml (+3 -3)
39. slurm/slurm.yml (+3 -3)
40. slurm/slurm_inventory_file (+3 -3)
41. tools/install_tools.yml (+1 -1)

+ 2 - 2
CONTRIBUTING.md

@@ -32,7 +32,7 @@ Contributions to Omnia are made through [Pull Requests (PRs)](https://help.githu
 6. **Create a pull request:** [Create a pull request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request) with a title following this format: _Issue ###: Description_ (e.g., _Issue 1023: Reformat testutils_). A good description makes the reviewer's job easier: it not only reduces review time, but also reduces the probability of a misunderstanding about the pull request.
    * **Important:** When preparing a pull request it is important to stay up-to-date with the project repository. We recommend that you rebase against the upstream repo _frequently_. To do this, use the following commands:
    ```
-   git pull --rebase upstream master #upstream is dellhpc/omnia
+   git pull --rebase upstream devel #upstream is dellhpc/omnia
    git push --force origin <pr-branch-name> #origin is your fork of the repository (e.g., <github_user_name>/omnia.git)
    ```
    * **PR Description:** Be sure to fully describe the pull request. Ideally, your PR description will contain:
@@ -42,7 +42,7 @@ Contributions to Omnia are made through [Pull Requests (PRs)](https://help.githu
       4. How to verify that the changes work correctly.
    
 ## Omnia Branches and Contribution Flow
-The diagram below describes the contribution flow. Omnia has two lifetime branches: `devel` and `master`. The `master` branch is reserved for releases and their associated tags. The `devel` branch is where all development work occurs. The `devel` branch is also the default branch for the project.
+The diagram below describes the contribution flow. Omnia has two lifetime branches: `devel` and `release`. The `release` branch is reserved for releases and their associated tags. The `devel` branch is where all development work occurs. The `devel` branch is also the default branch for the project.
 
 ![Omnia Branch Flowchart](docs/images/omnia-branch-structure.png "Flowchart of Omnia branches")
 

+ 3 - 3
docs/INSTALL.md

@@ -63,7 +63,7 @@ Omnia playbooks perform several tasks:
     - Docker
     - Kubelet
 
-`master` playbook
+`manager` playbook
 * Install Helm v3
 * (optional) add firewall rules for Slurm and kubernetes
 
@@ -72,10 +72,10 @@ Everything from this point on can be called by using the `init` tag
 ansible-playbook -i host_inventory_file kubernetes/kubernetes.yml --tags "init"
 ```
 
-`startmaster` playbook
+`startmanager` playbook
 * turn off swap
 * Initialize Kubernetes
-    * Head/master
+    * Head/manager
         - Start K8S pass startup token to compute/slaves
         - Initialize software defined networking (Calico)
 

+ 2 - 2
docs/PREINSTALL.md

@@ -5,7 +5,7 @@ Omnia assumes that prior to installation:
 * Systems have a base operating system (currently CentOS 7 or 8)
 * Network(s) has been cabled and nodes can reach the internet
 * SSH Keys for `root` have been installed on all nodes to allow for password-less SSH
-* Ansible is installed on either the master node or a separate deployment node
+* Ansible is installed on either the manager node or a separate deployment node
 ```
 yum install ansible
 ```
@@ -22,6 +22,6 @@ Omnia assumes that servers are already connected to the network and have access
 ### Network Topology
 Possible network configurations include:
 * A flat topology where all nodes are connected to a switch which includes an uplink to the internet. This requires multiple externally-facing IP addresses
-* A hierarchical topology where compute nodes are connected to a common switch, but the master node contains a second network connection which is connected to the internet. All outbound/inbound traffic would be routed through the master node. This requires setting up firewall rules for IP masquerade, see [here](https://www.server-world.info/en/note?os=CentOS_7&p=firewalld&f=2) for an example.
+* A hierarchical topology where compute nodes are connected to a common switch, but the manager node contains a second network connection which is connected to the internet. All outbound/inbound traffic would be routed through the manager node. This requires setting up firewall rules for IP masquerade, see [here](https://www.server-world.info/en/note?os=CentOS_7&p=firewalld&f=2) for an example.
 ### IP and Hostname Assignment
 The recommended setup is to assign IP addresses to individual servers. This can be done manually by logging onto each node, or via DHCP.
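For the IP masquerade setup mentioned in the hierarchical-topology bullet above, the firewalld side typically comes down to something like the following on the manager node (an illustrative sketch, not taken from this commit or the linked guide):

```
# illustrative: masquerade cluster traffic through the manager node's externally-facing zone
firewall-cmd --zone=public --add-masquerade --permanent
firewall-cmd --reload
```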

+ 1 - 1
docs/README.md

@@ -38,6 +38,6 @@ It's not just new features and bug fixes that can be contributed to the Omnia pr
 * Feedback
 * Validation that it works for your particular configuration
 
-If you would like to contribute, see [CONTRIBUTING](https://github.com/dellhpc/omnia/blob/master/CONTRIBUTING.md).
+If you would like to contribute, see [CONTRIBUTING](https://github.com/dellhpc/omnia/blob/devel/CONTRIBUTING.md).
 
 ### [Omnia Contributors](CONTRIBUTORS.md)

+ 1 - 1
examples/README.md

@@ -1,7 +1,7 @@
 # Examples
 
 
-The examples [K8s Submit](https://github.com/dellhpc/omnia/blob/master/examples/k8s-TensorFlow-resnet50-multinode-MPIOperator.yaml) and [SLURM submit](https://github.com/dellhpc/omnia/blob/master/examples/slurm-TensorFlow-resnet50-multinode-MPI.batch) are provide as examples for running the resnet50 benchmark with TensorFlow on 8 GPUs using 2 C4140s.
+The examples [K8s Submit](https://github.com/dellhpc/omnia/blob/devel/examples/k8s-TensorFlow-resnet50-multinode-MPIOperator.yaml) and [SLURM submit](https://github.com/dellhpc/omnia/blob/devel/examples/slurm-TensorFlow-resnet50-multinode-MPI.batch) are provided as examples for running the resnet50 benchmark with TensorFlow on 8 GPUs using 2 C4140s.
 
 ## Submitting the example
 

+ 3 - 3
kubernetes/host_inventory_file

@@ -2,7 +2,7 @@ all:
   children:
     cluster:
       children:
-        master:
+        manager:
           hosts:
             compute000:
         workers:
@@ -14,7 +14,7 @@ all:
               hosts:
                 compute002:
                 compute004:
-                compute005:
+                #compute005:
       vars:
         single_node: false
-        master_ip: 10.0.0.100
+        manager_ip: 10.0.0.100
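A quick way to confirm the renamed group still resolves (illustrative command, not part of the commit):

```
# illustrative: list the hosts that now fall under the renamed "manager" group
ansible -i kubernetes/host_inventory_file manager --list-hosts
```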

+ 1 - 1
kubernetes/jupyterhub.yaml

@@ -16,7 +16,7 @@
 #Playbook for installing JupyterHub v1.1.0 in Omnia
  
 # Start K8s worker servers
-- hosts: master
+- hosts: manager
   gather_facts: false
   roles:
     - jupyterhub

+ 1 - 1
kubernetes/kubeflow.yaml

@@ -16,7 +16,7 @@
 #Playbook for installing Kubeflow v1.0 on Omnia
  
 # Start K8s worker servers
-- hosts: master
+- hosts: manager
   gather_facts: false
   roles:
     - kubeflow

+ 7 - 7
kubernetes/kubernetes.yml

@@ -30,17 +30,17 @@
   roles:
     - computeGPU
 
-# Apply Master Config
-- hosts: master
+# Apply Manager Config
+- hosts: manager
   gather_facts: false
   roles:
-    - master
+    - manager
 
-# Start K8s on master server
-- hosts: master
+# Start K8s on manager server
+- hosts: manager
   gather_facts: false
   roles:
-    - startmaster
+    - startmanager
 
 # Start K8s worker servers
 - hosts: compute,gpus
@@ -49,7 +49,7 @@
     - startworkers
 
 # Start K8s worker servers
-- hosts: master
+- hosts: manager
   gather_facts: false
   roles:
     - startservices

kubernetes/roles/master/files/k8s.conf → kubernetes/roles/manager/files/k8s.conf


kubernetes/roles/master/files/kubernetes.repo → kubernetes/roles/manager/files/kubernetes.repo


kubernetes/roles/master/files/nvidia → kubernetes/roles/manager/files/nvidia


+ 5 - 5
kubernetes/roles/master/tasks/main.yml

@@ -15,15 +15,15 @@
 ---
 #- name: Firewall Rule K8s:6443/tcp
   #command: firewall-cmd  --zone=internal --add-port=6443/tcp --permanent
-  #tags: master
+  #tags: manager
 #
 #- name: Firewall Rule K8s:10250/tcp
   #command: firewall-cmd  --zone=internal --add-port=10250/tcp --permanent
-  #tags: master
+  #tags: manager
 ##
 #- name: Firewall Reload
   #command: firewall-cmd  --reload
-  #tags: master
+  #tags: manager
 #
 - name: Create /root/bin (if it doesn't exist)
   file:
@@ -36,10 +36,10 @@
     url: https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
     dest: /root/bin/get_helm.sh
     mode: 700 
-  tags: master
+  tags: manager
 
 - name: Install Helm
   command: /root/bin/get_helm.sh
-  tags: master
+  tags: manager
 
 # install and start up OpenSM -  III

kubernetes/roles/startmaster/files/create_admin_user.yaml → kubernetes/roles/startmanager/files/create_admin_user.yaml


kubernetes/roles/startmaster/files/create_clusterRoleBinding.yaml → kubernetes/roles/startmanager/files/create_clusterRoleBinding.yaml


kubernetes/roles/startmaster/files/data-pv.yaml → kubernetes/roles/startmanager/files/data-pv.yaml


kubernetes/roles/startmaster/files/data2-pv.yaml → kubernetes/roles/startmanager/files/data2-pv.yaml


kubernetes/roles/startmaster/files/data3-pv.yaml → kubernetes/roles/startmanager/files/data3-pv.yaml


kubernetes/roles/startmaster/files/data4-pv.yaml → kubernetes/roles/startmanager/files/data4-pv.yaml


kubernetes/roles/startmaster/files/enable_gpu_k8s.sh → kubernetes/roles/startmanager/files/enable_gpu_k8s.sh


kubernetes/roles/startmaster/files/flannel_net.sh → kubernetes/roles/startmanager/files/flannel_net.sh


kubernetes/roles/startmaster/files/katib-pv.yaml → kubernetes/roles/startmanager/files/katib-pv.yaml


kubernetes/roles/startmaster/files/kube-flannel.yaml → kubernetes/roles/startmanager/files/kube-flannel.yaml


kubernetes/roles/startmaster/files/kubeflow_persistent_volumes.yaml → kubernetes/roles/startmanager/files/kubeflow_persistent_volumes.yaml


kubernetes/roles/startmaster/files/minio-pvc.yaml → kubernetes/roles/startmanager/files/minio-pvc.yaml


kubernetes/roles/startmaster/files/mysql-pv.yaml → kubernetes/roles/startmanager/files/mysql-pv.yaml


kubernetes/roles/startmaster/files/nfs-class.yaml → kubernetes/roles/startmanager/files/nfs-class.yaml


kubernetes/roles/startmaster/files/nfs-deployment.yaml → kubernetes/roles/startmanager/files/nfs-deployment.yaml


kubernetes/roles/startmaster/files/nfs-serviceaccount.yaml → kubernetes/roles/startmanager/files/nfs-serviceaccount.yaml


kubernetes/roles/startmaster/files/nfs_clusterrole.yaml → kubernetes/roles/startmanager/files/nfs_clusterrole.yaml


kubernetes/roles/startmaster/files/nfs_clusterrolebinding.yaml → kubernetes/roles/startmanager/files/nfs_clusterrolebinding.yaml


kubernetes/roles/startmaster/files/notebook-pv.yaml → kubernetes/roles/startmanager/files/notebook-pv.yaml


kubernetes/roles/startmaster/files/persistent_volumes.yaml → kubernetes/roles/startmanager/files/persistent_volumes.yaml


kubernetes/roles/startmaster/files/pvc.yaml → kubernetes/roles/startmanager/files/pvc.yaml


kubernetes/roles/startmaster/files/tiller_config.sh → kubernetes/roles/startmanager/files/tiller_config.sh


+ 9 - 9
kubernetes/roles/startmaster/tasks/main.yml

@@ -18,7 +18,7 @@
   tags: init
 
 - name: Initialize kubeadm
-  command: /bin/kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address={{ master_ip }}
+  command: /bin/kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address={{ manager_ip }}
   #command: /bin/kubeadm init 
   register: init_output 
   tags: init
@@ -44,30 +44,30 @@
 
 - name: CA Hash
   shell: openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
-  register: K8S_MASTER_CA_HASH
+  register: K8S_MANAGER_CA_HASH
   tags: init
 
-- name: Add K8S Master IP, Token, and Hash to dummy host
+- name: Add K8S Manager IP, Token, and Hash to dummy host
   add_host:
     name:   "K8S_TOKEN_HOLDER"
     token:  "{{ K8S_TOKEN.stdout }}"
-    hash:   "{{ K8S_MASTER_CA_HASH.stdout }}"
-    ip:     "{{ master_ip }}"
+    hash:   "{{ K8S_MANAGER_CA_HASH.stdout }}"
+    ip:     "{{ manager_ip }}"
   tags: init
 
 - name:
   debug:
-    msg: "[Master] K8S_TOKEN_HOLDER K8S token is {{ hostvars['K8S_TOKEN_HOLDER']['token'] }}"
+    msg: "[Manager] K8S_TOKEN_HOLDER K8S token is {{ hostvars['K8S_TOKEN_HOLDER']['token'] }}"
   tags: init
 
 - name:
   debug:
-    msg: "[Master] K8S_TOKEN_HOLDER K8S Hash is  {{ hostvars['K8S_TOKEN_HOLDER']['hash'] }}"
+    msg: "[Manager] K8S_TOKEN_HOLDER K8S Hash is  {{ hostvars['K8S_TOKEN_HOLDER']['hash'] }}"
   tags: init
 
 - name:
   debug:
-    msg: "[Master] K8S_MASTER_IP is  {{ master_ip }}"
+    msg: "[Manager] K8S_MANAGER_IP is  {{ manager_ip }}"
   tags: init
 
 - name: Setup Calico SDN network
@@ -115,7 +115,7 @@
   shell: kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') > /root/k8s/token
   tags: init
 
-- name: Edge / Workstation Install allows pods to scheudle on master
+- name: Edge / Workstation Install allows pods to schedule on manager
   shell: kubectl taint nodes --all node-role.kubernetes.io/master-
   when: single_node 
   tags: init

+ 1 - 1
kubernetes/roles/startworkers/tasks/main.yml

@@ -30,7 +30,7 @@
 
 #- name:
   #debug:
-    #msg: "[Worker] K8S_MASTER_IP is  {{ hostvars['K8S_TOKEN_HOLDER']['ip'] }}"
+    #msg: "[Worker] K8S_MANAGER_IP is  {{ hostvars['K8S_TOKEN_HOLDER']['ip'] }}"
   #tags: init
 
 - name: "Kubeadm join"
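For context, the worker-side join task whose body falls outside this hunk presumably feeds the `K8S_TOKEN_HOLDER` values into a `kubeadm join`; roughly as follows, where 10.0.0.100 is the `manager_ip` from the example inventory and the port and flag layout are assumptions, not shown in this commit:

```
# sketch only: the kind of join each worker issues, using the values published by the manager play
kubeadm join 10.0.0.100:6443 \
  --token <token from K8S_TOKEN_HOLDER> \
  --discovery-token-ca-cert-hash sha256:<hash from K8S_TOKEN_HOLDER>
```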

+ 3 - 3
slurm/roles/slurm-master/tasks/main.yaml

@@ -33,7 +33,7 @@
     - /root/rpmbuild/RPMS/x86_64/slurm*20*.rpm
   tags: install
 
-- name: Install SLURM RPMs on Master
+- name: Install SLURM RPMs on Manager
   yum: 
     name: "{{ item }}"
     #name: "{{ query('fileglob', ['/home/rpms/slurm*20*.rpm']) }}" <-- how it should work to avoid loop
@@ -96,14 +96,14 @@
   command: sacctmgr -i add user root DefaultAccount=defaultgroup
   tags: install
 
-- name: Start slurmctld on Master
+- name: Start slurmctld on Manager
   service: 
     name: slurmctld
     state: restarted
     enabled: yes
   tags: install
 
-- name: Enable Slurmdbd on Master
+- name: Enable Slurmdbd on Manager
   service: 
     name: slurmdbd
     state: restarted

+ 3 - 3
slurm/slurm.yml

@@ -23,11 +23,11 @@
   roles:
     - slurm-common
 
-# Apply Master Config, start services
-- hosts: master
+# Apply Manager Config, start services
+- hosts: manager
   gather_facts: false
   roles:
-    - slurm-master
+    - slurm-manager
 
 # Start SLURM workers
 - hosts: compute

+ 3 - 3
slurm/slurm_inventory_file

@@ -1,7 +1,7 @@
-[master]
+[manager]
 friday
 
-[master:vars]
+[manager:vars]
 slurm_url=https://download.schedmd.com/slurm/slurm-20.02.0.tar.bz2
 slurm_md5=md5:8ed2257471ff24ca213b510a4c1c3563
 
@@ -14,5 +14,5 @@ compute[002:005]
 compute
 
 [cluster:children]
-master
+manager
 workers

+ 1 - 1
tools/install_tools.yml

@@ -13,7 +13,7 @@
 #  limitations under the License.
 ---
 
-- hosts: master
+- hosts: manager
   tasks:
   - name: Install Change Personality Script
     copy: