소스 검색

Issue #333: Added docker credentials as input vars for k8s cluster

Signed-off-by: blesson-james <blesson_james@Dellteam>
Lucas A. Wilson 3년 전
부모
커밋
878cc97cac

+ 9 - 0
.all-contributorsrc

@@ -105,6 +105,15 @@
       "contributions": [
         "code"
       ]
+    },
+    {
+      "login": "avinashvishwanath",
+      "name": "avinashvishwanath",
+      "avatar_url": "https://avatars.githubusercontent.com/u/77823538?v=4",
+      "profile": "https://github.com/avinashvishwanath",
+      "contributions": [
+        "doc"
+      ]
     }
   ],
   "contributorsPerLine": 7,

파일 크기가 너무 크기 때문에 변경 상태를 표시하지 않습니다.
+ 1 - 0
README.md


+ 11 - 1
control_plane/input_params/login_vars.yml

@@ -80,4 +80,14 @@ powervault_me4_username: ""
 # Password used for powervault_me4
 # The Length of the password should be at least 8.
 # The password must not contain -,\, ',"
-powervault_me4_password: ""
+powervault_me4_password: ""
+
+# Username for Dockerhub account
+# This will be used for Docker login
+# This value is optional but suggested to avoid docker pull limit issues
+docker_username: ""
+
+# Password for Dockerhub account
+# This will be used for Docker login
+# This value is mandatory if docker username is provided
+docker_password: ""

+ 12 - 0
control_plane/roles/control_plane_common/tasks/password_config.yml

@@ -94,6 +94,18 @@
   no_log: true
   register: idrac_password_check
 
+- name: Assert docker_username and docker_password
+  assert:
+    that:
+      - docker_username | length > min_length | int - 1
+      - docker_username | length < max_length | int + 1
+      - docker_password | length > min_length | int - 1
+      - docker_password | length < max_length | int + 1
+    success_msg: "{{ success_msg_docker_credentials }}"
+    fail_msg: "{{ fail_msg_docker_credentials }}"
+  when: docker_username or docker_password
+  no_log: true
+
 - name: Verify ethernet_switch_username and ethernet_switch_password are not empty
   assert:
     that:

+ 2 - 0
control_plane/roles/control_plane_common/vars/main.yml

@@ -66,6 +66,8 @@ success_idrac_username: "idrac username validated"
 fail_idrac_username: "Failed. Incorrect idrac_username format provided in base_vars.yml"
 success_msg_idrac_password: "idrac password validated"
 fail_msg_idrac_password: "Failed. Incorrect idrac_password format provided in base_vars.yml"
+success_msg_docker_credentials: "docker username and password validated"
+fail_msg_docker_credentials: "Failed. Docker username or password is missing"
 ethernet_params_success_msg: "Ethernet switch username and password are not blank"
 ethernet_params_empty_fail_msg: "Failed. ethernet switch username or password cannot be empty when ethernet_switch_support is true"
 success_ethernet_switch_username: "Ethernet switch username validated"

+ 12 - 0
control_plane/roles/control_plane_k8s/tasks/k8s_init.yml

@@ -36,6 +36,18 @@
   ignore_errors: True
   register: k8s_pods
 
+- name: Docker login
+  command: docker login -u {{ docker_username }} -p {{ docker_password }}
+  changed_when: true
+  register: docker_login_output
+  ignore_errors: True
+  when: docker_username and docker_password
+
+- name: Docker login check
+  fail:
+    msg: "{{ docker_login_fail_msg }}"
+  when: docker_login_output is failed
+
 - name: Initialize kubeadm
   block:
     - name: Initialize kubeadm

+ 17 - 0
control_plane/roles/control_plane_k8s/tasks/k8s_installation.yml

@@ -105,6 +105,23 @@
     - "{{ k8s_packages }}"
   changed_when: true
 
+- name: Add docker community edition repository for docker-ce-cli
+  get_url:
+    url: "{{ docker_repo_url }}"
+    dest: "{{ docker_repo_dest }}"
+  when: docker_username and docker_password
+  register: docker_repo
+  until: docker_repo is not failed
+  retries: 20
+  delay: 10
+  tags: install
+
+- name: Install docker-ce-cli
+  package:
+    name: docker-ce-cli
+    state: present
+  when: docker_username and docker_password
+
 - name: Start and enable crio
   service:
     name: crio

+ 3 - 0
control_plane/roles/control_plane_k8s/vars/main.yml

@@ -33,6 +33,8 @@ crio_repo1_url: https://download.opensuse.org/repositories/devel:/kubic:/libcont
 crio_repo1_dest: /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo
 crio_repo2_url: https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:1.21/CentOS_8/devel:kubic:libcontainers:stable:cri-o:1.21.repo
 crio_repo2_dest: /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:1.21.repo
+docker_repo_url: https://download.docker.com/linux/centos/docker-ce.repo
+docker_repo_dest: /etc/yum.repos.d/docker-ce.repo
 
 # Usage: k8s_firewalld.yml
 k8s_master_ports:
@@ -56,6 +58,7 @@ helm_installer_file_mode: 0700
 helm_stable_repo_url: https://charts.helm.sh/stable
 
 # Usage: k8s_init.yml
+docker_login_fail_msg: "Docker login failed! Please check the credentials and re-execute playbook."
 k8s_root_directory: /root/.kube
 k8s_root_directory_mode: 0755
 k8s_config_src: /etc/kubernetes/admin.conf

+ 11 - 0
omnia_config.yml

@@ -33,6 +33,17 @@ k8s_cni: "calico"
 # Make sure this value does not overlap with any of the host networks.
 k8s_pod_network_cidr: "10.244.0.0/16"
 
+# Username for Dockerhub account
+# This will be used for Docker login and a kubernetes secret will be created and patched to service account in default namespace.
+# This kubernetes secret can be used to pull images from private repositories
+# This value is optional but suggested to avoid docker pull limit issues
+docker_username: ""
+
+# Password for Dockerhub account
+# This will be used for Docker login
+# This value is mandatory if docker username is provided
+docker_password: ""
+
 # Path to directory hosting ansible config file (ansible.cfg file)
 # Default value is "/etc/ansible"
 # This directory is on the host running ansible, if ansible is installed using dnf

+ 13 - 0
roles/cluster_validation/tasks/fetch_password.yml

@@ -91,12 +91,25 @@
     success_msg: "{{ success_msg_k8s_pod_network_cidr }}"
     fail_msg: "{{ fail_msg_k8s_pod_network_cidr }}"
 
+- name: Assert docker_username and docker_password
+  assert:
+    that:
+      - docker_username | length > min_length | int - 1
+      - docker_username | length < max_length | int + 1
+      - docker_password | length > min_length | int - 1
+      - docker_password | length < max_length | int + 1
+    success_msg: "{{ success_msg_docker_credentials }}"
+    fail_msg: "{{ fail_msg_docker_credentials }}"
+  when: docker_username or docker_password
+
 - name: Save input variables from file
   set_fact:
     db_password: "{{ mariadb_password }}"
     k8s_version: "{{ k8s_version }}"
     k8s_cni: "{{ k8s_cni }}"
     k8s_pod_network_cidr: "{{ k8s_pod_network_cidr }}"
+    docker_username: "{{ docker_username }}"
+    docker_password: "{{ docker_password }}"
     ansible_conf_file_path: "{{ ansible_config_file_path }}"
   no_log: True
 

+ 2 - 0
roles/cluster_validation/vars/main.yml

@@ -25,6 +25,8 @@ success_msg_k8s_cni: "Kubernetes CNI Validated"
 fail_msg_k8s_cni: "Kubernetes CNI not correct."
 success_msg_k8s_pod_network_cidr: "Kubernetes pod network cidr validated"
 fail_msg_k8s_pod_network_cidr: "Kubernetes pod network cidr not given in correct format"
+success_msg_docker_credentials: "docker username and password validated"
+fail_msg_docker_credentials: "Failed. Docker username or password is missing"
 file_perm: '0755'
 
 #Usage: validations.yml

+ 34 - 0
roles/k8s_start_manager/tasks/main.yml

@@ -49,6 +49,18 @@
   register: k8s_nodes_not_ready
   tags: init
 
+- name: Docker login
+  command: docker login -u {{ hostvars['127.0.0.1']['docker_username'] }} -p {{ hostvars['127.0.0.1']['docker_password'] }}
+  changed_when: true
+  register: docker_login_output
+  ignore_errors: True
+  when: hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
+
+- name: Docker login check
+  fail:
+    msg: "{{ docker_login_fail_msg }}"
+  when: docker_login_output is failed
+
 - name: Initialize kubeadm
   block:
     - name: Initialize kubeadm
@@ -139,6 +151,28 @@
     verbosity: 2
   tags: init
 
+- name: Get K8s secrets
+  command: kubectl get secrets
+  changed_when: false
+  register: k8s_secret
+  when: hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
+
+- name: Create docker secret
+  command: kubectl create secret docker-registry dockerregcred --docker-username={{ hostvars['127.0.0.1']['docker_username'] }} \
+    --docker-password={{ hostvars['127.0.0.1']['docker_password'] }}
+  when:
+    - hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
+    - "'dockerregcred' not in k8s_secret.stdout"
+
+- name: Add docker secret to default service account
+  shell: >
+    kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "dockerregcred"}]}'
+  register: patch_service_account
+  when: hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
+  until: patch_service_account is not failed
+  retries: 10
+  tags: install
+
 - name: Setup Calico SDN network
   command: "kubectl apply -f '{{ calico_yml_url }}'"
   when: hostvars['127.0.0.1']['k8s_cni'] == "calico"

+ 2 - 0
roles/k8s_start_manager/vars/main.yml

@@ -13,6 +13,8 @@
 #  limitations under the License.
 ---
 
+docker_login_fail_msg: "Docker login failed! Please check the credentials and re-execute playbook."
+
 k8s_root_directory: /root/.kube
 
 k8s_root_directory_mode: 0755

+ 49 - 5
roles/k8s_start_services/tasks/main.yml

@@ -17,10 +17,38 @@
   include_vars: ../../slurm_exporter/vars/main.yml
 
 - name: Wait for CoreDNS to restart
-  command: kubectl rollout status deployment/coredns -n kube-system
-  changed_when: false
-  ignore_errors: True
-  tags: init
+  block:
+    - name: Wait for CoreDNS to restart
+      command: kubectl rollout status deployment/coredns -n kube-system  --timeout=4m
+      changed_when: false
+      tags: init
+  rescue:
+    - name: Get K8s pods
+      command: kubectl get pods --all-namespaces
+      register: k8s_pods
+      tags: init
+
+    - name: Pull docker images
+      command: docker pull {{ item }}
+      with_items: "{{ kube_system_docker_images }}"
+      when:
+        - hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
+        - "'ImagePullBackOff' in k8s_pods.stdout"
+      register: docker_image_pull_result
+      until: docker_image_pull_result is not failed
+      retries: 5
+
+    - name: Wait for CoreDNS to restart
+      command: kubectl rollout status deployment/coredns -n kube-system
+      when: hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
+      tags: init
+
+    - name: Fail message
+      fail:
+        msg: "{{ docker_pull_limit_msg }}"
+      when:
+        - "'ImagePullBackOff' in k8s_pods.stdout or 'ErrImagePull' in k8s_pods.stdout"
+        - not hostvars['127.0.0.1']['docker_username'] and not hostvars['127.0.0.1']['docker_password']
 
 - name: Get K8s pods
   command: kubectl get pods --all-namespaces
@@ -211,4 +239,20 @@
   command: "kubectl apply -f '{{ volcano_scheduling_yaml_url }}'"
   changed_when: true
   when: "'volcano-system' not in k8s_pods.stdout"
-  tags: init
+  tags: init
+
+- name: Get K8s pods
+  command: kubectl get pods --all-namespaces
+  changed_when: false
+  register: k8s_pods
+  tags: init
+
+- name: Pull K8s services docker images
+  command: docker pull {{ item }}
+  with_items: "{{ k8s_services_docker_images }}"
+  when:
+    - "'ImagePullBackOff' in k8s_pods.stdout"
+    - hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
+  register: docker_image_pull_result
+  until: docker_image_pull_result is not failed
+  retries: 5

+ 27 - 0
roles/k8s_start_services/vars/main.yml

@@ -13,6 +13,33 @@
 #  limitations under the License.
 ---
 
+kube_system_docker_images:
+  - docker.io/calico/kube-controllers:v3.19.1
+  - docker.io/calico/cni:v3.19.1
+  - docker.io/calico/pod2daemon-flexvol:v3.19.1
+  - docker.io/calico/node:v3.19.1
+  - xilinxatg/xilinx_k8s_fpga_plugin:2020.11.24
+  - nvidia/k8s-device-plugin:v0.7.0
+
+k8s_services_docker_images:
+  - docker.io/rocm/k8s-device-plugin
+  - kubernetesui/dashboard:v2.0.5
+  - kubernetesui/metrics-scraper:v1.0.6
+  - prom/alertmanager:v0.21.0
+  - jimmidyson/configmap-reload:v0.4.0
+  - prom/node-exporter:v1.0.1
+  - prom/pushgateway:v1.2.0
+  - prom/prometheus:v2.20.1
+  - metallb/controller:v0.8.1
+  - metallb/controller:v0.7.3
+  - metallb/speaker:v0.7.3
+  - volcanosh/vc-controller-manager:latest
+  - volcanosh/vc-scheduler:latest
+  - volcanosh/vc-webhook-manager:latest
+  - quay.io/external_storage/nfs-client-provisioner:v3.1.0-k8s1.11
+
+docker_pull_limit_msg: "You have reached your docker pull rate limit. Please provide docker credentials in omnia_config.yml and try again"
+
 metallb_config_file_dest: /root/k8s/metal-config.yaml
 
 metallb_config_file_mode: 0655