
Merge pull request #39 from j0hnL/devel

added versionlock, helm3
John Lockman 5 years ago
parent
commit
1cd11f23e6

+ 9 - 3
kubernetes/roles/common/tasks/main.yml

@@ -45,18 +45,24 @@
 - name: install common packages
   yum: 
     name:
+      - yum-plugin-versionlock
       - gcc
       - nfs-utils
       - python-pip
       - docker
       - bash-completion
-      - kubelet
-      - kubeadm
-      - kubectl
+      - kubelet-1.16.7
+      - kubeadm-1.16.7
+      - kubectl-1.16.7
       - nvidia-detect
     state: present
   tags: install
 
+- name: versionlock kubernetes
+  command: yum versionlock kubelet-1.16.7 kubectl-1.16.7 kubeadm-1.16.7
+  tags: install
+  
+
 - name: install InfiniBand Support
   yum:
     name: "@Infiniband Support"

+ 1 - 1
kubernetes/roles/master/tasks/main.yml

@@ -19,7 +19,7 @@
 
 - name: Get Helm Installer
   get_url:
-    url: https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get 
+    url: https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
     dest: /root/bin/get_helm.sh
     mode: 700 
   tags: master
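
Switching the installer URL from the old kubernetes/helm get script to helm/helm's get-helm-3 moves the cluster from Helm 2 to Helm 3, which is why the Tiller bootstrap tasks are dropped in startservices below. Two small caveats: unquoted `mode: 700` is parsed by YAML as the decimal integer 700 rather than octal permissions, so `mode: '0700'` is the safer spelling; and the downloaded script is presumably executed by a later task. For reference, a sketch of such a step (the task name and the default /usr/local/bin install path are assumptions):

- name: Install Helm 3
  command: /root/bin/get_helm.sh
  args:
    # get-helm-3 installs to /usr/local/bin by default; skip if already present
    creates: /usr/local/bin/helm
  tags: master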

+ 6 - 5
kubernetes/roles/startmaster/tasks/main.yml

@@ -51,13 +51,15 @@
     msg: "[Master] K8S_MASTER_IP is  {{ hostvars['K8S_TOKEN_HOLDER']['ip'] }}"
   tags: init
 
-  
-- name: Setup Flannel SDN network
-  shell: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+- name: Setup Calico SDN network
+  shell: kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
   tags: init
+  
+#- name: Setup Flannel SDN network
+  #shell: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+  #tags: init
 
 - name: Enabled GPU support in Kubernetes
-  #script: enable_gpu_k8s.sh
   shell: kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/1.0.0-beta4/nvidia-device-plugin.yml
                            #https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v1.11/nvidia-device-plugin.yml
   register: gpu_enable
@@ -89,7 +91,6 @@
   shell: kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') > /root/k8s/token
   tags: init
 
-
 # If more debug information is needed during init uncomment the following 2 lines
 #- debug: var=init_output.stdout_lines
   #tags: init
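
The CNI swap from Flannel to Calico keeps the same pattern, and `kubectl apply` is idempotent, so re-running init against a live cluster is harmless for the Calico task. The GPU task, however, still uses `kubectl create`, which errors if the DaemonSet already exists. A hedged variant using apply instead (same manifest URL as the PR, only the verb changed):

- name: Enabled GPU support in Kubernetes
  shell: kubectl apply -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/1.0.0-beta4/nvidia-device-plugin.yml
  register: gpu_enable
  tags: init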

+ 27 - 27
kubernetes/roles/startservices/tasks/main.yml

@@ -1,7 +1,7 @@
 ---
-- name: Kick CoreDNS (this is a hack that needs to be fixed)
-  shell:  kubectl get pods -n kube-system --no-headers=true | awk '/coredns/{print $1}'|xargs kubectl delete -n kube-system pod
-  tags: init
+#- name: Kick CoreDNS (this is a hack that needs to be fixed)
+  #shell:  kubectl get pods -n kube-system --no-headers=true | awk '/coredns/{print $1}'|xargs kubectl delete -n kube-system pod
+  #tags: init
 
 - name: Wait for CoreDNS to restart 
   shell: kubectl rollout status deployment/coredns -n kube-system
@@ -27,40 +27,40 @@
   shell: kubectl apply -f /root/k8s/metal-config.yaml
   tags: init
 
-- name: Helm - create service account
-  shell: kubectl create serviceaccount --namespace kube-system tiller
-  tags: init
+#- name: Helm - create service account
+  #shell: kubectl create serviceaccount --namespace kube-system tiller
+  #tags: init
 
-- name: Helm - create clusterRole Binding for tiller-cluster-rule
-  shell: kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
-  tags: init
+#- name: Helm - create clusterRole Binding for tiller-cluster-rule
+  #shell: kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+  #tags: init
 
-- name: Helm - create clusterRoleBinding for admin
-  shell: kubectl create clusterrolebinding tiller-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
-  tags: init
+#- name: Helm - create clusterRoleBinding for admin
+  #shell: kubectl create clusterrolebinding tiller-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+  #tags: init
 
-- name: Helm - init
-  shell: helm init  --upgrade
-  tags: init
+#- name: Helm - init
+  #shell: helm init  --upgrade
+  #tags: init
 
-- name: Wait for tiller to start 
-  shell: kubectl rollout status deployment/tiller-deploy -n kube-system
-  tags: init
+#- name: Wait for tiller to start 
+  #shell: kubectl rollout status deployment/tiller-deploy -n kube-system
+  #tags: init
 
-- name: Helm - patch cluster Role Binding for tiller
-  shell:  kubectl --namespace kube-system patch deploy tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
-  tags: init
+#- name: Helm - patch cluster Role Binding for tiller
+  #shell:  kubectl --namespace kube-system patch deploy tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
+  #tags: init
 
-- name: Wait for tiller to start 
-  shell: kubectl rollout status deployment/tiller-deploy -n kube-system
-  tags: init
+#- name: Wait for tiller to start 
+  #shell: kubectl rollout status deployment/tiller-deploy -n kube-system
+  #tags: init
 
 - name: Start K8S Dashboard
   shell: kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml
   tags: init
 
 - name: Start NFS Client Provisioner
-  shell: helm install --name nfs  stable/nfs-client-provisioner --set nfs.server=10.0.0.1 --set nfs.path=/work
+  shell: helm install stable/nfs-client-provisioner --set nfs.server=10.0.0.1 --set nfs.path=/work --generate-name
   tags: init
 
 - name: JupyterHub Persistent Volume Creation (files)  
@@ -76,9 +76,9 @@
   tags: init
  
 - name: jupyterHub deploy
-  shell: helm install jupyterhub/jupyterhub  --namespace default --version 0.8.2 --values /root/k8s/jupyter_config.yaml
+  shell: helm install jupyterhub/jupyterhub  --namespace default --version 0.8.2 --values /root/k8s/jupyter_config.yaml --generate-name
   tags: init
 
 - name: Prometheus deployment
-  shell: helm install stable/prometheus --set alertmanager.persistentVolume.storageClass=nfs-client,server.persistentVolume.storageClass=nfs-client,server.service.type=LoadBalancer
+  shell: helm install stable/prometheus --set alertmanager.persistentVolume.storageClass=nfs-client,server.persistentVolume.storageClass=nfs-client,server.service.type=LoadBalancer --generate-name
   tags: init
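
Appending --generate-name satisfies Helm 3's requirement that every release be named (Helm 2's --name flag is gone), but it also means each replay of the playbook installs a brand-new release with a random suffix. A hedged alternative that keeps these tasks idempotent by reusing a fixed release name, as the Helm 2 version did (the name is illustrative):

- name: Start NFS Client Provisioner
  shell: helm upgrade --install nfs stable/nfs-client-provisioner --set nfs.server=10.0.0.1 --set nfs.path=/work
  tags: init

The same `helm upgrade --install <name>` form would apply to the jupyterhub and prometheus tasks above.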