# main.yml

---
#- name: Kick CoreDNS (this is a hack that needs to be fixed)
#  shell: kubectl get pods -n kube-system --no-headers=true | awk '/coredns/{print $1}' | xargs kubectl delete -n kube-system pod
#  tags: init

- name: Wait for CoreDNS to restart
  shell: kubectl rollout status deployment/coredns -n kube-system
  tags: init

- name: Deploy MetalLB (upstream manifest)
  shell: kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.8.1/manifests/metallb.yaml
  tags: init

- name: Create MetalLB Setup Config File
  copy: src=metal-config.yaml dest=/root/k8s/metal-config.yaml owner=root group=root mode=0655
  tags: init

- name: Create MetalLB Setup Deployment File
  copy: src=metallb.yaml dest=/root/k8s/metallb.yaml owner=root group=root mode=0655
  tags: init

- name: Deploy MetalLB (local manifest)
  shell: kubectl apply -f /root/k8s/metallb.yaml
  tags: init

- name: Create default setup for MetalLB
  shell: kubectl apply -f /root/k8s/metal-config.yaml
  tags: init
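# The contents of metal-config.yaml are not part of this file. For MetalLB
# v0.8.x a minimal layer2 setup is a ConfigMap along these lines; the pool
# name and address range below are placeholders, not values taken from this
# repository:
#
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     namespace: metallb-system
#     name: config
#   data:
#     config: |
#       address-pools:
#       - name: default
#         protocol: layer2
#         addresses:
#         - 10.0.0.100-10.0.0.150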
#- name: Helm - create service account
#  shell: kubectl create serviceaccount --namespace kube-system tiller
#  tags: init
#- name: Helm - create clusterRoleBinding for tiller-cluster-rule
#  shell: kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
#  tags: init
#- name: Helm - create clusterRoleBinding for admin
#  shell: kubectl create clusterrolebinding tiller-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
#  tags: init
#- name: Helm - init
#  shell: helm init --upgrade
#  tags: init
#- name: Wait for tiller to start
#  shell: kubectl rollout status deployment/tiller-deploy -n kube-system
#  tags: init
#- name: Helm - patch cluster role binding for tiller
#  shell: kubectl --namespace kube-system patch deploy tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
#  tags: init
#- name: Wait for tiller to start
#  shell: kubectl rollout status deployment/tiller-deploy -n kube-system
#  tags: init
- name: Start K8S Dashboard
  shell: kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml
  tags: init

- name: Start NFS Client Provisioner
  shell: helm install stable/nfs-client-provisioner --set nfs.server=10.0.0.1 --set nfs.path=/work --generate-name
  tags: init
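# Note: by default the stable/nfs-client-provisioner chart creates a
# StorageClass named "nfs-client", which the Prometheus deployment below
# selects via storageClass=nfs-client.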
- name: JupyterHub Persistent Volume Creation (files)
  copy: src=jhub-db-pv.yaml dest=/root/k8s/jhub-db-pv.yaml owner=root group=root mode=0655
  tags: init

- name: JupyterHub Persistent Volume creation
  shell: kubectl create -f /root/k8s/jhub-db-pv.yaml
  tags: init
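# jhub-db-pv.yaml is not shown here. A plausible sketch of an NFS-backed
# PersistentVolume for the hub database follows; the name, size, and
# subdirectory are assumptions, with the server and base path reused from
# the NFS settings above:
#
#   apiVersion: v1
#   kind: PersistentVolume
#   metadata:
#     name: jhub-db-pv
#   spec:
#     capacity:
#       storage: 1Gi
#     accessModes:
#       - ReadWriteOnce
#     nfs:
#       server: 10.0.0.1
#       path: /work/jhub-db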
- name: JupyterHub Custom Config (files)
  copy: src=jupyter_config.yaml dest=/root/k8s/jupyter_config.yaml owner=root group=root mode=0655
  tags: init

- name: JupyterHub deploy
  shell: helm install jupyterhub/jupyterhub --namespace default --version 0.8.2 --values /root/k8s/jupyter_config.yaml --generate-name
  tags: init
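# Note: "helm install jupyterhub/jupyterhub" assumes the JupyterHub chart
# repository has already been added on this host, e.g.
#   helm repo add jupyterhub https://jupyterhub.github.io/helm-chart/
# which is not done anywhere in this file.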
- name: Prometheus deployment
  shell: helm install stable/prometheus --set alertmanager.persistentVolume.storageClass=nfs-client,server.persistentVolume.storageClass=nfs-client,server.service.type=LoadBalancer --generate-name
  tags: init
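# A sketch of how these tasks might be invoked; the playbook name "site.yml",
# role name "k8s-addons", and host group "masters" are assumptions, not taken
# from this repository:
#
#   # site.yml
#   - hosts: masters
#     become: yes
#     roles:
#       - k8s-addons
#
# Run only the tasks tagged "init" with:
#   ansible-playbook site.yml --tags init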