
HPCC-26636 add support for pod "topologySpreadConstraints"
An example is provided in helm/hpcc/docs/placements.md

xwang2713 3 years ago
parent
commit
5ca991dd40

+ 21 - 0
helm/hpcc/docs/placements.md

@@ -74,6 +74,10 @@ Supported configurations under each "placement"
    Kubernetes 1.20.0 beta and later releases
    Only one "schedulerName" can be applied to a Pod/Job.
 
+5) topologySpreadConstraints
+   Requires Kubernetes v1.19+.
+   Reference https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+
 "nodeSelector" example:
 ```code
 placements:
@@ -124,4 +128,21 @@ placements:
       operator: "Equal"
       value: "true"
       effect: "NoSchedule"
+
+```
+"topologySpreadConstraints" example, there are two node pools which have "hpcc=spot1" and "hpcc=spot2" respectively. The roxie pods will be evenly scheduled on the two node pools. After deployment verify it with
+```code
+kubectl get pod -o wide | grep roxie
+```
+Placement configuration:
+```code
+- pods: ["type:roxie"]
+  placement:
+    topologySpreadConstraints:
+    - maxSkew: 1
+      topologyKey: hpcc
+      whenUnsatisfiable: ScheduleAnyway
+      labelSelector:
+        matchLabels:
+          roxie-cluster: "roxie"
 ```
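For orientation, the placement above should surface as the standard Kubernetes spec.topologySpreadConstraints field on each roxie pod, roughly as sketched below. This is an assumed rendering for illustration, not literal chart output; the "roxie-cluster: roxie" metadata label is assumed to be present on the roxie pods so that the labelSelector counts them when computing the skew.
```code
# Sketch of the resulting pod spec fragment (assumed rendering, illustration only).
apiVersion: v1
kind: Pod
metadata:
  labels:
    roxie-cluster: "roxie"             # assumed label matched by the labelSelector below
spec:
  topologySpreadConstraints:
  - maxSkew: 1                         # pod counts may differ by at most 1 between the pools
    topologyKey: hpcc                  # nodes labelled hpcc=spot1 vs hpcc=spot2
    whenUnsatisfiable: ScheduleAnyway  # prefer spreading, never block scheduling
    labelSelector:
      matchLabels:
        roxie-cluster: "roxie"
```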

+ 7 - 4
helm/hpcc/templates/_helpers.tpl

@@ -1051,16 +1051,19 @@ Pass in dict with me for current placements and dict with new for the new placem
 */}}
 {{- define "hpcc.mergePlacementSetting" -}}
 {{- if .me.placement.nodeSelector }}
-{{- $_ := set .new "nodeSelector" (mergeOverwrite (.new.nodeSelector | default dict ) .me.placement.nodeSelector)  }}
+ {{- $_ := set .new "nodeSelector" (mergeOverwrite (.new.nodeSelector | default dict ) .me.placement.nodeSelector)  }}
 {{- end -}}
 {{- if .me.placement.tolerations }}
-{{- $_ := set .new "tolerations" (concat (.new.tolerations | default list ) .me.placement.tolerations)  }}
+ {{- $_ := set .new "tolerations" (concat (.new.tolerations | default list ) .me.placement.tolerations)  }}
 {{- end -}}
 {{- if .me.placement.affinity }}
-{{- $_ := set .new "affinity" .me.placement.affinity  }}
+ {{- $_ := set .new "affinity" .me.placement.affinity  }}
 {{- end -}}
 {{- if .me.placement.schedulerName }}
-{{- $_ := set .new "schedulerName" .me.placement.schedulerName }}
+ {{- $_ := set .new "schedulerName" .me.placement.schedulerName }}
+{{- end -}}
+{{- if .me.placement.topologySpreadConstraints }}
+ {{- $_ := set .new "topologySpreadConstraints" (concat (.new.topologySpreadConstraints | default list ) .me.placement.topologySpreadConstraints)  }}
 {{- end -}}
 {{- end -}}
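Because the helper above merges "topologySpreadConstraints" with "concat" rather than overwriting, constraints from multiple matching placement entries accumulate on the same pod. A hypothetical values fragment (not part of this commit) illustrating that behaviour:
```code
# Hypothetical values fragment: both entries match roxie pods, so after the merge
# the roxie pods carry BOTH constraints - spread across zones and across the
# "hpcc" node pools (the lists are concatenated, not overwritten).
placements:
- pods: ["type:roxie"]
  placement:
    topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone   # well-known zone label
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          roxie-cluster: "roxie"
- pods: ["type:roxie"]
  placement:
    topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: hpcc                          # custom node-pool label from the example above
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          roxie-cluster: "roxie"
```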
 

+ 26 - 0
helm/hpcc/values.schema.json

@@ -1633,6 +1633,10 @@
         },
         "schedulerName": {
           "type": "string"
+        },
+        "topologySpreadConstraints": {
+          "type": "array",
+          "items": { "$ref": "#/definitions/topologySpreadConstraint" }
         }
       }
     },
@@ -1666,6 +1670,28 @@
         }
       }
     },
+    "topologySpreadConstraint": {
+      "type": "object",
+      "properties": {
+        "maxSkew": {
+          "type": "integer",
+          "description": "describes the degree to which Pods may be unevenly distributed. It must be greater than zero"
+        },
+        "topologyKey": {
+          "type": "string",
+          "description": "is the key of node labels"
+        },
+        "whenUnsatisfiable": {
+          "type": "string",
+          "enum": ["DoNotSchedule", "ScheduleAnyway"],
+          "description": "indicates how to deal with a Pod if it doesn't satisfy the spread constraint"
+        },
+        "labelSelector": {
+          "type": "object",
+          "description": "labelSelector is used to find matching Pods"
+        }
+      }
+    },
     "issuer": {
       "type": "object",
       "required": [ "name" ],

+ 1 - 1
helm/hpcc/values.yaml

@@ -510,7 +510,7 @@ roxie:
     numThreads: 30
     visibility: local
   ## replicas indicates the number of replicas per channel
-  replicas: 2  
+  replicas: 2
   numChannels: 2
   ## Set serverReplicas to indicate a separate replicaSet of roxie servers, with agent nodes not acting as servers
   serverReplicas: 0