
HPCC-24460 Initial definition of storage planes

Signed-off-by: Gavin Halliday <gavin.halliday@lexisnexis.com>
Gavin Halliday committed 5 years ago (commit 62f62d7a34)

+ 97 - 10
dali/base/dadfs.cpp

@@ -9553,7 +9553,7 @@ class CInitGroups
         return loadMachineMap(conn->queryRoot());
     }
 
-    IPropertyTree *createClusterGroup(GroupType groupType, const std::vector<std::string> &hosts, const char *dir, const IPropertyTree &envCluster, bool realCluster, bool _expand)
+    IPropertyTree *createClusterGroup(GroupType groupType, const std::vector<std::string> &hosts, const char *dir, const IPropertyTree * envCluster, bool realCluster, bool _expand)
     {
         bool expand = _expand;
         if (grp_thor != groupType)
@@ -9589,8 +9589,9 @@ class CInitGroups
         };
         if (expand)
         {
-            unsigned slavesPerNode = envCluster.getPropInt("@slavesPerNode", 1);
-            unsigned channelsPerSlave = envCluster.getPropInt("@channelsPerSlave", 1);
+            assertex(envCluster);
+            unsigned slavesPerNode = envCluster->getPropInt("@slavesPerNode", 1);
+            unsigned channelsPerSlave = envCluster->getPropInt("@channelsPerSlave", 1);
             for (unsigned s=0; s<(slavesPerNode*channelsPerSlave); s++)
                 addHostsToIPTFunc();
         }
@@ -9661,7 +9662,7 @@ class CInitGroups
         if (!hosts.size())
             return nullptr;
 
-        return createClusterGroup(groupType, hosts, dir, cluster, realCluster, expand);
+        return createClusterGroup(groupType, hosts, dir, &cluster, realCluster, expand);
     }
 
     bool constructGroup(const IPropertyTree &cluster, const char *altName, IPropertyTree *oldEnvCluster, GroupType groupType, bool force, StringBuffer &messages)
@@ -9699,9 +9700,7 @@ class CInitGroups
         if (altName)
             gname.clear().append(altName).toLowerCase();
 
-        VStringBuffer xpath("Group[@name=\"%s\"]", gname.str());
-        IPropertyTree *existingClusterGroup = groupsconnlock.conn->queryRoot()->queryPropTree(xpath.str()); // 'live' cluster group
-
+        IPropertyTree *existingClusterGroup = queryExistingGroup(gname);
         bool matchOldEnv = false;
         Owned<IPropertyTree> newClusterGroup = createClusterGroupFromEnvCluster(groupType, cluster, defDir, realCluster, true);
         bool matchExisting = !force && clusterGroupCompare(newClusterGroup, existingClusterGroup);
@@ -9767,7 +9766,7 @@ class CInitGroups
                     VStringBuffer gname("hthor__%s", groupname);
                     if (ins>1)
                         gname.append('_').append(ins);
-                    Owned<IPropertyTree> clusterGroup = createClusterGroup(grp_hthor, { na }, nullptr, cluster, true, false);
+                    Owned<IPropertyTree> clusterGroup = createClusterGroup(grp_hthor, { na }, nullptr, &cluster, true, false);
                     addClusterGroup(gname.str(), clusterGroup.getClear(), true);
                 }
             }
@@ -10006,13 +10005,86 @@ public:
             }
         }
     }
+
+    IPropertyTree * createStorageGroup(const char * name, size32_t size, const char * path)
+    {
+        std::vector<std::string> hosts(size, "localhost");
+        return createClusterGroup(grp_unknown, hosts, path, nullptr, false, false);
+    }
+
+    void ensureStorageGroup(bool force, const char * name, unsigned numDevices, const char * path, StringBuffer & messages)
+    {
+        IPropertyTree *existingClusterGroup = queryExistingGroup(name);
+        Owned<IPropertyTree> newClusterGroup = createStorageGroup(name, numDevices, path);
+        bool matchExisting = clusterGroupCompare(newClusterGroup, existingClusterGroup);
+        if (!existingClusterGroup || !matchExisting)
+        {
+            if (!existingClusterGroup)
+            {
+                VStringBuffer msg("New cluster layout for cluster %s", name);
+                UWARNLOG("%s", msg.str());
+                messages.append(msg).newline();
+                addClusterGroup(name, newClusterGroup.getClear(), false);
+            }
+            else if (force)
+            {
+                VStringBuffer msg("Forcing new group layout for storage plane %s", name);
+                UWARNLOG("%s", msg.str());
+                messages.append(msg).newline();
+                addClusterGroup(name, newClusterGroup.getClear(), false);
+            }
+            else
+            {
+                VStringBuffer msg("Active cluster '%s' group layout does not match storage plane definition", name);
+                UWARNLOG("%s", msg.str());
+                messages.append(msg).newline();
+            }
+        }
+    }
+
+    void constructStorageGroups(bool force, StringBuffer &messages)
+    {
+        IPropertyTree & global = queryGlobalConfig();
+        IPropertyTree * storage = global.queryPropTree("storage");
+        if (storage)
+        {
+            Owned<IPropertyTreeIterator> planes = storage->getElements("planes");
+            ForEach(*planes)
+            {
+                IPropertyTree & plane = planes->query();
+                const char * name = plane.queryProp("@name");
+                if (isEmptyString(name))
+                    continue;
+
+                //Lower case the group name - see CnamedGroupStore::dolookup which lower cases before resolving.
+                StringBuffer gname;
+                gname.append(name).toLowerCase();
+
+                //Two main types of storage plane - with a host group (bare metal) and without.
+                IPropertyTree *existingGroup = queryExistingGroup(gname);
+                const char * hosts = plane.queryProp("@hosts");
+                const char * prefix = plane.queryProp("@prefix");
+                if (hosts)
+                {
+                    IPropertyTree *existingClusterGroup = queryExistingGroup(gname);
+                    if (!existingClusterGroup)
+                        UNIMPLEMENTED_X("Bare metal storage planes not yet supported");
+                }
+                else
+                {
+                    unsigned numDevices = plane.getPropInt("@numDevices", 1);
+                    ensureStorageGroup(force, gname, numDevices, prefix, messages);
+                }
+            }
+        }
+    }
     IGroup *getGroupFromCluster(const char *type, const IPropertyTree &cluster, bool expand)
     {
         loadMachineMap();
         GroupType gt = getGroupType(type);
         return getGroupFromCluster(gt, cluster, expand);
     }
-    IPropertyTree *queryRawGroup(const char *name)
+    IPropertyTree *queryExistingGroup(const char *name)
     {
         VStringBuffer xpath("Group[@name=\"%s\"]", name);
         return groupsconnlock.conn->queryRoot()->queryPropTree(xpath.str());
@@ -10025,6 +10097,21 @@ void initClusterGroups(bool force, StringBuffer &response, IPropertyTree *oldEnv
     init.constructGroups(force, response, oldEnvironment);
 }
 
+void initClusterAndStoragePlaneGroups(bool force, IPropertyTree *oldEnvironment, unsigned timems)
+{
+    CInitGroups init(timems);
+
+    StringBuffer response;
+    init.constructGroups(force, response, oldEnvironment);
+    if (response.length())
+        PROGLOG("DFS group initialization : %s", response.str()); // should this be a syslog?
+
+    response.clear();
+    init.constructStorageGroups(false, response);
+    if (response.length())
+        PROGLOG("StoragePlane group initialization : %s", response.str()); // should this be a syslog?
+}
+
 bool resetClusterGroup(const char *clusterName, const char *type, bool spares, StringBuffer &response, unsigned timems)
 {
     CInitGroups init(timems);
@@ -10069,7 +10156,7 @@ static IGroup *getClusterNodeGroup(const char *clusterName, const char *type, bo
         throwStringExceptionV(0, "Failed to get group for '%s' cluster '%s'", type, clusterName);
     if (!expandedClusterGroup->equals(nodeGroup))
     {
-        IPropertyTree *rawGroup = init.queryRawGroup(nodeGroupName);
+        IPropertyTree *rawGroup = init.queryExistingGroup(nodeGroupName);
         if (!rawGroup)
             throwUnexpectedX("missing node group");
         unsigned nodesSwapped = rawGroup->getPropInt("@nodesSwapped");

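For context, a minimal storage configuration that the new constructStorageGroups() walks might look like the sketch below (the plane name and paths are hypothetical); the name is lower-cased before the group lookup, and a plane without a hosts group becomes a group of numDevices "localhost" entries rooted at prefix:

  storage:
    planes:
    - name: dataPlane              # looked up / registered as group "dataplane"
      prefix: /var/lib/HPCCSystems/hpcc-data
      numDevices: 2                # resulting group contains two localhost entries
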
+ 2 - 0
dali/base/dadfs.hpp

@@ -792,9 +792,11 @@ extern da_decl IDaliServer *createDaliDFSServer(IPropertyTree *config); // calle
 
 // to initialize clustergroups after clusters change in the environment
 extern da_decl void initClusterGroups(bool force, StringBuffer &response, IPropertyTree *oldEnvironment, unsigned timems=INFINITE);
+extern da_decl void initClusterAndStoragePlaneGroups(bool force, IPropertyTree *oldEnvironment, unsigned timems=INFINITE);
 extern da_decl bool resetClusterGroup(const char *clusterName, const char *type, bool spares, StringBuffer &response, unsigned timems=INFINITE);
 extern da_decl bool addClusterSpares(const char *clusterName, const char *type, const std::vector<std::string> &hosts, StringBuffer &response, unsigned timems=INFINITE);
 extern da_decl bool removeClusterSpares(const char *clusterName, const char *type, const std::vector<std::string> &hosts, StringBuffer &response, unsigned timems=INFINITE);
+
 // should poss. belong in lib workunit
 extern da_decl StringBuffer &getClusterGroupName(const IPropertyTree &cluster, StringBuffer &groupName);
 extern da_decl StringBuffer &getClusterSpareGroupName(const IPropertyTree &cluster, StringBuffer &groupName);

+ 1 - 4
dali/base/dasds.cpp

@@ -6389,10 +6389,7 @@ void CCovenSDSManager::loadStore(const char *storeName, const bool *abort)
     initializeInternals(conn->queryRoot());
     conn.clear();
     bool forceGroupUpdate = config.getPropBool("DFS/@forceGroupUpdate");
-    StringBuffer response;
-    initClusterGroups(forceGroupUpdate, response, oldEnvironment);
-    if (response.length())
-        PROGLOG("DFS group initialization : %s", response.str()); // should this be a syslog?
+    initClusterAndStoragePlaneGroups(forceGroupUpdate, oldEnvironment);
 }
 
 void CCovenSDSManager::saveStore(const char *storeName, bool currentEdition)

+ 2 - 1
dali/server/daserver.cpp

@@ -401,7 +401,8 @@ int main(int argc, const char* argv[])
                 port = atoi(argv[++i]);
             else if (streq(argv[i],"--rank") || streq(argv[i],"-r"))
                 myrank = atoi(argv[++i]);
-            else {
+            else if (!startsWith(argv[i],"--config"))
+            {
                 usage();
                 return EXIT_FAILURE;
             }

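The daserver change above simply stops the argument loop from rejecting options that begin with --config (for example, a hypothetical invocation such as daserver --config=/etc/HPCCSystems/dali.yaml); the option itself is presumably consumed by the shared configuration-loading code rather than parsed here.
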
+ 43 - 0
helm/examples/storageplanes/baremetal.yaml

@@ -0,0 +1,43 @@
+#An example of a complex storage definition for a bare metal system
+hostgroups:
+  thor400: [ node1, node2, node3, node4, node5, node6, node400 ]
+  thor400m:
+    group: thor400
+    delta: 1
+  thor20_1:
+    group: thor400
+    count: 20
+    offset: 0
+  thor20_2:
+    group: thor400
+    count: 20
+    offset: 20
+  thor100_4:
+    group: thor400
+    count: 100
+    offset: 300
+
+storage:
+  planes:
+  #Bare metal system with attached storage
+  - name: thor400
+    prefix: /var/lib/hpccsystems/hpcc-data
+    hosts: thor400
+    replication: [ thor400mirror ]
+    #numDevices: count(hosts)
+  - name: thor400mirror
+    prefix: /var/lib/hpccsystems/hpcc-mirror
+    hosts: thor400mirror
+    #Does any other information about the replication policy need to be included?  I don't think it does....
+
+  - name: thor100_4
+    prefix: /var/lib/hpccsystems/hpcc-data
+    hosts: thor100_4
+    replication: [ azureBlobPlane ]
+
+  - name: azureBlobPlane
+    prefix: azure://ghallidaystorage      # Not sure if there should be a different tag for prefix v mount
+    secret: azure-ghallidaystorage
+
+  - name: localSpill
+    prefix: /tmp/hpcc/spill

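Reading the hostgroups section above together with the schema later in this commit, group/count/offset/delta presumably derive subsets of an existing host list, for example (host numbering is illustrative):

  thor20_2:          # hosts 21..40 of thor400 (offset 20, count 20)
    group: thor400
    count: 20
    offset: 20
  thor400m:          # the thor400 hosts cycled round by one position (delta 1)
    group: thor400
    delta: 1
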
+ 58 - 0
helm/examples/storageplanes/complex.yaml

@@ -0,0 +1,58 @@
+#An example of various potential uses of storage planes
+storage:
+  planes:
+  #Some examples of the different kinds of storage planes that are supported
+  #Store data on azure blobs
+  - name: azureBlobPlane
+    prefix: azure://ghallidaystorage      # Not sure if this should be different from the mount.
+    secret: azure-ghallidaystorage
+
+  - name: azureDllPlane
+    prefix: azure://ghallidaydllstore      # Not sure if this should be different from the mount.
+    secret: azure-ghallidaydllstore
+
+  #Store data on aws s3 buckets
+  - name: s3BucketPlane
+    prefix: s3://...
+
+  #Single node with data mounted, and mirror mounted at a different location (could be a different disk)
+  - name: localDataPlane
+    prefix: /var/lib/hpccsystems/hpcc-data
+    replication: [ localMirrorPlane, localMirror2Plane ]
+    pvc: "local-data-pvc"
+  - name: localMirrorPlane
+    prefix: /var/lib/hpccsystems/hpcc-mirror
+    pvc: "local-mirror-pvc"
+  - name: localMirror2Plane
+    prefix: /var/lib/hpccsystems/hpcc-mirror2
+    pvc: "local-mirror2-pvc"
+
+  #Multiple nodes, data on a local mount (all nodes mount the same logical file system).
+  #Essentially identical to localDataPlane above
+  - name: nasPlane
+    prefix: /var/lib/hpccsystems/hpcc-data
+    replication: [ nasMirrorPlane ]
+    pvc: "nas-data-pvc"
+  - name: nasMirrorPlane
+    prefix: /var/lib/hpccsystems/hpcc-mirror
+    pvc: "nas-mirror-pvc"
+
+  #Multiple nodes, data on multiple local mounts (all nodes mount the same logical file system).
+  #Allows an array of NFS servers to be used to store the data.  Would also potentially work for
+  #local case with large numbers of local disks.
+  #Mount point is /var/lib/hpccsystems/hpcc-data/d<device-number>
+  - name: nasArrayPlane
+    prefix: /var/lib/hpccsystems/hpcc-data
+    pvc: "nas-array-data-pvc"
+    numDevices: 100
+    includeDeviceInPath: true
+
+  # A locally mounted directory
+  - name: localNvmePlane
+    prefix: /tmp/hpcc/spill
+
+  #The following defines the default data plane (used if a component does not specify one)
+  dataStorage:
+    plane: localDataPlane
+  dllStorage:
+    plane: azureDllPlane

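The dataStorage and dllStorage entries at the end of this example choose the default planes for those categories; per the template changes below, planes named by dllStorage.plane or daliStorage.plane are excluded from the generated data-plane list and are mounted by the dedicated dll/dali volume helpers instead.
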
+ 190 - 12
helm/hpcc/templates/_helpers.tpl

@@ -48,6 +48,15 @@ Pass in root as .
 */}}
 {{- define "hpcc.generateGlobalConfigMap" -}}
 {{- $local := dict "defaultEsp" "" -}}
+{{- /*Create local variables which always exist to avoid having to check if intermediate key values exist*/ -}}
+{{- $storage := (.Values.storage | default dict) -}}
+{{- $planes := ($storage.planes | default list) -}}
+{{- $dataStorage := ($storage.dataStorage | default dict) -}}
+{{- $spillStorage := ($storage.spillStorage | default dict) -}}
+{{- $daliStorage := ($storage.daliStorage | default dict) -}}
+{{- $dllStorage := ($storage.dllStorage | default dict) -}}
+{{- $daliStoragePlane := ($daliStorage.plane | default "") -}}
+{{- $dllStoragePlane := ($dllStorage.plane | default "") -}}
 imageVersion: {{ required "Please specify .global.image.version" .Values.global.image.version | quote }}
 singleNode: {{ .Values.global.singleNode | default false }}
 defaultEsp: {{ .Values.global.defaultEsp | default ""}}
@@ -57,15 +66,22 @@ esp:
 {{ end -}}
 secretTimeout: {{ .Values.secrets.timeout | default 300 }}
 storage:
-  ##The following is a temporary solution to allow blob storage to be tested
-  ##This will be completely rewritten and restructured to encompass the idea of multiple storage planes.
-  ##The source of the information is likely to be move to .Values.storage rather than .Values.global
-  default:
-{{- if .Values.global.defaultDataPath }}
-    data: {{ .Values.global.defaultDataPath }}
+  planes:
+{{- /*Generate entries for each data plane (removing the pvc).  Exclude the planes used for dlls and dali.*/ -}}
+{{- range $plane := $planes -}}
+ {{- if and (ne $plane.name $daliStoragePlane) (ne $plane.name $dllStoragePlane) -}}
+  - name: {{ $plane.name | quote }}
+{{ toYaml (unset (unset (deepCopy $plane) "name") "pvc")| indent 4 }}
+ {{- end }}
+{{- end }}
+{{- /* Add implicit planes if data or spill storage plane not specified*/ -}}
+{{- if not $dataStorage.plane }}
+  - name: hpcc-data-plane
+    mount: {{ .Values.global.defaultDataPath | default "/var/lib/HPCCSystems/hpcc-data" | quote }}
 {{- end }}
-{{- if .Values.global.defaultMirrorPath }}
-    mirror: {{ .Values.global.defaultMirrorPath }}
+{{- if not $spillStorage.plane }}
+  - name: hpcc-spill-plane
+    mount: {{ .Values.global.defaultSpillPath | default "/var/lib/HPCCSystems/hpcc-spill" | quote }}
 {{- end }}
 {{- end -}}
 
@@ -101,36 +117,161 @@ Add ConfigMap volume for a component
 
 {{/*
 Add data volume mount
+If any storage planes are defined that name PVCs, they will be mounted
 */}}
 {{- define "hpcc.addDataVolumeMount" -}}
+{{- /*Create local variables which always exist to avoid having to check if intermediate key values exist*/ -}}
+{{- $storage := (.root.Values.storage | default dict) -}}
+{{- $planes := ($storage.planes | default list) -}}
+{{- $dataStorage := ($storage.dataStorage | default dict) -}}
+{{- $daliStorage := ($storage.daliStorage | default dict) -}}
+{{- $dllStorage := ($storage.dllStorage | default dict) -}}
+{{- $daliStoragePlane := ($daliStorage.plane | default "") -}}
+{{- $dllStoragePlane := ($dllStorage.plane | default "") -}}
+{{- range $plane := $planes -}}
+ {{- if $plane.pvc -}}
+  {{- if and (ne $plane.name $daliStoragePlane) (ne $plane.name $dllStoragePlane) -}}
+   {{- $num := int ( $plane.numDevices | default 1 ) -}}
+   {{- if le $num 1 }}
+- name: {{ $plane.name }}-pv
+  mountPath: {{ $plane.prefix | quote }}
+   {{- else }}
+    {{- range $elem := untilStep 1 (int (add $num 1)) 1 }}
+- name: {{ $plane.name }}-pv-many-{{- $elem }}
+  mountPath: {{ printf "%s/d%d" $plane.prefix $elem | quote }}
+    {{- end }}
+   {{- end }}
+  {{- end }}
+ {{- end }}
+{{- end }}
+{{- if (not $dataStorage.plane) }}
 - name: datastorage-pv
   mountPath: "/var/lib/HPCCSystems/hpcc-data"
+{{- end }}
 {{- end -}}
 
 {{/*
 Add data volume
 */}}
 {{- define "hpcc.addDataVolume" -}}
+{{- /*Create local variables which always exist to avoid having to check if intermediate key values exist*/ -}}
+{{- $storage := (.root.Values.storage | default dict) -}}
+{{- $planes := ($storage.planes | default list) -}}
+{{- $dataStorage := ($storage.dataStorage | default dict) -}}
+{{- $daliStorage := ($storage.daliStorage | default dict) -}}
+{{- $dllStorage := ($storage.dllStorage | default dict) -}}
+{{- $daliStoragePlane := ($daliStorage.plane | default "") -}}
+{{- $dllStoragePlane := ($dllStorage.plane | default "") -}}
+{{- range $plane := $planes -}}
+ {{- if $plane.pvc -}}
+  {{- if and (ne $plane.name $daliStoragePlane) (ne $plane.name $dllStoragePlane) -}}
+   {{- $num := int ( $plane.numDevices | default 1 ) -}}
+   {{- $pvc := $plane.pvc | required (printf "pvc for %s not supplied" $plane.name) }}
+   {{- if le $num 1 }}
+- name: {{ $plane.name }}-pv
+  persistentVolumeClaim:
+    claimName: {{ $pvc }}
+   {{- else }}
+    {{- range $elem := until $num }}
+- name: {{ $plane.name }}-pv-many-{{- add $elem 1 }}
+  persistentVolumeClaim:
+    claimName: {{ $pvc }}-{{- add $elem 1 }}
+    {{- end }}
+   {{- end -}}
+  {{- end }}
+ {{- end }}
+{{- end -}}
+{{- if (not $dataStorage.plane) }}
 - name: datastorage-pv
   persistentVolumeClaim:
-    claimName: {{ .Values.storage.dataStorage.existingClaim | default (printf "%s-datastorage-pvc" (include "hpcc.fullname" .)) }}
+    claimName: {{ $dataStorage.existingClaim | default (printf "%s-datastorage-pvc" (include "hpcc.fullname" .root )) }}
+{{- end }}
 {{- end -}}
 
 {{/*
-Add dll volume mount
+Add dll volume mount - if default plane is used, or the dll storage plane specifies a pvc
 */}}
 {{- define "hpcc.addDllVolumeMount" -}}
+{{- $storage := (.Values.storage | default dict) -}}
+{{- $planes := ($storage.planes | default list) -}}
+{{- $dllStorage := ($storage.dllStorage | default dict) -}}
+{{- if $dllStorage.plane -}}
+ {{- range $plane := $planes -}}
+  {{- if and ($plane.pvc) (eq $plane.name $dllStorage.plane) -}}
+- name: dllstorage-pv
+  mountPath: {{ $plane.prefix | quote }}
+  {{- end -}}
+ {{- end -}}
+{{- else -}}
 - name: dllstorage-pv
   mountPath: "/var/lib/HPCCSystems/queries"
 {{- end -}}
+{{- end -}}
 
 {{/*
-Add dll volume
+Add dll volume - if default plane is used, or the dll storage plane specifies a pvc
 */}}
 {{- define "hpcc.addDllVolume" -}}
+{{- /*Create local variables which always exist to avoid having to check if intermediate key values exist*/ -}}
+{{- $storage := (.Values.storage | default dict) -}}
+{{- $planes := ($storage.planes | default list) -}}
+{{- $dllStorage := ($storage.dllStorage | default dict) -}}
+{{- if $dllStorage.plane -}}
+ {{- range $plane := $planes -}}
+  {{- if and ($plane.pvc) (eq $plane.name $dllStorage.plane) -}}
+- name: dllstorage-pv
+  persistentVolumeClaim:
+    claimName: {{ $plane.pvc }}
+  {{- end }}
+ {{- end }}
+{{- else -}}
 - name: dllstorage-pv
   persistentVolumeClaim:
-    claimName: {{ .Values.storage.dllStorage.existingClaim | default (printf "%s-dllstorage-pvc" (include "hpcc.fullname" .)) }}
+    claimName: {{ $dllStorage.existingClaim | default (printf "%s-dllstorage-pvc" (include "hpcc.fullname" .)) }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Add dali volume mount - if default plane is used, or the dali storage plane specifies a pvc
+*/}}
+{{- define "hpcc.addDaliVolumeMount" -}}
+{{- $storage := (.Values.storage | default dict) -}}
+{{- $planes := ($storage.planes | default list) -}}
+{{- $daliStorage := ($storage.daliStorage | default dict) -}}
+{{- if $daliStorage.plane -}}
+ {{- range $plane := $planes -}}
+  {{- if and ($plane.pvc) (eq $plane.name $daliStorage.plane) -}}
+- name: dalistorage-pv
+  mountPath: {{ $plane.prefix | quote }}
+  {{- end -}}
+ {{- end -}}
+{{- else -}}
+- name: dalistorage-pv
+  mountPath: "/var/lib/HPCCSystems/dalistorage"
+{{- end -}}
+{{- end -}}
+
+{{/*
+Add dali volume - if default plane is used, or the dali storage plane specifies a pvc
+*/}}
+{{- define "hpcc.addDaliVolume" -}}
+{{- /*Create local variables which always exist to avoid having to check if intermediate key values exist*/ -}}
+{{- $storage := (.Values.storage | default dict) -}}
+{{- $planes := ($storage.planes | default list) -}}
+{{- $daliStorage := ($storage.daliStorage | default dict) -}}
+{{- if $daliStorage.plane -}}
+ {{- range $plane := $planes -}}
+  {{- if and ($plane.pvc) (eq $plane.name $daliStorage.plane) -}}
+- name: dalistorage-pv
+  persistentVolumeClaim:
+    claimName: {{ $plane.pvc }}
+  {{- end }}
+ {{- end }}
+{{- else -}}
+- name: dalistorage-pv
+  persistentVolumeClaim:
+    claimName: {{ $daliStorage.existingClaim | default (printf "%s-dalistorage-pvc" (include "hpcc.fullname" .)) }}
+{{- end -}}
 {{- end -}}
 
 {{/*
@@ -167,6 +308,43 @@ Add Secret volume for a component
 {{- end -}}
 
 {{/*
+Return a value indicating whether a storage plane is defined or not.
+*/}}
+{{- define "hpcc.isValidStoragePlane" -}}
+{{- $search := .search -}}
+{{- $storage := (.root.Values.storage | default dict) -}}
+{{- $planes := ($storage.planes | default list) -}}
+{{- $dataStorage := ($storage.dataStorage | default dict) -}}
+{{- /* If storage.dataStorage.plane is defined, the implicit plane hpcc-data-plane is not defined */ -}}
+{{- $done := dict "matched" (and (not $dataStorage.plane) (eq $search "hpcc-data-plane")) -}}
+{{- range $plane := $planes -}}
+ {{- if eq $search $plane.name -}}
+ {{- $_ := set $done "matched" true -}}
+ {{- end -}}
+{{- end -}}
+{{- $done.matched | ternary "true" "false" -}}
+{{- end -}}
+
+{{/*
+Check that the storage and spill planes for a component exist
+*/}}
+{{- define "hpcc.checkDefaultStoragePlane" -}}
+{{- if (hasKey .me "storagePlane") }}
+ {{- $search := .me.storagePlane -}}
+ {{- if ne (include "hpcc.isValidStoragePlane" (dict "search" $search "root" .root)) "true" -}}
+  {{- $_ := fail (printf "storage data plane %s for %s is not defined" $search .me.name ) }}
+ {{- end -}}
+{{- end }}
+{{- if (hasKey .me "spillPlane") }}
+ {{- $search := .me.spillPlane -}}
+ {{- if ne (include "hpcc.isValidStoragePlane" (dict "search" $search "root" .root)) "true" -}}
+  {{- $_ := fail (printf "storage spill plane %s for %s is not defined" $search .me.name ) }}
+ {{- end -}}
+{{- end }}
+{{- end -}}
+
+
+{{/*
 Add config arg for a component
 */}}
 {{- define "hpcc.configArg" -}}

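As a sketch of what the new multi-device logic in hpcc.addDataVolumeMount / hpcc.addDataVolume renders, a hypothetical plane such as

  - name: examplePlane
    prefix: /var/lib/hpccsystems/example
    pvc: example-pvc
    numDevices: 2

would (assuming it is neither the dali nor the dll plane) produce mounts examplePlane-pv-many-1 and examplePlane-pv-many-2 at /var/lib/hpccsystems/example/d1 and /d2, backed by persistentVolumeClaims example-pvc-1 and example-pvc-2; with the default numDevices of 1 a single examplePlane-pv mount at the prefix is generated instead.
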
+ 3 - 6
helm/hpcc/templates/dali.yaml

@@ -28,16 +28,13 @@ spec:
         volumeMounts:
         - name: {{ .name }}-configmap-volume
           mountPath: /etc/config
-        - name: dalistorage-pv
-          mountPath: "/var/lib/HPCCSystems/dalistorage"
+{{ include "hpcc.addDaliVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" ) ) | indent 8 }}
       volumes:
       - name: {{ .name }}-configmap-volume
         configMap:
           name: {{ .name }}-configmap
-      - name: dalistorage-pv
-        persistentVolumeClaim:
-          claimName: {{ $.Values.storage.daliStorage.existingClaim | default (printf "%s-%s-dalistorage-pvc" (include "hpcc.fullname" $) .name) }}
+{{ include "hpcc.addDaliVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" ) ) | indent 6 }}
 ---
 kind: ConfigMap 
@@ -70,7 +67,7 @@ spec:
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: {{ printf "%s-%s-dalistorage-pvc" (include "hpcc.fullname" $) .name }}
+  name: {{ printf "%s-dalistorage-pvc" (include "hpcc.fullname" $) }}
   labels:
     app.kubernetes.io/name: {{ printf "%s-%s-dalistorage-pvc" (include "hpcc.fullname" $) .name }}
     app.kubernetes.io/instance: {{ $.Release.Name }}

+ 7 - 6
helm/hpcc/templates/eclagent.yaml

@@ -1,5 +1,6 @@
 {{ range $.Values.eclagent -}}
 {{- if not .disabled -}}
+{{- include "hpcc.checkDefaultStoragePlane" (dict "root" $ "me" . )}}
 {{- $apptype := .type | default "hthor" -}} 
 apiVersion: apps/v1
 kind: Deployment
@@ -30,12 +31,12 @@ spec:
 {{ include "hpcc.addImageAttrs" (dict "root" $ "me" .) | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 8 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 8 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 8 }}
       volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 6 }}
-{{ include "hpcc.addDataVolume" $ | indent 6 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 6 }}
 {{ include "hpcc.addDllVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 6 }}
 ---
@@ -83,12 +84,12 @@ data:
                      ]
             volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 12 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 12 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 12 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 12 }}
           volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 10 }}
-{{ include "hpcc.addDataVolume" $ | indent 10 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 10 }}
 {{ include "hpcc.addDllVolume" $ | indent 10 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 10 }}
           restartPolicy: Never

+ 6 - 6
helm/hpcc/templates/eclccserver.yaml

@@ -31,14 +31,14 @@ spec:
 {{ include "hpcc.addImageAttrs" (dict "root" $ "me" .) | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 8 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 8 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" ) ) | indent 8 }}
         - name: "hpccbundles"
           mountPath: "/home/hpcc/.HPCCSystems"
       volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 6 }}
-{{ include "hpcc.addDataVolume" $ | indent 6 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 6 }}
 {{ include "hpcc.addDllVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" ) ) | indent 6 }}
       - name: hpccbundles
@@ -87,12 +87,12 @@ data:
                      ]
             volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 12 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 12 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 12 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" ) ) | indent 12 }}
           volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 10 }}
-{{ include "hpcc.addDataVolume" $ | indent 10 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 10 }}
 {{ include "hpcc.addDllVolume" $ | indent 10 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" ) ) | indent 10 }}
           restartPolicy: Never

+ 3 - 3
helm/hpcc/templates/esp.yaml

@@ -28,12 +28,12 @@ spec:
 {{ include "hpcc.addImageAttrs" (dict "root" $ "me" .) | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 8 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 8 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "storage" ) ) | indent 8 }}
       volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 6 }}
-{{ include "hpcc.addDataVolume" $ | indent 6 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 6 }}
 {{ include "hpcc.addDllVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "storage" ) ) | indent 6 }}
 ---

+ 4 - 3
helm/hpcc/templates/localroxie.yaml

@@ -1,5 +1,6 @@
 {{ range $roxie := $.Values.roxie -}}
 {{- if not $roxie.disabled  -}}
+{{- include "hpcc.checkDefaultStoragePlane" (dict "root" $ "me" $roxie )}}
 {{- if $roxie.localAgent -}}
 {{- $name := $roxie.name -}}
 {{- $servername := printf "%s-server" $roxie.name -}}
@@ -47,12 +48,12 @@ spec:
 {{ include "hpcc.addImageAttrs" (dict "root" $ "me" .) | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 8 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 8 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 8 }}
       volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 6 }}
-{{ include "hpcc.addDataVolume" $ | indent 6 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 6 }}
 {{ include "hpcc.addDllVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 6 }}
 ---

+ 7 - 6
helm/hpcc/templates/roxie.yaml

@@ -1,5 +1,6 @@
 {{ range $roxie := $.Values.roxie -}}
 {{- if not $roxie.disabled -}}
+{{- include "hpcc.checkDefaultStoragePlane" (dict "root" $ "me" $roxie )}}
 {{- if not $roxie.localAgent -}}
 {{- $toponame := printf "%s-toposerver" $roxie.name -}}
 {{- $numChannels := $roxie.numChannels | int | default 1 -}}
@@ -128,12 +129,12 @@ spec:
 {{ include "hpcc.addImageAttrs" (dict "root" $ "me" $roxie) | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 8 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 8 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 8 }}
       volumes:
 {{ include "hpcc.addConfigMapVolume" $roxie | indent 6 }}
-{{ include "hpcc.addDataVolume" $ | indent 6 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 6 }}
 {{ include "hpcc.addDllVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 6 }}
 ---
@@ -192,12 +193,12 @@ spec:
 {{ include "hpcc.addImageAttrs" (dict "root" $ "me" $roxie) | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }}
-{{ include "hpcc.addDataVolumeMount" $roxie | indent 8 }}
-{{ include "hpcc.addDllVolumeMount" $roxie | indent 8 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" $roxie ) | indent 8 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 8 }}
       volumes:
 {{ include "hpcc.addConfigMapVolume" $roxie | indent 6 }}
-{{ include "hpcc.addDataVolume" $ | indent 6 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" $roxie ) | indent 6 }}
 {{ include "hpcc.addDllVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 6 }}
 

+ 16 - 15
helm/hpcc/templates/thor.yaml

@@ -1,5 +1,6 @@
 {{ range $thor := $.Values.thor -}}
 {{- if not $thor.disabled -}}
+{{- include "hpcc.checkDefaultStoragePlane" (dict "root" $ "me" $thor) }}
 {{- $hthorName := printf "%s-hthor" .name }}
 {{- $eclAgentName := printf "%s-agent" .name }}
 {{- $thorAgentName := printf "%s-thoragent" .name }}
@@ -42,12 +43,12 @@ spec:
 {{ include "hpcc.addImageAttrs" (dict "root" $ "me" .) | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 8 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 8 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 8 }}
       volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 6 }}
-{{ include "hpcc.addDataVolume" $ | indent 6 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 6 }}
 {{ include "hpcc.addDllVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 6 }}
 ---
@@ -79,12 +80,12 @@ spec:
 {{ include "hpcc.addImageAttrs" (dict "root" $ "me" .) | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 8 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 8 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 8 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 8 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 8 }}
       volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 6 }}
-{{ include "hpcc.addDataVolume" $ | indent 6 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 6 }}
 {{ include "hpcc.addDllVolume" $ | indent 6 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 6 }}
 ---
@@ -160,12 +161,12 @@ data:
                      ]
             volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 12 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 12 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 12 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 12 }}
           volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 10 }}
-{{ include "hpcc.addDataVolume" $ | indent 10 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 10 }}
 {{ include "hpcc.addDllVolume" $ | indent 10 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 10 }}
           restartPolicy: Never
@@ -198,12 +199,12 @@ data:
                   ]
             volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 12 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 12 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 12 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 12 }}
           volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 10 }}
-{{ include "hpcc.addDataVolume" $ | indent 10 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 10 }}
 {{ include "hpcc.addDllVolume" $ | indent 10 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 10 }}
           restartPolicy: Never
@@ -234,12 +235,12 @@ data:
                   ]
             volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDataVolumeMount" . | indent 12 }}
-{{ include "hpcc.addDllVolumeMount" . | indent 12 }}
+{{ include "hpcc.addDataVolumeMount" (dict "root" $ "me" . ) | indent 12 }}
+{{ include "hpcc.addDllVolumeMount" $ | indent 12 }}
 {{ include "hpcc.addSecretVolumeMounts" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 12 }}
           volumes:
 {{ include "hpcc.addConfigMapVolume" . | indent 10 }}
-{{ include "hpcc.addDataVolume" $ | indent 10 }}
+{{ include "hpcc.addDataVolume" (dict "root" $ "me" . ) | indent 10 }}
 {{ include "hpcc.addDllVolume" $ | indent 10 }}
 {{ include "hpcc.addSecretVolumes" (dict "root" $ "categories" (list "all" "ecl" "storage" ) ) | indent 10 }}
           restartPolicy: Never

+ 154 - 1
helm/hpcc/values.schema.json

@@ -5,6 +5,9 @@
     "global": {
       "$ref": "#/definitions/global"
     },
+    "hostgroups": {
+      "$ref": "#/definitions/hostgroups"
+    },
     "storage": {
       "type": "object",
       "properties": {
@@ -16,6 +19,19 @@
         },
         "daliStorage": {
           "$ref": "#/definitions/storageType"
+        },
+        "spillStorage": {
+          "oneOf": [
+            {
+              "$ref": "#/definitions/storageType"
+            },
+            {
+              "type": "null"
+            }
+          ]
+        },
+        "planes": {
+          "$ref": "#/definitions/storagePlanes"
         }
       },
       "additionalProperties": false
@@ -74,6 +90,14 @@
               "type": "string",
               "description": "The (optional) file prefix to add to relative filenames"
             },
+            "storagePlane": {
+              "description": "The default storage plane to write data files to",
+              "type": "string"
+            },
+            "spillPlane": {
+              "description": "The storage plane to write spill files to",
+              "type": "string"
+            },
             "required": [ "name" ]
           }
         ]
@@ -146,6 +170,9 @@
         "storageClass": {
           "type": "string"
         },
+        "plane": {
+          "type": "string"
+        },
         "existingClaim": {
           "type": "string"
         },
@@ -153,7 +180,67 @@
           "type": "boolean"
         }
       },
-      "required": [ "storageSize" ],
+      "anyOf": [
+        {
+          "required": [ "storageSize" ]
+        },
+        {
+          "required": [ "plane" ]
+        }
+      ],
+      "additionalProperties": false
+    },
+    "storagePlanes": {
+      "description": "storage plane definitions",
+      "oneOf": [
+        {
+          "type": "array",
+          "items": { "$ref": "#/definitions/storagePlane" }
+        },
+        {
+          "type": "null"
+        }
+      ]
+    },
+    "storagePlane": {
+      "description": "information about an individual storage plane",
+      "type": "object",
+      "properties": {
+        "name": {
+          "description": "the name of the storage plane",
+          "type": "string"
+        },
+        "prefix": {
+          "description": "either the path for a local mount, or the url prefix",
+          "type": "string"
+        },
+        "secret": {
+          "description": "optional name of any secret required to access this storage plane",
+          "type": "string"
+        },
+        "pvc": {
+          "description": "optional name of the persistent volume claim for this plane",
+          "type": "string"
+        },
+        "hosts": {
+          "description": "optional name of the host group (for bare metal storage)",
+          "type": "string"
+        },
+        "numDevices": {
+          "description": "optional number of devices in the storage plane (default 1)",
+          "type": "integer"
+        },
+        "includeDeviceInPath": {
+          "description": "whether a directory based on the device number is appended to the path (may not be needed)",
+          "type": "boolean"
+        },
+        "replication": {
+          "description": "which planes (if any) the data is replicated onto (primarily bare metal)",
+          "type": "array",
+          "items": { "type": "string" }
+        }
+      },
+      "required": [ "name", "prefix" ],
       "additionalProperties": false
     },
     "secrets": {
@@ -167,6 +254,52 @@
         }
       ]
     },
+    "hostgroups": {
+      "oneOf": [
+        {
+          "type": "object",
+          "additionalProperties": {
+            "$ref": "#/definitions/hostgroup"
+          }
+        },
+        {
+          "type": "null"
+        }
+      ]
+    },
+    "hostgroup": {
+      "oneOf": [
+        {
+          "description": "a list of host names",
+          "type": "array",
+          "items": { "type": "string" }
+        },
+        {
+          "description": "a subset of an existing host group",
+          "type": "object",
+          "properties": {
+            "group": {
+              "description": "Name of the hostgroup to create a subset of",
+              "type": "string"
+            },
+            "count": {
+              "description": "Number of hosts in the subset",
+              "type": "integer"
+            },
+            "offset": {
+              "description": "Offset of the first host within the group",
+              "type": "integer"
+            },
+            "delta": {
+              "type": "integer",
+              "description": "Cycle offset to apply to the hosts"
+            }
+          },
+          "required": [ "group" ],
+          "additionalProperties": false
+        }
+      ]
+    },
     "logging": {
       "type": "object",
       "properties": {
@@ -277,6 +410,10 @@
         },
         "logging": {
           "$ref": "#/definitions/logging"
+        },
+        "storagePlane": {
+          "description": "The default storage plane to write data files to",
+          "type": "string"
         }
       }
     },
@@ -303,6 +440,14 @@
           "description": "Roxie query services",
           "type": "array",
           "items": { "$ref": "#/definitions/roxieservice" }
+        },
+        "storagePlane": {
+          "description": "The default storage plane to write data files to",
+          "type": "string"
+        },
+        "spillPlane": {
+          "description": "The storage plane to write spill files to",
+          "type": "string"
         }
       }
     },
@@ -377,6 +522,14 @@
         },
         "logging": {
           "$ref": "#/definitions/logging"
+        },
+        "storagePlane": {
+          "description": "The default storage plane to write data files to",
+          "type": "string"
+        },
+        "spillPlane": {
+          "description": "The storage plane to write spill files to",
+          "type": "string"
         }
       }
     }

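The new per-engine storagePlane/spillPlane properties tie a component to the planes above; a hypothetical thor entry using them might be:

  thor:
  - name: thor
    storagePlane: nasPlane          # must name a plane defined under storage.planes
    spillPlane: localNvmePlane      # otherwise hpcc.checkDefaultStoragePlane fails the render
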
+ 13 - 3
helm/hpcc/values.yaml

@@ -36,6 +36,16 @@ global:
 ## minikube and docker for desktop), or using NFS mounted storage.
 
 storage:
+  planes:
+  #   name: <required>
+  #   prefix: <path>                        # Root directory for accessing the plane (if pvc defined), or url to access plane.
+  #   numDevices: 1                         # number of devices that are part of the plane
+  #   replication: null                     # optional list indicating which planes the data should be replicated onto
+  #   includeDeviceInPath: false            # Is the device number appended to the mount for the physical path for a file?  (Only required in unusual situations)
+  #   hosts: <name>                         # Name of the host group for bare metal - must match the name of the storage plane.
+  #   secret: <secret-id>                   # what secret is required to access the files.  This could optionally become a list if required (or add secrets:).
+  #   options:                              # not sure if it is needed
+
   dllStorage:
     storageSize: 3Gi
     storageClass: ""
@@ -63,16 +73,16 @@ secrets:
   #timeout: 300 # timeout period for cached secrets.  Should be similar to the k8s refresh period.
 
   #Secret categories follow, remove the {} if a secret is defined in a section
-  storage: {}
+  storage:
     ## Secrets that are required for accessing storage.  Currently exposed in the engines, but in the future will
     ## likely be restricted to esp (when it becomes the meta-data provider)
     ## For example, to set the secret associated with the azure storage account "mystorageaccount" use
     ##azure-mystorageaccount: storage-myazuresecret
 
-  ecl: {}
+  ecl:
     ## Category for secrets published to all components that run ecl
 
-  all: {}
+  all:
     ## Category for secrets published to all components
 
 bundles: []

+ 97 - 0
helm/storage.rst
(File diff suppressed because it is too large)


+ 3 - 3
testing/helm/run.sh

@@ -4,7 +4,7 @@ options="--set global.image.version=latest"
 hpccchart=$scriptdir/../../helm/hpcc
 failed=0
 
-helm --version
+helm version
 echo Testing unmodified values file
 helm lint $hpccchart ${options} > results.txt 2> errors.txt
 if [ $? -ne 0 ]
@@ -35,10 +35,10 @@ do
    if [ $? -eq 0 ]
    then
       echo $file should have failed
+      failed=1
    else
-      echo $file failed
+      echo "$file failed - correctly"
       cat results.txt
-      failed=1
    fi
 done
 exit $failed

+ 3 - 9
testing/helm/tests/baremetal.yaml

@@ -6,7 +6,7 @@ hostgroups:
   thor20_1:
     group: thor400
     count: 20
-    offset: 0 }
+    offset: 0
   thor20_2:
     group: thor400
     count: 20
@@ -22,24 +22,18 @@ storage:
   - name: thor400
     prefix: /var/lib/hpccsystems/hpcc-data       # only used if the local host matches the host for the device
     hosts: thor400
-    replication: attachedThor400MirrorPlane
-    speed: 2000
-    remoteSpeed: 1000
+    replication: [ attachedThor400MirrorPlane ]
     #numDevices: count(hosts)
   - name: thor400mirror
     prefix: /var/lib/hpccsystems/hpcc-mirror       # only used if the local host matches the host for the device
     hosts: thor400mirror
     #Does any other information about the replication policy need to be included?  I don't think it does....
-    speed: 2000
-    remoteSpeed: 1000
 
   - name: thor100_4
     prefix: /var/lib/hpccsystems/hpcc-data       # only used if the local host matches the host for the device
     hosts: thor100_4
-    replication: azureBlobPlane
-    remoteSpeed: 2000
+    replication: [ azureBlobPlane ]
 
   - name: azureBlobPlane
     prefix: azure://ghallidaystorage      # Not sure if this should be different from the mount.
     secret: azure-ghallidaystorage
-    speed: 500

+ 1 - 3
testing/helm/tests/complex.yaml

@@ -5,12 +5,10 @@ storage:
   - name: azureBlobPlane
     prefix: azure://ghallidaystorage      # Not sure if this should be different from the mount.
     secret: azure-ghallidaystorage
-    speed: 500
 
   #Store data on aws s3 buckets
   - name: s3BucketPlane
     prefix: s3://...
-    speed: 400
 
   #Single node with data mounted, and mirror mounted at a different locations (could be a different disk)
   - name: localDataPlane
@@ -28,7 +26,7 @@ storage:
   #Essentially identical to localDataPlane above
   - name: nasPlane
     prefix: /var/lib/hpccsystems/hpcc-data
-    replication: nasMirrorPlane
+    replication: [ nasMirrorPlane ]
     pvc: nas-data-pvc
   - name: nasMirrorPlane
     prefix: /var/lib/hpccsystems/hpcc-mirror