View source code

Merge pull request #5810 from ghalliday/issue11074

HPCC-11074 Move text search queries over to new regression suite

Reviewed-By: Richard Chapman <rchapman@hpccsystems.com>
Richard Chapman 11 years ago
parent
commit
123425d49a
100 files changed with 2633 additions and 1356 deletions
  1. 4 4
      ecl/hql/hqlgram.y
  2. 1 0
      ecl/hql/hqlutil.cpp
  3. 1 1
      ecl/regress/dataset3.ecl
  4. 0 387
      testing/ecl/textsearch_ao.ecl
  5. 0 387
      testing/ecl/textsearch_no.ecl
  6. 0 40
      testing/regress/ecl/aggds1.ecl
  7. 19 0
      testing/regress/ecl/aggds1_hthor.ecl
  8. 19 0
      testing/regress/ecl/aggds1_roxie.ecl
  9. 19 0
      testing/regress/ecl/aggds1_thor.ecl
  10. 2 4
      testing/regress/ecl/aggds1seq.ecl
  11. 2 12
      testing/regress/ecl/aggds2.ecl
  12. 19 0
      testing/regress/ecl/aggds2_thor.ecl
  13. 2 1
      testing/regress/ecl/aggds2seq.ecl
  14. 2 26
      testing/regress/ecl/aggds3.ecl
  15. 19 0
      testing/regress/ecl/aggds3_thor.ecl
  16. 2 1
      testing/regress/ecl/aggds3seq.ecl
  17. 2 12
      testing/regress/ecl/aggds4.ecl
  18. 19 0
      testing/regress/ecl/aggds4_thor.ecl
  19. 2 1
      testing/regress/ecl/aggds4seq.ecl
  20. 2 25
      testing/regress/ecl/aggidx1.ecl
  21. 19 0
      testing/regress/ecl/aggidx1_thor.ecl
  22. 2 1
      testing/regress/ecl/aggidx1seq.ecl
  23. 2 12
      testing/regress/ecl/aggidx2.ecl
  24. 19 0
      testing/regress/ecl/aggidx2_thor.ecl
  25. 2 1
      testing/regress/ecl/aggidx2seq.ecl
  26. 2 27
      testing/regress/ecl/aggidx3.ecl
  27. 19 0
      testing/regress/ecl/aggidx3_thor.ecl
  28. 2 1
      testing/regress/ecl/aggidx3seq.ecl
  29. 2 12
      testing/regress/ecl/aggidx4.ecl
  30. 19 0
      testing/regress/ecl/aggidx4_thor.ecl
  31. 2 1
      testing/regress/ecl/aggidx4seq.ecl
  32. 2 1
      testing/regress/ecl/aggsqx1.ecl
  33. 2 1
      testing/regress/ecl/aggsqx1b.ecl
  34. 2 1
      testing/regress/ecl/aggsqx2.ecl
  35. 2 1
      testing/regress/ecl/aggsqx3.ecl
  36. 2 1
      testing/regress/ecl/aggsqx3err.ecl
  37. 2 1
      testing/regress/ecl/aggsqx4.ecl
  38. 145 124
      testing/ecl/setup/textsearch.txt
  39. 7 19
      testing/ecl/textsearch.ecl
  40. 45 0
      testing/regress/ecl/common/aggds1.ecl
  41. 36 0
      testing/regress/ecl/common/aggds2.ecl
  42. 50 0
      testing/regress/ecl/common/aggds3.ecl
  43. 36 0
      testing/regress/ecl/common/aggds4.ecl
  44. 50 0
      testing/regress/ecl/common/aggidx1.ecl
  45. 37 0
      testing/regress/ecl/common/aggidx2.ecl
  46. 51 0
      testing/regress/ecl/common/aggidx3.ecl
  47. 36 0
      testing/regress/ecl/common/aggidx4.ecl
  48. 119 0
      testing/regress/ecl/common/sqagg.ecl
  49. 2 1
      testing/regress/ecl/diskAggregate.ecl
  50. 2 1
      testing/regress/ecl/diskGroupAggregate.ecl
  51. 2 1
      testing/regress/ecl/fetch2.ecl
  52. 2 1
      testing/regress/ecl/indexAggregate.ecl
  53. 2 1
      testing/regress/ecl/indexGroupAggregate.ecl
  54. 2 1
      testing/regress/ecl/indexread5.ecl
  55. 0 0
      testing/regress/ecl/key/aggds1_hthor.xml
  56. 24 0
      testing/regress/ecl/key/aggds1_roxie.xml
  57. 24 0
      testing/regress/ecl/key/aggds1_thor.xml
  58. 6 0
      testing/regress/ecl/key/aggds2_thor.xml
  59. 49 0
      testing/regress/ecl/key/aggds3_thor.xml
  60. 17 0
      testing/regress/ecl/key/aggds4_thor.xml
  61. 28 0
      testing/regress/ecl/key/aggidx1_thor.xml
  62. 6 0
      testing/regress/ecl/key/aggidx2_thor.xml
  63. 46 0
      testing/regress/ecl/key/aggidx3_thor.xml
  64. 17 0
      testing/regress/ecl/key/aggidx4_thor.xml
  65. 6 0
      testing/regress/ecl/key/setuptext.xml
  66. 232 0
      testing/regress/ecl/key/sqagg_thor.xml
  67. 0 0
      testing/regress/ecl/key/textsearch1.xml
  68. 3 0
      testing/ecl/key/textsearch_ao.xml
  69. 3 0
      testing/ecl/key/textsearch_no.xml
  70. 271 0
      testing/regress/ecl/key/textsearch2.xml
  71. 271 0
      testing/regress/ecl/key/textsearch2_thor.xml
  72. 271 0
      testing/regress/ecl/key/textsearch3.xml
  73. 271 0
      testing/regress/ecl/key/textsearch3_thor.xml
  74. 0 0
      testing/regress/ecl/key/textsearch4.xml
  75. 2 1
      testing/regress/ecl/keyed_join3.ecl
  76. 2 1
      testing/regress/ecl/normalize3.ecl
  77. 2 1
      testing/regress/ecl/payload.ecl
  78. 8 47
      testing/regress/ecl/setup/files.ecl
  79. 11 0
      testing/regress/ecl/setup/options.ecl
  80. 3 2
      testing/regress/ecl/setup/setup.ecl
  81. 2 1
      testing/regress/ecl/setup/setup_fetch.ecl
  82. 3 4
      testing/regress/ecl/setup/setupsq.ecl
  83. 68 64
      testing/ecl/setup/setuptext.ecl
  84. 2 1
      testing/regress/ecl/setup/setupxml.ecl
  85. 17 16
      testing/regress/ecl/setup/sq.ecl
  86. 61 0
      testing/regress/ecl/setup/ts.ecl
  87. 2 1
      testing/regress/ecl/sort2.ecl
  88. 2 96
      testing/regress/ecl/sqagg.ecl
  89. 2 1
      testing/regress/ecl/sqagg2.ecl
  90. 19 0
      testing/regress/ecl/sqagg_thor.ecl
  91. 2 1
      testing/regress/ecl/sqaggds.ecl
  92. 2 1
      testing/regress/ecl/sqaggds3.ecl
  93. 2 1
      testing/regress/ecl/sqaggds4.ecl
  94. 2 1
      testing/regress/ecl/sqaggds_an.ecl
  95. 2 1
      testing/regress/ecl/sqaggseq.ecl
  96. 2 1
      testing/regress/ecl/sqcntds.ecl
  97. 4 1
      testing/regress/ecl/sqcntidx.ecl
  98. 2 1
      testing/regress/ecl/sqcond.ecl
  99. 2 1
      testing/regress/ecl/sqcond3.ecl
  100. 0 0
      testing/regress/ecl/sqdistr.ecl

+ 4 - 4
ecl/hql/hqlgram.y

@@ -8573,21 +8573,21 @@ simpleDataSet
                             $$.setExpr(dataset);
                             $$.setPosition($1);
                         }
-    | DATASET '(' '[' beginList inlineDatasetValueList ']' ',' recordDef ')'
+    | DATASET '(' '[' beginList inlineDatasetValueList ']' ',' recordDef optDatasetFlags ')'
                         {
                             HqlExprArray values;
                             parser->endList(values);
-                            OwnedHqlExpr table = createDataset(no_temptable, createValue(no_recordlist, NULL, values), $8.getExpr());
+                            OwnedHqlExpr table = createDataset(no_temptable, createValue(no_recordlist, NULL, values), createComma($8.getExpr(), $9.getExpr()));
                             $$.setExpr(convertTempTableToInlineTable(*parser->errorHandler, $5.pos, table));
                             $$.setPosition($1);
                         }
-    | DATASET '(' '[' beginList transformList ']' ')'
+    | DATASET '(' '[' beginList transformList ']' optDatasetFlags ')'
                         {
                             HqlExprArray values;
                             parser->endList(values);
                             IHqlExpression * record = values.item(0).queryRecord();
                             parser->checkCompatibleTransforms(values, record, $5);
-                            $$.setExpr(createDataset(no_inlinetable, createValue(no_transformlist, makeNullType(), values), LINK(record)));
+                            $$.setExpr(createDataset(no_inlinetable, createValue(no_transformlist, makeNullType(), values), createComma(LINK(record), $7.getExpr())));
                             $$.setPosition($1);
                         }
     | DATASET '(' thorFilenameOrList ',' beginCounterScope recordDef endCounterScope ')'

+ 1 - 0
ecl/hql/hqlutil.cpp

@@ -5922,6 +5922,7 @@ static IHqlExpression * convertTempTableToInline(IErrorReceiver & errorProcessor
     HqlExprArray children;
     children.append(*createValue(no_transformlist, makeNullType(), transforms));
     children.append(*LINK(record));
+    unwindChildren(children, expr, 2);
     OwnedHqlExpr ret = createDataset(no_inlinetable, children);
     return expr->cloneAllAnnotations(ret);
 }

+ 1 - 1
ecl/regress/dataset3.ecl

@@ -32,7 +32,7 @@ namesTable := dataset([
         {'Page','John',62},
         {'Page','Chris',26},
         {'Smithe','Abigail',13},
-        {'X','Za'}], namesRecord);
+        {'X','Za'}], namesRecord, DISTRIBUTED);
 
 x := group(namesTable, surname);// : persist('~GroupedNames2');
 

+ 0 - 387
testing/ecl/textsearch_ao.ecl

@@ -1,387 +0,0 @@
-/*##############################################################################
-
-    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-############################################################################## */
-
-//UseStandardFiles
-//UseTextSearch
-//tidyoutput
-//nothor
-//nothorlcr
-//nohthor
-//DoesntReallyUseIndexes    Thje test is primarily here to check everything works when the index optimizer is always on
-//xxvarskip type==roxie && setuptype==thor && !local
-
-#option ('checkAsserts',false)
-
-//SingleQuery := 'AND(NOTAT(AND("twinkle", "little"), 9999), NOTAT(AND("how", "wonder"),8888))';
-
-q1 := dataset([
-
-#if (#ISDEFINED(SingleQuery))
-            SingleQuery
-#else
-            'AND("black","sheep")',
-            'ANDNOT("black","sheep")',
-            'MOFN(2,"black","sheep","white")',
-            'MOFN(2,2,"black","sheep","white")',
-
-//Word tests
-            '("nonexistant")',
-            '("one")',
-            'CAPS("one")',
-            'NOCAPS("one")',
-            'ALLCAPS("one")',
-            '"ibm"',                                        // simple word, and an alias
-
-//Or tests
-            'OR("nonexistant1", "nonexistant2")',           // neither side matches
-            'OR("nonexistant1", "sheep")',                  // RHS matches
-            'OR("twinkle", "nonexistant2")',                // LHS matches
-            'OR("twinkle", "twinkle")',                     // should dedup
-            'OR("sheep", "black")',                         // matches in same document
-            'OR("sheep", "twinkle")',                       // matches in different documents
-            'OR("one", "sheep", "sheep", "black", "fish")', // matches in different documents
-            'OR(OR("one", "sheep"), OR("sheep", "black", "fish"))', // matches in different documents
-
-//And tests
-            'AND("nonexistant1", "nonexistant2")',          // neither side matches
-            'AND("nonexistant1", "sheep")',                 // RHS matches
-            'AND("twinkle", "nonexistant2")',               // LHS matches
-            'AND("twinkle", "twinkle")',                    // should dedup
-            'AND("sheep", "black")',                        // matches in same document
-            'AND("sheep", "twinkle")',                      // matches in different documents
-            'AND("in", "a")',                               // high frequencies
-            'AND("twinkle", "little", "how", "star")',      // Nary
-            'AND(AND("twinkle", "little"), AND("how", "star"))',        // Nested
-            'AND(NOTAT(AND("twinkle", "little"), 9999), NOTAT(AND("how", "wonder"),8888))',     // Nested
-            'AND(AND("twinkle", "little"), AND("how", "wonder"))',      // Nested
-
-//MORE: Should also test segment restriction....
-
-            'ANDNOT("nonexistant1", "nonexistant2")',       // neither side matches
-            'ANDNOT("nonexistant1", "sheep")',              // RHS matches
-            'ANDNOT("twinkle", "nonexistant2")',            // LHS matches
-            'ANDNOT("twinkle", "twinkle")',                 // should dedup
-            'ANDNOT("sheep", "black")',                     // matches in same document
-            'ANDNOT("sheep", "twinkle")',                   // matches in different documents
-            'ANDNOT("one", OR("sheep", "black", "fish"))',      // matches one, but none of the others
-
-//Phrases
-            'PHRASE("nonexistant1", "nonexistant2")',       // words don't occour
-            'PHRASE("in", "are")',                          // doesn't occur, but words do
-            'PHRASE("baa", "black")',                       // occurs, but
-            'PHRASE("x", "y", "x", "x", "y")',              // a partial match, first - doesn't actually make it more complicatied to implement
-            'PHRASE("james","arthur","stuart")',            // check that next occurence of stuart takes note of the change of document.
-            'PHRASE(OR("black","white"),"sheep")',          // proximity on a non-word input
-            'PHRASE("one", "for", OR(PHRASE("the","master"),PHRASE("the","dame"),PHRASE("the","little","boy")))',
-                                                            // more complex again
-
-//Testing range
-            'PHRASE1to5("humpty","dumpty")',
-            'PHRASE1to5("together","again")',
-
-//M of N
-            'MOFN(2, "humpty", "horses", "together", "beansprout")',    // m<matches
-            'MOFN(3, "humpty", "horses", "together", "beansprout")',    // m=matches
-            'MOFN(4, "humpty", "horses", "together", "beansprout")',    // m>matches
-            'MOFN(2,2, "humpty", "horses", "together", "beansprout")',  // too many matches
-            'MOFN(2, "nonexistant", "little", "bo")',                   // first input fails to match any
-            'MOFN(2, "little", "bo", "nonexistant")',                   // lose an input while finising candidates
-            'MOFN(2, "one", "two", "three", "four", "five")',
-            'MOFN(2, "nonexistant", "two", "three", "four", "five")',
-            'MOFN(2, "one", "nonexistant", "three", "four", "five")',
-            'MOFN(2, "nonexistant1", "nonexistant2", "three", "four", "five")',
-            'MOFN(2, "nonexistant1", "nonexistant2", "nonexistant3", "four", "five")',
-            'MOFN(2, "nonexistant1", "nonexistant2", "nonexistant3", "nonexistant4", "five")',
-            'MOFN(2, PHRASE("little","bo"),PHRASE("three","bags"),"sheep")',    // m of n on phrases
-            'MOFN(2, PHRASE("Little","Bo"),PHRASE("three","bags"),"sheep")',    // m of n on phrases - capital letters don't match
-            'MOFN(2, OR("little","big"), OR("king", "queen"), OR("star", "sheep", "twinkle"))',
-
-//Proximity
-            'PROXIMITY("nonexistant1", "nonexistant2", -1, 1)',
-            'PROXIMITY("black", "nonexistant2", -1, 1)',
-            'PROXIMITY("nonexistant1", "sheep", -1, 1)',
-
-//Adjacent checks
-            'PROXIMITY("ship", "sank", 0, 0)',                      // either order but adjacent
-            'NORM(PROXIMITY("ship", "sank", 0, 0))',
-            'PROXIMITY("ship", "sank", -1, 0)',                     // must follow
-            'PROXIMITY("ship", "sank", 0, -1)',                     // must preceed
-            'PROXIMITY("sank", "ship", 0, 0)',                      // either order but adjacent
-            'PROXIMITY("sank", "ship", -1, 0)',                     // must follow
-            'PROXIMITY("sank", "ship", 0, -1)',                     // must preceed
-
-//Within a distance of 1
-            'PROXIMITY("ship", "sank", 1, 1)',                      // either order but only 1 intervening word
-            'PROXIMITY("ship", "sank", -1, 1)',                     // must follow
-            'PROXIMITY("ship", "sank", 1, -1)',                     // must preceed
-            'PROXIMITY("sank", "ship", 1, 1)',                      // either order but only 1 intervening word
-            'PROXIMITY("sank", "ship", -1, 1)',                     // must follow
-            'PROXIMITY("sank", "ship", 1, -1)',                     // must preceed
-
-            'PROXIMITY("ship", "sank", 0, 2)',                      // asymetric range
-
-//Within a distance of 2
-            'PROXIMITY("ship", "ship", 2, 2)',                      // either order but only 2 intervening word, no dups
-                                                                    // *** currently fails because of lack of duplication in lowest merger
-            'PROXIMITY("zx", "zx", 5, 5)',                          // "zx (za) zx", "zx (za zx zb zc zd) zx" and "zx (zb zc zd zx)"
-            'PROXIMITY(PROXIMITY("zx", "zx", 5, 5), "zx", 1, 1)',   // "zx (za) zx (zb zc zd) zx" - obtained two different ways.
-            'NORM(PROXIMITY(PROXIMITY("zx", "zx", 5, 5), "zx", 1, 1))', // as above, but normalized
-            'PROXIMITY(PROXIMITY("zx", "zx", 5, 5), "zx", 0, 0)',   // "zx (za) zx (zb zc zd) zx" - can obly be obtained from first
-                                                                    // you could imagine -ve left and right to mean within - would need -1,0 in stepping, and appropriate hard condition.
-
-            'PROXIMITY("ibm", "business", 2, 2)',                   // alias doesn't allow matching within itself.
-            'PROXIMITY("ibm", "business", 3, 3)',                   // alias should match now with other word
-            'PROXIMITY("ibm", "ibm", 0, 0)',                        // aliases and non aliases cause fun.
-
-//More combinations of operators
-            'AND(OR("twinkle", "black"), OR("sheep", "wonder"))',
-            'OR(AND("twinkle", "sheep"), AND("star", "black"))',
-            'OR(AND("twinkle", "star"), AND("sheep", "black"))',
-            'AND(SET("twinkle", "black"), SET("sheep", "wonder"))',
-
-//Silly queries
-            'OR("star","star","star","star","star")',
-            'AND("star","star","star","star","star")',
-            'MOFN(4,"star","star","star","star","star")',
-
-
-//Other operators
-            'PRE("twinkle", "twinkle")',
-            'PRE(PHRASE("twinkle", "twinkle"), PHRASE("little","star"))',
-            'PRE(PHRASE("little","star"), PHRASE("twinkle", "twinkle"))',
-            'PRE(PROXIMITY("twinkle","twinkle", 3, 3), PROXIMITY("little", "star", 2, 2))',
-            'AFT("twinkle", "twinkle")',
-            'AFT(PHRASE("little","star"), PHRASE("twinkle", "twinkle"))',
-            'AFT(PHRASE("twinkle", "twinkle"), PHRASE("little","star"))',
-            'AFT(PROXIMITY("twinkle","twinkle", 3, 3), PROXIMITY("little", "star", 2, 2))',
-
-// Left outer joins for ranking.
-            'RANK("sheep", OR("peep", "baa"))',
-            'RANK("three", OR("bags", "full"))',
-            'RANK("three", OR("one", "bags"))',
-
-
-//Non standard variants - AND, generating a single record for the match.  Actually for each cross product as it is currently (and logically) implemented
-            'ANDJOIN("nonexistant1", "nonexistant2")',          // neither side matches
-            'ANDJOIN("nonexistant1", "sheep")',                 // RHS matches
-            'ANDJOIN("twinkle", "nonexistant2")',               // LHS matches
-            'ANDJOIN("twinkle", "twinkle")',                    // should dedup
-            'ANDJOIN("sheep", "black")',                        // matches in same document
-            'ANDJOIN("sheep", "twinkle")',                      // matches in different documents
-            'ANDJOIN("in", "a")',                               // high frequencies
-            'ANDJOIN("twinkle", "little", "how", "star")',      // Nary
-
-            'ANDNOTJOIN("nonexistant1", "nonexistant2")',       // neither side matches
-            'ANDNOTJOIN("nonexistant1", "sheep")',              // RHS matches
-            'ANDNOTJOIN("twinkle", "nonexistant2")',            // LHS matches
-            'ANDNOTJOIN("twinkle", "twinkle")',                 // should dedup
-            'ANDNOTJOIN("sheep", "black")',                     // matches in same document
-            'ANDNOTJOIN("sheep", "twinkle")',                   // matches in different documents
-            'ANDNOTJOIN("one", OR("sheep", "black", "fish"))',  // matches one, but none of the others
-
-            'MOFNJOIN(2, "humpty", "horses", "together", "beansprout")',    // m<matches
-            'MOFNJOIN(3, "humpty", "horses", "together", "beansprout")',    // m=matches
-            'MOFNJOIN(4, "humpty", "horses", "together", "beansprout")',    // m>matches
-            'MOFNJOIN(2,2, "humpty", "horses", "together", "beansprout")',  // too many matches
-            'MOFNJOIN(2, "nonexistant", "little", "bo")',                   // first input fails to match any
-            'MOFNJOIN(2, "little", "bo", "nonexistant")',                   // lose an input while finising candidates
-            'MOFNJOIN(2, "one", "two", "three", "four", "five")',
-            'MOFNJOIN(2, "nonexistant", "two", "three", "four", "five")',
-            'MOFNJOIN(2, "one", "nonexistant", "three", "four", "five")',
-            'MOFNJOIN(2, "nonexistant1", "nonexistant2", "three", "four", "five")',
-            'MOFNJOIN(2, "nonexistant1", "nonexistant2", "nonexistant3", "four", "five")',
-            'MOFNJOIN(2, "nonexistant1", "nonexistant2", "nonexistant3", "nonexistant4", "five")',
-            'MOFNJOIN(2, PHRASE("little","bo"),PHRASE("three","bags"),"sheep")',    // m of n on phrases
-            'MOFNJOIN(2, PHRASE("Little","Bo"),PHRASE("three","bags"),"sheep")',    // m of n on phrases - capital letters don't match
-            'MOFNJOIN(2, OR("little","big"), OR("king", "queen"), OR("star", "sheep", "twinkle"))',
-
-            'RANKJOIN("SHEEP", "BLACK")',
-            'RANKJOIN("sheep", OR("peep", "baa"))',
-            'RANKJOIN("three", OR("bags", "full"))',
-            'RANKJOIN("three", OR("one", "bags"))',
-
-
-//ROLLAND - does AND, followed by a rollup by doc.  Should also check that smart stepping still works through the grouped rollup
-            'ROLLAND("nonexistant1", "nonexistant2")',          // neither side matches
-            'ROLLAND("nonexistant1", "sheep")',                 // RHS matches
-            'ROLLAND("twinkle", "nonexistant2")',               // LHS matches
-            'ROLLAND("twinkle", "twinkle")',                    // should dedup
-            'ROLLAND("sheep", "black")',                        // matches in same document
-            'ROLLAND("sheep", "twinkle")',                      // matches in different documents
-            'ROLLAND("in", "a")',                               // high frequencies
-            'ROLLAND("twinkle", "little", "how", "star")',      // Nary
-            'AND(ROLLAND("twinkle", "little"), ROLLAND("how", "star"))',        // Nary
-
-//Same tests as proximity above, but not calling a transform - merging instead
-            'PROXMERGE("ship", "sank", 0, 0)',                      // either order but adjacent
-            'PROXMERGE("ship", "sank", -1, 0)',                     // must follow
-            'PROXMERGE("ship", "sank", 0, -1)',                     // must preceed
-            'PROXMERGE("sank", "ship", 0, 0)',                      // either order but adjacent
-            'PROXMERGE("sank", "ship", -1, 0)',                     // must follow
-            'PROXMERGE("sank", "ship", 0, -1)',                     // must preceed
-            'PROXMERGE("ship", "sank", 1, 1)',                      // either order but only 1 intervening word
-            'PROXMERGE("ship", "sank", -1, 1)',                     // must follow
-            'PROXMERGE("ship", "sank", 1, -1)',                     // must preceed
-            'PROXMERGE("sank", "ship", 1, 1)',                      // either order but only 1 intervening word
-            'PROXMERGE("sank", "ship", -1, 1)',                     // must follow
-            'PROXMERGE("sank", "ship", 1, -1)',                     // must preceed
-            'PROXMERGE("ship", "sank", 0, 2)',                      // asymetric range
-
-//SET should be equivalent to OR
-            'SET("nonexistant1", "nonexistant2")',          // neither side matches
-            'SET("nonexistant1", "sheep")',                 // RHS matches
-            'SET("twinkle", "nonexistant2")',               // LHS matches
-            'SET("twinkle", "twinkle")',                    // should dedup
-            'SET("sheep", "black")',                        // matches in same document
-            'SET("sheep", "twinkle")',                      // matches in different documents
-            'SET("one", "sheep", "sheep", "black", "fish")',    // matches in different documents
-            'OR(SET("one", "sheep"), SET("sheep", "black", "fish"))',   // matches in different documents
-
-//Testing range
-            'PHRASE1to5(PHRASE1to5("what","you"),"are")',
-            'PHRASE1to5("what", PHRASE1to5("you","are"))',
-            'PHRASE1to5(PHRASE1to5("open","source"),"software")',
-            'PHRASE1to5("open", PHRASE1to5("source","software"))',
-
-//Atleast
-            'ATLEAST(2, "twinkle")',                                // would something like UNIQUEAND("twinkle", "twinkle") be more efficient???
-            'ATLEAST(4, "twinkle")',
-            'ATLEAST(5, "twinkle")',
-            'ATLEAST(5, AND("twinkle","star"))',
-            'AND(ATLEAST(4, "twinkle"),"star")',                    // make sure this still smart steps!
-            'AND(ATLEAST(5, "twinkle"),"star")',
-            'ATLEAST(1, PHRASE("humpty","dumpty"))',
-            'ATLEAST(2, PHRASE("humpty","dumpty"))',
-            'ATLEAST(3, PHRASE("humpty","dumpty"))',
-
-            '"little"',
-            'IN(name, "little")',
-            'NOTIN(name, "little")',
-            'IN(suitcase, AND("sock", "shirt"))',
-            'IN(suitcase, AND("sock", "dress"))',
-            'IN(suitcase, AND("shirt", "dress"))',                  //no, different suitcases..
-            'IN(suitcase, OR("cat", "dog"))',                       //no - wrong container
-            'IN(box, OR("cat", "dog"))',                            //yes
-            'IN(box, IN(suitcase, "shirt"))',
-            'IN(suitcase, IN(box, "shirt"))',                       // no other elements in the suitcase, so not valid
-            'IN(box, AND(IN(suitcase, "shirt"), "car"))',
-            'IN(box, AND(IN(suitcase, "shirt"), "lego"))',          // no, lego isn't in the box...
-            'IN(box, MOFN(2, "car", "train", "glue"))',             // really nasty - need to modify the m of n to add position equality!
-            'IN(box, MOFN(2, "car", "glue", "train"))',             // and check works in all positions.
-            'IN(box, MOFN(2, "glue", "car", "train"))',             //   " ditto "
-            'IN(box, MOFN(3, "car", "train", "glue"))',
-            'NOTIN(box, AND("computer", "lego"))',
-            'NOTIN(box, AND("train", "lego"))',
-            'IN(suitcase, PROXIMITY("trouser", "sock", 1, 2))',     // ok.
-            'IN(suitcase, PROXIMITY("trouser", "train", 1, 2))',    // no, close enough, but not both in the suitcase
-            'IN(suitcase, PROXIMITY("trouser", "dress", 6, 6))',    // no, close enough, but not both in the same suitcase
-            'PROXIMITY(IN(suitcase, "trouser"), IN(suitcase, "dress"), 6, 6)',  // yes - testing the proximity of the suitcases, not the contents.
-
-            'IN(S, AND("fish", "alive"))',                          // <s> is the sentence container
-            'S(AND("fish", "alive"))',                              // pseudonym for within same sentence -
-            'S(AND("fish", "finger"))',                             //
-            'S(AND("sheep", "wagging"))',
-            'P(AND("sheep", "wagging"))',                           // same paragraph...
-            'AND(IN(socks, "fox"),IN(socks, "knox"))',
-            'AND(IN(box, "fox"),IN(box, "knox"))',
-            'AND(IN(box, IN(socks, "fox")),IN(box, "knox"))',
-            'AND(IN(socks, IN(box, "fox")),IN(box, "knox"))',           // yes - because no extra elements in the box.
-            'S(PHRASE("black", "sheep"))',
-            'IN(name, PHRASE("little", "bo", "peep"))',
-            'IN(name, PHRASE("little", "bo", "peep", "has"))',
-
-            'IN(range1, IN(range2, "seven"))',                      // only match 5.3
-            'SAME(IN(range1, "seven"), IN(range2, "seven"))',       // only match 5.3
-            'OVERLAP(IN(range1, "five"), IN(range2, "ten"))',       // overlapping, match 5.4
-            'PROXIMITY(IN(range1, "five"), IN(range2, "ten"), 0, 0)',   // adjacent match 5.4, 5.5
-            'PROXIMITY(IN(range1, "five"), IN(range2, "ten"), 1, 1)',   // proximity match 5.4, 5.5
-            'PROXIMITY(IN(range1, "five"), IN(range2, "ten"), 2, 2)',   // adjacent match 5.4, 5.5, 5.6
-
-            'ATLEAST(2, IN(suitcase, "sock"))',                     // at least two suitcases containing a sock.
-            'ATLEAST(3, IN(suitcase, "sock"))',                     // no
-
-            'IN(box, "train")',                                     // should be 4 matches (since inside nested boxes)
-
-            'IN(suitcase, ATLEAST(1, "sock"))',                     // suitcases containing at least one sock.   (should really optimize away)
-            'IN(suitcase, ATLEAST(2, "sock"))',                     // at least two suit cases containing a sock.
-            'IN(suitcase, ATLEAST(3, "sock"))',
-            'IN(suitcase, ATLEAST(3, OR("sock", "dress")))',        //no
-            'IN(suitcase, ATLEAST(3, SET("sock", "dress")))',       //no
-            'IN(suitcase, ATLEAST(3, OR("sock", "jacket")))',       //yes...
-            'IN(suitcase, ATLEAST(3, SET("sock", "jacket")))',      //yes...
-            'IN(box, IN(suitcase, ATLEAST(2, "sock")))',            //yes - box, with one match
-            'IN(box, IN(suitcase, ATLEAST(3, "sock")))',            //no -
-            'IN(box, ATLEAST(2, IN(suitcase, "sock")))',            //yes -
-            'IN(box, ATLEAST(3, IN(suitcase, "sock")))',            //no -
-            'IN(box, ATLEAST(2, IN(suitcase, ATLEAST(2, "sock"))))',            //no...
-            'IN(box, AND(ATLEAST(2, "train"), ATLEAST(2, "sock")))',    // yes
-            'IN(box, AND(ATLEAST(3, "train"), ATLEAST(2, "sock")))',    // no
-            'IN(suitcase, AND(ATLEAST(2, "sock"), ATLEAST(2, OR("tights", "dress"))))', // no - different suitcases.
-            'IN(suitcase, ATLEAST(2, "sock"))', // yes
-            'IN(suitcase, ATLEAST(2, OR("tights", "dress")))',  // yes
-
-//The following example fails - not quite sure how to fix it.
-//          'IN(suitcase, OR(ATLEAST(2, "sock"), ATLEAST(2, OR("tights", "dress"))))',  // yes
-            'IN(suitcase, ATLEAST(4, AND(ATLEAST(2, "sock"), OR("shirt", "trouser"))))',    // yes - nested atleasts...
-            'IN(suitcase, ATLEAST(5, AND(ATLEAST(2, "sock"), OR("shirt", "trouser"))))',    // no
-
-            '_ATLEASTIN_(1, IN(suitcase, "sock"), 1)',                      // suitcases containing at least one sock.   (should really optimize away)
-            '_ATLEASTIN_(2, IN(suitcase, "sock"), 1)',                      // at least two suit cases containing a sock.
-            '_ATLEASTIN_(3, IN(suitcase, "sock"), 1)',
-
-            'S(ANDNOT("fish", "alive"))',                               // pseudonym for within same sentence -
-            'S(ANDNOT("fish", "finger"))',                              //
-
-            'AT("the", 2)',                                             // occurences of 'the' at position 2
-            'AT("the", 18)',
-            'AT("is", 17)',
-            'AND(AT("the", 18),AT("is",17))',
-
-            'AND("gch01", "gch02", "gch04")',
-            'AND("gch01", "gch02", "gch10")',
-
-            'AND(SET("and","a"), SET("the", "one"), PHRASE("for","the","dame"))',
-            'AND(CAPS("sheep"), "spotted")',
-            'AND(CAPS("sheep"), NOCAPS("spotted"))',
-            'AND(SET(CAPS("sheep","little")), SET(CAPS("Up","go")))',
-            'AND(SET(CAPS("sheep","little")), SET(NOCAPS("Up","go")))',
-            'AND(OR(CAPS("sheep"),CAPS("Little")), OR(CAPS("Up"),NOCAPS("go")))',
-
-            'ANDNOT(AND("black","sheep"), "family")',
-            'ANDNOT(AND("little","and"), "jack")',
-
-            'BUTNOT("little", PHRASE("little", "star"))',
-            'BUTNOTJOIN("little", PHRASE("little", "star"))',
-            'BUTNOT("black", PHRASE("black", OR("spotted", "sheep")))',
-            'BUTNOTJOIN("black", PHRASE("black", OR("spotted", "sheep")))',
-
-//MORE:
-// STEPPED flag on merge to give an error if input doesn't support stepping.
-// What about the duplicates that can come out of the proximity operators?
-// where the next on the rhs is at a compatible position, but in a different document
-// What about inverse of proximity x not w/n y
-// Can inverse proximity be used for sentance/paragraph.  Can we combine them so short circuited before temporaries created.
-//MORE: What other boundary conditions can we think of.
-
-                ' '
-
-#end
-
-            ], queryInputRecord);
-
-p := project(nofold(q1), doBatchExecute(TS_wordIndex, LEFT, 0x80000000));
-output(p);

+ 0 - 387
testing/ecl/textsearch_no.ecl

@@ -1,387 +0,0 @@
-/*##############################################################################
-
-    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-############################################################################## */
-
-//UseStandardFiles
-//UseTextSearch
-//tidyoutput
-//nothor
-//nothorlcr
-//nohthor
-//DoesntReallyUseIndexes    Thje test is primarily here to check everything works when the join index optimizer is always on
-//xxvarskip type==roxie && setuptype==thor && !local
-
-#option ('checkAsserts',false)
-
-//SingleQuery := 'AND(NOTAT(AND("twinkle", "little"), 9999), NOTAT(AND("how", "wonder"),8888))';
-
-q1 := dataset([
-
-#if (#ISDEFINED(SingleQuery))
-            SingleQuery
-#else
-            'AND("black","sheep")',
-            'ANDNOT("black","sheep")',
-            'MOFN(2,"black","sheep","white")',
-            'MOFN(2,2,"black","sheep","white")',
-
-//Word tests
-            '("nonexistant")',
-            '("one")',
-            'CAPS("one")',
-            'NOCAPS("one")',
-            'ALLCAPS("one")',
-            '"ibm"',                                        // simple word, and an alias
-
-//Or tests
-            'OR("nonexistant1", "nonexistant2")',           // neither side matches
-            'OR("nonexistant1", "sheep")',                  // RHS matches
-            'OR("twinkle", "nonexistant2")',                // LHS matches
-            'OR("twinkle", "twinkle")',                     // should dedup
-            'OR("sheep", "black")',                         // matches in same document
-            'OR("sheep", "twinkle")',                       // matches in different documents
-            'OR("one", "sheep", "sheep", "black", "fish")', // matches in different documents
-            'OR(OR("one", "sheep"), OR("sheep", "black", "fish"))', // matches in different documents
-
-//And tests
-            'AND("nonexistant1", "nonexistant2")',          // neither side matches
-            'AND("nonexistant1", "sheep")',                 // RHS matches
-            'AND("twinkle", "nonexistant2")',               // LHS matches
-            'AND("twinkle", "twinkle")',                    // should dedup
-            'AND("sheep", "black")',                        // matches in same document
-            'AND("sheep", "twinkle")',                      // matches in different documents
-            'AND("in", "a")',                               // high frequencies
-            'AND("twinkle", "little", "how", "star")',      // Nary
-            'AND(AND("twinkle", "little"), AND("how", "star"))',        // Nested
-            'AND(NOTAT(AND("twinkle", "little"), 9999), NOTAT(AND("how", "wonder"),8888))',     // Nested
-            'AND(AND("twinkle", "little"), AND("how", "wonder"))',      // Nested
-
-//MORE: Should also test segment restriction....
-
-            'ANDNOT("nonexistant1", "nonexistant2")',       // neither side matches
-            'ANDNOT("nonexistant1", "sheep")',              // RHS matches
-            'ANDNOT("twinkle", "nonexistant2")',            // LHS matches
-            'ANDNOT("twinkle", "twinkle")',                 // should dedup
-            'ANDNOT("sheep", "black")',                     // matches in same document
-            'ANDNOT("sheep", "twinkle")',                   // matches in different documents
-            'ANDNOT("one", OR("sheep", "black", "fish"))',      // matches one, but none of the others
-
-//Phrases
-            'PHRASE("nonexistant1", "nonexistant2")',       // words don't occour
-            'PHRASE("in", "are")',                          // doesn't occur, but words do
-            'PHRASE("baa", "black")',                       // occurs, but
-            'PHRASE("x", "y", "x", "x", "y")',              // a partial match, first - doesn't actually make it more complicatied to implement
-            'PHRASE("james","arthur","stuart")',            // check that next occurence of stuart takes note of the change of document.
-            'PHRASE(OR("black","white"),"sheep")',          // proximity on a non-word input
-            'PHRASE("one", "for", OR(PHRASE("the","master"),PHRASE("the","dame"),PHRASE("the","little","boy")))',
-                                                            // more complex again
-
-//Testing range
-            'PHRASE1to5("humpty","dumpty")',
-            'PHRASE1to5("together","again")',
-
-//M of N
-            'MOFN(2, "humpty", "horses", "together", "beansprout")',    // m<matches
-            'MOFN(3, "humpty", "horses", "together", "beansprout")',    // m=matches
-            'MOFN(4, "humpty", "horses", "together", "beansprout")',    // m>matches
-            'MOFN(2,2, "humpty", "horses", "together", "beansprout")',  // too many matches
-            'MOFN(2, "nonexistant", "little", "bo")',                   // first input fails to match any
-            'MOFN(2, "little", "bo", "nonexistant")',                   // lose an input while finising candidates
-            'MOFN(2, "one", "two", "three", "four", "five")',
-            'MOFN(2, "nonexistant", "two", "three", "four", "five")',
-            'MOFN(2, "one", "nonexistant", "three", "four", "five")',
-            'MOFN(2, "nonexistant1", "nonexistant2", "three", "four", "five")',
-            'MOFN(2, "nonexistant1", "nonexistant2", "nonexistant3", "four", "five")',
-            'MOFN(2, "nonexistant1", "nonexistant2", "nonexistant3", "nonexistant4", "five")',
-            'MOFN(2, PHRASE("little","bo"),PHRASE("three","bags"),"sheep")',    // m of n on phrases
-            'MOFN(2, PHRASE("Little","Bo"),PHRASE("three","bags"),"sheep")',    // m of n on phrases - capital letters don't match
-            'MOFN(2, OR("little","big"), OR("king", "queen"), OR("star", "sheep", "twinkle"))',
-
-//Proximity
-            'PROXIMITY("nonexistant1", "nonexistant2", -1, 1)',
-            'PROXIMITY("black", "nonexistant2", -1, 1)',
-            'PROXIMITY("nonexistant1", "sheep", -1, 1)',
-
-//Adjacent checks
-            'PROXIMITY("ship", "sank", 0, 0)',                      // either order but adjacent
-            'NORM(PROXIMITY("ship", "sank", 0, 0))',
-            'PROXIMITY("ship", "sank", -1, 0)',                     // must follow
-            'PROXIMITY("ship", "sank", 0, -1)',                     // must preceed
-            'PROXIMITY("sank", "ship", 0, 0)',                      // either order but adjacent
-            'PROXIMITY("sank", "ship", -1, 0)',                     // must follow
-            'PROXIMITY("sank", "ship", 0, -1)',                     // must preceed
-
-//Within a distance of 1
-            'PROXIMITY("ship", "sank", 1, 1)',                      // either order but only 1 intervening word
-            'PROXIMITY("ship", "sank", -1, 1)',                     // must follow
-            'PROXIMITY("ship", "sank", 1, -1)',                     // must preceed
-            'PROXIMITY("sank", "ship", 1, 1)',                      // either order but only 1 intervening word
-            'PROXIMITY("sank", "ship", -1, 1)',                     // must follow
-            'PROXIMITY("sank", "ship", 1, -1)',                     // must preceed
-
-            'PROXIMITY("ship", "sank", 0, 2)',                      // asymetric range
-
-//Within a distance of 2
-            'PROXIMITY("ship", "ship", 2, 2)',                      // either order but only 2 intervening word, no dups
-                                                                    // *** currently fails because of lack of duplication in lowest merger
-            'PROXIMITY("zx", "zx", 5, 5)',                          // "zx (za) zx", "zx (za zx zb zc zd) zx" and "zx (zb zc zd zx)"
-            'PROXIMITY(PROXIMITY("zx", "zx", 5, 5), "zx", 1, 1)',   // "zx (za) zx (zb zc zd) zx" - obtained two different ways.
-            'NORM(PROXIMITY(PROXIMITY("zx", "zx", 5, 5), "zx", 1, 1))', // as above, but normalized
-            'PROXIMITY(PROXIMITY("zx", "zx", 5, 5), "zx", 0, 0)',   // "zx (za) zx (zb zc zd) zx" - can obly be obtained from first
-                                                                    // you could imagine -ve left and right to mean within - would need -1,0 in stepping, and appropriate hard condition.
-
-            'PROXIMITY("ibm", "business", 2, 2)',                   // alias doesn't allow matching within itself.
-            'PROXIMITY("ibm", "business", 3, 3)',                   // alias should match now with other word
-            'PROXIMITY("ibm", "ibm", 0, 0)',                        // aliases and non aliases cause fun.
-
-//More combinations of operators
-            'AND(OR("twinkle", "black"), OR("sheep", "wonder"))',
-            'OR(AND("twinkle", "sheep"), AND("star", "black"))',
-            'OR(AND("twinkle", "star"), AND("sheep", "black"))',
-            'AND(SET("twinkle", "black"), SET("sheep", "wonder"))',
-
-//Silly queries
-            'OR("star","star","star","star","star")',
-            'AND("star","star","star","star","star")',
-            'MOFN(4,"star","star","star","star","star")',
-
-
-//Other operators
-            'PRE("twinkle", "twinkle")',
-            'PRE(PHRASE("twinkle", "twinkle"), PHRASE("little","star"))',
-            'PRE(PHRASE("little","star"), PHRASE("twinkle", "twinkle"))',
-            'PRE(PROXIMITY("twinkle","twinkle", 3, 3), PROXIMITY("little", "star", 2, 2))',
-            'AFT("twinkle", "twinkle")',
-            'AFT(PHRASE("little","star"), PHRASE("twinkle", "twinkle"))',
-            'AFT(PHRASE("twinkle", "twinkle"), PHRASE("little","star"))',
-            'AFT(PROXIMITY("twinkle","twinkle", 3, 3), PROXIMITY("little", "star", 2, 2))',
-
-// Left outer joins for ranking.
-            'RANK("sheep", OR("peep", "baa"))',
-            'RANK("three", OR("bags", "full"))',
-            'RANK("three", OR("one", "bags"))',
-
-
-//Non standard variants - AND, generating a single record for the match.  Actually for each cross product as it is currently (and logically) implemented
-            'ANDJOIN("nonexistant1", "nonexistant2")',          // neither side matches
-            'ANDJOIN("nonexistant1", "sheep")',                 // RHS matches
-            'ANDJOIN("twinkle", "nonexistant2")',               // LHS matches
-            'ANDJOIN("twinkle", "twinkle")',                    // should dedup
-            'ANDJOIN("sheep", "black")',                        // matches in same document
-            'ANDJOIN("sheep", "twinkle")',                      // matches in different documents
-            'ANDJOIN("in", "a")',                               // high frequencies
-            'ANDJOIN("twinkle", "little", "how", "star")',      // Nary
-
-            'ANDNOTJOIN("nonexistant1", "nonexistant2")',       // neither side matches
-            'ANDNOTJOIN("nonexistant1", "sheep")',              // RHS matches
-            'ANDNOTJOIN("twinkle", "nonexistant2")',            // LHS matches
-            'ANDNOTJOIN("twinkle", "twinkle")',                 // should dedup
-            'ANDNOTJOIN("sheep", "black")',                     // matches in same document
-            'ANDNOTJOIN("sheep", "twinkle")',                   // matches in different documents
-            'ANDNOTJOIN("one", OR("sheep", "black", "fish"))',  // matches one, but none of the others
-
-            'MOFNJOIN(2, "humpty", "horses", "together", "beansprout")',    // m<matches
-            'MOFNJOIN(3, "humpty", "horses", "together", "beansprout")',    // m=matches
-            'MOFNJOIN(4, "humpty", "horses", "together", "beansprout")',    // m>matches
-            'MOFNJOIN(2,2, "humpty", "horses", "together", "beansprout")',  // too many matches
-            'MOFNJOIN(2, "nonexistant", "little", "bo")',                   // first input fails to match any
-            'MOFNJOIN(2, "little", "bo", "nonexistant")',                   // lose an input while finising candidates
-            'MOFNJOIN(2, "one", "two", "three", "four", "five")',
-            'MOFNJOIN(2, "nonexistant", "two", "three", "four", "five")',
-            'MOFNJOIN(2, "one", "nonexistant", "three", "four", "five")',
-            'MOFNJOIN(2, "nonexistant1", "nonexistant2", "three", "four", "five")',
-            'MOFNJOIN(2, "nonexistant1", "nonexistant2", "nonexistant3", "four", "five")',
-            'MOFNJOIN(2, "nonexistant1", "nonexistant2", "nonexistant3", "nonexistant4", "five")',
-            'MOFNJOIN(2, PHRASE("little","bo"),PHRASE("three","bags"),"sheep")',    // m of n on phrases
-            'MOFNJOIN(2, PHRASE("Little","Bo"),PHRASE("three","bags"),"sheep")',    // m of n on phrases - capital letters don't match
-            'MOFNJOIN(2, OR("little","big"), OR("king", "queen"), OR("star", "sheep", "twinkle"))',
-
-            'RANKJOIN("SHEEP", "BLACK")',
-            'RANKJOIN("sheep", OR("peep", "baa"))',
-            'RANKJOIN("three", OR("bags", "full"))',
-            'RANKJOIN("three", OR("one", "bags"))',
-
-
-//ROLLAND - does AND, followed by a rollup by doc.  Should also check that smart stepping still works through the grouped rollup
-            'ROLLAND("nonexistant1", "nonexistant2")',          // neither side matches
-            'ROLLAND("nonexistant1", "sheep")',                 // RHS matches
-            'ROLLAND("twinkle", "nonexistant2")',               // LHS matches
-            'ROLLAND("twinkle", "twinkle")',                    // should dedup
-            'ROLLAND("sheep", "black")',                        // matches in same document
-            'ROLLAND("sheep", "twinkle")',                      // matches in different documents
-            'ROLLAND("in", "a")',                               // high frequencies
-            'ROLLAND("twinkle", "little", "how", "star")',      // Nary
-            'AND(ROLLAND("twinkle", "little"), ROLLAND("how", "star"))',        // Nary
-
-//Same tests as proximity above, but not calling a transform - merging instead
-            'PROXMERGE("ship", "sank", 0, 0)',                      // either order but adjacent
-            'PROXMERGE("ship", "sank", -1, 0)',                     // must follow
-            'PROXMERGE("ship", "sank", 0, -1)',                     // must preceed
-            'PROXMERGE("sank", "ship", 0, 0)',                      // either order but adjacent
-            'PROXMERGE("sank", "ship", -1, 0)',                     // must follow
-            'PROXMERGE("sank", "ship", 0, -1)',                     // must preceed
-            'PROXMERGE("ship", "sank", 1, 1)',                      // either order but only 1 intervening word
-            'PROXMERGE("ship", "sank", -1, 1)',                     // must follow
-            'PROXMERGE("ship", "sank", 1, -1)',                     // must preceed
-            'PROXMERGE("sank", "ship", 1, 1)',                      // either order but only 1 intervening word
-            'PROXMERGE("sank", "ship", -1, 1)',                     // must follow
-            'PROXMERGE("sank", "ship", 1, -1)',                     // must preceed
-            'PROXMERGE("ship", "sank", 0, 2)',                      // asymetric range
-
-//SET should be equivalent to OR
-            'SET("nonexistant1", "nonexistant2")',          // neither side matches
-            'SET("nonexistant1", "sheep")',                 // RHS matches
-            'SET("twinkle", "nonexistant2")',               // LHS matches
-            'SET("twinkle", "twinkle")',                    // should dedup
-            'SET("sheep", "black")',                        // matches in same document
-            'SET("sheep", "twinkle")',                      // matches in different documents
-            'SET("one", "sheep", "sheep", "black", "fish")',    // matches in different documents
-            'OR(SET("one", "sheep"), SET("sheep", "black", "fish"))',   // matches in different documents
-
-//Testing range
-            'PHRASE1to5(PHRASE1to5("what","you"),"are")',
-            'PHRASE1to5("what", PHRASE1to5("you","are"))',
-            'PHRASE1to5(PHRASE1to5("open","source"),"software")',
-            'PHRASE1to5("open", PHRASE1to5("source","software"))',
-
-//Atleast
-            'ATLEAST(2, "twinkle")',                                // would something like UNIQUEAND("twinkle", "twinkle") be more efficient???
-            'ATLEAST(4, "twinkle")',
-            'ATLEAST(5, "twinkle")',
-            'ATLEAST(5, AND("twinkle","star"))',
-            'AND(ATLEAST(4, "twinkle"),"star")',                    // make sure this still smart steps!
-            'AND(ATLEAST(5, "twinkle"),"star")',
-            'ATLEAST(1, PHRASE("humpty","dumpty"))',
-            'ATLEAST(2, PHRASE("humpty","dumpty"))',
-            'ATLEAST(3, PHRASE("humpty","dumpty"))',
-
-            '"little"',
-            'IN(name, "little")',
-            'NOTIN(name, "little")',
-            'IN(suitcase, AND("sock", "shirt"))',
-            'IN(suitcase, AND("sock", "dress"))',
-            'IN(suitcase, AND("shirt", "dress"))',                  //no, different suitcases..
-            'IN(suitcase, OR("cat", "dog"))',                       //no - wrong container
-            'IN(box, OR("cat", "dog"))',                            //yes
-            'IN(box, IN(suitcase, "shirt"))',
-            'IN(suitcase, IN(box, "shirt"))',                       // no other elements in the suitcase, so not valid
-            'IN(box, AND(IN(suitcase, "shirt"), "car"))',
-            'IN(box, AND(IN(suitcase, "shirt"), "lego"))',          // no, lego isn't in the box...
-            'IN(box, MOFN(2, "car", "train", "glue"))',             // really nasty - need to modify the m of n to add position equality!
-            'IN(box, MOFN(2, "car", "glue", "train"))',             // and check works in all positions.
-            'IN(box, MOFN(2, "glue", "car", "train"))',             //   " ditto "
-            'IN(box, MOFN(3, "car", "train", "glue"))',
-            'NOTIN(box, AND("computer", "lego"))',
-            'NOTIN(box, AND("train", "lego"))',
-            'IN(suitcase, PROXIMITY("trouser", "sock", 1, 2))',     // ok.
-            'IN(suitcase, PROXIMITY("trouser", "train", 1, 2))',    // no, close enough, but not both in the suitcase
-            'IN(suitcase, PROXIMITY("trouser", "dress", 6, 6))',    // no, close enough, but not both in the same suitcase
-            'PROXIMITY(IN(suitcase, "trouser"), IN(suitcase, "dress"), 6, 6)',  // yes - testing the proximity of the suitcases, not the contents.
-
-            'IN(S, AND("fish", "alive"))',                          // <s> is the sentence container
-            'S(AND("fish", "alive"))',                              // pseudonym for within same sentence -
-            'S(AND("fish", "finger"))',                             //
-            'S(AND("sheep", "wagging"))',
-            'P(AND("sheep", "wagging"))',                           // same paragraph...
-            'AND(IN(socks, "fox"),IN(socks, "knox"))',
-            'AND(IN(box, "fox"),IN(box, "knox"))',
-            'AND(IN(box, IN(socks, "fox")),IN(box, "knox"))',
-            'AND(IN(socks, IN(box, "fox")),IN(box, "knox"))',           // yes - because no extra elements in the box.
-            'S(PHRASE("black", "sheep"))',
-            'IN(name, PHRASE("little", "bo", "peep"))',
-            'IN(name, PHRASE("little", "bo", "peep", "has"))',
-
-            'IN(range1, IN(range2, "seven"))',                      // only match 5.3
-            'SAME(IN(range1, "seven"), IN(range2, "seven"))',       // only match 5.3
-            'OVERLAP(IN(range1, "five"), IN(range2, "ten"))',       // overlapping, match 5.4
-            'PROXIMITY(IN(range1, "five"), IN(range2, "ten"), 0, 0)',   // adjacent match 5.4, 5.5
-            'PROXIMITY(IN(range1, "five"), IN(range2, "ten"), 1, 1)',   // proximity match 5.4, 5.5
-            'PROXIMITY(IN(range1, "five"), IN(range2, "ten"), 2, 2)',   // adjacent match 5.4, 5.5, 5.6
-
-            'ATLEAST(2, IN(suitcase, "sock"))',                     // at least two suitcases containing a sock.
-            'ATLEAST(3, IN(suitcase, "sock"))',                     // no
-
-            'IN(box, "train")',                                     // should be 4 matches (since inside nested boxes)
-
-            'IN(suitcase, ATLEAST(1, "sock"))',                     // suitcases containing at least one sock.   (should really optimize away)
-            'IN(suitcase, ATLEAST(2, "sock"))',                     // at least two suit cases containing a sock.
-            'IN(suitcase, ATLEAST(3, "sock"))',
-            'IN(suitcase, ATLEAST(3, OR("sock", "dress")))',        //no
-            'IN(suitcase, ATLEAST(3, SET("sock", "dress")))',       //no
-            'IN(suitcase, ATLEAST(3, OR("sock", "jacket")))',       //yes...
-            'IN(suitcase, ATLEAST(3, SET("sock", "jacket")))',      //yes...
-            'IN(box, IN(suitcase, ATLEAST(2, "sock")))',            //yes - box, with one match
-            'IN(box, IN(suitcase, ATLEAST(3, "sock")))',            //no -
-            'IN(box, ATLEAST(2, IN(suitcase, "sock")))',            //yes -
-            'IN(box, ATLEAST(3, IN(suitcase, "sock")))',            //no -
-            'IN(box, ATLEAST(2, IN(suitcase, ATLEAST(2, "sock"))))',            //no...
-            'IN(box, AND(ATLEAST(2, "train"), ATLEAST(2, "sock")))',    // yes
-            'IN(box, AND(ATLEAST(3, "train"), ATLEAST(2, "sock")))',    // no
-            'IN(suitcase, AND(ATLEAST(2, "sock"), ATLEAST(2, OR("tights", "dress"))))', // no - different suitcases.
-            'IN(suitcase, ATLEAST(2, "sock"))', // yes
-            'IN(suitcase, ATLEAST(2, OR("tights", "dress")))',  // yes
-
-//The following example fails - not quite sure how to fix it.
-//          'IN(suitcase, OR(ATLEAST(2, "sock"), ATLEAST(2, OR("tights", "dress"))))',  // yes
-            'IN(suitcase, ATLEAST(4, AND(ATLEAST(2, "sock"), OR("shirt", "trouser"))))',    // yes - nested atleasts...
-            'IN(suitcase, ATLEAST(5, AND(ATLEAST(2, "sock"), OR("shirt", "trouser"))))',    // no
-
-            '_ATLEASTIN_(1, IN(suitcase, "sock"), 1)',                      // suitcases containing at least one sock.   (should really optimize away)
-            '_ATLEASTIN_(2, IN(suitcase, "sock"), 1)',                      // at least two suit cases containing a sock.
-            '_ATLEASTIN_(3, IN(suitcase, "sock"), 1)',
-
-            'S(ANDNOT("fish", "alive"))',                               // pseudonym for within same sentence -
-            'S(ANDNOT("fish", "finger"))',                              //
-
-            'AT("the", 2)',                                             // occurences of 'the' at position 2
-            'AT("the", 18)',
-            'AT("is", 17)',
-            'AND(AT("the", 18),AT("is",17))',
-
-            'AND("gch01", "gch02", "gch04")',
-            'AND("gch01", "gch02", "gch10")',
-
-            'AND(SET("and","a"), SET("the", "one"), PHRASE("for","the","dame"))',
-            'AND(CAPS("sheep"), "spotted")',
-            'AND(CAPS("sheep"), NOCAPS("spotted"))',
-            'AND(SET(CAPS("sheep","little")), SET(CAPS("Up","go")))',
-            'AND(SET(CAPS("sheep","little")), SET(NOCAPS("Up","go")))',
-            'AND(OR(CAPS("sheep"),CAPS("Little")), OR(CAPS("Up"),NOCAPS("go")))',
-
-            'ANDNOT(AND("black","sheep"), "family")',
-            'ANDNOT(AND("little","and"), "jack")',
-
-            'BUTNOT("little", PHRASE("little", "star"))',
-            'BUTNOTJOIN("little", PHRASE("little", "star"))',
-            'BUTNOT("black", PHRASE("black", OR("spotted", "sheep")))',
-            'BUTNOTJOIN("black", PHRASE("black", OR("spotted", "sheep")))',
-
-//MORE:
-// STEPPED flag on merge to give an error if input doesn't support stepping.
-// What about the duplicates that can come out of the proximity operators?
-// where the next on the rhs is at a compatible position, but in a different document
-// What about inverse of proximity x not w/n y
-// Can inverse proximity be used for sentance/paragraph.  Can we combine them so short circuited before temporaries created.
-//MORE: What other boundary conditions can we think of.
-
-                ' '
-
-#end
-
-            ], queryInputRecord);
-
-p := project(nofold(q1), doBatchExecute(TS_wordIndex, LEFT, 0x40000000));
-output(p);

+ 0 - 40
testing/regress/ecl/aggds1.ecl

@@ -1,40 +0,0 @@
-/*##############################################################################
-
-    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-############################################################################## */
-
-import $.setup.sq;
-
-//Check fixed size disk count correctly checks canMatchAny()
-inlineDs := dataset([1,2],{integer value});
-
-//Simple disk aggregate
-output(table(sq.SimplePersonBookDs, { sum(group, aage),exists(group),exists(group,aage>0),exists(group,aage>100),count(group,aage>20) })) : independent;
-
-//Filtered disk aggregate, which also requires a beenProcessed flag
-output(table(sq.SimplePersonBookDs(surname != 'Halliday'), { max(group, aage) })): independent;
-
-//Special case count.
-output(table(sq.SimplePersonBookDs(forename = 'Gavin'), { count(group) })): independent;
-
-output(count(sq.SimplePersonBookDs)): independent;
-
-//Special case count.
-output(table(sq.SimplePersonBookDs, { count(group, (forename = 'Gavin')) })): independent;
-
-//existance checks
-output(exists(sq.SimplePersonBookDs)): independent;
-output(exists(sq.SimplePersonBookDs(forename = 'Gavin'))): independent;
-output(exists(sq.SimplePersonBookDs(forename = 'Joshua'))): independent;

+ 19 - 0
testing/regress/ecl/aggds1_hthor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggds1('hthor');

+ 19 - 0
testing/regress/ecl/aggds1_roxie.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggds1('roxie');

+ 19 - 0
testing/regress/ecl/aggds1_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggds1('thor');

+ 2 - 4
testing/regress/ecl/aggds1seq.ecl

@@ -15,10 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-//Check fixed size disk count correctly checks canMatchAny()
-inlineDs := dataset([1,2],{integer value});
+import $.setup;
+sq := setup.sq('hthor');
 
 sequential(
 //Simple disk aggregate

+ 2 - 12
testing/regress/ecl/aggds2.ecl

@@ -15,15 +15,5 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-pr:= table(sq.SimplePersonBookDs, { fullname := trim(surname) + ', ' + trim(forename), aage });
-
-//Aggregate on a projected table that can't be merged.  seq is actually set to aage
-pr2:= table(sq.SimplePersonBookDs, { surname, forename, aage, unsigned8 seq := (random() % 100) / 2000 + aage; });
-
-//Filtered Aggregate on a projected table.
-output(table(pr(aage > 20), { max(group, fullname) }));
-
-//Filtered Aggregate on a projected table.
-output(table(pr2(seq > 30), { ave(group, aage) }));
+import $.common;
+common.aggds2('hthor');

+ 19 - 0
testing/regress/ecl/aggds2_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggds2('thor');

+ 2 - 1
testing/regress/ecl/aggds2seq.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 pr:= table(sq.SimplePersonBookDs, { fullname := trim(surname) + ', ' + trim(forename), aage });
 

+ 2 - 26
testing/regress/ecl/aggds3.ecl

@@ -15,29 +15,5 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-//A not-so-simple out of line subquery
-secondBookName := (string20)sort(sq.SimplePersonBookDs.books, name)[2].name;
-
-//Simple disk aggregate
-output(sort(table(sq.SimplePersonBookDs, { surname, sumage := sum(group, aage) }, surname, few),surname));
-
-//Filtered disk aggregate, which also requires a beenProcessed flag
-output(sort(table(sq.SimplePersonBookDs(surname != 'Halliday'), { max(group, aage), surname }, surname, few),surname));
-
-//check literals are assigned
-output(sort(table(sq.SimplePersonBookDs(forename = 'Gavin'), { 'Count: ', count(group), 'Name: ', surname }, surname, few),surname));
-
-//Sub query needs serializing or repeating....
-
-// A simple inline subquery
-output(sort(table(sq.SimplePersonBookDs, { cnt := count(books), sumage := sum(group, aage) }, count(books), few),cnt));
-
-output(sort(table(sq.SimplePersonBookDs, { sbn := secondBookName, sumage := sum(group, aage) }, secondBookName, few),sbn));
-
-// An out of line subquery (caused problems accessing parent inside sort criteria
-output(sort(table(sq.SimplePersonBookDs, { cnt := count(books(id != 0)), sumage := sum(group, aage) }, count(books(id != 0)), few),cnt));
-
-//Bizarre - add a dataset that needs serialization/deserialisation to enusre cloned correctly
-output(sort(table(nofold(sq.SimplePersonBookDs)(surname != 'Halliday'), { max(group, aage), surname, dataset books := books}, surname, few),surname));
+import $.common;
+common.aggds3('hthor');

+ 19 - 0
testing/regress/ecl/aggds3_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggds3('thor');

+ 2 - 1
testing/regress/ecl/aggds3seq.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 //A not-so-simple out of line subquery
 secondBookName := (string20)sort(sq.SimplePersonBookDs.books, name)[2].name;

+ 2 - 12
testing/regress/ecl/aggds4.ecl

@@ -15,15 +15,5 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-pr:= table(sq.SimplePersonBookDs, { fullname := trim(surname) + ', ' + trim(forename), aage });
-
-//Aggregate on a projected table that can't be merged
-pr2:= table(sq.SimplePersonBookDs, { surname, forename, aage, unsigned8 seq := (random() % 100) / 2000 + aage; });
-
-//Filtered Aggregate on a projected table.
-output(sort(table(pr(aage > 20), { aage, max(group, fullname) }, aage, few), record));
-
-//Filtered Aggregate on a projected table.
-output(sort(table(pr2(seq > 10), { surname, ave(group, aage) }, surname, few), record));
+import $.common;
+common.aggds4('hthor');

+ 19 - 0
testing/regress/ecl/aggds4_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggds4('thor');

+ 2 - 1
testing/regress/ecl/aggds4seq.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 pr:= table(sq.SimplePersonBookDs, { fullname := trim(surname) + ', ' + trim(forename), aage });
 

+ 2 - 25
testing/regress/ecl/aggidx1.ecl

@@ -15,28 +15,5 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-//Check correctly checks canMatchAny()
-inlineDs := dataset([1,2],{integer value});
-
-//Simple disk aggregate
-output(table(sq.SimplePersonBookIndex, { sum(group, aage),exists(group),exists(group,aage>0),exists(group,aage>100),count(group,aage>20) }));
-
-//Filtered disk aggregate, which also requires a beenProcessed flag
-output(table(sq.SimplePersonBookIndex(surname != 'Halliday'), { max(group, aage) }));
-
-//Special case count.
-output(table(sq.SimplePersonBookIndex(forename = 'Gavin'), { count(group) }));
-
-output(count(sq.SimplePersonBookIndex));
-
-//Special case count.
-output(table(sq.SimplePersonBookIndex, { count(group, (forename = 'Gavin')) }));
-
-output(table(inlineDs, { count(sq.SimplePersonBookIndex(inlineDs.value = 1)); }));
-
-//existance checks
-output(exists(sq.SimplePersonBookIndex));
-output(exists(sq.SimplePersonBookIndex(forename = 'Gavin')));
-output(exists(sq.SimplePersonBookIndex(forename = 'Joshua')));
+import $.common;
+common.aggidx1('hthor');

+ 19 - 0
testing/regress/ecl/aggidx1_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggidx1('thor');

+ 2 - 1
testing/regress/ecl/aggidx1seq.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 //Check correctly checks canMatchAny()
 inlineDs := dataset([1,2],{integer value});

+ 2 - 12
testing/regress/ecl/aggidx2.ecl

@@ -15,15 +15,5 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-pr:= table(sq.SimplePersonBookIndex, { fullname := trim(surname) + ', ' + trim(forename), aage });
-
-//Aggregate on a projected table that can't be merged
-pr2:= table(sq.SimplePersonBookIndex, { surname, forename, aage, unsigned8 seq := (random() % 100) / 2000 + aage; });
-
-//Filtered Aggregate on a projected table.
-output(table(pr(aage > 20), { max(group, fullname) }));
-
-//Filtered Aggregate on a projected table.
-output(table(pr2(seq > 30), { ave(group, aage) }));
+import $.common;
+common.aggidx2('hthor');

+ 19 - 0
testing/regress/ecl/aggidx2_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggidx2('thor');

+ 2 - 1
testing/regress/ecl/aggidx2seq.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 pr:= table(sq.SimplePersonBookIndex, { fullname := trim(surname) + ', ' + trim(forename), aage });
 

+ 2 - 27
testing/regress/ecl/aggidx3.ecl

@@ -15,30 +15,5 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-//A not-so-simple out of line subquery
-secondBookName := (string20)sort(sq.SimplePersonBookIndex.books, name)[2].name;
-
-//Simple disk aggregate
-output(sort(table(sq.SimplePersonBookIndex, { string surname := trim(sq.SimplePersonBookIndex.surname), sum(group, aage) }, trim(surname), few), surname));
-
-
-//Filtered disk aggregate, which also requires a beenProcessed flag
-output(sort(table(sq.SimplePersonBookIndex(surname != 'Halliday'), { max(group, aage), surname }, surname, few), surname));
-
-//Special case count.
-output(sort(table(sq.SimplePersonBookIndex(forename = 'Gavin'), { count(group), surname }, surname, few), surname));
-
-//Sub query needs serializing or repeating....
-
-// A simple inline subquery
-output(sort(table(sq.SimplePersonBookIndex, { cntBooks:= count(books), sumage := sum(group, aage) }, count(books), few), cntBooks, sumage));
-
-output(sort(table(sq.SimplePersonBookIndex, { secondBookName, sumage := sum(group, aage) }, secondBookName, few), secondbookname));
-
-//Access to file position - ensure it is serialized correctly!
-output(sort(table(sq.SimplePersonBookIndex, { pos := filepos DIV 8192, sumage := sum(group, aage) }, filepos DIV 8192, few), pos, sumage));
-
-// An out of line subquery (caused problems accessing parent inside sort criteria
-output(sort(table(sq.SimplePersonBookIndex, { numBooks := count(books(id != 0)), sumage := sum(group, aage) }, count(books(id != 0)), few), numbooks, sumage));
+import $.common;
+common.aggidx3('hthor');

+ 19 - 0
testing/regress/ecl/aggidx3_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggidx3('thor');

+ 2 - 1
testing/regress/ecl/aggidx3seq.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 //A not-so-simple out of line subquery
 secondBookName := (string20)sort(sq.SimplePersonBookIndex.books, name)[2].name;

+ 2 - 12
testing/regress/ecl/aggidx4.ecl

@@ -15,15 +15,5 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-pr:= table(sq.SimplePersonBookIndex, { fullname := trim(surname) + ', ' + trim(forename), aage });
-
-//Aggregate on a projected table that can't be merged
-pr2:= table(sq.SimplePersonBookIndex, { surname, forename, aage, unsigned8 seq := (random() % 100) / 2000 + aage; });
-
-//Filtered Aggregate on a projected table.
-output(sort(table(pr(aage > 20), { aage, max(group, fullname) }, aage, few), aage));
-
-//Filtered Aggregate on a projected table.
-output(sort(table(pr2(seq > 10), { surname, ave(group, aage) }, surname, few), surname));
+import $.common;
+common.aggidx4('hthor');

+ 19 - 0
testing/regress/ecl/aggidx4_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.aggidx4('thor');

+ 2 - 1
testing/regress/ecl/aggidx4seq.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 pr:= table(sq.SimplePersonBookIndex, { fullname := trim(surname) + ', ' + trim(forename), aage });
 

+ 2 - 1
testing/regress/ecl/aggsqx1.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 forceSubQuery(a) := macro
     { dedup(a,true)[1] }

+ 2 - 1
testing/regress/ecl/aggsqx1b.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 forceSubQuery(a) := macro
     { dedup(a,true)[1] }

+ 2 - 1
testing/regress/ecl/aggsqx2.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 forceSubQuery(a) := macro
     { dedup(a,true)[1] }

+ 2 - 1
testing/regress/ecl/aggsqx3.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 //Simple disk aggregate
 output(preload(sq.HousePersonBookDs), { dataset people := sort(table(persons, { surname, sum(group, aage) }, surname, few), surname)});

+ 2 - 1
testing/regress/ecl/aggsqx3err.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 //A mistyped query, but interesting never the less - needs to access a stored variable from the child.
 secondBookNameX := (string20)sort(sq.SimplePersonBookDs.books, name)[2].name;

+ 2 - 1
testing/regress/ecl/aggsqx4.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 pr:= table(sq.HousePersonBookDs.persons, { fullname := trim(surname) + ', ' + trim(forename), aage });
 

+ 145 - 124
testing/ecl/setup/textsearch.txt

@@ -15,38 +15,41 @@
     limitations under the License.
 ############################################################################## */
 
-//Code that is common to text searching
+import $.^.Setup;
+import Setup.Options;
+import Setup.TS;
 
+EXPORT TextSearch := FUNCTION
 
 INCLUDE_DEBUG_INFO := false;
 import lib_stringlib;
 
-MaxTerms            := TS_MaxTerms;
-MaxStages           := TS_MaxStages;
-MaxProximity        := TS_MaxProximity;
-MaxWildcard     := TS_MaxWildcard;
-MaxMatchPerDocument := TS_MaxMatchPerDocument;
-MaxFilenameLength := TS_MaxFilenameLength;
-MaxActions       := TS_MaxActions;
-
-kindType        := TS_kindType;
-sourceType      := TS_sourceType;
-wordCountType   := TS_wordCountType;
-segmentType     := TS_segmentType;
-wordPosType     := TS_wordPosType;
-docPosType      := TS_docPosType;
-documentId      := TS_documentId;
-termType        := TS_termType;
-distanceType    := TS_distanceType;
-stageType       := TS_stageType;
-dateType        := TS_dateType;
+MaxTerms            := TS.MaxTerms;
+MaxStages           := TS.MaxStages;
+MaxProximity        := TS.MaxProximity;
+MaxWildcard     := TS.MaxWildcard;
+MaxMatchPerDocument := TS.MaxMatchPerDocument;
+MaxFilenameLength := TS.MaxFilenameLength;
+MaxActions       := TS.MaxActions;
+
+kindType        := TS.kindType;
+sourceType      := TS.sourceType;
+wordCountType   := TS.wordCountType;
+segmentType     := TS.segmentType;
+wordPosType     := TS.wordPosType;
+docPosType      := TS.docPosType;
+documentId      := TS.documentId;
+termType        := TS.termType;
+distanceType    := TS.distanceType;
+stageType       := TS.stageType;
+dateType        := TS.dateType;
 matchCountType  := unsigned2;
-wordType        := TS_wordType;
-wordFlags       := TS_wordFlags;
-wordIdType      := TS_wordIdType;
+wordType        := TS.wordType;
+wordFlags       := TS.wordFlags;
+wordIdType      := TS.wordIdType;
 
 //May want the following, probably not actually implemented as an index - would save having dpos in the index, but more importantly storing it in the candidate match results because the mapping could be looked up later.
-wordIndexRecord := TS_wordIndexRecord;
+wordIndexRecord := TS.wordIndexRecord;
 
 MaxWipWordOrAlias  := 4;
 MaxWipTagContents  := 65535;
@@ -61,9 +64,9 @@ actionEnum := ENUM(
 //Minimal operations required to implement the searching.
     ReadWord,           // termNum, source, segment, word, wordFlagMask, wordFlagCompare,
     ReadWordSet,        // termNum, source, segment, words, wordFlagMask, wordFlagCompare,
-    AndTerms,           // 
-    OrTerms,            // 
-    AndNotTerms,        // 
+    AndTerms,           //
+    OrTerms,            //
+    AndNotTerms,        //
     PhraseAnd,          //
     ProximityAnd,       // distanceBefore, distanceAfter
     MofNTerms,          // minMatches, maxMatches
@@ -75,16 +78,16 @@ actionEnum := ENUM(
     ContainedAtLeast,
     TagContainsSearch,  // Used for the outermost IN() expression - check it overlaps and rolls up
     TagContainsTerm,    // Used for an inner tag contains - checks, but doesn't roll up
-    TagNotContainsTerm, // 
+    TagNotContainsTerm, //
     SameContainer,
-    NotSameContainer,   // 
-    MofNContainer,      // 
+    NotSameContainer,   //
+    MofNContainer,      //
     RankContainer,      // NOTIMPLEMENTED
     OverlapProximityAnd,
 
 //The following aren't very sensible as far as text searching goes, but are here to test the underlying functionality
     AndJoinTerms,       // join on non-proximity
-    AndNotJoinTerms,    // 
+    AndNotJoinTerms,    //
     MofNJoinTerms,      // minMatches, maxMatches
     RankJoinTerms,      // left outer join
     ProximityMergeAnd,  // merge join on proximity
@@ -102,13 +105,13 @@ actionEnum := ENUM(
 
     //The following are only used in the production
     FlagModifier,       // wordFlagMask, wordFlagCompare
-    QuoteModifier,      // 
+    QuoteModifier,      //
     Max
 );
 
 //  FAIL(stageType, 'Missing entry: ' + (string)action));
 
-boolean definesTerm(actionEnum action) := 
+boolean definesTerm(actionEnum action) :=
     (action in [actionEnum.ReadWord, actionEnum.ReadWordSet]);
 
 booleanRecord := { boolean value };
@@ -124,7 +127,7 @@ createStage(stageType stage) := transform(stageRecord, self.stage := stage);
 createTerm(termType term) := transform(termRecord, self.term := term);
 
 //should have an option to optimize the order
-searchRecord := 
+searchRecord :=
             RECORD  //,PACK
 stageType       stage;
 termType        term;
@@ -183,8 +186,8 @@ StageSetToDataset(stageSet x) := dataset(x, stageRecord);
 StageDatasetToSet(dataset(stageRecord) x) := set(x, stage);
 
 hasSingleRowPerMatch(actionEnum kind) :=
-    (kind IN [  actionEnum.ReadWord, 
-                actionEnum.ReadWordSet, 
+    (kind IN [  actionEnum.ReadWord,
+                actionEnum.ReadWordSet,
                 actionEnum.PhraseAnd,
                 actionEnum.ProximityAnd,
                 actionEnum.ContainedAtLeast,
@@ -193,7 +196,7 @@ hasSingleRowPerMatch(actionEnum kind) :=
                 actionEnum.OverlapProximityAnd]);
 
 inheritsSingleRowPerMatch(actionEnum kind) :=
-    (kind IN [  actionEnum.OrTerms, 
+    (kind IN [  actionEnum.OrTerms,
 //              actionEnum.AndNotTerms,                 // move container inside an andnot
                 actionEnum.TagNotContainsTerm,
                 actionEnum.NotSameContainer]);
@@ -236,8 +239,8 @@ combineChildren(dataset(childMatchRecord) l, dataset(childMatchRecord) r) := fun
     return dedup(mergedDs, wpos, wip, term);
 end;
 
-SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) := MODULE
-    
+SearchExecutor(dataset(TS.wordIndexRecord) wordIndex, unsigned4 internalFlags) := MODULE
+
     ///////////////////////////////////////////////////////////////////////////////////////////////////////////
     // Matching helper functions
 
@@ -254,7 +257,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
         matchSearchFlags(wIndex, search);
 
     EXPORT matchSearchSource(wordIndex wIndex, searchRecord search) :=
-        keyed(search.source = 0 OR TS_docMatchesSource(wIndex.doc, search.source), opt);
+        keyed(search.source = 0 OR TS.docMatchesSource(wIndex.doc, search.source), opt);
 
     ///////////////////////////////////////////////////////////////////////////////////////////////////////////
     // ReadWord
@@ -271,7 +274,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
             self := [];
         end;
 
-        steppedMatches :=   IF(search.priority <> 0, 
+        steppedMatches :=   IF(search.priority <> 0,
                                 stepped(matches, doc, segment, wpos, priority(search.priority)),
                                 stepped(matches, doc, segment, wpos)
                             );
@@ -298,7 +301,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
             self := [];
         end;
 
-        steppedMatches :=   IF(search.priority <> 0, 
+        steppedMatches :=   IF(search.priority <> 0,
                                 stepped(matches, doc, segment, wpos, priority(search.priority)),
                                 stepped(matches, doc, segment, wpos)
                             );
@@ -312,7 +315,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
     // OrTerms
 
     EXPORT doOrTerms(searchRecord search, SetOfInputs inputs) := FUNCTION
-        return merge(inputs, doc, segment, wpos, dedup);        // MORE  option to specify priority?
+        return merge(inputs, doc, segment, wpos, dedup, SORTED(doc, segment, wpos));        // MORE  option to specify priority?
     END;
 
 
@@ -336,7 +339,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
     // ButNotTerms
 
     EXPORT doButNotTerms(searchRecord search, SetOfInputs inputs) := FUNCTION
-        return mergejoin(inputs, STEPPED(left.doc = right.doc and left.segment=right.segment) and 
+        return mergejoin(inputs, STEPPED(left.doc = right.doc and left.segment=right.segment) and
                                  (LEFT.wpos BETWEEN RIGHT.wpos AND RIGHT.wpos+RIGHT.wip), doc, segment, wpos, left only, internal(internalFlags));
     END;
 
@@ -345,7 +348,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
     // ButNotJoinTerms
 
     EXPORT doButNotJoinTerms(searchRecord search, SetOfInputs inputs) := FUNCTION
-        return join(inputs, STEPPED(left.doc = right.doc and left.segment=right.segment) and 
+        return join(inputs, STEPPED(left.doc = right.doc and left.segment=right.segment) and
                                  (LEFT.wpos BETWEEN RIGHT.wpos AND RIGHT.wpos+RIGHT.wip), transform(left), sorted(doc, segment, wpos), left only, internal(internalFlags));
     END;
 
@@ -418,7 +421,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
     ///////////////////////////////////////////////////////////////////////////////////////////////////////////
     // PhraseAnd
 
-    steppedPhraseCondition(matchRecord l, matchRecord r, distanceType maxWip) := 
+    steppedPhraseCondition(matchRecord l, matchRecord r, distanceType maxWip) :=
             (l.doc = r.doc) and (l.segment = r.segment) and
             (r.wpos between l.wpos+1 and l.wpos+maxWip);
 
@@ -426,7 +429,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
 
         steppedCondition(matchRecord l, matchRecord r) := steppedPhraseCondition(l, r, search.maxWipLeft);
 
-        condition(matchRecord l, matchRecord r) :=  
+        condition(matchRecord l, matchRecord r) :=
             (r.wpos = l.wpos + l.wip);
 
         matchRecord createMatch(matchRecord l, dataset(matchRecord) allRows) := transform
@@ -444,7 +447,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
     ///////////////////////////////////////////////////////////////////////////////////////////////////////////
     // PhraseAnd
 
-    steppedPhrase1To5Condition(matchRecord l, matchRecord r, distanceType maxWip) := 
+    steppedPhrase1To5Condition(matchRecord l, matchRecord r, distanceType maxWip) :=
             (l.doc = r.doc) and (l.segment = r.segment) and
             (r.wpos between l.wpos+1 and l.wpos+5);
 
@@ -452,7 +455,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
 
         steppedCondition(matchRecord l, matchRecord r) := steppedPhrase1To5Condition(l, r, search.maxWipLeft);
 
-        condition(matchRecord l, matchRecord r) :=  
+        condition(matchRecord l, matchRecord r) :=
             (r.wpos = l.wpos + l.wip);
 
         matchRecord createMatch(matchRecord l, dataset(matchRecord) allRows) := transform
@@ -654,7 +657,7 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
             maxRightBeforeLeft := IF(maxDistanceRightBeforeLeft >= 0, maxDistanceRightBeforeLeft + maxWipRight, maxDistanceRightBeforeLeft);
             maxRightAfterLeft := IF(maxDistanceRightAfterLeft >= 0, maxDistanceRightAfterLeft + maxWipLeft, maxDistanceRightAfterLeft);
 
-            return 
+            return
                 (l.doc = r.doc) and (l.segment = r.segment) and
                 (r.wpos + maxRightBeforeLeft >= l.wpos) and             // (right.wpos + right.wip + maxRightBeforeLeft >= left.wpos)
                 (r.wpos <= l.wpos + (maxRightAfterLeft));               // (right.wpos <= left.wpos + left.wip + maxRightAfterLeft)
@@ -665,14 +668,14 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
 
         steppedCondition(matchRecord l, matchRecord r) := steppedProximityCondition(l, r, search.maxWipLeft, search.maxWipRight, search.maxDistanceRightBeforeLeft, search.maxDistanceRightAfterLeft);
 
-        condition(matchRecord l, matchRecord r) :=  
+        condition(matchRecord l, matchRecord r) :=
             (r.wpos + r.wip + search.maxDistanceRightBeforeLeft >= l.wpos) and
             (r.wpos <= l.wpos + l.wip + search.maxDistanceRightAfterLeft);
 
         overlaps(wordPosType wpos, childMatchRecord r) := (wpos between r.wpos and r.wpos + (r.wip - 1));
 
         createMatch(matchRecord l, matchRecord r) := function
-        
+
             wpos := min(l.wpos, r.wpos);
             wend := max(l.wpos + l.wip, r.wpos + r.wip);
 
@@ -704,14 +707,14 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
 
         steppedCondition(matchRecord l, matchRecord r) := steppedProximityCondition(l, r, search.maxWipLeft, search.maxWipRight, search.maxDistanceRightBeforeLeft, search.maxDistanceRightAfterLeft);
 
-        condition(matchRecord l, matchRecord r) :=  
+        condition(matchRecord l, matchRecord r) :=
             (r.wpos + r.wip + search.maxDistanceRightBeforeLeft >= l.wpos) and
             (r.wpos <= l.wpos + l.wip + search.maxDistanceRightAfterLeft);
 
         overlaps(wordPosType wpos, childMatchRecord r) := (wpos between r.wpos and r.wpos + (r.wip - 1));
 
         anyOverlaps (matchRecord l, matchRecord r) := function
-        
+
             wpos := min(l.wpos, r.wpos);
             wend := max(l.wpos + l.wip, r.wpos + r.wip);
 
@@ -735,17 +738,17 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
 
     EXPORT doOverlapProximityAnd(searchRecord search, SetOfInputs inputs) := FUNCTION
 
-        steppedCondition(matchRecord l, matchRecord r) := 
+        steppedCondition(matchRecord l, matchRecord r) :=
                 (l.doc = r.doc) and (l.segment = r.segment) and
                 (r.wpos + search.maxWipRight >= l.wpos) and
                 (r.wpos <= l.wpos + search.maxWipLeft);
 
-        condition(matchRecord l, matchRecord r) :=  
+        condition(matchRecord l, matchRecord r) :=
             (r.wpos + r.wip >= l.wpos) and (r.wpos <= l.wpos + l.wip);
 
 
         createMatch(matchRecord l, matchRecord r) := function
-        
+
             wpos := min(l.wpos, r.wpos);
             wend := max(l.wpos + l.wip, r.wpos + r.wip);
 
@@ -855,26 +858,33 @@ SearchExecutor(dataset(TS_wordIndexRecord) wordIndex, unsigned4 internalFlags) :
     EXPORT convertToUserOutput(dataset(matchRecord) results) := function
 
         simpleUserOutputRecord createUserOutput(matchRecord l) := transform
-                self.source := TS_docid2source(l.doc);
-                self.subDoc := TS_docid2doc(l.doc);
+                self.source := TS.docid2source(l.doc);
+                self.subDoc := TS.docid2doc(l.doc);
                 self.words := l.children;
-                SELF.line := l.dpos DIV TS_MaxColumnsPerLine;
-                SELF.column := l.dpos % TS_MaxColumnsPerLine;
+                SELF.line := l.dpos DIV TS.MaxColumnsPerLine;
+                SELF.column := l.dpos % TS.MaxColumnsPerLine;
                 SELF := l;
             END;
 
         return project(results, createUserOutput(left));
     end;
 
-    EXPORT doExecuteQuery(dataset(searchRecord) queryDefinition, dataset(matchRecord) initialResults = dataset([], matchRecord)) := function
+    EXPORT doExecuteQuery(dataset(searchRecord) queryDefinition, boolean useLocal) := function
+
+        initialResults := dataset([], matchRecord);
+        localResults := FUNCTION
+            executionPlan := thisnode(global(queryDefinition, opt, few));           // Store globally for efficient access
+            results := ALLNODES(LOCAL(graph(initialResults, count(executionPlan), processStage(executionPlan[NOBOUNDCHECK COUNTER], rowset(left)), parallel)));
+            RETURN results;
+        END;
+
+        globalResults := FUNCTION
+            executionPlan := global(queryDefinition, opt, few);         // Store globally for efficient access
+            results := graph(initialResults, count(executionPlan), processStage(executionPlan[NOBOUNDCHECK COUNTER], rowset(left)), parallel);
+            RETURN results;
+        END;
 
-    #if (useLocal=true)
-        executionPlan := thisnode(global(queryDefinition, opt, few));           // Store globally for efficient access
-        results := ALLNODES(LOCAL(graph(initialResults, count(executionPlan), processStage(executionPlan[NOBOUNDCHECK COUNTER], rowset(left)), parallel)));
-    #else
-        executionPlan := global(queryDefinition, opt, few);         // Store globally for efficient access
-        results := graph(initialResults, count(executionPlan), processStage(executionPlan[NOBOUNDCHECK COUNTER], rowset(left)), parallel);
-    #end
+        results := IF(useLocal, localResults, globalResults);
         userOutput := convertToUserOutput(results);
 
         return userOutput;
@@ -887,12 +897,12 @@ end;
 // A simplified query language
 parseQuery(string queryText) := function
 
-searchParseRecord := 
+searchParseRecord :=
             RECORD(searchRecord)
 unsigned        numInputs;
             END;
 
-productionRecord  := 
+productionRecord  :=
             record
 unsigned        termCount;
 dataset(searchParseRecord) actions{maxcount(MaxActions)};
@@ -924,7 +934,7 @@ END;
 
 PRULE forwardExpr := use(productionRecord, 'ExpressionRule');
 
-ARULE term0 
+ARULE term0
     := quotedword                               transform(searchParseRecord,
                                                     SELF.action := actionEnum.ReadWord;
                                                     SELF.word := StringLib.StringToLowerCase($1[2..length($1)-1]);
@@ -1020,11 +1030,11 @@ PRULE term1
                                                     )
                                                 )
     | 'RANK' '(' forwardExpr ',' forwardExpr ')'
-                                                transform(productionRecord, 
+                                                transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + $5.actions + row(
-                                                        transform(searchParseRecord, 
-                                                            self.action := actionEnum.RankMergeTerms; 
+                                                        transform(searchParseRecord,
+                                                            self.action := actionEnum.RankMergeTerms;
                                                             self.numInputs := 2;
                                                             self := []
                                                         )
@@ -1085,11 +1095,11 @@ PRULE term1
                                                     )
                                                 )
     | 'PROXIMITY' '(' forwardExpr ',' forwardExpr ',' number ',' number ')'
-                                                transform(productionRecord, 
+                                                transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + $5.actions + row(
-                                                        transform(searchParseRecord, 
-                                                            self.action := actionEnum.ProximityAnd; 
+                                                        transform(searchParseRecord,
+                                                            self.action := actionEnum.ProximityAnd;
                                                             self.numInputs := 2;
                                                             self.maxDistanceRightBeforeLeft := (integer)$7;
                                                             self.maxDistanceRightAfterLeft := (integer)$9;
@@ -1098,22 +1108,22 @@ PRULE term1
                                                     )
                                                 )
     | 'OVERLAP' '(' forwardExpr ',' forwardExpr ')'
-                                                transform(productionRecord, 
+                                                transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + $5.actions + row(
-                                                        transform(searchParseRecord, 
-                                                            self.action := actionEnum.OverlapProximityAnd; 
+                                                        transform(searchParseRecord,
+                                                            self.action := actionEnum.OverlapProximityAnd;
                                                             self.numInputs := 2;
                                                             self := []
                                                         )
                                                     )
                                                 )
     | 'PRE' '(' forwardExpr ',' forwardExpr ')'
-                                                transform(productionRecord, 
+                                                transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + $5.actions + row(
-                                                        transform(searchParseRecord, 
-                                                            self.action := actionEnum.ProximityAnd; 
+                                                        transform(searchParseRecord,
+                                                            self.action := actionEnum.ProximityAnd;
                                                             self.numInputs := 2;
                                                             self.maxDistanceRightBeforeLeft := -1;
                                                             self.maxDistanceRightAfterLeft := MaxWordsInDocument;
@@ -1122,11 +1132,11 @@ PRULE term1
                                                     )
                                                 )
     | 'AFT' '(' forwardExpr ',' forwardExpr ')'
-                                                transform(productionRecord, 
+                                                transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + $5.actions + row(
-                                                        transform(searchParseRecord, 
-                                                            self.action := actionEnum.ProximityAnd; 
+                                                        transform(searchParseRecord,
+                                                            self.action := actionEnum.ProximityAnd;
                                                             self.numInputs := 2;
                                                             self.maxDistanceRightBeforeLeft := MaxWordsInDocument;
                                                             self.maxDistanceRightAfterLeft := -1;
@@ -1135,10 +1145,10 @@ PRULE term1
                                                     )
                                                 )
     | 'PROXMERGE' '(' forwardExpr ',' forwardExpr ',' number ',' number ')'
-                                                transform(productionRecord, 
+                                                transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + $5.actions + row(
-                                                        transform(searchParseRecord, 
+                                                        transform(searchParseRecord,
                                                             self.action := actionEnum.ProximityMergeAnd;
                                                             self.numInputs := 2;
                                                             self.maxDistanceRightBeforeLeft := (integer)$7;
@@ -1193,11 +1203,11 @@ PRULE term1
                                                     )
                                                 )
     | 'RANKJOIN' '(' forwardExpr ',' forwardExpr ')'
-                                                transform(productionRecord, 
+                                                transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + $5.actions + row(
-                                                        transform(searchParseRecord, 
-                                                            self.action := actionEnum.RankJoinTerms; 
+                                                        transform(searchParseRecord,
+                                                            self.action := actionEnum.RankJoinTerms;
                                                             self.numInputs := 2;
                                                             self := []
                                                         )
@@ -1263,11 +1273,11 @@ PRULE term1
                                                     )
                                                 )
     | 'SAME' '(' forwardExpr ',' forwardExpr ')'
-                                                transform(productionRecord, 
+                                                transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + $5.actions + row(
-                                                        transform(searchParseRecord, 
-                                                            self.action := actionEnum.SameContainer; 
+                                                        transform(searchParseRecord,
+                                                            self.action := actionEnum.SameContainer;
                                                             self.numInputs := 2;
                                                             self := []
                                                         )
@@ -1295,7 +1305,7 @@ PRULE term1
                                                         )
                                                     )
                                                 )
-    | 'AT' '(' forwardExpr ',' number ')'   
+    | 'AT' '(' forwardExpr ',' number ')'
                                                 transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + row(
@@ -1307,7 +1317,7 @@ PRULE term1
                                                         )
                                                     )
                                                 )
-    | 'NOTAT' '(' forwardExpr ',' number ')'    
+    | 'NOTAT' '(' forwardExpr ',' number ')'
                                                 transform(productionRecord,
                                                     self.termCount := 1;
                                                     self.actions := $3.actions + row(
@@ -1373,11 +1383,11 @@ dataset(searchParseRecord) actions{maxcount(MaxActions)};
         end;
 
 
-resultsRecord extractResults(dataset(searchParseRecord) actions) := 
+resultsRecord extractResults(dataset(searchParseRecord) actions) :=
         TRANSFORM
             SELF.actions := actions;
         END;
-            
+
 p1 := PARSE(infile,line,expr,extractResults($1.actions),first,whole,skip(ws),nocase,parse);
 
 pnorm := normalize(p1, left.actions, transform(right));
@@ -1547,14 +1557,14 @@ transformAtLeast(dataset(searchRecord) parsed) := function
         shared inputTerms := r.mapping(stage in set(l.inputs, stage)).outputTerms;
         shared inputAtleast := r.mapping(stage in set(l.inputs, stage)).activeAtleast;
         shared outputTerms := IF(hasSingleRowPerMatch(l.action), dataset([createTerm(l.term)]), inputTerms);
-        shared outputAtleast := 
+        shared outputAtleast :=
             MAP(l.action = actionEnum.GlobalAtLeast=>inputAtleast + row(createAtleast(l, inputTerms)),
                 l.action <> actionEnum.TagContainsSearch=>inputAtleast);
 
         export processRecord nextRow := transform
             SELF.moved := IF(l.action = actionEnum.TagContainsSearch, inputAtLeast);
 #if (INCLUDE_DEBUG_INFO)
-            SELF.debug := trim(l.debug) + '[' + 
+            SELF.debug := trim(l.debug) + '[' +
                     (string)COUNT(inputTerms) + ':' +
                     (string)COUNT(inputAtleast) + ':' +
                     (string)COUNT(outputTerms) + ':' +
@@ -1663,8 +1673,8 @@ transformNotIn(dataset(searchRecord) input) := function
     doStage2(processRecord l, stage2Record r) := module
         shared newContainer := r.map(stage = l.stage)[1].container;
         shared newTerm := r.map(stage = l.stage)[1].term;
-        shared numStages := 
-                    MAP(l.singleRowPerMatch and newContainer <> ''=>2, 
+        shared numStages :=
+                    MAP(l.singleRowPerMatch and newContainer <> ''=>2,
                         l.action = actionEnum.TagNotContainsTerm and not l.singleRowPerMatch=>0,
                         1);
         export processRecord nextRow := transform
@@ -1792,7 +1802,7 @@ transformIn(dataset(searchRecord) input) := function
                  actionEnum.RankMergeTerms  => IF(l.newContainer <> '', actionEnum.RankContainer, l.action),
                  actionEnum.TagContainsSearch => IF(l.inputsSingleRowPerMatch, actionEnum.TagContainsSearch, actionEnum.rollupContainer),
                  l.action);
-                 
+
 
         SELF.stage := l.stage + (c - 1);
         SELF.inputs := IF(c=2, dataset([createStage(l.stage)]), l.inputs);
@@ -1817,37 +1827,48 @@ applySearchTransformations(dataset(searchRecord) input) := FUNCTION
     RETURN processed4;
 END;
 
-queryProcessor(dataset(TS_wordIndexRecord) wordIndex, string query, unsigned4 internalFlags) := module//,library('TextSearch',1,0)
+queryProcessor(dataset(TS.wordIndexRecord) wordIndex, string query, boolean useLocal, unsigned4 internalFlags) := module//,library('TextSearch',1,0)
 export string queryText := query;
 export request := parseQuery(query);
 export processed := applySearchTransformations(request);
-export result := SearchExecutor(wordIndex, internalFlags).doExecuteQuery(processed);
+export result := SearchExecutor(wordIndex, internalFlags).doExecuteQuery(processed, useLocal);
     end;
 
 
-queryInputRecord := { string query{maxlength(2048)}; };
-
 MaxResults := 10000;
 
-processedRecord := record(queryInputRecord)
-dataset(searchRecord) request{maxcount(MaxActions)};
-dataset(simpleUserOutputRecord) result{maxcount(MaxResults)};
-        end;
+publicExports := MODULE
 
+    EXPORT queryInputRecord := { string query{maxlength(2048)}; };
 
-processedRecord doBatchExecute(dataset(TS_wordIndexRecord) wordIndex, queryInputRecord l, unsigned4 internalFlags=0) := transform
-    processed := queryProcessor(wordIndex, l.query, internalFlags);
-    self.request := processed.processed;
-    self.result := choosen(processed.result, MaxResults);
-    self := l;
-end;
+    EXPORT processedRecord := record(queryInputRecord)
+        dataset(searchRecord) request{maxcount(MaxActions)};
+        dataset(simpleUserOutputRecord) result{maxcount(MaxResults)};
+    END;
 
+    EXPORT processedRecord doBatchExecute(dataset(TS.wordIndexRecord) wordIndex, queryInputRecord l, boolean useLocal, unsigned4 internalFlags=0) := transform
+        processed := queryProcessor(wordIndex, l.query, useLocal, internalFlags);
+        self.request := processed.processed;
+        self.result := choosen(processed.result, MaxResults);
+        self := l;
+    end;
 
-doSingleExecute(dataset(TS_wordIndexRecord) wordIndex, string queryText, unsigned4 internalFlags=0) := function
-    request := parseQuery(queryText);
-    result := SearchExecutor(wordIndex, internalFlags).doExecuteQuery(request);
-    return result;
-end;
 
+    EXPORT doSingleExecute(dataset(TS.wordIndexRecord) wordIndex, string queryText, boolean useLocal, unsigned4 internalFlags=0) := function
+        request := parseQuery(queryText);
+        result := SearchExecutor(wordIndex, internalFlags).doExecuteQuery(request, useLocal);
+        return result;
+    end;
+
+    EXPORT executeBatchAgainstWordIndex(DATASET(queryInputRecord) queries, boolean useLocal, string source, unsigned4 internalFlags=0) := FUNCTION
+        Files := Setup.Files(source);
+        wordIndex := index(TS.textSearchIndex, Files.NameWordIndex(useLocal));
+        p := project(nofold(queries), doBatchExecute(wordIndex, LEFT, useLocal, internalFlags));
+        RETURN p;
+    END;
+
+END;
 
-#line(0)
+   RETURN publicExports;
+
+END;

+ 7 - 19
testing/ecl/textsearch.ecl

@@ -15,24 +15,12 @@
     limitations under the License.
 ############################################################################## */
 
-//UseStandardFiles
-//UseTextSearch
-//tidyoutput
-//nothor
-//nothorlcr
-//UseIndexes
-//xxvarskip type==roxie && setuptype==thor && !local
+import $.TextSearch;
 
-#option ('checkAsserts',false)
+EXPORT TextSearchQueries := module
 
-//SingleQuery := 'AND("the":1, "software":2, "source":3)';
-//SingleQuery := 'AND("the", "software", "source")';
+EXPORT WordTests := dataset([
 
-q1 := dataset([
-
-#if (#ISDEFINED(SingleQuery))
-            SingleQuery
-#else
             'AND("black","sheep")',
             'ANDNOT("black","sheep")',
             'MOFN(2,"black","sheep","white")',
@@ -383,9 +371,9 @@ q1 := dataset([
 
                 ' '
 
-#end
+            ], TextSearch.queryInputRecord);
+
 
-            ], queryInputRecord);
+EXPORT SingleBatchQuery(string search) := dataset([search], TextSearch.queryInputRecord);
 
-p := project(nofold(q1), doBatchExecute(TS_wordIndex, LEFT, 0x00000200));           // 0x200 forces paranoid order checking on
-output(p);
+END;

+ 45 - 0
testing/regress/ecl/common/aggds1.ecl

@@ -0,0 +1,45 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.^.setup;
+
+EXPORT aggds1(string source) := function
+
+sq := setup.sq(source);
+
+    RETURN SEQUENTIAL(
+    //Simple disk aggregate
+    output(table(sq.SimplePersonBookDs, { sum(group, aage),exists(group),exists(group,aage>0),exists(group,aage>100),count(group,aage>20) }));
+
+    //Filtered disk aggregate, which also requires a beenProcessed flag
+    output(table(sq.SimplePersonBookDs(surname != 'Halliday'), { max(group, aage) }));
+
+    //Special case count.
+    output(table(sq.SimplePersonBookDs(forename = 'Gavin'), { count(group) }));
+
+    output(count(sq.SimplePersonBookDs));
+
+    //Special case count.
+    output(table(sq.SimplePersonBookDs, { count(group, (forename = 'Gavin')) }));
+
+    //existence checks
+    output(exists(sq.SimplePersonBookDs));
+    output(exists(sq.SimplePersonBookDs(forename = 'Gavin')));
+    output(exists(sq.SimplePersonBookDs(forename = 'Joshua')));
+    );
+
+END;

+ 36 - 0
testing/regress/ecl/common/aggds2.ecl

@@ -0,0 +1,36 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.^.setup;
+
+EXPORT aggds2(string source) := FUNCTION
+
+sq := setup.sq(source);
+
+pr:= table(sq.SimplePersonBookDs, { fullname := trim(surname) + ', ' + trim(forename), aage });
+
+//Aggregate on a projected table that can't be merged.  seq is actually set to aage
+pr2:= table(sq.SimplePersonBookDs, { surname, forename, aage, unsigned8 seq := (random() % 100) / 2000 + aage; });
+
+    RETURN ORDERED(
+        //Filtered Aggregate on a projected table.
+        output(table(pr(aage > 20), { max(group, fullname) }));
+
+        //Filtered Aggregate on a projected table.
+        output(table(pr2(seq > 30), { ave(group, aage) }));
+    );
+END;

+ 50 - 0
testing/regress/ecl/common/aggds3.ecl

@@ -0,0 +1,50 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.^.setup;
+
+EXPORT aggds3(string source) := function
+
+    sq := setup.sq(source);
+
+    //A not-so-simple out of line subquery
+    secondBookName := (string20)sort(sq.SimplePersonBookDs.books, name)[2].name;
+
+    RETURN ORDERED(
+        //Simple disk aggregate
+        output(sort(table(sq.SimplePersonBookDs, { surname, sumage := sum(group, aage) }, surname, few),surname));
+
+        //Filtered disk aggregate, which also requires a beenProcessed flag
+        output(sort(table(sq.SimplePersonBookDs(surname != 'Halliday'), { max(group, aage), surname }, surname, few),surname));
+
+        //check literals are assigned
+        output(sort(table(sq.SimplePersonBookDs(forename = 'Gavin'), { 'Count: ', count(group), 'Name: ', surname }, surname, few),surname));
+
+        //Sub query needs serializing or repeating....
+
+        // A simple inline subquery
+        output(sort(table(sq.SimplePersonBookDs, { cnt := count(books), sumage := sum(group, aage) }, count(books), few),cnt));
+
+        output(sort(table(sq.SimplePersonBookDs, { sbn := secondBookName, sumage := sum(group, aage) }, secondBookName, few),sbn));
+
+        // An out of line subquery (caused problems accessing parent inside sort criteria
+        output(sort(table(sq.SimplePersonBookDs, { cnt := count(books(id != 0)), sumage := sum(group, aage) }, count(books(id != 0)), few),cnt));
+
+        //Bizarre - add a dataset that needs serialization/deserialisation to enusre cloned correctly
+        output(sort(table(nofold(sq.SimplePersonBookDs)(surname != 'Halliday'), { max(group, aage), surname, dataset books := books}, surname, few),surname));
+    );
+END;

+ 36 - 0
testing/regress/ecl/common/aggds4.ecl

@@ -0,0 +1,36 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.^.setup;
+
+EXPORT aggds4(string source) := function
+
+    sq := setup.sq(source);
+
+    pr:= table(sq.SimplePersonBookDs, { fullname := trim(surname) + ', ' + trim(forename), aage });
+
+    //Aggregate on a projected table that can't be merged
+    pr2:= table(sq.SimplePersonBookDs, { surname, forename, aage, unsigned8 seq := (random() % 100) / 2000 + aage; });
+
+    RETURN ORDERED(
+        //Filtered Aggregate on a projected table.
+        output(sort(table(pr(aage > 20), { aage, max(group, fullname) }, aage, few), record));
+
+        //Filtered Aggregate on a projected table.
+        output(sort(table(pr2(seq > 10), { surname, ave(group, aage) }, surname, few), record));
+    );
+END;

+ 50 - 0
testing/regress/ecl/common/aggidx1.ecl

@@ -0,0 +1,50 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.^.setup;
+
+EXPORT aggidx1(string source) := function
+
+    sq := setup.sq(source);
+
+    //Check correctly checks canMatchAny()
+    inlineDs := dataset([1,2],{integer value});
+
+    RETURN ORDERED(
+        #onwarning (4515, ignore);
+        //Simple disk aggregate
+        output(table(sq.SimplePersonBookIndex, { sum(group, aage),exists(group),exists(group,aage>0),exists(group,aage>100),count(group,aage>20) }));
+
+        //Filtered disk aggregate, which also requires a beenProcessed flag
+        output(table(sq.SimplePersonBookIndex(surname != 'Halliday'), { max(group, aage) }));
+
+        //Special case count.
+        output(table(sq.SimplePersonBookIndex(forename = 'Gavin'), { count(group) }));
+
+        output(count(sq.SimplePersonBookIndex));
+
+        //Special case count.
+        output(table(sq.SimplePersonBookIndex, { count(group, (forename = 'Gavin')) }));
+
+        output(table(inlineDs, { count(sq.SimplePersonBookIndex(inlineDs.value = 1)); }));
+
+        //existence checks
+        output(exists(sq.SimplePersonBookIndex));
+        output(exists(sq.SimplePersonBookIndex(forename = 'Gavin')));
+        output(exists(sq.SimplePersonBookIndex(forename = 'Joshua')));
+    );
+END;

+ 37 - 0
testing/regress/ecl/common/aggidx2.ecl

@@ -0,0 +1,37 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.^.setup;
+
+EXPORT aggidx2(string source) := function
+
+    sq := setup.sq(source);
+
+    pr:= table(sq.SimplePersonBookIndex, { fullname := trim(surname) + ', ' + trim(forename), aage });
+
+    //Aggregate on a projected table that can't be merged
+    pr2:= table(sq.SimplePersonBookIndex, { surname, forename, aage, unsigned8 seq := (random() % 100) / 2000 + aage; });
+
+    RETURN ORDERED(
+        //Filtered Aggregate on a projected table.
+        output(table(pr(aage > 20), { max(group, fullname) }));
+
+        //Filtered Aggregate on a projected table.
+        output(table(pr2(seq > 30), { ave(group, aage) }));
+    );
+
+END;

+ 51 - 0
testing/regress/ecl/common/aggidx3.ecl

@@ -0,0 +1,51 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.^.setup;
+
+EXPORT aggidx3(string source) := function
+
+    sq := setup.sq(source);
+
+    //A not-so-simple out of line subquery
+    secondBookName := (string20)sort(sq.SimplePersonBookIndex.books, name)[2].name;
+
+    RETURN ORDERED(
+        //Simple disk aggregate
+        output(sort(table(sq.SimplePersonBookIndex, { string surname := trim(sq.SimplePersonBookIndex.surname), sum(group, aage) }, trim(surname), few), surname));
+
+
+        //Filtered disk aggregate, which also requires a beenProcessed flag
+        output(sort(table(sq.SimplePersonBookIndex(surname != 'Halliday'), { max(group, aage), surname }, surname, few), surname));
+
+        //Special case count.
+        output(sort(table(sq.SimplePersonBookIndex(forename = 'Gavin'), { count(group), surname }, surname, few), surname));
+
+        //Sub query needs serializing or repeating....
+
+        // A simple inline subquery
+        output(sort(table(sq.SimplePersonBookIndex, { cntBooks:= count(books), sumage := sum(group, aage) }, count(books), few), cntBooks, sumage));
+
+        output(sort(table(sq.SimplePersonBookIndex, { secondBookName, sumage := sum(group, aage) }, secondBookName, few), secondbookname));
+
+        //Access to file position - ensure it is serialized correctly!
+        output(sort(table(sq.SimplePersonBookIndex, { pos := filepos DIV 8192, sumage := sum(group, aage) }, filepos DIV 8192, few), pos, sumage));
+
+        // An out of line subquery (caused problems accessing parent inside sort criteria
+        output(sort(table(sq.SimplePersonBookIndex, { numBooks := count(books(id != 0)), sumage := sum(group, aage) }, count(books(id != 0)), few), numbooks, sumage));
+    );
+END;

+ 36 - 0
testing/regress/ecl/common/aggidx4.ecl

@@ -0,0 +1,36 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.^.setup;
+
+EXPORT aggidx4(string source) := function
+
+    sq := setup.sq(source);
+
+    pr:= table(sq.SimplePersonBookIndex, { fullname := trim(surname) + ', ' + trim(forename), aage });
+
+    //Aggregate on a projected table that can't be merged
+    pr2:= table(sq.SimplePersonBookIndex, { surname, forename, aage, unsigned8 seq := (random() % 100) / 2000 + aage; });
+
+    RETURN ORDERED(
+        //Filtered Aggregate on a projected table.
+        output(sort(table(pr(aage > 20), { aage, max(group, fullname) }, aage, few), aage));
+
+        //Filtered Aggregate on a projected table.
+        output(sort(table(pr2(seq > 10), { surname, ave(group, aage) }, surname, few), surname));
+    );
+END;

+ 119 - 0
testing/regress/ecl/common/sqagg.ecl

@@ -0,0 +1,119 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+boolean missingLevelOk := false;
+
+export sqagg(string source) := function
+
+import $.^.setup;
+sq := setup.sq(source);
+
+// Test the different child operators.  Try and test inline and out of line, also part of a compound
+// source activity and not part.
+
+udecimal8 todaysDate := 20040602D;
+unsigned4 age(udecimal8 dob) := ((todaysDate - dob) / 10000D);
+
+//MORE: books[1] ave(books)
+
+// Different child operators, all inline.
+persons := sq.HousePersonBookDs.persons;
+books := persons.books;
+personByAgeDesc := sort(sq.HousePersonBookDs.persons, dob);
+
+// Grand children, again all inline.
+
+booksByRatingDesc := sort(sq.HousePersonBookDs.persons.books, -rating100);
+
+//sort order is deliberately different from anything that will be used later
+xpersons := sort(sq.HousePersonBookDs.persons, surname + (string)dob + forename)[1..200];
+xpersonByAgeDesc := sort(xpersons, dob);
+xbooks := sort(sq.HousePersonBookDs.persons.books, name + (string)rating100 + author)[1..200];
+xbooksByRatingDesc := sort(xbooks, -rating100);
+xxbooks := sort(xpersons.books, name + (string)rating100 + author)[1..200];
+xxbooksByRatingDesc := sort(xxbooks, -rating100);
+
+RETURN ORDERED(
+output(sq.HousePersonBookDs, { addr, numPeople := count(persons), aveAge:= ave(persons, age(dob)), maxDob := max(persons, dob)});
+output(sq.HousePersonBookDs, { addr, oldest := personByAgeDesc[1].forename + ' ' + personByAgeDesc[1].surname });
+output(sq.HousePersonBookDs, { addr, firstPerson := persons[1].forename + ' ' + persons[1].surname });
+
+//More: Need to think about walking 3rd level children e.g., in ave, and [1]:
+output(sq.HousePersonBookDs, { addr, numBooks := sum(persons, count(books)), maxRating := max(persons, max(books, rating100))});
+output(sq.HousePersonBookDs, { addr, firstBook := persons[1].books[1].name + ': ' + persons[1].books[1].author });
+#if (true)
+output(sq.HousePersonBookDs, { addr, numBooks := count(persons.books), ave(persons.books, rating100), max(persons.books, rating100)});
+output(sq.HousePersonBookDs, { addr, ave(persons.books(persons.booklimit > 0), rating100)});
+output(sq.HousePersonBookDs, { addr, bestBook := booksByRatingDesc[1].name + ': ' + booksByRatingDesc[1].author});
+output(sq.HousePersonBookDs, { addr, firstBook := persons.books[1].name + ': ' + persons.books[1].author });     //NB: Different from above.
+#end
+
+//Now do the same, but unsure the children and main activity are not inline or compound
+// Different child operators, out of line inline.
+output('**** The following results should be identical - calculated using a subquery ****');
+
+output(sq.HousePersonBookDs, { addr, numPeople := count(xpersons), aveAge := ave(xpersons, age(dob)), maxDob := max(xpersons, dob)});
+output(sq.HousePersonBookDs, { addr, oldest := xpersonByAgeDesc[1].forename + ' ' + xpersonByAgeDesc[1].surname });
+output(sq.HousePersonBookDs, { addr, firstPerson := xpersons[1].forename + ' ' + xpersons[1].surname });
+
+// Grand children out of line, children are inline
+
+output(sq.HousePersonBookDs, { addr, numBooks := sum(persons, count(xbooks)), maxRating := max(persons, max(xbooks, rating100))});
+output(sq.HousePersonBookDs, { addr, firstBook := evaluate(persons[1], xbooks[1].name) + ': ' + evaluate(persons[1], xbooks[1].author) });
+#if (true)
+output(sq.HousePersonBookDs, { addr, numBooks := count(xbooks), ave(xbooks, rating100), max(xbooks, rating100)});
+output(sq.HousePersonBookDs, { addr, bestBook := xbooksByRatingDesc[1].name + ': ' + xbooksByRatingDesc[1].author});
+output(sq.HousePersonBookDs, { addr, firstBook := xbooks[1].name + ': ' + xbooks[1].author });       //NB: Different from above.
+#end
+
+// Grand children out of line, children also out of line
+
+output('**** The following results should be similar persons are reordered ****');
+output(sq.HousePersonBookDs, { addr, numBooks := sum(xpersons, count(xxbooks)), max(xpersons, max(xxbooks, rating100))});
+output(sq.HousePersonBookDs, { addr, firstBook := evaluate(xpersons[1], xxbooks[1].name) + ': ' + evaluate(xpersons[1], xxbooks[1].author) });
+#if (true)
+output(sq.HousePersonBookDs, { addr, numBooks := count(xxbooks), ave(xxbooks, rating100), max(xxbooks, rating100)});
+output(sq.HousePersonBookDs, { addr, bestBook := xxbooksByRatingDesc[1].name + ': ' + xxbooksByRatingDesc[1].author});
+output(sq.HousePersonBookDs, { addr, firstBook := xxbooks[1].name + ': ' + xxbooks[1].author });     //NB: Different from above.
+#end
+
+//--------- Now perform the aggregate operations with person as outer iteration ----------
+// note: sq.HousePersonDs fields are still accessible!
+output(sq.HousePersonBookDs.persons, { surname, numBooks := count(books), ave(books, rating100), max(books, rating100)});
+output(sq.HousePersonBookDs.persons, { surname, bestBook := booksByRatingDesc[1].name + ': ' + booksByRatingDesc[1].author});
+output(sq.HousePersonBookDs.persons, { surname, firstBook := books[1].name + ': ' + books[1].author });
+
+output(xpersons, { surname, numBooks := count(xxbooks), ave(xxbooks, rating100), max(xxbooks, rating100)});
+output(xpersons, { surname, bestBook := xxbooksByRatingDesc[1].name + ': ' + xxbooksByRatingDesc[1].author});
+output(xpersons, { surname, firstBook := xxbooks[1].name + ': ' + xxbooks[1].author });
+
+// note: sq.HousePersonDs fields are still accessible!
+
+#if (false)
+output(sq.HousePersonBookDs.persons, { sq.HousePersonBookDs.addr, surname, numBooks := count(books), ave(books, rating100), max(books, rating100)});
+output(sq.HousePersonBookDs.persons, { sq.HousePersonBookDs.addr, surname, bestBook := booksByRatingDesc[1].name + ': ' + booksByRatingDesc[1].author});
+output(sq.HousePersonBookDs.persons, { sq.HousePersonBookDs.addr, surname, firstBook := books[1].name + ': ' + books[1].author });
+#end
+
+#if (missingLevelOk)
+output(xpersons, { sq.HousePersonBookDs.addr, surname, numBooks := count(xxbooks), ave(xxbooks, rating100), max(xxbooks, rating100)});
+output(xpersons, { sq.HousePersonBookDs.addr, surname, bestBook := xxbooksByRatingDesc[1].name + ': ' + xxbooksByRatingDesc[1].author});
+output(xpersons, { sq.HousePersonBookDs.addr, surname, firstBook := xxbooks[1].name + ': ' + xxbooks[1].author });
+#end
+);
+
+END;

+ 2 - 1
testing/regress/ecl/diskAggregate.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 // Test a case that needs serialize due to child dataset...
 table(sq.SimplePersonBookds, { dataset books := sq.SimplePersonBookDs.books, sq.SimplePersonBookDs.surname, count(group) });

+ 2 - 1
testing/regress/ecl/diskGroupAggregate.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 // Test a case that needs serialize due to child dataset...
 sort(table(sq.SimplePersonBookds, { dataset books := sq.SimplePersonBookDs.books, sq.SimplePersonBookDs.surname, sq.SimplePersonBookDs.forename, count(group) }, sq.SimplePersonBookDs.surname, few), surname, forename);

+ 2 - 1
testing/regress/ecl/fetch2.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 IMPORT Std;
 

+ 2 - 1
testing/regress/ecl/indexAggregate.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 // Test a case that needs serialize due to child dataset...
 table(sq.SimplePersonBookIndex, { dataset books := sq.SimplePersonBookIndex.books, sq.SimplePersonBookIndex.surname, count(group) });

+ 2 - 1
testing/regress/ecl/indexGroupAggregate.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 // Test a case that needs serialize due to child dataset...
 sort(table(sq.SimplePersonBookIndex, { dataset books := sq.SimplePersonBookIndex.books, sq.SimplePersonBookIndex.surname, sq.SimplePersonBookIndex.forename, count(group) }, sq.SimplePersonBookIndex.surname, few), surname, forename);

+ 2 - 1
testing/regress/ecl/indexread5.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 somePeople := sq.PersonBookDs(id % 2 = 1);
 

testing/regress/ecl/key/aggds1.xml → testing/regress/ecl/key/aggds1_hthor.xml


+ 24 - 0
testing/regress/ecl/key/aggds1_roxie.xml

@@ -0,0 +1,24 @@
+<Dataset name='Result 1'>
+ <Row><_unnamed_sum_aage1>600</_unnamed_sum_aage1><_unnamed_exists_2>true</_unnamed_exists_2><_unnamed_exists_3>true</_unnamed_exists_3><_unnamed_exists_4>false</_unnamed_exists_4><_unnamed_cnt_5>9</_unnamed_cnt_5></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><_unnamed_max_aage1>99</_unnamed_max_aage1></Row>
+</Dataset>
+<Dataset name='Result 3'>
+ <Row><_unnamed_cnt_1>1</_unnamed_cnt_1></Row>
+</Dataset>
+<Dataset name='Result 4'>
+ <Row><Result_4>10</Result_4></Row>
+</Dataset>
+<Dataset name='Result 5'>
+ <Row><_unnamed_cnt_1>1</_unnamed_cnt_1></Row>
+</Dataset>
+<Dataset name='Result 6'>
+ <Row><Result_6>true</Result_6></Row>
+</Dataset>
+<Dataset name='Result 7'>
+ <Row><Result_7>true</Result_7></Row>
+</Dataset>
+<Dataset name='Result 8'>
+ <Row><Result_8>false</Result_8></Row>
+</Dataset>

+ 24 - 0
testing/regress/ecl/key/aggds1_thor.xml

@@ -0,0 +1,24 @@
+<Dataset name='Result 1'>
+ <Row><_unnamed_sum_aage1>600</_unnamed_sum_aage1><_unnamed_exists_2>true</_unnamed_exists_2><_unnamed_exists_3>true</_unnamed_exists_3><_unnamed_exists_4>false</_unnamed_exists_4><_unnamed_cnt_5>9</_unnamed_cnt_5></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><_unnamed_max_aage1>99</_unnamed_max_aage1></Row>
+</Dataset>
+<Dataset name='Result 3'>
+ <Row><_unnamed_cnt_1>1</_unnamed_cnt_1></Row>
+</Dataset>
+<Dataset name='Result 4'>
+ <Row><Result_4>10</Result_4></Row>
+</Dataset>
+<Dataset name='Result 5'>
+ <Row><_unnamed_cnt_1>1</_unnamed_cnt_1></Row>
+</Dataset>
+<Dataset name='Result 6'>
+ <Row><Result_6>true</Result_6></Row>
+</Dataset>
+<Dataset name='Result 7'>
+ <Row><Result_7>true</Result_7></Row>
+</Dataset>
+<Dataset name='Result 8'>
+ <Row><Result_8>false</Result_8></Row>
+</Dataset>

+ 6 - 0
testing/regress/ecl/key/aggds2_thor.xml

@@ -0,0 +1,6 @@
+<Dataset name='Result 1'>
+ <Row><_unnamed_max_fullname1>Windsor, Philip</_unnamed_max_fullname1></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><_unnamed_avg_aage1>66.11111111111111</_unnamed_avg_aage1></Row>
+</Dataset>

Diferenças do arquivo suprimidas por serem muito extensas
+ 49 - 0
testing/regress/ecl/key/aggds3_thor.xml


+ 17 - 0
testing/regress/ecl/key/aggds4_thor.xml

@@ -0,0 +1,17 @@
+<Dataset name='Result 1'>
+ <Row><aage>34</aage><_unnamed_max_fullname2>Halliday, Liz</_unnamed_max_fullname2></Row>
+ <Row><aage>35</aage><_unnamed_max_fullname2>Halliday, Gavin</_unnamed_max_fullname2></Row>
+ <Row><aage>49</aage><_unnamed_max_fullname2>Jones, Julia</_unnamed_max_fullname2></Row>
+ <Row><aage>50</aage><_unnamed_max_fullname2>Jones, Brian</_unnamed_max_fullname2></Row>
+ <Row><aage>68</aage><_unnamed_max_fullname2>Grabdale, John</_unnamed_max_fullname2></Row>
+ <Row><aage>78</aage><_unnamed_max_fullname2>Windsor, Elizabeth</_unnamed_max_fullname2></Row>
+ <Row><aage>83</aage><_unnamed_max_fullname2>Windsor, Philip</_unnamed_max_fullname2></Row>
+ <Row><aage>99</aage><_unnamed_max_fullname2>Flintstone, Wilma</_unnamed_max_fullname2></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><surname>Flintstone          </surname><_unnamed_avg_aage2>99.0</_unnamed_avg_aage2></Row>
+ <Row><surname>Grabdale            </surname><_unnamed_avg_aage2>68.0</_unnamed_avg_aage2></Row>
+ <Row><surname>Halliday            </surname><_unnamed_avg_aage2>34.5</_unnamed_avg_aage2></Row>
+ <Row><surname>Jones               </surname><_unnamed_avg_aage2>49.5</_unnamed_avg_aage2></Row>
+ <Row><surname>Windsor             </surname><_unnamed_avg_aage2>80.5</_unnamed_avg_aage2></Row>
+</Dataset>

+ 28 - 0
testing/regress/ecl/key/aggidx1_thor.xml

@@ -0,0 +1,28 @@
+<Dataset name='Result 1'>
+ <Row><_unnamed_sum_aage1>600</_unnamed_sum_aage1><_unnamed_exists_2>true</_unnamed_exists_2><_unnamed_exists_3>true</_unnamed_exists_3><_unnamed_exists_4>false</_unnamed_exists_4><_unnamed_cnt_5>9</_unnamed_cnt_5></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><_unnamed_max_aage1>99</_unnamed_max_aage1></Row>
+</Dataset>
+<Dataset name='Result 3'>
+ <Row><_unnamed_cnt_1>1</_unnamed_cnt_1></Row>
+</Dataset>
+<Dataset name='Result 4'>
+ <Row><Result_4>10</Result_4></Row>
+</Dataset>
+<Dataset name='Result 5'>
+ <Row><_unnamed_cnt_1>1</_unnamed_cnt_1></Row>
+</Dataset>
+<Dataset name='Result 6'>
+ <Row><_unnamed_1>10</_unnamed_1></Row>
+ <Row><_unnamed_1>0</_unnamed_1></Row>
+</Dataset>
+<Dataset name='Result 7'>
+ <Row><Result_7>true</Result_7></Row>
+</Dataset>
+<Dataset name='Result 8'>
+ <Row><Result_8>true</Result_8></Row>
+</Dataset>
+<Dataset name='Result 9'>
+ <Row><Result_9>false</Result_9></Row>
+</Dataset>

+ 6 - 0
testing/regress/ecl/key/aggidx2_thor.xml

@@ -0,0 +1,6 @@
+<Dataset name='Result 1'>
+ <Row><_unnamed_max_fullname1>Windsor, Philip</_unnamed_max_fullname1></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><_unnamed_avg_aage1>66.11111111111111</_unnamed_avg_aage1></Row>
+</Dataset>

+ 46 - 0
testing/regress/ecl/key/aggidx3_thor.xml

@@ -0,0 +1,46 @@
+<Dataset name='Result 1'>
+ <Row><surname>Flintstone</surname><_unnamed_sum_aage2>198</_unnamed_sum_aage2></Row>
+ <Row><surname>Grabdale</surname><_unnamed_sum_aage2>68</_unnamed_sum_aage2></Row>
+ <Row><surname>Halliday</surname><_unnamed_sum_aage2>74</_unnamed_sum_aage2></Row>
+ <Row><surname>Jones</surname><_unnamed_sum_aage2>99</_unnamed_sum_aage2></Row>
+ <Row><surname>Windsor</surname><_unnamed_sum_aage2>161</_unnamed_sum_aage2></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><_unnamed_max_aage1>99</_unnamed_max_aage1><surname>Flintstone          </surname></Row>
+ <Row><_unnamed_max_aage1>68</_unnamed_max_aage1><surname>Grabdale            </surname></Row>
+ <Row><_unnamed_max_aage1>50</_unnamed_max_aage1><surname>Jones               </surname></Row>
+ <Row><_unnamed_max_aage1>83</_unnamed_max_aage1><surname>Windsor             </surname></Row>
+</Dataset>
+<Dataset name='Result 3'>
+ <Row><_unnamed_cnt_1>1</_unnamed_cnt_1><surname>Halliday            </surname></Row>
+</Dataset>
+<Dataset name='Result 4'>
+ <Row><cntbooks>0</cntbooks><sumage>99</sumage></Row>
+ <Row><cntbooks>1</cntbooks><sumage>99</sumage></Row>
+ <Row><cntbooks>2</cntbooks><sumage>118</sumage></Row>
+ <Row><cntbooks>3</cntbooks><sumage>34</sumage></Row>
+ <Row><cntbooks>4</cntbooks><sumage>137</sumage></Row>
+ <Row><cntbooks>5</cntbooks><sumage>113</sumage></Row>
+</Dataset>
+<Dataset name='Result 5'>
+ <Row><secondbookname>                    </secondbookname><sumage>198</sumage></Row>
+ <Row><secondbookname>Compilers           </secondbookname><sumage>35</sumage></Row>
+ <Row><secondbookname>Fox in Socks        </secondbookname><sumage>50</sumage></Row>
+ <Row><secondbookname>Girl guide annual   </secondbookname><sumage>78</sumage></Row>
+ <Row><secondbookname>Pride and prejudice,</secondbookname><sumage>68</sumage></Row>
+ <Row><secondbookname>Social etiquette    </secondbookname><sumage>83</sumage></Row>
+ <Row><secondbookname>The Life of Pi      </secondbookname><sumage>34</sumage></Row>
+ <Row><secondbookname>The cat in the hat  </secondbookname><sumage>49</sumage></Row>
+ <Row><secondbookname>The story of Jonah  </secondbookname><sumage>5</sumage></Row>
+</Dataset>
+<Dataset name='Result 6'>
+ <Row><pos>0</pos><sumage>600</sumage></Row>
+</Dataset>
+<Dataset name='Result 7'>
+ <Row><numbooks>0</numbooks><sumage>99</sumage></Row>
+ <Row><numbooks>1</numbooks><sumage>99</sumage></Row>
+ <Row><numbooks>2</numbooks><sumage>118</sumage></Row>
+ <Row><numbooks>3</numbooks><sumage>34</sumage></Row>
+ <Row><numbooks>4</numbooks><sumage>137</sumage></Row>
+ <Row><numbooks>5</numbooks><sumage>113</sumage></Row>
+</Dataset>

+ 17 - 0
testing/regress/ecl/key/aggidx4_thor.xml

@@ -0,0 +1,17 @@
+<Dataset name='Result 1'>
+ <Row><aage>34</aage><_unnamed_max_fullname2>Halliday, Liz</_unnamed_max_fullname2></Row>
+ <Row><aage>35</aage><_unnamed_max_fullname2>Halliday, Gavin</_unnamed_max_fullname2></Row>
+ <Row><aage>49</aage><_unnamed_max_fullname2>Jones, Julia</_unnamed_max_fullname2></Row>
+ <Row><aage>50</aage><_unnamed_max_fullname2>Jones, Brian</_unnamed_max_fullname2></Row>
+ <Row><aage>68</aage><_unnamed_max_fullname2>Grabdale, John</_unnamed_max_fullname2></Row>
+ <Row><aage>78</aage><_unnamed_max_fullname2>Windsor, Elizabeth</_unnamed_max_fullname2></Row>
+ <Row><aage>83</aage><_unnamed_max_fullname2>Windsor, Philip</_unnamed_max_fullname2></Row>
+ <Row><aage>99</aage><_unnamed_max_fullname2>Flintstone, Wilma</_unnamed_max_fullname2></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><surname>Flintstone          </surname><_unnamed_avg_aage2>99.0</_unnamed_avg_aage2></Row>
+ <Row><surname>Grabdale            </surname><_unnamed_avg_aage2>68.0</_unnamed_avg_aage2></Row>
+ <Row><surname>Halliday            </surname><_unnamed_avg_aage2>34.5</_unnamed_avg_aage2></Row>
+ <Row><surname>Jones               </surname><_unnamed_avg_aage2>49.5</_unnamed_avg_aage2></Row>
+ <Row><surname>Windsor             </surname><_unnamed_avg_aage2>80.5</_unnamed_avg_aage2></Row>
+</Dataset>

+ 6 - 0
testing/regress/ecl/key/setuptext.xml

@@ -0,0 +1,6 @@
+<Dataset name='Result 1'>
+</Dataset>
+<Dataset name='Result 2'>
+</Dataset>
+<Dataset name='Result 3'>
+</Dataset>

+ 232 - 0
testing/regress/ecl/key/sqagg_thor.xml

@@ -0,0 +1,232 @@
+<Dataset name='Result 1'>
+ <Row><addr>99 Maltings Road</addr><numpeople>3</numpeople><aveage>23.66666666666667</aveage><maxdob>20000101</maxdob></Row>
+ <Row><addr>Buckingham Palace</addr><numpeople>2</numpeople><aveage>80.0</aveage><maxdob>19260421</maxdob></Row>
+ <Row><addr>Bedrock</addr><numpeople>2</numpeople><aveage>2003.0</aveage><maxdob>20202</maxdob></Row>
+ <Row><addr>Wimpole Hall</addr><numpeople>1</numpeople><aveage>67.0</aveage><maxdob>19361008</maxdob></Row>
+ <Row><addr>56 New Road</addr><numpeople>2</numpeople><aveage>49.5</aveage><maxdob>19550312</maxdob></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><addr>99 Maltings Road</addr><oldest>Gavin Halliday</oldest></Row>
+ <Row><addr>Buckingham Palace</addr><oldest>Philip Windsor</oldest></Row>
+ <Row><addr>Bedrock</addr><oldest>Fred Flintstone</oldest></Row>
+ <Row><addr>Wimpole Hall</addr><oldest>John Grabdale</oldest></Row>
+ <Row><addr>56 New Road</addr><oldest>Brian Jones</oldest></Row>
+</Dataset>
+<Dataset name='Result 3'>
+ <Row><addr>99 Maltings Road</addr><firstperson>Gavin Halliday</firstperson></Row>
+ <Row><addr>Buckingham Palace</addr><firstperson>Elizabeth Windsor</firstperson></Row>
+ <Row><addr>Bedrock</addr><firstperson>Fred Flintstone</firstperson></Row>
+ <Row><addr>Wimpole Hall</addr><firstperson>John Grabdale</firstperson></Row>
+ <Row><addr>56 New Road</addr><firstperson>Brian Jones</firstperson></Row>
+</Dataset>
+<Dataset name='Result 4'>
+ <Row><addr>99 Maltings Road</addr><numbooks>12</numbooks><maxrating>98</maxrating></Row>
+ <Row><addr>Buckingham Palace</addr><numbooks>9</numbooks><maxrating>93</maxrating></Row>
+ <Row><addr>Bedrock</addr><numbooks>1</numbooks><maxrating>55</maxrating></Row>
+ <Row><addr>Wimpole Hall</addr><numbooks>2</numbooks><maxrating>95</maxrating></Row>
+ <Row><addr>56 New Road</addr><numbooks>6</numbooks><maxrating>99</maxrating></Row>
+</Dataset>
+<Dataset name='Result 5'>
+ <Row><addr>99 Maltings Road</addr><firstbook>To kill a mocking bird: Harper Lee</firstbook></Row>
+ <Row><addr>Buckingham Palace</addr><firstbook>The bible: Various</firstbook></Row>
+ <Row><addr>Bedrock</addr><firstbook>: </firstbook></Row>
+ <Row><addr>Wimpole Hall</addr><firstbook>Pride and prejudice, 1st edition: Jane Austen</firstbook></Row>
+ <Row><addr>56 New Road</addr><firstbook>All quiet on the western front: Erich Maria Remarque</firstbook></Row>
+</Dataset>
+<Dataset name='Result 6'>
+ <Row><addr>99 Maltings Road</addr><numbooks>12</numbooks><_unnamed_3>73.25</_unnamed_3><_unnamed_4>98</_unnamed_4></Row>
+ <Row><addr>Buckingham Palace</addr><numbooks>9</numbooks><_unnamed_3>52.0</_unnamed_3><_unnamed_4>93</_unnamed_4></Row>
+ <Row><addr>Bedrock</addr><numbooks>1</numbooks><_unnamed_3>55.0</_unnamed_3><_unnamed_4>55</_unnamed_4></Row>
+ <Row><addr>Wimpole Hall</addr><numbooks>2</numbooks><_unnamed_3>87.5</_unnamed_3><_unnamed_4>95</_unnamed_4></Row>
+ <Row><addr>56 New Road</addr><numbooks>6</numbooks><_unnamed_3>72.33333333333333</_unnamed_3><_unnamed_4>99</_unnamed_4></Row>
+</Dataset>
+<Dataset name='Result 7'>
+ <Row><addr>99 Maltings Road</addr><_unnamed_2>71.55555555555556</_unnamed_2></Row>
+ <Row><addr>Buckingham Palace</addr><_unnamed_2>0.0</_unnamed_2></Row>
+ <Row><addr>Bedrock</addr><_unnamed_2>0.0</_unnamed_2></Row>
+ <Row><addr>Wimpole Hall</addr><_unnamed_2>0.0</_unnamed_2></Row>
+ <Row><addr>56 New Road</addr><_unnamed_2>0.0</_unnamed_2></Row>
+</Dataset>
+<Dataset name='Result 8'>
+ <Row><addr>99 Maltings Road</addr><bestbook>The bible: Various</bestbook></Row>
+ <Row><addr>Buckingham Palace</addr><bestbook>The bible: Various</bestbook></Row>
+ <Row><addr>Bedrock</addr><bestbook>Dinosaur stews: Trog</bestbook></Row>
+ <Row><addr>Wimpole Hall</addr><bestbook>Pride and prejudice, 1st edition: Jane Austen</bestbook></Row>
+ <Row><addr>56 New Road</addr><bestbook>Fox in Socks: Dr. Seuss</bestbook></Row>
+</Dataset>
+<Dataset name='Result 9'>
+ <Row><addr>99 Maltings Road</addr><firstbook>To kill a mocking bird: Harper Lee</firstbook></Row>
+ <Row><addr>Buckingham Palace</addr><firstbook>The bible: Various</firstbook></Row>
+ <Row><addr>Bedrock</addr><firstbook>Dinosaur stews: Trog</firstbook></Row>
+ <Row><addr>Wimpole Hall</addr><firstbook>Pride and prejudice, 1st edition: Jane Austen</firstbook></Row>
+ <Row><addr>56 New Road</addr><firstbook>All quiet on the western front: Erich Maria Remarque</firstbook></Row>
+</Dataset>
+<Dataset name='Result 10'>
+ <Row><Result_10>**** The following results should be identical - calculated using a subquery ****</Result_10></Row>
+</Dataset>
+<Dataset name='Result 11'>
+ <Row><addr>99 Maltings Road</addr><numpeople>3</numpeople><aveage>23.66666666666667</aveage><maxdob>20000101</maxdob></Row>
+ <Row><addr>Buckingham Palace</addr><numpeople>2</numpeople><aveage>80.0</aveage><maxdob>19260421</maxdob></Row>
+ <Row><addr>Bedrock</addr><numpeople>2</numpeople><aveage>2003.0</aveage><maxdob>20202</maxdob></Row>
+ <Row><addr>Wimpole Hall</addr><numpeople>1</numpeople><aveage>67.0</aveage><maxdob>19361008</maxdob></Row>
+ <Row><addr>56 New Road</addr><numpeople>2</numpeople><aveage>49.5</aveage><maxdob>19550312</maxdob></Row>
+</Dataset>
+<Dataset name='Result 12'>
+ <Row><addr>99 Maltings Road</addr><oldest>Gavin Halliday</oldest></Row>
+ <Row><addr>Buckingham Palace</addr><oldest>Philip Windsor</oldest></Row>
+ <Row><addr>Bedrock</addr><oldest>Fred Flintstone</oldest></Row>
+ <Row><addr>Wimpole Hall</addr><oldest>John Grabdale</oldest></Row>
+ <Row><addr>56 New Road</addr><oldest>Brian Jones</oldest></Row>
+</Dataset>
+<Dataset name='Result 13'>
+ <Row><addr>99 Maltings Road</addr><firstperson>Gavin Halliday</firstperson></Row>
+ <Row><addr>Buckingham Palace</addr><firstperson>Philip Windsor</firstperson></Row>
+ <Row><addr>Bedrock</addr><firstperson>Fred Flintstone</firstperson></Row>
+ <Row><addr>Wimpole Hall</addr><firstperson>John Grabdale</firstperson></Row>
+ <Row><addr>56 New Road</addr><firstperson>Brian Jones</firstperson></Row>
+</Dataset>
+<Dataset name='Result 14'>
+ <Row><addr>99 Maltings Road</addr><numbooks>12</numbooks><maxrating>98</maxrating></Row>
+ <Row><addr>Buckingham Palace</addr><numbooks>9</numbooks><maxrating>93</maxrating></Row>
+ <Row><addr>Bedrock</addr><numbooks>1</numbooks><maxrating>55</maxrating></Row>
+ <Row><addr>Wimpole Hall</addr><numbooks>2</numbooks><maxrating>95</maxrating></Row>
+ <Row><addr>56 New Road</addr><numbooks>6</numbooks><maxrating>99</maxrating></Row>
+</Dataset>
+<Dataset name='Result 15'>
+ <Row><addr>99 Maltings Road</addr><firstbook>Clarion Language Manual: Richard Taylor</firstbook></Row>
+ <Row><addr>Buckingham Palace</addr><firstbook>Atlas of the world: Times</firstbook></Row>
+ <Row><addr>Bedrock</addr><firstbook>: </firstbook></Row>
+ <Row><addr>Wimpole Hall</addr><firstbook>Mein Kampf, 1st edition: Adolph Hitler</firstbook></Row>
+ <Row><addr>56 New Road</addr><firstbook>All quiet on the western front: Erich Maria Remarque</firstbook></Row>
+</Dataset>
+<Dataset name='Result 16'>
+ <Row><addr>99 Maltings Road</addr><numbooks>12</numbooks><_unnamed_3>73.25</_unnamed_3><_unnamed_4>98</_unnamed_4></Row>
+ <Row><addr>Buckingham Palace</addr><numbooks>9</numbooks><_unnamed_3>52.0</_unnamed_3><_unnamed_4>93</_unnamed_4></Row>
+ <Row><addr>Bedrock</addr><numbooks>1</numbooks><_unnamed_3>55.0</_unnamed_3><_unnamed_4>55</_unnamed_4></Row>
+ <Row><addr>Wimpole Hall</addr><numbooks>2</numbooks><_unnamed_3>87.5</_unnamed_3><_unnamed_4>95</_unnamed_4></Row>
+ <Row><addr>56 New Road</addr><numbooks>6</numbooks><_unnamed_3>72.33333333333333</_unnamed_3><_unnamed_4>99</_unnamed_4></Row>
+</Dataset>
+<Dataset name='Result 17'>
+ <Row><addr>99 Maltings Road</addr><bestbook>The bible: Various</bestbook></Row>
+ <Row><addr>Buckingham Palace</addr><bestbook>The bible: Various</bestbook></Row>
+ <Row><addr>Bedrock</addr><bestbook>Dinosaur stews: Trog</bestbook></Row>
+ <Row><addr>Wimpole Hall</addr><bestbook>Pride and prejudice, 1st edition: Jane Austen</bestbook></Row>
+ <Row><addr>56 New Road</addr><bestbook>Fox in Socks: Dr. Seuss</bestbook></Row>
+</Dataset>
+<Dataset name='Result 18'>
+ <Row><addr>99 Maltings Road</addr><firstbook>BNF: Various</firstbook></Row>
+ <Row><addr>Buckingham Palace</addr><firstbook>Atlas of the world: Times</firstbook></Row>
+ <Row><addr>Bedrock</addr><firstbook>Dinosaur stews: Trog</firstbook></Row>
+ <Row><addr>Wimpole Hall</addr><firstbook>Mein Kampf, 1st edition: Adolph Hitler</firstbook></Row>
+ <Row><addr>56 New Road</addr><firstbook>All quiet on the western front: Erich Maria Remarque</firstbook></Row>
+</Dataset>
+<Dataset name='Result 19'>
+ <Row><Result_19>**** The following results should be similar persons are reordered ****</Result_19></Row>
+</Dataset>
+<Dataset name='Result 20'>
+ <Row><addr>99 Maltings Road</addr><numbooks>12</numbooks><_unnamed_3>98</_unnamed_3></Row>
+ <Row><addr>Buckingham Palace</addr><numbooks>9</numbooks><_unnamed_3>93</_unnamed_3></Row>
+ <Row><addr>Bedrock</addr><numbooks>1</numbooks><_unnamed_3>55</_unnamed_3></Row>
+ <Row><addr>Wimpole Hall</addr><numbooks>2</numbooks><_unnamed_3>95</_unnamed_3></Row>
+ <Row><addr>56 New Road</addr><numbooks>6</numbooks><_unnamed_3>99</_unnamed_3></Row>
+</Dataset>
+<Dataset name='Result 21'>
+ <Row><addr>99 Maltings Road</addr><firstbook>Clarion Language Manual: Richard Taylor</firstbook></Row>
+ <Row><addr>Buckingham Palace</addr><firstbook>Greek tragedies: Various</firstbook></Row>
+ <Row><addr>Bedrock</addr><firstbook>: </firstbook></Row>
+ <Row><addr>Wimpole Hall</addr><firstbook>Mein Kampf, 1st edition: Adolph Hitler</firstbook></Row>
+ <Row><addr>56 New Road</addr><firstbook>All quiet on the western front: Erich Maria Remarque</firstbook></Row>
+</Dataset>
+<Dataset name='Result 22'>
+ <Row><addr>99 Maltings Road</addr><numbooks>12</numbooks><_unnamed_3>73.25</_unnamed_3><_unnamed_4>98</_unnamed_4></Row>
+ <Row><addr>Buckingham Palace</addr><numbooks>9</numbooks><_unnamed_3>52.0</_unnamed_3><_unnamed_4>93</_unnamed_4></Row>
+ <Row><addr>Bedrock</addr><numbooks>1</numbooks><_unnamed_3>55.0</_unnamed_3><_unnamed_4>55</_unnamed_4></Row>
+ <Row><addr>Wimpole Hall</addr><numbooks>2</numbooks><_unnamed_3>87.5</_unnamed_3><_unnamed_4>95</_unnamed_4></Row>
+ <Row><addr>56 New Road</addr><numbooks>6</numbooks><_unnamed_3>72.33333333333333</_unnamed_3><_unnamed_4>99</_unnamed_4></Row>
+</Dataset>
+<Dataset name='Result 23'>
+ <Row><addr>99 Maltings Road</addr><bestbook>The bible: Various</bestbook></Row>
+ <Row><addr>Buckingham Palace</addr><bestbook>The bible: Various</bestbook></Row>
+ <Row><addr>Bedrock</addr><bestbook>Dinosaur stews: Trog</bestbook></Row>
+ <Row><addr>Wimpole Hall</addr><bestbook>Pride and prejudice, 1st edition: Jane Austen</bestbook></Row>
+ <Row><addr>56 New Road</addr><bestbook>Fox in Socks: Dr. Seuss</bestbook></Row>
+</Dataset>
+<Dataset name='Result 24'>
+ <Row><addr>99 Maltings Road</addr><firstbook>BNF: Various</firstbook></Row>
+ <Row><addr>Buckingham Palace</addr><firstbook>Atlas of the world: Times</firstbook></Row>
+ <Row><addr>Bedrock</addr><firstbook>Dinosaur stews: Trog</firstbook></Row>
+ <Row><addr>Wimpole Hall</addr><firstbook>Mein Kampf, 1st edition: Adolph Hitler</firstbook></Row>
+ <Row><addr>56 New Road</addr><firstbook>All quiet on the western front: Erich Maria Remarque</firstbook></Row>
+</Dataset>
+<Dataset name='Result 25'>
+ <Row><surname>Halliday</surname><numbooks>5</numbooks><_unnamed_3>73.8</_unnamed_3><_unnamed_4>98</_unnamed_4></Row>
+ <Row><surname>Halliday</surname><numbooks>4</numbooks><_unnamed_3>68.75</_unnamed_3><_unnamed_4>90</_unnamed_4></Row>
+ <Row><surname>Halliday</surname><numbooks>3</numbooks><_unnamed_3>78.33333333333333</_unnamed_3><_unnamed_4>90</_unnamed_4></Row>
+ <Row><surname>Windsor</surname><numbooks>5</numbooks><_unnamed_3>56.6</_unnamed_3><_unnamed_4>93</_unnamed_4></Row>
+ <Row><surname>Windsor</surname><numbooks>4</numbooks><_unnamed_3>46.25</_unnamed_3><_unnamed_4>85</_unnamed_4></Row>
+ <Row><surname>Flintstone</surname><numbooks>0</numbooks><_unnamed_3>0.0</_unnamed_3><_unnamed_4>0</_unnamed_4></Row>
+ <Row><surname>Flintstone</surname><numbooks>1</numbooks><_unnamed_3>55.0</_unnamed_3><_unnamed_4>55</_unnamed_4></Row>
+ <Row><surname>Grabdale</surname><numbooks>2</numbooks><_unnamed_3>87.5</_unnamed_3><_unnamed_4>95</_unnamed_4></Row>
+ <Row><surname>Jones</surname><numbooks>2</numbooks><_unnamed_3>92.0</_unnamed_3><_unnamed_4>99</_unnamed_4></Row>
+ <Row><surname>Jones</surname><numbooks>4</numbooks><_unnamed_3>62.5</_unnamed_3><_unnamed_4>90</_unnamed_4></Row>
+</Dataset>
+<Dataset name='Result 26'>
+ <Row><surname>Halliday</surname><bestbook>The bible: Various</bestbook></Row>
+ <Row><surname>Halliday</surname><bestbook>The thinks you can think: Dr. Seuss</bestbook></Row>
+ <Row><surname>Halliday</surname><bestbook>The Life of Pi: Yan Martel</bestbook></Row>
+ <Row><surname>Windsor</surname><bestbook>The bible: Various</bestbook></Row>
+ <Row><surname>Windsor</surname><bestbook>The cat in the hat: Dr. Seuss</bestbook></Row>
+ <Row><surname>Flintstone</surname><bestbook>: </bestbook></Row>
+ <Row><surname>Flintstone</surname><bestbook>Dinosaur stews: Trog</bestbook></Row>
+ <Row><surname>Grabdale</surname><bestbook>Pride and prejudice, 1st edition: Jane Austen</bestbook></Row>
+ <Row><surname>Jones</surname><bestbook>Fox in Socks: Dr. Seuss</bestbook></Row>
+ <Row><surname>Jones</surname><bestbook>Zen and the art of motorcyle maintenance: Robert Pirsig</bestbook></Row>
+</Dataset>
+<Dataset name='Result 27'>
+ <Row><surname>Halliday</surname><firstbook>To kill a mocking bird: Harper Lee</firstbook></Row>
+ <Row><surname>Halliday</surname><firstbook>The thinks you can think: Dr. Seuss</firstbook></Row>
+ <Row><surname>Halliday</surname><firstbook>The Life of Pi: Yan Martel</firstbook></Row>
+ <Row><surname>Windsor</surname><firstbook>The bible: Various</firstbook></Row>
+ <Row><surname>Windsor</surname><firstbook>Greek tragedies: Various</firstbook></Row>
+ <Row><surname>Flintstone</surname><firstbook>: </firstbook></Row>
+ <Row><surname>Flintstone</surname><firstbook>Dinosaur stews: Trog</firstbook></Row>
+ <Row><surname>Grabdale</surname><firstbook>Pride and prejudice, 1st edition: Jane Austen</firstbook></Row>
+ <Row><surname>Jones</surname><firstbook>All quiet on the western front: Erich Maria Remarque</firstbook></Row>
+ <Row><surname>Jones</surname><firstbook>Zen and the art of motorcyle maintenance: Robert Pirsig</firstbook></Row>
+</Dataset>
+<Dataset name='Result 28'>
+ <Row><surname>Flintstone</surname><numbooks>0</numbooks><_unnamed_3>0.0</_unnamed_3><_unnamed_4>0</_unnamed_4></Row>
+ <Row><surname>Flintstone</surname><numbooks>1</numbooks><_unnamed_3>55.0</_unnamed_3><_unnamed_4>55</_unnamed_4></Row>
+ <Row><surname>Grabdale</surname><numbooks>2</numbooks><_unnamed_3>87.5</_unnamed_3><_unnamed_4>95</_unnamed_4></Row>
+ <Row><surname>Halliday</surname><numbooks>5</numbooks><_unnamed_3>73.8</_unnamed_3><_unnamed_4>98</_unnamed_4></Row>
+ <Row><surname>Halliday</surname><numbooks>3</numbooks><_unnamed_3>78.33333333333333</_unnamed_3><_unnamed_4>90</_unnamed_4></Row>
+ <Row><surname>Halliday</surname><numbooks>4</numbooks><_unnamed_3>68.75</_unnamed_3><_unnamed_4>90</_unnamed_4></Row>
+ <Row><surname>Jones</surname><numbooks>2</numbooks><_unnamed_3>92.0</_unnamed_3><_unnamed_4>99</_unnamed_4></Row>
+ <Row><surname>Jones</surname><numbooks>4</numbooks><_unnamed_3>62.5</_unnamed_3><_unnamed_4>90</_unnamed_4></Row>
+ <Row><surname>Windsor</surname><numbooks>4</numbooks><_unnamed_3>46.25</_unnamed_3><_unnamed_4>85</_unnamed_4></Row>
+ <Row><surname>Windsor</surname><numbooks>5</numbooks><_unnamed_3>56.6</_unnamed_3><_unnamed_4>93</_unnamed_4></Row>
+</Dataset>
+<Dataset name='Result 29'>
+ <Row><surname>Flintstone</surname><bestbook>: </bestbook></Row>
+ <Row><surname>Flintstone</surname><bestbook>Dinosaur stews: Trog</bestbook></Row>
+ <Row><surname>Grabdale</surname><bestbook>Pride and prejudice, 1st edition: Jane Austen</bestbook></Row>
+ <Row><surname>Halliday</surname><bestbook>The bible: Various</bestbook></Row>
+ <Row><surname>Halliday</surname><bestbook>The Life of Pi: Yan Martel</bestbook></Row>
+ <Row><surname>Halliday</surname><bestbook>The thinks you can think: Dr. Seuss</bestbook></Row>
+ <Row><surname>Jones</surname><bestbook>Fox in Socks: Dr. Seuss</bestbook></Row>
+ <Row><surname>Jones</surname><bestbook>Zen and the art of motorcyle maintenance: Robert Pirsig</bestbook></Row>
+ <Row><surname>Windsor</surname><bestbook>The cat in the hat: Dr. Seuss</bestbook></Row>
+ <Row><surname>Windsor</surname><bestbook>The bible: Various</bestbook></Row>
+</Dataset>
+<Dataset name='Result 30'>
+ <Row><surname>Flintstone</surname><firstbook>: </firstbook></Row>
+ <Row><surname>Flintstone</surname><firstbook>Dinosaur stews: Trog</firstbook></Row>
+ <Row><surname>Grabdale</surname><firstbook>Mein Kampf, 1st edition: Adolph Hitler</firstbook></Row>
+ <Row><surname>Halliday</surname><firstbook>Clarion Language Manual: Richard Taylor</firstbook></Row>
+ <Row><surname>Halliday</surname><firstbook>BNF: Various</firstbook></Row>
+ <Row><surname>Halliday</surname><firstbook>David and Goliath: </firstbook></Row>
+ <Row><surname>Jones</surname><firstbook>All quiet on the western front: Erich Maria Remarque</firstbook></Row>
+ <Row><surname>Jones</surname><firstbook>The bible: Various</firstbook></Row>
+ <Row><surname>Windsor</surname><firstbook>Greek tragedies: Various</firstbook></Row>
+ <Row><surname>Windsor</surname><firstbook>Atlas of the world: Times</firstbook></Row>
+</Dataset>

testing/ecl/key/textsearch.xml → testing/regress/ecl/key/textsearch1.xml


Diferenças do arquivo suprimidas por serem muito extensas
+ 3 - 0
testing/ecl/key/textsearch_ao.xml


Diferenças do arquivo suprimidas por serem muito extensas
+ 3 - 0
testing/ecl/key/textsearch_no.xml


Diferenças do arquivo suprimidas por serem muito extensas
+ 271 - 0
testing/regress/ecl/key/textsearch2.xml


Diferenças do arquivo suprimidas por serem muito extensas
+ 271 - 0
testing/regress/ecl/key/textsearch2_thor.xml


Diferenças do arquivo suprimidas por serem muito extensas
+ 271 - 0
testing/regress/ecl/key/textsearch3.xml


Diferenças do arquivo suprimidas por serem muito extensas
+ 271 - 0
testing/regress/ecl/key/textsearch3_thor.xml


testing/ecl/key/textsearch2.xml → testing/regress/ecl/key/textsearch4.xml


+ 2 - 1
testing/regress/ecl/keyed_join3.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 somePeople := sq.PersonBookDs(id % 2 = 1);
 

+ 2 - 1
testing/regress/ecl/normalize3.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 o1 := normalize(sq.NamesTable1, left.books, transform(right));
 o2 := normalize(sq.NamesTable1, sort(left.books, -rating100, +author), transform(right));

+ 2 - 1
testing/regress/ecl/payload.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 //Sample query for pulling across some sample related payload indexes
 
 i1 := index({ string40 forename, string40 surname }, { unsigned4 id }, sq.PersonIndexName);

+ 8 - 47
testing/regress/ecl/setup/files.ecl

@@ -17,7 +17,7 @@
 //skip type==setup TBD
 
 //define constants
-EXPORT files(string p) := module
+EXPORT files(string platform) := module
 SHARED DG_GenFlat           := true;   //TRUE gens FlatFile
 SHARED DG_GenChild          := true;   //TRUE gens ChildFile
 SHARED DG_GenGrandChild     := true;   //TRUE gens GrandChildFile
@@ -36,14 +36,13 @@ EXPORT DG_MaxGrandChildren  := 3;    //maximum (1 to n) number of grandchild rec
 SHARED useDynamic := false;
 SHARED useLayoutTrans := false;
 SHARED useVarIndex := false;
-SHARED prefix := 'hthor';
 
 #if (useDynamic=true)
-SHARED  VarString EmptyString := '' : STORED('dummy');
-SHARED  filePrefix := prefix + EmptyString;
- #option ('allowVariableRoxieFilenames', 1)
+VarString EmptyString := '' : STORED('dummy');
+EXPORT  filePrefix := platform + EmptyString;
+#option ('allowVariableRoxieFilenames', 1);
 #else
-SHARED  filePrefix := prefix;
+EXPORT  filePrefix := platform;
 #end
 
 EXPORT DG_FileOut           := '~REGRESS::' + filePrefix + '::DG_';
@@ -227,47 +226,9 @@ END, THOR);
 
 
 //----------------------------- Text search definitions ----------------------------------
-EXPORT TS_MaxTerms             := 50;
-EXPORT TS_MaxStages            := 50;
-EXPORT TS_MaxProximity         := 10;
-EXPORT TS_MaxWildcard          := 1000;
-EXPORT TS_MaxMatchPerDocument  := 1000;
-EXPORT TS_MaxFilenameLength        := 255;
-EXPORT TS_MaxActions           := 255;
-EXPORT TS_MaxTagNesting        := 40;
-EXPORT TS_MaxColumnsPerLine := 10000;          // used to create a pseudo document position
-
-EXPORT TS_kindType         := enum(unsigned1, UnknownEntry=0, TextEntry, OpenTagEntry, CloseTagEntry, OpenCloseTagEntry, CloseOpenTagEntry);
-EXPORT TS_sourceType       := unsigned2;
-EXPORT TS_wordCountType    := unsigned8;
-EXPORT TS_segmentType      := unsigned1;
-EXPORT TS_wordPosType      := unsigned8;
-EXPORT TS_docPosType       := unsigned8;
-EXPORT TS_documentId       := unsigned8;
-EXPORT TS_termType         := unsigned1;
-EXPORT TS_distanceType     := integer8;
-EXPORT TS_indexWipType     := unsigned1;
-EXPORT TS_wipType          := unsigned8;
-EXPORT TS_stageType        := unsigned1;
-EXPORT TS_dateType         := unsigned8;
-
-EXPORT TS_sourceType TS_docid2source(TS_documentId x) := (x >> 48);
-EXPORT TS_documentId TS_docid2doc(TS_documentId x) := (x & 0xFFFFFFFFFFFF);
-EXPORT TS_documentId TS_createDocId(TS_sourceType source, TS_documentId doc) := (TS_documentId)(((unsigned8)source << 48) | doc);
-EXPORT boolean      TS_docMatchesSource(TS_documentId docid, TS_sourceType source) := (docid between TS_createDocId(source,0) and (TS_documentId)(TS_createDocId(source+1,0)-1));
-
-EXPORT TS_wordType := string20;
-EXPORT TS_wordFlags    := enum(unsigned1, HasLower=1, HasUpper=2);
-
-EXPORT TS_wordIdType       := unsigned4;
-
-EXPORT TS_NameWordIndex        := '~REGRESS::' + filePrefix + '::TS_wordIndex';
-EXPORT TS_NameSearchIndex      := '~REGRESS::' + filePrefix + '::TS_searchIndex';
-
-EXPORT TS_wordIndex        := index({ TS_kindType kind, TS_wordType word, TS_documentId doc, TS_segmentType segment, TS_wordPosType wpos, TS_indexWipType wip } , { TS_wordFlags flags, TS_wordType original, TS_docPosType dpos}, TS_NameWordIndex);
-EXPORT TS_searchIndex      := index(TS_wordIndex, TS_NameSearchIndex);
-
-EXPORT TS_wordIndexRecord := recordof(TS_wordIndex);
+
+EXPORT NameWordIndex(boolean useLocal) := '~REGRESS::' + filePrefix + '::wordIndex' + IF(useLocal, '_Local', '');
+EXPORT NameSearchIndex      := '~REGRESS::' + filePrefix + '::searchIndex';
 
 //----------------------------- End of text search definitions --------------------------
 

+ 11 - 0
testing/regress/ecl/setup/options.ecl

@@ -0,0 +1,11 @@
+//skip type==setup TBD
+
+EXPORT options := MODULE
+
+//MORE: This is currently hard-wired - it really needs to be in a configuration file instead
+EXPORT OriginalTextFilesIp := '192.168.0.200';
+
+//MORE: This is currently hard-wired - it really needs to be in a configuration file instead
+EXPORT OriginalTextFilesPath := 'home::gavin::dev::hpcc::testing::download';
+
+END;

+ 3 - 2
testing/regress/ecl/setup/setup.ecl

@@ -16,8 +16,9 @@
 ############################################################################## */
 
 import Std.File AS FileServices;
-import $; C := $.files('');
-import $.sq;
+import $;
+
+C := $.files(__PLATFORM__);
 
 C.DG_OutRec norm1(C.DG_OutRec l, integer cc) := transform
   self.DG_firstname := C.DG_Fnames[cc];

+ 2 - 1
testing/regress/ecl/setup/setup_fetch.ecl

@@ -16,7 +16,8 @@
 ############################################################################## */
 
 import Std.File AS FileServices;
-import $; C := $.files('');
+import $;
+C := $.files(__PLATFORM__);
 
 FetchData   := DATASET([
     {3000,   'FL', 'Boca Raton',  'London',   'Bridge'},

+ 3 - 4
testing/regress/ecl/setup/setupsq.ecl

@@ -16,7 +16,8 @@
 ############################################################################## */
 
 import Std.File AS FileServices;
-import $.sq;
+import $;
+sq := $.sq(__PLATFORM__);
 
 //******************************** Child query setup code ***********************
 
@@ -100,7 +101,7 @@ rawHouse := dataset([
             ]
         }]
     }
-    ], sq.HousePersonBookRec);
+    ], sq.HousePersonBookRec, DISTRIBUTED);
 
 
 //First reproject the datasets to
@@ -185,8 +186,6 @@ sq.BookRelatedIdRec extractBook(sq.BookIdRec l, unsigned4 personid) :=
                 SELF := l;
             END;
 
-
-
 //------------------- Add Sequence numbers by normalized/project/denormalize
 
 //normalize, adding parent ids as we do it.  Once all normalized and sequenced then combine them back together

+ 68 - 64
testing/ecl/setup/setuptext.ecl

@@ -15,16 +15,21 @@
     limitations under the License.
 ############################################################################## */
 
-//UseStandardFiles
-#option ('checkAsserts',false)
+#option ('checkAsserts',false);
 
 import Std.Str;
 import Std.File AS FileServices;
 
+import $;
+import $.Options;
+import $.TS;
+Files := $.Files(__PLATFORM__);
+
+
 rebuildSimpleIndex := true;
-rebuildSearchIndex := setupTextFileLocation <> '';
+rebuildSearchIndex := Options.OriginalTextFilesIp <> '' AND Options.OriginalTextFilesPath <> '';
 
-DirectoryPath := '~file::' + setupTextFileLocation + '::';          // path of the documents that are used to build the search index
+DirectoryPath := '~file::' + Options.OriginalTextFilesIp + '::' + Options.OriginalTextFilesPath + '::';          // path of the documents that are used to build the search index
 
 MaxDocumentLineLength := 50000;
 
@@ -36,16 +41,16 @@ unsigned8 filepos := 0;
     end;
 
 parseRecord := record
-TS_wordType     word;
-TS_kindType     kind;
-TS_wordType     original;
-TS_documentId   doc;
-TS_segmentType  segment;
-TS_wordFlags    flags;
-TS_wordPosType  wpos;
-TS_indexWipType wip;
-TS_docPosType   dpos;
-TS_docPosType   seq;
+TS.wordType     word;
+TS.kindType     kind;
+TS.wordType     original;
+TS.documentId   doc;
+TS.segmentType  segment;
+TS.wordFlags    flags;
+TS.wordPosType  wpos;
+TS.indexWipType wip;
+TS.docPosType   dpos;
+TS.docPosType   seq;
 boolean         beginSegment;           // could be set to {x:1} to separate title and chapters - would probably be useful.
 boolean         endOfSentence;
 boolean         endOfParagraph;
@@ -54,16 +59,16 @@ boolean         endOfParagraph;
 inputAliasRecord := record
 unsigned2   source;
 unsigned4   subdoc;
-TS_wordType word;
-TS_wordPosType wpos;
-TS_indexWipType wip;
+TS.wordType word;
+TS.wordPosType wpos;
+TS.indexWipType wip;
     end;
 
 spanTagSet := ['p','s'];
-wordKindSortOrder(TS_kindType kind, TS_wordPosType wip, TS_wordType tag) :=
-        MAP(kind = TS_kindType.OpenTagEntry and wip=0=>1,
-            kind = TS_kindType.CloseTagEntry=>2,
-            kind = TS_kindType.OpenTagEntry and wip<>0=>
+wordKindSortOrder(TS.kindType kind, TS.wordPosType wip, TS.wordType tag) :=
+        MAP(kind = TS.kindType.OpenTagEntry and wip=0=>1,
+            kind = TS.kindType.CloseTagEntry=>2,
+            kind = TS.kindType.OpenTagEntry and wip<>0=>
                 100+CASE(tag,'p'=>1,'s'=>2,3),                      // ensure sentences are contained within paragraphs
             1000);
 
@@ -100,17 +105,17 @@ convertDocumentStreamToTokens(dataset(inputDocumentRecord) inFile) := FUNCTION
 
     parseRecord createMatchRecord(inFile l) := transform
 
-            self.kind := MAP(MATCHED(patWord)=>TS_kindType.TextEntry,
-                             MATCHED(openTag)=>TS_kindType.OpenTagEntry,
-                             MATCHED(closeTag)=>TS_kindType.CloseTagEntry,
-                             MATCHED(openCloseTag)=>TS_kindType.OpenCloseTagEntry,
-                             MATCHED(sentenceTerminator)=>TS_kindType.CloseOpenTagEntry,
-                             TS_kindType.UnknownEntry);
+            self.kind := MAP(MATCHED(patWord)=>TS.kindType.TextEntry,
+                             MATCHED(openTag)=>TS.kindType.OpenTagEntry,
+                             MATCHED(closeTag)=>TS.kindType.CloseTagEntry,
+                             MATCHED(openCloseTag)=>TS.kindType.OpenCloseTagEntry,
+                             MATCHED(sentenceTerminator)=>TS.kindType.CloseOpenTagEntry,
+                             TS.kindType.UnknownEntry);
 
             self.original := MAP(MATCHED(sentenceTerminator)=>'s',
                                  MATCHED(patWord)=>matchtext(patWord),
                                  matchText(tag));
-            self.doc := TS_createDocId(l.source, l.subdoc);
+            self.doc := TS.createDocId(l.source, l.subdoc);
             self.dpos := l.filepos + matchposition(matchPattern);
             self.wpos := 0;
             self.wip := IF(MATCHED(anyTag), 0, 1);              // normal tags don't consume space - may want to change
@@ -121,11 +126,11 @@ convertDocumentStreamToTokens(dataset(inputDocumentRecord) inFile) := FUNCTION
     doProcess1 := parse(splitFile, text, S, createMatchRecord(left), first, scan);
 
     parseRecord createMatchPara(inFile l) := transform
-            self.kind := TS_kindType.CloseOpenTagEntry;
+            self.kind := TS.kindType.CloseOpenTagEntry;
             self.original := 'p';
             self.endOfParagraph := true;
             self.wip := 1;
-            self.doc := TS_createDocId(l.source, l.subdoc);
+            self.doc := TS.createDocId(l.source, l.subdoc);
             self.dpos := l.filepos+1;
             self := [];
         END;
@@ -140,25 +145,25 @@ END;
 
 processSentanceAndParagraphMarkers(dataset(parseRecord) extractedWords, set of string spanTagSet) := FUNCTION
 
-    spanTags := dataset(spanTagSet, { Ts_wordType tag });               // special tags which span all the text
+    spanTags := dataset(spanTagSet, { TS.wordType tag });               // special tags which span all the text
 
     //Now normalize sentance and paragraph markers into begin/end markers.  And add leading and trailing markers to each document.
-    withoutMarkers := extractedWords(kind <> TS_kindType.CloseOpenTagEntry);
-    markers := extractedWords(kind = TS_kindType.CloseOpenTagEntry);
+    withoutMarkers := extractedWords(kind <> TS.kindType.CloseOpenTagEntry);
+    markers := extractedWords(kind = TS.kindType.CloseOpenTagEntry);
 
-    parseRecord modifyMarker(parseRecord l, TS_kindType kind) := transform
+    parseRecord modifyMarker(parseRecord l, TS.kindType kind) := transform
         SELF.kind := kind;
-        SELF.wip := IF(kind = TS_kindType.OpenTagEntry, l.wip, 0);      // open may take up space, close dosen't take up space
+        SELF.wip := IF(kind = TS.kindType.OpenTagEntry, l.wip, 0);      // open may take up space, close dosen't take up space
         SELF := l;
     END;
-    markerOpen := project(markers, modifyMarker(left, TS_kindType.OpenTagEntry));
-    markerClose := project(markers, modifyMarker(left, TS_kindType.CloseTagEntry));
+    markerOpen := project(markers, modifyMarker(left, TS.kindType.OpenTagEntry));
+    markerClose := project(markers, modifyMarker(left, TS.kindType.CloseTagEntry));
 
     groupedByDoc := group(extractedWords, doc);
     singlePerDoc := table(groupedByDoc, { doc, maxDocPos := max(group, dpos); });
 
-    parseRecord createSpanTag(TS_documentId doc, TS_docPosType dpos, boolean isOpen, unsigned whichTag) := TRANSFORM
-        SELF.kind := IF(isOpen, TS_kindType.OpenTagEntry, TS_kindType.CloseTagEntry);
+    parseRecord createSpanTag(TS.documentId doc, TS.docPosType dpos, boolean isOpen, unsigned whichTag) := TRANSFORM
+        SELF.kind := IF(isOpen, TS.kindType.OpenTagEntry, TS.kindType.CloseTagEntry);
         SELF.original := spanTagSet[whichTag];
         SELF.doc := doc;
         SELF.dpos := dpos;
@@ -190,15 +195,15 @@ matchOpenCloseTags(dataset(parseRecord) _inFile) := FUNCTION
     //Now need to match up begin tags with end tags, work out the size of the tags, update the begin tags, and strip the end tags.
     //extract all close tags, group by doc, sort by name, sequence, then add a new sequence.
 
-    closeTags1 := inFile(kind = TS_kindType.CloseTagEntry);
+    closeTags1 := inFile(kind = TS.kindType.CloseTagEntry);
 
     //Now extract all the open tags, and do the same
-    openTags1 := inFile(kind = TS_kindType.OpenTagEntry);
+    openTags1 := inFile(kind = TS.kindType.OpenTagEntry);
 
     ////////Version2 - using grouped process to match up open with close ///////////////////
-    positionRecord := { TS_docPosType seq, TS_docPosType wpos };
+    positionRecord := { TS.docPosType seq, TS.docPosType wpos };
     matchTagRecord := RECORD
-        dataset(positionRecord) active{maxcount(TS_MaxTagNesting)}
+        dataset(positionRecord) active{maxcount(TS.MaxTagNesting)}
     END;
     nullMatchTag := row(transform(matchTagRecord, self := []));
 
@@ -209,18 +214,18 @@ matchOpenCloseTags(dataset(parseRecord) _inFile) := FUNCTION
             openWpos := r.active[1].wpos;
             self.wpos := openWpos;
             self.wip := l.wpos - openWpos;
-            self.seq := IF(l.kind = TS_kindType.OpenTagEntry, 0, r.active[1].seq);      // so we can remove them later
-            self.kind := TS_kindType.OpenTagEntry;
+            self.seq := IF(l.kind = TS.kindType.OpenTagEntry, 0, r.active[1].seq);      // so we can remove them later
+            self.kind := TS.kindType.OpenTagEntry;
             self := l;
         END;
         export matchTagRecord createRight := TRANSFORM
-            SELF.active := IF(l.kind = TS_kindType.OpenTagEntry,
+            SELF.active := IF(l.kind = TS.kindType.OpenTagEntry,
                                 row(transform(positionRecord, self.seq := l.seq; self.wpos := l.wpos)) & r.active,
                                 r.active[2..]);
         END;
     END;
 
-    openClose := inFile(kind IN [TS_kindType.OpenTagEntry,TS_kindType.CloseTagEntry]);
+    openClose := inFile(kind IN [TS.kindType.OpenTagEntry,TS.kindType.CloseTagEntry]);
     groupedOpenClose := group(openClose, doc, segment);
     sortedOpenClose := sort(groupedOpenClose, original, wpos, wordKindSortOrder(kind, wip, original));
 
@@ -235,7 +240,7 @@ matchOpenCloseTags(dataset(parseRecord) _inFile) := FUNCTION
     sortedFixedupOpenTags := SORT(fixedupOpentags, doc, segment, wpos, wip);
 
     //Now combine the non tags with the fixed up tags
-    fixedUp := merge(inFile(kind not in [TS_kindType.OpenTagEntry, TS_kindType.CloseTagEntry]), sortedFixedupOpenTags, sorted(doc, segment, wpos, wip));
+    fixedUp := merge(inFile(kind not in [TS.kindType.OpenTagEntry, TS.kindType.CloseTagEntry]), sortedFixedupOpenTags, sorted(doc, segment, wpos, wip));
     return fixedUp;
 END;
 
@@ -252,7 +257,7 @@ normalizeWordFormat(dataset(parseRecord) inFile) := FUNCTION
             hasUpper := REGEXFIND('[A-Z]', l.original);
             hasLower := REGEXFIND('[a-z]', l.original);
             self.word := Str.ToLowerCase(l.original);
-            self.flags := IF(hasUpper, TS_wordFlags.hasUpper, 0) + IF(hasLower, TS_wordFlags.hasLower, 0);
+            self.flags := IF(hasUpper, TS.wordFlags.hasUpper, 0) + IF(hasLower, TS.wordFlags.hasLower, 0);
             self := l;
         end;
     RETURN project(inFile, cleanWords(left));
@@ -260,9 +265,9 @@ END;
 
 createAliasesFromList(dataset(inputAliasRecord) inputAliases) := FUNCTION
     parseRecord createAlias(inputAliasRecord l) := transform
-            self.kind := TS_kindType.TextEntry;
+            self.kind := TS.kindType.TextEntry;
             self.original := l.word;
-            self.doc := TS_createDocId(l.source, l.subdoc);
+            self.doc := TS.createDocId(l.source, l.subdoc);
             self.dpos := 0;
             self.wpos := l.wpos;
             self.wip := l.wip;
@@ -422,20 +427,18 @@ wordsAndAliases := merge(orderedWords, orderedAliases, sorted(doc, segment, wpos
 
 normalizedInversion := normalizeWordFormat(wordsAndAliases);
 
-#if (useLocal=true)
-  processedWords := DISTRIBUTE(normalizedInversion, IF(doc > 6, 0, 1));
-#else
-  processedWords := normalizedInversion;
-#end
+boolean useLocal := false; // MORE: Should we create two different variants?
+
+processedWords(boolean useLocal) := IF(useLocal, DISTRIBUTE(normalizedInversion, IF(doc > 6, 0, 1)), normalizedInversion);
 
-doCreateSimpleIndex() := sequential(
-    BUILD(processedWords, { kind, word, doc, segment, wpos, wip }, { flags, original, dpos }, TS_NameWordIndex, overwrite,
+doCreateSimpleIndex(boolean useLocal) := sequential(
+    BUILD(processedWords(useLocal), { kind, word, doc, segment, wpos, wip }, { flags, original, dpos }, Files.NameWordIndex(useLocal), overwrite,
 #if (useLocal=true)
             NOROOT,
 #end
             compressed(row)),
     //Add a column mapping, testing done L->R, multiple transforms, and that parameters work
-    fileServices.setColumnMapping(TS_NameWordIndex, 'word{set(stringlib.StringToLowerCase,stringlib.StringFilterOut(\'AEIOU$?.:;,()\'))}')
+    fileServices.setColumnMapping(Files.NameWordIndex(useLocal), 'word{set(stringlib.StringToLowerCase,stringlib.StringFilterOut(\'AEIOU$?.:;,()\'))}')
 );
 
 
@@ -445,7 +448,7 @@ doCreateSimpleIndex() := sequential(
 
 boolean isNewDocumentFunction(string s) := false;
 
-convertTextFileToInversion(TS_sourceType sourceId, string filename, isNewDocumentFunction isNewDocument) := FUNCTION
+convertTextFileToInversion(TS.sourceType sourceId, string filename, isNewDocumentFunction isNewDocument) := FUNCTION
 
 inFile := dataset(filename, { string line{maxlength(MaxDocumentLineLength)}, unsigned8 filepos{virtual(fileposition)} }, csv(SEPARATOR(''),quote([]),maxlength(MaxDocumentLineLength+8+4)));
 
@@ -460,7 +463,7 @@ inFile := dataset(filename, { string line{maxlength(MaxDocumentLineLength)}, uns
     inputDocumentRecord allocateDocuments(inputDocumentRecord l, inputDocumentRecord r) := transform
         SELF.subdoc := IF(l.subdoc = 0, 1, l.subdoc + IF(isNewDocument(r.text), 1, 0));
         //This turns dpos into a (line, column) field instead of byte offset.  Remove for the latter.
-        SELF.filepos := l.filepos + TS_MaxColumnsPerLine;
+        SELF.filepos := l.filepos + TS.MaxColumnsPerLine;
         SELF := r;
     END;
     annotateWithDocument := ITERATE(annotateWithSource, allocateDocuments(LEFT, RIGHT));
@@ -497,14 +500,15 @@ shakespeareStream := normalizeWordFormat(convertTextFileToInversion(4, Directory
 //Build on bible and encyclopedia for the moment.
 //have different characteristics.  Bible has ~74 "documents", encyclopedia has
 doCreateSearchIndex() := sequential(
-    BUILD(bibleStream+encyclopediaStream, { kind, word, doc, segment, wpos, wip }, { flags, original, dpos }, TS_NameSearchIndex, overwrite,
+    BUILD(bibleStream+encyclopediaStream, { kind, word, doc, segment, wpos, wip }, { flags, original, dpos }, Files.NameSearchIndex, overwrite,
 #if (useLocal=true)
         NOROOT,
 #end
           compressed(row)),
-    fileServices.setColumnMapping(TS_NameSearchIndex, 'word{set(unicodelib.UnicodeToLowerCase)}')       // unicode just to be perverse
+    fileServices.setColumnMapping(Files.NameSearchIndex, 'word{set(unicodelib.UnicodeToLowerCase)}')       // unicode just to be perverse
 );
 
 
-IF (rebuildSimpleIndex, doCreateSimpleIndex());
+IF (rebuildSimpleIndex, doCreateSimpleIndex(FALSE));
+IF (rebuildSimpleIndex, doCreateSimpleIndex(TRUE));
 IF (rebuildSearchIndex, doCreateSearchIndex());

+ 2 - 1
testing/regress/ecl/setup/setupxml.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $; C := $.files('');
+import $;
+C := $.files(__PLATFORM__);
 
 set of string lines := [
     '<?xml version="1.0" encoding="UTF-8"?>',

+ 17 - 16
testing/regress/ecl/setup/sq.ecl

@@ -1,9 +1,10 @@
+//This is a module which is imported and used from other queries - don't execute it.
 //skip type==setup TBD
 
-EXPORT sq := MODULE
+EXPORT sq(string platform) := MODULE
 
 //MORE: This is currently hard-wired to hthor since it is a tiny dataset
-EXPORT NamePrefix := '~REGRESS::hthor::';
+EXPORT NamePrefix := '~REGRESS::' + platform + '::';
 
 EXPORT HouseRec :=
             record
@@ -121,20 +122,6 @@ dataset(BookIdRec)        books{blob};
             END;
 
 
-EXPORT HousePersonBookName := NamePrefix + 'HousePersonBook';
-EXPORT PersonBookName := NamePrefix + 'PersonBook';
-EXPORT HouseName := NamePrefix + 'House';
-EXPORT PersonName := NamePrefix + 'Person';
-EXPORT BookName := NamePrefix + 'Book';
-EXPORT SimplePersonBookName := NamePrefix + 'SimplePersonBook';
-
-EXPORT HousePersonBookIndexName := NamePrefix + 'HousePersonBookIndex';
-EXPORT PersonBookIndexName := NamePrefix + 'PersonBookIndex';
-EXPORT HouseIndexName := NamePrefix + 'HouseIndex';
-EXPORT PersonIndexName := NamePrefix + 'PersonIndex';
-EXPORT BookIndexName := NamePrefix + 'BookIndex';
-EXPORT SimplePersonBookIndexName := NamePrefix + 'SimplePersonBookIndex';
-
 EXPORT HousePersonBookIdExRec := record
 HousePersonBookIdRec;
 unsigned8           filepos{virtual(fileposition)};
@@ -168,6 +155,20 @@ unsigned8           filepos{virtual(fileposition)};
 // Dataset definitions:
 
 
+EXPORT HousePersonBookName := NamePrefix + 'HousePersonBook';
+EXPORT PersonBookName := NamePrefix + 'PersonBook';
+EXPORT HouseName := NamePrefix + 'House';
+EXPORT PersonName := NamePrefix + 'Person';
+EXPORT BookName := NamePrefix + 'Book';
+EXPORT SimplePersonBookName := NamePrefix + 'SimplePersonBook';
+
+EXPORT HousePersonBookIndexName := NamePrefix + 'HousePersonBookIndex';
+EXPORT PersonBookIndexName := NamePrefix + 'PersonBookIndex';
+EXPORT HouseIndexName := NamePrefix + 'HouseIndex';
+EXPORT PersonIndexName := NamePrefix + 'PersonIndex';
+EXPORT BookIndexName := NamePrefix + 'BookIndex';
+EXPORT SimplePersonBookIndexName := NamePrefix + 'SimplePersonBookIndex';
+
 EXPORT HousePersonBookDs := dataset(HousePersonBookName, HousePersonBookIdExRec, thor);
 EXPORT PersonBookDs := dataset(PersonBookName, PersonBookRelatedIdRec, thor);
 EXPORT HouseDs := dataset(HouseName, HouseIdExRec, thor);

+ 61 - 0
testing/regress/ecl/setup/ts.ecl

@@ -0,0 +1,61 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+//skip type==setup TBD
+
+//define constants
+EXPORT TS := module
+
+EXPORT MaxTerms             := 50;
+EXPORT MaxStages            := 50;
+EXPORT MaxProximity         := 10;
+EXPORT MaxWildcard          := 1000;
+EXPORT MaxMatchPerDocument  := 1000;
+EXPORT MaxFilenameLength        := 255;
+EXPORT MaxActions           := 255;
+EXPORT MaxTagNesting        := 40;
+EXPORT MaxColumnsPerLine := 10000;          // used to create a pseudo document position
+
+EXPORT kindType         := enum(unsigned1, UnknownEntry=0, TextEntry, OpenTagEntry, CloseTagEntry, OpenCloseTagEntry, CloseOpenTagEntry);
+EXPORT sourceType       := unsigned2;
+EXPORT wordCountType    := unsigned8;
+EXPORT segmentType      := unsigned1;
+EXPORT wordPosType      := unsigned8;
+EXPORT docPosType       := unsigned8;
+EXPORT documentId       := unsigned8;
+EXPORT termType         := unsigned1;
+EXPORT distanceType     := integer8;
+EXPORT indexWipType     := unsigned1;
+EXPORT wipType          := unsigned8;
+EXPORT stageType        := unsigned1;
+EXPORT dateType         := unsigned8;
+
+EXPORT sourceType docid2source(documentId x) := (x >> 48);
+EXPORT documentId docid2doc(documentId x) := (x & 0xFFFFFFFFFFFF);
+EXPORT documentId createDocId(sourceType source, documentId doc) := (documentId)(((unsigned8)source << 48) | doc);
+EXPORT boolean      docMatchesSource(documentId docid, sourceType source) := (docid between createDocId(source,0) and (documentId)(createDocId(source+1,0)-1));
+
+EXPORT wordType := string20;
+EXPORT wordFlags    := enum(unsigned1, HasLower=1, HasUpper=2);
+
+EXPORT wordIdType       := unsigned4;
+
+EXPORT textSearchIndex  := index({ kindType kind, wordType word, documentId doc, segmentType segment, wordPosType wpos, indexWipType wip } , { wordFlags flags, wordType original, docPosType dpos}, '~DoesNotExist');
+EXPORT wordIndexRecord := recordof(textSearchIndex);
+
+
+END;

+ 2 - 1
testing/regress/ecl/sort2.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 myPeople := sq.SimplePersonBookDs(surname <> '');
 

+ 2 - 96
testing/regress/ecl/sqagg.ecl

@@ -15,99 +15,5 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
-
-boolean missingLevelOk := false;
-
-// Test the different child operators.  Try and test inline and out of line, also part of a compound
-// source activity and not part.
-
-udecimal8 todaysDate := 20040602D;
-unsigned4 age(udecimal8 dob) := ((todaysDate - dob) / 10000D);
-
-//MORE: books[1] ave(books)
-
-// Different child operators, all inline.
-persons := sq.HousePersonBookDs.persons;
-books := persons.books;
-personByAgeDesc := sort(sq.HousePersonBookDs.persons, dob);
-
-output(sq.HousePersonBookDs, { addr, numPeople := count(persons), aveAge:= ave(persons, age(dob)), maxDob := max(persons, dob)});
-output(sq.HousePersonBookDs, { addr, oldest := personByAgeDesc[1].forename + ' ' + personByAgeDesc[1].surname });
-output(sq.HousePersonBookDs, { addr, firstPerson := persons[1].forename + ' ' + persons[1].surname });
-
-// Grand children, again all inline.
-
-booksByRatingDesc := sort(sq.HousePersonBookDs.persons.books, -rating100);
-
-
-//sort order is deliberately different from anything that will be used later
-xpersons := sort(sq.HousePersonBookDs.persons, surname + (string)dob + forename)[1..200];
-xpersonByAgeDesc := sort(xpersons, dob);
-xbooks := sort(sq.HousePersonBookDs.persons.books, name + (string)rating100 + author)[1..200];
-xbooksByRatingDesc := sort(xbooks, -rating100);
-xxbooks := sort(xpersons.books, name + (string)rating100 + author)[1..200];
-xxbooksByRatingDesc := sort(xxbooks, -rating100);
-
-//More: Need to think about walking 3rd level children e.g., in ave, and [1]:
-output(sq.HousePersonBookDs, { addr, numBooks := sum(persons, count(books)), maxRating := max(persons, max(books, rating100))});
-output(sq.HousePersonBookDs, { addr, firstBook := persons[1].books[1].name + ': ' + persons[1].books[1].author });
-#if (true)
-output(sq.HousePersonBookDs, { addr, numBooks := count(persons.books), ave(persons.books, rating100), max(persons.books, rating100)});
-output(sq.HousePersonBookDs, { addr, ave(persons.books(persons.booklimit > 0), rating100)});
-output(sq.HousePersonBookDs, { addr, bestBook := booksByRatingDesc[1].name + ': ' + booksByRatingDesc[1].author});
-output(sq.HousePersonBookDs, { addr, firstBook := persons.books[1].name + ': ' + persons.books[1].author });     //NB: Different from above.
-#end
-
-//Now do the same, but unsure the children and main activity are not inline or compound
-// Different child operators, out of line inline.
-output('**** The following results should be identical - calculated using a subquery ****');
-
-output(sq.HousePersonBookDs, { addr, numPeople := count(xpersons), aveAge := ave(xpersons, age(dob)), maxDob := max(xpersons, dob)});
-output(sq.HousePersonBookDs, { addr, oldest := xpersonByAgeDesc[1].forename + ' ' + xpersonByAgeDesc[1].surname });
-output(sq.HousePersonBookDs, { addr, firstPerson := xpersons[1].forename + ' ' + xpersons[1].surname });
-
-// Grand children out of line, children are inline
-
-output(sq.HousePersonBookDs, { addr, numBooks := sum(persons, count(xbooks)), maxRating := max(persons, max(xbooks, rating100))});
-output(sq.HousePersonBookDs, { addr, firstBook := evaluate(persons[1], xbooks[1].name) + ': ' + evaluate(persons[1], xbooks[1].author) });
-#if (true)
-output(sq.HousePersonBookDs, { addr, numBooks := count(xbooks), ave(xbooks, rating100), max(xbooks, rating100)});
-output(sq.HousePersonBookDs, { addr, bestBook := xbooksByRatingDesc[1].name + ': ' + xbooksByRatingDesc[1].author});
-output(sq.HousePersonBookDs, { addr, firstBook := xbooks[1].name + ': ' + xbooks[1].author });       //NB: Different from above.
-#end
-
-// Grand children out of line, children also out of line
-
-output('**** The following results should be similar persons are reordered ****');
-output(sq.HousePersonBookDs, { addr, numBooks := sum(xpersons, count(xxbooks)), max(xpersons, max(xxbooks, rating100))});
-output(sq.HousePersonBookDs, { addr, firstBook := evaluate(xpersons[1], xxbooks[1].name) + ': ' + evaluate(xpersons[1], xxbooks[1].author) });
-#if (true)
-output(sq.HousePersonBookDs, { addr, numBooks := count(xxbooks), ave(xxbooks, rating100), max(xxbooks, rating100)});
-output(sq.HousePersonBookDs, { addr, bestBook := xxbooksByRatingDesc[1].name + ': ' + xxbooksByRatingDesc[1].author});
-output(sq.HousePersonBookDs, { addr, firstBook := xxbooks[1].name + ': ' + xxbooks[1].author });     //NB: Different from above.
-#end
-
-//--------- Now perform the aggregate operations with person as outer iteration ----------
-// note: sq.HousePersonDs fields are still accessible!
-output(sq.HousePersonBookDs.persons, { surname, numBooks := count(books), ave(books, rating100), max(books, rating100)});
-output(sq.HousePersonBookDs.persons, { surname, bestBook := booksByRatingDesc[1].name + ': ' + booksByRatingDesc[1].author});
-output(sq.HousePersonBookDs.persons, { surname, firstBook := books[1].name + ': ' + books[1].author });
-
-output(xpersons, { surname, numBooks := count(xxbooks), ave(xxbooks, rating100), max(xxbooks, rating100)});
-output(xpersons, { surname, bestBook := xxbooksByRatingDesc[1].name + ': ' + xxbooksByRatingDesc[1].author});
-output(xpersons, { surname, firstBook := xxbooks[1].name + ': ' + xxbooks[1].author });
-
-// note: sq.HousePersonDs fields are still accessible!
-
-#if (false)
-output(sq.HousePersonBookDs.persons, { sq.HousePersonBookDs.addr, surname, numBooks := count(books), ave(books, rating100), max(books, rating100)});
-output(sq.HousePersonBookDs.persons, { sq.HousePersonBookDs.addr, surname, bestBook := booksByRatingDesc[1].name + ': ' + booksByRatingDesc[1].author});
-output(sq.HousePersonBookDs.persons, { sq.HousePersonBookDs.addr, surname, firstBook := books[1].name + ': ' + books[1].author });
-#end
-
-#if (missingLevelOk)
-output(xpersons, { sq.HousePersonBookDs.addr, surname, numBooks := count(xxbooks), ave(xxbooks, rating100), max(xxbooks, rating100)});
-output(xpersons, { sq.HousePersonBookDs.addr, surname, bestBook := xxbooksByRatingDesc[1].name + ': ' + xxbooksByRatingDesc[1].author});
-output(xpersons, { sq.HousePersonBookDs.addr, surname, firstBook := xxbooks[1].name + ': ' + xxbooks[1].author });
-#end
+import $.common;
+common.sqagg('hthor');

+ 2 - 1
testing/regress/ecl/sqagg2.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 // Similar to sqagg.hql.  This time using a compound aggregate operator and selecting the results out.
 persons := sq.HousePersonBookDs.persons;

+ 19 - 0
testing/regress/ecl/sqagg_thor.ecl

@@ -0,0 +1,19 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+import $.common;
+common.sqagg('thor');

+ 2 - 1
testing/regress/ecl/sqaggds.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 boolean storedTrue := true : stored('storedTrue');
 

+ 2 - 1
testing/regress/ecl/sqaggds3.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 unsigned xxid := 0 : stored('xxid');
 

+ 2 - 1
testing/regress/ecl/sqaggds4.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 unsigned xxid := 0 : stored('xxid');
 

+ 2 - 1
testing/regress/ecl/sqaggds_an.ecl

@@ -17,7 +17,8 @@
 
 //nothorlcr
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 boolean storedTrue := true : stored('storedTrue');
 

+ 2 - 1
testing/regress/ecl/sqaggseq.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 boolean missingLevelOk := false;
 

+ 2 - 1
testing/regress/ecl/sqcntds.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 output(count(sq.HouseDs));
 output(count(sq.HouseDs)=5);

+ 4 - 1
testing/regress/ecl/sqcntidx.ecl

@@ -15,7 +15,10 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+#onwarning (1036, ignore);
+
+import $.setup;
+sq := setup.sq('hthor');
 
 sequential(
 output(count(sq.SimplePersonBookIndex));

+ 2 - 1
testing/regress/ecl/sqcond.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 // Test the different child operators.  Try and test inline and out of line, also part of a compound
 // source activity and not part.

+ 2 - 1
testing/regress/ecl/sqcond3.ecl

@@ -15,7 +15,8 @@
     limitations under the License.
 ############################################################################## */
 
-import $.setup.sq;
+import $.setup;
+sq := setup.sq('hthor');
 
 ded := dedup(sq.HousePersonBookDs.persons, forename);
 cnt := table(ded, { cnt := count(group); })[1].cnt;

+ 0 - 0
testing/regress/ecl/sqdistr.ecl


Alguns arquivos não foram mostrados porque muitos arquivos mudaram nesse diff