
t.rast.list: Add CSV, JSON, YAML outputs (#2258)

* This adds CSV, JSON, and YAML output and generalizes the other outputs.
* The method option now selects only the data gathering method, not the output formatting; method=cols and method=comma are now obsolete.
* All formatting is available with all methods.
* Most code is shared between the two main code paths (the simple list versus the delta/gran methods).
* Columns can now be specified for any option (previously only for cols/list and partially for comma/line output).
* The change is backwards compatible both on the module interface level and on the Python API level.
* Activate and fix the old bash test (-i for t.register is not accepted without start_time).
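
For illustration, a minimal sketch of the backward compatibility on the module interface level (assumes an active GRASS session; "precip_abs0" is a hypothetical dataset name borrowed from the bash test below):

    import grass.script as gs

    # Old interface, still accepted; method=comma maps to the new line format:
    old = gs.read_command("t.rast.list", input="precip_abs0", method="comma")
    # New equivalent using the format option introduced in this commit:
    new = gs.read_command("t.rast.list", input="precip_abs0", format="line")
    # Per the commit description, both calls produce the same comma-separated list.
    assert old == new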
Vaclav Petras, 2 years ago
commit d2317a2bb6

+ 381 - 169
python/grass/temporal/list_stds.py

@@ -10,19 +10,25 @@ Usage:
     tgis.register_maps_in_space_time_dataset(type, name, maps)
 
 
-(C) 2012-2016 by the GRASS Development Team
+(C) 2012-2022 by the GRASS Development Team
 This program is free software under the GNU General Public
 License (>=v2). Read the file COPYING that comes with GRASS GIS
 for details.
 
 :authors: Soeren Gebbert
+:authors: Vaclav Petras
 """
-from __future__ import print_function
+
+import os
+from contextlib import contextmanager
+import sys
+
+import grass.script as gs
+
 from .core import get_tgis_message_interface, get_available_temporal_mapsets, init_dbif
 from .datetime_math import time_delta_to_relative_time
 from .factory import dataset_factory
 from .open_stds import open_old_stds
-import grass.script as gscript
 
 ###############################################################################
 
@@ -118,9 +124,344 @@ def get_dataset_list(
 ###############################################################################
 
 
+@contextmanager
+def _open_output_file(file, encoding="utf-8", **kwargs):
+    if not file:
+        yield sys.stdout
+    elif not isinstance(file, (str, os.PathLike)):
+        yield file
+    else:
+        with open(file, "w", encoding=encoding, **kwargs) as stream:
+            yield stream
+
+
+def _write_line(items, separator, file):
+    if not separator:
+        separator = ","
+    output = separator.join([f"{item}" for item in items])
+    with _open_output_file(file) as stream:
+        print(f"{output}", file=stream)
+
+
+def _write_plain(rows, header, separator, file):
+    def write_plain_row(items, separator, file):
+        output = separator.join([f"{item}" for item in items])
+        print(f"{output}", file=file)
+
+    with _open_output_file(file) as stream:
+        # Print the column names if requested
+        if header:
+            write_plain_row(items=header, separator=separator, file=stream)
+        for row in rows:
+            write_plain_row(items=row, separator=separator, file=stream)
+
+
+def _write_json(rows, column_names, file):
+    # Lazy import output format-specific dependencies.
+    # pylint: disable=import-outside-toplevel
+    import json
+    import datetime
+
+    class ResultsEncoder(json.JSONEncoder):
+        """Results encoder for JSON which handles SimpleNamespace objects"""
+
+        def default(self, o):
+            """Handle additional types"""
+            if isinstance(o, datetime.datetime):
+                return f"{o}"
+            return super().default(o)
+
+    dict_rows = []
+    for row in rows:
+        new_row = {}
+        for key, value in zip(column_names, row):
+            new_row[key] = value
+        dict_rows.append(new_row)
+    meta = {"column_names": column_names}
+    with _open_output_file(file) as stream:
+        json.dump({"data": dict_rows, "metadata": meta}, stream, cls=ResultsEncoder)
+
+
+def _write_yaml(rows, column_names, file=sys.stdout):
+    # Lazy import output format-specific dependencies.
+    # pylint: disable=import-outside-toplevel
+    import yaml
+
+    class NoAliasIndentListSafeDumper(yaml.SafeDumper):
+        """YAML dumper class which does not create aliases and indents lists
+
+        This avoids dates being labeled with &id001 and referenced with *id001.
+        Instead, same dates are simply repeated.
+
+        Lists have their dash-space (- ) indented instead of considering the
+        dash and space to be a part of indentation. This might be better handled
+        when https://github.com/yaml/pyyaml/issues/234 is resolved.
+        """
+
+        def ignore_aliases(self, data):
+            return True
+
+        def increase_indent(self, flow=False, indentless=False):
+            return super().increase_indent(flow=flow, indentless=False)
+
+    dict_rows = []
+    for row in rows:
+        new_row = {}
+        for key, value in zip(column_names, row):
+            new_row[key] = value
+        dict_rows.append(new_row)
+    meta = {"column_names": column_names}
+    with _open_output_file(file) as stream:
+        print(
+            yaml.dump(
+                {"data": dict_rows, "metadata": meta},
+                Dumper=NoAliasIndentListSafeDumper,
+                default_flow_style=False,
+            ),
+            end="",
+            file=stream,
+        )
+
+
+def _write_csv(rows, column_names, separator, file=sys.stdout):
+    # Lazy import output format-specific dependencies.
+    # pylint: disable=import-outside-toplevel
+    import csv
+
+    # Newlines are handled by the CSV writer, so newline="" is set
+    # according to the csv module documentation.
+    with _open_output_file(file, newline="") as stream:
+        csv_writer = csv.writer(
+            stream,
+            delimiter=separator,
+            quotechar='"',
+            doublequote=True,
+            quoting=csv.QUOTE_NONNUMERIC,
+            lineterminator="\n",
+        )
+        if column_names:
+            csv_writer.writerow(column_names)
+        for row in rows:
+            csv_writer.writerow(row)
+
+
+def _write_table(rows, column_names, output_format, separator, file):
+    if output_format == "json":
+        _write_json(rows=rows, column_names=column_names, file=file)
+    elif output_format == "yaml":
+        _write_yaml(rows=rows, column_names=column_names, file=file)
+    elif output_format == "plain":
+        # No particular reason for this separator except that this is the original behavior.
+        if not separator:
+            separator = "\t"
+        _write_plain(rows=rows, header=column_names, separator=separator, file=file)
+    elif output_format == "csv":
+        if not separator:
+            separator = ","
+        _write_csv(rows=rows, column_names=column_names, separator=separator, file=file)
+    else:
+        raise ValueError(f"Unknown value '{output_format}' for output_format")
+
+
+def _get_get_registered_maps_as_objects_with_method(dataset, where, method, gran, dbif):
+    if method == "deltagaps":
+        return dataset.get_registered_maps_as_objects_with_gaps(where=where, dbif=dbif)
+    if method == "delta":
+        return dataset.get_registered_maps_as_objects(
+            where=where, order="start_time", dbif=dbif
+        )
+    if method == "gran":
+        if where:
+            raise ValueError(
+                f"The where parameter is not supported with method={method}"
+            )
+        if gran is not None and gran != "":
+            return dataset.get_registered_maps_as_objects_by_granularity(
+                gran=gran, dbif=dbif
+            )
+        return dataset.get_registered_maps_as_objects_by_granularity(dbif=dbif)
+    raise ValueError(f"Invalid method '{method}'")
+
+
+def _get_get_registered_maps_as_objects_delta_gran(
+    dataset, where, method, gran, dbif, msgr
+):
+    maps = _get_get_registered_maps_as_objects_with_method(
+        dataset=dataset, where=where, method=method, gran=gran, dbif=dbif
+    )
+    if not maps:
+        return []
+
+    if isinstance(maps[0], list):
+        if len(maps[0]) > 0:
+            first_time, unused = maps[0][0].get_temporal_extent_as_tuple()
+        else:
+            msgr.warning(_("Empty map list"))
+            return []
+    else:
+        first_time, unused = maps[0].get_temporal_extent_as_tuple()
+
+    records = []
+    for map_object in maps:
+
+        if isinstance(map_object, list):
+            if len(map_object) > 0:
+                map_object = map_object[0]
+            else:
+                msgr.fatal(_("Empty entry in map list, this should not happen"))
+
+        start, end = map_object.get_temporal_extent_as_tuple()
+        if end:
+            delta = end - start
+        else:
+            delta = None
+        delta_first = start - first_time
+
+        if map_object.is_time_absolute():
+            if end:
+                delta = time_delta_to_relative_time(delta)
+            delta_first = time_delta_to_relative_time(delta_first)
+        records.append((map_object, start, end, delta, delta_first))
+    return records
+
+
+def _get_list_of_maps_delta_gran(dataset, columns, where, method, gran, dbif, msgr):
+    maps = _get_get_registered_maps_as_objects_delta_gran(
+        dataset=dataset, where=where, method=method, gran=gran, dbif=dbif, msgr=msgr
+    )
+    rows = []
+    for map_object, start, end, delta, delta_first in maps:
+        row = []
+        # Here the names must be the same as in the database
+        # to make the interface consistent.
+        for column in columns:
+            if column == "id":
+                row.append(map_object.get_id())
+            elif column == "name":
+                row.append(map_object.get_name())
+            elif column == "layer":
+                row.append(map_object.get_layer())
+            elif column == "mapset":
+                row.append(map_object.get_mapset())
+            elif column == "start_time":
+                row.append(start)
+            elif column == "end_time":
+                row.append(end)
+            elif column == "interval_length":
+                row.append(delta)
+            elif column == "distance_from_begin":
+                row.append(delta_first)
+            else:
+                raise ValueError(f"Unsupported column '{column}'")
+        rows.append(row)
+    return rows
+
+
+def _get_list_of_maps_stds(
+    element_type,
+    name,
+    columns,
+    order,
+    where,
+    method,
+    output_format,
+    gran=None,
+    dbif=None,
+):
+    dbif, connection_state_changed = init_dbif(dbif)
+    msgr = get_tgis_message_interface()
+
+    dataset = open_old_stds(name, element_type, dbif)
+
+    def check_columns(column_names, output_format, element_type):
+        if element_type != "stvds" and "layer" in column_names:
+            raise ValueError(
+                f"Column 'layer' is not allowed with dataset type '{element_type}'"
+            )
+            )
+        if output_format == "line" and len(column_names) > 1:
+            raise ValueError(
+                f"'{output_format}' output_format can have only 1 column, "
+                f"not {len(column_names)}"
+            )
+
+    # This method expects a list of objects for gap detection
+    if method in ["delta", "deltagaps", "gran"]:
+        if not columns:
+            if output_format == "list":
+                # Only one column is needed.
+                columns = ["id"]
+            else:
+                columns = ["id", "name"]
+                if element_type == "stvds":
+                    columns.append("layer")
+                columns.extend(
+                    [
+                        "mapset",
+                        "start_time",
+                        "end_time",
+                        "interval_length",
+                        "distance_from_begin",
+                    ]
+                )
+        check_columns(
+            column_names=columns,
+            output_format=output_format,
+            element_type=element_type,
+        )
+        rows = _get_list_of_maps_delta_gran(
+            dataset=dataset,
+            columns=columns,
+            where=where,
+            method=method,
+            gran=gran,
+            dbif=dbif,
+            msgr=msgr,
+        )
+    else:
+        if columns:
+            check_columns(
+                column_names=columns,
+                output_format=output_format,
+                element_type=element_type,
+            )
+        else:
+            if output_format == "line":
+                # For list of values, only one column is needed.
+                columns = ["id"]
+            else:
+                columns = ["name", "mapset", "start_time", "end_time"]
+        if not order:
+            order = "start_time"
+
+        rows = dataset.get_registered_maps(",".join(columns), where, order, dbif)
+
+        # End with an error for the old, custom formats. Proper formats simply
+        # return an empty result, whatever empty means for the given format
+        # (e.g., an empty list for JSON).
+        if not rows and (output_format in ["plain", "line"]):
+            dbif.close()
+            gs.fatal(
+                _(
+                    "Nothing found in the database for space time dataset <{name}> "
+                    "(type: {element_type}): {detail}"
+                ).format(
+                    name=dataset.get_id(),
+                    element_type=element_type,
+                    detail=_(
+                        "Dataset is empty or where clause is too constrained or incorrect"
+                    )
+                    if where
+                    else _("Dataset is empty"),
+                )
+            )
+    if connection_state_changed:
+        dbif.close()
+    return rows, columns
+
+
+# The code is compatible with pre-v8.2 versions, but for v9, it needs to be reviewed
+# to remove the backwards compatibility which will clean it up.
 def list_maps_of_stds(
-    type,
-    input,
+    type,  # pylint: disable=redefined-builtin
+    input,  # pylint: disable=redefined-builtin
     columns,
     order,
     where,
@@ -130,6 +471,7 @@ def list_maps_of_stds(
     gran=None,
     dbif=None,
     outpath=None,
+    output_format=None,
 ):
     """List the maps of a space time dataset using different methods
 
@@ -161,171 +503,41 @@ def list_maps_of_stds(
                  dataset is used
     :param outpath: The path to file where to save output
     """
-
-    dbif, connection_state_changed = init_dbif(dbif)
-    msgr = get_tgis_message_interface()
-
-    sp = open_old_stds(input, type, dbif)
-
-    if separator is None or separator == "":
-        separator = "\t"
-
-    if outpath:
-        outfile = open(outpath, "w")
-
-    # This method expects a list of objects for gap detection
-    if method == "delta" or method == "deltagaps" or method == "gran":
-        if type == "stvds":
-            columns = "id,name,layer,mapset,start_time,end_time"
-        else:
-            columns = "id,name,mapset,start_time,end_time"
-        if method == "deltagaps":
-            maps = sp.get_registered_maps_as_objects_with_gaps(where=where, dbif=dbif)
-        elif method == "delta":
-            maps = sp.get_registered_maps_as_objects(
-                where=where, order="start_time", dbif=dbif
-            )
-        elif method == "gran":
-            if gran is not None and gran != "":
-                maps = sp.get_registered_maps_as_objects_by_granularity(
-                    gran=gran, dbif=dbif
-                )
-            else:
-                maps = sp.get_registered_maps_as_objects_by_granularity(dbif=dbif)
-
-        if no_header is False:
-            string = ""
-            string += "%s%s" % ("id", separator)
-            string += "%s%s" % ("name", separator)
-            if type == "stvds":
-                string += "%s%s" % ("layer", separator)
-            string += "%s%s" % ("mapset", separator)
-            string += "%s%s" % ("start_time", separator)
-            string += "%s%s" % ("end_time", separator)
-            string += "%s%s" % ("interval_length", separator)
-            string += "%s" % ("distance_from_begin")
-            if outpath:
-                outfile.write("{st}\n".format(st=string))
-            else:
-                print(string)
-
-        if maps and len(maps) > 0:
-
-            if isinstance(maps[0], list):
-                if len(maps[0]) > 0:
-                    first_time, dummy = maps[0][0].get_temporal_extent_as_tuple()
-                else:
-                    msgr.warning(_("Empty map list"))
-                    return
-            else:
-                first_time, dummy = maps[0].get_temporal_extent_as_tuple()
-
-            for mymap in maps:
-
-                if isinstance(mymap, list):
-                    if len(mymap) > 0:
-                        map = mymap[0]
-                    else:
-                        msgr.fatal(_("Empty entry in map list, this should not happen"))
-                else:
-                    map = mymap
-
-                start, end = map.get_temporal_extent_as_tuple()
-                if end:
-                    delta = end - start
-                else:
-                    delta = None
-                delta_first = start - first_time
-
-                if map.is_time_absolute():
-                    if end:
-                        delta = time_delta_to_relative_time(delta)
-                    delta_first = time_delta_to_relative_time(delta_first)
-
-                string = ""
-                string += "%s%s" % (map.get_id(), separator)
-                string += "%s%s" % (map.get_name(), separator)
-                if type == "stvds":
-                    string += "%s%s" % (map.get_layer(), separator)
-                string += "%s%s" % (map.get_mapset(), separator)
-                string += "%s%s" % (start, separator)
-                string += "%s%s" % (end, separator)
-                string += "%s%s" % (delta, separator)
-                string += "%s" % (delta_first)
-                if outpath:
-                    outfile.write("{st}\n".format(st=string))
-                else:
-                    print(string)
-
-    else:
-        # In comma separated mode only map ids are needed
+    if not output_format:
         if method == "comma":
-            if columns not in ["id", "name"]:
-                columns = "id"
-
-        rows = sp.get_registered_maps(columns, where, order, dbif)
-
-        if not rows:
-            dbif.close()
-            err = "Space time %(sp)s dataset <%(i)s> is empty"
-            if where:
-                err += " or where condition is wrong"
-            gscript.fatal(
-                _(err)
-                % {"sp": sp.get_new_map_instance(None).get_type(), "i": sp.get_id()}
-            )
-
-        if rows:
-            if method == "comma":
-                string = ""
-                count = 0
-                for row in rows:
-                    if count == 0:
-                        string += row[columns]
-                    else:
-                        string += ",%s" % row[columns]
-                    count += 1
-                if outpath:
-                    outfile.write("{st}\n".format(st=string))
-                else:
-                    print(string)
-
-            elif method == "cols":
-                # Print the column names if requested
-                if no_header is False:
-                    output = ""
-                    count = 0
-
-                    collist = columns.split(",")
-
-                    for key in collist:
-                        if count > 0:
-                            output += separator + str(key)
-                        else:
-                            output += str(key)
-                        count += 1
-                    if outpath:
-                        outfile.write("{st}\n".format(st=output))
-                    else:
-                        print(output)
-
-                for row in rows:
-                    output = ""
-                    count = 0
-                    for col in row:
-                        if count > 0:
-                            output += separator + str(col)
-                        else:
-                            output += str(col)
-                        count += 1
-                    if outpath:
-                        outfile.write("{st}\n".format(st=output))
-                    else:
-                        print(output)
-    if outpath:
-        outfile.close()
-    if connection_state_changed:
-        dbif.close()
+            output_format = "line"
+        output_format = "plain"
+
+    if columns:
+        if isinstance(columns, str):
+            columns = columns.split(",")
+
+    rows, columns = _get_list_of_maps_stds(
+        element_type=type,
+        name=input,
+        columns=columns,
+        order=order,
+        where=where,
+        method=method,
+        output_format=output_format,
+        gran=gran,
+        dbif=dbif,
+    )
+
+    if output_format == "line":
+        _write_line(
+            items=[row[0] for row in rows],
+            separator=separator,
+            file=outpath,
+        )
+    else:
+        _write_table(
+            rows=rows,
+            column_names=None if no_header else columns,
+            separator=separator,
+            output_format=output_format,
+            file=outpath,
+        )
 
 
 ###############################################################################
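
A minimal sketch of the updated Python API of list_maps_of_stds (keyword arguments shown; assumes an active GRASS session and an existing space time raster dataset named "precipitation"):

    import grass.temporal as tgis

    tgis.init()  # make sure the temporal database exists
    # output_format is the new parameter; None keeps the old behavior
    tgis.list_maps_of_stds(
        type="strds",
        input="precipitation",
        columns=["name", "start_time"],
        order=None,
        where=None,
        separator=None,
        method="list",
        no_header=False,
        output_format="json",
    )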

+ 37 - 8
temporal/t.rast.list/t.rast.list.html

@@ -5,24 +5,41 @@ raster dataset. <em>t.rast.list</em> provides several options to list map layers
 and their metadata. Listing of map layers can be ordered by metadata,
 metadata columns can be specified and SQL where conditions can be
 provided to select a map layer subset of the input space time raster
-dataset. Most of the raster map  specific metadat is available for
+dataset. Most of the raster map specific metadata is available for
 column selection, sorting and SQL where statements.
 
 Using the <b>method</b> option allows the specification of different
-methods to list map layers. Method <i>col</i> is the default option and
+methods to list map layers. Method <i>list</i> is the default option and
 sensitive to the <b>columns</b>, <b>order</b> and <b>where</b> options.
-It will simply print user specified metadata columns of one map layer
-per line. The <i>comma</i> method will list the map layer as comma
-separated list that can be used as input for spatial modules.
 <p>
 To print the interval length in days and the distance from the begin, use method
 <i>delta</i>. Method <i>deltagaps</i> will additionally print temporal
 gaps between map layers. The <i>gran</i> method allows the listing of
 map layers sampled by a user-defined <b>granule</b>. By default, the
 granularity of the space time raster dataset is used for sampling.
-
-The output column separator can be specified with the <b>separator</b>
-option.
+<p>
+While method <i>list</i> supports all columns except for
+interval_length and distance_from_begin,
+methods <i>delta</i>, <i>deltagaps</i>, and <i>gran</i> support only the following
+columns: id, name, mapset, start_time, end_time, interval_length, and distance_from_begin.
+The option <b>order</b> is only available with method <i>list</i>.
+<p>
+Methods <i>cols</i> and <i>comma</i> are deprecated.
+The <i>cols</i> method is replaced by the <i>plain</i> format and
+the <i>comma</i> method is replaced by the <i>line</i> format.
+<p>
+The <b>format</b> option specifies the format of the output data.
+The default <i>plain</i> format will simply print user-specified metadata
+columns, one map layer per line, separated by a pipe by default.
+The <i>line</i> format will list fully qualified map names (name and mapset)
+as a comma-separated list of values that can be used as input for spatial modules.
+The <i>csv</i> format will print data in the CSV format using
+comma as the value separator (delimiter) and double quote for text field quoting.
+The <i>json</i> format generates JSON and the <i>yaml</i> format generates
+YAML (the latter requires the PyYAML package to be installed).
+
+The column (or item) separator can be specified with the <b>separator</b>
+option for <i>plain</i>, <i>line</i>, and <i>csv</i>.
 
 <h2>EXAMPLES</h2>
 
@@ -183,6 +200,18 @@ id|name|mapset|start_time|end_time|interval_length|distance_from_begin
 For the <em>deltagaps</em> method, see the example for the space time
 vector dataset in <a href="t.vect.list.html#using-method-option">t.vect.list</a>.
 
+<h3>Reading raster names in Python</h3>
+
+<div class="code"><pre>
+result = json.loads(
+    gs.read_command(
+        "t.rast.list", input="tempmean_monthly", format="json"
+    )
+)
+for item in result["data"]:
+    print(item["name"])
+</pre></div>
+
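
To complement the JSON example above, a sketch of consuming the csv format with the Python standard library (same assumptions as the examples on this page, i.e., the tempmean_monthly dataset exists):

    import csv
    import io

    import grass.script as gs

    text = gs.read_command(
        "t.rast.list",
        input="tempmean_monthly",
        columns="name,start_time",
        format="csv",
    )
    # The defaults of DictReader (comma delimiter, double-quote quoting)
    # match what the csv format produces.
    for row in csv.DictReader(io.StringIO(text)):
        print(row["name"], row["start_time"])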
 <h3>Filtering the result by semantic label</h3>
 
 Semantic label can be assigned to raster maps

+ 161 - 9
temporal/t.rast.list/t.rast.list.py

@@ -40,7 +40,6 @@
 # % required: no
 # % multiple: yes
 # % options: id,name,semantic_label,creator,mapset,temporal_type,creation_time,start_time,end_time,north,south,west,east,nsres,ewres,cols,rows,number_of_cells,min,max
-# % answer: start_time
 # %end
 
 # %option
@@ -50,8 +49,7 @@
 # % guisection: Selection
 # % required: no
 # % multiple: yes
-# % options: id,name,semantic_label,creator,mapset,temporal_type,creation_time,start_time,end_time,north,south,west,east,nsres,ewres,cols,rows,number_of_cells,min,max
-# % answer: name,mapset,start_time,end_time
+# % options: id,name,semantic_label,creator,mapset,temporal_type,creation_time,start_time,end_time,north,south,west,east,nsres,ewres,cols,rows,number_of_cells,min,max,interval_length,distance_from_begin
 # %end
 
 # %option G_OPT_T_WHERE
@@ -64,8 +62,8 @@
 # % description: Method used for data listing
 # % required: no
 # % multiple: no
-# % options: cols,comma,delta,deltagaps,gran
-# % answer: cols
+# % options: list,cols,comma,delta,deltagaps,gran
+# % answer: list
 # %end
 
 # %option
@@ -76,6 +74,16 @@
 # % multiple: no
 # %end
 
+# %option
+# % key: format
+# % type: string
+# % description: Output format
+# % required: no
+# % multiple: no
+# % options: plain,line,json,yaml,csv
+# % guisection: Formatting
+# %end
+
 # %option G_OPT_F_SEP
 # % label: Field separator character between the output columns
 # % guisection: Formatting
@@ -91,26 +99,170 @@
 # % guisection: Formatting
 # %end
 
-import grass.script as grass
+import grass.script as gs
 
 
-############################################################################
+def message_option_value_excludes_option_value(
+    option_name, option_value, excluded_option_name, excluded_option_value, reason
+):
+    return _(
+        "Combining {option_name}={option_value} and "
+        "{excluded_option_name}={excluded_option_value} is not allowed. {reason}"
+    ).format(**locals())
+
+
+def message_option_value_excludes_option(
+    option_name, option_value, excluded_option_name, reason
+):
+    return _(
+        "The option {excluded_option_name} is not allowed with "
+        "{option_name}={option_value}. {reason}"
+    ).format(**locals())
+
+
+def message_option_value_excludes_flag(option_name, option_value, flag_name, reason):
+    return _(
+        "The flag -{flag_name} is not allowed with {option_name}={option_value}."
+        " {reason}"
+    ).format(**locals())
 
 
 def main():
+    options, flags = gs.parser()
+
     # lazy imports
     import grass.temporal as tgis
 
     # Get the options
+    # Parser does not ensure that the input exists.
     input = options["input"]
     columns = options["columns"]
     order = options["order"]
     where = options["where"]
-    separator = grass.separator(options["separator"])
+    separator = gs.separator(options["separator"])
     method = options["method"]
     granule = options["granule"]
     header = flags["u"]
     output = options["output"]
+    output_format = options["format"]
+
+    if output_format == "csv":
+        if len(separator) > 1:
+            gs.fatal(
+                message_option_value_excludes_option_value(
+                    option_name="format",
+                    option_value=output_format,
+                    excluded_option_name="separator",
+                    excluded_option_value=separator,
+                    reason=_(
+                        "A standard CSV separator (delimiter) is only one character long"
+                    ),
+                )
+            )
+        if separator == "|":
+            # We use comma as the default separator for CSV, so we override the pipe.
+            # This does not allow users to generate CSV with a pipe, but unlike
+            # the C API, the Python interface spec does not allow resetting the default
+            # except by setting it to an empty string, which has no precedent
+            # in the current code and whose behavior is unclear.
+            separator = ","
+    if output_format in ["json", "yaml"] and header:
+        gs.fatal(
+            message_option_value_excludes_flag(
+                option_name="format",
+                option_value=output_format,
+                flag_name="u",
+                reason=_("Column names are always included"),
+            )
+        )
+        # We ignore separator when it is set for JSON and YAML because of the
+        # default value which is always present (see above). Having no default
+        # and producing an error when separator is set would be clearer and
+        # would fit with using different defaults for the plain and CSV formats.
+    elif (output_format == "line" or method == "comma") and separator == "|":
+        # Same as for CSV: Custom default needed.
+        # Pipe is currently not supported at all.
+        separator = ","
+
+    if method in ["delta", "deltagaps", "gran"]:
+        if order:
+            gs.fatal(
+                message_option_value_excludes_option(
+                    option_name="method",
+                    option_value=method,
+                    excluded_option_name="order",
+                    reason=_("Values are always ordered by start_time"),
+                )
+            )
+        if columns:
+            columns_list = columns.split(",")
+            for column in [
+                "semantic_label",
+                "creator",
+                "temporal_type",
+                "creation_time",
+                "north",
+                "south",
+                "west",
+                "east",
+                "nsres",
+                "ewres",
+                "cols",
+                "rows",
+                "number_of_cells",
+                "min",
+                "max",
+            ]:
+                if column in columns_list:
+                    gs.fatal(
+                        message_option_value_excludes_option_value(
+                            option_name="method",
+                            option_value=method,
+                            excluded_option_name="columns",
+                            excluded_option_value=columns,
+                            reason=_(
+                                "Column '{name}' is not available with the method '{method}'"
+                            ).format(name=column, method=method),
+                        )
+                    )
+    elif columns:
+        columns_list = columns.split(",")
+        for column in ["interval_length", "distance_from_begin"]:
+            if column in columns_list:
+                gs.fatal(
+                    message_option_value_excludes_option_value(
+                        option_name="method",
+                        option_value=method,
+                        excluded_option_name="columns",
+                        excluded_option_value=columns,
+                        reason=_(
+                            "Column '{name}' is not available with the method '{method}'"
+                        ).format(name=column, method=method),
+                    )
+                )
+    if output_format == "line" or method == "comma":
+        columns_list = columns.split(",")
+        if len(columns_list) > 1:
+            gs.fatal(
+                message_option_value_excludes_option_value(
+                    option_name="format",
+                    option_value=output_format,
+                    excluded_option_name="columns",
+                    excluded_option_value=columns,
+                    reason=_("Only one column is allowed (not {num_columns})").format(
+                        num_columns=len(columns_list)
+                    ),
+                )
+            )
+    if method == "gran" and where:
+        gs.fatal(
+            message_option_value_excludes_option(
+                option_name="method",
+                option_value=method,
+                excluded_option_name="where",
+                reason=_("All maps are always listed"),
+            )
+        )
 
     # Make sure the temporal database exists
     tgis.init()
@@ -126,9 +278,9 @@ def main():
         header,
         granule,
         outpath=output,
+        output_format=output_format,
     )
 
 
 if __name__ == "__main__":
-    options, flags = grass.parser()
     main()
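
A companion sketch for the yaml format defined by the new format option (requires PyYAML; the dataset name is hypothetical as in the other examples):

    import yaml

    import grass.script as gs

    result = yaml.safe_load(
        gs.read_command("t.rast.list", input="tempmean_monthly", format="yaml")
    )
    for item in result["data"]:
        # YAML parses start_time into a datetime object
        print(item["name"], item["start_time"])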

+ 54 - 0
temporal/t.rast.list/tests/conftest.py

@@ -0,0 +1,54 @@
+"""Fixture for t.rast.list test"""
+
+from datetime import datetime
+from types import SimpleNamespace
+
+import pytest
+
+import grass.script as gs
+import grass.script.setup as grass_setup
+
+
+@pytest.fixture(scope="module")
+def space_time_raster_dataset(tmp_path_factory):
+    """Start a session and create a raster time series
+
+    Returns object with attributes about the dataset.
+    """
+    tmp_path = tmp_path_factory.mktemp("raster_time_series")
+    location = "test"
+    gs.core._create_location_xy(tmp_path, location)  # pylint: disable=protected-access
+    with grass_setup.init(tmp_path / location):
+        gs.run_command("g.region", s=0, n=80, w=0, e=120, b=0, t=50, res=10, res3=10)
+        names = [f"precipitation_{i}" for i in range(1, 7)]
+        max_values = [550, 450, 320, 510, 300, 650]
+        for name, value in zip(names, max_values):
+            gs.mapcalc(f"{name} = rand(0, {value})", seed=1)
+        dataset_name = "precipitation"
+        gs.run_command(
+            "t.create",
+            type="strds",
+            temporaltype="absolute",
+            output=dataset_name,
+            title="Precipitation",
+            description="Random series generated for tests",
+        )
+        dataset_file = tmp_path / "names.txt"
+        dataset_file.write_text("\n".join(names))
+        gs.run_command(
+            "t.register",
+            type="raster",
+            flags="i",
+            input=dataset_name,
+            file=dataset_file,
+            start="2001-01-01",
+            increment="1 month",
+        )
+        times = [datetime(2001, i, 1) for i in range(1, len(names) + 1)]
+        full_names = [f"{name}@PERMANENT" for name in names]
+        yield SimpleNamespace(
+            name=dataset_name,
+            raster_names=names,
+            full_raster_names=full_names,
+            start_times=times,
+        )

+ 261 - 0
temporal/t.rast.list/tests/t_rast_list_test.py

@@ -0,0 +1,261 @@
+"""Test t.rast.list output formats"""
+
+import csv
+import datetime
+import io
+import json
+
+import pytest
+
+try:
+    import yaml
+except ImportError:
+    yaml = None
+
+import grass.script as gs
+
+
+def test_defaults(space_time_raster_dataset):
+    """Check that the module runs with default parameters"""
+    gs.run_command("t.rast.list", input=space_time_raster_dataset.name)
+
+
+def test_line(space_time_raster_dataset):
+    """Line format can be parsed and contains full names by default"""
+    names = (
+        gs.read_command(
+            "t.rast.list", input=space_time_raster_dataset.name, format="line"
+        )
+        .strip()
+        .split(",")
+    )
+    assert names == space_time_raster_dataset.full_raster_names
+
+
+def test_json(space_time_raster_dataset):
+    """Check JSON can be parsed and contains the right values"""
+    result = json.loads(
+        gs.read_command(
+            "t.rast.list", input=space_time_raster_dataset.name, format="json"
+        )
+    )
+    assert "data" in result
+    assert "metadata" in result
+    for item in result["data"]:
+        for name in result["metadata"]["column_names"]:
+            assert item[name], "All values should be set with the default columns"
+    names = [item["name"] for item in result["data"]]
+    assert names == space_time_raster_dataset.raster_names
+
+
+@pytest.mark.skipif(yaml is None, reason="PyYAML package not available")
+def test_yaml(space_time_raster_dataset):
+    """Check JSON can be parsed and contains the right values"""
+    result = yaml.safe_load(
+        gs.read_command(
+            "t.rast.list", input=space_time_raster_dataset.name, format="yaml"
+        )
+    )
+    assert "data" in result
+    assert "metadata" in result
+    for item in result["data"]:
+        for name in result["metadata"]["column_names"]:
+            assert item[name], "All values should be set with the default columns"
+        assert isinstance(item["start_time"], datetime.datetime)
+    names = [item["name"] for item in result["data"]]
+    assert names == space_time_raster_dataset.raster_names
+    times = [item["start_time"] for item in result["data"]]
+    assert times == space_time_raster_dataset.start_times
+
+
+@pytest.mark.parametrize(
+    "separator,delimeter", [(None, ","), (",", ","), (";", ";"), ("tab", "\t")]
+)
+def test_csv(space_time_raster_dataset, separator, delimeter):
+    """Check CSV can be parsed with different separators"""
+    columns = ["name", "start_time"]
+    text = gs.read_command(
+        "t.rast.list",
+        input=space_time_raster_dataset.name,
+        columns=columns,
+        format="csv",
+        separator=separator,
+    )
+    io_string = io.StringIO(text)
+    reader = csv.DictReader(
+        io_string,
+        delimiter=delimiter,
+        quotechar='"',
+        doublequote=True,
+        lineterminator="\n",
+        strict=True,
+    )
+    data = list(reader)
+    assert len(data) == len(space_time_raster_dataset.raster_names)
+    for row in data:
+        assert len(row) == len(columns)
+
+
+def test_columns_list(space_time_raster_dataset):
+    """Check CSV can be parsed with different separators"""
+    # All relevant columns from the interface.
+    columns = [
+        "id",
+        "name",
+        "semantic_label",
+        "creator",
+        "mapset",
+        "temporal_type",
+        "creation_time",
+        "start_time",
+        "end_time",
+        "north",
+        "south",
+        "west",
+        "east",
+        "nsres",
+        "ewres",
+        "cols",
+        "rows",
+        "number_of_cells",
+        "min",
+        "max",
+    ]
+    result = json.loads(
+        gs.read_command(
+            "t.rast.list",
+            input=space_time_raster_dataset.name,
+            method="list",
+            columns=columns,
+            format="json",
+        )
+    )
+    data = result["data"]
+    assert len(data) == len(space_time_raster_dataset.raster_names)
+    for row in data:
+        assert len(row) == len(columns)
+
+
+def test_columns_delta_gran(space_time_raster_dataset):
+    """Check CSV can be parsed with different separators"""
+    # All relevant columns from the interface.
+    columns = [
+        "id",
+        "name",
+        "mapset",
+        "start_time",
+        "end_time",
+        "interval_length",
+        "distance_from_begin",
+    ]
+    result = json.loads(
+        gs.read_command(
+            "t.rast.list",
+            input=space_time_raster_dataset.name,
+            method="gran",
+            columns=columns,
+            format="json",
+        )
+    )
+    data = result["data"]
+    assert len(data) == len(space_time_raster_dataset.raster_names)
+    for row in data:
+        assert len(row) == len(columns)
+
+
+def test_json_empty_result(space_time_raster_dataset):
+    """Check JSON is generated for no returned values"""
+    result = json.loads(
+        gs.read_command(
+            "t.rast.list",
+            input=space_time_raster_dataset.name,
+            format="json",
+            where="FALSE",
+        )
+    )
+    assert "data" in result
+    assert "metadata" in result
+    assert len(result["data"]) == 0
+
+
+@pytest.mark.parametrize("output_format", ["plain", "line"])
+def test_plain_empty_result(space_time_raster_dataset, output_format):
+    """Check module fails with non-zero return code for empty result"""
+    return_code = gs.run_command(
+        "t.rast.list",
+        input=space_time_raster_dataset.name,
+        format=output_format,
+        where="FALSE",
+        errors="status",
+    )
+    assert return_code != 0
+
+
+@pytest.mark.parametrize("output_format", ["csv", "plain"])
+def test_no_header_accepted(space_time_raster_dataset, output_format):
+    """Check that the no column names flag is accepted"""
+    gs.run_command(
+        "t.rast.list",
+        input=space_time_raster_dataset.name,
+        format=output_format,
+        flags="u",
+    )
+
+
+@pytest.mark.parametrize("output_format", ["json", "yaml"])
+def test_no_header_rejected(space_time_raster_dataset, output_format):
+    """Check that the no column names flag is rejected
+
+    Given how the format dependencies are handled, this will run even
+    when YAML support is missing.
+    """
+    return_code = gs.run_command(
+        "t.rast.list",
+        input=space_time_raster_dataset.name,
+        format=output_format,
+        flags="u",
+        errors="status",
+    )
+    assert return_code != 0
+
+
+@pytest.mark.parametrize("method", ["delta", "deltagaps", "gran"])
+def test_other_methods_json(space_time_raster_dataset, method):
+    """Test methods other than list"""
+    result = json.loads(
+        gs.read_command(
+            "t.rast.list",
+            input=space_time_raster_dataset.name,
+            format="json",
+            method=method,
+        )
+    )
+    assert "data" in result
+    assert "metadata" in result
+    for item in result["data"]:
+        assert item["interval_length"] >= 0
+        assert item["distance_from_begin"] >= 0
+    names = [item["name"] for item in result["data"]]
+    assert names == space_time_raster_dataset.raster_names
+
+
+def test_gran_json(space_time_raster_dataset):
+    """Test granularity method"""
+    result = json.loads(
+        gs.read_command(
+            "t.rast.list",
+            input=space_time_raster_dataset.name,
+            format="json",
+            method="gran",
+            gran="15 days",
+        )
+    )
+    assert "data" in result
+    assert "metadata" in result
+    for item in result["data"]:
+        assert item["interval_length"] >= 0
+        assert item["distance_from_begin"] >= 0
+        assert (
+            item["name"] in space_time_raster_dataset.raster_names
+            or item["name"] is None
+        )
+    assert len(result["data"]) > len(
+        space_time_raster_dataset.raster_names
+    ), "There should be more entries because of finer granularity"

+ 10 - 10
temporal/t.rast.list/test.t.rast.list.sh

@@ -72,7 +72,7 @@ t.create type=strds temporaltype=absolute output=precip_abs0 title="A test with
 
 # The @test
 t.register type=raster -i input=precip_abs0 file="${n1}" start="2001-01-01" increment="1 month"
-t.rast.list    separator=" | " method=comma     input=precip_abs0
+t.rast.list  separator=" | " method=comma     input=precip_abs0
 t.rast.list  input=precip_abs0
 t.rast.list  separator=" | " method=cols      input=precip_abs0
 t.rast.list  separator=" | " method=delta     input=precip_abs0
@@ -82,7 +82,7 @@ t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="2 months"
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="1 day"
 
 t.register type=raster input=precip_abs0 file="${n1}" start="2001-01-01" increment="1 month"
-t.rast.list    separator=" | " method=comma     input=precip_abs0
+t.rast.list  separator=" | " method=comma     input=precip_abs0
 t.rast.list  input=precip_abs0
 t.rast.list  separator=" | " method=cols      input=precip_abs0
 t.rast.list  separator=" | " method=delta     input=precip_abs0
@@ -91,8 +91,8 @@ t.rast.list  separator=" | " method=gran      input=precip_abs0
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="2 months"
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="6 days"
 
-t.register type=raster -i input=precip_abs0 file="${n2}" 
-t.rast.list    separator=" | " method=comma     input=precip_abs0
+t.register type=raster input=precip_abs0 file="${n2}"
+t.rast.list  separator=" | " method=comma     input=precip_abs0
 t.rast.list  input=precip_abs0
 t.rast.list  separator=" | " method=cols      input=precip_abs0
 t.rast.list  separator=" | " method=delta     input=precip_abs0
@@ -101,24 +101,24 @@ t.rast.list  separator=" | " method=gran      input=precip_abs0
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="2 months"
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="6 days"
 
-t.register type=raster -i input=precip_abs0 file="${n3}"
-t.rast.list    separator=" | " method=comma     input=precip_abs0
+t.register type=raster input=precip_abs0 file="${n3}"
+t.rast.list  separator=" | " method=comma     input=precip_abs0
 t.rast.list  separator=" | " method=delta     input=precip_abs0
 t.rast.list  separator=" | " method=deltagaps input=precip_abs0
 t.rast.list  separator=" | " method=gran      input=precip_abs0
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="2 months"
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="6 days"
 
-t.register type=raster -i input=precip_abs0 file="${n4}"
-t.rast.list    separator=" | " method=comma     input=precip_abs0
+t.register type=raster input=precip_abs0 file="${n4}"
+t.rast.list  separator=" | " method=comma     input=precip_abs0
 t.rast.list  separator=" | " method=delta     input=precip_abs0
 t.rast.list  separator=" | " method=deltagaps input=precip_abs0
 t.rast.list  separator=" | " method=gran      input=precip_abs0
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="2 months"
 t.rast.list  separator=" | " method=gran      input=precip_abs0 gran="6 days"
 
-t.register type=raster -i input=precip_abs0 file="${n5}"
-t.rast.list    separator=" | " method=comma     input=precip_abs0
+t.register type=raster input=precip_abs0 file="${n5}"
+t.rast.list  separator=" | " method=comma     input=precip_abs0
 t.rast.list  input=precip_abs0
 t.rast.list  separator=" | " method=cols      input=precip_abs0
 t.rast.list  separator=" | " method=delta     input=precip_abs0