Sfoglia il codice sorgente

Move g.download.location to core, create grass.utils lib (#1715)

* This moves g.download.location from addons to core to avoid need to install it in every CI.
* g.download.location partially matches functionality of download in Data tab, so the module is important for feature parity.
* Shared code is now in new subpackage of grass, grass.utils (original code is from g.extension which still has its own copy).
* grass.utils uses an empty init file. The preferred import style is (as of now) to import individual functions from individual modules.
* Make location name optional for is_location_valid function to support cases when the directory is location.
* Use pathlib in interface (return value) and for better suffix/extension checks.
Vaclav Petras 3 anni fa
parent
commit
94bcea4216

+ 1 - 2
.github/workflows/test_thorough.bat

@@ -1,6 +1,5 @@
 set grass=%1
 set python=C:\OSGeo4W64\bin\python3
 
-call %grass% --tmp-location XY --exec g.extension g.download.location
-call %grass% --tmp-location XY --exec g.download.location url=https://grass.osgeo.org/sampledata/north_carolina/nc_spm_full_v2alpha2.tar.gz dbase=%USERPROFILE%
+call %grass% --tmp-location XY --exec g.download.location url=https://grass.osgeo.org/sampledata/north_carolina/nc_spm_full_v2alpha2.tar.gz path=%USERPROFILE%
 call %grass% --tmp-location XY --exec %python% -m grass.gunittest.main --grassdata %USERPROFILE% --location nc_spm_full_v2alpha2 --location-type nc --min-success 60

+ 1 - 3
.github/workflows/test_thorough.sh

@@ -4,9 +4,7 @@
 set -e
 
 grass --tmp-location XY --exec \
-    g.extension g.download.location
-grass --tmp-location XY --exec \
-    g.download.location url=https://grass.osgeo.org/sampledata/north_carolina/nc_spm_full_v2alpha2.tar.gz dbase=$HOME
+    g.download.location url=https://grass.osgeo.org/sampledata/north_carolina/nc_spm_full_v2alpha2.tar.gz path=$HOME
 
 grass --tmp-location XY --exec \
     python3 -m grass.gunittest.main \

+ 9 - 169
gui/wxpython/startup/locdownload.py

@@ -20,26 +20,16 @@ from __future__ import print_function
 
 import os
 import sys
-import tempfile
 import shutil
 import textwrap
 import time
 
-try:
-    from urllib2 import HTTPError, URLError
-    from urllib import request, urlretrieve
-except ImportError:
-    # there is also HTTPException, perhaps change to list
-    from urllib.error import HTTPError, URLError
-    from urllib.request import urlretrieve
-    from urllib import request
-
 import wx
 from wx.lib.newevent import NewEvent
 
-from grass.script import debug
-from grass.script.utils import try_rmdir
-
+from grass.script.utils import try_rmdir, legalize_vector_name
+from grass.utils.download import download_and_extract, name_from_url, DownloadError
+from grass.grassdb.checks import is_location_valid
 from grass.script.setup import set_gui_path
 
 set_gui_path()
@@ -94,12 +84,6 @@ LOCATIONS = [
 ]
 
 
-class DownloadError(Exception):
-    """Error happened during download or when processing the file"""
-
-    pass
-
-
 class RedirectText(object):
     def __init__(self, window):
         self.out = window
@@ -155,82 +139,6 @@ class RedirectText(object):
         )
 
 
-# copy from g.extension, potentially move to library
-def move_extracted_files(extract_dir, target_dir, files):
-    """Fix state of extracted file by moving them to different diretcory
-
-    When extracting, it is not clear what will be the root directory
-    or if there will be one at all. So this function moves the files to
-    a different directory in the way that if there was one directory extracted,
-    the contained files are moved.
-    """
-    debug("move_extracted_files({0})".format(locals()))
-    if len(files) == 1:
-        shutil.copytree(os.path.join(extract_dir, files[0]), target_dir)
-    else:
-        if not os.path.exists(target_dir):
-            os.mkdir(target_dir)
-        for file_name in files:
-            actual_file = os.path.join(extract_dir, file_name)
-            if os.path.isdir(actual_file):
-                # copy_tree() from distutils failed to create
-                # directories before copying files time to time
-                # (when copying to recently deleted directory)
-                shutil.copytree(actual_file, os.path.join(target_dir, file_name))
-            else:
-                shutil.copy(actual_file, os.path.join(target_dir, file_name))
-
-
-# copy from g.extension, potentially move to library
-def extract_zip(name, directory, tmpdir):
-    """Extract a ZIP file into a directory"""
-    debug(
-        "extract_zip(name={name}, directory={directory},"
-        " tmpdir={tmpdir})".format(name=name, directory=directory, tmpdir=tmpdir),
-        3,
-    )
-    try:
-        import zipfile
-
-        zip_file = zipfile.ZipFile(name, mode="r")
-        file_list = zip_file.namelist()
-        # we suppose we can write to parent of the given dir
-        # (supposing a tmp dir)
-        extract_dir = os.path.join(tmpdir, "extract_dir")
-        os.mkdir(extract_dir)
-        for subfile in file_list:
-            # this should be safe in Python 2.7.4
-            zip_file.extract(subfile, extract_dir)
-        files = os.listdir(extract_dir)
-        move_extracted_files(extract_dir=extract_dir, target_dir=directory, files=files)
-    except zipfile.BadZipfile as error:
-        raise DownloadError(_("ZIP file is unreadable: {0}").format(error))
-
-
-# copy from g.extension, potentially move to library
-def extract_tar(name, directory, tmpdir):
-    """Extract a TAR or a similar file into a directory"""
-    debug(
-        "extract_tar(name={name}, directory={directory},"
-        " tmpdir={tmpdir})".format(name=name, directory=directory, tmpdir=tmpdir),
-        3,
-    )
-    try:
-        import tarfile  # we don't need it anywhere else
-
-        tar = tarfile.open(name)
-        extract_dir = os.path.join(tmpdir, "extract_dir")
-        os.mkdir(extract_dir)
-        tar.extractall(path=extract_dir)
-        files = os.listdir(extract_dir)
-        move_extracted_files(extract_dir=extract_dir, target_dir=directory, files=files)
-    except tarfile.TarError as error:
-        raise DownloadError(_("Archive file is unreadable: {0}").format(error))
-
-
-extract_tar.supported_formats = ["tar.gz", "gz", "bz2", "tar", "gzip", "targz"]
-
-
 # based on https://blog.shichao.io/2012/10/04/progress_speed_indicator_for_urlretrieve_in_python.html
 def reporthook(count, block_size, total_size):
     global start_time
@@ -259,64 +167,6 @@ def reporthook(count, block_size, total_size):
     )
 
 
-# based on g.extension, potentially move to library
-def download_and_extract(source):
-    """Download a file (archive) from URL and uncompress it"""
-    tmpdir = tempfile.mkdtemp()
-    Debug.msg(1, "Tmpdir: {}".format(tmpdir))
-    directory = os.path.join(tmpdir, "location")
-    http_error_message = _("Download file from <{url}>, " "return status code {code}, ")
-    url_error_message = _(
-        "Download file from <{url}>, " "failed. Check internet connection."
-    )
-    if source.endswith(".zip"):
-        archive_name = os.path.join(tmpdir, "location.zip")
-        try:
-            filename, headers = urlretrieve(source, archive_name, reporthook)
-        except HTTPError as err:
-            raise DownloadError(
-                http_error_message.format(
-                    url=source,
-                    code=err,
-                ),
-            )
-        except URLError:
-            raise DownloadError(url_error_message.format(url=source))
-        if headers.get("content-type", "") != "application/zip":
-            raise DownloadError(
-                _(
-                    "Download of <{url}> failed" " or file <{name}> is not a ZIP file"
-                ).format(url=source, name=filename)
-            )
-        extract_zip(name=archive_name, directory=directory, tmpdir=tmpdir)
-    elif (
-        source.endswith(".tar.gz")
-        or source.rsplit(".", 1)[1] in extract_tar.supported_formats
-    ):
-        if source.endswith(".tar.gz"):
-            ext = "tar.gz"
-        else:
-            ext = source.rsplit(".", 1)[1]
-        archive_name = os.path.join(tmpdir, "location." + ext)
-        try:
-            urlretrieve(source, archive_name, reporthook)
-        except HTTPError as err:
-            raise DownloadError(
-                http_error_message.format(
-                    url=source,
-                    code=err,
-                ),
-            )
-        except URLError:
-            raise DownloadError(url_error_message.format(url=source))
-        extract_tar(name=archive_name, directory=directory, tmpdir=tmpdir)
-    else:
-        # probably programmer error
-        raise DownloadError(_("Unknown format '{0}'.").format(source))
-    assert os.path.isdir(directory)
-    return directory
-
-
 def download_location(url, name, database):
     """Wrapper to return DownloadError by value
 
@@ -325,7 +175,7 @@ def download_location(url, name, database):
     try:
         # TODO: the unpacking could go right to the path (but less
         # robust) or replace copytree here with move
-        directory = download_and_extract(source=url)
+        directory = download_and_extract(source=url, reporthook=reporthook)
         destination = os.path.join(database, name)
         if not is_location_valid(directory):
             return _("Downloaded location is not valid")
@@ -336,23 +186,9 @@ def download_location(url, name, database):
     return None
 
 
-# based on grass.py (to be moved to future "grass.init")
-def is_location_valid(location):
-    """Return True if GRASS Location is valid
-
-    :param location: path of a Location
-    """
-    # DEFAULT_WIND file should not be required until you do something
-    # that actually uses them. The check is just a heuristic; a directory
-    # containing a PERMANENT/DEFAULT_WIND file is probably a GRASS
-    # location, while a directory lacking it probably isn't.
-    # TODO: perhaps we can relax this and require only permanent
-    return os.access(os.path.join(location, "PERMANENT", "DEFAULT_WIND"), os.F_OK)
-
-
 def location_name_from_url(url):
     """Create location name from URL"""
-    return url.rsplit("/", 1)[1].split(".", 1)[0].replace("-", "_").replace(" ", "_")
+    return legalize_vector_name(name_from_url(url))
 
 
 DownloadDoneEvent, EVT_DOWNLOAD_DONE = NewEvent()
@@ -508,6 +344,10 @@ class LocationDownloadPanel(wx.Panel):
             self._change_download_btn_label()
 
         def terminate_download_callback(event):
+            # Clean up after urllib urlretrieve which is used internally
+            # in grass.utils.
+            from urllib import request  # pylint: disable=import-outside-toplevel
+
             self._download_in_progress = False
             request.urlcleanup()
             sys.stdout.write("Download aborted")

+ 15 - 1
python/grass/Makefile

@@ -5,7 +5,21 @@ include $(MODULE_TOPDIR)/include/Make/Python.make
 
 PYDIR = $(ETC)/python/grass
 
-SUBDIRS = app benchmark exceptions script ctypes grassdb temporal pygrass pydispatch imaging gunittest bandref jupyter
+SUBDIRS = \
+	app \
+	bandref \
+	benchmark \
+	ctypes \
+	exceptions \
+	grassdb \
+	gunittest \
+	imaging \
+	jupyter \
+	pydispatch \
+	pygrass \
+	script \
+	temporal \
+	utils
 
 default: $(PYDIR)/__init__.py
 	$(MAKE) subdirs

+ 6 - 6
python/grass/grassdb/checks.py

@@ -49,19 +49,19 @@ def is_mapset_valid(mapset_path):
     return os.access(os.path.join(mapset_path, "WIND"), os.R_OK)
 
 
def is_location_valid(path, location=None):
    """Return True if GRASS Location is valid

    :param path: Path to a Location or to a GRASS GIS database directory
    :param location: name of a Location if not part of *path*
    """
    # DEFAULT_WIND file should not be required until you do something
    # that actually uses them. The check is just a heuristic; a directory
    # containing a PERMANENT/DEFAULT_WIND file is probably a GRASS
    # location, while a directory lacking it probably isn't.
    if location:
        # A separate location name was given, so *path* is the database
        # directory and the location is a subdirectory of it.
        path = os.path.join(path, location)
    return os.access(os.path.join(path, "PERMANENT", "DEFAULT_WIND"), os.F_OK)
 
 
 def is_mapset_current(database, location, mapset):

+ 20 - 0
python/grass/utils/Makefile

@@ -0,0 +1,20 @@
+MODULE_TOPDIR = ../../..
+
+include $(MODULE_TOPDIR)/include/Make/Other.make
+include $(MODULE_TOPDIR)/include/Make/Python.make
+
+DSTDIR = $(ETC)/python/grass/utils
+
+MODULES = \
+	download
+
+PYFILES := $(patsubst %,$(DSTDIR)/%.py,$(MODULES) __init__)
+PYCFILES := $(patsubst %,$(DSTDIR)/%.pyc,$(MODULES) __init__)
+
+default: $(PYFILES) $(PYCFILES)
+
+$(DSTDIR):
+	$(MKDIR) $@
+
+$(DSTDIR)/%: % | $(DSTDIR)
+	$(INSTALL_DATA) $< $@

+ 0 - 0
python/grass/utils/__init__.py


+ 194 - 0
python/grass/utils/download.py

@@ -0,0 +1,194 @@
+# MODULE:    grass.utils
+#
+# AUTHOR(S): Vaclav Petras <wenzeslaus gmail com>
+#
+# PURPOSE:   Collection of various helper general (non-GRASS) utilities
+#
+# COPYRIGHT: (C) 2021 Vaclav Petras, and by the GRASS Development Team
+#
+#            This program is free software under the GNU General Public
+#            License (>=v2). Read the file COPYING that comes with GRASS
+#            for details.
+
+"""Download and extract various archives"""
+
+import os
+import shutil
+import tarfile
+import tempfile
+import zipfile
+from pathlib import Path
+from urllib.error import HTTPError, URLError
+from urllib.parse import urlparse
+from urllib.request import urlretrieve
+
+
def debug(*args, **kwargs):
    """Print a debug message (to be used in this module only)

    Using the standard grass.script debug function is nice, but it may create
    a circular dependency if this module is used from grass.script, so this
    wrapper lazily imports the standard function instead.
    """
    # Lazy import to avoid a potential circular dependency.
    import grass.script as gs  # pylint: disable=import-outside-toplevel

    gs.debug(*args, **kwargs)
+
+
class DownloadError(Exception):
    """Raised when a download fails or the downloaded file cannot be processed"""
+
+
# modified copy from g.extension
# TODO: Possibly migrate to shutil.unpack_archive.
def extract_tar(name, directory, tmpdir):
    """Extract a TAR or a similar file into a directory

    :param name: path of the archive file
    :param directory: directory where the extracted content should end up
    :param tmpdir: scratch directory the function may write into

    :raises DownloadError: when the archive is unreadable or truncated
    """
    debug(
        f"extract_tar(name={name}, directory={directory}, tmpdir={tmpdir})",
        3,
    )
    try:
        # Extract into a scratch subdirectory first because the archive may
        # or may not contain a single root directory.
        extract_dir = os.path.join(tmpdir, "extract_dir")
        os.mkdir(extract_dir)
        # Context manager ensures the tar file is closed even on error.
        with tarfile.open(name) as tar:
            tar.extractall(path=extract_dir)
        files = os.listdir(extract_dir)
        _move_extracted_files(
            extract_dir=extract_dir, target_dir=directory, files=files
        )
    except tarfile.TarError as error:
        raise DownloadError(
            _("Archive file is unreadable: {0}").format(error)
        ) from error
    except EOFError as error:
        raise DownloadError(
            _("Archive file is incomplete: {0}").format(error)
        ) from error


extract_tar.supported_formats = ["tar.gz", "gz", "bz2", "tar", "gzip", "targz", "xz"]
+
+
# modified copy from g.extension
# TODO: Possibly migrate to shutil.unpack_archive.
def extract_zip(name, directory, tmpdir):
    """Extract a ZIP file into a directory

    :param name: path of the ZIP file
    :param directory: directory where the extracted content should end up
    :param tmpdir: scratch directory the function may write into

    :raises DownloadError: when the ZIP file is unreadable
    """
    debug(
        f"extract_zip(name={name}, directory={directory}, tmpdir={tmpdir})",
        3,
    )
    try:
        # Extract into a scratch subdirectory first because the archive may
        # or may not contain a single root directory.
        # we suppose we can write to parent of the given dir
        # (supposing a tmp dir)
        extract_dir = os.path.join(tmpdir, "extract_dir")
        os.mkdir(extract_dir)
        # Context manager ensures the ZIP file is closed even on error.
        with zipfile.ZipFile(name, mode="r") as zip_file:
            for subfile in zip_file.namelist():
                zip_file.extract(subfile, extract_dir)
        files = os.listdir(extract_dir)
        _move_extracted_files(
            extract_dir=extract_dir, target_dir=directory, files=files
        )
    except zipfile.BadZipfile as error:
        # Chain the cause for consistency with extract_tar.
        raise DownloadError(
            _("ZIP file is unreadable: {0}").format(error)
        ) from error
+
+
# modified copy from g.extension
def _move_extracted_files(extract_dir, target_dir, files):
    """Fix state of extracted file by moving them to different directory

    When extracting, it is not clear what will be the root directory
    or if there will be one at all. So this function moves the files to
    a different directory in the way that if there was one directory extracted,
    the contained files are moved.
    """
    debug("_move_extracted_files({})".format(locals()))
    if len(files) == 1:
        # Single entry: a lone directory is flattened, a lone file is copied.
        lone_entry = os.path.join(extract_dir, files[0])
        if os.path.isdir(lone_entry):
            shutil.copytree(lone_entry, target_dir)
        else:
            shutil.copy(lone_entry, target_dir)
        return
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)
    for file_name in files:
        source = os.path.join(extract_dir, file_name)
        destination = os.path.join(target_dir, file_name)
        if os.path.isdir(source):
            # Choice of copy tree function:
            # shutil.copytree() fails when subdirectory exists.
            # However, distutils.copy_tree() may fail to create directories before
            # copying files into them when copying to a recently deleted directory.
            shutil.copytree(source, destination)
        else:
            shutil.copy(source, destination)
+
+
# modified copy from g.extension
# TODO: remove the hardcoded location/extension, use general name
def download_and_extract(source, reporthook=None):
    """Download a file (archive) from URL and extract it

    :param source: URL of a .zip or tar-like archive
    :param reporthook: progress callback passed to urllib.request.urlretrieve

    :returns: pathlib.Path of the directory with the extracted content
    :raises DownloadError: when download or extraction fails

    Call urllib.request.urlcleanup() to clean up after urlretrieve if you terminate
    this function from another thread.
    """
    # Only the path component of the URL decides the archive type
    # (query strings and fragments are ignored).
    source_path = Path(urlparse(source).path)
    tmpdir = tempfile.mkdtemp()
    debug("Tmpdir: {}".format(tmpdir))
    directory = Path(tmpdir) / "extracted"
    http_error_message = _("Download file from <{url}>, return status code {code}, ")
    url_error_message = _(
        "Download file from <{url}>, failed. Check internet connection."
    )
    if source_path.suffix == ".zip":
        archive_name = os.path.join(tmpdir, "archive.zip")
        try:
            filename, headers = urlretrieve(source, archive_name, reporthook)
        except HTTPError as err:
            raise DownloadError(
                http_error_message.format(url=source, code=err)
            ) from err
        except URLError as err:
            raise DownloadError(url_error_message.format(url=source)) from err
        if headers.get("content-type", "") != "application/zip":
            raise DownloadError(
                _(
                    "Download of <{url}> failed or file <{name}> is not a ZIP file"
                ).format(url=source, name=filename)
            )
        extract_zip(name=archive_name, directory=directory, tmpdir=tmpdir)
    elif source_path.suffix and source_path.suffix[1:] in extract_tar.supported_formats:
        # Keep the full (possibly double, e.g. .tar.gz) extension.
        ext = "".join(source_path.suffixes)
        archive_name = os.path.join(tmpdir, "archive" + ext)
        try:
            urlretrieve(source, archive_name, reporthook)
        except HTTPError as err:
            raise DownloadError(
                http_error_message.format(url=source, code=err)
            ) from err
        except URLError as err:
            raise DownloadError(url_error_message.format(url=source)) from err
        extract_tar(name=archive_name, directory=directory, tmpdir=tmpdir)
    else:
        # probably programmer error
        raise DownloadError(_("Unknown format '{}'.").format(source))
    return directory
+
+
def name_from_url(url):
    """Extract a bare name (no directories, no extensions) from a URL"""
    stem = Path(urlparse(url).path).stem
    # Special treatment of double extensions such as .tar.gz:
    # the first stem still carries the .tar part.
    if stem.endswith(".tar"):
        stem = stem[: -len(".tar")]
    return stem

+ 1 - 0
scripts/Makefile

@@ -19,6 +19,7 @@ SUBDIRS = \
 	db.test \
 	db.univar \
 	g.bands \
+	g.download.location \
 	g.extension \
 	g.extension.all \
 	g.manual \

+ 7 - 0
scripts/g.download.location/Makefile

@@ -0,0 +1,7 @@
+MODULE_TOPDIR = ../..
+
+PGM = g.download.location
+
+include $(MODULE_TOPDIR)/include/Make/Script.make
+
+default: script

+ 30 - 0
scripts/g.download.location/g.download.location.html

@@ -0,0 +1,30 @@
+<h2>DESCRIPTION</h2>
+
+<em>g.download.location</em> downloads an archived (e.g.,
+<code>.zip</code> or <code>.tar.gz</code>) location from a given URL
+and unpacks it to a specified or current GRASS GIS Spatial Database.
+URL can be also a local file on the disk.
+
+If the archive contains a directory which contains a location, the module
+will recognize that and use the location automatically.
+The first directory that is a valid location is used.
+Other locations or any other files are ignored.
+
+<h2>SEE ALSO</h2>
+
+<em>
+  <a href="https://grass.osgeo.org/grass-stable/manuals/g.mapset.html">g.mapset</a>,
+  <a href="https://grass.osgeo.org/grass-stable/manuals/g.mapsets.html">g.mapsets</a>,
+  <a href="https://grass.osgeo.org/grass-stable/manuals/r.proj.html">r.proj</a>,
+  <a href="https://grass.osgeo.org/grass-stable/manuals/v.proj.html">v.proj</a>,
+  <a href="g.proj.all.html">g.proj.all</a>
+</em>
+
+<h2>AUTHORS</h2>
+
+Vaclav Petras, <a href="http://geospatial.ncsu.edu/osgeorel/">NCSU GeoForAll Lab</a>
+
+<!--
+<p>
+<i>Last changed: $Date$</i>
+-->

+ 151 - 0
scripts/g.download.location/g.download.location.py

@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+############################################################################
+#
+# MODULE:    g.download.location
+# AUTHOR(S): Vaclav Petras <wenzeslaus gmail com>
+# PURPOSE:   Download and extract location from web
+# COPYRIGHT: (C) 2017 by the GRASS Development Team
+#
+#    This program is free software under the GNU General
+#    Public License (>=v2). Read the file COPYING that
+#    comes with GRASS for details.
+#
+#############################################################################
+
+"""Download GRASS Locations"""
+
+# %module
+# % label: Download GRASS Location from the web
+# % description: Get GRASS Location from an URL or file path
+# % keyword: general
+# % keyword: data
+# % keyword: download
+# % keyword: import
+# %end
+# %option
+# % key: url
+# % multiple: no
+# % type: string
+# % label: URL of the archive with a location to be downloaded
+# % description: URL of ZIP, TAR.GZ, or other similar archive
+# % required: yes
+# %end
+# %option G_OPT_M_LOCATION
+# % key: name
+# % required: no
+# % multiple: no
+# % key_desc: name
+# %end
+# %option G_OPT_M_DBASE
+# % key: path
+# % required: no
+# % multiple: no
+# %end
+
+import atexit
+import os
+import shutil
+from pathlib import Path
+
+import grass.script as gs
+from grass.grassdb.checks import is_location_valid
+from grass.script.utils import try_rmdir
+from grass.utils.download import DownloadError, download_and_extract, name_from_url
+
+
def find_location_in_directory(path, recurse=0):
    """Return path to location in one of the subdirectories or None

    The first location found is returned. The expected usage is looking for one
    location somewhere nested in subdirectories.

    By default only the immediate subdirectories of the provided directory are
    tested, but with ``recurse >= 1`` additional levels of subdirectories
    are tested for being locations.

    Directory names are sorted to provide a stable result.

    :param path: Path to the directory to search
    :param recurse: How many additional levels of subdirectories to explore
    """
    assert recurse >= 0
    # Sorted list of immediate subdirectories only (files are skipped).
    subdirs = sorted(
        entry
        for entry in (os.path.join(path, name) for name in os.listdir(path))
        if os.path.isdir(entry)
    )
    for subdir in subdirs:
        if is_location_valid(subdir):
            return subdir
    if recurse:
        # Breadth-first: all candidates at this level were checked above.
        for subdir in subdirs:
            found = find_location_in_directory(subdir, recurse - 1)
            if found:
                return found
    return None
+
+
def location_name_from_url(url):
    """Create location name from URL"""
    raw_name = name_from_url(url)
    return gs.legalize_vector_name(raw_name)
+
+
def main(options, unused_flags):
    """Download and copy location to destination

    :param options: parsed module options (url, name, path)
    :param unused_flags: parsed module flags (none used)
    """
    url = options["url"]
    name = options["name"]
    database = options["path"]

    if not database:
        # Use the current database path.
        database = gs.gisenv()["GISDBASE"]
    if not name:
        name = location_name_from_url(url)
    destination = Path(database) / name

    if destination.exists():
        gs.fatal(
            _("Location named <{}> already exists, download canceled").format(name)
        )

    gs.message(_("Downloading and extracting..."))
    try:
        # TODO: the unpacking could go right to the path (but less
        # robust) or replace copytree here with move
        directory = download_and_extract(url)
        # Register the cleanup right away so the temporary directory is
        # removed even when we bail out below.
        atexit.register(lambda: try_rmdir(directory))
        if not directory.is_dir():
            gs.fatal(_("Archive contains only one file and no mapset directories"))
    except DownloadError as error:
        gs.fatal(_("Unable to get the location: {error}").format(error=error))
    if not is_location_valid(directory):
        gs.verbose(_("Searching for valid location..."))
        # This in fact deals with a location being on the third level of
        # directories thanks to how the extraction functions work
        # (leaving out one level).
        result = find_location_in_directory(directory, recurse=1)
        if result:
            # We just want to show relative path in the message.
            # The relative path misses the root directory (name), because we
            # lose it on the way. (We should use the parent directory to get
            # the full relative path, but the directory name is different now.
            # This is the consequence of how the extract functions work.)
            relative = os.path.relpath(result, start=directory)
            gs.verbose(
                _("Location found in a nested directory '{directory}'").format(
                    directory=relative
                )
            )
            directory = result
        else:
            # The list is similarly misleading as the relative path above
            # as it misses the root directory, but it still should be useful.
            files_and_dirs = os.listdir(directory)
            gs.fatal(
                _(
                    "The downloaded file is not a valid GRASS Location."
                    " The extracted file contains these files and directories:"
                    "\n{files_and_dirs}"
                ).format(files_and_dirs=" ".join(files_and_dirs))
            )
    gs.verbose(_("Copying to final destination..."))
    shutil.copytree(src=directory, dst=destination)
    gs.message(_("Path to the location now <{path}>").format(path=destination))
+
+
+if __name__ == "__main__":
+    main(*gs.parser())