Skip to content

Commit

Permalink
Add function to import hotspot dataset (#1386)
Browse files Browse the repository at this point in the history
Co-authored-by: Wei Ji <[email protected]>
Co-authored-by: Meghan Jones <[email protected]>
  • Loading branch information
3 people authored Aug 26, 2021
1 parent 0881d0f commit 8a6db70
Show file tree
Hide file tree
Showing 5 changed files with 46 additions and 0 deletions.
1 change: 1 addition & 0 deletions doc/api/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,7 @@ and store them in the GMT cache folder.
datasets.load_sample_bathymetry
datasets.load_usgs_quakes
datasets.load_fractures_compilation
datasets.load_hotspots

.. automodule:: pygmt.exceptions

Expand Down
1 change: 1 addition & 0 deletions pygmt/datasets/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from pygmt.datasets.earth_relief import load_earth_relief
from pygmt.datasets.samples import (
load_fractures_compilation,
load_hotspots,
load_japan_quakes,
load_ocean_ridge_points,
load_sample_bathymetry,
Expand Down
26 changes: 26 additions & 0 deletions pygmt/datasets/samples.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,3 +123,29 @@ def load_fractures_compilation():
fname = which("@fractures_06.txt", download="c")
data = pd.read_csv(fname, header=None, sep=r"\s+", names=["azimuth", "length"])
return data[["length", "azimuth"]]


def load_hotspots():
    """
    Load a table with the locations, names, and suggested symbol sizes of
    hotspots.

    This is the ``@hotspots.txt`` dataset used in the GMT tutorials, with data
    from Mueller, Royer, and Lawver, 1993, Geology, vol. 21, pp. 275-278. The
    main 5 hotspots used by Doubrovine et al. [2012] have symbol sizes twice
    the size of all other hotspots.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table with columns "longitude", "latitude", "symbol_size",
        and "place_name".
    """
    fname = which("@hotspots.txt", download="c")
    # Column names must match the docstring above; the file itself has a
    # 3-line header that is skipped rather than parsed.
    columns = ["longitude", "latitude", "symbol_size", "place_name"]
    data = pd.read_table(filepath_or_buffer=fname, sep="\t", skiprows=3, names=columns)
    return data
1 change: 1 addition & 0 deletions pygmt/helpers/testing.py
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,7 @@ def download_test_data():
"@N00W090.earth_relief_03m_p.nc",
# Other cache files
"@fractures_06.txt",
"@hotspots.txt",
"@ridge.txt",
"@srtm_tiles.nc", # needed for 03s and 01s relief data
"@Table_5_11.txt",
Expand Down
17 changes: 17 additions & 0 deletions pygmt/tests/test_datasets_samples.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
"""
Test basic functionality for loading sample datasets.
"""
import pandas as pd
from pygmt.datasets import (
load_fractures_compilation,
load_hotspots,
load_japan_quakes,
load_ocean_ridge_points,
load_sample_bathymetry,
Expand Down Expand Up @@ -72,3 +74,18 @@ def test_fractures_compilation():
assert summary.loc["max", "length"] == 984.652
assert summary.loc["min", "azimuth"] == 0.0
assert summary.loc["max", "azimuth"] == 360.0


def test_hotspots():
    """
    Check that the @hotspots.txt dataset loads without errors.
    """
    data = load_hotspots()
    # Type check first, then shape, then the exact column layout.
    assert isinstance(data, pd.DataFrame)
    assert data.shape == (55, 4)
    expected_columns = [
        "longitude",
        "latitude",
        "symbol_size",
        "place_name",
    ]
    assert list(data.columns) == expected_columns

0 comments on commit 8a6db70

Please sign in to comment.