New plotting functions for 3D graph visualisations. #56

Open · wants to merge 8 commits into base: develop
161 changes: 161 additions & 0 deletions notebooks/dynamic_vis.ipynb
@@ -0,0 +1,161 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Visualize any Graph\n",
"\n",
"Load and visualize a hierarchical/normal graph generated by using anemoi-graph HierarchicalGraphCreator or GraphCreator."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load the Graph"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from anemoi.graphs.plotting.interactive.graph_3d import plot_downscale, plot_upscale, plot_level"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Specify path to graph\n",
"path = 'path_to_graph.pt'\n",
"num_hidden = 1\n",
"\n",
"# Load graph and separate each sub-graph\n",
"hetero_data = torch.load(path, weights_only=False) \n",
"data_nodes = hetero_data['data'].x\n",
"hidden_nodes = []\n",
"hidden_edges = []\n",
"downscale_edges = []\n",
"upscale_edges = []\n",
"\n",
"if num_hidden > 1:\n",
"\n",
" data_to_hidden_edges = hetero_data[('data', 'to', 'hidden_1')].edge_index\n",
"\n",
" for i in range(1, num_hidden):\n",
" hidden_nodes.append(hetero_data[f'hidden_{i}'].x)\n",
" hidden_edges.append(hetero_data[(f'hidden_{i}', 'to', f'hidden_{i}')].edge_index)\n",
" downscale_edges.append(hetero_data[(f'hidden_{i}', 'to', f'hidden_{i+1}')].edge_index)\n",
" upscale_edges.append(hetero_data[(f'hidden_{num_hidden+1-i}', 'to', f'hidden_{num_hidden-i}')].edge_index)\n",
"\n",
" # Add hidden-most layer\n",
" hidden_nodes.append(hetero_data[f'hidden_{num_hidden}'].x) \n",
" hidden_edges.append(hetero_data[(f'hidden_{num_hidden}', 'to', f'hidden_{num_hidden}')].edge_index)\n",
" # Add symbolic graphs for last layers of downscaling and upscaling -> they do not have edges\n",
" downscale_edges.append(hetero_data[(f'hidden_{num_hidden}', 'to', f'hidden_{i}')].edge_index)\n",
" upscale_edges.append(hetero_data[('hidden_1', 'to', 'data')].edge_index)\n",
"\n",
" hidden_to_data_edges = hetero_data[('hidden_1', 'to', 'data')].edge_index\n",
"\n",
"else:\n",
" try:\n",
" data_to_hidden_edges = hetero_data[('data', 'to', 'hidden_1')].edge_index\n",
" hidden_nodes.append(hetero_data['hidden_1'].x)\n",
" hidden_edges.append(hetero_data[('hidden_1', 'to', 'hidden_1')].edge_index)\n",
" downscale_edges.append(hetero_data[('data', 'to', 'hidden_1')].edge_index)\n",
" upscale_edges.append(hetero_data[('hidden_1', 'to', 'data')].edge_index)\n",
" hidden_to_data_edges = hetero_data[('hidden_1', 'to', 'data')].edge_index\n",
" \n",
" except Exception:\n",
" data_to_hidden_edges = hetero_data[('data', 'to', 'hidden')].edge_index\n",
" hidden_nodes.append(hetero_data['hidden'].x)\n",
" hidden_edges.append(hetero_data[('hidden', 'to', 'hidden')].edge_index)\n",
" downscale_edges.append(hetero_data[('data', 'to', 'hidden')].edge_index)\n",
" upscale_edges.append(hetero_data[('hidden', 'to', 'data')].edge_index)\n",
" hidden_to_data_edges = hetero_data[('hidden', 'to', 'data')].edge_index\n",
"\n",
"print(f'Lat Lon grid has: {len(data_nodes)} points.')\n",
"for i in range(num_hidden):\n",
" print(f'Hidden layer {i+1} has: {len(hidden_nodes[i])} points')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Encoder + Downscaling"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig = plot_downscale(data_nodes, hidden_nodes, data_to_hidden_edges, downscale_edges, title='Downscaling', color='red', num_hidden=num_hidden, x_range=[0, 0,4], y_range=[0, 0.4], z_range=[0, 0.4])\n",
"fig.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Upscaling + Decoder\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig = plot_upscale(data_nodes, hidden_nodes, hidden_to_data_edges, upscale_edges, title='Upscaling', color='blue', num_hidden=num_hidden,x_range=[0, 0,4], y_range=[0, 0.4], z_range=[0, 0.4])\n",
"fig.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Same Level Processing"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig = plot_level(data_nodes, hidden_nodes, data_to_hidden_edges, hidden_edges, title='Level Processing', color='green', num_hidden=num_hidden, x_range=[0, 0,4], y_range=[0, 0.4], z_range=[0, 0.4])\n",
"fig.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "anemoi",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
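A small sketch (not part of this PR): in the notebook above, num_hidden is hard-coded, but the number of hidden levels could also be inferred from the loaded graph, assuming the hierarchical creator names its hidden node sets "hidden_1", "hidden_2", and so on.

import re

import torch

hetero_data = torch.load("path_to_graph.pt", weights_only=False)

# Collect the level index of every node set named "hidden_<n>"
hidden_levels = []
for node_type in hetero_data.node_types:
    match = re.fullmatch(r"hidden_(\d+)", node_type)
    if match:
        hidden_levels.append(int(match.group(1)))

# Fall back to a single hidden layer when the graph is not hierarchical
num_hidden = max(hidden_levels) if hidden_levels else 1
print(f"Detected {num_hidden} hidden level(s).")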
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -56,7 +56,6 @@ dependencies = [

optional-dependencies.all = [ ]
optional-dependencies.dev = [ "anemoi-graphs[docs,tests]" ]

optional-dependencies.docs = [
"nbsphinx",
"pandoc",
@@ -68,6 +67,7 @@ optional-dependencies.docs = [
"tomli",
]

optional-dependencies.notebooks = [ "nbformat>=5.10.4" ]
optional-dependencies.tests = [ "pytest", "pytest-mock" ]

urls.Documentation = "https://anemoi-graphs.readthedocs.io/"
104 changes: 101 additions & 3 deletions src/anemoi/graphs/inspect.py
@@ -18,9 +18,12 @@
from anemoi.graphs.plotting.displots import plot_distribution_edge_attributes
from anemoi.graphs.plotting.displots import plot_distribution_node_attributes
from anemoi.graphs.plotting.displots import plot_distribution_node_derived_attributes
from anemoi.graphs.plotting.interactive_html import plot_interactive_nodes
from anemoi.graphs.plotting.interactive_html import plot_interactive_subgraph
from anemoi.graphs.plotting.interactive_html import plot_isolated_nodes
from anemoi.graphs.plotting.interactive.edges import plot_interactive_subgraph
from anemoi.graphs.plotting.interactive.graph_3d import plot_downscale
from anemoi.graphs.plotting.interactive.graph_3d import plot_level
from anemoi.graphs.plotting.interactive.graph_3d import plot_upscale
from anemoi.graphs.plotting.interactive.nodes import plot_interactive_nodes
from anemoi.graphs.plotting.interactive.nodes import plot_isolated_nodes

LOGGER = logging.getLogger(__name__)

@@ -34,13 +37,17 @@ def __init__(
output_path: Path,
show_attribute_distributions: Optional[bool] = True,
show_nodes: Optional[bool] = False,
show_3d_graph: Optional[bool] = False,
num_hidden_layers: Optional[int] = 1,
**kwargs,
):
self.path = path
self.graph = torch.load(self.path)
self.output_path = output_path
self.show_attribute_distributions = show_attribute_distributions
self.show_nodes = show_nodes
self.show_3d_graph = show_3d_graph
self.num_hidden_layers = num_hidden_layers

if isinstance(self.output_path, str):
self.output_path = Path(self.output_path)
@@ -70,3 +77,94 @@ def inspect(self):
LOGGER.info("Saving interactive plots of nodes ...")
for nodes_name in self.graph.node_types:
plot_interactive_nodes(self.graph, nodes_name, out_file=self.output_path / f"{nodes_name}_nodes.html")

if self.show_3d_graph:

data_nodes = self.graph["data"].x
hidden_nodes = []
hidden_edges = []
downscale_edges = []
upscale_edges = []

if self.num_hidden_layers > 1:

data_to_hidden_edges = self.graph[("data", "to", "hidden_1")].edge_index

for i in range(1, self.num_hidden_layers):
hidden_nodes.append(self.graph[f"hidden_{i}"].x)
hidden_edges.append(self.graph[(f"hidden_{i}", "to", f"hidden_{i}")].edge_index)
downscale_edges.append(self.graph[(f"hidden_{i}", "to", f"hidden_{i+1}")].edge_index)
upscale_edges.append(
self.graph[
(f"hidden_{self.num_hidden_layers+1-i}", "to", f"hidden_{self.num_hidden_layers-i}")
].edge_index
)

# Add hidden-most layer
hidden_nodes.append(self.graph[f"hidden_{self.num_hidden_layers}"].x)
hidden_edges.append(
self.graph[
(f"hidden_{self.num_hidden_layers}", "to", f"hidden_{self.num_hidden_layers}")
].edge_index
)
# Add symbolic graphs for last layers of downscaling and upscaling -> they do not have edges
downscale_edges.append(self.graph[(f"hidden_{self.num_hidden_layers}", "to", f"hidden_{i}")].edge_index)
upscale_edges.append(self.graph[("hidden_1", "to", "data")].edge_index)

hidden_to_data_edges = self.graph[("hidden_1", "to", "data")].edge_index

else:
data_to_hidden_edges = self.graph[("data", "to", "hidden")].edge_index
hidden_nodes.append(self.graph["hidden"].x)
hidden_edges.append(self.graph[("hidden", "to", "hidden")].edge_index)
downscale_edges.append(self.graph[("data", "to", "hidden")].edge_index)
upscale_edges.append(self.graph[("hidden", "to", "data")].edge_index)
hidden_to_data_edges = self.graph[("hidden", "to", "data")].edge_index

# Encoder
ofile = self.output_path / "encoder.html"
encoder_fig = plot_downscale(
data_nodes,
hidden_nodes,
data_to_hidden_edges,
downscale_edges,
title="Downscaling",
color="red",
num_hidden=self.num_hidden_layers,
x_range=[0, 0.4],
y_range=[0, 0.4],
z_range=[0, 0.4],
)
encoder_fig.write_html(ofile)

# Processor
ofile = self.output_path / "processor.html"
level_fig = plot_level(
data_nodes,
hidden_nodes,
data_to_hidden_edges,
hidden_edges,
title="Level Processing",
color="green",
num_hidden=self.num_hidden_layers,
x_range=[0, 0.4],
y_range=[0, 0.4],
z_range=[0, 0.4],
)
level_fig.write_html(ofile)

# Decoder
ofile = self.output_path / "dencoder.html"
decoder_fig = plot_upscale(
data_nodes,
hidden_nodes,
hidden_to_data_edges,
upscale_edges,
title="Upscaling",
color="blue",
num_hidden=self.num_hidden_layers,
x_range=[0, 0.4],
y_range=[0, 0.4],
z_range=[0, 0.4],
)
decoder_fig.write_html(ofile)
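A minimal usage sketch (assumption: the class defined in inspect.py is exposed as GraphInspector; the class name lies outside the visible hunk). The 3D plots are only produced when the inspector is created with show_3d_graph=True, and num_hidden_layers should match the number of hidden levels in the graph.

from pathlib import Path

from anemoi.graphs.inspect import GraphInspector  # assumed class name

inspector = GraphInspector(
    path="hierarchical_graph.pt",   # graph file produced by anemoi-graphs
    output_path=Path("plots"),      # encoder.html / processor.html / decoder.html are written here
    show_3d_graph=True,             # enable the new 3D visualisations
    num_hidden_layers=3,            # hidden levels in the hierarchical graph
)
inspector.inspect()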
59 changes: 51 additions & 8 deletions src/anemoi/graphs/nodes/attributes.py
@@ -124,32 +124,75 @@
self.radius = radius
self.centre = centre

# def get_raw_values(self, nodes: NodeStorage, **kwargs) -> np.ndarray:
# """Compute the area associated to each node.

# It uses Voronoi diagrams to compute the area of each node.

# Parameters
# ----------
# nodes : NodeStorage
# Nodes of the graph.
# kwargs : dict
# Additional keyword arguments.

# Returns
# -------
# np.ndarray
# Attributes.
# """
# latitudes, longitudes = nodes.x[:, 0], nodes.x[:, 1]
# points = latlon_rad_to_cartesian((np.asarray(latitudes), np.asarray(longitudes)))
# sv = SphericalVoronoi(points, self.radius, self.centre)
# area_weights = sv.calculate_areas()

# LOGGER.debug(
# "There are %d of weights, which (unscaled) add up a total weight of %.2f.",
# len(area_weights),
# np.array(area_weights).sum(),
# )

# return area_weights

def get_raw_values(self, nodes: NodeStorage, **kwargs) -> np.ndarray:
"""Compute the area associated to each node.

It uses Voronoi diagrams to compute the area of each node.
Uses Voronoi diagrams to compute the area of each node on the sphere.

Parameters
----------
nodes : NodeStorage
Nodes of the graph.
Nodes of the graph. Assumes `nodes.x` is an array with latitude and longitude in radians.
kwargs : dict
Additional keyword arguments.

Returns
-------
np.ndarray
Attributes.
Array of area weights for each node.
"""
latitudes, longitudes = nodes.x[:, 0], nodes.x[:, 1]
points = latlon_rad_to_cartesian((np.asarray(latitudes), np.asarray(longitudes)))
# Convert latitudes and longitudes to ensure consistent types
latitudes = np.asarray(nodes.x[:, 0], dtype=np.float64)
longitudes = np.asarray(nodes.x[:, 1], dtype=np.float64)

# Convert to Cartesian coordinates
points = latlon_rad_to_cartesian((latitudes, longitudes))

# Instantiate SphericalVoronoi with consistent data types
sv = SphericalVoronoi(points, self.radius, self.centre)
area_weights = sv.calculate_areas()

# Calculate areas and handle possible dtype issues
try:
area_weights = sv.calculate_areas()
except ValueError as e:
LOGGER.error("Error in calculating Voronoi areas: %s", e)
raise

LOGGER.debug(
"There are %d of weights, which (unscaled) add up a total weight of %.2f.",
"There are %d weights, which (unscaled) add up to a total weight of %.2f.",
len(area_weights),
np.array(area_weights).sum(),
)

return area_weights


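For context, a minimal standalone sketch of the spherical Voronoi area-weight computation that get_raw_values performs, assuming a unit sphere centred at the origin and latitudes/longitudes given in radians (here the six vertices of an octahedron, so every weight should come out as 4*pi/6).

import numpy as np
from scipy.spatial import SphericalVoronoi

# Example node coordinates (lat, lon) in radians; in the attribute these come from nodes.x.
latitudes = np.asarray([0.0, 0.0, 0.0, 0.0, np.pi / 2, -np.pi / 2], dtype=np.float64)
longitudes = np.asarray([0.0, np.pi / 2, np.pi, -np.pi / 2, 0.0, 0.0], dtype=np.float64)

# Convert to 3D Cartesian points on the unit sphere
# (the same conversion latlon_rad_to_cartesian performs, up to axis conventions).
points = np.stack(
    [
        np.cos(latitudes) * np.cos(longitudes),
        np.cos(latitudes) * np.sin(longitudes),
        np.sin(latitudes),
    ],
    axis=-1,
)

sv = SphericalVoronoi(points, radius=1.0, center=np.zeros(3))
area_weights = sv.calculate_areas()  # one (unscaled) area per node; they sum to 4*pi
print(area_weights, area_weights.sum())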