Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TXTExport refactoring #883

Merged
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 22 additions & 8 deletions festim/exports/txt_export.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,15 @@ class TXTExport(festim.Export):
field (str): the exported field ("solute", "1", "retention",
"T"...)
filename (str): the filename (must end with .txt).
write_at_last (bool): if True, the data will be exported at
the last export time. Otherwise, the data will be exported
at each export time. Defaults to False.
times (list, optional): if provided, the field will be
exported at these timesteps. Otherwise exports at all
timesteps. Defaults to None.
filter (bool): if True and the field is projected to a DG function space,
the duplicated vertices in the output file array are filtered except those near interfaces.
Defaults to True.
write_at_last (bool): if True, the data will be exported at
the last export time. Otherwise, the data will be exported
at each export time. Defaults to False.
header_format (str, optional): the format of column headers.
        Defaults to ".2e".

Expand All @@ -29,17 +32,28 @@ class TXTExport(festim.Export):
header (str): the header of the exported file.
V (fenics.FunctionSpace): the vector-function space for the exported field.

.. note::
The exported field is projected to DG if conservation of chemical potential is considered or
``traps_element_type`` is "DG".

"""

def __init__(
self, field, filename, times=None, write_at_last=False, header_format=".2e"
self,
field,
filename,
times=None,
filter=True,
write_at_last=False,
header_format=".2e",
) -> None:
super().__init__(field=field)
if times:
self.times = sorted(times)
else:
self.times = times
self.filename = filename
self.filter = filter
self.write_at_last = write_at_last
self.header_format = header_format

Expand Down Expand Up @@ -116,7 +130,7 @@ def initialise(self, mesh, project_to_DG=False, materials=None):
The array is then used to obtain indices of sorted elements for the data export.

.. note::
If DG1 is used, the duplicated vertices in the array are filtered except those near interfaces,
If DG1 is used and filter flag is True, the duplicated vertices in the array are filtered except those near interfaces,
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
If DG1 is used and filter flag is True, the duplicated vertices in the array are filtered except those near interfaces,
If DG1 is used and ``filter`` flag is True, the duplicated vertices in the array are filtered except those near interfaces,

The interfaces are defined by ``material.borders`` in the ``Materials`` list.

Args:
Expand All @@ -134,9 +148,9 @@ def initialise(self, mesh, project_to_DG=False, materials=None):
x = f.interpolate(f.Expression("x[0]", degree=1), self.V)
x_column = np.transpose([x.vector()[:]])

# if project_to_DG is True, get indices of duplicates near interfaces
# if filter is True, get indices of duplicates near interfaces
# and indices of the first elements from a pair of duplicates otherwise
if project_to_DG:
if project_to_DG and self.filter:
# Collect all borders
borders = []
for material in materials:
Expand Down Expand Up @@ -165,7 +179,7 @@ def initialise(self, mesh, project_to_DG=False, materials=None):
self._unique_indices = np.array(unique_indices)

else:
# Get list of unique indices
# Get list of sorted indices
self._unique_indices = np.argsort(x_column, axis=0)[:, 0]

self.data = x_column[self._unique_indices]
Expand Down
18 changes: 15 additions & 3 deletions test/unit/test_exports/test_txt_export.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,33 +111,45 @@ def test_sorted_by_x(self, my_export, function, mesh):
assert (np.diff(my_export.data[:, 0]) >= 0).all()

@pytest.mark.parametrize(
"materials,project_to_DG,export_len",
"materials,project_to_DG,filter,export_len",
[
(None, False, 11),
(None, False, False, 11),
(
[
Material(id=1, D_0=1, E_D=0, S_0=1, E_S=0, borders=[0, 0.5]),
Material(id=2, D_0=2, E_D=0, S_0=2, E_S=0, borders=[0.5, 1]),
],
True,
True,
12, # + 1 duplicate near the interface
),
(
[
Material(id=1, D_0=1, E_D=0, S_0=1, E_S=0, borders=[0, 0.5]),
Material(id=2, D_0=2, E_D=0, S_0=2, E_S=0, borders=[0.5, 1]),
],
True,
False,
20, # 2 * (len_vertices - 1)
),
],
)
def test_duplicates(
self, materials, project_to_DG, export_len, my_export, function, mesh
self, materials, project_to_DG, filter, export_len, my_export, function, mesh
):
"""
Checks that the exported data does not contain duplicates
except those near interfaces
"""
current_time = 1
my_export.function = function
my_export.filter = filter
my_export.initialise(mesh, project_to_DG, materials)
my_export.write(
current_time=current_time,
final_time=None,
)
print(my_export._unique_indices)
assert len(my_export.data) == export_len
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Instead of a print you can do the following:

Suggested change
print(my_export._unique_indices)
assert len(my_export.data) == export_len
assert len(my_export.data) == export_len, f"Wrong export length. Unique indices: {my_export._unique_indices}"

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Didn't know about such a functionality, but I forgot to delete this print-statement



Expand Down
Loading