diff --git a/.github/workflows/push_pull.yml b/.github/workflows/push_pull.yml
index 9044cd67cce..1a2eb4e39fd 100644
--- a/.github/workflows/push_pull.yml
+++ b/.github/workflows/push_pull.yml
@@ -35,7 +35,7 @@ jobs:
debian:
runs-on: ubuntu-latest
container:
- image: ghcr.io/espressomd/docker/debian:fbdf2f2f9d76b761c0aa1308f17fb17e38501850-base-layer
+ image: ghcr.io/espressomd/docker/debian:339903979196fd7e72127f2cb5bfb27759d129f9-base-layer
credentials:
username: ${{ github.actor }}
password: ${{ secrets.github_token }}
@@ -74,7 +74,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository == 'espressomd/espresso' }}
container:
- image: ghcr.io/espressomd/docker/ubuntu-wo-dependencies:fbdf2f2f9d76b761c0aa1308f17fb17e38501850-base-layer
+ image: ghcr.io/espressomd/docker/ubuntu-wo-dependencies:339903979196fd7e72127f2cb5bfb27759d129f9-base-layer
credentials:
username: ${{ github.actor }}
password: ${{ secrets.github_token }}
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index baf65493f2c..e80fe191cf5 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,4 +1,4 @@
-image: ghcr.io/espressomd/docker/ubuntu-22.04:fbdf2f2f9d76b761c0aa1308f17fb17e38501850
+image: ghcr.io/espressomd/docker/ubuntu-22.04:339903979196fd7e72127f2cb5bfb27759d129f9
stages:
- prepare
@@ -144,7 +144,7 @@ no_rotation:
fedora:36:
<<: *global_job_definition
stage: build
- image: ghcr.io/espressomd/docker/fedora:fbdf2f2f9d76b761c0aa1308f17fb17e38501850
+ image: ghcr.io/espressomd/docker/fedora:339903979196fd7e72127f2cb5bfb27759d129f9
variables:
with_cuda: 'false'
with_gsl: 'false'
@@ -390,6 +390,7 @@ installation:
- bash maintainer/CI/build_cmake.sh
- cd build
- make install
+ - cmake . -D ESPRESSO_BUILD_TESTS=ON
# get path of installed files
- CI_INSTALL_DIR="/tmp/espresso-unit-tests"
- CI_INSTALL_PYTHON_PATH=$(dirname $(find "${CI_INSTALL_DIR}/lib" -name espressomd))
@@ -398,9 +399,9 @@ installation:
- cp -r "src/python/object_in_fluid" "${CI_INSTALL_PYTHON_PATH}/object_in_fluid"
# run all tests with the installed files
- sed -i "s|$(pwd)/pypresso|${CI_INSTALL_DIR}/bin/pypresso|" testsuite/{python,scripts/samples,scripts/tutorials}/CTestTestfile.cmake
- - make -j ${CI_CORES} check_python_skip_long
- - make -j ${CI_CORES} check_samples
- - make -j 2 check_tutorials
+ - make check_python_skip_long
+ - make check_samples
+ - make check_tutorials
tags:
- espresso
- cuda
diff --git a/doc/sphinx/conf.py.in b/doc/sphinx/conf.py.in
index 3f9908c61b3..71371752121 100644
--- a/doc/sphinx/conf.py.in
+++ b/doc/sphinx/conf.py.in
@@ -24,7 +24,7 @@ sys.path.insert(0, '@CMAKE_BINARY_DIR@/src/python')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '2.3'
+needs_sphinx = '3.5'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
diff --git a/doc/sphinx/installation.rst b/doc/sphinx/installation.rst
index 83b894d1b70..0a5edce42fe 100644
--- a/doc/sphinx/installation.rst
+++ b/doc/sphinx/installation.rst
@@ -175,7 +175,7 @@ To generate the Sphinx documentation, install the following packages:
.. code-block:: bash
- pip3 install --user -c requirements.txt \
+ python3 -m pip install --user -c requirements.txt \
sphinx sphinxcontrib-bibtex sphinx-toggleprompt
To generate the Doxygen documentation, install the following packages:
@@ -197,40 +197,36 @@ To run the samples and tutorials, start by installing the following packages:
The tutorials are written in the
`Notebook Format `__
-:cite:`kluyver16a` version <= 4.4 and can be executed by any of these tools:
+:cite:`kluyver16a` version 4.5 and can be executed by any of these tools:
-* `Jupyter Notebook `__
* `JupyterLab `__
-* `IPython `__ (not recommended)
* `VS Code Jupyter `__
+* `Jupyter Notebook `__
+* `IPython `__ (not recommended)
To check whether one of them is installed, run these commands:
.. code-block:: bash
- jupyter notebook --version
jupyter lab --version
+ jupyter notebook --version
ipython --version
code --version
If you don't have any of these tools installed and aren't sure which one
-to use, we recommend installing the historic Jupyter Notebook, since the
-|es| tutorials have been designed with the ``exercise2`` plugin in mind.
-
-To use Jupyter Notebook, install the following packages:
+to use, we recommend installing JupyterLab:
.. code-block:: bash
- pip3 install --user 'nbformat==5.1.3' 'nbconvert==6.4.5' 'notebook==6.4.8' 'jupyter_contrib_nbextensions==0.5.1'
- jupyter contrib nbextension install --user
- jupyter nbextension enable rubberband/main
- jupyter nbextension enable exercise2/main
+ python3 -m pip install --user -c requirements.txt \
+ nbformat nbconvert jupyterlab
-Alternatively, to use JupyterLab, install the following packages:
+If you prefer the look and feel of Jupyter Classic, install the following:
.. code-block:: bash
- pip3 install --user nbformat notebook jupyterlab
+ python3 -m pip install --user -c requirements.txt \
+ nbformat nbconvert jupyterlab nbclassic
Alternatively, to use VS Code Jupyter, install the following extensions:
@@ -316,7 +312,7 @@ Run the following commands:
doxygen gsl numpy scipy ipython jupyter freeglut
brew install hdf5-mpi
brew link --force cython
- pip install -c requirements.txt PyOpenGL matplotlib
+ python -m pip install -c requirements.txt PyOpenGL matplotlib
.. _Quick installation:
diff --git a/doc/sphinx/running.rst b/doc/sphinx/running.rst
index c95059f9293..d99937f7ff7 100644
--- a/doc/sphinx/running.rst
+++ b/doc/sphinx/running.rst
@@ -6,8 +6,19 @@ Running a simulation
|es| is implemented as a Python module. This means that you need to write a
python script for any task you want to perform with |es|. In this chapter,
the basic structure of the interface will be explained. For a practical
-introduction, see the tutorials, which are also part of the
-distribution.
+introduction, see the tutorials, which are also part of the distribution.
+
+Most users should consider building and then installing |es| locally.
+In this way, |es| behaves like any regular Python package and will
+be recognized by the Python interpreter and Jupyter notebooks.
+
+Most developers prefer the ``pypresso`` and ``ipypresso`` wrapper scripts,
+which export the build folder into the ``$PYTHONPATH`` environment variable
+and then call ``python`` and ``jupyter``, respectively. They also introduce
+extra command line options to help developers run simulations inside a debugger.
+Command line examples in this chapter use the wrapper scripts instead of the
+Python and Jupyter programs, although they are perfectly interchangeable
+when not using a debugger.
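+
+As a rough sketch (not the actual wrapper code), the effect of ``pypresso`` on
+a script is equivalent to extending the Python module search path before
+importing |es|; the build path below is only a placeholder:
+
+.. code-block:: python
+
+    # approximate effect of the pypresso wrapper; the path is a placeholder
+    import sys
+    sys.path.insert(0, "/path/to/espresso/build/src/python")
+    import espressomd  # now importable without a system-wide installation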
.. _Running es:
@@ -71,40 +82,25 @@ in the build folder, do:
make tutorials
-The tutorials contain solutions hidden with the ``exercise2`` NB extension.
-Since this extension is only available for Jupyter Notebook, JupyterLab
-users need to convert the tutorials:
-
-.. code-block:: bash
-
- for f in doc/tutorials/*/*.ipynb; do
- ./pypresso doc/tutorials/convert.py exercise2 --to-jupyterlab ${f}
- done
-
-Likewise, VS Code Jupyter users need to convert the tutorials:
-
-.. code-block:: bash
-
- for f in doc/tutorials/*/*.ipynb; do
- ./pypresso doc/tutorials/convert.py exercise2 --to-vscode-jupyter ${f}
- done
+The tutorials contain solutions hidden inside disclosure boxes.
+Click on "Show solution" to reveal them.
To interact with notebooks, move to the directory containing the tutorials
and call the ``ipypresso`` script to start a local Jupyter session.
-For Jupyter Notebook and IPython users:
+For JupyterLab users:
.. code-block:: bash
cd doc/tutorials
- ../../ipypresso notebook
+ ../../ipypresso lab
-For JupyterLab users:
+For Jupyter Classic users:
.. code-block:: bash
cd doc/tutorials
- ../../ipypresso lab
+ ../../ipypresso nbclassic
For VS Code Jupyter users, no action is needed if ``pypresso`` was set as
the interpreter path (see details in :ref:`Running inside an IDE`).
@@ -129,29 +125,15 @@ will exit the Python interpreter and Jupyter will notify you that the current
Python kernel stopped. If a cell takes too long to execute, you may interrupt
it with the stop button.
-Solutions cells are created using the ``exercise2`` plugin from nbextensions.
-To prevent solution code cells from running when clicking on "Run All", these
-code cells need to be converted to Markdown cells and fenced with `````python``
-and ```````.
+Solution cells are marked up with the code comment ``# SOLUTION CELL``
+(which must be on the first line). In the build folder, these solution cells
+will be automatically converted to Markdown cells.
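+
+For illustration, a solution code cell in the source notebook begins like
+this (the body shown here is only a placeholder):
+
+.. code-block:: python
+
+    # SOLUTION CELL
+    # the hidden solution code follows the marker above
+    print("solution")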
To close the Jupyter session, go to the terminal where it was started and use
the keyboard shortcut Ctrl+C twice.
-When starting a Jupyter session, you may see the following warning in the
-terminal:
-
-.. code-block:: none
-
- [TerminalIPythonApp] WARNING | Subcommand `ipython notebook` is deprecated and will be removed in future versions.
- [TerminalIPythonApp] WARNING | You likely want to use `jupyter notebook` in the future
-
-This only means |es| was compiled with IPython instead of Jupyter. If Jupyter
-is installed on your system, the notebook will automatically close IPython and
-start Jupyter. To recompile |es| with Jupyter, provide ``cmake`` with the flag
-``-D IPYTHON_EXECUTABLE=$(which jupyter)``.
-
-You can find the official Jupyter documentation at
-https://jupyter.readthedocs.io/en/latest/running.html
+You can find the official JupyterLab documentation at
+https://jupyterlab.readthedocs.io/en/latest/user/interface.html
.. _Running inside an IDE:
diff --git a/doc/tutorials/CMakeLists.txt b/doc/tutorials/CMakeLists.txt
index fc0ee979ba6..18798d65b63 100644
--- a/doc/tutorials/CMakeLists.txt
+++ b/doc/tutorials/CMakeLists.txt
@@ -62,8 +62,8 @@ function(NB_EXPORT)
"${NB_FILE};${NB_EXPORT_ADD_SCRIPTS};${CMAKE_BINARY_DIR}/doc/tutorials/convert.py;${CMAKE_BINARY_DIR}/testsuite/scripts/importlib_wrapper.py"
COMMAND
${CMAKE_BINARY_DIR}/pypresso
- ${CMAKE_BINARY_DIR}/doc/tutorials/convert.py ci --execute --exercise2
- --remove-empty-cells --input ${NB_FILE} --output ${NB_FILE_RUN}
+ ${CMAKE_BINARY_DIR}/doc/tutorials/convert.py ci --execute
+ --prepare-for-html --input ${NB_FILE} --output ${NB_FILE_RUN}
--substitutions ${NB_EXPORT_VAR_SUBST} --scripts
${NB_EXPORT_ADD_SCRIPTS})
else()
@@ -71,22 +71,14 @@ function(NB_EXPORT)
endif()
add_custom_command(
- OUTPUT ${HTML_FILE}
- DEPENDS ${NB_FILE_RUN};${NB_EXPORT_ADD_SCRIPTS}
- COMMAND
- ${CMAKE_BINARY_DIR}/pypresso ${CMAKE_BINARY_DIR}/doc/tutorials/convert.py
- ci --exercise2 --input ${NB_FILE_RUN} --output ${NB_FILE_RUN}~
+ OUTPUT ${HTML_FILE} DEPENDS ${NB_FILE_RUN};${NB_EXPORT_ADD_SCRIPTS}
COMMAND ${IPYTHON_EXECUTABLE} nbconvert --to "html" --output ${HTML_FILE}
- ${NB_FILE_RUN}~)
+ ${NB_FILE_RUN})
add_custom_command(
- OUTPUT ${PY_FILE}
- DEPENDS ${NB_FILE}
- COMMAND
- ${CMAKE_BINARY_DIR}/pypresso ${CMAKE_BINARY_DIR}/doc/tutorials/convert.py
- ci --exercise2 --input ${NB_FILE} --output ${NB_FILE}~
+ OUTPUT ${PY_FILE} DEPENDS ${NB_FILE}
COMMAND ${IPYTHON_EXECUTABLE} nbconvert --to "python" --output ${PY_FILE}
- ${NB_FILE}~)
+ ${NB_FILE})
add_custom_target("${NB_EXPORT_TARGET}_html" DEPENDS ${HTML_FILE}
${DEPENDENCY_OF_TARGET})
diff --git a/doc/tutorials/Readme.md b/doc/tutorials/Readme.md
index d70dd75b54a..435919a9c17 100644
--- a/doc/tutorials/Readme.md
+++ b/doc/tutorials/Readme.md
@@ -62,7 +62,7 @@ physical systems.
* **Electrodes**
Modelling electrodes and measuring differential capacitance with the ELC method.
[Part 1](electrodes/electrodes_part1.ipynb) |
- Part 2 (work in progress)
+ [Part 2](electrodes/electrodes_part2.ipynb)
* **Constant pH method**
Modelling an acid dissociation curve using the constant pH method.
[Guide](constant_pH/constant_pH.ipynb)
@@ -91,32 +91,12 @@ in the build folder, do:
make tutorials
```
-The tutorials contain solutions hidden with the ``exercise2`` extension.
-Since this extension is only available for Jupyter Notebook, JupyterLab
-users need to convert the tutorials:
-
-```sh
-for f in doc/tutorials/*/*.ipynb; do
- ./pypresso doc/tutorials/convert.py exercise2 --to-jupyterlab ${f};
-done
-```
-
All tutorials can be viewed with their solutions
[online](https://espressomd.github.io/doc/tutorials.html).
### Running the tutorials interactively
-To view the tutorials, first change to the tutorials directory and then run
-the `ipypresso` script from the directory into which espresso was compiled.
-
-For Jupyter Notebook and IPython users:
-
-```sh
-cd doc/tutorials
-../../ipypresso notebook
-```
-
-For JupyterLab users:
+To view the tutorials in the build folder, run the following commands:
```sh
cd doc/tutorials
diff --git a/doc/tutorials/active_matter/active_matter.ipynb b/doc/tutorials/active_matter/active_matter.ipynb
index 3bdab0431dd..19c6874dffb 100644
--- a/doc/tutorials/active_matter/active_matter.ipynb
+++ b/doc/tutorials/active_matter/active_matter.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "dc98c779",
"metadata": {},
"source": [
"# Active Matter\n",
@@ -16,6 +17,7 @@
},
{
"cell_type": "markdown",
+ "id": "3b481094",
"metadata": {},
"source": [
"## Introduction\n",
@@ -33,6 +35,7 @@
},
{
"cell_type": "markdown",
+ "id": "8c074852",
"metadata": {},
"source": [
"## Active particles\n",
@@ -51,6 +54,7 @@
},
{
"cell_type": "markdown",
+ "id": "c45be7aa",
"metadata": {},
"source": [
"### Active Particles in **ESPResSo**\n",
@@ -64,6 +68,7 @@
},
{
"cell_type": "markdown",
+ "id": "500be443",
"metadata": {},
"source": [
"### Self-Propulsion without Hydrodynamics\n",
@@ -96,6 +101,7 @@
},
{
"cell_type": "markdown",
+ "id": "0a292e51",
"metadata": {},
"source": [
"## Enhanced Diffusion"
@@ -103,6 +109,7 @@
},
{
"cell_type": "markdown",
+ "id": "562026a7",
"metadata": {},
"source": [
"First we import the necessary modules, define the parameters and set up the system."
@@ -111,6 +118,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "3bc0e542",
"metadata": {},
"outputs": [],
"source": [
@@ -130,6 +138,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "730bdd81",
"metadata": {},
"outputs": [],
"source": [
@@ -149,6 +158,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "d327f144",
"metadata": {},
"outputs": [],
"source": [
@@ -159,38 +169,38 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "252969af",
+ "metadata": {},
"source": [
"### Exercise\n",
"* Use ``ED_PARAMS`` to set up a [Langevin thermostat](https://espressomd.github.io/doc/espressomd.html#espressomd.thermostat.Thermostat.set_langevin) for translation and rotation of the particles."
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f1af72f6",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"system.thermostat.set_langevin(kT=ED_PARAMS['kT'],\n",
" gamma=ED_PARAMS['gamma'],\n",
" gamma_rotation=ED_PARAMS['gamma_rotation'],\n",
- " seed=42)\n",
- "```"
+ " seed=42)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "0d6301c7",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "19f20627",
"metadata": {},
"source": [
"The configuration for the Langevin-based swimming is exposed as an attribute of\n",
@@ -201,10 +211,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "a2e9e5c2",
+ "metadata": {},
"source": [
"### Exercise\n",
"* Set up one active and one passive particle, call them `part_act` and `part_pass` (Hint: see [the docs](https://espressomd.github.io/doc/espressomd.html#espressomd.particle_data.ParticleHandle.swimming))\n",
@@ -212,28 +220,30 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "37de96eb",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"part_act = system.part.add(pos=[5.0, 5.0, 5.0], swimming={'f_swim': ED_PARAMS['f_swim']},\n",
" mass=ED_PARAMS['mass'], rotation=3 * [True], rinertia=ED_PARAMS['rinertia'])\n",
"part_pass = system.part.add(pos=[5.0, 5.0, 5.0],\n",
- " mass=ED_PARAMS['mass'], rotation=3 * [True], rinertia=ED_PARAMS['rinertia'])\n",
- "```"
+ " mass=ED_PARAMS['mass'], rotation=3 * [True], rinertia=ED_PARAMS['rinertia'])"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "cdca84dc",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "8ed28c6e",
"metadata": {},
"source": [
"Next we set up three **ESPResSo** correlators for the Mean Square Displacement (MSD), Velocity Autocorrelation Function (VACF) and the Angular Velocity Autocorrelation Function (AVACF)."
@@ -242,6 +252,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "6062679b",
"metadata": {},
"outputs": [],
"source": [
@@ -275,6 +286,7 @@
},
{
"cell_type": "markdown",
+ "id": "8206c8f4",
"metadata": {},
"source": [
"No more setup needed! We can run the simulation and plot our observables."
@@ -283,6 +295,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "70a7eeab",
"metadata": {},
"outputs": [],
"source": [
@@ -293,6 +306,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "618f3b2a",
"metadata": {},
"outputs": [],
"source": [
@@ -307,6 +321,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "d7c754fe",
"metadata": {},
"outputs": [],
"source": [
@@ -324,6 +339,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "564d3480",
"metadata": {},
"outputs": [],
"source": [
@@ -340,6 +356,7 @@
},
{
"cell_type": "markdown",
+ "id": "f1391566",
"metadata": {},
"source": [
"The Mean Square Displacement of an active particle is characterized by a longer ballistic regime and an increased diffusion coefficient for longer lag times. In the overdamped limit it is given by\n",
@@ -366,6 +383,7 @@
},
{
"cell_type": "markdown",
+ "id": "c53697fa",
"metadata": {},
"source": [
"From the autocorrelation functions of the velocity and the angular velocity we can see that the activity does not influence the rotational diffusion. Yet the directed motion for $t<\\tau_{R}$ leads to an enhanced correlation of the velocity."
@@ -374,6 +392,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8042bfc0",
"metadata": {},
"outputs": [],
"source": [
@@ -389,6 +408,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "10a16494",
"metadata": {},
"outputs": [],
"source": [
@@ -406,6 +426,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ff586ddc",
"metadata": {},
"outputs": [],
"source": [
@@ -422,6 +443,7 @@
},
{
"cell_type": "markdown",
+ "id": "c14bcde3",
"metadata": {},
"source": [
"Before we go to the second part, it is important to clear the state of the system."
@@ -430,6 +452,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "23a99a8e",
"metadata": {},
"outputs": [],
"source": [
@@ -444,6 +467,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "a183da57",
"metadata": {},
"outputs": [],
"source": [
@@ -452,6 +476,7 @@
},
{
"cell_type": "markdown",
+ "id": "b916c470",
"metadata": {},
"source": [
"## Rectification"
@@ -459,6 +484,7 @@
},
{
"cell_type": "markdown",
+ "id": "70bf06fe",
"metadata": {},
"source": [
"In the second part of this tutorial you will consider the ‘rectifying’ properties of certain\n",
@@ -470,6 +496,7 @@
},
{
"cell_type": "markdown",
+ "id": "201b03ee",
"metadata": {},
"source": [
"The geometry we will use is a cylindrical system with a funnel dividing\n",
@@ -487,6 +514,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "a9905037",
"metadata": {},
"outputs": [],
"source": [
@@ -497,6 +525,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "7e171115",
"metadata": {},
"outputs": [],
"source": [
@@ -533,6 +562,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "2d969668",
"metadata": {},
"outputs": [],
"source": [
@@ -552,6 +582,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "f7a66e7c",
"metadata": {},
"outputs": [],
"source": [
@@ -561,22 +592,21 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "97d502e4",
+ "metadata": {},
"source": [
"### Exercise \n",
"* Using `funnel_length` and the geometric parameters in `RECT_PARAMS`, set up the funnel cone (Hint: Use a [Conical Frustum](https://espressomd.github.io/doc/espressomd.html#espressomd.shapes.HollowConicalFrustum))"
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "267e2442",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"ctp = espressomd.math.CylindricalTransformationParameters(\n",
" axis=[1, 0, 0], center=box_l/2.)\n",
"\n",
@@ -586,53 +616,50 @@
" thickness=RECT_PARAMS['funnel_thickness'],\n",
" length=funnel_length,\n",
" direction=1)\n",
- "system.constraints.add(shape=hollow_cone, particle_type=TYPES['boundaries'])\n",
- "```"
+ "system.constraints.add(shape=hollow_cone, particle_type=TYPES['boundaries'])"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "8d1cbf01",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "a64435b1",
+ "metadata": {},
"source": [
"### Exercise\n",
"* Set up a WCA potential between the walls and the particles using the parameters in `RECT_PARAMS`"
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c05ca1bf",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"system.non_bonded_inter[TYPES['particles'], TYPES['boundaries']].wca.set_params(\n",
- " epsilon=RECT_PARAMS['wca_epsilon'], sigma=RECT_PARAMS['wca_sigma'])\n",
- "```"
+ " epsilon=RECT_PARAMS['wca_epsilon'], sigma=RECT_PARAMS['wca_sigma'])"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "2d7fe980",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "ccff0b28",
+ "metadata": {},
"source": [
"### Exercise\n",
"* Place an equal number of swimming particles (the total number should be `RECT_PARAMS['n_particles']`) in the left and the right part of the box such that the center of mass is exactly in the middle. (Hint: Particles do not interact so you can put multiple in the same position)\n",
@@ -640,12 +667,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "63f12677",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"for i in range(RECT_PARAMS['n_particles']):\n",
" pos = box_l / 2\n",
" pos[0] += (-1)**i * 0.25 * RECT_PARAMS['length']\n",
@@ -658,13 +686,13 @@
" np.cos(theta)]\n",
"\n",
" system.part.add(pos=pos, swimming={'f_swim': RECT_PARAMS['f_swim']},\n",
- " director=director, rotation=3*[True])\n",
- "```"
+ " director=director, rotation=3*[True])"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "31b62469",
"metadata": {},
"outputs": [],
"source": []
@@ -672,6 +700,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "a0aa1f61",
"metadata": {},
"outputs": [],
"source": [
@@ -681,10 +710,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "09889174",
+ "metadata": {},
"source": [
"### Exercise\n",
"* Run the simulation using ``RECT_N_SAMPLES`` and ``RECT_STEPS_PER_SAMPLE`` and calculate the deviation of the center of mass from the center of the box in each sample step. (Hint: [Center of mass](https://espressomd.github.io/doc/espressomd.html#espressomd.galilei.GalileiTransform.system_CMS))\n",
@@ -692,22 +719,23 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d58e8dc7",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"for _ in tqdm.tqdm(range(RECT_N_SAMPLES)):\n",
" system.integrator.run(RECT_STEPS_PER_SAMPLE)\n",
" com_deviations.append(system.galilei.system_CMS()[0] - 0.5 * box_l[0])\n",
- " times.append(system.time)\n",
- "```"
+ " times.append(system.time)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "b34b2f32",
"metadata": {},
"outputs": [],
"source": []
@@ -715,6 +743,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "dd3fac45",
"metadata": {},
"outputs": [],
"source": [
@@ -725,6 +754,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "f4f035db",
"metadata": {},
"outputs": [],
"source": [
@@ -740,6 +770,7 @@
},
{
"cell_type": "markdown",
+ "id": "6cd432e4",
"metadata": {},
"source": [
"Even though the potential energy inside the geometry is 0 in every part of the accessible region, the active particles are clearly not Boltzmann distributed (homogenous density). Instead, they get funneled into the right half, showing the inapplicability of equilibrium statistical mechanics."
@@ -747,6 +778,7 @@
},
{
"cell_type": "markdown",
+ "id": "2dc63352",
"metadata": {},
"source": [
"## Hydrodynamics of self-propelled particles"
@@ -754,6 +786,7 @@
},
{
"cell_type": "markdown",
+ "id": "3f5981a2",
"metadata": {},
"source": [
"In this final part of the tutorial we simulate and visualize the flow field around a self-propelled swimmer."
@@ -761,6 +794,7 @@
},
{
"cell_type": "markdown",
+ "id": "3c3b49ce",
"metadata": {},
"source": [
"Of particular importance for self-propulsion at low Reynolds number is the fact\n",
@@ -796,6 +830,7 @@
},
{
"cell_type": "markdown",
+ "id": "d260c5ba",
"metadata": {},
"source": [
"In situations where hydrodynamic interactions between swimmers or swimmers and\n",
@@ -834,6 +869,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "da731448",
"metadata": {},
"outputs": [],
"source": [
@@ -848,6 +884,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "65695350",
"metadata": {},
"outputs": [],
"source": [
@@ -857,6 +894,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "d9bcd33d",
"metadata": {},
"outputs": [],
"source": [
@@ -883,34 +921,33 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "44ab55b3",
+ "metadata": {},
"source": [
"### Exercise\n",
"* Using `HYDRO_PARAMS`, set up a lattice-Boltzmann fluid and activate it as a thermostat (Hint: [lattice-Boltzmann](https://espressomd.github.io/doc/lb.html#lattice-boltzmann))"
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e87b87a8",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"lbf = espressomd.lb.LBFluidWalberla(agrid=HYDRO_PARAMS['agrid'],\n",
" density=HYDRO_PARAMS['dens'],\n",
" kinematic_viscosity=HYDRO_PARAMS['visc'],\n",
" tau=HYDRO_PARAMS['time_step'])\n",
"system.lb = lbf\n",
- "system.thermostat.set_lb(LB_fluid=lbf, gamma=HYDRO_PARAMS['gamma'], seed=42)\n",
- "```"
+ "system.thermostat.set_lb(LB_fluid=lbf, gamma=HYDRO_PARAMS['gamma'], seed=42)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "d7ba1609",
"metadata": {},
"outputs": [],
"source": []
@@ -918,6 +955,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "0510840f",
"metadata": {},
"outputs": [],
"source": [
@@ -928,10 +966,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "61f859e5",
+ "metadata": {},
"source": [
"### Exercise\n",
"* Using `HYDRO_PARAMS`, place particle at `pos` that swims in `z`-direction. The particle handle should be called `particle`.\n",
@@ -939,25 +975,26 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "706410a5",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"director = np.array([0,0,1])\n",
"particle = system.part.add(\n",
" pos=pos, \n",
" director=director,\n",
" mass=HYDRO_PARAMS['mass'], rotation=3*[False],\n",
" swimming={'f_swim': HYDRO_PARAMS['f_swim']})\n",
- "espressomd.swimmer_helpers.add_dipole_particle(system, particle, HYDRO_PARAMS['dipole_length'], HYDRO_PARAMS['dipole_particle_type'])\n",
- "```"
+ "espressomd.swimmer_helpers.add_dipole_particle(system, particle, HYDRO_PARAMS['dipole_length'], HYDRO_PARAMS['dipole_particle_type'])"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "96ad5f34",
"metadata": {},
"outputs": [],
"source": []
@@ -965,6 +1002,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "34849708",
"metadata": {},
"outputs": [],
"source": [
@@ -974,6 +1012,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ed68163e",
"metadata": {},
"outputs": [],
"source": [
@@ -999,6 +1038,7 @@
},
{
"cell_type": "markdown",
+ "id": "28d64810",
"metadata": {},
"source": [
"We can also export the particle and fluid data to ``.vtk`` format to display the results with a visualization software like ParaView."
@@ -1007,6 +1047,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "06e9f100",
"metadata": {},
"outputs": [],
"source": [
@@ -1029,6 +1070,7 @@
},
{
"cell_type": "markdown",
+ "id": "55f51115",
"metadata": {},
"source": [
"## Further reading\n",
@@ -1101,5 +1143,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 5
}
diff --git a/doc/tutorials/charged_system/charged_system.ipynb b/doc/tutorials/charged_system/charged_system.ipynb
index 780855e6189..032d0ec7e73 100644
--- a/doc/tutorials/charged_system/charged_system.ipynb
+++ b/doc/tutorials/charged_system/charged_system.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "b3844439",
"metadata": {},
"source": [
"# A Charged System: Counterion Condensation\n",
@@ -17,6 +18,7 @@
},
{
"cell_type": "markdown",
+ "id": "c7a006eb",
"metadata": {},
"source": [
"## Introduction\n",
@@ -30,6 +32,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "05ee17a4",
"metadata": {},
"outputs": [],
"source": [
@@ -53,6 +56,7 @@
},
{
"cell_type": "markdown",
+ "id": "4b5777fe",
"metadata": {},
"source": [
"# System setup\n",
@@ -63,6 +67,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "158ac15b",
"metadata": {},
"outputs": [],
"source": [
@@ -81,6 +86,7 @@
},
{
"cell_type": "markdown",
+ "id": "4f46ba70",
"metadata": {},
"source": [
"We will build the charged rod from individual particles that are fixed in space. With this, we can use the particle-based electrostatics methods of **ESPResSo**. For analysis, we give the rod particles a different type than the counterions."
@@ -89,6 +95,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "6f6fc8b9",
"metadata": {},
"outputs": [],
"source": [
@@ -103,10 +110,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "ae520dd7",
+ "metadata": {},
"source": [
"**Exercise:**\n",
"\n",
@@ -118,35 +123,34 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0b77ac12",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"# ion-ion interaction\n",
"system.non_bonded_inter[COUNTERION_TYPE, COUNTERION_TYPE].wca.set_params(\n",
" epsilon=WCA_EPSILON, sigma=ION_DIAMETER)\n",
"\n",
"# ion-rod interaction\n",
"system.non_bonded_inter[COUNTERION_TYPE, ROD_TYPE].wca.set_params(\n",
- " epsilon=WCA_EPSILON, sigma=ION_DIAMETER / 2. + ROD_RADIUS)\n",
- "```"
+ " epsilon=WCA_EPSILON, sigma=ION_DIAMETER / 2. + ROD_RADIUS)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "7260152b",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "ec702d20",
+ "metadata": {},
"source": [
"Now we need to place the particles in the box\n",
"\n",
@@ -166,12 +170,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b1899c20",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def setup_rod_and_counterions(system, ion_valency, counterion_type,\n",
" rod_charge_dens, N_rod_beads, rod_type):\n",
"\n",
@@ -197,13 +202,13 @@
" counter_ions = system.part.add(pos=ion_positions, type=[\n",
" counterion_type] * N_ions, q=[-ion_valency] * N_ions)\n",
"\n",
- " return counter_ions\n",
- "```"
+ " return counter_ions"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "3af0776e",
"metadata": {},
"outputs": [],
"source": []
@@ -211,6 +216,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "4600cd7e",
"metadata": {},
"outputs": [],
"source": [
@@ -230,6 +236,7 @@
},
{
"cell_type": "markdown",
+ "id": "ae2fdc12",
"metadata": {},
"source": [
"Now we set up the electrostatics method to calculate the forces and energies from the long-range Coulomb interaction.\n",
@@ -245,6 +252,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "57ee902f",
"metadata": {},
"outputs": [],
"source": [
@@ -254,6 +262,7 @@
},
{
"cell_type": "markdown",
+ "id": "bbe4b687",
"metadata": {},
"source": [
"For the accuracy, **ESPResSo** estimates the relative error in the force calculation introduced by the approximations of $P^3M$.\n",
@@ -263,36 +272,36 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "3b387784",
+ "metadata": {},
"source": [
"**Exercise:**\n",
"* Set up a ``p3m`` instance and add it to the ``electrostatics`` slot of the system"
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "78d1c835",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"p3m = espressomd.electrostatics.P3M(**p3m_params)\n",
- "system.electrostatics.solver = p3m\n",
- "```"
+ "system.electrostatics.solver = p3m"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "b04cdad5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "742af77e",
"metadata": {},
"source": [
"Before we can start the simulation, we need to remove the overlap between particles to avoid large forces which would crash the simulation.\n",
@@ -302,6 +311,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "6ed2ce87",
"metadata": {},
"outputs": [],
"source": [
@@ -340,6 +350,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "299f819f",
"metadata": {},
"outputs": [],
"source": [
@@ -355,6 +366,7 @@
},
{
"cell_type": "markdown",
+ "id": "add6aeef",
"metadata": {},
"source": [
"After the overlap is removed, we activate a thermostat to simulate the system at a given temperature."
@@ -363,6 +375,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "58b97cdf",
"metadata": {},
"outputs": [],
"source": [
@@ -374,6 +387,7 @@
},
{
"cell_type": "markdown",
+ "id": "d190ecda",
"metadata": {},
"source": [
"## First run and observable setup\n",
@@ -384,6 +398,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "cefbbc86",
"metadata": {},
"outputs": [],
"source": [
@@ -398,6 +413,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8c119bd2",
"metadata": {},
"outputs": [],
"source": [
@@ -413,6 +429,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "32945c22",
"metadata": {},
"outputs": [],
"source": [
@@ -422,10 +439,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "43906d11",
+ "metadata": {},
"source": [
"Now we are ready to implement the observable calculation. As we are interested in the condensation of counterions on the rod, the physical quantity of interest is the density of charges $\\rho(r)$ around the rod, where $r$ is the distance from the rod. We need many samples to calculate the density from histograms.\n",
"\n",
@@ -444,12 +459,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "81590f8c",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def setup_profile_calculation(system, delta_N, ion_types, r_min, n_radial_bins):\n",
" radial_profile_accumulators = {}\n",
" ctp = espressomd.math.CylindricalTransformationParameters(center = np.array(system.box_l) / 2.,\n",
@@ -474,13 +490,13 @@
"\n",
" radial_profile_accumulators[ion_type] = radial_profile_acc\n",
"\n",
- " return radial_profile_accumulators, bin_edges\n",
- "```"
+ " return radial_profile_accumulators, bin_edges"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "167532ae",
"metadata": {},
"outputs": [],
"source": []
@@ -488,6 +504,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "bb69fe23",
"metadata": {},
"outputs": [],
"source": [
@@ -503,10 +520,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "9b3ba2e8",
+ "metadata": {},
"source": [
"To run the simulation with different parameters, we need a way to reset the system and return it to an empty state before setting it up again.\n",
"\n",
@@ -528,24 +543,25 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9615f768",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def clear_system(system):\n",
" system.thermostat.turn_off()\n",
" system.part.clear()\n",
" system.electrostatics.clear()\n",
" system.auto_update_accumulators.clear()\n",
- " system.time = 0.\n",
- "```"
+ " system.time = 0."
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "d850bed8",
"metadata": {},
"outputs": [],
"source": []
@@ -553,6 +569,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "a65bcd23",
"metadata": {},
"outputs": [],
"source": [
@@ -561,6 +578,7 @@
},
{
"cell_type": "markdown",
+ "id": "459fd605",
"metadata": {},
"source": [
"## Production run and analysis\n",
@@ -570,6 +588,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "9049f459",
"metadata": {},
"outputs": [],
"source": [
@@ -583,6 +602,7 @@
},
{
"cell_type": "markdown",
+ "id": "be2b297f",
"metadata": {},
"source": [
"For longer simulation runs it will be convenient to have a progress bar"
@@ -591,6 +611,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "4b873263",
"metadata": {},
"outputs": [],
"source": [
@@ -601,10 +622,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "86d353c7",
+ "metadata": {},
"source": [
"**Exercise:**\n",
"* Run the simulation for the parameters given above and save the histograms in the corresponding dictionary for analysis\n",
@@ -616,12 +635,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c7575e93",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"for run in runs:\n",
" clear_system(system)\n",
" setup_rod_and_counterions(\n",
@@ -638,23 +658,21 @@
" integrate_system(system, N_SAMPLES * STEPS_PER_SAMPLE)\n",
"\n",
" run['histogram'] = radial_profile_accs[COUNTERION_TYPE].mean()\n",
- " print(f'simulation for parameters {run[\"params\"]} done\\n')\n",
- "```"
+ " print(f'simulation for parameters {run[\"params\"]} done\\n')"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "9a2891d9",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "5930693d",
+ "metadata": {},
"source": [
"**Question**\n",
"* Why does the second simulation take much longer than the first one?"
@@ -662,15 +680,15 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "id": "a877bbd9",
+ "metadata": {},
"source": [
"The rod charge density is doubled, so the total charge of the counterions needs to be doubled, too. Since their valency is only half of the one in the first run, there will be four times more counterions in the second run."
]
},
{
"cell_type": "markdown",
+ "id": "05e52489",
"metadata": {},
"source": [
"We plot the density of counterions around the rod as the normalized integrated radial counterion charge distribution function $P(r)$, meaning the integrated probability to find an amount of charge within the radius $r$. We express the rod charge density $\\lambda$ in terms of the dimensionless Manning parameter $\\xi = \\lambda l_B / e$ where $l_B$ is the Bjerrum length and $e$ the elementary charge"
@@ -679,6 +697,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "dbae04f1",
"metadata": {},
"outputs": [],
"source": [
@@ -706,6 +725,7 @@
},
{
"cell_type": "markdown",
+ "id": "a235b93c",
"metadata": {},
"source": [
"In the semilogarithmic plot we see an inflection point of the cumulative charge distribution which is the indicator for ion condensation. To compare to the meanfield approach of PB, we calculate the solution of the analytical expressions given in [10.1021/ma990897o](https://doi.org/10.1021/ma990897o)"
@@ -714,6 +734,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "468cec09",
"metadata": {},
"outputs": [],
"source": [
@@ -734,6 +755,7 @@
},
{
"cell_type": "markdown",
+ "id": "83934b8c",
"metadata": {},
"source": [
"For multivalent counterions, the manning parameter $\\xi$ has to be multiplied by the valency $\\nu$. The result depends only on the product of ``rod_charge_dens`` and ``ion_valency``, so we only need one curve"
@@ -742,6 +764,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "06a2c566",
"metadata": {},
"outputs": [],
"source": [
@@ -764,6 +787,7 @@
},
{
"cell_type": "markdown",
+ "id": "09f03daa",
"metadata": {},
"source": [
"We see that overall agreement is quite good, but the deviations from the PB solution get stronger the more charged the ions are.\n",
@@ -772,6 +796,7 @@
},
{
"cell_type": "markdown",
+ "id": "d5deb375",
"metadata": {},
"source": [
"## Overcharging by added salt\n",
@@ -782,6 +807,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "0ee33efc",
"metadata": {},
"outputs": [],
"source": [
@@ -804,6 +830,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "5fdbdcee",
"metadata": {},
"outputs": [],
"source": [
@@ -828,6 +855,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8c327569",
"metadata": {},
"outputs": [],
"source": [
@@ -843,6 +871,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "953f34fd",
"metadata": {},
"outputs": [],
"source": [
@@ -868,6 +897,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "9207d735",
"metadata": {},
"outputs": [],
"source": [
@@ -883,10 +913,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "ea8dc78c",
+ "metadata": {},
"source": [
"**Exercise:**\n",
"* Use the cumulative histograms from the cell above to create the cumulative charge histogram of the total ion charge\n",
@@ -896,24 +924,25 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "535dafec",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"counterion_charge = sum(counterions.q)\n",
"anion_charge = sum(anions.q)\n",
"cation_charge = sum(cations.q)\n",
"charge_hist = counterion_charge * cum_hists[COUNTERION_TYPE] + \\\n",
" anion_charge * cum_hists[ANION_PARAMS['type']] + \\\n",
- " cation_charge * cum_hists[CATION_PARAMS['type']]\n",
- "```"
+ " cation_charge * cum_hists[CATION_PARAMS['type']]"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "88f39052",
"metadata": {},
"outputs": [],
"source": []
@@ -921,6 +950,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "518b7a64",
"metadata": {},
"outputs": [],
"source": [
@@ -935,6 +965,7 @@
},
{
"cell_type": "markdown",
+ "id": "685514f1",
"metadata": {},
"source": [
"You should observe a strong overcharging effect, where ions accumulate close to the rod."
@@ -961,5 +992,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 4
+ "nbformat_minor": 5
}
diff --git a/doc/tutorials/constant_pH/constant_pH.ipynb b/doc/tutorials/constant_pH/constant_pH.ipynb
index 965941fee82..f5f0577aec3 100644
--- a/doc/tutorials/constant_pH/constant_pH.ipynb
+++ b/doc/tutorials/constant_pH/constant_pH.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "ac4ec17f",
"metadata": {},
"source": [
"# The constant-pH ensemble method for acid-base reactions"
@@ -9,6 +10,7 @@
},
{
"cell_type": "markdown",
+ "id": "d28d86e7",
"metadata": {},
"source": [
"## Expected prior knowledge\n",
@@ -22,6 +24,7 @@
},
{
"cell_type": "markdown",
+ "id": "1bf00c68",
"metadata": {},
"source": [
"## Introduction\n",
@@ -42,6 +45,7 @@
},
{
"cell_type": "markdown",
+ "id": "160bd8a5",
"metadata": {},
"source": [
"### The chemical equilibrium and reaction constant\n",
@@ -89,6 +93,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "76c9dbf1",
"metadata": {},
"outputs": [],
"source": [
@@ -99,6 +104,7 @@
},
{
"cell_type": "markdown",
+ "id": "7910719e",
"metadata": {},
"source": [
"### The constant pH method\n",
@@ -119,6 +125,7 @@
},
{
"cell_type": "markdown",
+ "id": "9677c09d",
"metadata": {},
"source": [
"## Simulation setup\n",
@@ -129,6 +136,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "6dd0c277",
"metadata": {},
"outputs": [],
"source": [
@@ -153,6 +161,7 @@
},
{
"cell_type": "markdown",
+ "id": "8a744f51",
"metadata": {},
"source": [
"The package [pint](https://pint.readthedocs.io/en/stable/) is intended to make handling of physical quantities with different units easy. You simply create an instance of [`pint.UnitRegistry`](https://pint.readthedocs.io/en/stable/developers_reference.html#pint.UnitRegistry) and access its unit definitions and automatic conversions. For more information or a quick introduction please look at the [pint-documentation](https://pint.readthedocs.io/en/stable/) or [pint-tutorials](https://pint.readthedocs.io/en/stable/tutorial.html#tutorial)."
@@ -161,6 +170,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "c8e15834",
"metadata": {},
"outputs": [],
"source": [
@@ -169,6 +179,7 @@
},
{
"cell_type": "markdown",
+ "id": "65800134",
"metadata": {},
"source": [
"The inputs that we need to define our system in the simulation include\n",
@@ -187,6 +198,7 @@
},
{
"cell_type": "markdown",
+ "id": "a896dbe6",
"metadata": {},
"source": [
"### Set the reduced units of energy and length\n",
@@ -210,6 +222,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ba76b35b",
"metadata": {},
"outputs": [],
"source": [
@@ -235,6 +248,7 @@
},
{
"cell_type": "markdown",
+ "id": "e2424a62",
"metadata": {},
"source": [
"### Set the key physical parameters that uniquely define the system\n",
@@ -249,6 +263,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "d1b40278",
"metadata": {},
"outputs": [],
"source": [
@@ -262,6 +277,7 @@
},
{
"cell_type": "markdown",
+ "id": "8f2cf0bb",
"metadata": {},
"source": [
"#### Set the range of parameters that we want to vary\n",
@@ -272,6 +288,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "64498150",
"metadata": {},
"outputs": [],
"source": [
@@ -286,6 +303,7 @@
},
{
"cell_type": "markdown",
+ "id": "bceb15bc",
"metadata": {},
"source": [
"#### Choose which interactions should be activated\n",
@@ -300,6 +318,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "bd64c859",
"metadata": {},
"outputs": [],
"source": [
@@ -317,6 +336,7 @@
},
{
"cell_type": "markdown",
+ "id": "72a9d84e",
"metadata": {},
"source": [
"#### Set the number of samples to be collected\n",
@@ -329,6 +349,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "c02606b8",
"metadata": {
"scrolled": true
},
@@ -347,6 +368,7 @@
},
{
"cell_type": "markdown",
+ "id": "09b83acd",
"metadata": {},
"source": [
"#### Calculate the dependent parameters\n",
@@ -361,6 +383,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "422098fe",
"metadata": {},
"outputs": [],
"source": [
@@ -383,6 +406,7 @@
},
{
"cell_type": "markdown",
+ "id": "83067f8d",
"metadata": {},
"source": [
"#### Set the particle types and charges\n",
@@ -393,6 +417,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ca876c27",
"metadata": {},
"outputs": [],
"source": [
@@ -416,6 +441,7 @@
},
{
"cell_type": "markdown",
+ "id": "5118d349",
"metadata": {},
"source": [
"### Initialize the ESPResSo system\n",
@@ -431,6 +457,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "698200f1",
"metadata": {
"scrolled": true
},
@@ -444,6 +471,7 @@
},
{
"cell_type": "markdown",
+ "id": "1f23634b",
"metadata": {},
"source": [
"### Set up particles and bonded-interactions\n",
@@ -457,6 +485,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "04feb53f",
"metadata": {},
"outputs": [],
"source": [
@@ -497,6 +526,7 @@
},
{
"cell_type": "markdown",
+ "id": "e16b96c1",
"metadata": {},
"source": [
"### Set up non-bonded-interactions\n",
@@ -512,6 +542,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "c28c10f8",
"metadata": {},
"outputs": [],
"source": [
@@ -549,6 +580,7 @@
},
{
"cell_type": "markdown",
+ "id": "22c08f03",
"metadata": {},
"source": [
"### Set up the constant pH ensemble using the reaction ensemble module"
@@ -556,10 +588,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "631c1c01",
+ "metadata": {},
"source": [
"After the particles have been added to the system we initialize the `espressomd.reaction_methods`. The parameters to set are:\n",
"\n",
@@ -574,12 +604,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2b994257",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"exclusion_range = PARTICLE_SIZE_REDUCED if USE_WCA else 0.0\n",
"RE = espressomd.reaction_methods.ConstantpHEnsemble(\n",
" kT=KT_REDUCED,\n",
@@ -587,19 +618,20 @@
" seed=77,\n",
" constant_pH=2 # temporary value\n",
")\n",
- "RE.set_non_interacting_type(type=len(TYPES)) # this parameter helps speed up the calculation in an interacting system\n",
- "```"
+ "RE.set_non_interacting_type(type=len(TYPES)) # this parameter helps speed up the calculation in an interacting system"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "d3b2554d",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "88f3bcda",
"metadata": {},
"source": [
"The next step is to define the chemical reaction. The order of species in the lists of reactants and products is very important for ESPResSo because it determines which particles are created or deleted in the reaction move. Specifically, identity of the first species in the list of reactants is changed to the first species in the list of products, the second reactant species is changed to the second product species, and so on. If the reactant list has more species than the product list, then excess reactant species are deleted from the system. If the product list has more species than the reactant list, then the excess product species are created and randomly placed inside the simulation box. This convention is especially important if some of the species belong to a chain-like molecule, so that they cannot be inserted at an arbitrary position."
@@ -607,10 +639,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "06e51a82",
+ "metadata": {},
"source": [
"**Exercise:**\n",
"\n",
@@ -621,13 +651,15 @@
]
},
{
- "cell_type": "markdown",
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "96395eb9",
"metadata": {
- "scrolled": false,
- "solution2": "hidden"
+ "scrolled": false
},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"RE.add_reaction(\n",
" gamma=10**(-pKa),\n",
" reactant_types=[TYPES[\"HA\"]],\n",
@@ -635,19 +667,20 @@
" default_charges={TYPES[\"HA\"]: CHARGES[\"HA\"],\n",
" TYPES[\"A\"]: CHARGES[\"A\"],\n",
" TYPES[\"B\"]: CHARGES[\"B\"]}\n",
- ")\n",
- "```"
+ ")"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "41566274",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "ad9817c9",
"metadata": {},
"source": [
"In the example above, the order of reactants and products ensures that identity of $\\mathrm{HA}$ is changed to $\\mathrm{A^{-}}$ and vice versa, while $\\mathrm{B^{+}}$ is inserted/deleted in the reaction move. \n",
@@ -657,6 +690,7 @@
},
{
"cell_type": "markdown",
+ "id": "253d05ee",
"metadata": {},
"source": [
"## Run the simulations\n",
@@ -666,10 +700,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "034fd91b",
+ "metadata": {},
"source": [
"**Exercise:**\n",
"\n",
@@ -679,30 +711,29 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "36523af6",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def equilibrate_reaction(reaction_steps=1):\n",
- " RE.reaction(steps=reaction_steps)\n",
- "```"
+ " RE.reaction(steps=reaction_steps)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "7ebb25e5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "07daa583",
+ "metadata": {},
"source": [
"After the system has been equilibrated, the integration/sampling loop follows.\n",
"\n",
@@ -731,12 +762,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6816aa4d",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def perform_sampling(type_A, num_samples, num_As:np.ndarray, reaction_steps, \n",
" prob_integration=0.5, integration_steps=1000):\n",
" for i in range(num_samples):\n",
@@ -744,19 +776,20 @@
" system.integrator.run(integration_steps)\n",
" # we should do at least one reaction attempt per reactive particle\n",
" RE.reaction(steps=reaction_steps)\n",
- " num_As[i] = system.number_of_particles(type=type_A)\n",
- "```"
+ " num_As[i] = system.number_of_particles(type=type_A)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "4b6ebb99",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "eabceb5b",
"metadata": {},
"source": [
"Finally we have everything together to run our simulations. We set the $\\mathrm{pH}$ value in [`RE.constant_pH`](https://espressomd.github.io/doc/espressomd.html#espressomd.reaction_methods.ConstantpHEnsemble.constant_pH) and use our `equilibrate_reaction` function to equilibrate the system. After that the samplings are performed with our `perform_sampling` function."
@@ -765,6 +798,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "b1489ea7",
"metadata": {},
"outputs": [],
"source": [
@@ -793,6 +827,7 @@
},
{
"cell_type": "markdown",
+ "id": "c652f949",
"metadata": {},
"source": [
"## Results\n",
@@ -815,6 +850,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "5793ceb1",
"metadata": {},
"outputs": [],
"source": [
@@ -853,6 +889,7 @@
},
{
"cell_type": "markdown",
+ "id": "03bbe514",
"metadata": {},
"source": [
"Now, we use the above function to calculate the average number of particles of type $\\mathrm{A^-}$ and estimate its statistical error and autocorrelation time.\n",
@@ -862,6 +899,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "3b8ea33f",
"metadata": {
"scrolled": false
},
@@ -891,6 +929,7 @@
},
{
"cell_type": "markdown",
+ "id": "8115f2cd",
"metadata": {},
"source": [
"The simulation results for the non-interacting case match very well with the analytical solution of Henderson-Hasselbalch equation. There are only minor deviations, and the estimated errors are small too. This situation will change when we introduce interactions.\n",
@@ -903,6 +942,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "cd73fbbb",
"metadata": {
"scrolled": false
},
@@ -923,6 +963,7 @@
},
{
"cell_type": "markdown",
+ "id": "5fa9f088",
"metadata": {},
"source": [
"To look in more detail at the statistical accuracy, it is useful to plot the deviations from the analytical result. This provides another way to check the consistency of error estimates. In the case of non-interacting system, the simulation should exactly reproduce the Henderson-Hasselbalch equation. In such case, about 68% of the results should be within one error bar from the analytical result, whereas about 95% of the results should be within two times the error bar. Indeed, if you plot the deviations by running the script below, you should observe that most of the results are within one error bar from the analytical solution, a smaller fraction of the results is slightly further than one error bar, and one or two might be about two error bars apart. Again, this situation changes when we activate interactions because the ionization of the interacting system deviates from the Henderson-Hasselbalch equation."
@@ -931,6 +972,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "9585afa3",
"metadata": {
"scrolled": false
},
@@ -951,6 +993,7 @@
},
{
"cell_type": "markdown",
+ "id": "8b982f07",
"metadata": {},
"source": [
"### The Neutralizing Ion $\\mathrm{B^+}$\n",
@@ -980,6 +1023,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "33b09ef4",
"metadata": {
"scrolled": false
},
@@ -1029,6 +1073,7 @@
},
{
"cell_type": "markdown",
+ "id": "9e1f5fea",
"metadata": {},
"source": [
"The plot shows that at intermediate $\\mathrm{pH}$ the concentration of $\\mathrm{B^+}$ ions is approximately equal to the concentration of $\\mathrm{M^+}$ ions. Only at one specific $\\mathrm{pH}$ the concentration of $\\mathrm{B^+}$ ions is equal to the concentration of $\\mathrm{H^+}$ ions. This is the $\\mathrm{pH}$ one obtains when dissolving the weak acid $\\mathrm{A}$ in pure water.\n",
@@ -1040,6 +1085,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "002ca2b0",
"metadata": {},
"outputs": [],
"source": [
@@ -1079,6 +1125,7 @@
},
{
"cell_type": "markdown",
+ "id": "ca4a6e56",
"metadata": {},
"source": [
"We see that the ionic strength in the simulation box significantly deviates from the ionic strength of the real solution only at high or low $\\mathrm{pH}$ value. If the $\\mathrm{p}K_{\\mathrm{A}}$ value is sufficiently large, then the deviation at very low $\\mathrm{pH}$ can also be neglected because then the polymer is uncharged in the region where the ionic strength is not correctly represented in the constant-$\\mathrm{pH}$ simulation. At a high $\\mathrm{pH}$ the ionic strength will have an effect on the weak acid, because then it is fully charged. The $\\mathrm{pH}$ range in which the constant-$\\mathrm{pH}$ method uses approximately the right ionic strength depends on salt concentration, weak acid concentration and the $\\mathrm{p}K_{\\mathrm{A}}$ value. See also [Landsgesell2019] for a more detailed discussion of this issue, and its consequences.\n"
@@ -1086,6 +1133,7 @@
},
{
"cell_type": "markdown",
+ "id": "7c63f293",
"metadata": {},
"source": [
"## Suggested problems for further work\n",
@@ -1099,6 +1147,7 @@
},
{
"cell_type": "markdown",
+ "id": "ebb94b70",
"metadata": {},
"source": [
"## References\n",
@@ -1137,5 +1186,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 5
}
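
A minimal sketch of the ideal-curve comparison discussed in the notebook above; the function and the `pHs`, `alpha_sim`, `alpha_err` arrays are illustrative assumptions, not part of the patch:

```python
import numpy as np

def ideal_alpha(pH, pKa):
    """Ideal degree of ionization from the Henderson-Hasselbalch equation."""
    return 1.0 / (1.0 + 10.0**(pKa - pH))

# Deviation in units of the estimated error bar, assuming arrays `pHs`,
# `alpha_sim` and `alpha_err` from the sampling loop in the notebook:
# deviation = (alpha_sim - ideal_alpha(pHs, pKa)) / alpha_err
# For the non-interacting system, roughly 68% of the entries should lie within +/- 1.
```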
diff --git a/doc/tutorials/convert.py b/doc/tutorials/convert.py
index 94cd4291a05..a54505de5cf 100644
--- a/doc/tutorials/convert.py
+++ b/doc/tutorials/convert.py
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2019-2022 The ESPResSo project
+# Copyright (C) 2019-2023 The ESPResSo project
#
# This file is part of ESPResSo.
#
@@ -29,13 +29,15 @@
import nbformat
import re
import os
-import ast
import sys
import uuid
sys.path.append('@CMAKE_SOURCE_DIR@/testsuite/scripts')
import importlib_wrapper as iw
+SOLUTION_CELL_TOKEN = "# SOLUTION CELL"
+
+
def get_code_cells(nb):
return [c['source'] for c in nb['cells'] if c['cell_type'] == 'code']
@@ -79,6 +81,22 @@ def remove_empty_cells(nb):
nb['cells'].pop(i)
+def parse_solution_cell(cell):
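+    """Return the solution code of a solution cell (token stripped), or ``None``."""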
+ if cell["cell_type"] == "code":
+ source = cell["source"].strip()
+ if source.startswith(f"{SOLUTION_CELL_TOKEN}\n"):
+ return source.split("\n", 1)[1].strip()
+ return None
+
+
+def convert_exercise2_to_code(nb):
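+    """Replace the source of solution cells by the bare solution code."""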
+ for i in range(len(nb["cells"]) - 1, 0, -1):
+ cell = nb["cells"][i]
+ solution = parse_solution_cell(cell)
+ if solution is not None:
+ cell["source"] = solution
+
+
def disable_plot_interactivity(nb):
"""
Replace all occurrences of the magic command ``%matplotlib notebook``
@@ -91,127 +109,29 @@ def disable_plot_interactivity(nb):
cell['source'], flags=re.M)
-def split_matplotlib_cells(nb):
- """
- If a cell imports matplotlib, split the cell to keep the
- import statement separate from the code that uses matplotlib.
- This prevents a known bug in the Jupyter backend which causes
- the plot object to be represented as a string instead of a canvas
- when created in the cell where matplotlib is imported for the
- first time (https://github.com/jupyter/notebook/issues/3523).
- """
- for i in range(len(nb['cells']) - 1, -1, -1):
- cell = nb['cells'][i]
- if cell['cell_type'] == 'code' and 'matplotlib' in cell['source']:
- code = iw.protect_ipython_magics(cell['source'])
- # split cells after matplotlib imports
- mapping = iw.delimit_statements(code)
- tree = ast.parse(code)
- visitor = iw.GetMatplotlibPyplot()
- visitor.visit(tree)
- if visitor.matplotlib_first:
- code = iw.deprotect_ipython_magics(code)
- lines = code.split('\n')
- lineno_end = mapping[visitor.matplotlib_first]
- split_code = '\n'.join(lines[lineno_end:]).lstrip('\n')
- if split_code:
- new_cell = nbformat.v4.new_code_cell(source=split_code)
- if 'id' not in cell and 'id' in new_cell:
- del new_cell['id']
- nb['cells'].insert(i + 1, new_cell)
- lines = lines[:lineno_end]
- nb['cells'][i]['source'] = '\n'.join(lines).rstrip('\n')
-
-
-def convert_exercise2_to_code(nb):
- """
- Walk through the notebook cells and convert exercise2 Markdown cells
- containing fenced python code to exercise2 code cells.
- """
- for i, cell in enumerate(nb['cells']):
- if 'solution2' in cell['metadata']:
- cell['metadata']['solution2'] = 'shown'
- # convert solution markdown cells into code cells
- if cell['cell_type'] == 'markdown' and 'solution2' in cell['metadata'] \
- and 'solution2_first' not in cell['metadata']:
- lines = cell['source'].strip().split('\n')
- if lines[0].strip() == '```python' and lines[-1].strip() == '```':
- source = '\n'.join(lines[1:-1]).strip()
- nb['cells'][i] = nbformat.v4.new_code_cell(source=source)
- nb['cells'][i]['metadata'] = cell['metadata']
- nb['cells'][i]['metadata']['solution2'] = 'shown'
- if 'id' in nb['cells'][i]:
- del nb['cells'][i]['id']
-
-
def convert_exercise2_to_markdown(nb):
"""
- Walk through the notebook cells and convert exercise2 Python cells
- to exercise2 Markdown cells using a fenced code block.
- """
- for i, cell in enumerate(nb['cells']):
- if 'solution2' in cell['metadata']:
- cell['metadata']['solution2'] = 'hidden'
- # convert solution code cells into markdown cells
- if cell['cell_type'] == 'code' and 'solution2' in cell['metadata']:
- content = '```python\n' + cell['source'] + '\n```'
- nb['cells'][i] = nbformat.v4.new_markdown_cell(source=content)
- nb['cells'][i]['metadata'] = cell['metadata']
- nb['cells'][i]['metadata']['solution2'] = 'hidden'
- if 'id' in nb['cells'][i]:
- del nb['cells'][i]['id']
-
-
-def convert_exercise2_to_jupyterlab(nb):
- """
- Walk through the notebook cells and convert exercise2 Markdown cells
- containing fenced python code to a JupyterLab-compatible format.
- As of 2022, there is no equivalent of exercise2 for JupyterLab
- ([chart](https://jupyterlab-contrib.github.io/migrate_from_classical.html)),
- but a similar effect can be obtained with basic HTML.
-
- This also converts a notebook to Notebook Format 4.5. ESPResSo notebooks
- cannot be saved in 4.5 format since both Jupyter Notebook and JupyterLab
- overwrite the cell ids with random strings after each save, which is a
- problem for version control. The notebooks need to be converted to the
- 4.5 format to silence JSON parser errors in JupyterLab.
+    Walk through the notebook cells and convert solution cells to Markdown
+ format and append an empty code cell.
"""
- jupyterlab_tpl = """\
-\
+ solution_tpl = """\
+\
Show solution
-{1}
+```python
+{0}
+```
\
"""
- for i, cell in enumerate(nb['cells']):
+ for i, cell in reversed(list(enumerate(nb["cells"]))):
# convert solution code cells into markdown cells
- if cell['cell_type'] == 'markdown' and 'solution2' in cell['metadata'] \
- and 'solution2_first' not in cell['metadata']:
- lines = cell['source'].strip().split('\n')
- shown = 'open=""' if cell['metadata']['solution2'] == 'shown' else ''
- if lines[0].strip() == '```python' and lines[-1].strip() == '```':
- source = jupyterlab_tpl.format(shown, '\n'.join(lines).strip())
- nb['cells'][i] = nbformat.v4.new_markdown_cell(source=source)
- # convert cell to notebook format 4.5
- if 'id' not in cell:
- cell = uuid.uuid4().hex[:8]
-
- # change to notebook format 4.5
- current_version = (nb['nbformat'], nb['nbformat_minor'])
- assert current_version >= (4, 0)
- if current_version < (4, 5):
- nb['nbformat_minor'] = 5
-
-
-def convert_exercise2_to_vscode_jupyter(nb):
- """
- Walk through the notebook cells and convert exercise2 Markdown cells
- containing fenced python code to a VS Code Jupyter-compatible format.
- As of 2022, there is no equivalent of exercise2 for VS Code Jupyter.
- """
- convert_exercise2_to_jupyterlab(nb)
+ solution = parse_solution_cell(cell)
+ if solution is not None:
+ source = solution_tpl.format(solution)
+ nb["cells"][i] = nbformat.v4.new_markdown_cell(source=source)
+ nb["cells"].insert(i + 1, nbformat.v4.new_code_cell(source=""))
def apply_autopep8(nb):
@@ -287,20 +207,14 @@ def handle_ci_case(args):
for filepath in args.scripts:
add_cell_from_script(nb, filepath)
- # convert solution cells to code cells
- if args.exercise2:
- convert_exercise2_to_code(nb)
-
- # remove empty cells (e.g. those below exercise2 cells)
- if args.remove_empty_cells:
+    # clean up solution cells and remove empty cells
+ if args.prepare_for_html:
remove_empty_cells(nb)
+ convert_exercise2_to_code(nb)
# disable plot interactivity
disable_plot_interactivity(nb)
- # guard against a jupyter bug involving matplotlib
- split_matplotlib_cells(nb)
-
if args.substitutions or args.execute:
# substitute global variables
cell_separator = f'\n##{uuid.uuid4().hex}\n'
@@ -326,16 +240,10 @@ def handle_exercise2_case(args):
if args.to_md:
convert_exercise2_to_markdown(nb)
- elif args.to_jupyterlab:
- convert_exercise2_to_jupyterlab(nb)
- elif args.to_vscode_jupyter:
- convert_exercise2_to_vscode_jupyter(nb)
elif args.to_py:
convert_exercise2_to_code(nb)
elif args.pep8:
- convert_exercise2_to_code(nb)
apply_autopep8(nb)
- convert_exercise2_to_markdown(nb)
elif args.remove_empty_cells:
remove_empty_cells(nb)
@@ -358,25 +266,19 @@ def handle_exercise2_case(args):
help='variables to substitute')
parser_ci.add_argument('--scripts', nargs='*',
help='scripts to insert in new cells')
-parser_ci.add_argument('--exercise2', action='store_true',
- help='convert exercise2 solutions into code cells')
-parser_ci.add_argument('--remove-empty-cells', action='store_true',
- help='remove empty cells')
+parser_ci.add_argument('--prepare-for-html', action='store_true',
+ help='remove empty cells and CI/CD comment lines')
parser_ci.add_argument('--execute', action='store_true',
help='run the notebook')
parser_ci.set_defaults(callback=handle_ci_case)
# exercise2 module
parser_exercise2 = subparsers.add_parser(
- 'exercise2', help='module for exercise2 conversion (Markdown <-> Python)')
+ 'cells', help='module to post-process cells')
parser_exercise2.add_argument('input', type=str, help='path to the Jupyter '
'notebook (in-place conversion)')
group_exercise2 = parser_exercise2.add_mutually_exclusive_group(required=True)
group_exercise2.add_argument('--to-md', action='store_true',
help='convert solution cells to Markdown')
-group_exercise2.add_argument('--to-jupyterlab', action='store_true',
- help='convert solution cells to JupyterLab')
-group_exercise2.add_argument('--to-vscode-jupyter', action='store_true',
- help='convert solution cells to VS Code Jupyter')
group_exercise2.add_argument('--to-py', action='store_true',
help='convert solution cells to Python')
group_exercise2.add_argument('--pep8', action='store_true',
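As a quick orientation, here is a minimal sketch of what the new `# SOLUTION CELL` convention looks like from the outside; the notebook filename is a placeholder, and the token handling mirrors `parse_solution_cell` above (`cells --to-py` behaviour):

```python
import nbformat

SOLUTION_CELL_TOKEN = "# SOLUTION CELL"

nb = nbformat.read("exercises.ipynb", as_version=4)  # placeholder filename
for cell in nb["cells"]:
    if cell["cell_type"] != "code":
        continue
    source = cell["source"].strip()
    if source.startswith(SOLUTION_CELL_TOKEN + "\n"):
        # drop the marker line so the solution executes, as `cells --to-py` does
        cell["source"] = source.split("\n", 1)[1].strip()
nbformat.write(nb, "exercises.ipynb")
```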
diff --git a/doc/tutorials/electrodes/electrodes_part1.ipynb b/doc/tutorials/electrodes/electrodes_part1.ipynb
index acda4631df2..6af99ed2890 100644
--- a/doc/tutorials/electrodes/electrodes_part1.ipynb
+++ b/doc/tutorials/electrodes/electrodes_part1.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "33419441",
"metadata": {},
"source": [
"# Basic simulation of electrodes in ESPResSo part I: ion-pair in a narrow metallic slit-like confinement using ICC$^\\star$"
@@ -9,6 +10,7 @@
},
{
"cell_type": "markdown",
+ "id": "47450c9f",
"metadata": {},
"source": [
"## Prerequisites\n",
@@ -26,6 +28,7 @@
},
{
"cell_type": "markdown",
+ "id": "05f9c1ad",
"metadata": {},
"source": [
"## Introduction\n",
@@ -43,6 +46,7 @@
},
{
"cell_type": "markdown",
+ "id": "02157443",
"metadata": {},
"source": [
"## Theoretical Background \n",
@@ -79,6 +83,7 @@
},
{
"cell_type": "markdown",
+ "id": "a35caac8",
"metadata": {},
"source": [
"### Green's function for charges in a dielectric slab\n",
@@ -108,6 +113,7 @@
},
{
"cell_type": "markdown",
+ "id": "6c28ac46",
"metadata": {},
"source": [
"## 2D+h periodic systems, dielectric interfaces and Induced Charge Computation with ICC$^\\star$\n",
@@ -157,6 +163,7 @@
},
{
"cell_type": "markdown",
+ "id": "eafc3e67",
"metadata": {},
"source": [
"## 1. System setup \n"
@@ -164,6 +171,7 @@
},
{
"cell_type": "markdown",
+ "id": "015f9df5",
"metadata": {},
"source": [
"We first import all ESPResSo features and external modules."
@@ -172,6 +180,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "553e6a9b",
"metadata": {},
"outputs": [],
"source": [
@@ -189,6 +198,7 @@
},
{
"cell_type": "markdown",
+ "id": "d0c56786",
"metadata": {},
"source": [
"We need to define the system dimensions and some physical parameters related to\n",
@@ -212,6 +222,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8778f289",
"metadata": {},
"outputs": [],
"source": [
@@ -257,6 +268,7 @@
},
{
"cell_type": "markdown",
+ "id": "715b2e5c",
"metadata": {},
"source": [
"### Setup of electrostatic interactions\n",
@@ -266,6 +278,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "b00b4235",
"metadata": {},
"outputs": [],
"source": [
@@ -280,10 +293,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "20957c03",
+ "metadata": {},
"source": [
"### Task\n",
"\n",
@@ -291,25 +302,27 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e5a52faf",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
- "elc = espressomd.electrostatics.ELC(actor=p3m, gap_size=ELC_GAP, maxPWerror=MAX_PW_ERROR)\n",
- "```"
+ "# SOLUTION CELL\n",
+ "elc = espressomd.electrostatics.ELC(actor=p3m, gap_size=ELC_GAP, maxPWerror=MAX_PW_ERROR)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "3eec2580",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "243166cf",
"metadata": {},
"source": [
"Next, we set up the ICC particles on both electrodes"
@@ -318,6 +331,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "93f09c66",
"metadata": {},
"outputs": [],
"source": [
@@ -328,10 +342,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "23398839",
+ "metadata": {},
"source": [
"### TASK\n",
"\n",
@@ -341,12 +353,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6d83e7e2",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"line_density = np.sqrt(ICC_PARTCL_NUMBER_DENSITY)\n",
"xs = np.linspace(0, system.box_l[0], num=int(round(system.box_l[0] * line_density)), endpoint=False)\n",
"ys = np.linspace(0, system.box_l[1], num=int(round(system.box_l[1] * line_density)), endpoint=False)\n",
@@ -361,23 +374,21 @@
"for x in xs:\n",
" for y in ys:\n",
" icc_partcls_top.append(system.part.add(pos=[x, y, box_l_z], q=1. / n_partcls_each_electrode,\n",
- " type=TYPES[\"Electrodes\"], fix=3*[True]))\n",
- "```"
+ " type=TYPES[\"Electrodes\"], fix=3*[True]))"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "24de69a4",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "5326e038",
+ "metadata": {},
"source": [
"### Task\n",
"\n",
@@ -391,12 +402,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "11f1f197",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"system.electrostatics.solver = elc\n",
"\n",
"n_icc_partcls = len(icc_partcls_top) + len(icc_partcls_bottom)\n",
@@ -420,19 +432,20 @@
" sigmas=icc_sigmas,\n",
" epsilons=icc_epsilons\n",
")\n",
- "system.electrostatics.extension = icc\n",
- "```"
+ "system.electrostatics.extension = icc"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "48e114a8",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "1937965b",
"metadata": {},
"source": [
"## 2. Calculation of the forces"
@@ -441,6 +454,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "e998bc15",
"metadata": {},
"outputs": [],
"source": [
@@ -467,6 +481,7 @@
},
{
"cell_type": "markdown",
+ "id": "751d418b",
"metadata": {},
"source": [
"## 3. Analysis and Interpretation of the data\n",
@@ -478,6 +493,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8b3579ed",
"metadata": {},
"outputs": [],
"source": [
@@ -512,6 +528,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "cb4dba95",
"metadata": {},
"outputs": [],
"source": [
@@ -534,6 +551,7 @@
},
{
"cell_type": "markdown",
+ "id": "bd5f4d61",
"metadata": {},
"source": [
"## References\n",
@@ -568,5 +586,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 5
}
diff --git a/doc/tutorials/electrodes/electrodes_part2.ipynb b/doc/tutorials/electrodes/electrodes_part2.ipynb
index d723d41474f..ffe79e19bce 100644
--- a/doc/tutorials/electrodes/electrodes_part2.ipynb
+++ b/doc/tutorials/electrodes/electrodes_part2.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "357a65e2",
"metadata": {},
"source": [
"# Basic simulation of electrodes in ESPResSo part II: Electrolyte capacitor and Poisson–Boltzmann theory"
@@ -9,6 +10,7 @@
},
{
"cell_type": "markdown",
+ "id": "a90e9ca7",
"metadata": {},
"source": [
"## Prerequisites\n",
@@ -27,6 +29,7 @@
},
{
"cell_type": "markdown",
+ "id": "4c0ab2c6",
"metadata": {},
"source": [
"## Introduction\n",
@@ -74,6 +77,7 @@
},
{
"cell_type": "markdown",
+ "id": "7b1ef707",
"metadata": {},
"source": [
"## Theoretical Background \n",
@@ -127,6 +131,7 @@
},
{
"cell_type": "markdown",
+ "id": "38ba2c5c",
"metadata": {},
"source": [
"## ELC-IC for 2D+h periodic systems with dielectric interfaces\n",
@@ -182,6 +187,7 @@
},
{
"cell_type": "markdown",
+ "id": "f128c80a",
"metadata": {},
"source": [
"## 1. System setup \n",
@@ -192,6 +198,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "7661ce98",
"metadata": {},
"outputs": [],
"source": [
@@ -215,6 +222,7 @@
},
{
"cell_type": "markdown",
+ "id": "16cce457",
"metadata": {},
"source": [
"We need to define system dimensions and some physical parameters related to\n",
@@ -232,6 +240,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "43ae6591",
"metadata": {},
"outputs": [],
"source": [
@@ -260,6 +269,7 @@
},
{
"cell_type": "markdown",
+ "id": "a3b0c640",
"metadata": {},
"source": [
"### 1.1 Setting up the box dimensions and create system\n",
@@ -293,10 +303,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "867de4db",
+ "metadata": {},
"source": [
"### Task\n",
"\n",
@@ -311,12 +319,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5a65af0b",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def get_box_dimension(concentration, distance, n_ionpairs=N_IONPAIRS):\n",
" \"\"\"\n",
" For a given number of particles, determine the lateral area of the box\n",
@@ -332,13 +341,13 @@
" area = box_volume / (l_z - 2. * LJ_SIGMA) # account for finite ion size in density calculation\n",
" l_xy = np.sqrt(area)\n",
"\n",
- " return l_xy, l_z\n",
- "```"
+ " return l_xy, l_z"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "9b70bad1",
"metadata": {},
"outputs": [],
"source": []
@@ -346,6 +355,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "264d82af",
"metadata": {},
"outputs": [],
"source": [
@@ -358,6 +368,7 @@
},
{
"cell_type": "markdown",
+ "id": "7aaec7a5",
"metadata": {},
"source": [
"We now can create the **ESPResSo** system.\n",
@@ -375,6 +386,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "eb642f0f",
"metadata": {},
"outputs": [],
"source": [
@@ -385,6 +397,7 @@
},
{
"cell_type": "markdown",
+ "id": "df4888a5",
"metadata": {},
"source": [
"### 1.2 Set up the double-layer capacitor\n",
@@ -397,10 +410,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "48abb259",
+ "metadata": {},
"source": [
"### Task\n",
"* add two wall constraints at $z=0$ and $z=L_z$ to stop particles from\n",
@@ -414,12 +425,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "af3cb791",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"# Bottom wall, normal pointing in the +z direction \n",
"floor = espressomd.shapes.Wall(normal=[0, 0, 1])\n",
"c1 = system.constraints.add(\n",
@@ -428,23 +440,21 @@
"# Top wall, normal pointing in the -z direction\n",
"ceiling = espressomd.shapes.Wall(normal=[0, 0, -1], dist=-box_l_z) \n",
"c2 = system.constraints.add(\n",
- " particle_type=types[\"Electrodes\"], penetrable=False, shape=ceiling)\n",
- "```"
+ " particle_type=types[\"Electrodes\"], penetrable=False, shape=ceiling)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "88ba5371",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "a990659a",
+ "metadata": {},
"source": [
"#### 1.2.2 Add particles for the ions\n",
"\n",
@@ -457,12 +467,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5a62db5a",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"offset = LJ_SIGMA # avoid unfavorable overlap at close distance to the walls\n",
"init_part_btw_z1 = offset \n",
"init_part_btw_z2 = box_l_z - offset\n",
@@ -478,23 +489,21 @@
" ion_pos[0] = rng.random(1) * system.box_l[0]\n",
" ion_pos[1] = rng.random(1) * system.box_l[1]\n",
" ion_pos[2] = rng.random(1) * (init_part_btw_z2 - init_part_btw_z1) + init_part_btw_z1\n",
- " system.part.add(pos=ion_pos, type=types[\"Anion\"], q=charges[\"Anion\"])\n",
- "```"
+ " system.part.add(pos=ion_pos, type=types[\"Anion\"], q=charges[\"Anion\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "dc4f8d89",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "b95f1bba",
+ "metadata": {},
"source": [
"#### 1.2.3 Add interactions:\n",
"\n",
@@ -509,31 +518,30 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dacf9cc5",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"for t1 in types.values():\n",
" for t2 in types.values():\n",
- " system.non_bonded_inter[t1, t2].wca.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA)\n",
- "```"
+ " system.non_bonded_inter[t1, t2].wca.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "31de8dc6",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "61700a87",
+ "metadata": {},
"source": [
"For the (2D+h) electrostatic with dielectrics we choose the ELC-IC with P3M.\n",
"\n",
@@ -555,12 +563,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2e566993",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def setup_electrostatic_solver(potential_diff):\n",
" delta_mid_top = -1. # (Fully metallic case both -1) \n",
" delta_mid_bot = -1.\n",
@@ -577,19 +586,20 @@
" maxPWerror=elc_accuracy,\n",
" delta_mid_bot=delta_mid_bot,\n",
" delta_mid_top=delta_mid_top)\n",
- " return elc\n",
- "```"
+ " return elc"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "efbf4cf9",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "03ab39a1",
"metadata": {},
"source": [
"Now add the solver to the system:"
@@ -598,6 +608,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "25219528",
"metadata": {},
"outputs": [],
"source": [
@@ -606,6 +617,7 @@
},
{
"cell_type": "markdown",
+ "id": "5fed3232",
"metadata": {},
"source": [
"## 2. Equilibration\n",
@@ -622,6 +634,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "51a25228",
"metadata": {},
"outputs": [],
"source": [
@@ -636,6 +649,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "1a3cacd2",
"metadata": {
"scrolled": true
},
@@ -663,6 +677,7 @@
},
{
"cell_type": "markdown",
+ "id": "abbfc272",
"metadata": {},
"source": [
"### 2.2 Equilibrate the ion distribution"
@@ -671,6 +686,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "45c444f5",
"metadata": {},
"outputs": [],
"source": [
@@ -682,6 +698,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "e9c7fe2f",
"metadata": {},
"outputs": [],
"source": [
@@ -705,6 +722,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "c45afc27",
"metadata": {},
"outputs": [],
"source": [
@@ -720,6 +738,7 @@
},
{
"cell_type": "markdown",
+ "id": "1f1f7892",
"metadata": {},
"source": [
"Convergence after $t\\sim50$ time units."
@@ -727,10 +746,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "60271c57",
+ "metadata": {},
"source": [
"## 3. Calculate and analyze ion profile\n",
"\n",
@@ -753,12 +770,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "22a22497",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def setup_densityprofile_accumulators(bin_width):\n",
" cations = system.part.select(type=types[\"Cation\"]) \n",
" anions = system.part.select(type=types[\"Anion\"])\n",
@@ -774,13 +792,13 @@
" density_accumulator_anion = espressomd.accumulators.MeanVarianceCalculator(\n",
" obs=density_profile_anion, delta_N=20)\n",
" zs = density_profile_anion.bin_centers()[0, 0, :, 2]\n",
- " return zs, density_accumulator_cation, density_accumulator_anion\n",
- "```"
+ " return zs, density_accumulator_cation, density_accumulator_anion"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "b9a7d815",
"metadata": {},
"outputs": [],
"source": []
@@ -788,6 +806,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "e9843a03",
"metadata": {},
"outputs": [],
"source": [
@@ -797,6 +816,7 @@
},
{
"cell_type": "markdown",
+ "id": "6db1f679",
"metadata": {},
"source": [
"### 3.2 Run the simulation\n",
@@ -807,6 +827,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "c218d24f",
"metadata": {},
"outputs": [],
"source": [
@@ -831,6 +852,7 @@
},
{
"cell_type": "markdown",
+ "id": "b20e4939",
"metadata": {},
"source": [
"### Compare to analytical prediction\n",
@@ -842,6 +864,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ad677e07",
"metadata": {},
"outputs": [],
"source": [
@@ -858,6 +881,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "343d4b27",
"metadata": {},
"outputs": [],
"source": [
@@ -893,6 +917,7 @@
},
{
"cell_type": "markdown",
+ "id": "8c870e94",
"metadata": {},
"source": [
"We see good agreement between our simulation and the meanfield solution of Guy and Chapman. Low density and reasonably low potential make the assumptions of the analytical approach justified."
@@ -900,6 +925,7 @@
},
{
"cell_type": "markdown",
+ "id": "78ee5747",
"metadata": {},
"source": [
"We now check how well the surface charge agrees with Grahame's equation.\n",
@@ -910,6 +936,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8e65a805",
"metadata": {},
"outputs": [],
"source": [
@@ -927,6 +954,7 @@
},
{
"cell_type": "markdown",
+ "id": "d0de3b90",
"metadata": {},
"source": [
"The electric field is readily obtained from the integral \n",
@@ -936,6 +964,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8a6bd126",
"metadata": {},
"outputs": [],
"source": [
@@ -960,6 +989,7 @@
},
{
"cell_type": "markdown",
+ "id": "365290da",
"metadata": {},
"source": [
"We see that the electric field reduces to 0 in the middle of the channel, justifying the assumption that the two electrodes are far enough apart to not influence each other."
@@ -967,6 +997,7 @@
},
{
"cell_type": "markdown",
+ "id": "5f11c2b0",
"metadata": {},
"source": [
"The electric potential can be calculated from $\\phi(z) = \\int_0^z -E(z^\\prime)\\,\\mathrm{d}z^\\prime$."
@@ -975,6 +1006,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "bf3506c3",
"metadata": {},
"outputs": [],
"source": [
@@ -996,6 +1028,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "3ff40269",
"metadata": {},
"outputs": [],
"source": [
@@ -1007,6 +1040,7 @@
},
{
"cell_type": "markdown",
+ "id": "ddc72205",
"metadata": {},
"source": [
"## 4. Differential capacitance\n",
@@ -1019,6 +1053,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ae22f800",
"metadata": {},
"outputs": [],
"source": [
@@ -1055,9 +1090,8 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "scrolled": true
- },
+ "id": "82b62e62",
+ "metadata": {},
"outputs": [],
"source": [
"fig, ax = plt.subplots(figsize=(10, 6))\n",
@@ -1080,6 +1114,7 @@
},
{
"cell_type": "markdown",
+ "id": "85c325bc",
"metadata": {},
"source": [
"For small potential drops, one observes the expected Poisson–Boltzmann behavior. It also agrees with the linearized solution $\\sigma(\\phi_\\mathrm{s}) = \\varepsilon_r\\varepsilon_0 \\frac{\\phi_\\mathrm{s}}{2 \\lambda_\\mathrm{D}}$.\n",
@@ -1088,6 +1123,7 @@
},
{
"cell_type": "markdown",
+ "id": "2e97a52a",
"metadata": {},
"source": [
"## References\n",
@@ -1126,5 +1162,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 4
+ "nbformat_minor": 5
}
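
The field and potential integrals discussed in the notebook above reduce to cumulative trapezoidal sums over the measured charge-density profile. A sketch under that reading; the array names `zs`, `charge_density` and the prefactor `EPSILON_0 * EPSILON_R` are assumptions following the notebook's conventions, and the boundary value of the field set by the surface charge is omitted:

```python
import numpy as np

def cumulative_trapezoid(y, x):
    """Cumulative trapezoidal integral of y(x), starting at zero."""
    increments = 0.5 * (y[1:] + y[:-1]) * np.diff(x)
    return np.concatenate(([0.0], np.cumsum(increments)))

# E(z) from Gauss's law (up to the boundary value at the electrode):
# E_field = cumulative_trapezoid(charge_density, zs) / (EPSILON_0 * EPSILON_R)
# phi(z) = -integral of E(z') dz':
# potential = -cumulative_trapezoid(E_field, zs)
```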
diff --git a/doc/tutorials/electrokinetics/electrokinetics.ipynb b/doc/tutorials/electrokinetics/electrokinetics.ipynb
index 28510f0d347..65af9f29b3c 100644
--- a/doc/tutorials/electrokinetics/electrokinetics.ipynb
+++ b/doc/tutorials/electrokinetics/electrokinetics.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "594dd6d4",
"metadata": {},
"source": [
"# Electrokinetics\n",
@@ -14,6 +15,7 @@
},
{
"cell_type": "markdown",
+ "id": "c6fb9224",
"metadata": {},
"source": [
"## 1. Introduction"
@@ -21,6 +23,7 @@
},
{
"cell_type": "markdown",
+ "id": "2f74f4f3",
"metadata": {},
"source": [
"In this tutorial we're looking at the electrokinetics feature of ESPResSo, which allows us to describe the motion of potentially charged chemical species solvated in a fluid on a continuum level. The govering equations for the solvent are known as the Poisson-Nernst-Planck equations, which is the combination of the electrostatic Poisson equation and the dynamics of the chemical species described by the Nernst-Planck equation. For the advection we solve the incompressible Navier-Stokes equation. The total set of equations is given by\n",
@@ -38,6 +41,7 @@
},
{
"cell_type": "markdown",
+ "id": "4e0abda7",
"metadata": {},
"source": [
"# 2. Advection-Diffusion equation in 2D"
@@ -45,6 +49,7 @@
},
{
"cell_type": "markdown",
+ "id": "192c4793",
"metadata": {},
"source": [
"The first system that is simulated in this tutorial is the simple advection-diffusion of a drop of uncharged chemical species in a constant velocity field. To keep the computation time small, we restrict ourselves to a 2D problem, but the algorithm is also capable of solving the 3D advection-diffusion equation. Furthermore, we can also skip solving the electrostatic Poisson equation, since there are is no charged species present. The equations we solve thus reduce to\n",
@@ -65,6 +70,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "36a4e973",
"metadata": {},
"outputs": [],
"source": [
@@ -94,6 +100,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "f69ffd36",
"metadata": {},
"outputs": [],
"source": [
@@ -113,6 +120,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "a3f477c1",
"metadata": {},
"outputs": [],
"source": [
@@ -123,6 +131,7 @@
},
{
"cell_type": "markdown",
+ "id": "61278eec",
"metadata": {},
"source": [
"We use a lattice Boltzmann flow field with constant velocity for advection.\n",
@@ -132,6 +141,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "7c581b2d",
"metadata": {},
"outputs": [],
"source": [
@@ -145,6 +155,7 @@
},
{
"cell_type": "markdown",
+ "id": "14c5a43a",
"metadata": {},
"source": [
"To use the electrokinetics-algorithm in ESPResSo, one needs to create an instance of the `EKContainer`-object and pass it a time step `tau` and Poisson solver `solver`.\n",
@@ -154,6 +165,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "75ea3046",
"metadata": {},
"outputs": [],
"source": [
@@ -163,6 +175,7 @@
},
{
"cell_type": "markdown",
+ "id": "e55f53bd",
"metadata": {},
"source": [
"Now, we can add diffusive species to the container to integrate their dynamics."
@@ -170,10 +183,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "ad79ba7d",
+ "metadata": {},
"source": [
"# Exercise:\n",
"- Create an instance of the [`espressomd.electrokinetics.EKSpecies`]() and add it to the system with [`system.ekcontainer.add()`](https://espressomd.github.io/doc/espressomd.html#espressomd.electrokinetics.EKContainer.add). \n",
@@ -185,30 +196,32 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f98a90f0",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"species = espressomd.electrokinetics.EKSpecies(\n",
" lattice=lattice, density=0.0, kT=KT,\n",
" diffusion=DIFFUSION_COEFFICIENT, valency=0.0,\n",
" advection=True, friction_coupling=True,\n",
" ext_efield=[0., 0., 0.], tau=TAU)\n",
- "system.ekcontainer.add(species)\n",
- "```"
+ "system.ekcontainer.add(species)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "a05467d1",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "7d8eeb11",
"metadata": {},
"source": [
"To compare our simulation to the fundamental solution of the advection-diffusion equation, we need to approximate a delta-droplet, which can be achieved by having a non-zero density only at the center of the domain."
@@ -217,6 +230,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "471a4b0d",
"metadata": {},
"outputs": [],
"source": [
@@ -225,6 +239,7 @@
},
{
"cell_type": "markdown",
+ "id": "e58359bf",
"metadata": {},
"source": [
"Now everything is set and we can finally run the simulation by running the integrator."
@@ -233,6 +248,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "bb7f3fa0",
"metadata": {},
"outputs": [],
"source": [
@@ -241,6 +257,7 @@
},
{
"cell_type": "markdown",
+ "id": "07db0e0f",
"metadata": {},
"source": [
"For comparison, we prepare the analytical solution and show the 2D-density as well as a slice through the center of the droplet."
@@ -249,6 +266,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "f81bdd03",
"metadata": {},
"outputs": [],
"source": [
@@ -271,6 +289,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ecb571a7",
"metadata": {},
"outputs": [],
"source": [
@@ -288,6 +307,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "3b340b5f",
"metadata": {},
"outputs": [],
"source": [
@@ -318,6 +338,7 @@
},
{
"cell_type": "markdown",
+ "id": "af461df0",
"metadata": {},
"source": [
"From the plot one can see that the position of the density-peak matches well. However, one also sees that the droplet in the simulation has spread more than it should. The reason is that the discretization used for the advection term introduces an artifical, additional diffusion to the system. This is a fundamental limitation of the algorithm, which is why it cannot be applied to pure advection problems."
@@ -325,6 +346,7 @@
},
{
"cell_type": "markdown",
+ "id": "f2d218ef",
"metadata": {},
"source": [
"# 3. Electroosmotic flow"
@@ -332,6 +354,7 @@
},
{
"cell_type": "markdown",
+ "id": "58a2b410",
"metadata": {},
"source": [
"The next system in this tutorial is a simple slit pore, as shown in Figure 1. It consists of an infinite plate capacitor with an electrolytic solution trapped in between the plates. The plates of the capactior carry a constant surface charge and the counterions are solvated in the liquid. \n",
@@ -348,6 +371,7 @@
},
{
"cell_type": "markdown",
+ "id": "6178d589",
"metadata": {},
"source": [
"### Analytical solution\n",
@@ -400,6 +424,7 @@
},
{
"cell_type": "markdown",
+ "id": "0d16a45d",
"metadata": {},
"source": [
"### Numerical solution\n",
@@ -410,6 +435,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "71b3226b",
"metadata": {},
"outputs": [],
"source": [
@@ -420,6 +446,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "606c2a43",
"metadata": {},
"outputs": [],
"source": [
@@ -449,6 +476,7 @@
},
{
"cell_type": "markdown",
+ "id": "2cc6c6f8",
"metadata": {},
"source": [
"We can now set up the electrokinetics algorithm as in the first part of the tutorial, starting with the LB-method."
@@ -457,6 +485,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "071a0e13",
"metadata": {},
"outputs": [],
"source": [
@@ -466,6 +495,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "7b6c06dc",
"metadata": {},
"outputs": [],
"source": [
@@ -478,6 +508,7 @@
},
{
"cell_type": "markdown",
+ "id": "970b52a5",
"metadata": {},
"source": [
"Since our species are going to carry a charge now, we need to solve the full electrostatic problem. For that, we have to specify an actual solver."
@@ -485,10 +516,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "4b290c32",
+ "metadata": {},
"source": [
"# Exercise: \n",
"- Set up a Poisson solver for the electrostatic interaction and use it to create an instance of the [EKContainer](https://espressomd.github.io/doc/espressomd.html#espressomd.electrokinetics.EKContainer) \n",
@@ -499,27 +528,29 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b8353ef1",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"eksolver = espressomd.electrokinetics.EKFFT(lattice=lattice, permittivity=PERMITTIVITY,\n",
" single_precision=SINGLE_PRECISION)\n",
- "system.ekcontainer = espressomd.electrokinetics.EKContainer(tau=TAU, solver=eksolver)\n",
- "```"
+ "system.ekcontainer = espressomd.electrokinetics.EKContainer(tau=TAU, solver=eksolver)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "8b14c431",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "895a81e1",
"metadata": {},
"source": [
"To simulate the system, we will use two different ion species: The counterions are propagated in the fluid. The second species will be used to describe the surface charge on the plates and therefore has to be stationary (i.e. no advection, no diffusion)."
@@ -528,6 +559,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8f1809a2",
"metadata": {},
"outputs": [],
"source": [
@@ -540,6 +572,7 @@
},
{
"cell_type": "markdown",
+ "id": "c8e5ae3c",
"metadata": {},
"source": [
"Now we set the initial conditions for the ion densities. The counterions will be initialized with a homogeneous distribution, excluding the cells used as boundaries. The surface charge density is homogeneously distributed in the boundary cells."
@@ -548,6 +581,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "994236fc",
"metadata": {},
"outputs": [],
"source": [
@@ -563,6 +597,7 @@
},
{
"cell_type": "markdown",
+ "id": "c2cb78ce",
"metadata": {},
"source": [
"We now have to specify the boundary conditions. For this, we use ESPResSo's`shapes`."
@@ -571,6 +606,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "c9b7f9c6",
"metadata": {},
"outputs": [],
"source": [
@@ -580,6 +616,7 @@
},
{
"cell_type": "markdown",
+ "id": "29a08da2",
"metadata": {},
"source": [
"At both of them we specify no-flux and zero-density boundary conditions for the counterions. Furthermore, we set a no-slip boundary condition for the fluid."
@@ -587,10 +624,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "08b6386b",
+ "metadata": {},
"source": [
"# Exercise\n",
"At both walls, set\n",
@@ -605,28 +640,30 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2cd50ab9",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"for wall in (wall_left, wall_right):\n",
" ekspecies.add_boundary_from_shape(shape=wall, value=[0., 0., 0.], boundary_type=espressomd.electrokinetics.FluxBoundary)\n",
" ekspecies.add_boundary_from_shape(shape=wall, value=0.0, boundary_type=espressomd.electrokinetics.DensityBoundary)\n",
- " lbf.add_boundary_from_shape(shape=wall, velocity=[0., 0., 0.])\n",
- "```"
+ " lbf.add_boundary_from_shape(shape=wall, velocity=[0., 0., 0.])"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "3502fb6a",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "2486b696",
"metadata": {},
"source": [
"Now we can finally integrate the system and extract the ion density profile, the fluid velocity profile as well as the pressure-tensor profile."
@@ -635,6 +672,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "b4426bfe",
"metadata": {
"scrolled": true
},
@@ -647,6 +685,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "0a9c1aa2",
"metadata": {},
"outputs": [],
"source": [
@@ -661,6 +700,7 @@
},
{
"cell_type": "markdown",
+ "id": "62087ea3",
"metadata": {},
"source": [
"For comparison, we calculate the analytic solution"
@@ -669,6 +709,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ba5959a0",
"metadata": {},
"outputs": [],
"source": [
@@ -692,6 +733,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "6406c3d3",
"metadata": {},
"outputs": [],
"source": [
@@ -703,6 +745,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "d23e8b5b",
"metadata": {},
"outputs": [],
"source": [
@@ -739,6 +782,7 @@
},
{
"cell_type": "markdown",
+ "id": "a58fcfee",
"metadata": {},
"source": [
"In the plots one can see that the analytic solution for the electroosmotic flow matches the simulation very well. "
@@ -746,6 +790,7 @@
},
{
"cell_type": "markdown",
+ "id": "66a80321",
"metadata": {},
"source": [
"### Comparison to pressure-driven flow\n",
@@ -755,6 +800,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "7a23c6f5",
"metadata": {},
"outputs": [],
"source": [
@@ -767,6 +813,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "3c101827",
"metadata": {
"scrolled": true
},
@@ -779,6 +826,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "f12326b5",
"metadata": {},
"outputs": [],
"source": [
@@ -789,6 +837,7 @@
},
{
"cell_type": "markdown",
+ "id": "c33cf2dc",
"metadata": {},
"source": [
"The analytic solution for pressure-driven flow between two infinite parallel plates is known as the Poiseuille flow."
@@ -797,6 +846,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "9c7f9f9f",
"metadata": {},
"outputs": [],
"source": [
@@ -810,6 +860,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "13dd28d8",
"metadata": {},
"outputs": [],
"source": [
@@ -820,6 +871,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "5bed1384",
"metadata": {},
"outputs": [],
"source": [
@@ -856,6 +908,7 @@
},
{
"cell_type": "markdown",
+ "id": "1b00267e",
"metadata": {},
"source": [
"As one can again see, the body force on the fluid did non alter the ion-density profile.\n",
@@ -864,6 +917,7 @@
},
{
"cell_type": "markdown",
+ "id": "30e2df44",
"metadata": {},
"source": [
"To see the difference between the two types of flows, we plot the simulation data together in one plot."
@@ -872,6 +926,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ca814e80",
"metadata": {
"scrolled": true
},
@@ -910,6 +965,7 @@
},
{
"cell_type": "markdown",
+ "id": "7882c507",
"metadata": {},
"source": [
"Looking at the fluid velocity plot, one can see that the electroosmotic flow profile flattens significantly faster towards the center of the channel when compared to the pressure driven flow. The reason for this is the accumulation of the counterion-density towards the oppositely charged plates. Here, the driving electric field causes the highest force on the fluid, which decays towards the center of the channel. In contrast, the Poiseuille-flow is driven by a constant, uniform driving force."
@@ -917,6 +973,7 @@
},
{
"cell_type": "markdown",
+ "id": "5d5b8ded",
"metadata": {},
"source": [
"# 4. Reaction in turbulent flow"
@@ -924,6 +981,7 @@
},
{
"cell_type": "markdown",
+ "id": "0f3280a6",
"metadata": {},
"source": [
"To showcase the reaction feature of our electrokinetics algorithm, we simulate a simple reaction in complex flow.\n",
@@ -939,6 +997,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "b0de9bf5",
"metadata": {},
"outputs": [],
"source": [
@@ -949,6 +1008,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "f180a9d2",
"metadata": {},
"outputs": [],
"source": [
@@ -973,6 +1033,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8883964f",
"metadata": {},
"outputs": [],
"source": [
@@ -981,14 +1042,14 @@
" lattice=lattice, density=DENSITY_FLUID, kinematic_viscosity=VISCOSITY_KINEMATIC,\n",
" tau=TAU, ext_force_density=EXT_FORCE_DENSITY, kT=KT, seed=42)\n",
"system.lb = lbf\n",
- "system.thermostat.set_lb(LB_fluid=lbf, seed=42)",
- "\n",
+ "system.thermostat.set_lb(LB_fluid=lbf, seed=42)\n",
"eksolver = espressomd.electrokinetics.EKNone(lattice=lattice)\n",
"system.ekcontainer = espressomd.electrokinetics.EKContainer(tau=TAU, solver=eksolver)"
]
},
{
"cell_type": "markdown",
+ "id": "83b3763a",
"metadata": {},
"source": [
"Now we can focus on the reactions. In this tutorial we choose the simple case of $A + B \\rightarrow C$, which means that equal parts of the educt species $A$ and $B$ can turn into the product species $C$.\n",
@@ -1003,6 +1064,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "80b66ddf",
"metadata": {},
"outputs": [],
"source": [
@@ -1015,6 +1077,7 @@
},
{
"cell_type": "markdown",
+ "id": "5573021f",
"metadata": {},
"source": [
"We create each involved species and directly specify their boundary-conditions for the domain-boundaries. We set the initial density of the species to 0 and also add Dirichlet boundary conditions of zero density at both the inlet and the outlet of the system."
@@ -1023,6 +1086,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "5e514b85",
"metadata": {},
"outputs": [],
"source": [
@@ -1063,38 +1127,38 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "1b533530",
+ "metadata": {},
"source": [
"# Exercise:\n",
"- Create an instance of [`EKBulkReaction`](https://espressomd.github.io/doc/espressomd.html#espressomd.electrokinetics.EKBulkReaction) using the previously created `reactants` and activate the reaction by adding it to [`system.ekcontainer.reactions`](https://espressomd.github.io/doc/espressomd.html#espressomd.electrokinetics.EKContainer.reactions).\n"
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "94ce97c1",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"reaction = espressomd.electrokinetics.EKBulkReaction(\n",
" reactants=reactants, coefficient=REACTION_RATE_CONSTANT, lattice=lattice, tau=TAU)\n",
"\n",
- "system.ekcontainer.reactions.add(reaction)\n",
- "```"
+ "system.ekcontainer.reactions.add(reaction)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "0d50af30",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "2b6cac10",
"metadata": {},
"source": [
"The next thing to add to the system is the cylindrical obstacles, which act as the boundaries for the Kármán vortices to form. These are placed close to the inlet of the system and also act as impenetrable boundaries for the species.\n",
@@ -1104,6 +1168,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "418bcd31",
"metadata": {},
"outputs": [],
"source": [
@@ -1131,6 +1196,7 @@
},
{
"cell_type": "markdown",
+ "id": "128652bd",
"metadata": {},
"source": [
"Up to this point there is no species present anywhere in the system and also no way for it to enter the system. Since the reaction is irreversible in our setup, we need to introduce some density of both the educt species to the system.\n",
@@ -1140,6 +1206,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "d4fae62a",
"metadata": {},
"outputs": [],
"source": [
@@ -1152,6 +1219,7 @@
},
{
"cell_type": "markdown",
+ "id": "9f8b9ac6",
"metadata": {},
"source": [
"With this, the system is now finally complete and we can start the integration. To see the system evolve, we will render a movie from the timeseries of the system. For that we have to setup some helper functions for the plotting, which are beyond the scope of this tutorial."
@@ -1160,6 +1228,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "aa053843",
"metadata": {},
"outputs": [],
"source": [
@@ -1188,6 +1257,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "caa469e0",
"metadata": {},
"outputs": [],
"source": [
@@ -1268,6 +1338,7 @@
},
{
"cell_type": "markdown",
+ "id": "52005dba",
"metadata": {},
"source": [
"Looking at the movie of the species densities one can see that the fluid flow advects the educt species from their source locations past the cylinders into the system. Here, they start to mix and react, such that the product forms.\n",
@@ -1295,5 +1366,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 5
}
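
The analytical reference mentioned in the advection-diffusion part of the notebook is the fundamental solution of the 2D advection-diffusion equation; a minimal sketch (function and parameter names are illustrative, not the notebook's exact code):

```python
import numpy as np

def advected_gaussian_2d(x, y, t, D, ux, uy, x0, y0, M=1.0):
    """Point release of total amount M, advected with (ux, uy), diffusing with D."""
    dx = x - x0 - ux * t
    dy = y - y0 - uy * t
    return M / (4.0 * np.pi * D * t) * np.exp(-(dx**2 + dy**2) / (4.0 * D * t))
```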
diff --git a/doc/tutorials/error_analysis/error_analysis_part1.ipynb b/doc/tutorials/error_analysis/error_analysis_part1.ipynb
index 6261862ebd9..bded433a5ce 100644
--- a/doc/tutorials/error_analysis/error_analysis_part1.ipynb
+++ b/doc/tutorials/error_analysis/error_analysis_part1.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "2df81180",
"metadata": {},
"source": [
"# Tutorial: Error Estimation - Part 1 (Introduction and Binning Analysis)"
@@ -9,6 +10,7 @@
},
{
"cell_type": "markdown",
+ "id": "1d8b8af7",
"metadata": {},
"source": [
"## Table of contents\n",
@@ -21,6 +23,7 @@
},
{
"cell_type": "markdown",
+ "id": "ee6708e7",
"metadata": {},
"source": [
"## Data generation\n",
@@ -31,6 +34,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "137dbad6",
"metadata": {
"scrolled": true
},
@@ -92,6 +96,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "6809e207",
"metadata": {
"scrolled": true
},
@@ -109,6 +114,7 @@
},
{
"cell_type": "markdown",
+ "id": "1269a47b",
"metadata": {},
"source": [
"## Introduction\n",
@@ -153,6 +159,7 @@
},
{
"cell_type": "markdown",
+ "id": "6153fa74",
"metadata": {},
"source": [
"## Uncorrelated samples\n",
@@ -174,6 +181,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "3259bc07",
"metadata": {},
"outputs": [],
"source": [
@@ -187,6 +195,7 @@
},
{
"cell_type": "markdown",
+ "id": "1f2b5888",
"metadata": {},
"source": [
"One can clearly see that each sample lies in the vicinity of the previous one.\n",
@@ -197,6 +206,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "d51a25c0",
"metadata": {},
"outputs": [],
"source": [
@@ -211,6 +221,7 @@
},
{
"cell_type": "markdown",
+ "id": "d22288e6",
"metadata": {},
"source": [
"However, you should not trust your eye in deciding whether or not a time series is correlated. In fact, when running molecular dynamics simulations, your best guess is to always assume that samples are correlated, and that you should use one of the following techniques for statistical analysis, and rather not just use equation (2)."
@@ -218,6 +229,7 @@
},
{
"cell_type": "markdown",
+ "id": "6275d654",
"metadata": {},
"source": [
"## Binning analysis\n",
@@ -232,6 +244,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "747fe22a",
"metadata": {},
"outputs": [],
"source": [
@@ -240,10 +253,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "2256078f",
+ "metadata": {},
"source": [
"#### Exercise\n",
"* Determine the maximally possible number of bins of size ```BIN_SIZE``` with the data in ```time_series_1```, and store it in a variable ```N_BINS```.\n",
@@ -252,52 +263,52 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bd4974a4",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"N_BINS = N_SAMPLES // BIN_SIZE\n",
"bin_avgs = np.zeros(N_BINS)\n",
"for i in range(N_BINS):\n",
- " bin_avgs[i] = np.average(time_series_1[i * BIN_SIZE:(i + 1) * BIN_SIZE])\n",
- "```"
+ " bin_avgs[i] = np.average(time_series_1[i * BIN_SIZE:(i + 1) * BIN_SIZE])"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "68bcfd80",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "82cfd09e",
+ "metadata": {},
"source": [
"#### Exercise\n",
"Compute the average of all bin averages and store it in ```avg```. This is the overall average, our best guess for the measured quantity. Furthermore, compute the standard error of the mean using equations (1) and (2) from the values in ```bin_avgs``` and store it in ```sem```."
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a2d52f1b",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"avg = np.average(bin_avgs)\n",
- "sem = np.sqrt(np.sum((bin_avgs - avg)**2) / (N_BINS - 1.5) / N_BINS)\n",
- "```"
+ "sem = np.sqrt(np.sum((bin_avgs - avg)**2) / (N_BINS - 1.5) / N_BINS)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "9ab6b4c5",
"metadata": {},
"outputs": [],
"source": []
@@ -305,6 +316,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "944e116f",
"metadata": {},
"outputs": [],
"source": [
@@ -314,6 +326,7 @@
},
{
"cell_type": "markdown",
+ "id": "9071e349",
"metadata": {},
"source": [
"Now we already have an estimate on how precise our simulation result is. But how do we know if we chose the appropriate bin size? The answer is, we can perform binning analysis for many different bin sizes and check when the SEM converges. For that we would like to define a function that does the binning analysis in one go."
@@ -321,55 +334,53 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "f26a8c23",
+ "metadata": {},
"source": [
"#### Exercise\n",
"Define a function called ```do_binning_analysis``` that takes as arguments ```data``` (a numpy array containing the samples) and ```bin_size``` and returns the estimated SEM. You can reuse your code from the previous exercises and adapt it to be part of the function."
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d0437acb",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def do_binning_analysis(data, bin_size):\n",
" n_samples = len(data)\n",
" n_bins = n_samples // bin_size\n",
" bin_avgs = np.mean(data[:n_bins * bin_size].reshape((n_bins, -1)), axis=1)\n",
- " return np.std(bin_avgs, ddof=1.5) / np.sqrt(n_bins)\n",
- "```"
+ " return np.std(bin_avgs, ddof=1.5) / np.sqrt(n_bins)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "c196eb79",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "2d64004f",
+ "metadata": {},
"source": [
"#### Exercise\n",
"Now take the data in ```time_series_1``` and perform binning analysis for bin sizes from 3 up to 5000 and plot the estimated SEMs against the bin size with logarithmic x axis. Your SEM estimates should be stored in a numpy array called ```sems```."
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f603d820",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"sizes = np.arange(3, 5001, dtype=int)\n",
"sems = np.zeros(5001 - 3, dtype=float)\n",
"for s in range(len(sizes)):\n",
@@ -380,19 +391,20 @@
"plt.xscale(\"log\")\n",
"plt.xlabel(\"$N_B$\")\n",
"plt.ylabel(\"SEM\")\n",
- "plt.show()\n",
- "```"
+ "plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "d84eb650",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
+ "id": "d97952eb",
"metadata": {},
"source": [
"You should see that the series converges to a value between 0.04 and 0.05, before transitioning into a noisy tail. The tail becomes increasingly noisy, because as the block size increases, the number of blocks decreases, thus resulting in worse statistics.\n",
@@ -403,6 +415,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "9a19d01b",
"metadata": {},
"outputs": [],
"source": [
@@ -443,6 +456,7 @@
},
{
"cell_type": "markdown",
+ "id": "06fd9f75",
"metadata": {},
"source": [
"Even though the fit is not perfect, it suffices to give us the position of the asymptote, which is the final estimate for the standard error of the mean. You can see that binning analysis, in fact, managed to estimate the SEM very precisely compared to the analytical solution. This illustrates that most of the time, binning analysis will give you a very reasonable estimate for the SEM, and in fact, is often used in practice because of its simplicity.\n",
@@ -453,6 +467,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "33e71452",
"metadata": {
"scrolled": true
},
@@ -479,6 +494,7 @@
},
{
"cell_type": "markdown",
+ "id": "bb459079",
"metadata": {},
"source": [
"Even though we have the exact same number of samples, we cannot see the binning analysis converge. The SEM simply cannot be determined. Usually, this is due to very long correlations, and can only be compensated by simulating for a longer time.\n",
@@ -489,6 +505,7 @@
},
{
"cell_type": "markdown",
+ "id": "c20159bc",
"metadata": {},
"source": [
"## References\n",
@@ -516,5 +533,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 4
+ "nbformat_minor": 5
}
diff --git a/doc/tutorials/error_analysis/error_analysis_part2.ipynb b/doc/tutorials/error_analysis/error_analysis_part2.ipynb
index 0dfda3cc48b..d2060d7beca 100644
--- a/doc/tutorials/error_analysis/error_analysis_part2.ipynb
+++ b/doc/tutorials/error_analysis/error_analysis_part2.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "a926fb8f",
"metadata": {},
"source": [
"# Tutorial: Error Estimation - Part 2 (Autocorrelation Analysis)"
@@ -9,6 +10,7 @@
},
{
"cell_type": "markdown",
+ "id": "8bb2141b",
"metadata": {},
"source": [
"## Table of contents\n",
@@ -21,6 +23,7 @@
},
{
"cell_type": "markdown",
+ "id": "800da589",
"metadata": {},
"source": [
"## Data generation\n",
@@ -31,6 +34,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "fc23ce80",
"metadata": {},
"outputs": [],
"source": [
@@ -90,6 +94,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "d816633f",
"metadata": {},
"outputs": [],
"source": [
@@ -105,6 +110,7 @@
},
{
"cell_type": "markdown",
+ "id": "c8f31417",
"metadata": {},
"source": [
"## Introduction\n",
@@ -157,6 +163,7 @@
},
{
"cell_type": "markdown",
+ "id": "1375526d",
"metadata": {},
"source": [
"## Computing the auto-covariance function\n",
@@ -176,22 +183,21 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "710641e1",
+ "metadata": {},
"source": [
"#### Exercise\n",
"Compute the auto-covariance function of the data in `time_series_1` using the estimator in equation (6) and store it into a numpy array called `autocov`. Compute it for all $j$ from `0` up to `999`. Plot it against $j$."
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "88730cb3",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"# naive Python solution\n",
"autocov = np.zeros(300)\n",
"avg = np.average(time_series_1)\n",
@@ -205,13 +211,13 @@
"plt.plot(autocov)\n",
"plt.xlabel(\"lag time $j$\")\n",
"plt.ylabel(\"$\\hat{K}^{XX}_j$\")\n",
- "plt.show()\n",
- "```"
+ "plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "d77368ef",
"metadata": {
"scrolled": true
},
@@ -220,6 +226,7 @@
},
{
"cell_type": "markdown",
+ "id": "4a59edae",
"metadata": {},
"source": [
"Depending on your implementation, this computation might have taken a significant amount of time (up to a couple tens of seconds). When doing a lot of these computations, using highly optimized routines for numerics can save a lot of time. The following example shows how to utilize the common Numpy package to do the job quicker."
@@ -228,6 +235,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "59d80df9",
"metadata": {},
"outputs": [],
"source": [
@@ -249,6 +257,7 @@
},
{
"cell_type": "markdown",
+ "id": "1319b798",
"metadata": {},
"source": [
"We can see that the auto-covariance function starts at a high value and decreases quickly into a long noisy tail which fluctuates around zero. The high values at short lag times indicate that there are strong correlations at short time scales, as expected. However, even though the tail looks uninteresting, it can bear important information about the statistics of your data. Small systematic deviations from 0 in the tail can be a hint that long-term correlations exist in your system. On the other hand, if there is no sign of a systematic deviation from 0 in the tail, this usually means that the correlation is decaying well within the simulation time, and that the statistics are good enough to estimate an error. In the above example, the correlation quickly decays to zero. Despite the noise in the tail, the statistics seem very reasonable."
@@ -256,6 +265,7 @@
},
{
"cell_type": "markdown",
+ "id": "eb3daafc",
"metadata": {},
"source": [
"## Autocorrelation time\n",
@@ -266,6 +276,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "e0434e74",
"metadata": {
"scrolled": true
},
@@ -302,6 +313,7 @@
},
{
"cell_type": "markdown",
+ "id": "4aaedd58",
"metadata": {},
"source": [
"Since the auto-covariance function is very well matched with an exponential, this analysis already gives us a reasonable estimate of the autocorrelation time. Here we have the luxury to have an analytical ACF at hand which describes the statistics of the simple AR(1) process, which generated our simulation data. It is in fact exponential and agrees very well with the numerical ACF. In practice, however, you will neither know an analytical ACF, nor know if the ACF is exponential, at all. In many systems, the ACF is more or less exponential, but this is not necessarily the case.\n",
@@ -320,6 +332,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "8825cdfb",
"metadata": {},
"outputs": [],
"source": [
@@ -345,6 +358,7 @@
},
{
"cell_type": "markdown",
+ "id": "0ff9c19b",
"metadata": {},
"source": [
"In this plot, we have the analytical solution at hand, which is a luxury not present in real applications. For the analysis, we therefore need to act as if there was no analytic solution:\n",
@@ -361,6 +375,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "ad9d5b6d",
"metadata": {
"scrolled": true
},
@@ -390,6 +405,7 @@
},
{
"cell_type": "markdown",
+ "id": "7f240694",
"metadata": {},
"source": [
"Using this value of $j_\\mathrm{max}$, we can calculate the integrated autocorrelation time $\\hat{\\tau}_{X, \\mathrm{int}}$ and estimate the SEM with equation (5)."
@@ -398,6 +414,7 @@
{
"cell_type": "code",
"execution_count": null,
+ "id": "67ed2850",
"metadata": {},
"outputs": [],
"source": [
@@ -415,10 +432,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "a1f3d619",
+ "metadata": {},
"source": [
"#### Exercise\n",
"* Write a function called `autocorrelation_analysis`, which takes as arguments\n",
@@ -440,12 +455,13 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "175ea951",
+ "metadata": {},
+ "outputs": [],
"source": [
- "```python\n",
+ "# SOLUTION CELL\n",
"def autocorrelation_analysis(data, C, window):\n",
" # initial processing\n",
" data_size = len(data)\n",
@@ -506,13 +522,13 @@
" return sem\n",
"\n",
"\n",
- "sem_2 = autocorrelation_analysis(time_series_2, 5, 20000)\n",
- "```"
+ "sem_2 = autocorrelation_analysis(time_series_2, 5, 20000)"
]
},
{
"cell_type": "code",
"execution_count": null,
+ "id": "8873abe1",
"metadata": {
"scrolled": false
},
@@ -521,10 +537,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
+ "id": "63db4c7a",
+ "metadata": {},
"source": [
"#### Exercise\n",
"Interpret the results of the analysis of `time_series_2`."
@@ -532,9 +546,8 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "solution2": "hidden"
- },
+ "id": "7bdb3d51",
+ "metadata": {},
"source": [
"**Interpretation of the analysis**\n",
"\n",
@@ -543,6 +556,7 @@
},
{
"cell_type": "markdown",
+ "id": "89a99dc9",
"metadata": {},
"source": [
"## References\n",
@@ -573,5 +587,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 4
+ "nbformat_minor": 5
}
diff --git a/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb
index e9cfb8ebec8..314c02e45a1 100644
--- a/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb
+++ b/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb
@@ -2,6 +2,7 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "2f9d7d77",
"metadata": {},
"source": [
"# Ferrofluid - Part 1"
@@ -9,6 +10,7 @@
},
{
"cell_type": "markdown",
+ "id": "0a9f6cb8",
"metadata": {},
"source": [
"## Table of Contents\n",
@@ -26,6 +28,7 @@
},
{
"cell_type": "markdown",
+ "id": "7223c854",
"metadata": {},
"source": [
"## Introduction"
@@ -33,6 +36,7 @@
},
{
"cell_type": "markdown",
+ "id": "12322446",
"metadata": {},
"source": [
"Ferrofluids are colloidal suspensions of ferromagnetic single-domain particles in a liquid carrier. As the single particles contain only one magnetic domain, they can be seen as small permanent magnets. To prevent agglomeration of the particles, due to van-der-Waals or magnetic attraction, they are usually sterically or electrostatically stabilized (see figure 1). The former is achieved by adsorption of long chain molecules onto the particle surface, the latter by adsorption of charged coating particles. The size of the ferromagnetic particles are in the region of 10 nm. With the surfactant layer added they can reach a size of a few hundred nanometers. Have in mind that if we refer to the particle diameter $\\sigma$ we mean the diameter of the magnetic core plus two times the thickness of the surfactant layer.\n",
@@ -46,6 +50,7 @@
},
{
"cell_type": "markdown",
+ "id": "eb5f20dd",
"metadata": {},
"source": [
"