diff --git a/.github/workflows/push_pull.yml b/.github/workflows/push_pull.yml index 9044cd67cce..1a2eb4e39fd 100644 --- a/.github/workflows/push_pull.yml +++ b/.github/workflows/push_pull.yml @@ -35,7 +35,7 @@ jobs: debian: runs-on: ubuntu-latest container: - image: ghcr.io/espressomd/docker/debian:fbdf2f2f9d76b761c0aa1308f17fb17e38501850-base-layer + image: ghcr.io/espressomd/docker/debian:339903979196fd7e72127f2cb5bfb27759d129f9-base-layer credentials: username: ${{ github.actor }} password: ${{ secrets.github_token }} @@ -74,7 +74,7 @@ jobs: runs-on: ubuntu-latest if: ${{ github.repository == 'espressomd/espresso' }} container: - image: ghcr.io/espressomd/docker/ubuntu-wo-dependencies:fbdf2f2f9d76b761c0aa1308f17fb17e38501850-base-layer + image: ghcr.io/espressomd/docker/ubuntu-wo-dependencies:339903979196fd7e72127f2cb5bfb27759d129f9-base-layer credentials: username: ${{ github.actor }} password: ${{ secrets.github_token }} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index baf65493f2c..e80fe191cf5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,4 +1,4 @@ -image: ghcr.io/espressomd/docker/ubuntu-22.04:fbdf2f2f9d76b761c0aa1308f17fb17e38501850 +image: ghcr.io/espressomd/docker/ubuntu-22.04:339903979196fd7e72127f2cb5bfb27759d129f9 stages: - prepare @@ -144,7 +144,7 @@ no_rotation: fedora:36: <<: *global_job_definition stage: build - image: ghcr.io/espressomd/docker/fedora:fbdf2f2f9d76b761c0aa1308f17fb17e38501850 + image: ghcr.io/espressomd/docker/fedora:339903979196fd7e72127f2cb5bfb27759d129f9 variables: with_cuda: 'false' with_gsl: 'false' @@ -390,6 +390,7 @@ installation: - bash maintainer/CI/build_cmake.sh - cd build - make install + - cmake . -D ESPRESSO_BUILD_TESTS=ON # get path of installed files - CI_INSTALL_DIR="/tmp/espresso-unit-tests" - CI_INSTALL_PYTHON_PATH=$(dirname $(find "${CI_INSTALL_DIR}/lib" -name espressomd)) @@ -398,9 +399,9 @@ installation: - cp -r "src/python/object_in_fluid" "${CI_INSTALL_PYTHON_PATH}/object_in_fluid" # run all tests with the installed files - sed -i "s|$(pwd)/pypresso|${CI_INSTALL_DIR}/bin/pypresso|" testsuite/{python,scripts/samples,scripts/tutorials}/CTestTestfile.cmake - - make -j ${CI_CORES} check_python_skip_long - - make -j ${CI_CORES} check_samples - - make -j 2 check_tutorials + - make check_python_skip_long + - make check_samples + - make check_tutorials tags: - espresso - cuda diff --git a/doc/sphinx/conf.py.in b/doc/sphinx/conf.py.in index 3f9908c61b3..71371752121 100644 --- a/doc/sphinx/conf.py.in +++ b/doc/sphinx/conf.py.in @@ -24,7 +24,7 @@ sys.path.insert(0, '@CMAKE_BINARY_DIR@/src/python') # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '2.3' +needs_sphinx = '3.5' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom diff --git a/doc/sphinx/installation.rst b/doc/sphinx/installation.rst index 83b894d1b70..0a5edce42fe 100644 --- a/doc/sphinx/installation.rst +++ b/doc/sphinx/installation.rst @@ -175,7 +175,7 @@ To generate the Sphinx documentation, install the following packages: .. 
code-block:: bash - pip3 install --user -c requirements.txt \ + python3 -m pip install --user -c requirements.txt \ sphinx sphinxcontrib-bibtex sphinx-toggleprompt To generate the Doxygen documentation, install the following packages: @@ -197,40 +197,36 @@ To run the samples and tutorials, start by installing the following packages: The tutorials are written in the `Notebook Format `__ -:cite:`kluyver16a` version <= 4.4 and can be executed by any of these tools: +:cite:`kluyver16a` version 4.5 and can be executed by any of these tools: -* `Jupyter Notebook `__ * `JupyterLab `__ -* `IPython `__ (not recommended) * `VS Code Jupyter `__ +* `Jupyter Notebook `__ +* `IPython `__ (not recommended) To check whether one of them is installed, run these commands: .. code-block:: bash - jupyter notebook --version jupyter lab --version + jupyter notebook --version ipython --version code --version If you don't have any of these tools installed and aren't sure which one -to use, we recommend installing the historic Jupyter Notebook, since the -|es| tutorials have been designed with the ``exercise2`` plugin in mind. - -To use Jupyter Notebook, install the following packages: +to use, we recommend installing JupyterLab: .. code-block:: bash - pip3 install --user 'nbformat==5.1.3' 'nbconvert==6.4.5' 'notebook==6.4.8' 'jupyter_contrib_nbextensions==0.5.1' - jupyter contrib nbextension install --user - jupyter nbextension enable rubberband/main - jupyter nbextension enable exercise2/main + python3 -m pip install --user -c requirements.txt \ + nbformat nbconvert jupyterlab -Alternatively, to use JupyterLab, install the following packages: +If you prefer the look and feel of Jupyter Classic, install the following: .. code-block:: bash - pip3 install --user nbformat notebook jupyterlab + python3 -m pip install --user -c requirements.txt \ + nbformat nbconvert jupyterlab nbclassic Alternatively, to use VS Code Jupyter, install the following extensions: @@ -316,7 +312,7 @@ Run the following commands: doxygen gsl numpy scipy ipython jupyter freeglut brew install hdf5-mpi brew link --force cython - pip install -c requirements.txt PyOpenGL matplotlib + python -m pip install -c requirements.txt PyOpenGL matplotlib .. _Quick installation: diff --git a/doc/sphinx/running.rst b/doc/sphinx/running.rst index c95059f9293..d99937f7ff7 100644 --- a/doc/sphinx/running.rst +++ b/doc/sphinx/running.rst @@ -6,8 +6,19 @@ Running a simulation |es| is implemented as a Python module. This means that you need to write a python script for any task you want to perform with |es|. In this chapter, the basic structure of the interface will be explained. For a practical -introduction, see the tutorials, which are also part of the -distribution. +introduction, see the tutorials, which are also part of the distribution. + +Most users should consider building and then installing |es| locally. +In this way, |es| behaves like any regular Python package and will +be recognized by the Python interpreter and Jupyter notebooks. + +Most developers prefer the ``pypresso`` resp. ``ipypresso`` wrapper scripts, +which export the build folder into the ``$PYTHONPATH`` environment variable +and then call ``python`` resp. ``jupyter``. They also introduce extra command +line options to help developers run simulations inside a debugger. +Command line examples in this chapter use the wrapper scripts instead of the +Python and Jupyter programs, although they are perfectly interchangeable +when not using a debugger. .. 
_Running es: @@ -71,40 +82,25 @@ in the build folder, do: make tutorials -The tutorials contain solutions hidden with the ``exercise2`` NB extension. -Since this extension is only available for Jupyter Notebook, JupyterLab -users need to convert the tutorials: - -.. code-block:: bash - - for f in doc/tutorials/*/*.ipynb; do - ./pypresso doc/tutorials/convert.py exercise2 --to-jupyterlab ${f} - done - -Likewise, VS Code Jupyter users need to convert the tutorials: - -.. code-block:: bash - - for f in doc/tutorials/*/*.ipynb; do - ./pypresso doc/tutorials/convert.py exercise2 --to-vscode-jupyter ${f} - done +The tutorials contain solutions hidden inside disclosure boxes. +Click on "Show solution" to reveal them. To interact with notebooks, move to the directory containing the tutorials and call the ``ipypresso`` script to start a local Jupyter session. -For Jupyter Notebook and IPython users: +For JupyterLab users: .. code-block:: bash cd doc/tutorials - ../../ipypresso notebook + ../../ipypresso lab -For JupyterLab users: +For Jupyter Classic users: .. code-block:: bash cd doc/tutorials - ../../ipypresso lab + ../../ipypresso nbclassic For VS Code Jupyter users, no action is needed if ``pypresso`` was set as the interpreter path (see details in :ref:`Running inside an IDE`). @@ -129,29 +125,15 @@ will exit the Python interpreter and Jupyter will notify you that the current Python kernel stopped. If a cell takes too long to execute, you may interrupt it with the stop button. -Solutions cells are created using the ``exercise2`` plugin from nbextensions. -To prevent solution code cells from running when clicking on "Run All", these -code cells need to be converted to Markdown cells and fenced with `````python`` -and ```````. +Solutions cells are marked up with the code comment ``# SOLUTION CELL`` +(must be on the first line). In the build folder, these solution cells +will be automatically converted to Markdown cells. To close the Jupyter session, go to the terminal where it was started and use the keyboard shortcut Ctrl+C twice. -When starting a Jupyter session, you may see the following warning in the -terminal: - -.. code-block:: none - - [TerminalIPythonApp] WARNING | Subcommand `ipython notebook` is deprecated and will be removed in future versions. - [TerminalIPythonApp] WARNING | You likely want to use `jupyter notebook` in the future - -This only means |es| was compiled with IPython instead of Jupyter. If Jupyter -is installed on your system, the notebook will automatically close IPython and -start Jupyter. To recompile |es| with Jupyter, provide ``cmake`` with the flag -``-D IPYTHON_EXECUTABLE=$(which jupyter)``. - -You can find the official Jupyter documentation at -https://jupyter.readthedocs.io/en/latest/running.html +You can find the official JupyterLab documentation at +https://jupyterlab.readthedocs.io/en/latest/user/interface.html .. 
_Running inside an IDE: diff --git a/doc/tutorials/CMakeLists.txt b/doc/tutorials/CMakeLists.txt index fc0ee979ba6..18798d65b63 100644 --- a/doc/tutorials/CMakeLists.txt +++ b/doc/tutorials/CMakeLists.txt @@ -62,8 +62,8 @@ function(NB_EXPORT) "${NB_FILE};${NB_EXPORT_ADD_SCRIPTS};${CMAKE_BINARY_DIR}/doc/tutorials/convert.py;${CMAKE_BINARY_DIR}/testsuite/scripts/importlib_wrapper.py" COMMAND ${CMAKE_BINARY_DIR}/pypresso - ${CMAKE_BINARY_DIR}/doc/tutorials/convert.py ci --execute --exercise2 - --remove-empty-cells --input ${NB_FILE} --output ${NB_FILE_RUN} + ${CMAKE_BINARY_DIR}/doc/tutorials/convert.py ci --execute + --prepare-for-html --input ${NB_FILE} --output ${NB_FILE_RUN} --substitutions ${NB_EXPORT_VAR_SUBST} --scripts ${NB_EXPORT_ADD_SCRIPTS}) else() @@ -71,22 +71,14 @@ function(NB_EXPORT) endif() add_custom_command( - OUTPUT ${HTML_FILE} - DEPENDS ${NB_FILE_RUN};${NB_EXPORT_ADD_SCRIPTS} - COMMAND - ${CMAKE_BINARY_DIR}/pypresso ${CMAKE_BINARY_DIR}/doc/tutorials/convert.py - ci --exercise2 --input ${NB_FILE_RUN} --output ${NB_FILE_RUN}~ + OUTPUT ${HTML_FILE} DEPENDS ${NB_FILE_RUN};${NB_EXPORT_ADD_SCRIPTS} COMMAND ${IPYTHON_EXECUTABLE} nbconvert --to "html" --output ${HTML_FILE} - ${NB_FILE_RUN}~) + ${NB_FILE_RUN}) add_custom_command( - OUTPUT ${PY_FILE} - DEPENDS ${NB_FILE} - COMMAND - ${CMAKE_BINARY_DIR}/pypresso ${CMAKE_BINARY_DIR}/doc/tutorials/convert.py - ci --exercise2 --input ${NB_FILE} --output ${NB_FILE}~ + OUTPUT ${PY_FILE} DEPENDS ${NB_FILE} COMMAND ${IPYTHON_EXECUTABLE} nbconvert --to "python" --output ${PY_FILE} - ${NB_FILE}~) + ${NB_FILE}) add_custom_target("${NB_EXPORT_TARGET}_html" DEPENDS ${HTML_FILE} ${DEPENDENCY_OF_TARGET}) diff --git a/doc/tutorials/Readme.md b/doc/tutorials/Readme.md index d70dd75b54a..435919a9c17 100644 --- a/doc/tutorials/Readme.md +++ b/doc/tutorials/Readme.md @@ -62,7 +62,7 @@ physical systems. * **Electrodes** Modelling electrodes and measuring differential capacitance with the ELC method. [Part 1](electrodes/electrodes_part1.ipynb) | - Part 2 (work in progress) + [Part 2](electrodes/electrodes_part2.ipynb) * **Constant pH method** Modelling an acid dissociation curve using the constant pH method. [Guide](constant_pH/constant_pH.ipynb) @@ -91,32 +91,12 @@ in the build folder, do: make tutorials ``` -The tutorials contain solutions hidden with the ``exercise2`` extension. -Since this extension is only available for Jupyter Notebook, JupyterLab -users need to convert the tutorials: - -```sh -for f in doc/tutorials/*/*.ipynb; do - ./pypresso doc/tutorials/convert.py exercise2 --to-jupyterlab ${f}; -done -``` - All tutorials can be viewed with their solutions [online](https://espressomd.github.io/doc/tutorials.html). ### Running the tutorials interactively -To view the tutorials, first change to the tutorials directory and then run -the `ipypresso` script from the directory into which espresso was compiled. 
- -For Jupyter Notebook and IPython users: - -```sh -cd doc/tutorials -../../ipypresso notebook -``` - -For JupyterLab users: +To view the tutorials in the build folder, run the following commands: ```sh cd doc/tutorials diff --git a/doc/tutorials/active_matter/active_matter.ipynb b/doc/tutorials/active_matter/active_matter.ipynb index 3bdab0431dd..19c6874dffb 100644 --- a/doc/tutorials/active_matter/active_matter.ipynb +++ b/doc/tutorials/active_matter/active_matter.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "dc98c779", "metadata": {}, "source": [ "# Active Matter\n", @@ -16,6 +17,7 @@ }, { "cell_type": "markdown", + "id": "3b481094", "metadata": {}, "source": [ "## Introduction\n", @@ -33,6 +35,7 @@ }, { "cell_type": "markdown", + "id": "8c074852", "metadata": {}, "source": [ "## Active particles\n", @@ -51,6 +54,7 @@ }, { "cell_type": "markdown", + "id": "c45be7aa", "metadata": {}, "source": [ "### Active Particles in **ESPResSo**\n", @@ -64,6 +68,7 @@ }, { "cell_type": "markdown", + "id": "500be443", "metadata": {}, "source": [ "### Self-Propulsion without Hydrodynamics\n", @@ -96,6 +101,7 @@ }, { "cell_type": "markdown", + "id": "0a292e51", "metadata": {}, "source": [ "## Enhanced Diffusion" @@ -103,6 +109,7 @@ }, { "cell_type": "markdown", + "id": "562026a7", "metadata": {}, "source": [ "First we import the necessary modules, define the parameters and set up the system." @@ -111,6 +118,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3bc0e542", "metadata": {}, "outputs": [], "source": [ @@ -130,6 +138,7 @@ { "cell_type": "code", "execution_count": null, + "id": "730bdd81", "metadata": {}, "outputs": [], "source": [ @@ -149,6 +158,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d327f144", "metadata": {}, "outputs": [], "source": [ @@ -159,38 +169,38 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "252969af", + "metadata": {}, "source": [ "### Exercise\n", "* Use ``ED_PARAMS`` to set up a [Langevin thermostat](https://espressomd.github.io/doc/espressomd.html#espressomd.thermostat.Thermostat.set_langevin) for translation and rotation of the particles." 
] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "f1af72f6", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "system.thermostat.set_langevin(kT=ED_PARAMS['kT'],\n", " gamma=ED_PARAMS['gamma'],\n", " gamma_rotation=ED_PARAMS['gamma_rotation'],\n", - " seed=42)\n", - "```" + " seed=42)" ] }, { "cell_type": "code", "execution_count": null, + "id": "0d6301c7", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "19f20627", "metadata": {}, "source": [ "The configuration for the Langevin-based swimming is exposed as an attribute of\n", @@ -201,10 +211,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "a2e9e5c2", + "metadata": {}, "source": [ "### Exercise\n", "* Set up one active and one passive particle, call them `part_act` and `part_pass` (Hint: see [the docs](https://espressomd.github.io/doc/espressomd.html#espressomd.particle_data.ParticleHandle.swimming))\n", @@ -212,28 +220,30 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "37de96eb", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "part_act = system.part.add(pos=[5.0, 5.0, 5.0], swimming={'f_swim': ED_PARAMS['f_swim']},\n", " mass=ED_PARAMS['mass'], rotation=3 * [True], rinertia=ED_PARAMS['rinertia'])\n", "part_pass = system.part.add(pos=[5.0, 5.0, 5.0],\n", - " mass=ED_PARAMS['mass'], rotation=3 * [True], rinertia=ED_PARAMS['rinertia'])\n", - "```" + " mass=ED_PARAMS['mass'], rotation=3 * [True], rinertia=ED_PARAMS['rinertia'])" ] }, { "cell_type": "code", "execution_count": null, + "id": "cdca84dc", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "8ed28c6e", "metadata": {}, "source": [ "Next we set up three **ESPResSo** correlators for the Mean Square Displacement (MSD), Velocity Autocorrelation Function (VACF) and the Angular Velocity Autocorrelation Function (AVACF)." @@ -242,6 +252,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6062679b", "metadata": {}, "outputs": [], "source": [ @@ -275,6 +286,7 @@ }, { "cell_type": "markdown", + "id": "8206c8f4", "metadata": {}, "source": [ "No more setup needed! We can run the simulation and plot our observables." @@ -283,6 +295,7 @@ { "cell_type": "code", "execution_count": null, + "id": "70a7eeab", "metadata": {}, "outputs": [], "source": [ @@ -293,6 +306,7 @@ { "cell_type": "code", "execution_count": null, + "id": "618f3b2a", "metadata": {}, "outputs": [], "source": [ @@ -307,6 +321,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d7c754fe", "metadata": {}, "outputs": [], "source": [ @@ -324,6 +339,7 @@ { "cell_type": "code", "execution_count": null, + "id": "564d3480", "metadata": {}, "outputs": [], "source": [ @@ -340,6 +356,7 @@ }, { "cell_type": "markdown", + "id": "f1391566", "metadata": {}, "source": [ "The Mean Square Displacement of an active particle is characterized by a longer ballistic regime and an increased diffusion coefficient for longer lag times. In the overdamped limit it is given by\n", @@ -366,6 +383,7 @@ }, { "cell_type": "markdown", + "id": "c53697fa", "metadata": {}, "source": [ "From the autocorrelation functions of the velocity and the angular velocity we can see that the activity does not influence the rotational diffusion. 
Yet the directed motion for $t<\\tau_{R}$ leads to an enhanced correlation of the velocity." @@ -374,6 +392,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8042bfc0", "metadata": {}, "outputs": [], "source": [ @@ -389,6 +408,7 @@ { "cell_type": "code", "execution_count": null, + "id": "10a16494", "metadata": {}, "outputs": [], "source": [ @@ -406,6 +426,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ff586ddc", "metadata": {}, "outputs": [], "source": [ @@ -422,6 +443,7 @@ }, { "cell_type": "markdown", + "id": "c14bcde3", "metadata": {}, "source": [ "Before we go to the second part, it is important to clear the state of the system." @@ -430,6 +452,7 @@ { "cell_type": "code", "execution_count": null, + "id": "23a99a8e", "metadata": {}, "outputs": [], "source": [ @@ -444,6 +467,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a183da57", "metadata": {}, "outputs": [], "source": [ @@ -452,6 +476,7 @@ }, { "cell_type": "markdown", + "id": "b916c470", "metadata": {}, "source": [ "## Rectification" @@ -459,6 +484,7 @@ }, { "cell_type": "markdown", + "id": "70bf06fe", "metadata": {}, "source": [ "In the second part of this tutorial you will consider the ‘rectifying’ properties of certain\n", @@ -470,6 +496,7 @@ }, { "cell_type": "markdown", + "id": "201b03ee", "metadata": {}, "source": [ "The geometry we will use is a cylindrical system with a funnel dividing\n", @@ -487,6 +514,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a9905037", "metadata": {}, "outputs": [], "source": [ @@ -497,6 +525,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7e171115", "metadata": {}, "outputs": [], "source": [ @@ -533,6 +562,7 @@ { "cell_type": "code", "execution_count": null, + "id": "2d969668", "metadata": {}, "outputs": [], "source": [ @@ -552,6 +582,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f7a66e7c", "metadata": {}, "outputs": [], "source": [ @@ -561,22 +592,21 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "97d502e4", + "metadata": {}, "source": [ "### Exercise \n", "* Using `funnel_length` and the geometric parameters in `RECT_PARAMS`, set up the funnel cone (Hint: Use a [Conical Frustum](https://espressomd.github.io/doc/espressomd.html#espressomd.shapes.HollowConicalFrustum))" ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "267e2442", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "ctp = espressomd.math.CylindricalTransformationParameters(\n", " axis=[1, 0, 0], center=box_l/2.)\n", "\n", @@ -586,53 +616,50 @@ " thickness=RECT_PARAMS['funnel_thickness'],\n", " length=funnel_length,\n", " direction=1)\n", - "system.constraints.add(shape=hollow_cone, particle_type=TYPES['boundaries'])\n", - "```" + "system.constraints.add(shape=hollow_cone, particle_type=TYPES['boundaries'])" ] }, { "cell_type": "code", "execution_count": null, + "id": "8d1cbf01", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "a64435b1", + "metadata": {}, "source": [ "### Exercise\n", "* Set up a WCA potential between the walls and the particles using the parameters in `RECT_PARAMS`" ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "c05ca1bf", + "metadata": {}, + 
"outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "system.non_bonded_inter[TYPES['particles'], TYPES['boundaries']].wca.set_params(\n", - " epsilon=RECT_PARAMS['wca_epsilon'], sigma=RECT_PARAMS['wca_sigma'])\n", - "```" + " epsilon=RECT_PARAMS['wca_epsilon'], sigma=RECT_PARAMS['wca_sigma'])" ] }, { "cell_type": "code", "execution_count": null, + "id": "2d7fe980", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ccff0b28", + "metadata": {}, "source": [ "### Exercise\n", "* Place an equal number of swimming particles (the total number should be `RECT_PARAMS['n_particles']`) in the left and the right part of the box such that the center of mass is exactly in the middle. (Hint: Particles do not interact so you can put multiple in the same position)\n", @@ -640,12 +667,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "63f12677", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "for i in range(RECT_PARAMS['n_particles']):\n", " pos = box_l / 2\n", " pos[0] += (-1)**i * 0.25 * RECT_PARAMS['length']\n", @@ -658,13 +686,13 @@ " np.cos(theta)]\n", "\n", " system.part.add(pos=pos, swimming={'f_swim': RECT_PARAMS['f_swim']},\n", - " director=director, rotation=3*[True])\n", - "```" + " director=director, rotation=3*[True])" ] }, { "cell_type": "code", "execution_count": null, + "id": "31b62469", "metadata": {}, "outputs": [], "source": [] @@ -672,6 +700,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a0aa1f61", "metadata": {}, "outputs": [], "source": [ @@ -681,10 +710,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "09889174", + "metadata": {}, "source": [ "### Exercise\n", "* Run the simulation using ``RECT_N_SAMPLES`` and ``RECT_STEPS_PER_SAMPLE`` and calculate the deviation of the center of mass from the center of the box in each sample step. (Hint: [Center of mass](https://espressomd.github.io/doc/espressomd.html#espressomd.galilei.GalileiTransform.system_CMS))\n", @@ -692,22 +719,23 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "d58e8dc7", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "for _ in tqdm.tqdm(range(RECT_N_SAMPLES)):\n", " system.integrator.run(RECT_STEPS_PER_SAMPLE)\n", " com_deviations.append(system.galilei.system_CMS()[0] - 0.5 * box_l[0])\n", - " times.append(system.time)\n", - "```" + " times.append(system.time)" ] }, { "cell_type": "code", "execution_count": null, + "id": "b34b2f32", "metadata": {}, "outputs": [], "source": [] @@ -715,6 +743,7 @@ { "cell_type": "code", "execution_count": null, + "id": "dd3fac45", "metadata": {}, "outputs": [], "source": [ @@ -725,6 +754,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f4f035db", "metadata": {}, "outputs": [], "source": [ @@ -740,6 +770,7 @@ }, { "cell_type": "markdown", + "id": "6cd432e4", "metadata": {}, "source": [ "Even though the potential energy inside the geometry is 0 in every part of the accessible region, the active particles are clearly not Boltzmann distributed (homogenous density). Instead, they get funneled into the right half, showing the inapplicability of equilibrium statistical mechanics." 
@@ -747,6 +778,7 @@ }, { "cell_type": "markdown", + "id": "2dc63352", "metadata": {}, "source": [ "## Hydrodynamics of self-propelled particles" @@ -754,6 +786,7 @@ }, { "cell_type": "markdown", + "id": "3f5981a2", "metadata": {}, "source": [ "In this final part of the tutorial we simulate and visualize the flow field around a self-propelled swimmer." @@ -761,6 +794,7 @@ }, { "cell_type": "markdown", + "id": "3c3b49ce", "metadata": {}, "source": [ "Of particular importance for self-propulsion at low Reynolds number is the fact\n", @@ -796,6 +830,7 @@ }, { "cell_type": "markdown", + "id": "d260c5ba", "metadata": {}, "source": [ "In situations where hydrodynamic interactions between swimmers or swimmers and\n", @@ -834,6 +869,7 @@ { "cell_type": "code", "execution_count": null, + "id": "da731448", "metadata": {}, "outputs": [], "source": [ @@ -848,6 +884,7 @@ { "cell_type": "code", "execution_count": null, + "id": "65695350", "metadata": {}, "outputs": [], "source": [ @@ -857,6 +894,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d9bcd33d", "metadata": {}, "outputs": [], "source": [ @@ -883,34 +921,33 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "44ab55b3", + "metadata": {}, "source": [ "### Exercise\n", "* Using `HYDRO_PARAMS`, set up a lattice-Boltzmann fluid and activate it as a thermostat (Hint: [lattice-Boltzmann](https://espressomd.github.io/doc/lb.html#lattice-boltzmann))" ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "e87b87a8", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "lbf = espressomd.lb.LBFluidWalberla(agrid=HYDRO_PARAMS['agrid'],\n", " density=HYDRO_PARAMS['dens'],\n", " kinematic_viscosity=HYDRO_PARAMS['visc'],\n", " tau=HYDRO_PARAMS['time_step'])\n", "system.lb = lbf\n", - "system.thermostat.set_lb(LB_fluid=lbf, gamma=HYDRO_PARAMS['gamma'], seed=42)\n", - "```" + "system.thermostat.set_lb(LB_fluid=lbf, gamma=HYDRO_PARAMS['gamma'], seed=42)" ] }, { "cell_type": "code", "execution_count": null, + "id": "d7ba1609", "metadata": {}, "outputs": [], "source": [] @@ -918,6 +955,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0510840f", "metadata": {}, "outputs": [], "source": [ @@ -928,10 +966,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "61f859e5", + "metadata": {}, "source": [ "### Exercise\n", "* Using `HYDRO_PARAMS`, place particle at `pos` that swims in `z`-direction. 
The particle handle should be called `particle`.\n", @@ -939,25 +975,26 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "706410a5", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "director = np.array([0,0,1])\n", "particle = system.part.add(\n", " pos=pos, \n", " director=director,\n", " mass=HYDRO_PARAMS['mass'], rotation=3*[False],\n", " swimming={'f_swim': HYDRO_PARAMS['f_swim']})\n", - "espressomd.swimmer_helpers.add_dipole_particle(system, particle, HYDRO_PARAMS['dipole_length'], HYDRO_PARAMS['dipole_particle_type'])\n", - "```" + "espressomd.swimmer_helpers.add_dipole_particle(system, particle, HYDRO_PARAMS['dipole_length'], HYDRO_PARAMS['dipole_particle_type'])" ] }, { "cell_type": "code", "execution_count": null, + "id": "96ad5f34", "metadata": {}, "outputs": [], "source": [] @@ -965,6 +1002,7 @@ { "cell_type": "code", "execution_count": null, + "id": "34849708", "metadata": {}, "outputs": [], "source": [ @@ -974,6 +1012,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ed68163e", "metadata": {}, "outputs": [], "source": [ @@ -999,6 +1038,7 @@ }, { "cell_type": "markdown", + "id": "28d64810", "metadata": {}, "source": [ "We can also export the particle and fluid data to ``.vtk`` format to display the results with a visualization software like ParaView." @@ -1007,6 +1047,7 @@ { "cell_type": "code", "execution_count": null, + "id": "06e9f100", "metadata": {}, "outputs": [], "source": [ @@ -1029,6 +1070,7 @@ }, { "cell_type": "markdown", + "id": "55f51115", "metadata": {}, "source": [ "## Further reading\n", @@ -1101,5 +1143,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/doc/tutorials/charged_system/charged_system.ipynb b/doc/tutorials/charged_system/charged_system.ipynb index 780855e6189..032d0ec7e73 100644 --- a/doc/tutorials/charged_system/charged_system.ipynb +++ b/doc/tutorials/charged_system/charged_system.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "b3844439", "metadata": {}, "source": [ "# A Charged System: Counterion Condensation\n", @@ -17,6 +18,7 @@ }, { "cell_type": "markdown", + "id": "c7a006eb", "metadata": {}, "source": [ "## Introduction\n", @@ -30,6 +32,7 @@ { "cell_type": "code", "execution_count": null, + "id": "05ee17a4", "metadata": {}, "outputs": [], "source": [ @@ -53,6 +56,7 @@ }, { "cell_type": "markdown", + "id": "4b5777fe", "metadata": {}, "source": [ "# System setup\n", @@ -63,6 +67,7 @@ { "cell_type": "code", "execution_count": null, + "id": "158ac15b", "metadata": {}, "outputs": [], "source": [ @@ -81,6 +86,7 @@ }, { "cell_type": "markdown", + "id": "4f46ba70", "metadata": {}, "source": [ "We will build the charged rod from individual particles that are fixed in space. With this, we can use the particle-based electrostatics methods of **ESPResSo**. For analysis, we give the rod particles a different type than the counterions." 
@@ -89,6 +95,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6f6fc8b9", "metadata": {}, "outputs": [], "source": [ @@ -103,10 +110,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ae520dd7", + "metadata": {}, "source": [ "**Exercise:**\n", "\n", @@ -118,35 +123,34 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "0b77ac12", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# ion-ion interaction\n", "system.non_bonded_inter[COUNTERION_TYPE, COUNTERION_TYPE].wca.set_params(\n", " epsilon=WCA_EPSILON, sigma=ION_DIAMETER)\n", "\n", "# ion-rod interaction\n", "system.non_bonded_inter[COUNTERION_TYPE, ROD_TYPE].wca.set_params(\n", - " epsilon=WCA_EPSILON, sigma=ION_DIAMETER / 2. + ROD_RADIUS)\n", - "```" + " epsilon=WCA_EPSILON, sigma=ION_DIAMETER / 2. + ROD_RADIUS)" ] }, { "cell_type": "code", "execution_count": null, + "id": "7260152b", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ec702d20", + "metadata": {}, "source": [ "Now we need to place the particles in the box\n", "\n", @@ -166,12 +170,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "b1899c20", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def setup_rod_and_counterions(system, ion_valency, counterion_type,\n", " rod_charge_dens, N_rod_beads, rod_type):\n", "\n", @@ -197,13 +202,13 @@ " counter_ions = system.part.add(pos=ion_positions, type=[\n", " counterion_type] * N_ions, q=[-ion_valency] * N_ions)\n", "\n", - " return counter_ions\n", - "```" + " return counter_ions" ] }, { "cell_type": "code", "execution_count": null, + "id": "3af0776e", "metadata": {}, "outputs": [], "source": [] @@ -211,6 +216,7 @@ { "cell_type": "code", "execution_count": null, + "id": "4600cd7e", "metadata": {}, "outputs": [], "source": [ @@ -230,6 +236,7 @@ }, { "cell_type": "markdown", + "id": "ae2fdc12", "metadata": {}, "source": [ "Now we set up the electrostatics method to calculate the forces and energies from the long-range Coulomb interaction.\n", @@ -245,6 +252,7 @@ { "cell_type": "code", "execution_count": null, + "id": "57ee902f", "metadata": {}, "outputs": [], "source": [ @@ -254,6 +262,7 @@ }, { "cell_type": "markdown", + "id": "bbe4b687", "metadata": {}, "source": [ "For the accuracy, **ESPResSo** estimates the relative error in the force calculation introduced by the approximations of $P^3M$.\n", @@ -263,36 +272,36 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "3b387784", + "metadata": {}, "source": [ "**Exercise:**\n", "* Set up a ``p3m`` instance and add it to the ``electrostatics`` slot of the system" ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "78d1c835", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "p3m = espressomd.electrostatics.P3M(**p3m_params)\n", - "system.electrostatics.solver = p3m\n", - "```" + "system.electrostatics.solver = p3m" ] }, { "cell_type": "code", "execution_count": null, + "id": "b04cdad5", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": 
"742af77e", "metadata": {}, "source": [ "Before we can start the simulation, we need to remove the overlap between particles to avoid large forces which would crash the simulation.\n", @@ -302,6 +311,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6ed2ce87", "metadata": {}, "outputs": [], "source": [ @@ -340,6 +350,7 @@ { "cell_type": "code", "execution_count": null, + "id": "299f819f", "metadata": {}, "outputs": [], "source": [ @@ -355,6 +366,7 @@ }, { "cell_type": "markdown", + "id": "add6aeef", "metadata": {}, "source": [ "After the overlap is removed, we activate a thermostat to simulate the system at a given temperature." @@ -363,6 +375,7 @@ { "cell_type": "code", "execution_count": null, + "id": "58b97cdf", "metadata": {}, "outputs": [], "source": [ @@ -374,6 +387,7 @@ }, { "cell_type": "markdown", + "id": "d190ecda", "metadata": {}, "source": [ "## First run and observable setup\n", @@ -384,6 +398,7 @@ { "cell_type": "code", "execution_count": null, + "id": "cefbbc86", "metadata": {}, "outputs": [], "source": [ @@ -398,6 +413,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8c119bd2", "metadata": {}, "outputs": [], "source": [ @@ -413,6 +429,7 @@ { "cell_type": "code", "execution_count": null, + "id": "32945c22", "metadata": {}, "outputs": [], "source": [ @@ -422,10 +439,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "43906d11", + "metadata": {}, "source": [ "Now we are ready to implement the observable calculation. As we are interested in the condensation of counterions on the rod, the physical quantity of interest is the density of charges $\\rho(r)$ around the rod, where $r$ is the distance from the rod. We need many samples to calculate the density from histograms.\n", "\n", @@ -444,12 +459,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "81590f8c", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def setup_profile_calculation(system, delta_N, ion_types, r_min, n_radial_bins):\n", " radial_profile_accumulators = {}\n", " ctp = espressomd.math.CylindricalTransformationParameters(center = np.array(system.box_l) / 2.,\n", @@ -474,13 +490,13 @@ "\n", " radial_profile_accumulators[ion_type] = radial_profile_acc\n", "\n", - " return radial_profile_accumulators, bin_edges\n", - "```" + " return radial_profile_accumulators, bin_edges" ] }, { "cell_type": "code", "execution_count": null, + "id": "167532ae", "metadata": {}, "outputs": [], "source": [] @@ -488,6 +504,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bb69fe23", "metadata": {}, "outputs": [], "source": [ @@ -503,10 +520,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "9b3ba2e8", + "metadata": {}, "source": [ "To run the simulation with different parameters, we need a way to reset the system and return it to an empty state before setting it up again.\n", "\n", @@ -528,24 +543,25 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "9615f768", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def clear_system(system):\n", " system.thermostat.turn_off()\n", " system.part.clear()\n", " system.electrostatics.clear()\n", " system.auto_update_accumulators.clear()\n", - " system.time = 0.\n", - "```" + " 
system.time = 0." ] }, { "cell_type": "code", "execution_count": null, + "id": "d850bed8", "metadata": {}, "outputs": [], "source": [] @@ -553,6 +569,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a65bcd23", "metadata": {}, "outputs": [], "source": [ @@ -561,6 +578,7 @@ }, { "cell_type": "markdown", + "id": "459fd605", "metadata": {}, "source": [ "## Production run and analysis\n", @@ -570,6 +588,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9049f459", "metadata": {}, "outputs": [], "source": [ @@ -583,6 +602,7 @@ }, { "cell_type": "markdown", + "id": "be2b297f", "metadata": {}, "source": [ "For longer simulation runs it will be convenient to have a progress bar" @@ -591,6 +611,7 @@ { "cell_type": "code", "execution_count": null, + "id": "4b873263", "metadata": {}, "outputs": [], "source": [ @@ -601,10 +622,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "86d353c7", + "metadata": {}, "source": [ "**Exercise:**\n", "* Run the simulation for the parameters given above and save the histograms in the corresponding dictionary for analysis\n", @@ -616,12 +635,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "c7575e93", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "for run in runs:\n", " clear_system(system)\n", " setup_rod_and_counterions(\n", @@ -638,23 +658,21 @@ " integrate_system(system, N_SAMPLES * STEPS_PER_SAMPLE)\n", "\n", " run['histogram'] = radial_profile_accs[COUNTERION_TYPE].mean()\n", - " print(f'simulation for parameters {run[\"params\"]} done\\n')\n", - "```" + " print(f'simulation for parameters {run[\"params\"]} done\\n')" ] }, { "cell_type": "code", "execution_count": null, + "id": "9a2891d9", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "5930693d", + "metadata": {}, "source": [ "**Question**\n", "* Why does the second simulation take much longer than the first one?" @@ -662,15 +680,15 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "id": "a877bbd9", + "metadata": {}, "source": [ "The rod charge density is doubled, so the total charge of the counterions needs to be doubled, too. Since their valency is only half of the one in the first run, there will be four times more counterions in the second run." ] }, { "cell_type": "markdown", + "id": "05e52489", "metadata": {}, "source": [ "We plot the density of counterions around the rod as the normalized integrated radial counterion charge distribution function $P(r)$, meaning the integrated probability to find an amount of charge within the radius $r$. We express the rod charge density $\\lambda$ in terms of the dimensionless Manning parameter $\\xi = \\lambda l_B / e$ where $l_B$ is the Bjerrum length and $e$ the elementary charge" @@ -679,6 +697,7 @@ { "cell_type": "code", "execution_count": null, + "id": "dbae04f1", "metadata": {}, "outputs": [], "source": [ @@ -706,6 +725,7 @@ }, { "cell_type": "markdown", + "id": "a235b93c", "metadata": {}, "source": [ "In the semilogarithmic plot we see an inflection point of the cumulative charge distribution which is the indicator for ion condensation. 
To compare to the meanfield approach of PB, we calculate the solution of the analytical expressions given in [10.1021/ma990897o](https://doi.org/10.1021/ma990897o)" @@ -714,6 +734,7 @@ { "cell_type": "code", "execution_count": null, + "id": "468cec09", "metadata": {}, "outputs": [], "source": [ @@ -734,6 +755,7 @@ }, { "cell_type": "markdown", + "id": "83934b8c", "metadata": {}, "source": [ "For multivalent counterions, the manning parameter $\\xi$ has to be multiplied by the valency $\\nu$. The result depends only on the product of ``rod_charge_dens`` and ``ion_valency``, so we only need one curve" @@ -742,6 +764,7 @@ { "cell_type": "code", "execution_count": null, + "id": "06a2c566", "metadata": {}, "outputs": [], "source": [ @@ -764,6 +787,7 @@ }, { "cell_type": "markdown", + "id": "09f03daa", "metadata": {}, "source": [ "We see that overall agreement is quite good, but the deviations from the PB solution get stronger the more charged the ions are.\n", @@ -772,6 +796,7 @@ }, { "cell_type": "markdown", + "id": "d5deb375", "metadata": {}, "source": [ "## Overcharging by added salt\n", @@ -782,6 +807,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0ee33efc", "metadata": {}, "outputs": [], "source": [ @@ -804,6 +830,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5fdbdcee", "metadata": {}, "outputs": [], "source": [ @@ -828,6 +855,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8c327569", "metadata": {}, "outputs": [], "source": [ @@ -843,6 +871,7 @@ { "cell_type": "code", "execution_count": null, + "id": "953f34fd", "metadata": {}, "outputs": [], "source": [ @@ -868,6 +897,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9207d735", "metadata": {}, "outputs": [], "source": [ @@ -883,10 +913,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ea8dc78c", + "metadata": {}, "source": [ "**Exercise:**\n", "* Use the cumulative histograms from the cell above to create the cumulative charge histogram of the total ion charge\n", @@ -896,24 +924,25 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "535dafec", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "counterion_charge = sum(counterions.q)\n", "anion_charge = sum(anions.q)\n", "cation_charge = sum(cations.q)\n", "charge_hist = counterion_charge * cum_hists[COUNTERION_TYPE] + \\\n", " anion_charge * cum_hists[ANION_PARAMS['type']] + \\\n", - " cation_charge * cum_hists[CATION_PARAMS['type']]\n", - "```" + " cation_charge * cum_hists[CATION_PARAMS['type']]" ] }, { "cell_type": "code", "execution_count": null, + "id": "88f39052", "metadata": {}, "outputs": [], "source": [] @@ -921,6 +950,7 @@ { "cell_type": "code", "execution_count": null, + "id": "518b7a64", "metadata": {}, "outputs": [], "source": [ @@ -935,6 +965,7 @@ }, { "cell_type": "markdown", + "id": "685514f1", "metadata": {}, "source": [ "You should observe a strong overcharging effect, where ions accumulate close to the rod." 
@@ -961,5 +992,5 @@ } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } diff --git a/doc/tutorials/constant_pH/constant_pH.ipynb b/doc/tutorials/constant_pH/constant_pH.ipynb index 965941fee82..f5f0577aec3 100644 --- a/doc/tutorials/constant_pH/constant_pH.ipynb +++ b/doc/tutorials/constant_pH/constant_pH.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "ac4ec17f", "metadata": {}, "source": [ "# The constant-pH ensemble method for acid-base reactions" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "d28d86e7", "metadata": {}, "source": [ "## Expected prior knowledge\n", @@ -22,6 +24,7 @@ }, { "cell_type": "markdown", + "id": "1bf00c68", "metadata": {}, "source": [ "## Introduction\n", @@ -42,6 +45,7 @@ }, { "cell_type": "markdown", + "id": "160bd8a5", "metadata": {}, "source": [ "### The chemical equilibrium and reaction constant\n", @@ -89,6 +93,7 @@ { "cell_type": "code", "execution_count": null, + "id": "76c9dbf1", "metadata": {}, "outputs": [], "source": [ @@ -99,6 +104,7 @@ }, { "cell_type": "markdown", + "id": "7910719e", "metadata": {}, "source": [ "### The constant pH method\n", @@ -119,6 +125,7 @@ }, { "cell_type": "markdown", + "id": "9677c09d", "metadata": {}, "source": [ "## Simulation setup\n", @@ -129,6 +136,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6dd0c277", "metadata": {}, "outputs": [], "source": [ @@ -153,6 +161,7 @@ }, { "cell_type": "markdown", + "id": "8a744f51", "metadata": {}, "source": [ "The package [pint](https://pint.readthedocs.io/en/stable/) is intended to make handling of physical quantities with different units easy. You simply create an instance of [`pint.UnitRegistry`](https://pint.readthedocs.io/en/stable/developers_reference.html#pint.UnitRegistry) and access its unit definitions and automatic conversions. For more information or a quick introduction please look at the [pint-documentation](https://pint.readthedocs.io/en/stable/) or [pint-tutorials](https://pint.readthedocs.io/en/stable/tutorial.html#tutorial)." 
@@ -161,6 +170,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c8e15834", "metadata": {}, "outputs": [], "source": [ @@ -169,6 +179,7 @@ }, { "cell_type": "markdown", + "id": "65800134", "metadata": {}, "source": [ "The inputs that we need to define our system in the simulation include\n", @@ -187,6 +198,7 @@ }, { "cell_type": "markdown", + "id": "a896dbe6", "metadata": {}, "source": [ "### Set the reduced units of energy and length\n", @@ -210,6 +222,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ba76b35b", "metadata": {}, "outputs": [], "source": [ @@ -235,6 +248,7 @@ }, { "cell_type": "markdown", + "id": "e2424a62", "metadata": {}, "source": [ "### Set the key physical parameters that uniquely define the system\n", @@ -249,6 +263,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d1b40278", "metadata": {}, "outputs": [], "source": [ @@ -262,6 +277,7 @@ }, { "cell_type": "markdown", + "id": "8f2cf0bb", "metadata": {}, "source": [ "#### Set the range of parameters that we want to vary\n", @@ -272,6 +288,7 @@ { "cell_type": "code", "execution_count": null, + "id": "64498150", "metadata": {}, "outputs": [], "source": [ @@ -286,6 +303,7 @@ }, { "cell_type": "markdown", + "id": "bceb15bc", "metadata": {}, "source": [ "#### Choose which interactions should be activated\n", @@ -300,6 +318,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bd64c859", "metadata": {}, "outputs": [], "source": [ @@ -317,6 +336,7 @@ }, { "cell_type": "markdown", + "id": "72a9d84e", "metadata": {}, "source": [ "#### Set the number of samples to be collected\n", @@ -329,6 +349,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c02606b8", "metadata": { "scrolled": true }, @@ -347,6 +368,7 @@ }, { "cell_type": "markdown", + "id": "09b83acd", "metadata": {}, "source": [ "#### Calculate the dependent parameters\n", @@ -361,6 +383,7 @@ { "cell_type": "code", "execution_count": null, + "id": "422098fe", "metadata": {}, "outputs": [], "source": [ @@ -383,6 +406,7 @@ }, { "cell_type": "markdown", + "id": "83067f8d", "metadata": {}, "source": [ "#### Set the particle types and charges\n", @@ -393,6 +417,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ca876c27", "metadata": {}, "outputs": [], "source": [ @@ -416,6 +441,7 @@ }, { "cell_type": "markdown", + "id": "5118d349", "metadata": {}, "source": [ "### Initialize the ESPResSo system\n", @@ -431,6 +457,7 @@ { "cell_type": "code", "execution_count": null, + "id": "698200f1", "metadata": { "scrolled": true }, @@ -444,6 +471,7 @@ }, { "cell_type": "markdown", + "id": "1f23634b", "metadata": {}, "source": [ "### Set up particles and bonded-interactions\n", @@ -457,6 +485,7 @@ { "cell_type": "code", "execution_count": null, + "id": "04feb53f", "metadata": {}, "outputs": [], "source": [ @@ -497,6 +526,7 @@ }, { "cell_type": "markdown", + "id": "e16b96c1", "metadata": {}, "source": [ "### Set up non-bonded-interactions\n", @@ -512,6 +542,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c28c10f8", "metadata": {}, "outputs": [], "source": [ @@ -549,6 +580,7 @@ }, { "cell_type": "markdown", + "id": "22c08f03", "metadata": {}, "source": [ "### Set up the constant pH ensemble using the reaction ensemble module" @@ -556,10 +588,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "631c1c01", + "metadata": {}, "source": [ "After the particles have been added to the system we initialize the `espressomd.reaction_methods`. 
The parameters to set are:\n", "\n", @@ -574,12 +604,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "2b994257", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "exclusion_range = PARTICLE_SIZE_REDUCED if USE_WCA else 0.0\n", "RE = espressomd.reaction_methods.ConstantpHEnsemble(\n", " kT=KT_REDUCED,\n", @@ -587,19 +618,20 @@ " seed=77,\n", " constant_pH=2 # temporary value\n", ")\n", - "RE.set_non_interacting_type(type=len(TYPES)) # this parameter helps speed up the calculation in an interacting system\n", - "```" + "RE.set_non_interacting_type(type=len(TYPES)) # this parameter helps speed up the calculation in an interacting system" ] }, { "cell_type": "code", "execution_count": null, + "id": "d3b2554d", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "88f3bcda", "metadata": {}, "source": [ "The next step is to define the chemical reaction. The order of species in the lists of reactants and products is very important for ESPResSo because it determines which particles are created or deleted in the reaction move. Specifically, identity of the first species in the list of reactants is changed to the first species in the list of products, the second reactant species is changed to the second product species, and so on. If the reactant list has more species than the product list, then excess reactant species are deleted from the system. If the product list has more species than the reactant list, then the excess product species are created and randomly placed inside the simulation box. This convention is especially important if some of the species belong to a chain-like molecule, so that they cannot be inserted at an arbitrary position." @@ -607,10 +639,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "06e51a82", + "metadata": {}, "source": [ "**Exercise:**\n", "\n", @@ -621,13 +651,15 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, + "id": "96395eb9", "metadata": { - "scrolled": false, - "solution2": "hidden" + "scrolled": false }, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "RE.add_reaction(\n", " gamma=10**(-pKa),\n", " reactant_types=[TYPES[\"HA\"]],\n", @@ -635,19 +667,20 @@ " default_charges={TYPES[\"HA\"]: CHARGES[\"HA\"],\n", " TYPES[\"A\"]: CHARGES[\"A\"],\n", " TYPES[\"B\"]: CHARGES[\"B\"]}\n", - ")\n", - "```" + ")" ] }, { "cell_type": "code", "execution_count": null, + "id": "41566274", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "ad9817c9", "metadata": {}, "source": [ "In the example above, the order of reactants and products ensures that identity of $\\mathrm{HA}$ is changed to $\\mathrm{A^{-}}$ and vice versa, while $\\mathrm{B^{+}}$ is inserted/deleted in the reaction move. 
\n", @@ -657,6 +690,7 @@ }, { "cell_type": "markdown", + "id": "253d05ee", "metadata": {}, "source": [ "## Run the simulations\n", @@ -666,10 +700,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "034fd91b", + "metadata": {}, "source": [ "**Exercise:**\n", "\n", @@ -679,30 +711,29 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "36523af6", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def equilibrate_reaction(reaction_steps=1):\n", - " RE.reaction(steps=reaction_steps)\n", - "```" + " RE.reaction(steps=reaction_steps)" ] }, { "cell_type": "code", "execution_count": null, + "id": "7ebb25e5", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "07daa583", + "metadata": {}, "source": [ "After the system has been equilibrated, the integration/sampling loop follows.\n", "\n", @@ -731,12 +762,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "6816aa4d", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def perform_sampling(type_A, num_samples, num_As:np.ndarray, reaction_steps, \n", " prob_integration=0.5, integration_steps=1000):\n", " for i in range(num_samples):\n", @@ -744,19 +776,20 @@ " system.integrator.run(integration_steps)\n", " # we should do at least one reaction attempt per reactive particle\n", " RE.reaction(steps=reaction_steps)\n", - " num_As[i] = system.number_of_particles(type=type_A)\n", - "```" + " num_As[i] = system.number_of_particles(type=type_A)" ] }, { "cell_type": "code", "execution_count": null, + "id": "4b6ebb99", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "eabceb5b", "metadata": {}, "source": [ "Finally we have everything together to run our simulations. We set the $\\mathrm{pH}$ value in [`RE.constant_pH`](https://espressomd.github.io/doc/espressomd.html#espressomd.reaction_methods.ConstantpHEnsemble.constant_pH) and use our `equilibrate_reaction` function to equilibrate the system. After that the samplings are performed with our `perform_sampling` function." @@ -765,6 +798,7 @@ { "cell_type": "code", "execution_count": null, + "id": "b1489ea7", "metadata": {}, "outputs": [], "source": [ @@ -793,6 +827,7 @@ }, { "cell_type": "markdown", + "id": "c652f949", "metadata": {}, "source": [ "## Results\n", @@ -815,6 +850,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5793ceb1", "metadata": {}, "outputs": [], "source": [ @@ -853,6 +889,7 @@ }, { "cell_type": "markdown", + "id": "03bbe514", "metadata": {}, "source": [ "Now, we use the above function to calculate the average number of particles of type $\\mathrm{A^-}$ and estimate its statistical error and autocorrelation time.\n", @@ -862,6 +899,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3b8ea33f", "metadata": { "scrolled": false }, @@ -891,6 +929,7 @@ }, { "cell_type": "markdown", + "id": "8115f2cd", "metadata": {}, "source": [ "The simulation results for the non-interacting case match very well with the analytical solution of Henderson-Hasselbalch equation. There are only minor deviations, and the estimated errors are small too. 
This situation will change when we introduce interactions.\n", @@ -903,6 +942,7 @@ { "cell_type": "code", "execution_count": null, + "id": "cd73fbbb", "metadata": { "scrolled": false }, @@ -923,6 +963,7 @@ }, { "cell_type": "markdown", + "id": "5fa9f088", "metadata": {}, "source": [ "To look in more detail at the statistical accuracy, it is useful to plot the deviations from the analytical result. This provides another way to check the consistency of error estimates. In the case of non-interacting system, the simulation should exactly reproduce the Henderson-Hasselbalch equation. In such case, about 68% of the results should be within one error bar from the analytical result, whereas about 95% of the results should be within two times the error bar. Indeed, if you plot the deviations by running the script below, you should observe that most of the results are within one error bar from the analytical solution, a smaller fraction of the results is slightly further than one error bar, and one or two might be about two error bars apart. Again, this situation changes when we activate interactions because the ionization of the interacting system deviates from the Henderson-Hasselbalch equation." @@ -931,6 +972,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9585afa3", "metadata": { "scrolled": false }, @@ -951,6 +993,7 @@ }, { "cell_type": "markdown", + "id": "8b982f07", "metadata": {}, "source": [ "### The Neutralizing Ion $\\mathrm{B^+}$\n", @@ -980,6 +1023,7 @@ { "cell_type": "code", "execution_count": null, + "id": "33b09ef4", "metadata": { "scrolled": false }, @@ -1029,6 +1073,7 @@ }, { "cell_type": "markdown", + "id": "9e1f5fea", "metadata": {}, "source": [ "The plot shows that at intermediate $\\mathrm{pH}$ the concentration of $\\mathrm{B^+}$ ions is approximately equal to the concentration of $\\mathrm{M^+}$ ions. Only at one specific $\\mathrm{pH}$ the concentration of $\\mathrm{B^+}$ ions is equal to the concentration of $\\mathrm{H^+}$ ions. This is the $\\mathrm{pH}$ one obtains when dissolving the weak acid $\\mathrm{A}$ in pure water.\n", @@ -1040,6 +1085,7 @@ { "cell_type": "code", "execution_count": null, + "id": "002ca2b0", "metadata": {}, "outputs": [], "source": [ @@ -1079,6 +1125,7 @@ }, { "cell_type": "markdown", + "id": "ca4a6e56", "metadata": {}, "source": [ "We see that the ionic strength in the simulation box significantly deviates from the ionic strength of the real solution only at high or low $\\mathrm{pH}$ value. If the $\\mathrm{p}K_{\\mathrm{A}}$ value is sufficiently large, then the deviation at very low $\\mathrm{pH}$ can also be neglected because then the polymer is uncharged in the region where the ionic strength is not correctly represented in the constant-$\\mathrm{pH}$ simulation. At a high $\\mathrm{pH}$ the ionic strength will have an effect on the weak acid, because then it is fully charged. The $\\mathrm{pH}$ range in which the constant-$\\mathrm{pH}$ method uses approximately the right ionic strength depends on salt concentration, weak acid concentration and the $\\mathrm{p}K_{\\mathrm{A}}$ value. 
See also [Landsgesell2019] for a more detailed discussion of this issue, and its consequences.\n" @@ -1086,6 +1133,7 @@ }, { "cell_type": "markdown", + "id": "7c63f293", "metadata": {}, "source": [ "## Suggested problems for further work\n", @@ -1099,6 +1147,7 @@ }, { "cell_type": "markdown", + "id": "ebb94b70", "metadata": {}, "source": [ "## References\n", @@ -1137,5 +1186,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/doc/tutorials/convert.py b/doc/tutorials/convert.py index 94cd4291a05..a54505de5cf 100644 --- a/doc/tutorials/convert.py +++ b/doc/tutorials/convert.py @@ -1,5 +1,5 @@ # -# Copyright (C) 2019-2022 The ESPResSo project +# Copyright (C) 2019-2023 The ESPResSo project # # This file is part of ESPResSo. # @@ -29,13 +29,15 @@ import nbformat import re import os -import ast import sys import uuid sys.path.append('@CMAKE_SOURCE_DIR@/testsuite/scripts') import importlib_wrapper as iw +SOLUTION_CELL_TOKEN = "# SOLUTION CELL" + + def get_code_cells(nb): return [c['source'] for c in nb['cells'] if c['cell_type'] == 'code'] @@ -79,6 +81,22 @@ def remove_empty_cells(nb): nb['cells'].pop(i) +def parse_solution_cell(cell): + if cell["cell_type"] == "code": + source = cell["source"].strip() + if source.startswith(f"{SOLUTION_CELL_TOKEN}\n"): + return source.split("\n", 1)[1].strip() + return None + + +def convert_exercise2_to_code(nb): + for i in range(len(nb["cells"]) - 1, 0, -1): + cell = nb["cells"][i] + solution = parse_solution_cell(cell) + if solution is not None: + cell["source"] = solution + + def disable_plot_interactivity(nb): """ Replace all occurrences of the magic command ``%matplotlib notebook`` @@ -91,127 +109,29 @@ def disable_plot_interactivity(nb): cell['source'], flags=re.M) -def split_matplotlib_cells(nb): - """ - If a cell imports matplotlib, split the cell to keep the - import statement separate from the code that uses matplotlib. - This prevents a known bug in the Jupyter backend which causes - the plot object to be represented as a string instead of a canvas - when created in the cell where matplotlib is imported for the - first time (https://github.com/jupyter/notebook/issues/3523). - """ - for i in range(len(nb['cells']) - 1, -1, -1): - cell = nb['cells'][i] - if cell['cell_type'] == 'code' and 'matplotlib' in cell['source']: - code = iw.protect_ipython_magics(cell['source']) - # split cells after matplotlib imports - mapping = iw.delimit_statements(code) - tree = ast.parse(code) - visitor = iw.GetMatplotlibPyplot() - visitor.visit(tree) - if visitor.matplotlib_first: - code = iw.deprotect_ipython_magics(code) - lines = code.split('\n') - lineno_end = mapping[visitor.matplotlib_first] - split_code = '\n'.join(lines[lineno_end:]).lstrip('\n') - if split_code: - new_cell = nbformat.v4.new_code_cell(source=split_code) - if 'id' not in cell and 'id' in new_cell: - del new_cell['id'] - nb['cells'].insert(i + 1, new_cell) - lines = lines[:lineno_end] - nb['cells'][i]['source'] = '\n'.join(lines).rstrip('\n') - - -def convert_exercise2_to_code(nb): - """ - Walk through the notebook cells and convert exercise2 Markdown cells - containing fenced python code to exercise2 code cells. 
- """ - for i, cell in enumerate(nb['cells']): - if 'solution2' in cell['metadata']: - cell['metadata']['solution2'] = 'shown' - # convert solution markdown cells into code cells - if cell['cell_type'] == 'markdown' and 'solution2' in cell['metadata'] \ - and 'solution2_first' not in cell['metadata']: - lines = cell['source'].strip().split('\n') - if lines[0].strip() == '```python' and lines[-1].strip() == '```': - source = '\n'.join(lines[1:-1]).strip() - nb['cells'][i] = nbformat.v4.new_code_cell(source=source) - nb['cells'][i]['metadata'] = cell['metadata'] - nb['cells'][i]['metadata']['solution2'] = 'shown' - if 'id' in nb['cells'][i]: - del nb['cells'][i]['id'] - - def convert_exercise2_to_markdown(nb): """ - Walk through the notebook cells and convert exercise2 Python cells - to exercise2 Markdown cells using a fenced code block. - """ - for i, cell in enumerate(nb['cells']): - if 'solution2' in cell['metadata']: - cell['metadata']['solution2'] = 'hidden' - # convert solution code cells into markdown cells - if cell['cell_type'] == 'code' and 'solution2' in cell['metadata']: - content = '```python\n' + cell['source'] + '\n```' - nb['cells'][i] = nbformat.v4.new_markdown_cell(source=content) - nb['cells'][i]['metadata'] = cell['metadata'] - nb['cells'][i]['metadata']['solution2'] = 'hidden' - if 'id' in nb['cells'][i]: - del nb['cells'][i]['id'] - - -def convert_exercise2_to_jupyterlab(nb): - """ - Walk through the notebook cells and convert exercise2 Markdown cells - containing fenced python code to a JupyterLab-compatible format. - As of 2022, there is no equivalent of exercise2 for JupyterLab - ([chart](https://jupyterlab-contrib.github.io/migrate_from_classical.html)), - but a similar effect can be obtained with basic HTML. - - This also converts a notebook to Notebook Format 4.5. ESPResSo notebooks - cannot be saved in 4.5 format since both Jupyter Notebook and JupyterLab - overwrite the cell ids with random strings after each save, which is a - problem for version control. The notebooks need to be converted to the - 4.5 format to silence JSON parser errors in JupyterLab. + Walk through the notebook cells and convert solutions cells to Markdown + format and append an empty code cell. """ - jupyterlab_tpl = """\ -
\ + solution_tpl = """\ +
\ Show solution
-{1} +```python +{0} +```
\ """ - for i, cell in enumerate(nb['cells']): + for i, cell in reversed(list(enumerate(nb["cells"]))): # convert solution markdown cells into code cells - if cell['cell_type'] == 'markdown' and 'solution2' in cell['metadata'] \ - and 'solution2_first' not in cell['metadata']: - lines = cell['source'].strip().split('\n') - shown = 'open=""' if cell['metadata']['solution2'] == 'shown' else '' - if lines[0].strip() == '```python' and lines[-1].strip() == '```': - source = jupyterlab_tpl.format(shown, '\n'.join(lines).strip()) - nb['cells'][i] = nbformat.v4.new_markdown_cell(source=source) - # convert cell to notebook format 4.5 - if 'id' not in cell: - cell = uuid.uuid4().hex[:8] - - # change to notebook format 4.5 - current_version = (nb['nbformat'], nb['nbformat_minor']) - assert current_version >= (4, 0) - if current_version < (4, 5): - nb['nbformat_minor'] = 5 - - -def convert_exercise2_to_vscode_jupyter(nb): - """ - Walk through the notebook cells and convert exercise2 Markdown cells - containing fenced python code to a VS Code Jupyter-compatible format. - As of 2022, there is no equivalent of exercise2 for VS Code Jupyter. - """ - convert_exercise2_to_jupyterlab(nb) + solution = parse_solution_cell(cell) + if solution is not None: + source = solution_tpl.format(solution) + nb["cells"][i] = nbformat.v4.new_markdown_cell(source=source) + nb["cells"].insert(i + 1, nbformat.v4.new_code_cell(source="")) def apply_autopep8(nb): @@ -287,20 +207,14 @@ def handle_ci_case(args): for filepath in args.scripts: add_cell_from_script(nb, filepath) - # convert solution cells to code cells - if args.exercise2: - convert_exercise2_to_code(nb) - - # remove empty cells (e.g. those below exercise2 cells) - if args.remove_empty_cells: + # cleanup solution cells and remove empty cells + if args.prepare_for_html: remove_empty_cells(nb) + convert_exercise2_to_code(nb) # disable plot interactivity disable_plot_interactivity(nb) - # guard against a jupyter bug involving matplotlib - split_matplotlib_cells(nb) - if args.substitutions or args.execute: # substitute global variables cell_separator = f'\n##{uuid.uuid4().hex}\n' @@ -326,16 +240,10 @@ def handle_exercise2_case(args): if args.to_md: convert_exercise2_to_markdown(nb) - elif args.to_jupyterlab: - convert_exercise2_to_jupyterlab(nb) - elif args.to_vscode_jupyter: - convert_exercise2_to_vscode_jupyter(nb) elif args.to_py: convert_exercise2_to_code(nb) elif args.pep8: - convert_exercise2_to_code(nb) apply_autopep8(nb) - convert_exercise2_to_markdown(nb) elif args.remove_empty_cells: remove_empty_cells(nb) @@ -358,25 +266,19 @@ def handle_exercise2_case(args): help='variables to substitute') parser_ci.add_argument('--scripts', nargs='*', help='scripts to insert in new cells') -parser_ci.add_argument('--exercise2', action='store_true', - help='convert exercise2 solutions into code cells') -parser_ci.add_argument('--remove-empty-cells', action='store_true', - help='remove empty cells') +parser_ci.add_argument('--prepare-for-html', action='store_true', + help='remove empty cells and CI/CD comment lines') parser_ci.add_argument('--execute', action='store_true', help='run the notebook') parser_ci.set_defaults(callback=handle_ci_case) # exercise2 module parser_exercise2 = subparsers.add_parser( - 'exercise2', help='module for exercise2 conversion (Markdown <-> Python)') + 'cells', help='module to post-process cells') parser_exercise2.add_argument('input', type=str, help='path to the Jupyter ' 'notebook (in-place conversion)') group_exercise2 = 
parser_exercise2.add_mutually_exclusive_group(required=True) group_exercise2.add_argument('--to-md', action='store_true', help='convert solution cells to Markdown') -group_exercise2.add_argument('--to-jupyterlab', action='store_true', - help='convert solution cells to JupyterLab') -group_exercise2.add_argument('--to-vscode-jupyter', action='store_true', - help='convert solution cells to VS Code Jupyter') group_exercise2.add_argument('--to-py', action='store_true', help='convert solution cells to Python') group_exercise2.add_argument('--pep8', action='store_true', diff --git a/doc/tutorials/electrodes/electrodes_part1.ipynb b/doc/tutorials/electrodes/electrodes_part1.ipynb index acda4631df2..6af99ed2890 100644 --- a/doc/tutorials/electrodes/electrodes_part1.ipynb +++ b/doc/tutorials/electrodes/electrodes_part1.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "33419441", "metadata": {}, "source": [ "# Basic simulation of electrodes in ESPResSo part I: ion-pair in a narrow metallic slit-like confinement using ICC$^\\star$" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "47450c9f", "metadata": {}, "source": [ "## Prerequisites\n", @@ -26,6 +28,7 @@ }, { "cell_type": "markdown", + "id": "05f9c1ad", "metadata": {}, "source": [ "## Introduction\n", @@ -43,6 +46,7 @@ }, { "cell_type": "markdown", + "id": "02157443", "metadata": {}, "source": [ "## Theoretical Background \n", @@ -79,6 +83,7 @@ }, { "cell_type": "markdown", + "id": "a35caac8", "metadata": {}, "source": [ "### Green's function for charges in a dielectric slab\n", @@ -108,6 +113,7 @@ }, { "cell_type": "markdown", + "id": "6c28ac46", "metadata": {}, "source": [ "## 2D+h periodic systems, dielectric interfaces and Induced Charge Computation with ICC$^\\star$\n", @@ -157,6 +163,7 @@ }, { "cell_type": "markdown", + "id": "eafc3e67", "metadata": {}, "source": [ "## 1. System setup \n" @@ -164,6 +171,7 @@ }, { "cell_type": "markdown", + "id": "015f9df5", "metadata": {}, "source": [ "We first import all ESPResSo features and external modules." 
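The `# SOLUTION CELL` convention introduced in `convert.py` above can be exercised in isolation: `parse_solution_cell()` strips the token line from a code cell and returns the remaining solution code, which the converters then either keep as Python or wrap in a fenced Markdown block. A small sketch using a plain dict in the nbformat cell layout (the toy cell content is made up for illustration):

```python
SOLUTION_CELL_TOKEN = "# SOLUTION CELL"

def parse_solution_cell(cell):
    """Return the solution code of a solution cell, or None otherwise."""
    if cell["cell_type"] == "code":
        source = cell["source"].strip()
        if source.startswith(f"{SOLUTION_CELL_TOKEN}\n"):
            return source.split("\n", 1)[1].strip()
    return None

# toy cell for illustration
cell = {"cell_type": "code", "source": "# SOLUTION CELL\nx = 2\nprint(x)\n"}
print(parse_solution_cell(cell))  # prints the solution body without the token line
```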
@@ -172,6 +180,7 @@ { "cell_type": "code", "execution_count": null, + "id": "553e6a9b", "metadata": {}, "outputs": [], "source": [ @@ -189,6 +198,7 @@ }, { "cell_type": "markdown", + "id": "d0c56786", "metadata": {}, "source": [ "We need to define the system dimensions and some physical parameters related to\n", @@ -212,6 +222,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8778f289", "metadata": {}, "outputs": [], "source": [ @@ -257,6 +268,7 @@ }, { "cell_type": "markdown", + "id": "715b2e5c", "metadata": {}, "source": [ "### Setup of electrostatic interactions\n", @@ -266,6 +278,7 @@ { "cell_type": "code", "execution_count": null, + "id": "b00b4235", "metadata": {}, "outputs": [], "source": [ @@ -280,10 +293,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "20957c03", + "metadata": {}, "source": [ "### Task\n", "\n", @@ -291,25 +302,27 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "e5a52faf", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", - "elc = espressomd.electrostatics.ELC(actor=p3m, gap_size=ELC_GAP, maxPWerror=MAX_PW_ERROR)\n", - "```" + "# SOLUTION CELL\n", + "elc = espressomd.electrostatics.ELC(actor=p3m, gap_size=ELC_GAP, maxPWerror=MAX_PW_ERROR)" ] }, { "cell_type": "code", "execution_count": null, + "id": "3eec2580", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "243166cf", "metadata": {}, "source": [ "Next, we set up the ICC particles on both electrodes" @@ -318,6 +331,7 @@ { "cell_type": "code", "execution_count": null, + "id": "93f09c66", "metadata": {}, "outputs": [], "source": [ @@ -328,10 +342,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "23398839", + "metadata": {}, "source": [ "### TASK\n", "\n", @@ -341,12 +353,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "6d83e7e2", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "line_density = np.sqrt(ICC_PARTCL_NUMBER_DENSITY)\n", "xs = np.linspace(0, system.box_l[0], num=int(round(system.box_l[0] * line_density)), endpoint=False)\n", "ys = np.linspace(0, system.box_l[1], num=int(round(system.box_l[1] * line_density)), endpoint=False)\n", @@ -361,23 +374,21 @@ "for x in xs:\n", " for y in ys:\n", " icc_partcls_top.append(system.part.add(pos=[x, y, box_l_z], q=1. 
/ n_partcls_each_electrode,\n", - " type=TYPES[\"Electrodes\"], fix=3*[True]))\n", - "```" + " type=TYPES[\"Electrodes\"], fix=3*[True]))" ] }, { "cell_type": "code", "execution_count": null, + "id": "24de69a4", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "5326e038", + "metadata": {}, "source": [ "### Task\n", "\n", @@ -391,12 +402,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "11f1f197", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "system.electrostatics.solver = elc\n", "\n", "n_icc_partcls = len(icc_partcls_top) + len(icc_partcls_bottom)\n", @@ -420,19 +432,20 @@ " sigmas=icc_sigmas,\n", " epsilons=icc_epsilons\n", ")\n", - "system.electrostatics.extension = icc\n", - "```" + "system.electrostatics.extension = icc" ] }, { "cell_type": "code", "execution_count": null, + "id": "48e114a8", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "1937965b", "metadata": {}, "source": [ "## 2. Calculation of the forces" @@ -441,6 +454,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e998bc15", "metadata": {}, "outputs": [], "source": [ @@ -467,6 +481,7 @@ }, { "cell_type": "markdown", + "id": "751d418b", "metadata": {}, "source": [ "## 3. Analysis and Interpretation of the data\n", @@ -478,6 +493,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8b3579ed", "metadata": {}, "outputs": [], "source": [ @@ -512,6 +528,7 @@ { "cell_type": "code", "execution_count": null, + "id": "cb4dba95", "metadata": {}, "outputs": [], "source": [ @@ -534,6 +551,7 @@ }, { "cell_type": "markdown", + "id": "bd5f4d61", "metadata": {}, "source": [ "## References\n", @@ -568,5 +586,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/doc/tutorials/electrodes/electrodes_part2.ipynb b/doc/tutorials/electrodes/electrodes_part2.ipynb index d723d41474f..ffe79e19bce 100644 --- a/doc/tutorials/electrodes/electrodes_part2.ipynb +++ b/doc/tutorials/electrodes/electrodes_part2.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "357a65e2", "metadata": {}, "source": [ "# Basic simulation of electrodes in ESPResSo part II: Electrolyte capacitor and Poisson–Boltzmann theory" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "a90e9ca7", "metadata": {}, "source": [ "## Prerequisites\n", @@ -27,6 +29,7 @@ }, { "cell_type": "markdown", + "id": "4c0ab2c6", "metadata": {}, "source": [ "## Introduction\n", @@ -74,6 +77,7 @@ }, { "cell_type": "markdown", + "id": "7b1ef707", "metadata": {}, "source": [ "## Theoretical Background \n", @@ -127,6 +131,7 @@ }, { "cell_type": "markdown", + "id": "38ba2c5c", "metadata": {}, "source": [ "## ELC-IC for 2D+h periodic systems with dielectric interfaces\n", @@ -182,6 +187,7 @@ }, { "cell_type": "markdown", + "id": "f128c80a", "metadata": {}, "source": [ "## 1. 
System setup \n", @@ -192,6 +198,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7661ce98", "metadata": {}, "outputs": [], "source": [ @@ -215,6 +222,7 @@ }, { "cell_type": "markdown", + "id": "16cce457", "metadata": {}, "source": [ "We need to define system dimensions and some physical parameters related to\n", @@ -232,6 +240,7 @@ { "cell_type": "code", "execution_count": null, + "id": "43ae6591", "metadata": {}, "outputs": [], "source": [ @@ -260,6 +269,7 @@ }, { "cell_type": "markdown", + "id": "a3b0c640", "metadata": {}, "source": [ "### 1.1 Setting up the box dimensions and create system\n", @@ -293,10 +303,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "867de4db", + "metadata": {}, "source": [ "### Task\n", "\n", @@ -311,12 +319,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "5a65af0b", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def get_box_dimension(concentration, distance, n_ionpairs=N_IONPAIRS):\n", " \"\"\"\n", " For a given number of particles, determine the lateral area of the box\n", @@ -332,13 +341,13 @@ " area = box_volume / (l_z - 2. * LJ_SIGMA) # account for finite ion size in density calculation\n", " l_xy = np.sqrt(area)\n", "\n", - " return l_xy, l_z\n", - "```" + " return l_xy, l_z" ] }, { "cell_type": "code", "execution_count": null, + "id": "9b70bad1", "metadata": {}, "outputs": [], "source": [] @@ -346,6 +355,7 @@ { "cell_type": "code", "execution_count": null, + "id": "264d82af", "metadata": {}, "outputs": [], "source": [ @@ -358,6 +368,7 @@ }, { "cell_type": "markdown", + "id": "7aaec7a5", "metadata": {}, "source": [ "We now can create the **ESPResSo** system.\n", @@ -375,6 +386,7 @@ { "cell_type": "code", "execution_count": null, + "id": "eb642f0f", "metadata": {}, "outputs": [], "source": [ @@ -385,6 +397,7 @@ }, { "cell_type": "markdown", + "id": "df4888a5", "metadata": {}, "source": [ "### 1.2 Set up the double-layer capacitor\n", @@ -397,10 +410,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "48abb259", + "metadata": {}, "source": [ "### Task\n", "* add two wall constraints at $z=0$ and $z=L_z$ to stop particles from\n", @@ -414,12 +425,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "af3cb791", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# Bottom wall, normal pointing in the +z direction \n", "floor = espressomd.shapes.Wall(normal=[0, 0, 1])\n", "c1 = system.constraints.add(\n", @@ -428,23 +440,21 @@ "# Top wall, normal pointing in the -z direction\n", "ceiling = espressomd.shapes.Wall(normal=[0, 0, -1], dist=-box_l_z) \n", "c2 = system.constraints.add(\n", - " particle_type=types[\"Electrodes\"], penetrable=False, shape=ceiling)\n", - "```" + " particle_type=types[\"Electrodes\"], penetrable=False, shape=ceiling)" ] }, { "cell_type": "code", "execution_count": null, + "id": "88ba5371", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "a990659a", + "metadata": {}, "source": [ "#### 1.2.2 Add particles for the ions\n", "\n", @@ -457,12 +467,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + 
"cell_type": "code", + "execution_count": null, + "id": "5a62db5a", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "offset = LJ_SIGMA # avoid unfavorable overlap at close distance to the walls\n", "init_part_btw_z1 = offset \n", "init_part_btw_z2 = box_l_z - offset\n", @@ -478,23 +489,21 @@ " ion_pos[0] = rng.random(1) * system.box_l[0]\n", " ion_pos[1] = rng.random(1) * system.box_l[1]\n", " ion_pos[2] = rng.random(1) * (init_part_btw_z2 - init_part_btw_z1) + init_part_btw_z1\n", - " system.part.add(pos=ion_pos, type=types[\"Anion\"], q=charges[\"Anion\"])\n", - "```" + " system.part.add(pos=ion_pos, type=types[\"Anion\"], q=charges[\"Anion\"])" ] }, { "cell_type": "code", "execution_count": null, + "id": "dc4f8d89", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "b95f1bba", + "metadata": {}, "source": [ "#### 1.2.3 Add interactions:\n", "\n", @@ -509,31 +518,30 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "dacf9cc5", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "for t1 in types.values():\n", " for t2 in types.values():\n", - " system.non_bonded_inter[t1, t2].wca.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA)\n", - "```" + " system.non_bonded_inter[t1, t2].wca.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA)" ] }, { "cell_type": "code", "execution_count": null, + "id": "31de8dc6", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "61700a87", + "metadata": {}, "source": [ "For the (2D+h) electrostatic with dielectrics we choose the ELC-IC with P3M.\n", "\n", @@ -555,12 +563,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "2e566993", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def setup_electrostatic_solver(potential_diff):\n", " delta_mid_top = -1. # (Fully metallic case both -1) \n", " delta_mid_bot = -1.\n", @@ -577,19 +586,20 @@ " maxPWerror=elc_accuracy,\n", " delta_mid_bot=delta_mid_bot,\n", " delta_mid_top=delta_mid_top)\n", - " return elc\n", - "```" + " return elc" ] }, { "cell_type": "code", "execution_count": null, + "id": "efbf4cf9", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "03ab39a1", "metadata": {}, "source": [ "Now add the solver to the system:" @@ -598,6 +608,7 @@ { "cell_type": "code", "execution_count": null, + "id": "25219528", "metadata": {}, "outputs": [], "source": [ @@ -606,6 +617,7 @@ }, { "cell_type": "markdown", + "id": "5fed3232", "metadata": {}, "source": [ "## 2. 
Equilibration\n", @@ -622,6 +634,7 @@ { "cell_type": "code", "execution_count": null, + "id": "51a25228", "metadata": {}, "outputs": [], "source": [ @@ -636,6 +649,7 @@ { "cell_type": "code", "execution_count": null, + "id": "1a3cacd2", "metadata": { "scrolled": true }, @@ -663,6 +677,7 @@ }, { "cell_type": "markdown", + "id": "abbfc272", "metadata": {}, "source": [ "### 2.2 Equilibrate the ion distribution" @@ -671,6 +686,7 @@ { "cell_type": "code", "execution_count": null, + "id": "45c444f5", "metadata": {}, "outputs": [], "source": [ @@ -682,6 +698,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e9c7fe2f", "metadata": {}, "outputs": [], "source": [ @@ -705,6 +722,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c45afc27", "metadata": {}, "outputs": [], "source": [ @@ -720,6 +738,7 @@ }, { "cell_type": "markdown", + "id": "1f1f7892", "metadata": {}, "source": [ "Convergence after $t\\sim50$ time units." @@ -727,10 +746,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "60271c57", + "metadata": {}, "source": [ "## 3. Calculate and analyze ion profile\n", "\n", @@ -753,12 +770,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "22a22497", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def setup_densityprofile_accumulators(bin_width):\n", " cations = system.part.select(type=types[\"Cation\"]) \n", " anions = system.part.select(type=types[\"Anion\"])\n", @@ -774,13 +792,13 @@ " density_accumulator_anion = espressomd.accumulators.MeanVarianceCalculator(\n", " obs=density_profile_anion, delta_N=20)\n", " zs = density_profile_anion.bin_centers()[0, 0, :, 2]\n", - " return zs, density_accumulator_cation, density_accumulator_anion\n", - "```" + " return zs, density_accumulator_cation, density_accumulator_anion" ] }, { "cell_type": "code", "execution_count": null, + "id": "b9a7d815", "metadata": {}, "outputs": [], "source": [] @@ -788,6 +806,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e9843a03", "metadata": {}, "outputs": [], "source": [ @@ -797,6 +816,7 @@ }, { "cell_type": "markdown", + "id": "6db1f679", "metadata": {}, "source": [ "### 3.2 Run the simulation\n", @@ -807,6 +827,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c218d24f", "metadata": {}, "outputs": [], "source": [ @@ -831,6 +852,7 @@ }, { "cell_type": "markdown", + "id": "b20e4939", "metadata": {}, "source": [ "### Compare to analytical prediction\n", @@ -842,6 +864,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ad677e07", "metadata": {}, "outputs": [], "source": [ @@ -858,6 +881,7 @@ { "cell_type": "code", "execution_count": null, + "id": "343d4b27", "metadata": {}, "outputs": [], "source": [ @@ -893,6 +917,7 @@ }, { "cell_type": "markdown", + "id": "8c870e94", "metadata": {}, "source": [ "We see good agreement between our simulation and the meanfield solution of Guy and Chapman. Low density and reasonably low potential make the assumptions of the analytical approach justified." 
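Grahame's equation, used in the next check, relates the surface charge density of a planar wall in contact with a 1:1 electrolyte to its surface potential, $\sigma = \sqrt{8 c_0 \varepsilon_0 \varepsilon_r k_\mathrm{B} T}\, \sinh\!\left(\frac{e \psi_0}{2 k_\mathrm{B} T}\right)$. A standalone sketch in SI units (the tutorial itself works in reduced units; the temperature, relative permittivity and example numbers below are illustrative assumptions):

```python
import numpy as np
import scipy.constants as const

def grahame_sigma(psi_0, c_molar, temperature=300., eps_r=78.5):
    """Surface charge density (C/m^2) of a planar wall in contact with a
    1:1 electrolyte of bulk concentration c_molar (mol/l) at surface
    potential psi_0 (V), according to Grahame's equation."""
    c0 = c_molar * 1e3 * const.N_A              # bulk number density in 1/m^3
    kT = const.k * temperature
    prefactor = np.sqrt(8. * c0 * const.epsilon_0 * eps_r * kT)
    return prefactor * np.sinh(const.e * psi_0 / (2. * kT))

# illustrative numbers only
print(grahame_sigma(psi_0=0.025, c_molar=0.01))
```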
@@ -900,6 +925,7 @@ }, { "cell_type": "markdown", + "id": "78ee5747", "metadata": {}, "source": [ "We now check how well the surface charge agrees with Grahame's equation.\n", @@ -910,6 +936,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8e65a805", "metadata": {}, "outputs": [], "source": [ @@ -927,6 +954,7 @@ }, { "cell_type": "markdown", + "id": "d0de3b90", "metadata": {}, "source": [ "The electric field is readily obtained from the integral \n", @@ -936,6 +964,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8a6bd126", "metadata": {}, "outputs": [], "source": [ @@ -960,6 +989,7 @@ }, { "cell_type": "markdown", + "id": "365290da", "metadata": {}, "source": [ "We see that the electric field reduces to 0 in the middle of the channel, justifying the assumption that the two electrodes are far enough apart to not influence each other." @@ -967,6 +997,7 @@ }, { "cell_type": "markdown", + "id": "5f11c2b0", "metadata": {}, "source": [ "The electric potential can be calculated from $\\phi(z) = \\int_0^z -E(z^\\prime)\\,\\mathrm{d}z^\\prime$." @@ -975,6 +1006,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bf3506c3", "metadata": {}, "outputs": [], "source": [ @@ -996,6 +1028,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3ff40269", "metadata": {}, "outputs": [], "source": [ @@ -1007,6 +1040,7 @@ }, { "cell_type": "markdown", + "id": "ddc72205", "metadata": {}, "source": [ "## 4. Differential capacitance\n", @@ -1019,6 +1053,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ae22f800", "metadata": {}, "outputs": [], "source": [ @@ -1055,9 +1090,8 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": true - }, + "id": "82b62e62", + "metadata": {}, "outputs": [], "source": [ "fig, ax = plt.subplots(figsize=(10, 6))\n", @@ -1080,6 +1114,7 @@ }, { "cell_type": "markdown", + "id": "85c325bc", "metadata": {}, "source": [ "For small potential drops, one observes the expected Poisson–Boltzmann behavior. It also agrees with the linearized solution $\\sigma(\\phi_\\mathrm{s}) = \\varepsilon_r\\varepsilon_0 \\frac{\\phi_\\mathrm{s}}{2 \\lambda_\\mathrm{D}}$.\n", @@ -1088,6 +1123,7 @@ }, { "cell_type": "markdown", + "id": "2e97a52a", "metadata": {}, "source": [ "## References\n", @@ -1126,5 +1162,5 @@ } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } diff --git a/doc/tutorials/electrokinetics/electrokinetics.ipynb b/doc/tutorials/electrokinetics/electrokinetics.ipynb index 28510f0d347..65af9f29b3c 100644 --- a/doc/tutorials/electrokinetics/electrokinetics.ipynb +++ b/doc/tutorials/electrokinetics/electrokinetics.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "594dd6d4", "metadata": {}, "source": [ "# Electrokinetics\n", @@ -14,6 +15,7 @@ }, { "cell_type": "markdown", + "id": "c6fb9224", "metadata": {}, "source": [ "## 1. Introduction" @@ -21,6 +23,7 @@ }, { "cell_type": "markdown", + "id": "2f74f4f3", "metadata": {}, "source": [ "In this tutorial we're looking at the electrokinetics feature of ESPResSo, which allows us to describe the motion of potentially charged chemical species solvated in a fluid on a continuum level. The govering equations for the solvent are known as the Poisson-Nernst-Planck equations, which is the combination of the electrostatic Poisson equation and the dynamics of the chemical species described by the Nernst-Planck equation. For the advection we solve the incompressible Navier-Stokes equation. 
The total set of equations is given by\n", @@ -38,6 +41,7 @@ }, { "cell_type": "markdown", + "id": "4e0abda7", "metadata": {}, "source": [ "# 2. Advection-Diffusion equation in 2D" @@ -45,6 +49,7 @@ }, { "cell_type": "markdown", + "id": "192c4793", "metadata": {}, "source": [ "The first system that is simulated in this tutorial is the simple advection-diffusion of a drop of uncharged chemical species in a constant velocity field. To keep the computation time small, we restrict ourselves to a 2D problem, but the algorithm is also capable of solving the 3D advection-diffusion equation. Furthermore, we can also skip solving the electrostatic Poisson equation, since there are is no charged species present. The equations we solve thus reduce to\n", @@ -65,6 +70,7 @@ { "cell_type": "code", "execution_count": null, + "id": "36a4e973", "metadata": {}, "outputs": [], "source": [ @@ -94,6 +100,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f69ffd36", "metadata": {}, "outputs": [], "source": [ @@ -113,6 +120,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a3f477c1", "metadata": {}, "outputs": [], "source": [ @@ -123,6 +131,7 @@ }, { "cell_type": "markdown", + "id": "61278eec", "metadata": {}, "source": [ "We use a lattice Boltzmann flow field with constant velocity for advection.\n", @@ -132,6 +141,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7c581b2d", "metadata": {}, "outputs": [], "source": [ @@ -145,6 +155,7 @@ }, { "cell_type": "markdown", + "id": "14c5a43a", "metadata": {}, "source": [ "To use the electrokinetics-algorithm in ESPResSo, one needs to create an instance of the `EKContainer`-object and pass it a time step `tau` and Poisson solver `solver`.\n", @@ -154,6 +165,7 @@ { "cell_type": "code", "execution_count": null, + "id": "75ea3046", "metadata": {}, "outputs": [], "source": [ @@ -163,6 +175,7 @@ }, { "cell_type": "markdown", + "id": "e55f53bd", "metadata": {}, "source": [ "Now, we can add diffusive species to the container to integrate their dynamics." @@ -170,10 +183,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ad79ba7d", + "metadata": {}, "source": [ "# Exercise:\n", "- Create an instance of the [`espressomd.electrokinetics.EKSpecies`]() and add it to the system with [`system.ekcontainer.add()`](https://espressomd.github.io/doc/espressomd.html#espressomd.electrokinetics.EKContainer.add). \n", @@ -185,30 +196,32 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "f98a90f0", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "species = espressomd.electrokinetics.EKSpecies(\n", " lattice=lattice, density=0.0, kT=KT,\n", " diffusion=DIFFUSION_COEFFICIENT, valency=0.0,\n", " advection=True, friction_coupling=True,\n", " ext_efield=[0., 0., 0.], tau=TAU)\n", - "system.ekcontainer.add(species)\n", - "```" + "system.ekcontainer.add(species)" ] }, { "cell_type": "code", "execution_count": null, + "id": "a05467d1", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "7d8eeb11", "metadata": {}, "source": [ "To compare our simulation to the fundamental solution of the advection-diffusion equation, we need to approximate a delta-droplet, which can be achieved by having a non-zero density only at the center of the domain." 
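The fundamental solution referred to here is the Green's function of the advection-diffusion equation: a point droplet of total mass $M$ released at $\vec{r}_0$ spreads as a Gaussian while its center is advected with the flow, $\rho(\vec{r}, t) = \frac{M}{4 \pi D t} \exp\!\left(-\frac{|\vec{r} - \vec{r}_0 - \vec{v} t|^2}{4 D t}\right)$ in two dimensions. A standalone sketch of this reference profile (grid size, diffusion coefficient and velocity are illustrative, not the notebook's parameters):

```python
import numpy as np

def advection_diffusion_2d(x, y, t, D, ux, uy, x0, y0, total_mass=1.0):
    """Fundamental solution of the 2D advection-diffusion equation for a
    point source of mass total_mass released at (x0, y0) at t=0 and
    advected with the constant velocity (ux, uy)."""
    dx = x - x0 - ux * t
    dy = y - y0 - uy * t
    return total_mass / (4. * np.pi * D * t) * np.exp(-(dx**2 + dy**2) / (4. * D * t))

# illustrative numbers only
xs, ys = np.meshgrid(np.arange(64), np.arange(64), indexing="ij")
rho = advection_diffusion_2d(xs, ys, t=200., D=0.05, ux=0.05, uy=0.05, x0=32., y0=32.)
print(rho.sum())  # total mass is approximately conserved on a large enough grid
```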
@@ -217,6 +230,7 @@ { "cell_type": "code", "execution_count": null, + "id": "471a4b0d", "metadata": {}, "outputs": [], "source": [ @@ -225,6 +239,7 @@ }, { "cell_type": "markdown", + "id": "e58359bf", "metadata": {}, "source": [ "Now everything is set and we can finally run the simulation by running the integrator." @@ -233,6 +248,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bb7f3fa0", "metadata": {}, "outputs": [], "source": [ @@ -241,6 +257,7 @@ }, { "cell_type": "markdown", + "id": "07db0e0f", "metadata": {}, "source": [ "For comparison, we prepare the analytical solution and show the 2D-density as well as a slice through the center of the droplet." @@ -249,6 +266,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f81bdd03", "metadata": {}, "outputs": [], "source": [ @@ -271,6 +289,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ecb571a7", "metadata": {}, "outputs": [], "source": [ @@ -288,6 +307,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3b340b5f", "metadata": {}, "outputs": [], "source": [ @@ -318,6 +338,7 @@ }, { "cell_type": "markdown", + "id": "af461df0", "metadata": {}, "source": [ "From the plot one can see that the position of the density-peak matches well. However, one also sees that the droplet in the simulation has spread more than it should. The reason is that the discretization used for the advection term introduces an artifical, additional diffusion to the system. This is a fundamental limitation of the algorithm, which is why it cannot be applied to pure advection problems." @@ -325,6 +346,7 @@ }, { "cell_type": "markdown", + "id": "f2d218ef", "metadata": {}, "source": [ "# 3. Electroosmotic flow" @@ -332,6 +354,7 @@ }, { "cell_type": "markdown", + "id": "58a2b410", "metadata": {}, "source": [ "The next system in this tutorial is a simple slit pore, as shown in Figure 1. It consists of an infinite plate capacitor with an electrolytic solution trapped in between the plates. The plates of the capactior carry a constant surface charge and the counterions are solvated in the liquid. \n", @@ -348,6 +371,7 @@ }, { "cell_type": "markdown", + "id": "6178d589", "metadata": {}, "source": [ "### Analytical solution\n", @@ -400,6 +424,7 @@ }, { "cell_type": "markdown", + "id": "0d16a45d", "metadata": {}, "source": [ "### Numerical solution\n", @@ -410,6 +435,7 @@ { "cell_type": "code", "execution_count": null, + "id": "71b3226b", "metadata": {}, "outputs": [], "source": [ @@ -420,6 +446,7 @@ { "cell_type": "code", "execution_count": null, + "id": "606c2a43", "metadata": {}, "outputs": [], "source": [ @@ -449,6 +476,7 @@ }, { "cell_type": "markdown", + "id": "2cc6c6f8", "metadata": {}, "source": [ "We can now set up the electrokinetics algorithm as in the first part of the tutorial, starting with the LB-method." @@ -457,6 +485,7 @@ { "cell_type": "code", "execution_count": null, + "id": "071a0e13", "metadata": {}, "outputs": [], "source": [ @@ -466,6 +495,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7b6c06dc", "metadata": {}, "outputs": [], "source": [ @@ -478,6 +508,7 @@ }, { "cell_type": "markdown", + "id": "970b52a5", "metadata": {}, "source": [ "Since our species are going to carry a charge now, we need to solve the full electrostatic problem. For that, we have to specify an actual solver." 
@@ -485,10 +516,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "4b290c32", + "metadata": {}, "source": [ "# Exercise: \n", "- Set up a Poisson solver for the electrostatic interaction and use it to create an instance of the [EKContainer](https://espressomd.github.io/doc/espressomd.html#espressomd.electrokinetics.EKContainer) \n", @@ -499,27 +528,29 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "b8353ef1", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "eksolver = espressomd.electrokinetics.EKFFT(lattice=lattice, permittivity=PERMITTIVITY,\n", " single_precision=SINGLE_PRECISION)\n", - "system.ekcontainer = espressomd.electrokinetics.EKContainer(tau=TAU, solver=eksolver)\n", - "```" + "system.ekcontainer = espressomd.electrokinetics.EKContainer(tau=TAU, solver=eksolver)" ] }, { "cell_type": "code", "execution_count": null, + "id": "8b14c431", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "895a81e1", "metadata": {}, "source": [ "To simulate the system, we will use two different ion species: The counterions are propagated in the fluid. The second species will be used to describe the surface charge on the plates and therefore has to be stationary (i.e. no advection, no diffusion)." @@ -528,6 +559,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8f1809a2", "metadata": {}, "outputs": [], "source": [ @@ -540,6 +572,7 @@ }, { "cell_type": "markdown", + "id": "c8e5ae3c", "metadata": {}, "source": [ "Now we set the initial conditions for the ion densities. The counterions will be initialized with a homogeneous distribution, excluding the cells used as boundaries. The surface charge density is homogeneously distributed in the boundary cells." @@ -548,6 +581,7 @@ { "cell_type": "code", "execution_count": null, + "id": "994236fc", "metadata": {}, "outputs": [], "source": [ @@ -563,6 +597,7 @@ }, { "cell_type": "markdown", + "id": "c2cb78ce", "metadata": {}, "source": [ "We now have to specify the boundary conditions. For this, we use ESPResSo's`shapes`." @@ -571,6 +606,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c9b7f9c6", "metadata": {}, "outputs": [], "source": [ @@ -580,6 +616,7 @@ }, { "cell_type": "markdown", + "id": "29a08da2", "metadata": {}, "source": [ "At both of them we specify no-flux and zero-density boundary conditions for the counterions. Furthermore, we set a no-slip boundary condition for the fluid." 
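The two boundaries referred to here are planar walls built with `espressomd.shapes.Wall`. A minimal sketch of how such a pair of walls is typically constructed (the offset, the box length and the choice of the x axis as wall normal are assumptions for illustration; the notebook defines its own geometry in the cell above):

```python
import espressomd.shapes

# illustrative geometry: a slit bounded by two walls along x
offset = 1.0      # assumed distance of the walls from the box faces
box_l_x = 64.0    # assumed box length along the wall normal

# Wall(normal=n, dist=d) defines the plane n.r = d with n pointing into the fluid
wall_left = espressomd.shapes.Wall(normal=[1, 0, 0], dist=offset)
wall_right = espressomd.shapes.Wall(normal=[-1, 0, 0], dist=-(box_l_x - offset))
```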
@@ -587,10 +624,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "08b6386b", + "metadata": {}, "source": [ "# Exercise\n", "At both walls, set\n", @@ -605,28 +640,30 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "2cd50ab9", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "for wall in (wall_left, wall_right):\n", " ekspecies.add_boundary_from_shape(shape=wall, value=[0., 0., 0.], boundary_type=espressomd.electrokinetics.FluxBoundary)\n", " ekspecies.add_boundary_from_shape(shape=wall, value=0.0, boundary_type=espressomd.electrokinetics.DensityBoundary)\n", - " lbf.add_boundary_from_shape(shape=wall, velocity=[0., 0., 0.])\n", - "```" + " lbf.add_boundary_from_shape(shape=wall, velocity=[0., 0., 0.])" ] }, { "cell_type": "code", "execution_count": null, + "id": "3502fb6a", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "2486b696", "metadata": {}, "source": [ "Now we can finally integrate the system and extract the ion density profile, the fluid velocity profile as well as the pressure-tensor profile." @@ -635,6 +672,7 @@ { "cell_type": "code", "execution_count": null, + "id": "b4426bfe", "metadata": { "scrolled": true }, @@ -647,6 +685,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0a9c1aa2", "metadata": {}, "outputs": [], "source": [ @@ -661,6 +700,7 @@ }, { "cell_type": "markdown", + "id": "62087ea3", "metadata": {}, "source": [ "For comparison, we calculate the analytic solution" @@ -669,6 +709,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ba5959a0", "metadata": {}, "outputs": [], "source": [ @@ -692,6 +733,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6406c3d3", "metadata": {}, "outputs": [], "source": [ @@ -703,6 +745,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d23e8b5b", "metadata": {}, "outputs": [], "source": [ @@ -739,6 +782,7 @@ }, { "cell_type": "markdown", + "id": "a58fcfee", "metadata": {}, "source": [ "In the plots one can see that the analytic solution for the electroosmotic flow matches the simulation very well. " @@ -746,6 +790,7 @@ }, { "cell_type": "markdown", + "id": "66a80321", "metadata": {}, "source": [ "### Comparison to pressure-driven flow\n", @@ -755,6 +800,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7a23c6f5", "metadata": {}, "outputs": [], "source": [ @@ -767,6 +813,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3c101827", "metadata": { "scrolled": true }, @@ -779,6 +826,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f12326b5", "metadata": {}, "outputs": [], "source": [ @@ -789,6 +837,7 @@ }, { "cell_type": "markdown", + "id": "c33cf2dc", "metadata": {}, "source": [ "The analytic solution for pressure-driven flow between two infinite parallel plates is known as the Poiseuille flow." 
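For two no-slip plates a distance $h$ apart and a constant body-force density $f$ acting on a fluid of dynamic viscosity $\eta$, the plane Poiseuille profile is $v(x) = \frac{f}{2\eta} x (h - x)$, with its maximum $\frac{f h^2}{8\eta}$ in the middle of the channel. A standalone sketch of this reference profile (the numbers are illustrative, not the notebook's parameters):

```python
import numpy as np

def poiseuille_velocity(x, channel_width, force_density, dyn_viscosity):
    """Plane Poiseuille profile between no-slip walls at x=0 and
    x=channel_width, driven by a constant body-force density."""
    return force_density / (2. * dyn_viscosity) * x * (channel_width - x)

# illustrative numbers only
x = np.linspace(0., 10., 101)
v = poiseuille_velocity(x, channel_width=10., force_density=1e-4, dyn_viscosity=0.5)
print(v.max())  # equals f * h^2 / (8 * eta)
```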
@@ -797,6 +846,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9c7f9f9f", "metadata": {}, "outputs": [], "source": [ @@ -810,6 +860,7 @@ { "cell_type": "code", "execution_count": null, + "id": "13dd28d8", "metadata": {}, "outputs": [], "source": [ @@ -820,6 +871,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5bed1384", "metadata": {}, "outputs": [], "source": [ @@ -856,6 +908,7 @@ }, { "cell_type": "markdown", + "id": "1b00267e", "metadata": {}, "source": [ "As one can again see, the body force on the fluid did non alter the ion-density profile.\n", @@ -864,6 +917,7 @@ }, { "cell_type": "markdown", + "id": "30e2df44", "metadata": {}, "source": [ "To see the difference between the two types of flows, we plot the simulation data together in one plot." @@ -872,6 +926,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ca814e80", "metadata": { "scrolled": true }, @@ -910,6 +965,7 @@ }, { "cell_type": "markdown", + "id": "7882c507", "metadata": {}, "source": [ "Looking at the fluid velocity plot, one can see that the electroosmotic flow profile flattens significantly faster towards the center of the channel when compared to the pressure driven flow. The reason for this is the accumulation of the counterion-density towards the oppositely charged plates. Here, the driving electric field causes the highest force on the fluid, which decays towards the center of the channel. In contrast, the Poiseuille-flow is driven by a constant, uniform driving force." @@ -917,6 +973,7 @@ }, { "cell_type": "markdown", + "id": "5d5b8ded", "metadata": {}, "source": [ "# 4. Reaction in turbulent flow" @@ -924,6 +981,7 @@ }, { "cell_type": "markdown", + "id": "0f3280a6", "metadata": {}, "source": [ "To showcase the reaction feature of our electrokinetics algorithm, we simulate a simple reaction in complex flow.\n", @@ -939,6 +997,7 @@ { "cell_type": "code", "execution_count": null, + "id": "b0de9bf5", "metadata": {}, "outputs": [], "source": [ @@ -949,6 +1008,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f180a9d2", "metadata": {}, "outputs": [], "source": [ @@ -973,6 +1033,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8883964f", "metadata": {}, "outputs": [], "source": [ @@ -981,14 +1042,14 @@ " lattice=lattice, density=DENSITY_FLUID, kinematic_viscosity=VISCOSITY_KINEMATIC,\n", " tau=TAU, ext_force_density=EXT_FORCE_DENSITY, kT=KT, seed=42)\n", "system.lb = lbf\n", - "system.thermostat.set_lb(LB_fluid=lbf, seed=42)", - "\n", + "system.thermostat.set_lb(LB_fluid=lbf, seed=42)\n", "eksolver = espressomd.electrokinetics.EKNone(lattice=lattice)\n", "system.ekcontainer = espressomd.electrokinetics.EKContainer(tau=TAU, solver=eksolver)" ] }, { "cell_type": "markdown", + "id": "83b3763a", "metadata": {}, "source": [ "Now we can focus on the reactions. In this tutorial we choose the simple case of $A + B \\rightarrow C$, which means that equal parts of the educt species $A$ and $B$ can turn into the product species $C$.\n", @@ -1003,6 +1064,7 @@ { "cell_type": "code", "execution_count": null, + "id": "80b66ddf", "metadata": {}, "outputs": [], "source": [ @@ -1015,6 +1077,7 @@ }, { "cell_type": "markdown", + "id": "5573021f", "metadata": {}, "source": [ "We create each involved species and directly specify their boundary-conditions for the domain-boundaries. We set the initial density of the species to 0 and also add Dirichlet boundary conditions of zero density at both the inlet and the outlet of the system." 
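In the spatially homogeneous (well-mixed) limit, such a bulk reaction reduces to the rate equations $\dot{\rho}_A = \dot{\rho}_B = -k \rho_A \rho_B$ and $\dot{\rho}_C = +k \rho_A \rho_B$, assuming a first-order dependence on each educt; in the lattice model the same rate law acts locally in every cell, coupled to advection and diffusion. A small explicit-Euler sketch of the well-mixed case (rate constant, densities and time step are illustrative, not the notebook's values):

```python
# well-mixed A + B -> C with rate k * rho_A * rho_B, integrated with explicit Euler
k = 0.1                                  # illustrative rate constant
rho = {"A": 1.0, "B": 0.5, "C": 0.0}     # illustrative initial densities
dt = 0.01

for _ in range(10000):
    rate = k * rho["A"] * rho["B"]
    rho["A"] -= rate * dt
    rho["B"] -= rate * dt
    rho["C"] += rate * dt

print(rho)  # B is the limiting educt, so rho["C"] approaches 0.5
```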
@@ -1023,6 +1086,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5e514b85", "metadata": {}, "outputs": [], "source": [ @@ -1063,38 +1127,38 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "1b533530", + "metadata": {}, "source": [ "# Exercise:\n", "- Create an instance of [`EKBulkReaction`](https://espressomd.github.io/doc/espressomd.html#espressomd.electrokinetics.EKBulkReaction) using the previously created `reactants` and activate the reaction by adding it to [`system.ekcontainer.reactions`](https://espressomd.github.io/doc/espressomd.html#espressomd.electrokinetics.EKContainer.reactions).\n" ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "94ce97c1", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "reaction = espressomd.electrokinetics.EKBulkReaction(\n", " reactants=reactants, coefficient=REACTION_RATE_CONSTANT, lattice=lattice, tau=TAU)\n", "\n", - "system.ekcontainer.reactions.add(reaction)\n", - "```" + "system.ekcontainer.reactions.add(reaction)" ] }, { "cell_type": "code", "execution_count": null, + "id": "0d50af30", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "2b6cac10", "metadata": {}, "source": [ "The next thing to add to the system is the cylindrical obstacles, which act as the boundaries for the Kármán vortices to form. These are placed close to the inlet of the system and also act as impenetrable boundaries for the species.\n", @@ -1104,6 +1168,7 @@ { "cell_type": "code", "execution_count": null, + "id": "418bcd31", "metadata": {}, "outputs": [], "source": [ @@ -1131,6 +1196,7 @@ }, { "cell_type": "markdown", + "id": "128652bd", "metadata": {}, "source": [ "Up to this point there is no species present anywhere in the system and also no way for it to enter the system. Since the reaction is irreversible in our setup, we need to introduce some density of both the educt species to the system.\n", @@ -1140,6 +1206,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d4fae62a", "metadata": {}, "outputs": [], "source": [ @@ -1152,6 +1219,7 @@ }, { "cell_type": "markdown", + "id": "9f8b9ac6", "metadata": {}, "source": [ "With this, the system is now finally complete and we can start the integration. To see the system evolve, we will render a movie from the timeseries of the system. For that we have to setup some helper functions for the plotting, which are beyond the scope of this tutorial." @@ -1160,6 +1228,7 @@ { "cell_type": "code", "execution_count": null, + "id": "aa053843", "metadata": {}, "outputs": [], "source": [ @@ -1188,6 +1257,7 @@ { "cell_type": "code", "execution_count": null, + "id": "caa469e0", "metadata": {}, "outputs": [], "source": [ @@ -1268,6 +1338,7 @@ }, { "cell_type": "markdown", + "id": "52005dba", "metadata": {}, "source": [ "Looking at the movie of the species densities one can see that the fluid flow advects the educt species from their source locations past the cylinders into the system. 
Here, they start to mix and react, such that the product forms.\n", @@ -1295,5 +1366,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/doc/tutorials/error_analysis/error_analysis_part1.ipynb b/doc/tutorials/error_analysis/error_analysis_part1.ipynb index 6261862ebd9..bded433a5ce 100644 --- a/doc/tutorials/error_analysis/error_analysis_part1.ipynb +++ b/doc/tutorials/error_analysis/error_analysis_part1.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "2df81180", "metadata": {}, "source": [ "# Tutorial: Error Estimation - Part 1 (Introduction and Binning Analysis)" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "1d8b8af7", "metadata": {}, "source": [ "## Table of contents\n", @@ -21,6 +23,7 @@ }, { "cell_type": "markdown", + "id": "ee6708e7", "metadata": {}, "source": [ "## Data generation\n", @@ -31,6 +34,7 @@ { "cell_type": "code", "execution_count": null, + "id": "137dbad6", "metadata": { "scrolled": true }, @@ -92,6 +96,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6809e207", "metadata": { "scrolled": true }, @@ -109,6 +114,7 @@ }, { "cell_type": "markdown", + "id": "1269a47b", "metadata": {}, "source": [ "## Introduction\n", @@ -153,6 +159,7 @@ }, { "cell_type": "markdown", + "id": "6153fa74", "metadata": {}, "source": [ "## Uncorrelated samples\n", @@ -174,6 +181,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3259bc07", "metadata": {}, "outputs": [], "source": [ @@ -187,6 +195,7 @@ }, { "cell_type": "markdown", + "id": "1f2b5888", "metadata": {}, "source": [ "One can clearly see that each sample lies in the vicinity of the previous one.\n", @@ -197,6 +206,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d51a25c0", "metadata": {}, "outputs": [], "source": [ @@ -211,6 +221,7 @@ }, { "cell_type": "markdown", + "id": "d22288e6", "metadata": {}, "source": [ "However, you should not trust your eye in deciding whether or not a time series is correlated. In fact, when running molecular dynamics simulations, your best guess is to always assume that samples are correlated, and that you should use one of the following techniques for statistical analysis, and rather not just use equation (2)." 
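Equation (2) is the plain standard error of the mean, $\mathrm{SEM} = \sqrt{\mathrm{Var}(X)/N}$, which is only valid for uncorrelated samples; for positively correlated data it underestimates the true error by roughly a factor $\sqrt{2 \tau_{X,\mathrm{int}}}$. A small sketch with an AR(1) process, the same kind of synthetic data generated above (the parameters are illustrative):

```python
import numpy as np

# AR(1) process x_{i+1} = phi * x_i + noise
rng = np.random.default_rng(42)
phi = 0.95
n_samples = 100000
x = np.zeros(n_samples)
for i in range(1, n_samples):
    x[i] = phi * x[i - 1] + rng.normal()

naive_sem = np.std(x, ddof=1) / np.sqrt(n_samples)
# for an AR(1) process the integrated autocorrelation time is known analytically
tau_int = 0.5 * (1. + phi) / (1. - phi)
print(naive_sem, naive_sem * np.sqrt(2. * tau_int))  # naive vs. corrected SEM
```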
@@ -218,6 +229,7 @@ }, { "cell_type": "markdown", + "id": "6275d654", "metadata": {}, "source": [ "## Binning analysis\n", @@ -232,6 +244,7 @@ { "cell_type": "code", "execution_count": null, + "id": "747fe22a", "metadata": {}, "outputs": [], "source": [ @@ -240,10 +253,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "2256078f", + "metadata": {}, "source": [ "#### Exercise\n", "* Determine the maximally possible number of bins of size ```BIN_SIZE``` with the data in ```time_series_1```, and store it in a variable ```N_BINS```.\n", @@ -252,52 +263,52 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "bd4974a4", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "N_BINS = N_SAMPLES // BIN_SIZE\n", "bin_avgs = np.zeros(N_BINS)\n", "for i in range(N_BINS):\n", - " bin_avgs[i] = np.average(time_series_1[i * BIN_SIZE:(i + 1) * BIN_SIZE])\n", - "```" + " bin_avgs[i] = np.average(time_series_1[i * BIN_SIZE:(i + 1) * BIN_SIZE])" ] }, { "cell_type": "code", "execution_count": null, + "id": "68bcfd80", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "82cfd09e", + "metadata": {}, "source": [ "#### Exercise\n", "Compute the average of all bin averages and store it in ```avg```. This is the overall average, our best guess for the measured quantity. Furthermore, compute the standard error of the mean using equations (1) and (2) from the values in ```bin_avgs``` and store it in ```sem```." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "a2d52f1b", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "avg = np.average(bin_avgs)\n", - "sem = np.sqrt(np.sum((bin_avgs - avg)**2) / (N_BINS - 1.5) / N_BINS)\n", - "```" + "sem = np.sqrt(np.sum((bin_avgs - avg)**2) / (N_BINS - 1.5) / N_BINS)" ] }, { "cell_type": "code", "execution_count": null, + "id": "9ab6b4c5", "metadata": {}, "outputs": [], "source": [] @@ -305,6 +316,7 @@ { "cell_type": "code", "execution_count": null, + "id": "944e116f", "metadata": {}, "outputs": [], "source": [ @@ -314,6 +326,7 @@ }, { "cell_type": "markdown", + "id": "9071e349", "metadata": {}, "source": [ "Now we already have an estimate on how precise our simulation result is. But how do we know if we chose the appropriate bin size? The answer is, we can perform binning analysis for many different bin sizes and check when the SEM converges. For that we would like to define a function that does the binning analysis in one go." @@ -321,55 +334,53 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "f26a8c23", + "metadata": {}, "source": [ "#### Exercise\n", "Define a function called ```do_binning_analysis``` that takes as arguments ```data``` (a numpy array containing the samples) and ```bin_size``` and returns the estimated SEM. You can reuse your code from the previous exercises and adapt it to be part of the function." 
] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "d0437acb", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def do_binning_analysis(data, bin_size):\n", " n_samples = len(data)\n", " n_bins = n_samples // bin_size\n", " bin_avgs = np.mean(data[:n_bins * bin_size].reshape((n_bins, -1)), axis=1)\n", - " return np.std(bin_avgs, ddof=1.5) / np.sqrt(n_bins)\n", - "```" + " return np.std(bin_avgs, ddof=1.5) / np.sqrt(n_bins)" ] }, { "cell_type": "code", "execution_count": null, + "id": "c196eb79", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "2d64004f", + "metadata": {}, "source": [ "#### Exercise\n", "Now take the data in ```time_series_1``` and perform binning analysis for bin sizes from 3 up to 5000 and plot the estimated SEMs against the bin size with logarithmic x axis. Your SEM estimates should be stored in a numpy array called ```sems```." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "f603d820", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "sizes = np.arange(3, 5001, dtype=int)\n", "sems = np.zeros(5001 - 3, dtype=float)\n", "for s in range(len(sizes)):\n", @@ -380,19 +391,20 @@ "plt.xscale(\"log\")\n", "plt.xlabel(\"$N_B$\")\n", "plt.ylabel(\"SEM\")\n", - "plt.show()\n", - "```" + "plt.show()" ] }, { "cell_type": "code", "execution_count": null, + "id": "d84eb650", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "d97952eb", "metadata": {}, "source": [ "You should see that the series converges to a value between 0.04 and 0.05, before transitioning into a noisy tail. The tail becomes increasingly noisy, because as the block size increases, the number of blocks decreases, thus resulting in worse statistics.\n", @@ -403,6 +415,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9a19d01b", "metadata": {}, "outputs": [], "source": [ @@ -443,6 +456,7 @@ }, { "cell_type": "markdown", + "id": "06fd9f75", "metadata": {}, "source": [ "Even though the fit is not perfect, it suffices to give us the position of the asymptote, which is the final estimate for the standard error of the mean. You can see that binning analysis, in fact, managed to estimate the SEM very precisely compared to the analytical solution. This illustrates that most of the time, binning analysis will give you a very reasonable estimate for the SEM, and in fact, is often used in practice because of its simplicity.\n", @@ -453,6 +467,7 @@ { "cell_type": "code", "execution_count": null, + "id": "33e71452", "metadata": { "scrolled": true }, @@ -479,6 +494,7 @@ }, { "cell_type": "markdown", + "id": "bb459079", "metadata": {}, "source": [ "Even though we have the exact same number of samples, we cannot see the binning analysis converge. The SEM simply cannot be determined. 
Usually, this is due to very long correlations, and can only be compensated by simulating for a longer time.\n", @@ -489,6 +505,7 @@ }, { "cell_type": "markdown", + "id": "c20159bc", "metadata": {}, "source": [ "## References\n", @@ -516,5 +533,5 @@ } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } diff --git a/doc/tutorials/error_analysis/error_analysis_part2.ipynb b/doc/tutorials/error_analysis/error_analysis_part2.ipynb index 0dfda3cc48b..d2060d7beca 100644 --- a/doc/tutorials/error_analysis/error_analysis_part2.ipynb +++ b/doc/tutorials/error_analysis/error_analysis_part2.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "a926fb8f", "metadata": {}, "source": [ "# Tutorial: Error Estimation - Part 2 (Autocorrelation Analysis)" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "8bb2141b", "metadata": {}, "source": [ "## Table of contents\n", @@ -21,6 +23,7 @@ }, { "cell_type": "markdown", + "id": "800da589", "metadata": {}, "source": [ "## Data generation\n", @@ -31,6 +34,7 @@ { "cell_type": "code", "execution_count": null, + "id": "fc23ce80", "metadata": {}, "outputs": [], "source": [ @@ -90,6 +94,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d816633f", "metadata": {}, "outputs": [], "source": [ @@ -105,6 +110,7 @@ }, { "cell_type": "markdown", + "id": "c8f31417", "metadata": {}, "source": [ "## Introduction\n", @@ -157,6 +163,7 @@ }, { "cell_type": "markdown", + "id": "1375526d", "metadata": {}, "source": [ "## Computing the auto-covariance function\n", @@ -176,22 +183,21 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "710641e1", + "metadata": {}, "source": [ "#### Exercise\n", "Compute the auto-covariance function of the data in `time_series_1` using the estimator in equation (6) and store it into a numpy array called `autocov`. Compute it for all $j$ from `0` up to `999`. Plot it against $j$." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "88730cb3", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# naive Python solution\n", "autocov = np.zeros(300)\n", "avg = np.average(time_series_1)\n", @@ -205,13 +211,13 @@ "plt.plot(autocov)\n", "plt.xlabel(\"lag time $j$\")\n", "plt.ylabel(\"$\\hat{K}^{XX}_j$\")\n", - "plt.show()\n", - "```" + "plt.show()" ] }, { "cell_type": "code", "execution_count": null, + "id": "d77368ef", "metadata": { "scrolled": true }, @@ -220,6 +226,7 @@ }, { "cell_type": "markdown", + "id": "4a59edae", "metadata": {}, "source": [ "Depending on your implementation, this computation might have taken a significant amount of time (up to a couple tens of seconds). When doing a lot of these computations, using highly optimized routines for numerics can save a lot of time. The following example shows how to utilize the common Numpy package to do the job quicker." @@ -228,6 +235,7 @@ { "cell_type": "code", "execution_count": null, + "id": "59d80df9", "metadata": {}, "outputs": [], "source": [ @@ -249,6 +257,7 @@ }, { "cell_type": "markdown", + "id": "1319b798", "metadata": {}, "source": [ "We can see that the auto-covariance function starts at a high value and decreases quickly into a long noisy tail which fluctuates around zero. The high values at short lag times indicate that there are strong correlations at short time scales, as expected. 
However, even though the tail looks uninteresting, it can bear important information about the statistics of your data. Small systematic deviations from 0 in the tail can be a hint that long-term correlations exist in your system. On the other hand, if there is no sign of a systematic deviation from 0 in the tail, this usually means that the correlation is decaying well within the simulation time, and that the statistics are good enough to estimate an error. In the above example, the correlation quickly decays to zero. Despite the noise in the tail, the statistics seem very reasonable." @@ -256,6 +265,7 @@ }, { "cell_type": "markdown", + "id": "eb3daafc", "metadata": {}, "source": [ "## Autocorrelation time\n", @@ -266,6 +276,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e0434e74", "metadata": { "scrolled": true }, @@ -302,6 +313,7 @@ }, { "cell_type": "markdown", + "id": "4aaedd58", "metadata": {}, "source": [ "Since the auto-covariance function is very well matched with an exponential, this analysis already gives us a reasonable estimate of the autocorrelation time. Here we have the luxury to have an analytical ACF at hand which describes the statistics of the simple AR(1) process, which generated our simulation data. It is in fact exponential and agrees very well with the numerical ACF. In practice, however, you will neither know an analytical ACF, nor know if the ACF is exponential, at all. In many systems, the ACF is more or less exponential, but this is not necessarily the case.\n", @@ -320,6 +332,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8825cdfb", "metadata": {}, "outputs": [], "source": [ @@ -345,6 +358,7 @@ }, { "cell_type": "markdown", + "id": "0ff9c19b", "metadata": {}, "source": [ "In this plot, we have the analytical solution at hand, which is a luxury not present in real applications. For the analysis, we therefore need to act as if there was no analytic solution:\n", @@ -361,6 +375,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ad9d5b6d", "metadata": { "scrolled": true }, @@ -390,6 +405,7 @@ }, { "cell_type": "markdown", + "id": "7f240694", "metadata": {}, "source": [ "Using this value of $j_\\mathrm{max}$, we can calculate the integrated autocorrelation time $\\hat{\\tau}_{X, \\mathrm{int}}$ and estimate the SEM with equation (5)." 
@@ -398,6 +414,7 @@ { "cell_type": "code", "execution_count": null, + "id": "67ed2850", "metadata": {}, "outputs": [], "source": [ @@ -415,10 +432,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "a1f3d619", + "metadata": {}, "source": [ "#### Exercise\n", "* Write a function called `autocorrelation_analysis`, which takes as arguments\n", @@ -440,12 +455,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "175ea951", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def autocorrelation_analysis(data, C, window):\n", " # initial processing\n", " data_size = len(data)\n", @@ -506,13 +522,13 @@ " return sem\n", "\n", "\n", - "sem_2 = autocorrelation_analysis(time_series_2, 5, 20000)\n", - "```" + "sem_2 = autocorrelation_analysis(time_series_2, 5, 20000)" ] }, { "cell_type": "code", "execution_count": null, + "id": "8873abe1", "metadata": { "scrolled": false }, @@ -521,10 +537,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "63db4c7a", + "metadata": {}, "source": [ "#### Exercise\n", "Interpret the results of the analysis of `time_series_2`." @@ -532,9 +546,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "id": "7bdb3d51", + "metadata": {}, "source": [ "**Interpretation of the analysis**\n", "\n", @@ -543,6 +556,7 @@ }, { "cell_type": "markdown", + "id": "89a99dc9", "metadata": {}, "source": [ "## References\n", @@ -573,5 +587,5 @@ } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } diff --git a/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb index e9cfb8ebec8..314c02e45a1 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "2f9d7d77", "metadata": {}, "source": [ "# Ferrofluid - Part 1" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "0a9f6cb8", "metadata": {}, "source": [ "## Table of Contents\n", @@ -26,6 +28,7 @@ }, { "cell_type": "markdown", + "id": "7223c854", "metadata": {}, "source": [ "## Introduction" @@ -33,6 +36,7 @@ }, { "cell_type": "markdown", + "id": "12322446", "metadata": {}, "source": [ "Ferrofluids are colloidal suspensions of ferromagnetic single-domain particles in a liquid carrier. As the single particles contain only one magnetic domain, they can be seen as small permanent magnets. To prevent agglomeration of the particles, due to van-der-Waals or magnetic attraction, they are usually sterically or electrostatically stabilized (see figure 1). The former is achieved by adsorption of long chain molecules onto the particle surface, the latter by adsorption of charged coating particles. The size of the ferromagnetic particles are in the region of 10 nm. With the surfactant layer added they can reach a size of a few hundred nanometers. Have in mind that if we refer to the particle diameter $\\sigma$ we mean the diameter of the magnetic core plus two times the thickness of the surfactant layer.\n", @@ -46,6 +50,7 @@ }, { "cell_type": "markdown", + "id": "eb5f20dd", "metadata": {}, "source": [ "
\n", @@ -58,6 +63,7 @@ }, { "cell_type": "markdown", + "id": "4f88bc70", "metadata": {}, "source": [ "
\n", @@ -70,6 +76,7 @@ }, { "cell_type": "markdown", + "id": "095339c1", "metadata": {}, "source": [ "## The Model" @@ -77,6 +84,7 @@ }, { "cell_type": "markdown", + "id": "7f35fe0f", "metadata": {}, "source": [ "For simplicity in this tutorial we simulate spherical particles in a monodisperse ferrofluid system which means all particles have the same diameter $\\sigma$ and dipole moment $\\mu$. The point dipole moment is placed at the center of the particles and is constant both in magnitude and direction (in the coordinate system of the particle). This can be justified as the Néel relaxation times are usually negligible for the usual sizes of ferrofluid particles.\n", @@ -132,6 +140,7 @@ }, { "cell_type": "markdown", + "id": "62b95d9c", "metadata": {}, "source": [ "
\n", @@ -144,6 +153,7 @@ }, { "cell_type": "markdown", + "id": "07fddbbd", "metadata": {}, "source": [ "## Structure of this tutorial" @@ -151,6 +161,7 @@ }, { "cell_type": "markdown", + "id": "598960a8", "metadata": {}, "source": [ "The aim of this tutorial is to introduce the basic features of **ESPResSo** for ferrofluids or dipolar fluids in general. In **part I** and **part II** we will do this for a monolayer-ferrofluid, in **part III** for a three dimensional system. In **part I** we will examine the clusters which are present in all interesting ferrofluid systems. In **part II** we will examine the influence of the dipole-dipole-interaction on the magnetization curve of a ferrofluid. In **part III** we calculate estimators for the initial susceptibility using fluctuation formulas and sample the magnetization curve.\n", @@ -160,6 +171,7 @@ }, { "cell_type": "markdown", + "id": "5b064e1c", "metadata": {}, "source": [ "**Remark**: The equilibration and sampling times used in this tutorial would be not sufficient for scientific purposes, but they are long enough to get at least a qualitative insight of the behaviour of ferrofluids. They have been shortened so we achieve reasonable computation times for the purpose of a tutorial." @@ -167,6 +179,7 @@ }, { "cell_type": "markdown", + "id": "b6d5a65e", "metadata": {}, "source": [ "## Compiling ESPResSo for this Tutorial" @@ -174,6 +187,7 @@ }, { "cell_type": "markdown", + "id": "4ed10bf7", "metadata": {}, "source": [ "For this tutorial the following features of **ESPResSo** are needed" @@ -181,6 +195,7 @@ }, { "cell_type": "markdown", + "id": "72cfad3e", "metadata": {}, "source": [ "```c++\n", @@ -192,6 +207,7 @@ }, { "cell_type": "markdown", + "id": "a1082056", "metadata": {}, "source": [ "Please uncomment them in the myconfig.hpp and compile **ESPResSo** using this myconfig.hpp." @@ -199,6 +215,7 @@ }, { "cell_type": "markdown", + "id": "a0bffbeb", "metadata": {}, "source": [ "## A Monolayer-Ferrofluid System in ESPResSo" @@ -206,6 +223,7 @@ }, { "cell_type": "markdown", + "id": "73e23bff", "metadata": {}, "source": [ "For interesting ferrofluid systems, where the fraction of ferromagnetic particles in the liquid carrier and their dipole moment are not vanishingly small, the ferromagnetic particles form clusters of different shapes and sizes. If the fraction and/or dipole moments are big enough the clusters can interconnect with each other and form a whole space occupying network.\n", @@ -214,6 +232,7 @@ }, { "cell_type": "markdown", + "id": "2adaed01", "metadata": {}, "source": [ "### Setup" @@ -221,6 +240,7 @@ }, { "cell_type": "markdown", + "id": "f704b1a3", "metadata": {}, "source": [ "We start with checking for the presence of ESPResSo features and importing all necessary packages." @@ -229,6 +249,7 @@ { "cell_type": "code", "execution_count": null, + "id": "73f18167", "metadata": {}, "outputs": [], "source": [ @@ -249,6 +270,7 @@ }, { "cell_type": "markdown", + "id": "126c83a4", "metadata": {}, "source": [ "Now we set up all simulation parameters. " @@ -257,6 +279,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6d82f2ed", "metadata": {}, "outputs": [], "source": [ @@ -286,6 +309,7 @@ }, { "cell_type": "markdown", + "id": "b145e612", "metadata": {}, "source": [ "Note that we declared a lj_cut. 
This will be used as the cut-off radius of the Lennard-Jones potential to obtain a purely repulsive WCA potential.\n", @@ -295,10 +319,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "136aa645", + "metadata": {}, "source": [ "## Exercise:\n", "How large does `BOX_SIZE` have to be for a system of `N_PART` particles with a volume (area) fraction `PHI`?\n", @@ -307,9 +329,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "id": "db35f94d", + "metadata": {}, "source": [ "$$\n", "L_{\\text{box}} = \\sqrt{\\frac{N A_{\\text{sphere}}}{\\varphi}}\n", @@ -317,19 +338,20 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "403f2e97", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", - "BOX_SIZE = (N_PART * np.pi * (LJ_SIGMA / 2.)**2 / PHI)**0.5\n", - "```" + "# SOLUTION CELL\n", + "BOX_SIZE = (N_PART * np.pi * (LJ_SIGMA / 2.)**2 / PHI)**0.5" ] }, { "cell_type": "code", "execution_count": null, + "id": "be1feaa4", "metadata": {}, "outputs": [], "source": [ @@ -346,6 +368,7 @@ }, { "cell_type": "markdown", + "id": "1db596a3", "metadata": {}, "source": [ "Now we set up the interaction between the particles as a non-bonded interaction and use the Lennard-Jones potential as the interaction potential. Here we use the above mentioned cut-off radius to get a purely repulsive interaction. " @@ -354,6 +377,7 @@ { "cell_type": "code", "execution_count": null, + "id": "89b93a20", "metadata": {}, "outputs": [], "source": [ @@ -363,6 +387,7 @@ }, { "cell_type": "markdown", + "id": "6cb78efb", "metadata": {}, "source": [ "Now we generate random positions and orientations of the particles and their dipole moments. \n", @@ -373,10 +398,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "c49729f7", + "metadata": {}, "source": [ "## Exercise:\n", "How does one set up randomly oriented dipole moments?\n", @@ -386,12 +409,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "20547b95", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# Random dipole moments\n", "np.random.seed(seed=1)\n", "dip_phi = 2. * np.pi * np.random.random((N_PART, 1))\n", @@ -400,13 +424,13 @@ "dip = np.hstack((\n", " dip_sin_theta * np.sin(dip_phi),\n", " dip_sin_theta * np.cos(dip_phi),\n", - " dip_cos_theta))\n", - "```" + " dip_cos_theta))" ] }, { "cell_type": "code", "execution_count": null, + "id": "31d7e504", "metadata": {}, "outputs": [], "source": [ @@ -420,6 +444,7 @@ }, { "cell_type": "markdown", + "id": "c5617300", "metadata": {}, "source": [ "Now we add the particles with their positions and orientations to our system.\n", @@ -431,6 +456,7 @@ { "cell_type": "code", "execution_count": null, + "id": "523fb464", "metadata": {}, "outputs": [], "source": [ @@ -441,6 +467,7 @@ }, { "cell_type": "markdown", + "id": "f227c6ed", "metadata": {}, "source": [ "Be aware that we do not set the magnitude of the magnetic dipole moments to the particles. 
As in our case all particles have the same dipole moment it is possible to rewrite the dipole-dipole interaction potential to\n", @@ -461,6 +488,7 @@ }, { "cell_type": "markdown", + "id": "a900d33f", "metadata": {}, "source": [ "Now we choose the steepest descent integrator to remove possible overlaps of the particles." @@ -469,6 +497,7 @@ { "cell_type": "code", "execution_count": null, + "id": "cf428a28", "metadata": {}, "outputs": [], "source": [ @@ -479,10 +508,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "8499b618", + "metadata": {}, "source": [ "## Exercise:\n", "\n", @@ -491,12 +518,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "dcbcb568", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "import sys\n", "\n", "energy = system.analysis.energy()['total']\n", @@ -509,19 +537,20 @@ " break\n", " relative_energy_change = (energy - energy_new) / energy\n", " print(f'Minimization, relative change in energy: {relative_energy_change:.4f}')\n", - " energy = energy_new\n", - "```" + " energy = energy_new" ] }, { "cell_type": "code", "execution_count": null, + "id": "63fdafa0", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "7d476671", "metadata": {}, "source": [ "For the simulation of our system we choose the velocity Verlet integrator.\n", @@ -536,6 +565,7 @@ { "cell_type": "code", "execution_count": null, + "id": "767d6139", "metadata": {}, "outputs": [], "source": [ @@ -546,6 +576,7 @@ }, { "cell_type": "markdown", + "id": "6453d4e2", "metadata": {}, "source": [ "To calculate the dipole-dipole interaction we use the dipolar P3M method\n", @@ -562,6 +593,7 @@ { "cell_type": "code", "execution_count": null, + "id": "fd12b1f6", "metadata": { "scrolled": true }, @@ -582,6 +614,7 @@ }, { "cell_type": "markdown", + "id": "b8da8cb2", "metadata": {}, "source": [ "Now we equilibrate the dipole-dipole interaction for some time" @@ -590,6 +623,7 @@ { "cell_type": "code", "execution_count": null, + "id": "95c4a8b6", "metadata": { "scrolled": true }, @@ -604,6 +638,7 @@ }, { "cell_type": "markdown", + "id": "a6c4c46b", "metadata": {}, "source": [ "## Sampling" @@ -611,6 +646,7 @@ }, { "cell_type": "markdown", + "id": "af0906f5", "metadata": {}, "source": [ "The system will be sampled over 100 loops." @@ -619,6 +655,7 @@ { "cell_type": "code", "execution_count": null, + "id": "73683c82", "metadata": {}, "outputs": [], "source": [ @@ -627,6 +664,7 @@ }, { "cell_type": "markdown", + "id": "516c2f33", "metadata": {}, "source": [ "As the system is two dimensional, we can simply do a scatter plot to get a visual representation of a system state. To get a better insight of how a ferrofluid system develops during time we will create a video of the development of our system during the sampling. If you only want to sample the system simply go to [Sampling without animation](#Sampling-without-animation)" @@ -634,6 +672,7 @@ }, { "cell_type": "markdown", + "id": "60dc1653", "metadata": {}, "source": [ "### Sampling with animation" @@ -641,6 +680,7 @@ }, { "cell_type": "markdown", + "id": "5f7fe3a7", "metadata": {}, "source": [ "To get an animation of the system development we have to create a function which will save the video and embed it in an html string." 
@@ -649,6 +689,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ed0ee691", "metadata": {}, "outputs": [], "source": [ @@ -687,10 +728,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "89d9844a", + "metadata": {}, "source": [ "## Exercise:\n", "\n", @@ -714,12 +753,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "91004d62", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def run(i):\n", " system.integrator.run(100)\n", "\n", @@ -728,19 +768,20 @@ " ax.figure.canvas.draw()\n", " part.set_data(x_data, y_data)\n", " print(f'progress: {(i + 1) * 100. / LOOPS:3.0f}%', end='\\r')\n", - " return part,\n", - "```" + " return part," ] }, { "cell_type": "code", "execution_count": null, + "id": "95caca0c", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "0f7e9093", "metadata": {}, "source": [ "Now we use the animation class of matplotlib to save snapshots of the system as frames of a video which is then displayed after the sampling is finished. Between two frames are 100 integration steps.\n", @@ -751,6 +792,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3d11e830", "metadata": { "scrolled": true }, @@ -764,6 +806,7 @@ }, { "cell_type": "markdown", + "id": "a7ab793c", "metadata": {}, "source": [ "## Cluster analysis\n", @@ -774,10 +817,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "2f6d201a", + "metadata": {}, "source": [ "## Exercise:\n", "\n", @@ -787,26 +828,28 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "fd562d2e", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# Setup cluster analysis\n", - "cluster_structure = espressomd.cluster_analysis.ClusterStructure(pair_criterion=espressomd.pair_criteria.DistanceCriterion(cut_off=1.3 * LJ_SIGMA))\n", - "```" + "cluster_structure = espressomd.cluster_analysis.ClusterStructure(pair_criterion=espressomd.pair_criteria.DistanceCriterion(cut_off=1.3 * LJ_SIGMA))" ] }, { "cell_type": "code", "execution_count": null, + "id": "559d15d5", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "7eb0941f", "metadata": {}, "source": [ "Now we sample our system for some time and do a cluster analysis in order to get an estimator of the cluster observables." @@ -814,6 +857,7 @@ }, { "cell_type": "markdown", + "id": "12ebaed0", "metadata": {}, "source": [ "For the cluster analysis we create two empty lists. The first for the number of clusters and the second for their respective sizes." @@ -822,6 +866,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0fbeabc7", "metadata": {}, "outputs": [], "source": [ @@ -831,6 +876,7 @@ }, { "cell_type": "markdown", + "id": "6b8d8e7f", "metadata": {}, "source": [ "### Sampling without animation" @@ -838,6 +884,7 @@ }, { "cell_type": "markdown", + "id": "2098e033", "metadata": {}, "source": [ "The following code just samples the system and does a cluster analysis every loops (100 by default) simulation steps." 
@@ -845,10 +892,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "98b99dd6", + "metadata": {}, "source": [ "## Exercise:\n", "\n", @@ -869,13 +914,15 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, + "id": "95b9de2c", "metadata": { - "scrolled": true, - "solution2": "hidden" + "scrolled": true }, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "for i in tqdm.trange(LOOPS):\n", " # Run cluster analysis\n", " cluster_structure.run_for_all_pairs()\n", @@ -884,19 +931,20 @@ " n_clusters.append(len(cluster_structure.clusters))\n", " for c in cluster_structure.clusters:\n", " cluster_sizes.append(c[1].size())\n", - " system.integrator.run(100)\n", - "```" + " system.integrator.run(100)" ] }, { "cell_type": "code", "execution_count": null, + "id": "5095587f", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "0042827e", "metadata": {}, "source": [ "You may want to get a visualization of the current state of the system. For that we plot the particle positions folded to the simulation box using matplotlib." @@ -905,6 +953,7 @@ { "cell_type": "code", "execution_count": null, + "id": "387aa7fd", "metadata": {}, "outputs": [], "source": [ @@ -919,6 +968,7 @@ }, { "cell_type": "markdown", + "id": "8efa5ff5", "metadata": {}, "source": [ "In the plot chain-like and ring-like clusters should be visible. Some of them are connected via Y- or X-links to each other. Also some monomers should be present." @@ -926,6 +976,7 @@ }, { "cell_type": "markdown", + "id": "4c0828ea", "metadata": {}, "source": [ "## Cluster distribution" @@ -933,6 +984,7 @@ }, { "cell_type": "markdown", + "id": "562c7f07", "metadata": {}, "source": [ "After having sampled our system we now can calculate estimators for the expectation value of the cluster sizes and their distribution." 
@@ -940,10 +992,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "1c6be57c", + "metadata": {}, "source": [ "## Exercise:\n", "\n", @@ -957,25 +1007,27 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "40ec5f37", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", - "size_dist = np.histogram(cluster_sizes, range=(2, 21), bins=19)\n", - "```" + "# SOLUTION CELL\n", + "size_dist = np.histogram(cluster_sizes, range=(2, 21), bins=19)" ] }, { "cell_type": "code", "execution_count": null, + "id": "9e14aff6", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "0a018911", "metadata": {}, "source": [ "Now we can plot this histogram and should see the number of clusters decreasing roughly\n", @@ -985,6 +1037,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bd36fb0a", "metadata": {}, "outputs": [], "source": [ @@ -1011,6 +1064,7 @@ }, { "cell_type": "markdown", + "id": "13bafd6c", "metadata": {}, "source": [ "## References\n", @@ -1021,6 +1075,7 @@ }, { "cell_type": "markdown", + "id": "abac8fb7", "metadata": {}, "source": [ "Image sources:\n", @@ -1051,5 +1106,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb index fe508de7c01..4d58268f734 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "a5eaefe4", "metadata": {}, "source": [ "# Ferrofluid - Part 2" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "fdd2f63f", "metadata": {}, "source": [ "## Table of Contents\n", @@ -18,6 +20,7 @@ }, { "cell_type": "markdown", + "id": "ea757679", "metadata": {}, "source": [ "**Remark**: The equilibration and sampling times used in this tutorial would be not sufficient for scientific purposes, but they are long enough to get at least a qualitative insight of the behaviour of ferrofluids. They have been shortened so we achieve reasonable computation times for the purpose of a tutorial." @@ -25,6 +28,7 @@ }, { "cell_type": "markdown", + "id": "3be99e8e", "metadata": {}, "source": [ "## Applying an external magnetic field" @@ -32,6 +36,7 @@ }, { "cell_type": "markdown", + "id": "34fe9898", "metadata": {}, "source": [ "In this part we want to investigate the influence of a homogeneous external magnetic field exposed to a ferrofluid system." @@ -39,6 +44,7 @@ }, { "cell_type": "markdown", + "id": "60c713c0", "metadata": {}, "source": [ "We import all necessary packages and check for the required **ESPResSo** features" @@ -47,6 +53,7 @@ { "cell_type": "code", "execution_count": null, + "id": "81aecd17", "metadata": {}, "outputs": [], "source": [ @@ -64,6 +71,7 @@ }, { "cell_type": "markdown", + "id": "59830e32", "metadata": {}, "source": [ "and set up the simulation parameters where we introduce a new dimensionless parameter \n", @@ -78,6 +86,7 @@ { "cell_type": "code", "execution_count": null, + "id": "501042cb", "metadata": {}, "outputs": [], "source": [ @@ -113,6 +122,7 @@ }, { "cell_type": "markdown", + "id": "da6c52a6", "metadata": {}, "source": [ "Now we set up the system. 
As in **part I**, the orientation of the dipole moments is set directly on the particles, whereas the magnitude of the moments is taken into account when determining the prefactor of the dipolar P3M (for more details see **part I**). \n", @@ -124,6 +134,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8e48ad19", "metadata": { "scrolled": true }, @@ -185,6 +196,7 @@ }, { "cell_type": "markdown", + "id": "bc4463b8", "metadata": {}, "source": [ "We now apply the external magnetic field which is\n", @@ -200,6 +212,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f26c40d5", "metadata": {}, "outputs": [], "source": [ @@ -210,36 +223,36 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "1c865437", + "metadata": {}, "source": [ "## Exercise:\n", "Define a homogenous magnetic field constraint using `H_field` and add it to system's contraints.\n" ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "36ef7920", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "H_constraint = espressomd.constraints.HomogeneousMagneticField(H=H_field)\n", - "system.constraints.add(H_constraint)\n", - "```" + "system.constraints.add(H_constraint)" ] }, { "cell_type": "code", "execution_count": null, + "id": "d86a0287", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "d298c380", "metadata": {}, "source": [ "Equilibrate the system." @@ -248,6 +261,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7750295c", "metadata": { "scrolled": true }, @@ -263,6 +277,7 @@ }, { "cell_type": "markdown", + "id": "1a462730", "metadata": {}, "source": [ "Now we can visualize the current state and see that the particles mostly create chains oriented in the direction of the external magnetic field. Also some monomers should be present." @@ -271,6 +286,7 @@ { "cell_type": "code", "execution_count": null, + "id": "1dbeb3e4", "metadata": {}, "outputs": [], "source": [ @@ -285,6 +301,7 @@ }, { "cell_type": "markdown", + "id": "2f52d782", "metadata": {}, "source": [ "## Video of the development of the system" @@ -292,6 +309,7 @@ }, { "cell_type": "markdown", + "id": "eb06ab55", "metadata": {}, "source": [ "You may want to get an insight of how the system develops in time. Thus we now create a function which will save a video and embed it in an html string to create a video of the systems development " @@ -300,6 +318,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e25ba03b", "metadata": {}, "outputs": [], "source": [ @@ -350,6 +369,7 @@ }, { "cell_type": "markdown", + "id": "25b6af88", "metadata": {}, "source": [ "We now can start the sampling over the animation class of matplotlib" @@ -358,6 +378,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8c9fe146", "metadata": { "scrolled": true }, @@ -371,6 +392,7 @@ }, { "cell_type": "markdown", + "id": "278cfc7f", "metadata": {}, "source": [ "In the visualization video we can see that the single chains break and connect to each other during time. Also some monomers are present which break from and connect to chains. If you want to have some more frames, i.e. a longer video, just adjust the frames parameter in FuncAnimation." 
@@ -378,6 +400,7 @@ }, { "cell_type": "markdown", + "id": "5d6caf3f", "metadata": {}, "source": [ "## Magnetization curve" @@ -385,6 +408,7 @@ }, { "cell_type": "markdown", + "id": "496d39cd", "metadata": {}, "source": [ "An important observable of a ferrofluid system is the magnetization $M$ of the system in direction of an external magnetic field $H$\n", @@ -458,6 +482,7 @@ }, { "cell_type": "markdown", + "id": "cd2303b7", "metadata": {}, "source": [ "For the sampling of the magnetization curve we set up a new system, where we decrease the dipolar interaction parameter $\\lambda$ drastically. We do this as we want to compare our results with the approximation of Ref. [1] which is only valid for small dipole-dipole interaction between the particles (decreasing the volume fraction $\\phi$ would also be an appropriate choice). For smaller dipolar interaction parameters it is possible to increase the time step. We do this to get more uncorrelated measurements." @@ -466,6 +491,7 @@ { "cell_type": "code", "execution_count": null, + "id": "81805875", "metadata": {}, "outputs": [], "source": [ @@ -482,6 +508,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6a10f99b", "metadata": { "scrolled": true }, @@ -531,6 +558,7 @@ }, { "cell_type": "markdown", + "id": "d48b29a1", "metadata": {}, "source": [ "To increase the performance we use the built-in function MagneticDipoleMoment to calculate the dipole moment of the whole system. In our case this is only the orientation as we never set the strength of the dipole moments on our particles. " @@ -538,10 +566,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "f390dcdc", + "metadata": {}, "source": [ "## Exercise:\n", "Import the [magnetic dipole moment observable](https://espressomd.github.io/doc/espressomd.html#espressomd.observables.MagneticDipoleMoment) and define an observable object `dipm_tot`.\n", @@ -549,26 +575,28 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "d8aec5f9", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "import espressomd.observables\n", - "dipm_tot = espressomd.observables.MagneticDipoleMoment(ids=particles.id)\n", - "```" + "dipm_tot = espressomd.observables.MagneticDipoleMoment(ids=particles.id)" ] }, { "cell_type": "code", "execution_count": null, + "id": "9b93ea6b", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "aa6932b9", "metadata": {}, "source": [ "We use the dimensionless Langevin parameter $\\alpha$ as the parameter for the external magnetic field. As the interesting part of the magnetization curve is the one for small external magnetic field strengths—for large external magnetic fields the magnetization goes into saturation in all cases—we increase the spacing between the Langevin parameters $\\alpha$ up to higher values and write them into a list" @@ -577,6 +605,7 @@ { "cell_type": "code", "execution_count": null, + "id": "4f22e964", "metadata": {}, "outputs": [], "source": [ @@ -585,6 +614,7 @@ }, { "cell_type": "markdown", + "id": "f8a431d2", "metadata": {}, "source": [ "For both the magnetization perpendicular and parallel to the monolayer plane we use the same system for every value of the Langevin parameter $\\alpha$. Thus we use that the system is already more or less equilibrated from the previous run so we save some equilibration time. 
For scientific purposes one would use a new system for every value for the Langevin parameter to ensure that the systems are independent and no correlation effects are measured. Also one would perform more than just one simulation for each value of $\\alpha$ to increase the precision of the results." @@ -592,6 +622,7 @@ }, { "cell_type": "markdown", + "id": "a8a12b9f", "metadata": {}, "source": [ "Now we sample the magnetization for increasing $\\alpha$ (increasing magnetic field strength) in direction perpendicular to the monolayer plane." @@ -599,10 +630,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "812d5833", + "metadata": {}, "source": [ "## Exercise:\n", "\n", @@ -644,12 +673,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "855b3e9b", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# sampling with magnetic field perpendicular to monolayer plane (in z-direction)\n", "\n", "# remove all constraints\n", @@ -683,19 +713,20 @@ " magnetization_perp[ndx] = magn_temp / loops\n", "\n", " # remove constraint\n", - " system.constraints.clear()\n", - "```" + " system.constraints.clear()" ] }, { "cell_type": "code", "execution_count": null, + "id": "9a405ed8", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "d5b4e3c7", "metadata": {}, "source": [ "and now we sample the magnetization for increasing $\\alpha$ or increasing magnetic field in direction parallel to the monolayer plane." @@ -703,10 +734,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "0fd13c5b", + "metadata": {}, "source": [ "## Exercise:\n", "\n", @@ -717,12 +746,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "b9f032bc", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# sampling with magnetic field parallel to monolayer plane (in x-direction)\n", "\n", "# remove all constraints\n", @@ -756,19 +786,20 @@ " magnetization_para[ndx] = magn_temp / loops\n", "\n", " # remove constraint\n", - " system.constraints.clear()\n", - "```" + " system.constraints.clear()" ] }, { "cell_type": "code", "execution_count": null, + "id": "9179075b", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "aa19629f", "metadata": {}, "source": [ "Now we can compare the resulting magnetization curves with the Langevin curve and the more advanced ones of Ref. [1] by plotting all of them in one figure." @@ -776,6 +807,7 @@ }, { "cell_type": "markdown", + "id": "61019264", "metadata": {}, "source": [ "For the approximations of $M_{\\parallel}^{\\text{q2D}}$ and $M_{\\perp}^{\\text{q2D}}$ of Ref. [1] we need the dipole moment of a single particle. 
Thus we calculate it from our dipolar interaction parameter $\\lambda$" @@ -784,6 +816,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f3757779", "metadata": {}, "outputs": [], "source": [ @@ -794,6 +827,7 @@ }, { "cell_type": "markdown", + "id": "230204bb", "metadata": {}, "source": [ "and the saturation magnetization by using\n", @@ -807,6 +841,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7fc0a392", "metadata": {}, "outputs": [], "source": [ @@ -815,6 +850,7 @@ }, { "cell_type": "markdown", + "id": "f7668e3a", "metadata": {}, "source": [ "Further we need the derivation of the Langevin function after the external field $B$ thus we define the function" @@ -823,6 +859,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a7cee909", "metadata": {}, "outputs": [], "source": [ @@ -832,6 +869,7 @@ }, { "cell_type": "markdown", + "id": "1fa96c47", "metadata": {}, "source": [ "Now we define the approximated magnetization curves parallel and perpendicular to the monolayer plane" @@ -840,6 +878,7 @@ { "cell_type": "code", "execution_count": null, + "id": "34a84484", "metadata": {}, "outputs": [], "source": [ @@ -851,6 +890,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5a943e33", "metadata": {}, "outputs": [], "source": [ @@ -861,6 +901,7 @@ }, { "cell_type": "markdown", + "id": "223f0564", "metadata": {}, "source": [ "Now we define the Langevin function" @@ -869,6 +910,7 @@ { "cell_type": "code", "execution_count": null, + "id": "af45430b", "metadata": {}, "outputs": [], "source": [ @@ -879,6 +921,7 @@ }, { "cell_type": "markdown", + "id": "eacebb2f", "metadata": {}, "source": [ "and plot the three theoretical curves together with our simulation results" @@ -886,10 +929,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "5cc21f99", + "metadata": {}, "source": [ "## Exercise:\n", "\n", @@ -913,12 +954,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "c83872cd", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# list of the values for alpha (x-axis)\n", "x = np.arange(0.01, 9, 0.1, dtype=float)\n", "\n", @@ -931,19 +973,20 @@ "plt.plot(alphas, magnetization_perp / N_PART, 'o', label='simulation results $\\perp$')\n", "plt.plot(alphas, magnetization_para / N_PART, 'o', label='simulation results $\\parallel$')\n", "plt.legend(fontsize=20)\n", - "plt.show()\n", - "```" + "plt.show()" ] }, { "cell_type": "code", "execution_count": null, + "id": "a71b83e2", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "ab839910", "metadata": {}, "source": [ "We can see that the simulation results are better represented by the curves of Ref. [1] compared to the Langevin function. This was to be expected as the Langevin function is the magnetization curve of the real three dimensional system without dipole-dipole interaction. 
We can also see that the magnetization is smaller in the case of an external magnetic field perpendicular to the monolayer plane compared to the parallel case.\n", @@ -955,6 +998,7 @@ }, { "cell_type": "markdown", + "id": "64ca6d17", "metadata": {}, "source": [ "## References\n", @@ -983,5 +1027,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb index ab23a8dcb5b..5f4843fa35b 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "8c573e96", "metadata": {}, "source": [ "# Ferrofluid - Part 3 " @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "7d975bc1", "metadata": {}, "source": [ "## Table of Contents\n", @@ -20,6 +22,7 @@ }, { "cell_type": "markdown", + "id": "1d6be862", "metadata": {}, "source": [ "**Remark**: The equilibration and sampling times used in this tutorial would be not sufficient for scientific purposes, but they are long enough to get at least a qualitative insight of the behaviour of ferrofluids. They have been shortened so we achieve reasonable computation times for the purpose of a tutorial." @@ -27,6 +30,7 @@ }, { "cell_type": "markdown", + "id": "03d740d7", "metadata": {}, "source": [ "## Susceptibility with fluctuation formulas" @@ -34,6 +38,7 @@ }, { "cell_type": "markdown", + "id": "70e3ca09", "metadata": {}, "source": [ "In this part we want to calculate estimators for the initial susceptibility, i.e. the susceptibility at zero external magnetic field. One could carry out several simulations with different external magnetic field strengths and get the initial susceptibility by fitting a line to the results. We want to go a more elegant way by using fluctuation formulas known from statistical mechanics.\n", @@ -54,6 +59,7 @@ }, { "cell_type": "markdown", + "id": "ce772e10", "metadata": {}, "source": [ "### Derivation of the fluctuation formula" @@ -61,6 +67,7 @@ }, { "cell_type": "markdown", + "id": "7b8e5877", "metadata": {}, "source": [ "We want to derive the fluctuation formula. We start with the definition of the magnetic susceptibility. In general this reads\n", @@ -96,6 +103,7 @@ }, { "cell_type": "markdown", + "id": "683195d6", "metadata": {}, "source": [ "### Simulation" @@ -103,6 +111,7 @@ }, { "cell_type": "markdown", + "id": "f6a20c20", "metadata": {}, "source": [ "In this part we want to consider a three dimensional ferrofluid system and compare our result for the initial susceptibility $\\chi_\\mathrm{init}$ with them of Ref. [1]." @@ -110,6 +119,7 @@ }, { "cell_type": "markdown", + "id": "9a036d6c", "metadata": {}, "source": [ "First we import all necessary packages and check for the required **ESPResSo** features" @@ -118,6 +128,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0da30a6f", "metadata": {}, "outputs": [], "source": [ @@ -135,6 +146,7 @@ }, { "cell_type": "markdown", + "id": "0e57cbe2", "metadata": {}, "source": [ "Now we set up all necessary simulation parameters" @@ -143,6 +155,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d242a26e", "metadata": {}, "outputs": [], "source": [ @@ -178,6 +191,7 @@ }, { "cell_type": "markdown", + "id": "afce9ac8", "metadata": {}, "source": [ "Next we set up the system. 
As in **part I**, the orientation of the dipole moments is set directly on the particles, whereas the magnitude of the moments is taken into account when determining the prefactor of the dipolar P3M (for more details see **part I**)." @@ -185,6 +199,7 @@ }, { "cell_type": "markdown", + "id": "91a10ae0", "metadata": {}, "source": [ "**Hint:**\n", @@ -194,6 +209,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6f40c8f9", "metadata": { "scrolled": true }, @@ -242,6 +258,7 @@ }, { "cell_type": "markdown", + "id": "c1217680", "metadata": {}, "source": [ "Now we equilibrate for a while" @@ -250,6 +267,7 @@ { "cell_type": "code", "execution_count": null, + "id": "89966f9b", "metadata": { "scrolled": true }, @@ -263,6 +281,7 @@ }, { "cell_type": "markdown", + "id": "5312325f", "metadata": {}, "source": [ "As we need the magnetization of our system, we use espressomd.observables.MagneticDipoleMoment to calculate the total dipole moment of the system which is the magnetization times the volume of the system." @@ -271,6 +290,7 @@ { "cell_type": "code", "execution_count": null, + "id": "079e7e2e", "metadata": {}, "outputs": [], "source": [ @@ -280,6 +300,7 @@ }, { "cell_type": "markdown", + "id": "516565f9", "metadata": {}, "source": [ "Now we set the desired number of loops for the sampling" @@ -288,6 +309,7 @@ { "cell_type": "code", "execution_count": null, + "id": "96d47bec", "metadata": {}, "outputs": [], "source": [ @@ -297,6 +319,7 @@ }, { "cell_type": "markdown", + "id": "4a789882", "metadata": {}, "source": [ "and sample the first and second moment of the magnetization or total dipole moment, by averaging over all total dipole moments occurring during the simulation" @@ -305,6 +328,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bd24f8eb", "metadata": { "scrolled": true }, @@ -325,6 +349,7 @@ }, { "cell_type": "markdown", + "id": "11dcebed", "metadata": {}, "source": [ "For the estimator of the initial susceptibility $\\chi_\\mathrm{init}$ we need the magnitude of one single dipole moment" @@ -333,6 +358,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3a558280", "metadata": {}, "outputs": [], "source": [ @@ -343,6 +369,7 @@ }, { "cell_type": "markdown", + "id": "ae75a205", "metadata": {}, "source": [ "Now we can calculate $\\chi_\\mathrm{init}$ from our simulation data" @@ -351,6 +378,7 @@ { "cell_type": "code", "execution_count": null, + "id": "25e7dc1d", "metadata": {}, "outputs": [], "source": [ @@ -360,6 +388,7 @@ }, { "cell_type": "markdown", + "id": "e051f715", "metadata": {}, "source": [ "and print the result" @@ -368,6 +397,7 @@ { "cell_type": "code", "execution_count": null, + "id": "94038144", "metadata": {}, "outputs": [], "source": [ @@ -376,6 +406,7 @@ }, { "cell_type": "markdown", + "id": "2bbff406", "metadata": {}, "source": [ "Compared with the value $\\chi = 0.822 \\pm 0.017$ of Ref. [1] (see table 1) it should be very similar." @@ -383,6 +414,7 @@ }, { "cell_type": "markdown", + "id": "eec0dc25", "metadata": {}, "source": [ "Now we want to compare the result with the theoretical expectations.\n", @@ -392,6 +424,7 @@ { "cell_type": "code", "execution_count": null, + "id": "996d77b2", "metadata": {}, "outputs": [], "source": [ @@ -401,6 +434,7 @@ }, { "cell_type": "markdown", + "id": "4af7309d", "metadata": {}, "source": [ "and at second with the more advanced one (see Ref. [1] eq. 
(6)) which has a cubic accuracy in $\\chi_\\mathrm{L}$ and reads\n", @@ -414,6 +448,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d0ec4fe3", "metadata": {}, "outputs": [], "source": [ @@ -423,6 +458,7 @@ }, { "cell_type": "markdown", + "id": "affefcc7", "metadata": {}, "source": [ "Both of them should be smaller than our result, but the second one should be closer to our one. The deviation of the theoretical results to our simulation result can be explained by the fact that in the Langevin model there are no interactions between the particles incorporated at all and the more advanced (mean-field-type) one of Ref. [1] do not take occurring cluster formations into account but assumes a homogeneous distribution of the particles. For higher values of the volume fraction $\\phi$ and the dipolar interaction parameter $\\lambda$ the deviations will increase as the cluster formation will become more pronounced. " @@ -430,6 +466,7 @@ }, { "cell_type": "markdown", + "id": "297bee8b", "metadata": {}, "source": [ "## Magnetization curve of a 3D system" @@ -437,6 +474,7 @@ }, { "cell_type": "markdown", + "id": "8e5a6fcf", "metadata": {}, "source": [ "At the end of this tutorial we now want to sample the magnetization curve of a three dimensional system and compare the results with analytical solutions. Again we will compare with the Langevin function but also with the approximation of Ref. [2] (see also Ref. [1] for the right coefficients) which takes the dipole-dipole interaction into account. For this approximation, which is a modified mean-field theory based on the pair correlation function, the Langevin parameter $\\alpha$ is replaced by\n", @@ -454,6 +492,7 @@ }, { "cell_type": "markdown", + "id": "8afb2079", "metadata": {}, "source": [ "Analogous to **part II** we start at zero external magnetic field and increase the external field successively. At every value of the external field we sample the total dipole moment which is proportional to the magnetization as we have a fixed volume." @@ -461,6 +500,7 @@ }, { "cell_type": "markdown", + "id": "f6c8bfd2", "metadata": {}, "source": [ "First we create a list of values of the Langevin parameter $\\alpha$. As we already sampled the magnetization at zero external field in the last section, we take this value and continue with the sampling of an external field unequal zero" @@ -469,6 +509,7 @@ { "cell_type": "code", "execution_count": null, + "id": "57e7d57e", "metadata": {}, "outputs": [], "source": [ @@ -477,6 +518,7 @@ }, { "cell_type": "markdown", + "id": "ab3dba53", "metadata": {}, "source": [ "Now for each value in this list we sample the total dipole moment / magnetization of the system for a while. Keep in mind that we only the current orientation of the dipole moments, i.e. the unit vector of the dipole moments, is saved in the particle list but not their magnitude. Thus we have to use $H\\cdot \\mu$ as the external magnetic field, where $\\mu$ is the magnitude of a single magnetic dipole moment.\n", @@ -485,6 +527,7 @@ }, { "cell_type": "markdown", + "id": "6a49b59b", "metadata": {}, "source": [ "As in **part II** we use the same system for every value of the Langevin parameter $\\alpha$. Thus we use that the system is already pre-equilibrated from the previous run so we save some equilibration time. For scientific purposes one would use a new system for every value for the Langevin parameter to ensure that the systems are independent and no correlation effects are measured. 
Also one would perform more than just one simulation for each value of $\\alpha$ to increase the precision of the results." @@ -493,6 +536,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ce9fe0ff", "metadata": {}, "outputs": [], "source": [ @@ -537,6 +581,7 @@ }, { "cell_type": "markdown", + "id": "72769f36", "metadata": {}, "source": [ "Now we define the Langevin function and the modified mean-field-approximation of the Langevin parameter of Ref. [2]" @@ -545,6 +590,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c8dd236f", "metadata": {}, "outputs": [], "source": [ @@ -556,6 +602,7 @@ { "cell_type": "code", "execution_count": null, + "id": "26c510d3", "metadata": {}, "outputs": [], "source": [ @@ -567,6 +614,7 @@ }, { "cell_type": "markdown", + "id": "d0c5e46d", "metadata": {}, "source": [ "We also want to plot the linear approximation at $\\alpha = 0$ to see for which values of $\\alpha$ this approximation holds. We use the initial susceptibility calculated in the first chapter of this part as the gradient. As we want the gradient of $M^*$ with respect to $\\alpha$ which fulfills the relation\n", @@ -583,6 +631,7 @@ { "cell_type": "code", "execution_count": null, + "id": "54e57190", "metadata": {}, "outputs": [], "source": [ @@ -606,6 +655,7 @@ }, { "cell_type": "markdown", + "id": "d1652ed8", "metadata": {}, "source": [ "We can see that the magnetization curve where we used the Langevin parameter of the modified mean-field-theory is closer to our simulation results. Also we can clearly see that the linear approximation holds only for very small values of $\\alpha$.\n", @@ -617,6 +667,7 @@ }, { "cell_type": "markdown", + "id": "81e06b9b", "metadata": {}, "source": [ "## References\n", @@ -648,5 +699,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/grand_canonical_monte_carlo/grand_canonical_monte_carlo.ipynb b/doc/tutorials/grand_canonical_monte_carlo/grand_canonical_monte_carlo.ipynb index 41552acb3a0..314d5c72d21 100644 --- a/doc/tutorials/grand_canonical_monte_carlo/grand_canonical_monte_carlo.ipynb +++ b/doc/tutorials/grand_canonical_monte_carlo/grand_canonical_monte_carlo.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "f9f90587", "metadata": {}, "source": [ "# Simulations in the Grand-Canonical Ensemble\n", @@ -15,6 +16,7 @@ }, { "cell_type": "markdown", + "id": "264f5ae9", "metadata": {}, "source": [ "## Introduction\n", @@ -27,6 +29,7 @@ }, { "cell_type": "markdown", + "id": "ed632f9d", "metadata": {}, "source": [ "## Grand-Canonical Ensemble\n", @@ -79,6 +82,7 @@ }, { "cell_type": "markdown", + "id": "4ed328e8", "metadata": {}, "source": [ "## How to simulate a grand-canonical ensemble?\n", @@ -197,10 +201,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "915bdb77", + "metadata": {}, "source": [ "**Exercise**\n", "\n", @@ -217,9 +219,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "id": "bc6952f9", + "metadata": {}, "source": [ "For the case ($N_i\\rightarrow N_i + 1$) one gets\n", "\n", @@ -238,6 +239,7 @@ }, { "cell_type": "markdown", + "id": "04e9eab2", "metadata": {}, "source": [ "## The Simulated System: Polyelectrolyte solution coupled to a reservoir\n", @@ -279,6 +281,7 @@ }, { "cell_type": "markdown", + "id": "276cb693", "metadata": {}, "source": [ "In the folllowing, we plot the universal partition coefficient over the ratio of the concentrations." 
@@ -287,6 +290,7 @@ { "cell_type": "code", "execution_count": null, + "id": "cdc9933a", "metadata": {}, "outputs": [], "source": [ @@ -306,6 +310,7 @@ }, { "cell_type": "markdown", + "id": "dfdfc83a", "metadata": {}, "source": [ "## Simulation Setup\n", @@ -314,6 +319,7 @@ }, { "cell_type": "markdown", + "id": "4e740788", "metadata": {}, "source": [ "Let us first add required modules." @@ -322,6 +328,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0f6c7da4", "metadata": {}, "outputs": [], "source": [ @@ -340,6 +347,7 @@ }, { "cell_type": "markdown", + "id": "78f26b52", "metadata": {}, "source": [ "In the next step we define the [Lennard-Jones](https://espressomd.github.io/tutorials4.2.0/lennard_jones/lennard_jones.html) units. We use the module *pint* in order to make unit conversions between reduced simulation units and SI units easier." @@ -348,6 +356,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d38d7d4e", "metadata": {}, "outputs": [], "source": [ @@ -366,6 +375,7 @@ }, { "cell_type": "markdown", + "id": "512c9ba3", "metadata": {}, "source": [ "We load the resulting excess chemical potentials for different salt concentrations of the reservoir from the [Widom insertion tutorial](https://espressomd.github.io/tutorials/widom_insertion/widom_insertion.html). The loaded excess chemical potentials are in units of $k_\\mathrm{B}T$ (i.e. in reduced units)." @@ -374,6 +384,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3e00be65", "metadata": {}, "outputs": [], "source": [ @@ -407,6 +418,7 @@ }, { "cell_type": "markdown", + "id": "1e8b17a6", "metadata": {}, "source": [ "### Initializing the system\n", @@ -416,6 +428,7 @@ { "cell_type": "code", "execution_count": null, + "id": "506632cd", "metadata": { "scrolled": true }, @@ -478,6 +491,7 @@ }, { "cell_type": "markdown", + "id": "149caa36", "metadata": {}, "source": [ "Now we define our box and add our particles. We create the polymer chains as in the corresponding [tutorial](https://espressomd.github.io/tutorials/polymers/polymers.html). In addition, we enable the electrostatic interactions." @@ -486,6 +500,7 @@ { "cell_type": "code", "execution_count": null, + "id": "2eda7b56", "metadata": {}, "outputs": [], "source": [ @@ -557,10 +572,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "54ce1e7b", + "metadata": {}, "source": [ "Now we want to add a coupling to the reservoir. We can formally represent the insertion and deletion of ion pairs as a chemical reaction:\n", "\n", @@ -586,12 +599,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "bd50447f", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "K_XX = c_salt_res_sim.magnitude**2 * np.exp(\n", " excess_chemical_potential_monovalent_pairs_in_bulk(\n", " c_salt_res_sim.magnitude))\n", @@ -609,23 +623,21 @@ " })\n", "# Set the non interacting type at the smallest integer.\n", "# This may speed up the simulation (see Espresso docummentation section 19. 
Reaction methods)\n", - "RE.set_non_interacting_type(type=len(types) + 1)\n", - "```" + "RE.set_non_interacting_type(type=len(types) + 1)" ] }, { "cell_type": "code", "execution_count": null, + "id": "8119a5cb", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "9fb1b632", + "metadata": {}, "source": [ "Now we are ready to perform the actual sampling.\n", "\n", @@ -639,12 +651,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "2e6bdaed", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def perform_sampling(salt_concentration, number_of_loops):\n", " K_XX = salt_concentration**2 * np.exp(\n", " excess_chemical_potential_monovalent_pairs_in_bulk(salt_concentration))\n", @@ -668,19 +681,20 @@ " / (BOX_LENGTH**3 * salt_concentration)\n", " partition_coefficient_negative = np.mean(np.asarray(particle_numbers_negative))\\\n", " / (BOX_LENGTH**3 * salt_concentration)\n", - " return partition_coefficient_positive, partition_coefficient_negative\n", - "```" + " return partition_coefficient_positive, partition_coefficient_negative" ] }, { "cell_type": "code", "execution_count": null, + "id": "37cfe046", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "a86c7829", "metadata": {}, "source": [ "Now we can perform the actual simulations for the different salt concentrations and measure the partition coefficients." @@ -689,6 +703,7 @@ { "cell_type": "code", "execution_count": null, + "id": "364a385c", "metadata": { "scrolled": false }, @@ -706,6 +721,7 @@ }, { "cell_type": "markdown", + "id": "a7590ed9", "metadata": {}, "source": [ "To compare the results of our simulations we define a function for the analytical solution." @@ -714,6 +730,7 @@ { "cell_type": "code", "execution_count": null, + "id": "1a04143d", "metadata": {}, "outputs": [], "source": [ @@ -723,6 +740,7 @@ }, { "cell_type": "markdown", + "id": "868fec15", "metadata": {}, "source": [ "Then we can measure the partition coefficients derived from the simulations and compare them to the analytical results for an ideal system." @@ -731,6 +749,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5179a31f", "metadata": {}, "outputs": [], "source": [ @@ -753,10 +772,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "98741e5a", + "metadata": {}, "source": [ "**Exercise**\n", "* Interpret the deviation of the simulation results from the ideal prediction." @@ -764,9 +781,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "id": "906cfbd0", + "metadata": {}, "source": [ "First, we note that the data points for negative and positive ions\n", "always collapse onto the same value for ξ, i.e. ξ is indeed a universal value that\n", @@ -783,6 +799,7 @@ }, { "cell_type": "markdown", + "id": "9e8030c5", "metadata": {}, "source": [ "# References" @@ -790,6 +807,7 @@ }, { "cell_type": "markdown", + "id": "d2d15851", "metadata": {}, "source": [ "[1] Daan Frenkel, Berend Smit. Understanding Molecular Simulation: From Algorithms to Applications. 2nd edition, chapter 5: Monte Carlo Simulations in Various Ensembles, section 5.6: Grand-Canonical Ensemble, pp. 126–135. 
Academic Press, 2002, ISBN: 978-0-12-267351-1, doi:[10.1016/B978-012267351-1/50007-9](https://doi.org/10.1016/B978-012267351-1/50007-9). \n", @@ -818,5 +836,5 @@ } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } diff --git a/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb b/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb index 859fe4f493d..372ca029cee 100644 --- a/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb +++ b/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "1c62998e", "metadata": {}, "source": [ "# Langevin dynamics" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "5d9f1bb5", "metadata": {}, "source": [ "## Introduction\n", @@ -78,6 +80,7 @@ }, { "cell_type": "markdown", + "id": "19512250", "metadata": {}, "source": [ "## 1. Setting up the observable" @@ -85,73 +88,72 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "a6fe7174", + "metadata": {}, "source": [ "Write a function with signature `msd_correlator(pids, tau_max)` that returns a\n", "mean-squared displacement correlator that is updated every time step. Here, `pids` should be a list of particle ids and `tau_max` the respective parameter for ESPResSo's multiple-tau correlator. This parameter is the maximum time lag $\\tau$ for which the correlation should be computed. The correlator should be constructed using the `ParticlePositions` observable. For help, you can refer to the documentation of [observables and correlators](https://espressomd.github.io/doc/analysis.html#observables-framework)." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "781fb43a", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def msd_correlator(pids, tau_max):\n", " pos = espressomd.observables.ParticlePositions(ids=pids)\n", " pos_corr = espressomd.accumulators.Correlator(\n", " obs1=pos, tau_lin=16, tau_max=tau_max, delta_N=1,\n", " corr_operation=\"square_distance_componentwise\", compress1=\"discard1\")\n", - " return pos_corr\n", - "```" + " return pos_corr" ] }, { "cell_type": "code", "execution_count": null, + "id": "3c458ac6", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "26d68465", + "metadata": {}, "source": [ "Similarly, write a funtion with signature `vel_correlator(pids, tau_max)` that returns a correlator that calculates the time autocorrelation of the particle velocities." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "e7769f12", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def vel_correlator(pids, tau_max):\n", " vel = espressomd.observables.ParticleVelocities(ids=pids)\n", " vel_corr = espressomd.accumulators.Correlator(\n", " obs1=vel, tau_lin=16, tau_max=tau_max, delta_N=1,\n", " corr_operation=\"scalar_product\", compress1=\"discard1\")\n", - " return vel_corr\n", - "```" + " return vel_corr" ] }, { "cell_type": "code", "execution_count": null, + "id": "eee500ce", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "cd9580ca", "metadata": {}, "source": [ "## 2. 
Simulating Brownian motion" @@ -159,6 +161,7 @@ }, { "cell_type": "markdown", + "id": "c38456fa", "metadata": {}, "source": [ "We will simulate the diffusion of a single particle that is coupled to an implicit solvent." @@ -167,6 +170,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9e5d59e0", "metadata": {}, "outputs": [], "source": [ @@ -228,6 +232,7 @@ }, { "cell_type": "markdown", + "id": "c6bff9e3", "metadata": {}, "source": [ "## 3. Data analysis\n", @@ -237,6 +242,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8dddb4cd", "metadata": {}, "outputs": [], "source": [ @@ -263,6 +269,7 @@ }, { "cell_type": "markdown", + "id": "8a3c5445", "metadata": {}, "source": [ "### 3.2 Calculating the diffusion coefficient using the MSD\n", @@ -276,6 +283,7 @@ }, { "cell_type": "markdown", + "id": "c1a22a74", "metadata": {}, "source": [ "The MSD of a Brownian motion can be decomposed in three main regimes [2]:\n", @@ -296,6 +304,7 @@ }, { "cell_type": "markdown", + "id": "15a5a347", "metadata": {}, "source": [ "Use the function [curve_fit()](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html) from the module scipy.optimize to produce a fit for the linear regime and determine the diffusion coefficients for the different $\\gamma$s." @@ -303,10 +312,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "9c365974", + "metadata": {}, "source": [ "For large $t$ the diffusion coefficient can be obtained using the fluctuation-dissipation theorem [1]\n", "\n", @@ -318,12 +325,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "226d990a", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "import scipy.optimize\n", "\n", "\n", @@ -350,19 +358,20 @@ " diffusion_msd.append(a / 6)\n", "\n", "plt.legend()\n", - "plt.show()\n", - "```" + "plt.show()" ] }, { "cell_type": "code", "execution_count": null, + "id": "0f9ceeb2", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "a1f5394b", "metadata": {}, "source": [ "### 3.3 Calculating the diffusion coefficient using the Green-Kubo relation\n", @@ -374,6 +383,7 @@ { "cell_type": "code", "execution_count": null, + "id": "323f3a9c", "metadata": {}, "outputs": [], "source": [ @@ -390,10 +400,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "4566922c", + "metadata": {}, "source": [ "We find that the velocity-autocorrelation function quickly decays towards zero. However, owing to the relatively short overall sampling time, only the first part of the correlation function is well-sampled and a lot of noise is found in the tail of the autocorrelation function already early on. The obvious solution would be to increase the sampling time and in a production setting one would definitely have to do so in order to smoothly resolve at least several relaxation times. 
However, depending on a system's characteristics, under certain conditions it might still be necessary to replace a noisy long-time tail with an analytical expression, fitted to the short-time part of the autocorrelation function (again over at least several decay times; typically one would smoothly transition between numerical short-time data and the analytical tail-fit).\n", "\n", @@ -407,12 +415,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "7e367813", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def exp_decay(x, a, b):\n", " return a * np.exp(-x / b)\n", "\n", @@ -435,19 +444,20 @@ " # consequently, the GK relation for the diffusivity is:\n", " diffusion_gk.append(a * b / 3)\n", "plt.legend(loc='upper right', ncol=2, columnspacing=0.5, handlelength=1.3, framealpha=1)\n", - "plt.show()\n", - "```" + "plt.show()" ] }, { "cell_type": "code", "execution_count": null, + "id": "d64536e3", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "fa13b231", "metadata": {}, "source": [ "### 3.3 Comparing to the Stokes-Einstein relation" @@ -455,30 +465,28 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "085532e1", + "metadata": {}, "source": [ "Plot all diffusion coefficients (`diffusion_msd`, `diffusion_gk`) as a function of $\\gamma$. What relation do you observe?" ] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "id": "71c096ea", + "metadata": {}, "source": [ "In the diffusive mode, one can derive $D = k_\\mathrm{B}T / \\gamma$ from the Stokes–Einstein relation [4]. Compare your results to the Stokes-Einstein prediction." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "446cdec4", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "plt.figure(figsize=(10, 6))\n", "plt.xlabel(r'$\\gamma$')\n", "plt.ylabel('Diffusion coefficient [$\\sigma^2/t$]')\n", @@ -488,19 +496,20 @@ "plt.plot(gammas, diffusion_msd, 'o', label=r'$D_\\mathrm{MSD}$')\n", "plt.plot(gammas, diffusion_gk, '^', label=r'$D_\\mathrm{GK}$')\n", "plt.legend()\n", - "plt.show()\n", - "```" + "plt.show()" ] }, { "cell_type": "code", "execution_count": null, + "id": "45496c31", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "3a94e9b5", "metadata": {}, "source": [ "## References\n", @@ -532,5 +541,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/lattice_boltzmann/lattice_boltzmann_poiseuille_flow.ipynb b/doc/tutorials/lattice_boltzmann/lattice_boltzmann_poiseuille_flow.ipynb index 90eca9bf034..7c81979bfdd 100644 --- a/doc/tutorials/lattice_boltzmann/lattice_boltzmann_poiseuille_flow.ipynb +++ b/doc/tutorials/lattice_boltzmann/lattice_boltzmann_poiseuille_flow.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "3d5619cb", "metadata": {}, "source": [ "# Poiseuille flow in ESPResSo\n", @@ -36,6 +37,7 @@ }, { "cell_type": "markdown", + "id": "6529c309", "metadata": {}, "source": [ "## 1. 
Setting up the system" @@ -44,6 +46,7 @@ { "cell_type": "code", "execution_count": null, + "id": "25273a13", "metadata": {}, "outputs": [], "source": [ @@ -75,6 +78,7 @@ }, { "cell_type": "markdown", + "id": "f0a188d4", "metadata": {}, "source": [ "### 1.1 Setting up the lattice-Boltzmann fluid\n", @@ -85,6 +89,7 @@ { "cell_type": "code", "execution_count": null, + "id": "b9bd76b1", "metadata": {}, "outputs": [], "source": [ @@ -100,10 +105,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "c0a48d0c", + "metadata": {}, "source": [ "Create a lattice-Boltzmann actor and attach it to the system. Use the GPU implementation of LB.\n", "\n", @@ -112,63 +115,64 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "6d1a5f67", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "logging.info(\"Setup LB fluid.\")\n", "lbf = espressomd.lb.LBFluidWalberla(agrid=AGRID, density=DENSITY,\n", " kinematic_viscosity=VISCOSITY,\n", " tau=TIME_STEP,\n", " ext_force_density=FORCE_DENSITY)\n", - "system.lb = lbf\n", - "```" + "system.lb = lbf" ] }, { "cell_type": "code", "execution_count": null, + "id": "53e87979", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "068aa6bc", + "metadata": {}, "source": [ "Use the convenience function ``add_boundary_from_shape`` of the LB actor to mark nodes within a shape as boundaries.\n" ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "2db5800e", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "logging.info(\"Setup LB boundaries.\")\n", "top_wall = espressomd.shapes.Wall(normal=[1, 0, 0], dist=WALL_OFFSET)\n", "bottom_wall = espressomd.shapes.Wall(normal=[-1, 0, 0], dist=-(BOX_L - WALL_OFFSET))\n", "\n", "lbf.add_boundary_from_shape(top_wall)\n", - "lbf.add_boundary_from_shape(bottom_wall)\n", - "```" + "lbf.add_boundary_from_shape(bottom_wall)" ] }, { "cell_type": "code", "execution_count": null, + "id": "e23a4f09", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "3e3912fb", "metadata": {}, "source": [ "## 2. Simulation\n", @@ -179,6 +183,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d9666938", "metadata": {}, "outputs": [], "source": [ @@ -189,6 +194,7 @@ }, { "cell_type": "markdown", + "id": "06b2abb8", "metadata": {}, "source": [ "## 3. 
Data analysis\n", @@ -199,6 +205,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8bcf3a63", "metadata": {}, "outputs": [], "source": [ @@ -256,5 +263,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/lattice_boltzmann/lattice_boltzmann_sedimentation.ipynb b/doc/tutorials/lattice_boltzmann/lattice_boltzmann_sedimentation.ipynb index 7e1c8587920..ee1fb8737f3 100644 --- a/doc/tutorials/lattice_boltzmann/lattice_boltzmann_sedimentation.ipynb +++ b/doc/tutorials/lattice_boltzmann/lattice_boltzmann_sedimentation.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "71969a58", "metadata": {}, "source": [ "# Sedimentation in a fluid" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "cfd5131a", "metadata": {}, "source": [ "The purpose of this tutorial is to demonstrate how hydrodynamic interactions can have a dramatic impact\n", @@ -59,6 +61,7 @@ }, { "cell_type": "markdown", + "id": "f336eb5e", "metadata": {}, "source": [ "## System setup" @@ -67,6 +70,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a27b6288", "metadata": {}, "outputs": [], "source": [ @@ -86,6 +90,7 @@ }, { "cell_type": "markdown", + "id": "6d83932b", "metadata": {}, "source": [ "The initial particles positions will be chosen to form a two-dimensional hexagonal Bravais lattice structure.\n", @@ -98,6 +103,7 @@ { "cell_type": "code", "execution_count": null, + "id": "67f02130", "metadata": {}, "outputs": [], "source": [ @@ -111,6 +117,7 @@ }, { "cell_type": "markdown", + "id": "dcffadfe", "metadata": {}, "source": [ "Now, we are ready to define the system parameters and initialize the simulation system." @@ -119,6 +126,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a11dc489", "metadata": {}, "outputs": [], "source": [ @@ -153,10 +161,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "778f55ca", + "metadata": {}, "source": [ "We add a wall constraint on bottom and top of the simulation box, respectively.\n", "\n", @@ -169,12 +175,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "71a2590a", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# create wall shapes bottom (b) and top (t)\n", "wall_shape_b = espressomd.shapes.Wall(normal=[0, 1, 0], dist=1)\n", "wall_shape_t = espressomd.shapes.Wall(\n", @@ -182,19 +189,20 @@ "\n", "# add wall constraints\n", "for wall_shape in [wall_shape_b, wall_shape_t]:\n", - " system.constraints.add(shape=wall_shape, particle_type=0)\n", - "```" + " system.constraints.add(shape=wall_shape, particle_type=0)" ] }, { "cell_type": "code", "execution_count": null, + "id": "106acffb", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "ea4057cc", "metadata": {}, "source": [ "We will now calculate the particle initial positions and introduce a small crystalline defect to\n", @@ -205,6 +213,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f807527b", "metadata": {}, "outputs": [], "source": [ @@ -233,10 +242,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "dc44d3b3", + "metadata": {}, "source": [ "## Langevin dynamics\n", "\n", @@ -249,25 +256,27 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "55960560", + 
"metadata": {}, + "outputs": [], "source": [ - "```python\n", - "system.thermostat.set_langevin(kT=0., gamma=15., seed=12)\n", - "```" + "# SOLUTION CELL\n", + "system.thermostat.set_langevin(kT=0., gamma=15., seed=12)" ] }, { "cell_type": "code", "execution_count": null, + "id": "4fd712ff", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "f093b544", "metadata": {}, "source": [ "We can now sample the particle positions as a function of time.\n", @@ -277,6 +286,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5d578efc", "metadata": {}, "outputs": [], "source": [ @@ -295,6 +305,7 @@ }, { "cell_type": "markdown", + "id": "c9b747b7", "metadata": {}, "source": [ "We will now disable the thermostat, reset the particles to their initial positions and zero out particle velocities." @@ -303,6 +314,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9f1bcb2a", "metadata": {}, "outputs": [], "source": [ @@ -313,6 +325,7 @@ }, { "cell_type": "markdown", + "id": "72b4c682", "metadata": {}, "source": [ "## Hydrodynamics" @@ -320,10 +333,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "53d59163", + "metadata": {}, "source": [ "In this scenario, we want to sample the same system coupled to a lattice-Boltzmann fluid.\n", "\n", @@ -336,34 +347,33 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "76113ae5", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "lbf = espressomd.lb.LBFluidWalberla(agrid=spacing,\n", " density=1.,\n", " kinematic_viscosity=1.,\n", " tau=system.time_step, kT=0.)\n", "system.lb = lbf\n", - "system.thermostat.set_lb(LB_fluid=lbf, gamma=15., seed=0)\n", - "```" + "system.thermostat.set_lb(LB_fluid=lbf, gamma=15., seed=0)" ] }, { "cell_type": "code", "execution_count": null, + "id": "e92214c9", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ca56c3a4", + "metadata": {}, "source": [ "The wall constraints that were previously added now have to be registered as LB boundaries.\n", "\n", @@ -373,31 +383,30 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "a1da7cc2", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# add LB boundaries\n", "for wall_shape in [wall_shape_b, wall_shape_t]:\n", - " lbf.add_boundary_from_shape(wall_shape)\n", - "```" + " lbf.add_boundary_from_shape(wall_shape)" ] }, { "cell_type": "code", "execution_count": null, + "id": "b05ea198", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "b2a4deaf", + "metadata": {}, "source": [ "We will plot the fluid flow field in the final video using 2D vectors.\n", "To this end, we need to record the fluid trajectory with the same frequency as the particle positions.\n", @@ -417,12 +426,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "a18ee853", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "obs_lb_vel = espressomd.observables.LBVelocityProfile(\n", " n_x_bins=n_width,\n", " n_y_bins=n_height - 2, # skip data inside 
the LB boundaries (top and bottom walls)\n", @@ -440,19 +450,20 @@ " sampling_offset_y=0.5 * spacing,\n", " sampling_offset_z=0.5 * spacing,\n", " allow_empty_bins=True)\n", - "acc_lb_vel = espressomd.accumulators.TimeSeries(obs=obs_lb_vel, delta_N=1)\n", - "```" + "acc_lb_vel = espressomd.accumulators.TimeSeries(obs=obs_lb_vel, delta_N=1)" ] }, { "cell_type": "code", "execution_count": null, + "id": "8f4c3326", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "31e5cc87", "metadata": {}, "source": [ "We can now sample the particle positions and fluid velocity as a function of time." @@ -461,6 +472,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c01e143e", "metadata": {}, "outputs": [], "source": [ @@ -478,6 +490,7 @@ }, { "cell_type": "markdown", + "id": "f225cd58", "metadata": {}, "source": [ "## Visualization\n", @@ -488,6 +501,7 @@ { "cell_type": "code", "execution_count": null, + "id": "db083422", "metadata": {}, "outputs": [], "source": [ @@ -520,6 +534,7 @@ }, { "cell_type": "markdown", + "id": "7fefebcb", "metadata": {}, "source": [ "And now the actual visualization code.\n", @@ -532,6 +547,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d67242dd", "metadata": { "scrolled": false }, @@ -617,5 +633,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/lattice_boltzmann/lattice_boltzmann_theory.ipynb b/doc/tutorials/lattice_boltzmann/lattice_boltzmann_theory.ipynb index bb177147687..8c9bc7af923 100644 --- a/doc/tutorials/lattice_boltzmann/lattice_boltzmann_theory.ipynb +++ b/doc/tutorials/lattice_boltzmann/lattice_boltzmann_theory.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "2993c126", "metadata": {}, "source": [ "# The lattice-Boltzmann method in ESPResSo\n", @@ -19,6 +20,7 @@ }, { "cell_type": "markdown", + "id": "a60bebdd", "metadata": {}, "source": [ "## 1 Introduction\n", @@ -50,6 +52,7 @@ }, { "cell_type": "markdown", + "id": "b46fc0ac", "metadata": {}, "source": [ "## 2 The LBM in brief\n", @@ -77,6 +80,7 @@ }, { "cell_type": "markdown", + "id": "f23a6ec9", "metadata": {}, "source": [ "### Discretization\n", @@ -126,6 +130,7 @@ }, { "cell_type": "markdown", + "id": "1ca71315", "metadata": {}, "source": [ "### The second step: collision\n", @@ -178,6 +183,7 @@ }, { "cell_type": "markdown", + "id": "809b28ad", "metadata": {}, "source": [ "### Particle coupling\n", @@ -205,6 +211,7 @@ }, { "cell_type": "markdown", + "id": "8d997733", "metadata": {}, "source": [ "## 3 The LB interface in ESPResSo\n", @@ -245,6 +252,7 @@ }, { "cell_type": "markdown", + "id": "698926d4", "metadata": {}, "source": [ "### The LBFluidWalberla class\n", @@ -279,6 +287,7 @@ }, { "cell_type": "markdown", + "id": "e98aa80c", "metadata": {}, "source": [ "### Sampling data from a node\n", @@ -299,6 +308,7 @@ }, { "cell_type": "markdown", + "id": "37ecae0f", "metadata": {}, "source": [ "### Setting up boundaries\n", @@ -330,6 +340,7 @@ }, { "cell_type": "markdown", + "id": "6b87f037", "metadata": {}, "source": [ "## References\n", @@ -359,5 +370,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/lennard_jones/lennard_jones.ipynb b/doc/tutorials/lennard_jones/lennard_jones.ipynb index a4ac80f4bc1..f6d846e9a44 100644 --- a/doc/tutorials/lennard_jones/lennard_jones.ipynb +++ b/doc/tutorials/lennard_jones/lennard_jones.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "e49baad8", "metadata": {}, "source": [ "# Introductory 
Tutorial: Lennard-Jones Liquid" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "00300907", "metadata": {}, "source": [ "## Table of Contents\n", @@ -32,6 +34,7 @@ }, { "cell_type": "markdown", + "id": "ee0dec3c", "metadata": {}, "source": [ "## Introduction\n", @@ -47,6 +50,7 @@ }, { "cell_type": "markdown", + "id": "0de56c40", "metadata": {}, "source": [ "## Background\n", @@ -63,6 +67,7 @@ }, { "cell_type": "markdown", + "id": "964097b5", "metadata": {}, "source": [ "## The Lennard-Jones Potential\n", @@ -90,21 +95,15 @@ { "cell_type": "code", "execution_count": null, + "id": "45a548bd", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", - "plt.rcParams.update({'font.size': 18})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ + "plt.rcParams.update({'font.size': 18})\n", + "\n", "def lj_pot(x, epsilon, sigma, r_cut, c_shift=0.0):\n", " pot = 4.0 * epsilon * ((sigma / x)**12 - (sigma / x)**6) + c_shift\n", " pot[x > r_cut] = 0.\n", @@ -130,6 +129,7 @@ }, { "cell_type": "markdown", + "id": "8b1c8d97", "metadata": {}, "source": [ "## Units\n", @@ -144,6 +144,7 @@ }, { "cell_type": "markdown", + "id": "b246851e", "metadata": {}, "source": [ "## Lennard-Jones fluid\n", @@ -154,6 +155,7 @@ }, { "cell_type": "markdown", + "id": "9d3191ec", "metadata": {}, "source": [ "## First steps\n", @@ -173,6 +175,7 @@ }, { "cell_type": "markdown", + "id": "b174a451", "metadata": {}, "source": [ "## Overview of a simulation script\n", @@ -188,6 +191,7 @@ }, { "cell_type": "markdown", + "id": "ad5a4030", "metadata": {}, "source": [ "### System setup\n", @@ -198,6 +202,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5e93710a", "metadata": {}, "outputs": [], "source": [ @@ -211,6 +216,7 @@ }, { "cell_type": "markdown", + "id": "9134f428", "metadata": {}, "source": [ "The function ``espressomd.assert_features()`` expects a list of features as argument and checks they are available in the ESPResSo executable. If a required feature is missing, the program will print an error message and halt. To compile ESPResSo with a different set of features, see the [documentation on features](https://espressomd.github.io/doc/installation.html#features)." @@ -219,6 +225,7 @@ { "cell_type": "code", "execution_count": null, + "id": "2e33ba73", "metadata": {}, "outputs": [], "source": [ @@ -234,6 +241,7 @@ }, { "cell_type": "markdown", + "id": "1060ad04", "metadata": {}, "source": [ "The next step would be to create an instance of the System class. This instance is used as a handle to the simulation system. At any time, only one instance of the System class can exist." 
@@ -241,10 +249,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "1079982f", + "metadata": {}, "source": [ "**Exercise:**\n", "\n", @@ -255,19 +261,20 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "a8636f21", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", - "system = espressomd.System(box_l=BOX_L)\n", - "```" + "# SOLUTION CELL\n", + "system = espressomd.System(box_l=BOX_L)" ] }, { "cell_type": "code", "execution_count": null, + "id": "13275327", "metadata": {}, "outputs": [], "source": [] @@ -275,6 +282,7 @@ { "cell_type": "code", "execution_count": null, + "id": "1678565d", "metadata": { "code_folding": [] }, @@ -286,6 +294,7 @@ }, { "cell_type": "markdown", + "id": "ee31215c", "metadata": {}, "source": [ "It can be used to store and manipulate the crucial system parameters like the time step and the size of the simulation box (time_step, and box_l)." @@ -294,6 +303,7 @@ { "cell_type": "code", "execution_count": null, + "id": "608e275e", "metadata": {}, "outputs": [], "source": [ @@ -306,6 +316,7 @@ }, { "cell_type": "markdown", + "id": "b432b7ae", "metadata": {}, "source": [ "The parameter ``SKIN`` affects how often the Verlet lists will be updated. This parameter does not influence the physics of the simulation. It can however have a significant impact on the performance of the simulation. Depending on system parameters such as density and temperature, the optimal ``SKIN`` parameter can vary considerably. Please be aware that ESPResSo implements the function [``tune_skin()``](https://espressomd.github.io/doc/espressomd.html#espressomd.cell_system.CellSystem.tune_skin) that automatically tunes ``SKIN`` for optimal performance.\n", @@ -319,10 +330,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "63abbb11", + "metadata": {}, "source": [ "\n", "\n", @@ -335,19 +344,20 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "0e4f59b3", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", - "particles = system.part.add(type=[0] * N_PART, pos=np.random.random((N_PART, 3)) * system.box_l)\n", - "```" + "# SOLUTION CELL\n", + "particles = system.part.add(type=[0] * N_PART, pos=np.random.random((N_PART, 3)) * system.box_l)" ] }, { "cell_type": "code", "execution_count": null, + "id": "655d49e6", "metadata": {}, "outputs": [], "source": [] @@ -355,6 +365,7 @@ { "cell_type": "code", "execution_count": null, + "id": "2f2f53ee", "metadata": {}, "outputs": [], "source": [ @@ -364,6 +375,7 @@ }, { "cell_type": "markdown", + "id": "b2f887a1", "metadata": {}, "source": [ "The particle properties can be accessed using standard numpy slicing syntax:" @@ -372,6 +384,7 @@ { "cell_type": "code", "execution_count": null, + "id": "fd3176d9", "metadata": {}, "outputs": [], "source": [ @@ -390,6 +403,7 @@ }, { "cell_type": "markdown", + "id": "96de4a92", "metadata": {}, "source": [ "You can also get all particles using ``system.part.all()``, but ``particles`` already contains all particles that are in the simulation so far." 
@@ -397,6 +411,7 @@ }, { "cell_type": "markdown", + "id": "eca3a2cd", "metadata": {}, "source": [ "Many objects in ESPResSo have a string representation, and thus can be displayed via python's print function:" @@ -405,6 +420,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3af53b21", "metadata": {}, "outputs": [], "source": [ @@ -413,6 +429,7 @@ }, { "cell_type": "markdown", + "id": "e118ebf2", "metadata": {}, "source": [ "### Setting up non-bonded interactions\n", @@ -423,6 +440,7 @@ { "cell_type": "code", "execution_count": null, + "id": "b56c6f99", "metadata": {}, "outputs": [], "source": [ @@ -434,6 +452,7 @@ }, { "cell_type": "markdown", + "id": "61a87a94", "metadata": {}, "source": [ "In a periodic system, it is in general not straightforward to calculate all non-bonded interactions. As mentioned earlier in the text, usually a cutoff distance $r_{\\mathrm{cut}}$ is applied for infinite-range potentials like Lennard-Jones, such that $V(r>r_{\\mathrm{cut}}) = 0$. The potential can be shifted to zero at the cutoff value to ensure continuity using the shift='auto' option of [espressomd.interactions.LennardJonesInteraction](https://espressomd.github.io/doc/espressomd.html#espressomd.interactions.LennardJonesInteraction).\n", @@ -445,6 +464,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6df16ff1", "metadata": {}, "outputs": [], "source": [ @@ -453,10 +473,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ca573cca", + "metadata": {}, "source": [ "**Exercise:**\n", "\n", @@ -467,26 +485,28 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "65054f30", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "system.non_bonded_inter[0, 0].lennard_jones.set_params(\n", - " epsilon=LJ_EPS, sigma=LJ_SIG, cutoff=LJ_CUT, shift=0)\n", - "```" + " epsilon=LJ_EPS, sigma=LJ_SIG, cutoff=LJ_CUT, shift=0)" ] }, { "cell_type": "code", "execution_count": null, + "id": "91ac5e57", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "5e6332fb", "metadata": {}, "source": [ "### Energy minimization\n", @@ -502,6 +522,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bdc3bb16", "metadata": {}, "outputs": [], "source": [ @@ -515,10 +536,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "b560a981", + "metadata": {}, "source": [ "**Exercise:**\n", "\n", @@ -535,12 +554,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "191c8dc8", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# Set up steepest descent integration\n", "system.integrator.set_steepest_descent(f_max=0, # use a relative convergence criterion only\n", " gamma=DAMPING,\n", @@ -558,13 +578,13 @@ " print(f'rel. 
force change: {rel_force:.2e}')\n", " if rel_force < F_TOL:\n", " break\n", - " old_force = force\n", - "```" + " old_force = force" ] }, { "cell_type": "code", "execution_count": null, + "id": "62f80d15", "metadata": {}, "outputs": [], "source": [] @@ -572,6 +592,7 @@ { "cell_type": "code", "execution_count": null, + "id": "2a8debc0", "metadata": {}, "outputs": [], "source": [ @@ -581,6 +602,7 @@ }, { "cell_type": "markdown", + "id": "e4351d0b", "metadata": {}, "source": [ "### Choosing the thermodynamic ensemble, thermostat\n", @@ -596,6 +618,7 @@ { "cell_type": "code", "execution_count": null, + "id": "af768ec5", "metadata": {}, "outputs": [], "source": [ @@ -607,10 +630,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "946930a2", + "metadata": {}, "source": [ "**Exercise:**\n", "\n", @@ -621,26 +642,28 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "9e8e7ea7", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "system.integrator.set_vv()\n", - "system.thermostat.set_langevin(kT=TEMPERATURE, gamma=GAMMA, seed=42)\n", - "```" + "system.thermostat.set_langevin(kT=TEMPERATURE, gamma=GAMMA, seed=42)" ] }, { "cell_type": "code", "execution_count": null, + "id": "42c573df", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "0a979258", "metadata": {}, "source": [ "### Integrating equations of motion and taking manual measurements\n", @@ -651,6 +674,7 @@ { "cell_type": "code", "execution_count": null, + "id": "28880161", "metadata": { "code_folding": [] }, @@ -668,10 +692,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "070abeaf", + "metadata": {}, "source": [ "**Exercise:**\n", "\n", @@ -682,25 +704,26 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "fd9402b3", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "for i in range(N_SAMPLES):\n", " times[i] = system.time\n", " energy = system.analysis.energy()\n", " e_total[i] = energy['total']\n", " e_kin[i] = energy['kinetic']\n", " system.integrator.run(STEPS_PER_SAMPLE)\n", - "T_inst = 2. / 3. * e_kin / N_PART\n", - "```" + "T_inst = 2. / 3. * e_kin / N_PART" ] }, { "cell_type": "code", "execution_count": null, + "id": "c53290b6", "metadata": {}, "outputs": [], "source": [] @@ -708,6 +731,7 @@ { "cell_type": "code", "execution_count": null, + "id": "aaef92c3", "metadata": {}, "outputs": [], "source": [ @@ -722,6 +746,7 @@ }, { "cell_type": "markdown", + "id": "374b9a32", "metadata": {}, "source": [ "Since the ensemble average $\\langle E_{\\mathrm{kin}}\\rangle=3/2 N k_B T$ is related to the temperature,\n", @@ -733,6 +758,7 @@ }, { "cell_type": "markdown", + "id": "30cd6329", "metadata": {}, "source": [ "In the first simulation run, we picked ``STEPS_PER_SAMPLE`` arbitrarily. To ensure proper statistics, we will subsample the time series to reduce correlation between consecutive measurements. In order to do this, we first have to remove the beginning of the time series, because the system was out of equilibrium. The time to reach equilibrium depends on the thermostat friction coefficient gamma and can be determined visually from the plot." 
@@ -741,6 +767,7 @@ { "cell_type": "code", "execution_count": null, + "id": "1faa4fa3", "metadata": {}, "outputs": [], "source": [ @@ -754,6 +781,7 @@ }, { "cell_type": "markdown", + "id": "66340c8e", "metadata": {}, "source": [ "Notice that equilibration_time is not known *a priori*. It has to be found by trial and error for the given set of input parameters.\n", @@ -772,6 +800,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7af26ea6", "metadata": {}, "outputs": [], "source": [ @@ -795,10 +824,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "966dcf9c", + "metadata": {}, "source": [ "**Exercise**\n", "* Calculate the autocorrelation of the total energy (store in ``e_total_autocor``). Calculate the correlation time $\\xi$ (``corr_time``). Calculate a quantity ``steps_per_subsample`` that represents the number of integration steps necessary to advance the simulation time by $3\\xi$. This is a conservative quantity that will help us subsample the time series in such a way that the correlation between two consecutive samples is small ($e^{-3} \\simeq 5\\%$).\n", @@ -807,21 +834,22 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "764038ae", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "e_total_autocor = autocor(e_total)\n", "corr_time = fit_correlation_time(e_total_autocor[:100], times[:100])\n", - "steps_per_subsample = int(np.ceil(3 * corr_time / system.time_step))\n", - "```" + "steps_per_subsample = int(np.ceil(3 * corr_time / system.time_step))" ] }, { "cell_type": "code", "execution_count": null, + "id": "94fc76b6", "metadata": {}, "outputs": [], "source": [] @@ -829,6 +857,7 @@ { "cell_type": "code", "execution_count": null, + "id": "66c516f2", "metadata": {}, "outputs": [], "source": [ @@ -837,6 +866,7 @@ }, { "cell_type": "markdown", + "id": "2915cacd", "metadata": {}, "source": [ "We plot the autocorrelation function and the fit to visually confirm a roughly exponential decay" @@ -845,6 +875,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8ad5927d", "metadata": {}, "outputs": [], "source": [ @@ -863,6 +894,7 @@ }, { "cell_type": "markdown", + "id": "44c1172a", "metadata": {}, "source": [ "In order to obtain equilibrium properties, we need to consider ensemble-averaged quantities. Assuming that the simulation is an ergodic process, averaging over uncorrelated samples would provide equilibrium results." 
@@ -870,10 +902,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "9634848b", + "metadata": {}, "source": [ "**Exercise**:\n", "* Calculate the mean and standard error of the mean potential energy per particle using the formula for uncorrelated samples (define ``mean_pot_energy`` and ``SEM_pot_energy``).\n", @@ -883,22 +913,23 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "d07a547f", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "subsample_step = int(np.ceil(steps_per_subsample / STEPS_PER_SAMPLE))\n", "pot_energies = (e_total - e_kin)[::subsample_step] / N_PART\n", "mean_pot_energy = np.mean(pot_energies)\n", - "SEM_pot_energy = np.std(pot_energies) / np.sqrt(len(pot_energies))\n", - "```" + "SEM_pot_energy = np.std(pot_energies) / np.sqrt(len(pot_energies))" ] }, { "cell_type": "code", "execution_count": null, + "id": "3e4a203c", "metadata": {}, "outputs": [], "source": [] @@ -906,6 +937,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f5689773", "metadata": {}, "outputs": [], "source": [ @@ -914,6 +946,7 @@ }, { "cell_type": "markdown", + "id": "3b9f8f6f", "metadata": {}, "source": [ "For comparison to literature values we need to account for the error made by the LJ truncation.\n", @@ -927,6 +960,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bb07e953", "metadata": {}, "outputs": [], "source": [ @@ -938,6 +972,7 @@ }, { "cell_type": "markdown", + "id": "2d675bda", "metadata": {}, "source": [ "This value differs quite strongly from the uncorrected one but agrees well with the literature value $U^i = -5.38$ given in Table 1 of Ref. [7]." @@ -945,6 +980,7 @@ }, { "cell_type": "markdown", + "id": "be8d3071", "metadata": {}, "source": [ "### Automated data collection\n", @@ -954,6 +990,7 @@ }, { "cell_type": "markdown", + "id": "8ab7a5c6", "metadata": {}, "source": [ "[Observables](https://espressomd.github.io/doc/analysis.html#using-observables) extract properties from the particles and calculate some quantity with it, e.g. 
the center of mass, the total energy or a histogram.\n", @@ -965,6 +1002,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f5432ffa", "metadata": {}, "outputs": [], "source": [ @@ -977,10 +1015,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "c950b4ce", + "metadata": {}, "source": [ "**Exercise**\n", "* Instantiate a [RDF](https://espressomd.github.io/doc/espressomd.html#espressomd.observables.RDF) observable\n", @@ -989,27 +1025,29 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "f70b7883", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "rdf_obs = espressomd.observables.RDF(ids1=system.part.all().id, min_r=R_MIN, max_r=R_MAX, n_r_bins=N_BINS)\n", "rdf_acc = espressomd.accumulators.MeanVarianceCalculator(obs=rdf_obs, delta_N=steps_per_subsample)\n", - "system.auto_update_accumulators.add(rdf_acc)\n", - "```" + "system.auto_update_accumulators.add(rdf_acc)" ] }, { "cell_type": "code", "execution_count": null, + "id": "8de0795a", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "51edfcf1", "metadata": {}, "source": [ "Now we don't need an elaborate integration loop anymore, instead the RDFs are calculated and accumulated automatically." @@ -1018,6 +1056,7 @@ { "cell_type": "code", "execution_count": null, + "id": "db0713b2", "metadata": {}, "outputs": [], "source": [ @@ -1026,10 +1065,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "88c5b87e", + "metadata": {}, "source": [ "**Exercise**\n", "* Get the mean RDF (define ``rdf``) from the accumulator\n", @@ -1037,20 +1074,21 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "9845e4a9", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "rdf = rdf_acc.mean()\n", - "rs = rdf_obs.bin_centers()\n", - "```" + "rs = rdf_obs.bin_centers()" ] }, { "cell_type": "code", "execution_count": null, + "id": "18a10a12", "metadata": {}, "outputs": [], "source": [] @@ -1058,6 +1096,7 @@ { "cell_type": "code", "execution_count": null, + "id": "205cf0a2", "metadata": {}, "outputs": [], "source": [ @@ -1071,6 +1110,7 @@ }, { "cell_type": "markdown", + "id": "719b7f21", "metadata": {}, "source": [ "We now plot the experimental radial distribution.\n", @@ -1080,6 +1120,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ace06c01", "metadata": {}, "outputs": [], "source": [ @@ -1150,6 +1191,7 @@ { "cell_type": "code", "execution_count": null, + "id": "937ac145", "metadata": {}, "outputs": [], "source": [ @@ -1159,6 +1201,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a13fe69d", "metadata": {}, "outputs": [], "source": [ @@ -1169,6 +1212,7 @@ }, { "cell_type": "markdown", + "id": "28c0f073", "metadata": {}, "source": [ "## Further Exercises" @@ -1176,6 +1220,7 @@ }, { "cell_type": "markdown", + "id": "7fabe279", "metadata": {}, "source": [ "### Binary Lennard-Jones Liquid\n", @@ -1190,6 +1235,7 @@ }, { "cell_type": "markdown", + "id": "acc09276", "metadata": {}, "source": [ "## References\n", @@ -1228,5 +1274,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/polymers/polymers.ipynb b/doc/tutorials/polymers/polymers.ipynb index 738a8fff77c..ed3ed3bb8d3 100644 --- 
a/doc/tutorials/polymers/polymers.ipynb +++ b/doc/tutorials/polymers/polymers.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "7baf0b33", "metadata": {}, "source": [ "# Basic polymer simulations in ESPResSo" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "8666fbcd", "metadata": {}, "source": [ "In this tutorial we are going to investigate diffusion of a dissolved polymer using **ESPResSo**. For this tutorial, you should have fundamental knowledge of the lattice-Boltzmann method and Langevin dynamics. If you are unfamiliar with those, you can go through the respective tutorials in the `lattice_boltzmann` and `langevin_dynamics` folders." @@ -16,6 +18,7 @@ }, { "cell_type": "markdown", + "id": "e465abc7", "metadata": {}, "source": [ "## Introduction\n", @@ -43,6 +46,7 @@ }, { "cell_type": "markdown", + "id": "2a5047d6", "metadata": {}, "source": [ "## Polymer models\n", @@ -130,6 +134,7 @@ }, { "cell_type": "markdown", + "id": "826a975d", "metadata": {}, "source": [ "## Diffusion of a polymer\n", @@ -172,6 +177,7 @@ }, { "cell_type": "markdown", + "id": "bc400287", "metadata": {}, "source": [ "### 1. Setting up the polymer and observables\n", @@ -191,10 +197,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "e450d507", + "metadata": {}, "source": [ "Write a function with signature `build_polymer(system, n_monomers, polymer_params, fene)` that creates\n", "a linear polymer made of `n_monomers` particles, with parameters `polymer_params`. The particles need\n", @@ -202,12 +206,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "25b11c52", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def build_polymer(system, n_monomers, polymer_params, fene):\n", " positions = espressomd.polymer.linear_polymer_positions(\n", " beads_per_chain=n_monomers, **polymer_params)\n", @@ -216,23 +221,21 @@ " p = system.part.add(pos=pos)\n", " if p_previous is not None:\n", " p.add_bond((fene, p_previous))\n", - " p_previous = p\n", - "```" + " p_previous = p" ] }, { "cell_type": "code", "execution_count": null, + "id": "a3ad343c", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "6a6d2d2f", + "metadata": {}, "source": [ "Write a function with signature `correlator_msd(pids_monomers, tau_max)` that returns a center-of-mass\n", "mean-squared displacement correlator that is updated every time step, and a function with signature\n", @@ -242,12 +245,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "c616b467", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def correlator_msd(pids_monomers, tau_max):\n", " com_pos = espressomd.observables.ComPosition(ids=pids_monomers)\n", " com_pos_cor = espressomd.accumulators.Correlator(\n", @@ -261,19 +265,20 @@ " com_vel_cor = espressomd.accumulators.Correlator(\n", " obs1=com_vel, tau_lin=16, tau_max=tau_max, delta_N=10,\n", " corr_operation=\"scalar_product\", compress1=\"discard1\")\n", - " return com_vel_cor\n", - "```" + " return com_vel_cor" ] }, { "cell_type": "code", "execution_count": null, + "id": "c1457b56", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "e04c8781", "metadata": {}, 
"source": [ "You can simulate a polymer in the Rouse regime using an implicit solvent model, e.g. Langevin dynamics,\n", @@ -283,6 +288,7 @@ { "cell_type": "code", "execution_count": null, + "id": "4a3548e7", "metadata": {}, "outputs": [], "source": [ @@ -306,6 +312,7 @@ }, { "cell_type": "markdown", + "id": "fcabccee", "metadata": {}, "source": [ "### 2. Simulating the polymer" @@ -314,6 +321,7 @@ { "cell_type": "code", "execution_count": null, + "id": "dd4005bb", "metadata": {}, "outputs": [], "source": [ @@ -455,6 +463,7 @@ }, { "cell_type": "markdown", + "id": "f154b541", "metadata": {}, "source": [ "### 3. Data analysis\n", @@ -466,6 +475,7 @@ { "cell_type": "code", "execution_count": null, + "id": "100f2d21", "metadata": {}, "outputs": [], "source": [ @@ -478,6 +488,7 @@ { "cell_type": "code", "execution_count": null, + "id": "2f4905a8", "metadata": {}, "outputs": [], "source": [ @@ -552,6 +563,7 @@ }, { "cell_type": "markdown", + "id": "30771176", "metadata": {}, "source": [ "#### 3.1 Distance-based macromolecular properties\n", @@ -563,6 +575,7 @@ }, { "cell_type": "markdown", + "id": "e961b2cb", "metadata": {}, "source": [ "Plot the end-to-end distance $R_F$ of the polymer as a function of the number of monomers. What relation do you observe?" @@ -570,6 +583,7 @@ }, { "cell_type": "markdown", + "id": "5123327f", "metadata": {}, "source": [ "The end-to-end distance follows the law $R_F = c_F N^\\nu$ with $c_F$ a constant and $\\nu$ the Flory exponent." @@ -578,6 +592,7 @@ { "cell_type": "code", "execution_count": null, + "id": "48baf7a5", "metadata": {}, "outputs": [], "source": [ @@ -601,6 +616,7 @@ }, { "cell_type": "markdown", + "id": "cbe33155", "metadata": {}, "source": [ "Plot the radius of gyration $R_g$ of the polymer as a function of the number of monomers. What relation do you observe?" @@ -608,6 +624,7 @@ }, { "cell_type": "markdown", + "id": "b2fac6d7", "metadata": {}, "source": [ "The radius of gyration follows the law $R_g = c_g N^\\nu$ with $c_g$ a constant and $\\nu$ the Flory exponent." @@ -616,6 +633,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9a29393b", "metadata": {}, "outputs": [], "source": [ @@ -639,6 +657,7 @@ }, { "cell_type": "markdown", + "id": "46f63301", "metadata": {}, "source": [ "For an ideal polymer:\n", @@ -649,6 +668,7 @@ { "cell_type": "code", "execution_count": null, + "id": "58e2ec1f", "metadata": {}, "outputs": [], "source": [ @@ -658,6 +678,7 @@ }, { "cell_type": "markdown", + "id": "42964c1d", "metadata": {}, "source": [ "Plot the hydrodynamic radius $R_h$ of the polymers as a function of the number of monomers. What relation do you observe?" @@ -665,6 +686,7 @@ }, { "cell_type": "markdown", + "id": "530f2b6b", "metadata": {}, "source": [ "The hydrodynamic radius can be calculated via the Stokes radius, i.e. 
the radius of a sphere that\n", @@ -675,6 +697,7 @@ { "cell_type": "code", "execution_count": null, + "id": "06b65488", "metadata": { "scrolled": true }, @@ -700,6 +723,7 @@ }, { "cell_type": "markdown", + "id": "8601810c", "metadata": {}, "source": [ "#### 3.2 Diffusion coefficient using the MSD method\n", @@ -716,6 +740,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0c403cce", "metadata": {}, "outputs": [], "source": [ @@ -740,6 +765,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f14b18b8", "metadata": {}, "outputs": [], "source": [ @@ -762,6 +788,7 @@ }, { "cell_type": "markdown", + "id": "b24cc243", "metadata": {}, "source": [ "Plot the dependence of the diffusion coefficient on the hydrodynamic radius.\n", @@ -784,6 +811,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3edca774", "metadata": {}, "outputs": [], "source": [ @@ -800,6 +828,7 @@ }, { "cell_type": "markdown", + "id": "28fe18f7", "metadata": {}, "source": [ "#### 3.3 Diffusion coefficient using the Green–Kubo method\n", @@ -814,6 +843,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f31a4495", "metadata": {}, "outputs": [], "source": [ @@ -836,6 +866,7 @@ }, { "cell_type": "markdown", + "id": "81853900", "metadata": {}, "source": [ "The Green–Kubo integral for the diffusion coefficient take the following form:\n", @@ -851,6 +882,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a9bae835", "metadata": {}, "outputs": [], "source": [ @@ -870,6 +902,7 @@ }, { "cell_type": "markdown", + "id": "e68f01ef", "metadata": {}, "source": [ "Plot the dependence of the diffusion coefficient on the hydrodynamic radius." @@ -878,6 +911,7 @@ { "cell_type": "code", "execution_count": null, + "id": "405219b6", "metadata": {}, "outputs": [], "source": [ @@ -894,6 +928,7 @@ }, { "cell_type": "markdown", + "id": "e16f42ac", "metadata": {}, "source": [ "Let us compare the value of the diffusion coefficients calculated with the MSD and Green–Kubo methods:" @@ -902,6 +937,7 @@ { "cell_type": "code", "execution_count": null, + "id": "be4a7ca3", "metadata": {}, "outputs": [], "source": [ @@ -912,6 +948,7 @@ }, { "cell_type": "markdown", + "id": "67070b9a", "metadata": {}, "source": [ "## References\n", @@ -945,5 +982,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/raspberry_electrophoresis/raspberry_electrophoresis.ipynb b/doc/tutorials/raspberry_electrophoresis/raspberry_electrophoresis.ipynb index 3e71b0771ea..01dda0921ee 100644 --- a/doc/tutorials/raspberry_electrophoresis/raspberry_electrophoresis.ipynb +++ b/doc/tutorials/raspberry_electrophoresis/raspberry_electrophoresis.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "a8830f68", "metadata": {}, "source": [ "# Raspberry Electrophoresis" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "b7351e9d", "metadata": {}, "source": [ "## Introduction\n", @@ -28,6 +30,7 @@ }, { "cell_type": "markdown", + "id": "c45298fe", "metadata": {}, "source": [ "## Compiling ESPResSo for this tutorial\n", @@ -49,6 +52,7 @@ }, { "cell_type": "markdown", + "id": "8f30343a", "metadata": {}, "source": [ "## Global MD variables\n", @@ -59,6 +63,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bd4f80ba", "metadata": {}, "outputs": [], "source": [ @@ -74,7 +79,7 @@ "logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n", "\n", "espressomd.assert_features([\"ELECTROSTATICS\", \"ROTATION\", \"ROTATIONAL_INERTIA\", \"EXTERNAL_FORCES\",\n", - " \"MASS\", 
\"VIRTUAL_SITES_RELATIVE\", \"LENNARD_JONES\"])\n", + " \"MASS\", \"VIRTUAL_SITES_RELATIVE\", \"LENNARD_JONES\", \"WALBERLA\"])\n", "\n", "import numpy as np\n", "%matplotlib inline\n", @@ -119,6 +124,7 @@ }, { "cell_type": "markdown", + "id": "2d20207b", "metadata": {}, "source": [ "The parameter box_l sets the size of the simulation box. In general, one should check for finite\n", @@ -134,6 +140,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3b34fa56", "metadata": {}, "outputs": [], "source": [ @@ -142,6 +149,7 @@ }, { "cell_type": "markdown", + "id": "63adf49d", "metadata": {}, "source": [ "The skin is used for constructing\n", @@ -153,6 +161,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ada95860", "metadata": {}, "outputs": [], "source": [ @@ -161,6 +170,7 @@ }, { "cell_type": "markdown", + "id": "5ae0da81", "metadata": {}, "source": [ "The periodicity parameter indicates that the system is periodic in all three\n", @@ -170,6 +180,7 @@ }, { "cell_type": "markdown", + "id": "ef1cb65e", "metadata": {}, "source": [ "## Setting up the raspberry\n", @@ -186,6 +197,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ebcce8d0", "metadata": {}, "outputs": [], "source": [ @@ -202,6 +214,7 @@ }, { "cell_type": "markdown", + "id": "a1aa821d", "metadata": {}, "source": [ "We set up the central bead and the other beads are initialized at random positions on the surface of the colloid. The beads are then allowed to relax using\n", @@ -211,6 +224,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6b85a02a", "metadata": {}, "outputs": [], "source": [ @@ -230,33 +244,32 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "f11390b7", + "metadata": {}, "source": [ "#### Exercise\n", "Add `n_col_part-1` particles of type `TYPE_SURFACE` to the system and store the returned particle slice in `surface_parts` (see the [user guide](https://espressomd.github.io/doc/particles.html#adding-particles) section on how to add several particles at once). The particles shall be at random positions with a distance of exactly `radius_col` from the colloid's center at `col_pos`." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "bb7490e5", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# Create surface beads uniformly distributed over the surface of the central particle\n", "colSurfPos = np.random.uniform(low=-1, high=1, size=(n_col_part - 1, 3))\n", "colSurfPos = colSurfPos / np.linalg.norm(colSurfPos, axis=1)[:, np.newaxis] * radius_col + colPos\n", "colSurfTypes = np.full(n_col_part - 1, TYPE_SURFACE)\n", - "surface_parts = system.part.add(pos=colSurfPos, type=colSurfTypes)\n", - "```" + "surface_parts = system.part.add(pos=colSurfPos, type=colSurfTypes)" ] }, { "cell_type": "code", "execution_count": null, + "id": "1b3eaf27", "metadata": {}, "outputs": [], "source": [] @@ -264,6 +277,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3fc4bf66", "metadata": {}, "outputs": [], "source": [ @@ -296,6 +310,7 @@ }, { "cell_type": "markdown", + "id": "8f226bfb", "metadata": {}, "source": [ "The best way to ensure a relatively uniform distribution\n", @@ -304,6 +319,7 @@ }, { "cell_type": "markdown", + "id": "809451b3", "metadata": {}, "source": [ "
\n", @@ -316,6 +332,7 @@ }, { "cell_type": "markdown", + "id": "478a2301", "metadata": {}, "source": [ "Now that the beads are arranged in the shape of a raspberry, the surface beads are made virtual particles\n", @@ -328,30 +345,29 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "cb7fc3a7", + "metadata": {}, "source": [ "#### Exercise\n", "Activate the `VirtualSitesRelative` implementation in **ESPResSo**. See the [documentation](https://espressomd.github.io/doc/particles.html#virtual-sites) for help." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "fae9dbe4", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# Select the desired implementation for virtual sites\n", - "system.virtual_sites = espressomd.virtual_sites.VirtualSitesRelative()\n", - "```" + "system.virtual_sites = espressomd.virtual_sites.VirtualSitesRelative()" ] }, { "cell_type": "code", "execution_count": null, + "id": "9f256d8e", "metadata": {}, "outputs": [], "source": [] @@ -359,6 +375,7 @@ { "cell_type": "code", "execution_count": null, + "id": "8673addb", "metadata": {}, "outputs": [], "source": [ @@ -370,10 +387,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "aedcee67", + "metadata": {}, "source": [ "#### Exercise\n", "* Compute the center of mass of all particles in `surface_parts` and store its position in the variable `com`.\n", @@ -381,23 +396,24 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "1da1f3a8", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# Calculate the center of mass position (com) and the moment of inertia (momI) of the colloid\n", "com = np.average(surface_parts.pos, 0) # surface_parts.pos returns an n-by-3 array\n", "momI = 0\n", "for p in surface_parts:\n", - " momI += np.power(np.linalg.norm(com - p.pos), 2)\n", - "```" + " momI += np.power(np.linalg.norm(com - p.pos), 2)" ] }, { "cell_type": "code", "execution_count": null, + "id": "9c67994d", "metadata": {}, "outputs": [], "source": [] @@ -405,6 +421,7 @@ { "cell_type": "code", "execution_count": null, + "id": "079c5bc6", "metadata": {}, "outputs": [], "source": [ @@ -423,6 +440,7 @@ }, { "cell_type": "markdown", + "id": "dd11e968", "metadata": {}, "source": [ "## Inserting counterions and salt ions\n", @@ -434,6 +452,7 @@ { "cell_type": "code", "execution_count": null, + "id": "408ad5ac", "metadata": {}, "outputs": [], "source": [ @@ -465,6 +484,7 @@ }, { "cell_type": "markdown", + "id": "284ad461", "metadata": {}, "source": [ "We then check that charge neutrality is maintained" @@ -473,6 +493,7 @@ { "cell_type": "code", "execution_count": null, + "id": "604c15cf", "metadata": {}, "outputs": [], "source": [ @@ -482,6 +503,7 @@ }, { "cell_type": "markdown", + "id": "220c84e9", "metadata": {}, "source": [ "A WCA potential acts between all of the ions. 
This potential represents a purely repulsive\n", @@ -494,6 +516,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6042c8b3", "metadata": {}, "outputs": [], "source": [ @@ -511,6 +534,7 @@ }, { "cell_type": "markdown", + "id": "e4d0aeee", "metadata": {}, "source": [ "After inserting the ions, again a short integration is performed with a force cap to\n", @@ -520,6 +544,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a9b6d2ed", "metadata": {}, "outputs": [], "source": [ @@ -540,6 +565,7 @@ }, { "cell_type": "markdown", + "id": "e4e070f0", "metadata": {}, "source": [ "## Electrostatics\n", @@ -550,6 +576,7 @@ { "cell_type": "code", "execution_count": null, + "id": "cf35d2e3", "metadata": {}, "outputs": [], "source": [ @@ -564,6 +591,7 @@ }, { "cell_type": "markdown", + "id": "9bc36feb", "metadata": {}, "source": [ "Generally a Bjerrum length of $2$ is appropriate when using WCA interactions with $\\sigma=1$, since a typical ion has a radius of $0.35\\ \\mathrm{nm}$, while the Bjerrum\n", @@ -576,38 +604,38 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "f90e931d", + "metadata": {}, "source": [ "#### Exercise\n", "Add an external force $F=q\\vec{E}$ to every particle according to its charge, where the electric field is $\\vec{E}=\\begin{pmatrix} 0.1 \\\\ 0 \\\\ 0 \\end{pmatrix}$." ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "52446ef2", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "E = 0.1 # an electric field of 0.1 is the upper limit of the linear response regime for this model\n", "Efield = np.array([E, 0, 0])\n", "for p in system.part:\n", - " p.ext_force = p.q * Efield\n", - "```" + " p.ext_force = p.q * Efield" ] }, { "cell_type": "code", "execution_count": null, + "id": "a4e3dc56", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "d04105af", "metadata": {}, "source": [ "## Lattice-Boltzmann\n", @@ -620,6 +648,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f99df805", "metadata": {}, "outputs": [], "source": [ @@ -628,6 +657,7 @@ }, { "cell_type": "markdown", + "id": "eaed3edf", "metadata": {}, "source": [ "The important parameters for the LB fluid are the density, the viscosity, the time step,\n", @@ -640,6 +670,7 @@ { "cell_type": "code", "execution_count": null, + "id": "37185f8e", "metadata": {}, "outputs": [], "source": [ @@ -650,6 +681,7 @@ }, { "cell_type": "markdown", + "id": "2fce673e", "metadata": {}, "source": [ "A logical way of picking a specific set of parameters is to choose them such that the hydrodynamic radius of an ion roughly matches its physical radius determined by the\n", @@ -671,6 +703,7 @@ { "cell_type": "code", "execution_count": null, + "id": "b5a73643", "metadata": {}, "outputs": [], "source": [ @@ -681,6 +714,7 @@ }, { "cell_type": "markdown", + "id": "f848c501", "metadata": {}, "source": [ "## Simulating electrophoresis\n", @@ -692,6 +726,7 @@ { "cell_type": "code", "execution_count": null, + "id": "32ec13f3", "metadata": {}, "outputs": [], "source": [ @@ -711,6 +746,7 @@ }, { "cell_type": "markdown", + "id": "698a65ca", "metadata": {}, "source": [ "The above code cell saves the trajectory of the raspberry into the file `posVsTime.dat`. 
For this purpose, the particle's `pos` member should be used, as opposed to its `pos_folded` member, which returns the particle's position folded back into the simulation box. In systems with periodic boundary conditions, particles can \"leave\" the simulation box. When a particle leaves the box, one of its periodic images enters the box from the opposite side, so it might appear that the particle never leaves the box. The truth, however, is that particles *can* leave the simulation box and therefore, their coordinates can end up outside of it. `pos` returns these \"true\" coordinates. On the other hand, `pos_folded` returns the position folded back into the simulation box, which is used to compute interactions between particles, and also for visualization of the simulation box.\n", @@ -722,6 +758,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ca15453e", "metadata": {}, "outputs": [], "source": [ @@ -756,6 +793,7 @@ }, { "cell_type": "markdown", + "id": "d504b45c", "metadata": {}, "source": [ "## References\n", @@ -783,5 +821,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/visualization/visualization.ipynb b/doc/tutorials/visualization/visualization.ipynb index d6ee865ed7b..93c390c105c 100644 --- a/doc/tutorials/visualization/visualization.ipynb +++ b/doc/tutorials/visualization/visualization.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "49107881", "metadata": {}, "source": [ "# Visualization" @@ -9,6 +10,7 @@ }, { "cell_type": "markdown", + "id": "3a2e4e20", "metadata": {}, "source": [ "## Introduction\n", @@ -27,6 +29,7 @@ }, { "cell_type": "markdown", + "id": "790f5705", "metadata": {}, "source": [ "## Simulation\n", @@ -42,6 +45,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0dfc1eee", "metadata": {}, "outputs": [], "source": [ @@ -131,6 +135,7 @@ }, { "cell_type": "markdown", + "id": "7cfde546", "metadata": {}, "source": [ "## Live plotting\n", @@ -143,6 +148,7 @@ { "cell_type": "code", "execution_count": null, + "id": "2a223730", "metadata": { "scrolled": false }, @@ -197,6 +203,7 @@ }, { "cell_type": "markdown", + "id": "7260065c", "metadata": {}, "source": [ "## Live visualization and plotting\n", @@ -207,6 +214,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5cbf7ae5", "metadata": {}, "outputs": [], "source": [ @@ -218,6 +226,7 @@ }, { "cell_type": "markdown", + "id": "55641a5f", "metadata": {}, "source": [ "Then, re-define the main() function to run the visualizer:" @@ -226,6 +235,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a301d67e", "metadata": {}, "outputs": [], "source": [ @@ -242,6 +252,7 @@ }, { "cell_type": "markdown", + "id": "db98f740", "metadata": {}, "source": [ "Next, create a secondary thread for the main() function. 
However,\n", @@ -255,6 +266,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d2c58b0e", "metadata": {}, "outputs": [], "source": [ @@ -277,6 +289,7 @@ }, { "cell_type": "markdown", + "id": "4d32841d", "metadata": {}, "source": [ "While the simulation is running, you can move and zoom\n", @@ -290,6 +303,7 @@ }, { "cell_type": "markdown", + "id": "8eeae02a", "metadata": {}, "source": [ "### Alternative: live visualization without plotting\n", @@ -319,6 +333,7 @@ }, { "cell_type": "markdown", + "id": "9f26cbcb", "metadata": {}, "source": [ "### Alternative: live visualization only\n", @@ -334,6 +349,7 @@ }, { "cell_type": "markdown", + "id": "040f0ee1", "metadata": {}, "source": [ "## Customizing the OpenGL visualizer\n", @@ -392,5 +408,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 5 } diff --git a/doc/tutorials/widom_insertion/widom_insertion.ipynb b/doc/tutorials/widom_insertion/widom_insertion.ipynb index 1ae231e39be..7e3dff1e60c 100644 --- a/doc/tutorials/widom_insertion/widom_insertion.ipynb +++ b/doc/tutorials/widom_insertion/widom_insertion.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "594eb0fc", "metadata": {}, "source": [ "# Excess Chemical Potential of a Salt Solution: Widom Particle Insertion Method\n", @@ -18,6 +19,7 @@ }, { "cell_type": "markdown", + "id": "d9b12191", "metadata": {}, "source": [ "## Introduction\n", @@ -39,6 +41,7 @@ }, { "cell_type": "markdown", + "id": "c3552a73", "metadata": {}, "source": [ "## Widom Particle Insertion Method\n", @@ -83,6 +86,7 @@ }, { "cell_type": "markdown", + "id": "ac118d95", "metadata": {}, "source": [ "## The Simulated System: Aqueous NaCl solution\n", @@ -107,6 +111,7 @@ }, { "cell_type": "markdown", + "id": "0b9eceeb", "metadata": {}, "source": [ "# Simulation Setup\n", @@ -117,6 +122,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9bcc0534", "metadata": {}, "outputs": [], "source": [ @@ -137,6 +143,7 @@ }, { "cell_type": "markdown", + "id": "f8855713", "metadata": {}, "source": [ "Next, we define the simulation units and set the parameters which define our system and the interactions:" @@ -145,6 +152,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9159f7df", "metadata": {}, "outputs": [], "source": [ @@ -180,10 +188,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "de1ba199", + "metadata": {}, "source": [ "Now we are ready to set up the system. 
Because we will need to rescale the simulation box, we will initially only set up the short-range interactions.\n", "\n", @@ -197,12 +203,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "01214ecf", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "box_l_init = 10.0\n", "\n", "system = espressomd.System(box_l=3 * [box_l_init])\n", @@ -219,23 +226,21 @@ "\n", "for i in types:\n", " for j in types:\n", - " system.non_bonded_inter[types[i], types[j]].wca.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA)\n", - "```" + " system.non_bonded_inter[types[i], types[j]].wca.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA)" ] }, { "cell_type": "code", "execution_count": null, + "id": "151457b2", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "9e50455c", + "metadata": {}, "source": [ "**Exercise:**\n", "* Implement a function ``system_setup(c_salt_SI)`` that takes the desired salt concentration ``c_salt_SI`` in SI-units (mol/l) as an argument and rescales the simulation box accordingly\n", @@ -247,12 +252,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "6a4f050e", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "ci_params = {} # optional P3M parameters constraints\n", "\n", "def system_setup(c_salt_SI):\n", @@ -266,23 +272,21 @@ " \n", " # add P3M\n", " p3m = espressomd.electrostatics.P3M(prefactor=BJERRUM_LENGTH, accuracy=1e-3, **ci_params)\n", - " system.electrostatics.solver = p3m\n", - "```" + " system.electrostatics.solver = p3m" ] }, { "cell_type": "code", "execution_count": null, + "id": "a3b8fbd5", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ebdd16fe", + "metadata": {}, "source": [ "**Exercise:**\n", "* Implement a function ``warmup()`` that removes potential overlaps between particles with 10000 steps of the steepest descent integrator and performs a warmup integration with 100 loops of 100 simulation steps\n", @@ -295,12 +299,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "fb551bde", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def warmup():\n", " system.integrator.set_steepest_descent(f_max=0, gamma=1e-3, max_displacement=0.01)\n", "\n", @@ -312,23 +317,21 @@ "\n", " print(\"Running warmup integration...\", flush=True)\n", " for i in tqdm.tqdm(range(100)):\n", - " system.integrator.run(100)\n", - "```" + " system.integrator.run(100)" ] }, { "cell_type": "code", "execution_count": null, + "id": "09f05a53", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "ca69b646", + "metadata": {}, "source": [ "**Exercise:**\n", "* Add the functionality to perform Widom insertion moves. 
Make sure that you always insert an ion pair in order to conserve the system electroneutrality.\n", @@ -339,34 +342,33 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "ec709bf2", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "# add reaction for Widom particle insertion\n", "widom = espressomd.reaction_methods.WidomInsertion(kT=KT, seed=42)\n", "widom.add_reaction(reactant_types=[], reactant_coefficients=[],\n", " product_types=[types[\"Xplus\"], types[\"Xminus\"]], product_coefficients=[1, 1],\n", " default_charges={types[\"Xplus\"]: charges[\"Xplus\"], types[\"Xminus\"]: charges[\"Xminus\"]})\n", - "widom.set_non_interacting_type(type=len(types) + 1)\n", - "```" + "widom.set_non_interacting_type(type=len(types) + 1)" ] }, { "cell_type": "code", "execution_count": null, + "id": "45109fb7", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "502e56c7", + "metadata": {}, "source": [ "**Exercise:**\n", "* Implement a function ``calculate_excess_chemical_potential(n_md_steps, n_mc_steps, sample_size)``\n", @@ -377,12 +379,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "f19ffb83", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def calculate_excess_chemical_potential(n_md_steps, n_mc_steps, sample_size):\n", " potential_energy_samples = []\n", "\n", @@ -402,19 +405,20 @@ "\n", "def system_teardown():\n", " system.electrostatics.clear()\n", - " system.thermostat.turn_off()\n", - "```" + " system.thermostat.turn_off()" ] }, { "cell_type": "code", "execution_count": null, + "id": "52971154", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "dcc9cf9e", "metadata": {}, "source": [ "# Production Run and Comparison with Analytical Theory" @@ -422,6 +426,7 @@ }, { "cell_type": "markdown", + "id": "6d0073ff", "metadata": {}, "source": [ "Now we are ready to perform measurements of the excess chemical potential using the Widom particle insertion method.\n", @@ -431,6 +436,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9ab4422e", "metadata": {}, "outputs": [], "source": [ @@ -452,6 +458,7 @@ }, { "cell_type": "markdown", + "id": "a1638b90", "metadata": {}, "source": [ "We will now plot the measured excess chemical potential as a function of $c_{\\mathrm{salt}}$ using\n", @@ -465,6 +472,7 @@ { "cell_type": "code", "execution_count": null, + "id": "07901af4", "metadata": {}, "outputs": [], "source": [ @@ -485,10 +493,8 @@ }, { "cell_type": "markdown", - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, + "id": "07b08603", + "metadata": {}, "source": [ "There is a number of analytical and phenomenological theories to describe\n", "the excess chemical potential of salt solutions using analytical expressions [2],\n", @@ -520,12 +526,13 @@ ] }, { - "cell_type": "markdown", - "metadata": { - "solution2": "hidden" - }, + "cell_type": "code", + "execution_count": null, + "id": "7bfae353", + "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "# SOLUTION CELL\n", "def davies_equation(c_salt):\n", " A = 0.509\n", " C = 0.2\n", @@ -556,19 +563,20 @@ "plt.xlim((2e-3, 0.6))\n", "plt.ylim((-0.8, 0.0))\n", "plt.legend()\n", - "plt.show()\n", - "```" + "plt.show()" ] }, { 
"cell_type": "code", "execution_count": null, + "id": "c2a8b8a3", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", + "id": "66f8b9b2", "metadata": {}, "source": [ "# Further Exercises\n", @@ -579,6 +587,7 @@ }, { "cell_type": "markdown", + "id": "b5889126", "metadata": {}, "source": [ "# References" @@ -586,6 +595,7 @@ }, { "cell_type": "markdown", + "id": "fe0581dd", "metadata": {}, "source": [ "[1] B. Widom. Some topics in the theory of fluids. Journal of Chemical Physics, 39:2808, 1963, doi:[10.1063/1.1734110](https://doi.org/10.1063/1.1734110). \n", @@ -614,5 +624,5 @@ } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } diff --git a/maintainer/CI/build_cmake.sh b/maintainer/CI/build_cmake.sh index a2d6496a8c2..66cc2671f5a 100755 --- a/maintainer/CI/build_cmake.sh +++ b/maintainer/CI/build_cmake.sh @@ -150,7 +150,7 @@ fi cmake_params="-D CMAKE_BUILD_TYPE=${build_type} -D CMAKE_CXX_STANDARD=${with_cxx_standard} -D ESPRESSO_WARNINGS_ARE_ERRORS=ON ${cmake_params}" cmake_params="${cmake_params} -D CMAKE_INSTALL_PREFIX=/tmp/espresso-unit-tests -D ESPRESSO_INSIDE_DOCKER=ON" -cmake_params="${cmake_params} -D ESPRESSO_CTEST_ARGS=-j${check_procs} -D ESPRESSO_TEST_TIMEOUT=${test_timeout}" +cmake_params="${cmake_params} -D ESPRESSO_CTEST_ARGS:STRING=-j${check_procs} -D ESPRESSO_TEST_TIMEOUT=${test_timeout}" if [ "${make_check_benchmarks}" = true ]; then cmake_params="${cmake_params} -D ESPRESSO_BUILD_BENCHMARKS=ON" diff --git a/maintainer/CI/deploy_tutorials.py b/maintainer/CI/deploy_tutorials.py index 0099fc83284..65a5b01a550 100755 --- a/maintainer/CI/deploy_tutorials.py +++ b/maintainer/CI/deploy_tutorials.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019-2022 The ESPResSo project +# Copyright (C) 2019-2023 The ESPResSo project # # This file is part of ESPResSo. # @@ -18,20 +18,46 @@ # along with this program. If not, see . # -"""List all tutorial files to deploy (PDF, HTML and figures)""" +""" +List all tutorial files to deploy (PDF, HTML and figures) and write +their filepaths to a text file. +Update metadata of Jupyter notebooks written in the HTML format. 
+""" import pathlib +import lxml.etree import lxml.html +import re -deploy_list = list(pathlib.Path().glob('*/*.pdf')) -for filepath in pathlib.Path().glob('*/*.html'): +re_title_head = re.compile("].+?") + +deploy_list = list(pathlib.Path().glob("*/*.pdf")) +for filepath in pathlib.Path().glob("*/*.html"): deploy_list.append(filepath) + root = lxml.html.parse(filepath) # extract all figures - with open(filepath, encoding='utf-8') as f: - html = lxml.html.parse(f) - figures = filter(lambda src: not src.startswith('data:image'), - html.xpath('//img/@src')) + figures = filter(lambda src: not src.startswith("data:image"), + root.xpath("//img/@src")) deploy_list += list(map(lambda src: filepath.parent / src, figures)) + # update metadata + try: + first_title = root.xpath("/html/body//h1")[0] + metadata = root.xpath("/html/head")[0] + meta_title_old = root.xpath("/html/head/title")[0] + meta_title_new = lxml.etree.SubElement( + metadata, "title", attrib=meta_title_old.attrib) + meta_title_new.text = first_title.text + new_title = lxml.html.tostring(meta_title_new, encoding=str) + with open(filepath, "r+") as f: + content = f.read() + assert len(re_title_head.findall(content)) == 1 + content = re_title_head.sub(lambda m: new_title, content, 1) + f.seek(0) + f.truncate() + f.write(content) + except Exception as err: + print(f"could not process '{str(filepath)}':") + print(f"{type(err).__name__}: {err}") -with open('deploy_list.txt', 'w') as f: - f.write('\n'.join(map(str, deploy_list))) +with open("deploy_list.txt", "w") as f: + f.write("\n".join(map(str, deploy_list))) diff --git a/requirements.txt b/requirements.txt index 24b9118e07c..039abc76e21 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,19 +17,21 @@ pystencils==1.2 lbmpy==1.2 sympy==1.9 islpy==2022.2.1 +jinja2>=3.0.3 # CI-related requests>=2.25.1 lxml>=4.8.0 coverage>=6.2 # sphinx and its dependencies -sphinx>=3.4.3 -sphinx-toggleprompt==0.2.0 -sphinxcontrib-bibtex>=2.5.0 +sphinx>=4.3.2 +sphinx-toggleprompt==0.4.0 +sphinxcontrib-bibtex>=2.6.1 numpydoc>=1.5.0 pybtex>=0.23 # jupyter dependencies +jupyterlab>=4.0.8 +nbformat==5.1.3 nbconvert==6.5.1 -jupyter_contrib_nbextensions==0.5.1 tqdm>=4.57.0 # linters and their dependencies autopep8==1.6.0 diff --git a/testsuite/scripts/tutorials/test_convert.py b/testsuite/scripts/tutorials/test_convert.py index 3490cfd9240..a2dd24a842c 100644 --- a/testsuite/scripts/tutorials/test_convert.py +++ b/testsuite/scripts/tutorials/test_convert.py @@ -1,4 +1,5 @@ -# Copyright (C) 2019-2022 The ESPResSo project +# +# Copyright (C) 2019-2023 The ESPResSo project # # This file is part of ESPResSo. # @@ -14,6 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+# import unittest as ut import sys @@ -58,12 +60,8 @@ class HtmlRunner(ut.TestCase): cell_py_src = ''' import numpy as np %matplotlib notebook -import matplotlib as mpl # split here -import matplotlib.pyplot as plt # don't split -try: - from espressomd.visualization import openglLive -except ImportError: - mpl.use('Agg') # running in CI without graphical output +import matplotlib as mpl +import matplotlib.pyplot as plt plt.ion() global_var = 5 plt.plot([1, 2], [3, global_var]) @@ -132,58 +130,43 @@ def test_html_wrapper(self): lines = (self.cell_py_src .replace('%matplotlib notebook', '%matplotlib inline') .replace('global_var = 5', 'global_var = 20') - ).split('\n') - self.assertEqual(nb_output['cells'][1]['source'], '\n'.join(lines[:3])) - self.assertEqual(nb_output['cells'][2]['source'], '\n'.join(lines[3:])) + ) + self.assertEqual(nb_output['cells'][1]['source'], lines) # the cell should have produced a plot - graphical_plots = True - try: - from espressomd.visualization import openglLive # pylint: disable=unused-import - except ImportError: - graphical_plots = False # running in CI without graphical output - if graphical_plots: - outputs = nb_output['cells'][2]['outputs'] - self.assertTrue(outputs, 'cell has no output') - self.assertIn('image/png', outputs[0]['data']) - self.assertGreater(len(outputs[0]['data']['image/png']), 6000) + outputs = nb_output['cells'][1]['outputs'] + self.assertTrue(outputs, 'cell has no output') + self.assertIn('image/png', outputs[0]['data']) + self.assertGreater(len(outputs[0]['data']['image/png']), 6000) # check the external script was correctly inserted - self.assertEqual(nb_output['cells'][3]['cell_type'], 'markdown') - self.assertEqual(nb_output['cells'][3]['source'], + self.assertEqual(nb_output['cells'][2]['cell_type'], 'markdown') + self.assertEqual(nb_output['cells'][2]['source'], 'Solution from test_convert_script.py') - self.assertEqual(nb_output['cells'][4]['cell_type'], 'code') - self.assertEqual(nb_output['cells'][4]['source'], 'global_var = 20') + self.assertEqual(nb_output['cells'][3]['cell_type'], 'code') + self.assertEqual(nb_output['cells'][3]['source'], 'global_var = 20') - def test_exercise2_plugin(self): + def test_cells_prepare_for_html(self): root = pathlib.Path("@CMAKE_CURRENT_BINARY_DIR@") - f_input = root / "test_convert_exercise2.ipynb" - f_output = root / "test_convert_exercise2.run.ipynb" + f_input = root / "test_convert_cells_html.ipynb" + f_output = root / "test_convert_cells_html.run.ipynb" # setup f_output.unlink(missing_ok=True) with open(f_input, 'w', encoding='utf-8') as f: nb = nbformat.v4.new_notebook(metadata=self.nb_metadata) # question with 2 answers and an empty cell cell_md = nbformat.v4.new_markdown_cell(source='Question 1') - cell_md['metadata']['solution2_first'] = True - cell_md['metadata']['solution2'] = 'shown' nb['cells'].append(cell_md) - code = '```python\n1\n```' - cell_md = nbformat.v4.new_markdown_cell(source=code) - cell_md['metadata']['solution2'] = 'shown' - cell_md['metadata']['key'] = 'value' + code = '# SOLUTION CELL\n1' + cell_md = nbformat.v4.new_code_cell(source=code) nb['cells'].append(cell_md) cell_md = nbformat.v4.new_markdown_cell(source='1b') - cell_md['metadata']['solution2'] = 'shown' nb['cells'].append(cell_md) cell_code = nbformat.v4.new_code_cell(source='') nb['cells'].append(cell_code) # question with 1 answer and a non-empty cell cell_md = nbformat.v4.new_markdown_cell(source='Question 2') - cell_md['metadata']['solution2_first'] = True - cell_md['metadata']['solution2'] = 
'hidden' nb['cells'].append(cell_md) - code = '```python\n2\nimport matplotlib.pyplot\nglobal_var = 5\n```' - cell_md = nbformat.v4.new_markdown_cell(source=code) - cell_md['metadata']['solution2'] = 'hidden' + code = '# SOLUTION CELL\n2\nimport matplotlib.pyplot\nglobal_var = 5' + cell_md = nbformat.v4.new_code_cell(source=code) nb['cells'].append(cell_md) cell_code = nbformat.v4.new_code_cell(source='3') nb['cells'].append(cell_code) @@ -193,7 +176,7 @@ def test_exercise2_plugin(self): '--input', str(f_input), '--output', str(f_output), '--substitutions', 'global_var=20', - '--exercise2', '--remove-empty-cells'] + '--prepare-for-html'] self.run_command(cmd, f_output) # read processed notebook with open(f_output, encoding='utf-8') as f: @@ -206,7 +189,6 @@ def test_exercise2_plugin(self): cell = next(cells) self.assertEqual(cell['cell_type'], 'code') self.assertEqual(cell['source'], '1') - self.assertEqual(cell['metadata']['key'], 'value') cell = next(cells) self.assertEqual(cell['cell_type'], 'markdown') self.assertEqual(cell['source'], '1b') @@ -215,34 +197,29 @@ def test_exercise2_plugin(self): self.assertEqual(cell['source'], 'Question 2') cell = next(cells) self.assertEqual(cell['cell_type'], 'code') - self.assertEqual(cell['source'], '2\nimport matplotlib.pyplot') - cell = next(cells) - self.assertEqual(cell['cell_type'], 'code') - self.assertEqual(cell['source'], 'global_var = 20') + self.assertEqual( + cell['source'], + '2\nimport matplotlib.pyplot\nglobal_var = 20') cell = next(cells) self.assertEqual(cell['cell_type'], 'code') self.assertEqual(cell['source'], '3') self.assertEqual(next(cells, 'EOF'), 'EOF') - def test_exercise2_conversion(self): + def test_cells_conversion(self): root = pathlib.Path("@CMAKE_CURRENT_BINARY_DIR@") - f_input = root / "test_convert_exercise2_conversion.ipynb" + f_input = root / "test_convert_cells_conversion.ipynb" # setup + nb = nbformat.v4.new_notebook(metadata=self.nb_metadata) + # question and code answer + cell_md = nbformat.v4.new_markdown_cell(source='Question 1') + nb['cells'].append(cell_md) + cell_md = nbformat.v4.new_code_cell(source='# SOLUTION CELL\n1') + nb['cells'].append(cell_md) + with open(f_input, 'w', encoding='utf-8') as f: - nb = nbformat.v4.new_notebook(metadata=self.nb_metadata) - # question and code answer - cell_md = nbformat.v4.new_markdown_cell(source='Question 1') - cell_md['metadata']['solution2_first'] = True - cell_md['metadata']['solution2'] = 'hidden' - nb['cells'].append(cell_md) - code = '```python\n1\n```' - cell_md = nbformat.v4.new_markdown_cell(source=code) - cell_md['metadata']['solution2'] = 'hidden' - cell_md['metadata']['key'] = 'value' - nb['cells'].append(cell_md) nbformat.write(nb, f) # run command and check for errors - cmd = ['exercise2', '--to-py', str(f_input)] + cmd = ['cells', '--to-py', str(f_input)] self.run_command(cmd, f_input) # read processed notebook with open(f_input, encoding='utf-8') as f: @@ -255,11 +232,12 @@ def test_exercise2_conversion(self): cell = next(cells) self.assertEqual(cell['cell_type'], 'code') self.assertEqual(cell['source'], '1') - self.assertEqual(cell['metadata']['solution2'], 'shown') - self.assertEqual(cell['metadata']['key'], 'value') self.assertEqual(next(cells, 'EOF'), 'EOF') + + with open(f_input, 'w', encoding='utf-8') as f: + nbformat.write(nb, f) # run command and check for errors - cmd = ['exercise2', '--to-md', str(f_input)] + cmd = ['cells', '--to-md', str(f_input)] self.run_command(cmd, f_input) # read processed notebook with open(f_input, 
encoding='utf-8') as f: @@ -271,30 +249,28 @@ def test_exercise2_conversion(self): self.assertEqual(cell['source'], 'Question 1') cell = next(cells) self.assertEqual(cell['cell_type'], 'markdown') - self.assertEqual(cell['source'], '```python\n1\n```') - self.assertEqual(cell['metadata']['solution2'], 'hidden') - self.assertEqual(cell['metadata']['key'], 'value') + self.assertIn('```python\n1\n```', cell['source']) + cell = next(cells) + self.assertEqual(cell['cell_type'], 'code') + self.assertEqual(cell['source'], '') self.assertEqual(next(cells, 'EOF'), 'EOF') @skipIfMissingModules - def test_exercise2_autopep8(self): + def test_cells_autopep8(self): root = pathlib.Path("@CMAKE_CURRENT_BINARY_DIR@") - f_input = root / "test_convert_exercise2_autopep8.ipynb" + f_input = root / "test_convert_cells_autopep8.ipynb" # setup with open(f_input, 'w', encoding='utf-8') as f: nb = nbformat.v4.new_notebook(metadata=self.nb_metadata) # question and code answer cell_md = nbformat.v4.new_markdown_cell(source='Question 1') - cell_md['metadata']['solution2_first'] = True - cell_md['metadata']['solution2'] = 'hidden' nb['cells'].append(cell_md) - code = '```python\n\nif 1: #comment\n print( [5+1,4])\n\n```' - cell_md = nbformat.v4.new_markdown_cell(source=code) - cell_md['metadata']['solution2'] = 'hidden' + code = '# SOLUTION CELL\n\nif 1: #comment\n print( [5+1,4])\n\n' + cell_md = nbformat.v4.new_code_cell(source=code) nb['cells'].append(cell_md) nbformat.write(nb, f) # run command and check for errors - cmd = ['exercise2', '--pep8', str(f_input)] + cmd = ['cells', '--pep8', str(f_input)] self.run_command(cmd) # read processed notebook with open(f_input, encoding='utf-8') as f: @@ -305,11 +281,10 @@ def test_exercise2_autopep8(self): self.assertEqual(cell['cell_type'], 'markdown') self.assertEqual(cell['source'], 'Question 1') cell = next(cells) - self.assertEqual(cell['cell_type'], 'markdown') + self.assertEqual(cell['cell_type'], 'code') self.assertEqual( cell['source'], - '```python\nif 1: # comment\n print([5 + 1, 4])\n```') - self.assertEqual(cell['metadata']['solution2'], 'hidden') + '# SOLUTION CELL\n\nif 1: # comment\n print([5 + 1, 4])') self.assertEqual(next(cells, 'EOF'), 'EOF')
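
The test changes above retire the `exercise2` nbextension convention (solutions hidden in markdown cells carrying `solution2` metadata) in favour of plain code cells whose first line is the `# SOLUTION CELL` marker, handled by the `cells` converter invoked in the tests (`--to-py`, `--to-md`, `--pep8`, `--prepare-for-html`). As a rough illustration of the `--to-md` direction implied by the test expectations — each marked code cell becomes a fenced markdown cell followed by an empty code cell — the sketch below uses only the public `nbformat` API. It is not the converter shipped with ESPResSo, and the notebook path in the usage stub is hypothetical.

```python
import nbformat

SOLUTION_MARKER = "# SOLUTION CELL"


def solution_cells_to_markdown(path):
    """Rewrite '# SOLUTION CELL' code cells as fenced markdown cells.

    Simplified sketch of the '--to-md' behaviour exercised by
    test_cells_conversion(); not the project's actual converter.
    """
    nb = nbformat.read(path, as_version=4)
    new_cells = []
    for cell in nb.cells:
        if cell.cell_type == "code" and cell.source.startswith(SOLUTION_MARKER):
            # drop the marker line and wrap the remaining code in a fence
            body = cell.source.split("\n", 1)[1] if "\n" in cell.source else ""
            fenced = f"```python\n{body.strip()}\n```"
            new_cells.append(nbformat.v4.new_markdown_cell(source=fenced))
            # keep an empty code cell so readers have room to try the exercise
            new_cells.append(nbformat.v4.new_code_cell(source=""))
        else:
            new_cells.append(cell)
    nb.cells = new_cells
    nbformat.write(nb, path)


if __name__ == "__main__":
    # hypothetical notebook path, for illustration only
    solution_cells_to_markdown("doc/tutorials/example/example.ipynb")
```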