diff --git a/doc/bibliography.bib b/doc/bibliography.bib index 8eeb22df13d..c9423a91904 100644 --- a/doc/bibliography.bib +++ b/doc/bibliography.bib @@ -981,6 +981,16 @@ @Article{reed92a publisher={AIP Publishing} } +@InProceedings{rocklin15a, + author = {Rocklin, Matthew}, + title = {{D}ask: Parallel Computation with Blocked algorithms and Task Scheduling}, + booktitle = {Proceedings of the 14\textsuperscript{th} {P}ython in {S}cience {C}onference}, + year = {2015}, + editor = {Huff, Kathryn and Bergstra, James}, + pages = {126--132}, + doi = {10.25080/Majora-7b98e3ed-013}, +} + @Book{rubinstein03a, title = {Polymer Physics}, publisher = {Oxford University Press}, diff --git a/doc/sphinx/samples.py b/doc/sphinx/samples.py index 7e65b47e47e..4c2a768a2d3 100644 --- a/doc/sphinx/samples.py +++ b/doc/sphinx/samples.py @@ -36,6 +36,7 @@ def get_docstring(filenames): # extract docstrings samples = [x for x in os.listdir(samples_dir) if x.endswith('.py')] samples += ['immersed_boundary/sampleImmersedBoundary.py', + 'high_throughput_with_dask/run_pv.py', 'object_in_fluid/motivation.py'] docstrings = get_docstring(samples) diff --git a/doc/tutorials/CMakeLists.txt b/doc/tutorials/CMakeLists.txt index 479f3491e53..fc0ee979ba6 100644 --- a/doc/tutorials/CMakeLists.txt +++ b/doc/tutorials/CMakeLists.txt @@ -115,6 +115,7 @@ add_subdirectory(ferrofluid) add_subdirectory(constant_pH) add_subdirectory(widom_insertion) add_subdirectory(electrodes) +add_subdirectory(grand_canonical_monte_carlo) configure_file(Readme.md ${CMAKE_CURRENT_BINARY_DIR} COPYONLY) configure_file(convert.py ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/doc/tutorials/Readme.md b/doc/tutorials/Readme.md index 869ded3b331..d70dd75b54a 100644 --- a/doc/tutorials/Readme.md +++ b/doc/tutorials/Readme.md @@ -69,7 +69,9 @@ physical systems. * **Widom particle insertion method** Measuring the excess chemical potential of a salt solution using the Widom particle insertion method. [Guide](widom_insertion/widom_insertion.ipynb) - +* **Grand-Canonical Monte Carlo** + Simulating a polyelectrolyte solution coupled to a reservoir of salt. + [Guide](grand_canonical_monte_carlo/grand_canonical_monte_carlo.ipynb) [comment]: # (End of tutorials landing page) diff --git a/doc/tutorials/constant_pH/constant_pH.ipynb b/doc/tutorials/constant_pH/constant_pH.ipynb index 95a88f258b9..965941fee82 100644 --- a/doc/tutorials/constant_pH/constant_pH.ipynb +++ b/doc/tutorials/constant_pH/constant_pH.ipynb @@ -1119,7 +1119,7 @@ "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" }, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1133,7 +1133,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.10.6" } }, "nbformat": 4, diff --git a/doc/tutorials/grand_canonical_monte_carlo/CMakeLists.txt b/doc/tutorials/grand_canonical_monte_carlo/CMakeLists.txt new file mode 100644 index 00000000000..a1981aabe09 --- /dev/null +++ b/doc/tutorials/grand_canonical_monte_carlo/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# Copyright (C) 2023 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +configure_tutorial_target( + TARGET tutorial_grand_canonical_monte_carlo DEPENDS + grand_canonical_monte_carlo.ipynb figures/schematic.svg) + +nb_export(TARGET tutorial_grand_canonical_monte_carlo SUFFIX "" FILE + "grand_canonical_monte_carlo.ipynb" HTML_RUN VAR_SUBST + "\"p3m_params={'mesh':10,'cao':6,'r_cut':8.22}\"") diff --git a/doc/tutorials/grand_canonical_monte_carlo/NotesForTutor.md b/doc/tutorials/grand_canonical_monte_carlo/NotesForTutor.md new file mode 100644 index 00000000000..618949fff21 --- /dev/null +++ b/doc/tutorials/grand_canonical_monte_carlo/NotesForTutor.md @@ -0,0 +1,14 @@ +# Notes for Tutors: Grand-Canonical Monte Carlo + +## Physics learning goals + +After the tutorial, students should be able to explain: + +* what the grand-canonical ensemble is and where it is applicable +* how to simulate in the grand-canonical ensemble using GCMC + +## ESPResSo learning goals + +In the course of this tutorial, students should learn to: + +* to set up a GCMC simulation in ESPResSo diff --git a/doc/tutorials/grand_canonical_monte_carlo/figures/schematic.svg b/doc/tutorials/grand_canonical_monte_carlo/figures/schematic.svg new file mode 100644 index 00000000000..1f605c7dab9 --- /dev/null +++ b/doc/tutorials/grand_canonical_monte_carlo/figures/schematic.svg @@ -0,0 +1,257 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/tutorials/grand_canonical_monte_carlo/grand_canonical_monte_carlo.ipynb b/doc/tutorials/grand_canonical_monte_carlo/grand_canonical_monte_carlo.ipynb new file mode 100644 index 00000000000..41552acb3a0 --- /dev/null +++ b/doc/tutorials/grand_canonical_monte_carlo/grand_canonical_monte_carlo.ipynb @@ -0,0 +1,822 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Simulations in the Grand-Canonical Ensemble\n", + "\n", + "## Table of Contents\n", + "* [Introduction](#Introduction)\n", + "* [Grand-Canonical Ensemble](#Grand-Canonical-Ensemble)\n", + "* [How to simulate the grand-canonical ensemble?](#How-to-simulate-a-grand-canonical-ensemble?)\n", + "* [References](#References)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Introduction\n", + "Often, in soft-matter physics and chemistry we are interested in systems where the particle numbers are not fixed. This is for instance the case in systems where chemical reactions occur or when particles can be exchanged with a reservoir. The case of changing particle numbers is best described by the grand canonical ensemble.\n", + "\n", + "Canonical Monte-Carlo or molecular dynamics simulations are not suitable for this task, since they conserve the particle numbers. 
However, the Metropolis Monte Carlo method can be generalized to the Grand-Canonical ensemble in a straightforward manner, yielding the so-called Grand-Canonical Monte Carlo method (GCMC), which is introduced in this tutorial.\n", + "\n", + "This tutorial builds on the tutorial [Widom insertion](https://espressomd.github.io/tutorials/widom_insertion/widom_insertion.html), in which you measured the chemical potential of a monovalent salt solution by using a combination of MD and MC techniques. In this part you will make use of the results (i. e. the chemical potentials) of the previous tutorial to simulate the partitioning of salt ions between a reservoir and a polymer solution. Therefore we introduce and use the grand-canonical Monte Carlo method." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Grand-Canonical Ensemble\n", + "The case of changing particle numbers is best described by the grand canonical ensemble [1].\n", + "In the grand canonical ensemble, the volume $V$, the temperature $T$ and the chemical potentials $\\mu_i$ of the exchangeable species $i$ rather than the particle numbers $N_i$ are fixed. The chemical potentials $\\mu_i$ correspond to the free energy (Helmholtz free energy $F$, will be defined later) cost of adding a particle of type $i$ while keeping everything else fixed:\n", + "\n", + "$$\n", + "\\mu_i = \\left(\\frac{\\partial F}{\\partial N_i}\\right)_{T, V, \\left\\{N_j\\right\\}_{j\\neq i}}.\n", + "$$\n", + "\n", + "The grand canonical ensemble is thus also referred to as the $(\\mu, V, T)$-ensemble.\n", + "In the grand canonical ensemble, the probability for the system to contain $N_i$ particles of type $i$, i.e. a total of $N=\\sum_{i=1}^{N_\\mathrm{s}}N_i$ ($N_\\mathrm{s}$ is the number of different types) and to be in a specific microstate $\\xi\\in\\Gamma_{N}$ is given by the probability distribution\n", + "\n", + "$$\n", + "p^\\mathrm{G}\\left(\\left\\{N_i\\right\\},\\xi\\right) = \\frac{\\exp\\left(-\\beta \\left(H(\\xi)-\\sum_{i=1}^{N_\\mathrm{s}}\\mu_iN_i\\right)\\right)}{Z^\\mathrm{G}h^{3N}\\prod_{i=1}^{N_\\mathrm{s}} N_i!}. 
\\quad \\tag{1}\n", + "$$\n", + "\n", + "Here, $H$ denotes the Hamiltonian of the system with $N_i$ particles of type $i$, $h$ is the planck constant, $\\beta=1/k_\\mathrm{B}T$ the inverse temperature and $Z^\\mathrm{G}$ is the grand-canonical partition function which is given by\n", + "\n", + "$$\n", + "Z^\\mathrm{G}\\left(\\left\\{\\mu_i\\right\\},V,T\\right) = \\sum_{N_1=0}^{\\infty}...\\sum_{N_\\mathrm{s}=0}^{\\infty}Z\\left(\\left\\{N_i\\right\\},V,T\\right)\\exp\\left(\\beta \\sum_{i=1}^{N_\\mathrm{s}}\\mu_iN_i\\right),\n", + "$$\n", + "\n", + "with the canonical partition function\n", + "\n", + "$$\n", + "\tZ\\left(\\left\\{N_i\\right\\},V,T\\right) = \\frac{1}{h^{3N} \\prod_{i=1}^{N_s}N_i!}\\int_{\\Gamma_N} \\mathrm{d}\\xi \\ \\exp(-\\beta H(\\xi)).\n", + "$$\n", + "\n", + "The partition function is connected to a thermodynamic potential, called the grand potential (Landau free energy):\n", + "\n", + "$$\n", + " \\Omega\\left(\\left\\{\\mu_i\\right\\},V,T\\right) = -k_\\mathrm{B}T\\ln\\left(Z^\\mathrm{G}\\left(\\left\\{\\mu_i\\right\\},V,T\\right)\\right).\n", + "$$\n", + "\n", + "In a similiar way, the Helmholtz free energy is defined as\n", + "\n", + "$$\n", + "F\\left(\\left\\{N_i\\right\\},V,T\\right) = -k_\\mathrm{B}T\\ln\\left(Z\\left(\\left\\{N_i\\right\\},V,T\\right)\\right).\n", + "$$\n", + "\n", + "\n", + "When considering a system that can exchange particles with a reservoir, the chemical potential $\\mu_i$ for each exchangeable species $i$ has to be equal in the reservoir and in the system:\n", + "\\begin{equation}\n", + "\\mu_i^{\\mathrm{sys}} = \\mu_i^{\\mathrm{res}}.\n", + "\\end{equation}\n", + "This condition is referred to as a chemical equilibrium between the two phases and is equivalent to a minimization of the free energy of the total system (system + reservoir).\n", + "In the case where one requires electroneutrality of the phases, due to the additional constraint, the electrochemical potential $\\overline{\\mu}_i=\\mu_i+z_ie\\psi$ rather than the chemical potential has to be equal. Here, $z_i$ is the valency of type $i$, $e$ is the elementary charge and $\\psi$ is the local electrostatic potential." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## How to simulate a grand-canonical ensemble?\n", + "### Metropolis-Hastings Algorithm\n", + "The grand-canonical distribution (see eq. (1)) can be sampled using the Metropolis Monte Carlo method, similiar to the canonical ensemble. Before we derive the acceptance criterion for the GCMC method, let us shortly recall the generic Metropolis-Hastings algorithm.\n", + "\n", + "We want to calculate averages of observables $A$ which are distributed according to some distribution $p^\\mathrm{eq}_s$ (e.g. the canonical or grand-canonical distribution)\n", + "\n", + "$$\n", + "\\langle A\\rangle = \\frac{\\sum_{s\\in\\Gamma} A_s p^\\mathrm{eq}_s}{\\sum_{s\\in\\Gamma} p^\\mathrm{eq}_s}. 
\\quad \\tag{2}\n", + "\\label{eq:can_average_mc}\n", + "$$\n", + "\n", + "Here, $s$ labels the different states of the systems, $A_s$ is the value of the observable $A$ in the state $s$ and the sum runs over the set of all states $\\Gamma$.\n", + "The sum can also be an integral for systems with continuous degrees of freedom, for this derivation it does not matter.\n", + "For almost all systems, the ensemble average (2) can not be evaluated exactly and only numerical approximations can be obtained.\n", + "The basic idea of all MC methods is to randomly sample only a subset of states.\n", + "In the most naive approach (simple sampling), states are simply selected with a uniform probability from the space $\\Gamma$ of states $s$, resulting in a sample $\\gamma\\subset\\Gamma$.\n", + "The canonical average can then be approximated by a sum over $\\gamma$:\n", + "\n", + "$$\n", + "\\langle A\\rangle \\approx \\frac{\\sum_{s\\in\\gamma} A_s p^\\mathrm{eq}_s}{\\sum_{s\\in\\gamma} p^\\mathrm{eq}_s} \\quad \\tag{3}\n", + "$$\n", + "\n", + "This method is highly inefficient, as most states will typically not contribute significantly to the average.\n", + "To get a more efficient sampling method, let us select the states $s$ according to a non-uniform probability distribution $p_s$.\n", + "Then, the average can be written as \n", + "\n", + "$$\n", + "\\langle A\\rangle^{\\mathrm{can}} \\approx \\frac{\\sum_{s\\in\\gamma} A_s p^\\mathrm{eq}_s/p_s}{\\sum_{s\\in\\gamma} p^\\mathrm{eq}_s/p_s}, \\quad \\tag{4}\n", + "\\label{eq:can_average_mc_prob}\n", + "$$\n", + "\n", + "where we have to divide in the numerator and in the denominator by $p_s$ to compensate for the fact that the samples are distributed non-uniformly.\n", + "It is obvious that we recover (3) from (4) as the special case of a uniform distribution. \n", + "To sample the averages more efficiently, we simply pick\n", + "\n", + "$$\n", + "p_s \\propto p^\\mathrm{eq}_s \\quad \\tag{5}\n", + "\\label{eq:mc_prob}\n", + "$$\n", + "\n", + "such that the selected samples are representative of the desired equilibrium distribution.\n", + "In this case, the estimators of the averages are thus simply given by\n", + "\n", + "$$\n", + "\\langle A\\rangle \\approx \\frac{1}{N}\\sum_{s\\in\\gamma}A_s.\n", + "$$\n", + "The remaining problem is now to find a method which allows us to sample states according to (5).\n", + "This can be done in terms of so-called Markov chains. \n", + "A Markov chain is a stochastic process without memory.\n", + "In our case, the Markov chain is simply a sequence $\\{s_i\\}_{i=1,...N}$ of $N$ states $s_i$ of the system and the fact that the Markov chain has no memory means that the transition probability from a state $s_i$ to a state $s_j$ can be given in terms of a transition matrix $W_{i\\rightarrow j}$ which is constant.\n", + "The equilibrium probability distribution has to be invariant under $W$. 
Mathematically this condition can be expressed in the form\n", + "\n", + "$$\n", + "\\sum_{i}W_{i\\rightarrow j}p_i^\\mathrm{eq} = p_j^\\mathrm{eq}\n", + "$$\n", + "\n", + "A sufficient constraint on $W_{i\\rightarrow j}$ which ensures the validity of this condition\n", + "is the so-called criterion of detailed balance:\n", + "\n", + "$$\n", + "\\frac{W_{i\\rightarrow j}}{W_{j\\rightarrow i}} = \\frac{p_j^\\mathrm{eq}} {p_i^\\mathrm{eq}}.\n", + "$$\n", + "\n", + "Typically, the transition matrix elements are factorized into a proposal probability $g_{i\\rightarrow j}$ and acceptance probability $P_{i\\rightarrow j}^\\mathrm{acc}$:\n", + "\n", + "$$\n", + "W_{i\\rightarrow j} = g_{i\\rightarrow j} P_{i\\rightarrow j}^\\mathrm{acc}. \n", + "$$\n", + "\n", + "For the symmetric choice $g_{i\\rightarrow j}=g_{j\\rightarrow i}$, the condition of detailed balance reduces to \n", + "\n", + "$$\n", + "\\frac{P_{i\\rightarrow j}^\\mathrm{acc}}{P_{j\\rightarrow i}^\\mathrm{acc}} = \\frac{p_j^\\mathrm{eq}}{p_i^\\mathrm{eq}}. \n", + "\\quad \\tag{6}\n", + "$$\n", + "\n", + "By checking the cases $p_j^\\mathrm{eq}>p_i^\\mathrm{eq}$ and $p_j^\\mathrm{eq} \\leq p_i^\\mathrm{eq}$ one can see that the acceptance probability \n", + "\n", + "$$\n", + "P_{i\\rightarrow j}^\\mathrm{acc,\\text{ }Metropolis} = \\min\\left(1,\\frac{p_j^\\mathrm{eq}}{p_i^\\mathrm{eq}}\\right) \\quad \\tag{7}\n", + "$$\n", + "\n", + "fulfills equation (6). With this, we can now formulate the Metropolis-Hastings algorithm [2].\n", + "We start with an arbitrary initial state $s_1$ and generate a sequence $\\{s_i\\}_{i=1,...N}$ of $N$ states.\n", + "Each new state (n) is generated from the previous state (o) in the following way. \n", + "First, we propose a new state according to the symmetric proposal probability $g_{\\mathrm{o}\\rightarrow\\mathrm{n}}$.\n", + "Then, we accept the new configuration with a probability given by equation (7).\n", + "If the proposed new state is rejected, the old state is retained.\n", + "Applying this algorithm many times results in the desired Markov chain of representative configurations.\n", + "\n", + "### GCMC method\n", + "To simulate a system in a grand-canonical ensemble the described algorithm can be applied the grand-canonical distribution [1]. When considering the possible states of the system, it becomes clear that every state can be reached by a combination of two kinds of moves:\n", + "1. Displacement moves that leave the particle numbers fixed and only change positions. This is equivalent to sampling in the canonical ensemble with the current particle number. Due to the ergodic hypothesis this operation can be performed either using canonical Monte Carlo methods or canonical molecular dynamic simulations (e. g. Langevin dynamics). In this tutorial we choose the latter.\n", + "\n", + "2. Insertion and Deletion moves that add or delete a single particle while keeping all other particle positions fixed.\n", + "\n", + "In the GCMC method the insertion and deletion moves are implemented in the following fashion. First of all it is determined randomly with equal probability if an insertion or deletion move is performed. In the case of an insertion the additional particle is placed at a uniformly chosen random position in the simulation box. 
Then the proposed new state is accepted according to the criterion\n", + "\n", + "$$\n", + "\\begin{split}\n", + "P_{N_i\\rightarrow N_i+1}^\\mathrm{acc,\\text{ }GCMC} &= \\min\\left(1,\\frac{1}{N_i+1}\\left(\\frac{V}{\\Lambda_i^3}\\right)\\times \\exp\\left(-\\beta \\left(U^{N_i+1}(\\mathbf{q})-U^{N_i}(\\mathbf{q})\\right)+\\beta \\mu_i\\right)\\right).\n", + "\\end{split}\n", + "$$\n", + "\n", + "In the case of a deletion a randomly selected particle is removed from the system. \n", + "We have the analogous result\n", + "\n", + "$$\n", + "\\begin{split}\n", + "P_{N_i\\rightarrow N_i-1}^\\mathrm{acc,\\text{ }GCMC} &= \\min\\left(1,N_i\\left(\\frac{\\Lambda_i^3}{V}\\right)\\times \\exp\\left(-\\beta \\left(U^{N_i-1}(\\mathbf{q})-U^{N_i}(\\mathbf{q})\\right)-\\beta \\mu_i\\right)\\right).\n", + "\\end{split}\n", + "$$" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "solution2": "hidden", + "solution2_first": true + }, + "source": [ + "**Exercise**\n", + "\n", + "* Derive the acceptance criteria for the GCMC method by combining the grand-canonical distribution with the Metropolis-Hastings acceptance criteria.\n", + "\n", + "**Hint**\n", + "\n", + "* Use the grand-canonical distribution with all the momenta integrated out:\n", + "$$\n", + "p^\\mathrm{eq}_{N} \\propto \\exp\\left(-\\beta U(\\mathbf{q}) \\right ) \\prod_i \\frac{1}{N_i!}\\left(\\frac{V}{\\Lambda_i^3}\\right)^{N_i}\\times \\exp \\left (\\beta \\mu_i N_i\\right)\n", + "$$\n", + "This alternative expression is valid if we are only interested in observables which do not depend on the momenta." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "solution2": "hidden" + }, + "source": [ + "For the case ($N_i\\rightarrow N_i + 1$) one gets\n", + "\n", + "\\begin{align}\n", + "P_{N_i\\rightarrow N+1}^\\mathrm{acc,\\text{ }GCMC} &= \\min\\left(1,\\frac{p^\\mathrm{eq}_{N_i+1}}{p^\\mathrm{eq}_{N_i}}\\right) = \\min\\left(1,\\frac{\\frac{1}{(N_i+1)!}\\left(\\frac{V}{\\Lambda_i^3}\\right)^{N_i+1}\\times \\exp\\left(-\\beta U^{N_i+1}(\\mathbf{q})+\\beta \\mu (N_i+1)\\right)}{\\frac{1}{N_i!}\\left(\\frac{V}{\\Lambda_i^3}\\right)^{N_i}\\times \\exp\\left(-\\beta U^{N_i}(\\mathbf{q})+\\beta \\mu N_i\\right)}\\right)\\\\\n", + "&= \\min\\left(1,\\frac{1}{N_i+1}\\left(\\frac{V}{\\Lambda_i^3}\\right)\\times \\exp\\left(-\\beta \\left(U^{N_i+1}(\\mathbf{q})-U^{N_i}(\\mathbf{q})\\right)+\\beta \\mu_i\\right)\\right).\n", + "\\end{align}\n", + "\n", + "For the other case ($N_i\\rightarrow N_i-1$) we have the analogous result\n", + "\n", + "\\begin{align}\n", + "P_{N_i\\rightarrow N_i-1}^\\mathrm{acc,\\text{ }GCMC} &= \\min\\left(1,\\frac{p^\\mathrm{eq}_{N_i-1}}{p^\\mathrm{eq}_{N_i}}\\right) = \\min\\left(1,\\frac{\\frac{1}{(N_i-1)!}\\left(\\frac{V}{\\Lambda_i^3}\\right)^{N_i-1}\\times \\exp\\left(-\\beta U^{N_i-1}(\\mathbf{q})+\\beta \\mu_i (N_i-1)\\right)}{\\frac{1}{N_i!}\\left(\\frac{V}{\\Lambda_i^3}\\right)^{N_i}\\times \\exp\\left(-\\beta U^{N_i}(\\mathbf{q})+\\beta \\mu_i N_i\\right)}\\right)\\\\\n", + "&= \\min\\left(1,N_i\\left(\\frac{\\Lambda_i^3}{V}\\right)\\times \\exp\\left(-\\beta \\left(U^{N_i-1}(\\mathbf{q})-U^{N_i}(\\mathbf{q})\\right)-\\beta \\mu_i\\right)\\right).\n", + "\\end{align}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The Simulated System: Polyelectrolyte solution coupled to a reservoir\n", + "\n", + "
<img src='figures/schematic.svg'>\n",
+    "\n",
+    "Fig. 1: Schematic representation of the considered two phase setup.
\n", + "\n", + "Here we want to investigate a polyelectrolyte solution that is coupled to a reservoir containing a monovalent salt, as shown schematically in figure 1. Similar systems are useful to investigate for example the swelling behavior of hydrogels. The surrounding of the gel can be considered as the reservoir and the inner of the gel is the system. The small salt ions (X$^+$ and X$^-$) can be exchanged between the two phases while the polyelectrolyte is confined to the system. Experimentally such a setup could be realized by seperating the two phases with a semi-permeable membrane. Similiar setups also occur in the study of polyelectrolyte hydrogels.\n", + "\n", + "Due to the macroscopic electroneutrality of both phases the salt ions obey an electrochemical equilibrium, i. e. there is a difference in the electrostatic potential between the two phases\n", + "\n", + "$$\n", + "\\mu_i^{\\mathrm{sys}}+z_ie\\psi^{\\mathrm{Donnan}} = \\mu_i^{\\mathrm{res}}.\n", + "$$\n", + "\n", + "This effect is called the Donnan effect [3] and leads to an unequal partioning of salt between the reservoir and the system. The partitioning can be quantified using the partition coefficients $\\xi_i \\equiv c^{\\mathrm{sys}}_i/c^{\\mathrm{res}}_i$, where $c^{\\mathrm{sys}}_i$ and $c^{\\mathrm{res}}_i$ are the particle concentrations of type $i$ in the system and reservoir, respectively. Defining the \"universal\" partition coefficient,\n", + "\n", + "\\begin{align*}\n", + " \\xi\\equiv\\left\\{\n", + " \\begin{array}{ll}\n", + " \\displaystyle\\xi_+ - \\frac{c_{\\text{M}^-}^{\\text{sys}}}{c_{\\text{X}^+,\\text{X}^-}^{\\text{res}}}& \\text{for positive ions}\\\\\n", + " \\displaystyle\\xi_- & \\text{for negative ions,}\\\\\n", + " \\end{array}\\right.\n", + "\\end{align*}\n", + "\n", + "one can obtain the following solution for an ideal system without any interaction between the particles:\n", + "\n", + "$$\n", + "\\xi^{\\text{Donnan}} = -\\frac{c_{\\text{M}^-}^{\\text{sys}}}{2c_{\\text{X}^+,\\text{X}^-}^{\\text{res}}} + \\sqrt{\\left(\\frac{c_{\\text{M}^-}^{\\text{sys}}}{2c_{\\text{X}^+,\\text{X}^-}^{\\text{res}}}\\right)^2+1}.\n", + "$$\n", + "\n", + "This solution shows that salt is rejected by the polyelctrolyte solution. In the following we want to investigate this partitioning for a system with electrostatic interactions.\n", + " \n", + "Because of the electroneutrality condition, the two definitions of the \"universal\" partition coefficients for positive and negative ions are equal. The additional term in the definition for positive ions is a correction due to the counterions in the system, which neutralize the charged polyelectrolytes." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the folllowing, we plot the universal partition coefficient over the ratio of the concentrations." 
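+    , " As a quick check of this formula: for a concentration ratio $c_{\\mathrm{M}^-}^{\\mathrm{sys}}/(2c_{\\mathrm{X}^+,\\mathrm{X}^-}^{\\mathrm{res}}) = 1$ it predicts $\\xi^{\\mathrm{Donnan}} = \\sqrt{2} - 1 \\approx 0.41$, i.e. the concentration of negative ions inside the system reaches only about 41% of its reservoir value."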
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "np.random.seed(42)\n", + "plt.rcParams.update({\"font.size\": 18})\n", + "\n", + "ratios = np.logspace(-1, 2, 1000)\n", + "plt.figure(figsize=(10, 6))\n", + "plt.loglog(ratios, -ratios + np.sqrt((ratios)**2 + 1), label=r\"$\\xi^{\\mathrm{Donnan}}$\")\n", + "plt.xlabel(r\"$\\dfrac{c_{\\mathrm{M}^-}^{\\mathrm{sys}}}{2c_{\\mathrm{X}^+,\\mathrm{X}^-}^{\\mathrm{res}}}$\")\n", + "plt.ylabel(r\"$\\xi$\")\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simulation Setup\n", + "### Load data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let us first add required modules." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tqdm # module for progress bar\n", + "import scipy.interpolate\n", + "import pint # module for unit conversions\n", + "\n", + "import espressomd\n", + "import espressomd.interactions\n", + "import espressomd.electrostatics\n", + "import espressomd.reaction_methods\n", + "import espressomd.polymer\n", + "\n", + "espressomd.assert_features([\"ELECTROSTATICS\", \"P3M\", \"WCA\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the next step we define the [Lennard-Jones](https://espressomd.github.io/tutorials4.2.0/lennard_jones/lennard_jones.html) units. We use the module *pint* in order to make unit conversions between reduced simulation units and SI units easier." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "##### Units\n", + "ureg = pint.UnitRegistry()\n", + "sigma = 0.355 * ureg.nm # Sigma in SI units.\n", + "# This is corresponds to the half of the Bjerrum length in water (eps_r approx 80) and room temperature (293 K)\n", + "T = 298.15 * ureg.K # 25 degree celcius corresponds to approx room temperature\n", + "CREF_IN_MOL_PER_L = 1.0 * ureg.molar #in mol/l\n", + "\n", + "# Define the reduced units\n", + "ureg.define(f\"reduced_length = {sigma}\")\n", + "ureg.define(f\"reduced_charge = 1* e\")\n", + "ureg.define(f\"reduced_energy = {T} * boltzmann_constant\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We load the resulting excess chemical potentials for different salt concentrations of the reservoir from the [Widom insertion tutorial](https://espressomd.github.io/tutorials/widom_insertion/widom_insertion.html). The loaded excess chemical potentials are in units of $k_\\mathrm{B}T$ (i.e. in reduced units)." 
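+    , " Between the tabulated reservoir concentrations, the values are interpolated with `scipy.interpolate.interp1d` in the next cell."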
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "salt_concentration_magnitudes_si = [0.001, 0.003, 0.01, 0.03, 0.1]\n", + "salt_concentration_si = np.array(salt_concentration_magnitudes_si) * ureg.molar\n", + "salt_concentration_sim = salt_concentration_si.to(\"molecules/reduced_length^3\")\n", + "excess_chemical_potential_data = [\n", + " -0.0672291585811938, -0.123732052028611, -0.218687259792629,\n", + " -0.326207586236404, -0.510091568808112\n", + "]\n", + "excess_chemical_potential_data_error = [\n", + " 0.000994798843587, 0.00152160176511698, 0.00220162667953136,\n", + " 0.0029299206854553, 0.00422309102221265\n", + "]\n", + "excess_chemical_potential_monovalent_pairs_in_bulk = scipy.interpolate.interp1d(\n", + " salt_concentration_sim.magnitude, excess_chemical_potential_data)\n", + "fig = plt.figure(figsize=(10, 6))\n", + "plt.errorbar(salt_concentration_si.magnitude,\n", + " excess_chemical_potential_data,\n", + " yerr=excess_chemical_potential_data_error,\n", + " linestyle=\"-\",\n", + " marker=\"o\",\n", + " markersize=5.,\n", + " capsize=5.)\n", + "plt.xlabel(r\"salt concentration $c_{\\mathrm{salt}}$ (mol/l)\")\n", + "plt.ylabel(r\"excess chemical potential $\\mu_{\\mathrm{ex}}$ ($k_{\\mathrm{B}}T$)\")\n", + "plt.xscale(\"log\")\n", + "plt.xlim((5e-4, 0.5))\n", + "plt.ylim((-0.8, 0));" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initializing the system\n", + "Now we are ready to set up the system. In the first step we define the system parameters such as charges and system size as well as simulation paramters such as the time step. For small salt concentrations we need to produce more samples to get good statistics for $\\xi$ due to the finite system size. For a real production run it is recommended to use larger sample sizes." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "##### Particle types and charges\n", + "types = {\n", + " \"Xplus\": 1, # Positive ions\n", + " \"Xminus\": 2, # Negative ions\n", + " \"Monomer\": 3, # Negative monomers\n", + "}\n", + "\n", + "charges = {\n", + " \"Xplus\": 1.0,\n", + " \"Xminus\": -1.0,\n", + " \"Monomer\": -1.0,\n", + "}\n", + "# Initialize concentrations\n", + "c_salt_res_sim = salt_concentration_sim[0]\n", + "c_monomer = 0.1 * ureg.molar\n", + "C_MONOMERS_SIM = c_monomer.to(\"molecules/reduced_length^3\")\n", + "\n", + "##### Number of monomers\n", + "N_CHAINS = 10\n", + "N_MONOMERS_PER_CHAIN = 5\n", + "\n", + "##### System size\n", + "BOX_LENGTH = np.power(\n", + " N_CHAINS * N_MONOMERS_PER_CHAIN / C_MONOMERS_SIM.magnitude, 1.0 / 3.0)\n", + "\n", + "##### Initial number of salt ion pairs\n", + "n_salt = int(c_salt_res_sim.magnitude * BOX_LENGTH**3)\n", + "\n", + "##### Weeks-Chandler-Andersen interaction\n", + "LJ_EPSILON = 1.0 # in reduced units, i.e. 1 sigma\n", + "LJ_SIGMA = 1.0 # in reduced units, i.e. 1 k_B T\n", + "\n", + "##### Electrostatic interaction\n", + "L_BJERRUM = 2.0 # in reduced units, i.e. 
2 sigma\n", + "\n", + "##### Langevin-Thermostat\n", + "KT = 1.0 # in reduced units\n", + "GAMMA = 1.0 # in reduced units\n", + "\n", + "##### Integrator parameters\n", + "DT = 0.01 # Integrator time step\n", + "SKIN = 0.4\n", + "\n", + "warmup_loops = 50\n", + "number_of_loops_for_each_concentration = [\n", + " 200, 200, 150, 150, 100\n", + "] # set these higher for better accuracy\n", + "steps_per_loop = 200\n", + "samples_per_loop = 25\n", + "\n", + "##### Seeding\n", + "langevin_seed = 42\n", + "reaction_seed = 42" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we define our box and add our particles. We create the polymer chains as in the corresponding [tutorial](https://espressomd.github.io/tutorials/polymers/polymers.html). In addition, we enable the electrostatic interactions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "##### Create an instance of the system class\n", + "system = espressomd.System(box_l=[BOX_LENGTH] * 3)\n", + "system.time_step = DT\n", + "system.cell_system.skin = SKIN\n", + "print(\"Created system class\")\n", + "\n", + "#### Add the FENE interaction\n", + "fene = espressomd.interactions.FeneBond(k=30, d_r_max=1.5)\n", + "system.bonded_inter.add(fene)\n", + "\n", + "#### Add the polymer chains\n", + "polymer_positions = espressomd.polymer.linear_polymer_positions(\n", + " n_polymers=N_CHAINS,\n", + " beads_per_chain=N_MONOMERS_PER_CHAIN,\n", + " bond_length=0.9,\n", + " seed=42)\n", + "\n", + "for positions in polymer_positions:\n", + " monomers = system.part.add(pos=positions,\n", + " type=[types[\"Monomer\"]] * N_MONOMERS_PER_CHAIN,\n", + " q=[charges[\"Monomer\"]] * N_MONOMERS_PER_CHAIN)\n", + " previous_part = None\n", + " for part in monomers:\n", + " if not previous_part is None:\n", + " part.add_bond((fene, previous_part))\n", + " previous_part = part\n", + "\n", + "##### Add the particles\n", + "N_plus = n_salt + N_CHAINS * N_MONOMERS_PER_CHAIN\n", + "system.part.add(type=[types[\"Xplus\"]] * N_plus,\n", + " pos=np.random.rand(N_plus, 3) * BOX_LENGTH,\n", + " q=[charges[\"Xplus\"]] * N_plus)\n", + "system.part.add(type=[types[\"Xminus\"]] * n_salt,\n", + " pos=np.random.rand(n_salt, 3) * BOX_LENGTH,\n", + " q=[charges[\"Xminus\"]] * n_salt)\n", + "print(\"Added particles\")\n", + "\n", + "##### Add non-bonded interactions\n", + "for i in types:\n", + " for j in types:\n", + " system.non_bonded_inter[types[i],\n", + " types[j]].wca.set_params(epsilon=LJ_EPSILON,\n", + " sigma=LJ_SIGMA)\n", + "\n", + "print(\"Added WCA interaction\")\n", + "\n", + "##### Minimize forces\n", + "system.integrator.set_steepest_descent(f_max=N_plus, gamma=10.0,\n", + " max_displacement=LJ_SIGMA / 100.)\n", + "n_steps = system.integrator.run(steps_per_loop)\n", + "assert n_steps < steps_per_loop\n", + "\n", + "print(\"Removed overlaps with steepest descent...\")\n", + "\n", + "##### Add thermostat and long-range solvers\n", + "system.thermostat.set_langevin(kT=KT, gamma=GAMMA, seed=langevin_seed)\n", + "system.integrator.set_vv()\n", + "\n", + "p3m_params = {\"mesh\": 10}\n", + "p3m = espressomd.electrostatics.P3M(prefactor=L_BJERRUM * KT, accuracy=1e-3, **p3m_params)\n", + "system.electrostatics.solver = p3m\n", + "system.integrator.run(2 * steps_per_loop)\n", + "\n", + "print(\"Added electrostatics\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "solution2": "hidden", + "solution2_first": true + }, + "source": [ + "Now we want to add a coupling to the 
reservoir. We can formally represent the insertion and deletion of ion pairs as a chemical reaction:\n", + "\n", + "$$\n", + "\\emptyset \\rightleftharpoons \\mathrm{X}^- + \\mathrm{X}^+\n", + "$$\n", + "\n", + "It is important to note that the reaction can run in both directions, i. e. insertion and deletion of salt ions. These reactions are described by a concentration based equilibrium constant:\n", + "\n", + "$$\n", + "K = c_{\\mathrm{salt,res}}^2 \\exp \\left ( \\mu_{\\mathrm{salt,res}}^{\\mathrm{ex}} \\right )\n", + "$$\n", + "\n", + "Here $\\mu_{\\mathrm{salt,res}}^{\\mathrm{ex}}$ denotes the excess chemical potential of a salt ion pair as we measured in the [Widom insertion tutorial](https://espressomd.github.io/tutorials/widom_insertion/widom_insertion.html).\n", + "\n", + "**Exercise**\n", + "\n", + "* Set up the reaction defined above with the corresponding equilibrium constant.\n", + "\n", + "**Hint**\n", + "\n", + "* You can find further explanation for setting up a reaction [here](https://espressomd.github.io/doc/reaction_methods.html)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "solution2": "hidden" + }, + "source": [ + "```python\n", + "K_XX = c_salt_res_sim.magnitude**2 * np.exp(\n", + " excess_chemical_potential_monovalent_pairs_in_bulk(\n", + " c_salt_res_sim.magnitude))\n", + "RE = espressomd.reaction_methods.ReactionEnsemble(kT=KT,\n", + " exclusion_range=1.0,\n", + " seed=reaction_seed)\n", + "RE.add_reaction(gamma=K_XX,\n", + " reactant_types=[],\n", + " reactant_coefficients=[],\n", + " product_types=[types[\"Xplus\"], types[\"Xminus\"]],\n", + " product_coefficients=[1, 1],\n", + " default_charges={\n", + " types[\"Xplus\"]: charges[\"Xplus\"],\n", + " types[\"Xminus\"]: charges[\"Xminus\"]\n", + " })\n", + "# Set the non interacting type at the smallest integer.\n", + "# This may speed up the simulation (see Espresso docummentation section 19. Reaction methods)\n", + "RE.set_non_interacting_type(type=len(types) + 1)\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": { + "solution2": "hidden", + "solution2_first": true + }, + "source": [ + "Now we are ready to perform the actual sampling.\n", + "\n", + "**Exercise**\n", + "* In order to sample for different reservoir salt concentrations, set up a function ``perform_sampling(salt_concentration)`` that takes as input the salt concentration of the reservoir.\n", + "* In this function perform a warm up loop in order to equilibrate the system to the new salt concentration before the production run.\n", + "* Measure the particle numbers of positive and negative salt ions in each iteration in order to return the partition coefficients $\\xi_+$ and $\\xi_-$ at the end.\n", + "\n", + "**Hint**\n", + "* Make sure to reset the equilibrium constant according to the salt concentration." 
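+    , " To update the constant of an already defined reaction, you can use `RE.change_reaction_constant(gamma=..., reaction_id=0)`, as done in the solution below."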
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "solution2": "hidden" + }, + "source": [ + "```python\n", + "def perform_sampling(salt_concentration, number_of_loops):\n", + " K_XX = salt_concentration**2 * np.exp(\n", + " excess_chemical_potential_monovalent_pairs_in_bulk(salt_concentration))\n", + " RE.change_reaction_constant(gamma=K_XX, reaction_id=0)\n", + " for i in tqdm.trange(warmup_loops, bar_format=\"{l_bar}{bar:60}{r_bar}{bar:-60b}\"):\n", + " system.integrator.run(steps_per_loop)\n", + " RE.reaction(samples_per_loop)\n", + "\n", + " particle_numbers_positive = []\n", + " particle_numbers_negative = []\n", + "\n", + " for i in tqdm.trange(number_of_loops, bar_format=\"{l_bar}{bar:60}{r_bar}{bar:-60b}\"):\n", + " system.integrator.run(steps_per_loop)\n", + " RE.reaction(samples_per_loop)\n", + "\n", + " particle_numbers_positive.append(\n", + " system.number_of_particles(type=types[\"Xplus\"]))\n", + " particle_numbers_negative.append(\n", + " system.number_of_particles(type=types[\"Xminus\"]))\n", + " partition_coefficient_positive = np.mean(np.asarray(particle_numbers_positive))\\\n", + " / (BOX_LENGTH**3 * salt_concentration)\n", + " partition_coefficient_negative = np.mean(np.asarray(particle_numbers_negative))\\\n", + " / (BOX_LENGTH**3 * salt_concentration)\n", + " return partition_coefficient_positive, partition_coefficient_negative\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can perform the actual simulations for the different salt concentrations and measure the partition coefficients." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "partition_coefficients_positives_array = np.zeros_like(salt_concentration_sim)\n", + "partition_coefficients_negatives_array = np.zeros_like(salt_concentration_sim)\n", + "\n", + "for i, salt_concentration in enumerate(salt_concentration_sim.magnitude):\n", + " print(f\"Salt concentration {i+1}/{len(salt_concentration_sim)}\")\n", + " partition_coefficients_positives_array[\n", + " i], partition_coefficients_negatives_array[i] = perform_sampling(\n", + " salt_concentration, number_of_loops_for_each_concentration[i])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To compare the results of our simulations we define a function for the analytical solution." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def analytical_solution(ratio):\n", + " return -ratio + np.sqrt(ratio**2 + 1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then we can measure the partition coefficients derived from the simulations and compare them to the analytical results for an ideal system." 
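+    , " Note that, following the definition of the universal partition coefficient above, the quantity plotted for the positive ions is $\\xi_+ - c_{\\mathrm{M}^-}^{\\mathrm{sys}}/c_{\\mathrm{salt}}^{\\mathrm{res}}$, which corrects for the counterions of the polyelectrolyte."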
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "universal_partion_coefficient_positive = \\\n", + " partition_coefficients_positives_array - c_monomer.magnitude / salt_concentration_si.magnitude\n", + "fig = plt.figure(figsize=(10, 6))\n", + "ratios = np.logspace(-1, 2, num=200)\n", + "plt.loglog(ratios, analytical_solution(ratios), color=\"black\", label=r\"$\\xi^{\\mathrm{Donnan}}$\")\n", + "plt.loglog(c_monomer.magnitude / (2 * salt_concentration_si.magnitude), partition_coefficients_negatives_array,\n", + " marker=\"o\", markersize=10, markeredgewidth=1.5, markeredgecolor=\"k\", markerfacecolor=\"none\",\n", + " linestyle=\"none\", label=r\"$\\xi_-$\")\n", + "plt.loglog(c_monomer.magnitude / (2 * salt_concentration_si.magnitude), universal_partion_coefficient_positive,\n", + " marker=\"o\", markersize=4.5, markeredgewidth=0., markeredgecolor=\"k\", markerfacecolor=\"k\",\n", + " linestyle=\"none\", label=r\"$\\xi_+ - c_{\\mathrm{M}^-}^{\\mathrm{sys}}/c_{\\mathrm{salt}}^{\\mathrm{res}}$\")\n", + "plt.xlabel(r\"$\\dfrac{c_{\\mathrm{M}^-}^{\\mathrm{sys}}}{2c_{\\mathrm{salt}}^{\\mathrm{res}}}$\")\n", + "plt.ylabel(r\"$\\xi$\")\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "solution2": "hidden", + "solution2_first": true + }, + "source": [ + "**Exercise**\n", + "* Interpret the deviation of the simulation results from the ideal prediction." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "solution2": "hidden" + }, + "source": [ + "First, we note that the data points for negative and positive ions\n", + "always collapse onto the same value for ξ, i.e. ξ is indeed a universal value that\n", + "is always the same for negative and positive ions. When comparing these results\n", + "to the analytical prediction for an ideal gas, it is easy to see that the partition\n", + "coefficient is higher for the case with electrostatic interactions. This is essentially\n", + "due to the fact that the overall charge density inside the system is higher than in\n", + "the reservoir, and thus the activity coefficient of an ion pair inside the system is\n", + "smaller than in the reservoir, i.e. the free energy cost of putting additional ion\n", + "pairs into the system is overall lowered as compared to an ideal gas. Still, we see\n", + "that ξ < 1 even for the interacting case, i.e. the salt concentration is still smaller\n", + "inside the system than in the reservoir." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# References" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[1] Daan Frenkel, Berend Smit. Understanding Molecular Simulation: From Algorithms to Applications. 2nd edition, chapter 5: Monte Carlo Simulations in Various Ensembles, section 5.6: Grand-Canonical Ensemble, pp. 126–135. Academic Press, 2002, ISBN: 978-0-12-267351-1, doi:[10.1016/B978-012267351-1/50007-9](https://doi.org/10.1016/B978-012267351-1/50007-9). \n", + "[2] N. Metropolis, A. Rosenbluth, M. Rosenbluth, A. Teller. Equation of state calculations by fast computing machines. *J. Chem. Phys.* 21, pp. 1087–1092 (1953), doi:[10.1063/1.1699114](https://doi.org/10.1063/1.1699114). \n", + "[3] F. Donnan. The theory of membrane equilibria. *Chemical Reviews* 1(1), pp. 73–90 (1924), doi:[10.1021/cr60001a003](https://doi.org/10.1021/cr60001a003). 
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/samples/high_throughput_with_dask/Readme.md b/samples/high_throughput_with_dask/Readme.md new file mode 100644 index 00000000000..682dbc46d4f --- /dev/null +++ b/samples/high_throughput_with_dask/Readme.md @@ -0,0 +1,79 @@ +# Introduction + +This sample illustrates how to run a large amount of short ESPResSo simulations +with Dask. Dask is a parallel computing library in Python that enables efficient +handling of large datasets and computation tasks. +Note that this sample is not meant to produce meaningful physics results. +The sample consists of the following parts: + +- `espresso_dask.py`: contains helper functions that handle running ESPResSo + within Dask and communicating data between Dask and ESPResSo +- `lj_pressure.py`: simulation script which obtains the average pressure + for a Lennard-Jones liquid at a given volume fraction +- `run_pv.py`: Uses Dask to run the simulation script at various volume + fractions and obtain a pressure vs volume fraction curve. +- `test_dask_espresso.py`: corresponding unit tests, to be run with `pytest` +- `echo.py`: Used to mock an ESPResSo simulation for the unit tests + +## How to Use + +Note: It is not possible to use ESPResSo with `dask.distributed.LocalCluster`. +Instead, follow the procedure described below: + +1. Move to the sample directory + ```bash + cd samples/high_throughput_with_dask + ``` +1. Open `run_pv.py` in an editor and adapt the `PYPRESSO` variable + to the correct path to `pypresso` +1. Set the `PYTHONPATH` environment variable such that it includes + the directory in which `dask_espresso.py` resides: + ```bash + export PYTHONPATH="${PYTHONPATH:+$PYTHONPATH:}$(realpath .)" + ``` +1. Start the Dask scheduler + ```bash + dask scheduler & + ``` +1. Note the address of the scheduler (e.g., `tcp://127.0.0.1:8786`) +1. Launch a few workers using the correct scheduler address: + ```bash + for i in {1..5}; do dask worker SCHEDULER_ADDRESS & done + ``` +1. Run `python3 run_pv.py SCHEDULER_ADDRESS`, again inserting the scheduler address from above +1. Use `fg` and Ctrl-C to shut down the Dask workers and scheduler, + or use `pkill "dask"` if you don't have any other Dask scheduler + running in the background. + +Note that Dask can also be used on compute clusters with HTCondor and Slurm. + +## Technical Notes + +- Since currently only one ESPResSo instance can be used in a Python script, + ESPResSo is run as a separate process. This is accomplished by the + `dask_espresso_task` function in `dask_espresso.py`. +- Also, the data transfer between Dask and ESPResSo has to be handled such that + it is safe for inter-process communication. This is achieved via the `pickle` + and `base64` Python modules. Encoding and decoding functions can be found in + `dask_espresso.py` +- The communication happens via the standard input and output of the simulation + script. Therefore, it is essential not to use simple `print()` calls in the + simulation script. Instead, use the `logging` module for status messages. + These will go to the standard error stream. 
+- To use this sample for your own simulations: + - Use `dask_espresso.py` as is. + - Adapt `run_pv.py` to run simulations with the parameters you need. + The keyword arguments passed to `dask_espresso_task()` will be passed + as a dictionary to the simulation. + - Use `data = dask_espresso.get_data_from_stdin()` to get the parameters + at the beginning of the simulation script. + - Use `print(dask_espresso.encode_transport_data(result))` at the end + of your simulation to pass the result to Dask. + - The simulation parameters and results can be any Python object that + can be safely pickled and do not require additional context. Basic data + types (int, float, string, list, dict) as well as numpy arrays work, + whereas objects that require additional context to be valid do not + (e.g. file objects and ESPResSo particles). + - To test your simulation script, including the transfer of parameters + and results outside Dask, you can also use + the `dask_espresso.dask_espresso_task.py` function. diff --git a/samples/high_throughput_with_dask/dask_espresso.py b/samples/high_throughput_with_dask/dask_espresso.py new file mode 100644 index 00000000000..e6dd8cc4906 --- /dev/null +++ b/samples/high_throughput_with_dask/dask_espresso.py @@ -0,0 +1,77 @@ +# +# Copyright (C) 2023 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +"""Helper functions to use ESPResSo with Dask.""" + +import pickle +import base64 +import sys +import subprocess +import logging +import dask + + +def encode_transport_data(data): + """ + Use ``pickle`` and ``base64`` to convert the provided data to a string + which can be passed safely between the Dask scheduler, worker and ESPResSo. + """ + return base64.b64encode(pickle.dumps(data)).decode("utf-8") + + +def decode_transport_data(encoded_data): + """ + Convert the transport data back to a Python object via ``base64`` + and ``pickle``. + """ + pickle_data = base64.b64decode(encoded_data) + return pickle.loads(pickle_data) + + +def get_data_from_stdin(): + return decode_transport_data(sys.stdin.read()) + + +@dask.delayed +def dask_espresso_task(pypresso, script, **kwargs): + """ + Run ESPResSo asynchronously as a Dask task. + + pypresso: :obj:`str` + Path to pypresso + script: :obj:`str` + Simulation script to run with pypresso + kwargs: + The keyword arguments are passed encoded and sent to + the standard input of the simulation script. + Use ``data = get_data_from_stdin()`` to obtain it. 
+ """ + + logger = logging.getLogger(__name__) + encoded_data = encode_transport_data(kwargs) + espresso = subprocess.Popen([pypresso, script], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True) + espresso.stdin.write(encoded_data) + out, err = espresso.communicate() + if err != "": + logger.warning("STDERR output from ESPResSo\n", err) + return decode_transport_data(out) diff --git a/samples/high_throughput_with_dask/echo.py b/samples/high_throughput_with_dask/echo.py new file mode 100644 index 00000000000..d6603841e6a --- /dev/null +++ b/samples/high_throughput_with_dask/echo.py @@ -0,0 +1,29 @@ +# +# Copyright (C) 2023 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +""" +This is part of the unit tests. It reads encoded simulation data from stdin, +decodes it, adds ``processed=True`` and outputs the encoded result to stdout. +""" + +import dask_espresso as de +data = de.get_data_from_stdin() +data.update(processed=True) + +print(de.encode_transport_data(data)) diff --git a/samples/high_throughput_with_dask/lj_pressure.py b/samples/high_throughput_with_dask/lj_pressure.py new file mode 100644 index 00000000000..ca382982e27 --- /dev/null +++ b/samples/high_throughput_with_dask/lj_pressure.py @@ -0,0 +1,120 @@ +# +# Copyright (C) 2013-2023 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +""" +Obtain the average pressure of a Lennard-Jones liquid. +For use with ``dask_espresso``. +Adapted from :file:`samples/lj_liquid.py`. +""" + +import espressomd +import numpy as np + +import dask_espresso as de + + +# Note: Avoid print() in this script, as the standard output is used +# for data transfer to Dask. Use the logging module for status messages. 
+import logging +logger = logging.getLogger(__name__) + + +# Get parameters from Dask via the standard input stream +params = de.get_data_from_stdin() + +logger.info("Parameters:", params) +n_particles = int(params["n_particles"]) +volume_fraction = float(params["volume_fraction"]) +n_steps = int(params["n_steps"]) + +required_features = ["LENNARD_JONES"] +espressomd.assert_features(required_features) + +rng = np.random.default_rng() + +# System +############################################################# + +# Interaction parameters (Lennard-Jones) +############################################################# + +lj_eps = 1.0 # LJ epsilon +lj_sig = 1.0 # particle diameter +lj_cut = lj_sig * 2**(1. / 6.) # cutoff distance + +# System parameters +############################################################# +# volume of N spheres with radius r: N * (4/3*pi*r^3) +box_l = (n_particles * 4. / 3. * np.pi * (lj_sig / 2.)**3 + / volume_fraction)**(1. / 3.) + +# System +############################################################# +system = espressomd.System(box_l=3 * [box_l]) + +# Integration parameters +############################################################# +system.time_step = 0.01 +system.cell_system.skin = 0.4 + +# Interaction setup +############################################################# +system.non_bonded_inter[0, 0].lennard_jones.set_params( + epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto") + +# Particle setup +############################################################# + +system.part.add(pos=rng.random((n_particles, 3)) * system.box_l) + +# Warmup Integration +############################################################# + +# warmup +logger.info("Warmup") +system.integrator.set_steepest_descent( + f_max=0, max_displacement=0.01, gamma=1E-3) +system.integrator.run(1) +while np.any(np.abs(system.part.all().f) * system.time_step > .1): + system.integrator.run(10) + +system.integrator.set_vv() +system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42) + +system.integrator.run(1000) +min_skin = 0.2 +max_skin = 1.0 +# tuning and equilibration +logger.info("Tune skin: {:.3f}".format(system.cell_system.tune_skin( + min_skin=min_skin, max_skin=max_skin, tol=0.05, int_steps=100))) +system.integrator.run(1000) + +logger.info("Measuring") + +pressures = np.zeros(n_steps) +for i in range(n_steps): + system.integrator.run(10) + pressures[i] = system.analysis.pressure()["total"] + +# Put the simulation results into a dictionary +result = {"pressure": np.mean(pressures), + "pressure_std_dev": np.std(pressures)} + +# Output the results for Dask via the standard output stream +print(de.encode_transport_data(result)) diff --git a/samples/high_throughput_with_dask/run_pv.py b/samples/high_throughput_with_dask/run_pv.py new file mode 100644 index 00000000000..860c523423d --- /dev/null +++ b/samples/high_throughput_with_dask/run_pv.py @@ -0,0 +1,75 @@ +# +# Copyright (C) 2013-2023 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +""" +Run a large amount of short ESPResSo simulations with the Dask parallel computing +library :cite:`rocklin15a`. Print the mean and standard error of the mean of the +scalar pressure of a Lennard-Jones gas simulated at different volume fractions. +""" + +import sys +import dask.distributed +import logging +import numpy as np + +import dask_espresso + +logging.basicConfig(level=logging.WARN) + +PYPRESSO = "/data/weeber/es/build/pypresso" # adapt this path +SIM_SCRIPT = "lj_pressure.py" + +# Simulation parameters +N_STEPS = int(2E4) +N_PARTICLES = 100 +VOLUME_FRACTIONS = np.arange(0.1, 0.52, 0.01) + + +# Scheduler address +if len(sys.argv) != 2: + raise Exception("Pass the scheduler address as command-line argument") +scheduler_address = sys.argv[1] + +# Connect to scheduler +# Note: We pass a scheduler address here. +# dask.distributed.LocalCluster cannot be used, but clusters with +# remote workers such as HTCondorCluster likely can. +client = dask.distributed.Client(scheduler_address) + + +# List of futures for simulation results +futures = [] + +# Launch simulations asynchronously +for volume_fraction in VOLUME_FRACTIONS: + sim_params = {"volume_fraction": volume_fraction, + "n_particles": N_PARTICLES, + "n_steps": N_STEPS} + futures.append(client.compute(dask_espresso.dask_espresso_task( + PYPRESSO, SIM_SCRIPT, **sim_params))) + +# Show progress of calculation (optional) +dask.distributed.progress(futures) + +# Gather the results of all futures, waiting for completion, if necessary +sim_results = client.gather(futures) + +# Display results +for vol_frac, out in zip(VOLUME_FRACTIONS, sim_results): + print(f"{vol_frac:3f} {out['pressure']:.3f} {out['pressure_std_dev']:.3f}") diff --git a/samples/high_throughput_with_dask/test_dask_espresso.py b/samples/high_throughput_with_dask/test_dask_espresso.py new file mode 100644 index 00000000000..877a70564a5 --- /dev/null +++ b/samples/high_throughput_with_dask/test_dask_espresso.py @@ -0,0 +1,39 @@ +# +# Copyright (C) 2023 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+#
+
+"""Unit tests for the ``dask_espresso`` python module."""
+
+import dask_espresso as de
+import numpy as np
+import sys
+
+
+def test_encode_decode():
+    data = {"a": 1, "b": np.random.random((3, 3))}
+    encoded = de.encode_transport_data(data)
+    decoded = de.decode_transport_data(encoded)
+    for k, v in decoded.items():
+        # element-wise comparison: the decoded values must equal the originals
+        assert np.all(data[k] == v)
+    assert list(data.keys()) == list(decoded.keys())
+
+
+def test_espresso_runner():
+    data = {"hello": "world"}
+    result = de.dask_espresso_task(sys.executable, "echo.py", **data).compute()
+    assert result == {"hello": "world", "processed": True}
diff --git a/src/core/ParticlePropertyIterator.hpp b/src/core/ParticlePropertyIterator.hpp
new file mode 100644
index 00000000000..2034be8ac3e
--- /dev/null
+++ b/src/core/ParticlePropertyIterator.hpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2023 The ESPResSo project
+ *
+ * This file is part of ESPResSo.
+ *
+ * ESPResSo is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * ESPResSo is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "BoxGeometry.hpp"
+#include "Particle.hpp"
+#include "ParticleRange.hpp"
+
+#include <boost/iterator/transform_iterator.hpp>
+#include <boost/range/iterator_range.hpp>
+
+#include <utils/Vector.hpp>
+
+namespace ParticlePropertyRange {
+
+namespace detail {
+template <class Kernel>
+auto create_transform_range(ParticleRange const &particles, Kernel kernel) {
+  auto transform_iterator_begin =
+      boost::make_transform_iterator(particles.begin(), kernel);
+  auto transform_iterator_end =
+      boost::make_transform_iterator(particles.end(), kernel);
+  return boost::make_iterator_range(
+      transform_iterator_begin, transform_iterator_end);
+}
+} // namespace detail
+
+auto unfolded_pos_range(ParticleRange const &particles,
+                        BoxGeometry const &box) {
+  auto return_unfolded_pos = [&box](Particle &p) {
+    return ::detail::unfolded_position(p.pos(), p.image_box(), box.length());
+  };
+  return detail::create_transform_range(particles, return_unfolded_pos);
+}
+
+auto charge_range(ParticleRange const &particles) {
+  auto return_charge = [](Particle &p) -> double & { return p.q(); };
+  return detail::create_transform_range(particles, return_charge);
+}
+auto force_range(ParticleRange const &particles) {
+  auto return_force = [](Particle &p) -> Utils::Vector3d & {
+    return p.force();
+  };
+  return detail::create_transform_range(particles, return_force);
+}
+
+} // namespace ParticlePropertyRange
diff --git a/src/core/electrostatics/p3m.cpp b/src/core/electrostatics/p3m.cpp
index b60b566afd9..03758b444ad 100644
--- a/src/core/electrostatics/p3m.cpp
+++ b/src/core/electrostatics/p3m.cpp
@@ -41,6 +41,7 @@
 #include "BoxGeometry.hpp"
 #include "LocalBox.hpp"
 #include "Particle.hpp"
+#include "ParticlePropertyIterator.hpp"
 #include "ParticleRange.hpp"
 #include "actor/visitors.hpp"
 #include "cell_system/CellStructure.hpp"
@@ -63,6 +64,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -352,8 +354,9 @@ void CoulombP3M::assign_charge(double q, Utils::Vector3d const &real_pos) {
 }
 
 template <int cao> struct AssignForces {
+  template <typename combined_ranges>
   void operator()(p3m_data_struct &p3m, double
force_prefac, - ParticleRange const &particles) const { + combined_ranges const &p_q_force_range) const { using Utils::make_const_span; using Utils::Span; using Utils::Vector; @@ -363,9 +366,11 @@ template struct AssignForces { /* charged particle counter */ auto p_index = std::size_t{0ul}; - for (auto &p : particles) { - if (p.q() != 0.0) { - auto const pref = p.q() * force_prefac; + for (auto zipped : p_q_force_range) { + auto p_q = boost::get<0>(zipped); + auto &p_force = boost::get<1>(zipped); + if (p_q != 0.0) { + auto const pref = p_q * force_prefac; auto const w = p3m.inter_weights.load(p_index); Utils::Vector3d force{}; @@ -374,22 +379,23 @@ template struct AssignForces { p3m.E_mesh[2][ind]}; }); - p.force() -= pref * force; + p_force -= pref * force; ++p_index; } } } }; +template static auto calc_dipole_moment(boost::mpi::communicator const &comm, - ParticleRange const &particles, - BoxGeometry const &box_geo) { - auto const local_dip = boost::accumulate( - particles, Utils::Vector3d{}, - [&box_geo](Utils::Vector3d const &dip, auto const &p) { - return dip + p.q() * box_geo.unfolded_position(p.pos(), p.image_box()); - }); - + combined_ranges const &p_q_unfolded_pos_range) { + auto const local_dip = + boost::accumulate(p_q_unfolded_pos_range, Utils::Vector3d{}, + [](Utils::Vector3d const &dip, auto const &q_pos) { + auto const p_q = boost::get<0>(q_pos); + auto const &p_unfolded_pos = boost::get<1>(q_pos); + return dip + p_q * p_unfolded_pos; + }); return boost::mpi::all_reduce(comm, local_dip, std::plus<>()); } @@ -465,13 +471,19 @@ double CoulombP3M::long_range_kernel(bool force_flag, bool energy_flag, p3m.sm.gather_grid(p3m.rs_mesh.data(), comm_cart, p3m.local_mesh.dim); fft_perform_forw(p3m.rs_mesh.data(), p3m.fft, comm_cart); + auto p_q_range = ParticlePropertyRange::charge_range(particles); + auto p_force_range = ParticlePropertyRange::force_range(particles); + auto p_unfolded_pos_range = + ParticlePropertyRange::unfolded_pos_range(particles, box_geo); + // Note: after these calls, the grids are in the order yzx and not xyz // anymore!!! /* The dipole moment is only needed if we don't have metallic boundaries. */ auto const box_dipole = (p3m.params.epsilon != P3M_EPSILON_METALLIC) - ? calc_dipole_moment(comm_cart, particles, box_geo) - : Utils::Vector3d::broadcast(0.); + ? std::make_optional(calc_dipole_moment( + comm_cart, boost::combine(p_q_range, p_unfolded_pos_range))) + : std::nullopt; auto const volume = box_geo.volume(); auto const pref = 4. * Utils::pi() / volume / (2. * p3m.params.epsilon + 1.); @@ -518,14 +530,17 @@ double CoulombP3M::long_range_kernel(bool force_flag, bool energy_flag, p3m.local_mesh.dim); auto const force_prefac = prefactor / volume; - Utils::integral_parameter(p3m.params.cao, p3m, - force_prefac, particles); + Utils::integral_parameter( + p3m.params.cao, p3m, force_prefac, + boost::combine(p_q_range, p_force_range)); // add dipole forces - if (p3m.params.epsilon != P3M_EPSILON_METALLIC) { - auto const dm = prefactor * pref * box_dipole; - for (auto &p : particles) { - p.force() -= p.q() * dm; + if (box_dipole) { + auto const dm = prefactor * pref * box_dipole.value(); + for (auto zipped : boost::combine(p_q_range, p_force_range)) { + auto p_q = boost::get<0>(zipped); + auto &p_force = boost::get<1>(zipped); + p_force -= p_q * dm; } } } @@ -549,8 +564,8 @@ double CoulombP3M::long_range_kernel(bool force_flag, bool energy_flag, energy -= p3m.square_sum_q * Utils::pi() / (2. 
* volume * Utils::sqr(p3m.params.alpha)); /* dipole correction */ - if (p3m.params.epsilon != P3M_EPSILON_METALLIC) { - energy += pref * box_dipole.norm2(); + if (box_dipole) { + energy += pref * box_dipole.value().norm2(); } } return prefactor * energy; diff --git a/src/python/object_in_fluid/oif_classes.py b/src/python/object_in_fluid/oif_classes.py index f471f18d45f..7d5fa4e956b 100644 --- a/src/python/object_in_fluid/oif_classes.py +++ b/src/python/object_in_fluid/oif_classes.py @@ -1147,7 +1147,7 @@ def elastic_forces( if (el_forces[0] == 1) or (el_forces[5] == 1) or ( f_metric[0] == 1) or (f_metric[5] == 1): # initialize list - stretching_forces_list = np.zeros((self.mesh.points, 3)) + stretching_forces_list = np.zeros((len(self.mesh.points), 3)) # calculation uses edges, but results are stored for nodes for e in self.mesh.edges: a_current_pos = e.A.get_pos() @@ -1171,7 +1171,7 @@ def elastic_forces( if (el_forces[1] == 1) or (el_forces[5] == 1) or ( f_metric[1] == 1) or (f_metric[5] == 1): # initialize list - bending_forces_list = np.zeros((self.mesh.points, 3)) + bending_forces_list = np.zeros((len(self.mesh.points), 3)) # calculation uses bending incidences, but results are stored for # nodes for angle in self.mesh.angles: @@ -1209,7 +1209,7 @@ def elastic_forces( if (el_forces[2] == 1) or (el_forces[5] == 1) or ( f_metric[2] == 1) or (f_metric[5] == 1): # initialize list - local_area_forces_list = np.zeros((self.mesh.points, 3)) + local_area_forces_list = np.zeros((len(self.mesh.points), 3)) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() @@ -1243,9 +1243,7 @@ def elastic_forces( if (el_forces[3] == 1) or (el_forces[5] == 1) or ( f_metric[3] == 1) or (f_metric[5] == 1): # initialize list - global_area_forces_list = [] - for _ in self.mesh.points: - global_area_forces_list.append([0.0, 0.0, 0.0]) + global_area_forces_list = np.zeros((len(self.mesh.points), 3)) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() @@ -1275,7 +1273,7 @@ def elastic_forces( if (el_forces[4] == 1) or (el_forces[5] == 1) or ( f_metric[4] == 1) or (f_metric[5] == 1): # initialize list - volume_forces_list = np.zeros((self.mesh.points, 3)) + volume_forces_list = np.zeros((len(self.mesh.points), 3)) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() @@ -1344,7 +1342,7 @@ def elastic_forces( # output vtk (folded) if vtk_file is not None: - if el_forces == (0, 0, 0, 0, 0, 0): + if sum(el_forces) == 0: raise Exception("OifCell: elastic_forces: The option elastic_forces was not used. 
" "Nothing to output to vtk file.") self.output_vtk_pos_folded(vtk_file) @@ -1405,8 +1403,7 @@ def elastic_forces( file_name=raw_data_file, data=elastic_forces_list) # return f_metric - if f_metric[0] + f_metric[1] + f_metric[2] + \ - f_metric[3] + f_metric[4] + f_metric[5] > 0: + if sum(f_metric) > 0: results = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] if f_metric[0] == 1: results[0] = ks_f_metric diff --git a/testsuite/python/oif_volume_conservation.py b/testsuite/python/oif_volume_conservation.py index 054787f24f0..ca1b0673e7a 100644 --- a/testsuite/python/oif_volume_conservation.py +++ b/testsuite/python/oif_volume_conservation.py @@ -27,40 +27,37 @@ @utx.skipIfMissingFeatures("MASS") -class OifVolumeConservation(ut.TestCase): - - """Loads a soft elastic sphere via object_in_fluid, stretches it and checks - restoration of original volume due to elastic forces.""" +class Test(ut.TestCase): system = espressomd.System(box_l=(50., 50., 50.)) system.time_step = 0.4 system.cell_system.skin = 0.5 - def check_relaxation(self, **kwargs): - self.system.part.clear() + def tearDown(self): self.system.thermostat.turn_off() - half_box_l = np.copy(self.system.box_l) / 2. + self.system.part.clear() - # creating the template for OIF object - cell_type = oif.OifCellType( + def make_cell_type(self, **kwargs): + return oif.OifCellType( nodes_file=str(tests_common.data_path("sphere393nodes.dat")), triangles_file=str( tests_common.data_path("sphere393triangles.dat")), system=self.system, **kwargs, kb=1.0, kal=1.0, kag=0.1, kv=0.1, check_orientation=False, resize=(3.0, 3.0, 3.0)) - # creating the OIF object - cell0 = oif.OifCell( - cell_type=cell_type, particle_type=0, origin=half_box_l) + def check_relaxation(self, **kwargs): + half_box_l = np.copy(self.system.box_l) / 2. + cell = oif.OifCell(cell_type=self.make_cell_type(**kwargs), + particle_type=0, origin=half_box_l) partcls = self.system.part.all() - diameter_init = cell0.diameter() + diameter_init = cell.diameter() self.assertAlmostEqual(diameter_init, 24., delta=1e-3) # OIF object is being stretched by factor 1.5 partcls.pos = (partcls.pos - half_box_l) * 1.5 + half_box_l - diameter_stretched = cell0.diameter() + diameter_stretched = cell.diameter() self.assertAlmostEqual(diameter_stretched, 36., delta=1e-3) # Apply non-isotropic deformation @@ -85,7 +82,7 @@ def check_relaxation(self, **kwargs): for _ in range(20): self.system.integrator.run(steps=20) xdata.append(self.system.time) - ydata.append(cell0.diameter()) + ydata.append(cell.diameter()) # check exponential decay (prefactor, lam, _, diameter_final), _ = scipy.optimize.curve_fit( lambda x, a, b, c, d: a * np.exp(-b * x + c) + d, xdata, ydata, @@ -94,16 +91,42 @@ def check_relaxation(self, **kwargs): self.assertGreater(prefactor, 0.) self.assertAlmostEqual(diameter_final, diameter_init, delta=0.005) self.assertAlmostEqual(lam, 0.0325, delta=0.0001) + self.system.thermostat.turn_off() + self.system.part.clear() - def test(self): + def test_00_volume_relaxation(self): + """ + Load a soft elastic sphere via ``object_in_fluid``, stretch it and + check restoration of original volume due to elastic forces. + """ self.assertEqual(self.system.max_oif_objects, 0) - with self.subTest(msg='linear stretching'): + with self.subTest(msg="linear stretching"): self.check_relaxation(kslin=1.) self.assertEqual(self.system.max_oif_objects, 1) - with self.subTest(msg='neo-Hookean stretching'): + with self.subTest(msg="neo-Hookean stretching"): self.check_relaxation(ks=1.) 
self.assertEqual(self.system.max_oif_objects, 2) + def test_01_elastic_forces(self): + half_box_l = np.copy(self.system.box_l) / 2. + cell = oif.OifCell(cell_type=self.make_cell_type(), + particle_type=0, origin=half_box_l) + # stretch cell + partcls = self.system.part.all() + partcls.pos = (partcls.pos - half_box_l) * 1.5 + half_box_l + # reduce number of triangles to speed up calculation + cell.mesh.triangles = cell.mesh.triangles[:20] + # smoke test + results = cell.elastic_forces(el_forces=6 * [0], f_metric=6 * [1]) + ref = [0., 1.36730815e-12, 22.4985704, 6838.5749, 7.3767594, 6816.6342] + np.testing.assert_allclose(results, ref, atol=1e-10, rtol=1e-7) + self.assertEqual(cell.elastic_forces(), 0) + # check exceptions + with self.assertRaises(Exception): + cell.elastic_forces(el_forces=6 * [0], vtk_file="test") + with self.assertRaises(Exception): + cell.elastic_forces(el_forces=6 * [0], raw_data_file="test") + if __name__ == "__main__": ut.main() diff --git a/testsuite/scripts/tutorials/CMakeLists.txt b/testsuite/scripts/tutorials/CMakeLists.txt index 7991e706561..1779def85d6 100644 --- a/testsuite/scripts/tutorials/CMakeLists.txt +++ b/testsuite/scripts/tutorials/CMakeLists.txt @@ -65,6 +65,7 @@ tutorial_test(FILE test_electrodes_1.py) # tutorial_test(FILE test_electrodes_2.py) # TODO: unstable, see issue #4798 tutorial_test(FILE test_constant_pH__interactions.py) tutorial_test(FILE test_widom_insertion.py) +tutorial_test(FILE test_grand_canonical_monte_carlo.py) add_custom_target( check_tutorials diff --git a/testsuite/scripts/tutorials/test_grand_canonical_monte_carlo.py b/testsuite/scripts/tutorials/test_grand_canonical_monte_carlo.py new file mode 100644 index 00000000000..7b4408d4643 --- /dev/null +++ b/testsuite/scripts/tutorials/test_grand_canonical_monte_carlo.py @@ -0,0 +1,44 @@ +# +# Copyright (C) 2023 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import unittest as ut +import importlib_wrapper +import numpy as np + +tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import( + "@TUTORIALS_DIR@/grand_canonical_monte_carlo/grand_canonical_monte_carlo.py", + p3m_params={"mesh": 10, "cao": 6, "r_cut": 8.22}) + + +@skipIfMissingFeatures +class Tutorial(ut.TestCase): + + def test(self): + ratios = tutorial.c_monomer.magnitude / \ + (2. * tutorial.salt_concentration_si.magnitude) + ref_xi = tutorial.analytical_solution(ratios) + sim_xi_minus = tutorial.partition_coefficients_negatives_array + sim_xi_plus = tutorial.universal_partion_coefficient_positive + np.testing.assert_allclose( + sim_xi_minus, sim_xi_plus, rtol=1e-5, atol=1e-5) + np.testing.assert_allclose(sim_xi_minus / ref_xi, 2., rtol=0., atol=2.) + + +if __name__ == "__main__": + ut.main()
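The two Dask sample scripts above communicate through the `dask_espresso` helpers: the driver serializes a parameter dictionary, the `pypresso` worker reads it from its standard input, and the result travels back over standard output. Below is a minimal, self-contained sketch of that round trip using plain `json` and `subprocess`; it illustrates the transport pattern only and is not the actual `dask_espresso` implementation (the inline worker source and the function `run_once` are hypothetical stand-ins).

# Illustrative sketch only: mimics the stdin/stdout parameter transport used by
# the Dask samples, with plain JSON instead of the dask_espresso helpers.
# The inline worker source and the function run_once() are hypothetical.
import json
import subprocess
import sys

WORKER_SOURCE = """
import json, sys
params = json.load(sys.stdin)      # worker: read parameters from stdin
result = {"pressure": 2.5 * params["volume_fraction"]}   # stand-in for a simulation
print(json.dumps(result))          # worker: write the encoded result to stdout
"""


def run_once(volume_fraction):
    # driver: start a worker process and exchange data via the standard streams
    proc = subprocess.run([sys.executable, "-c", WORKER_SOURCE],
                          input=json.dumps({"volume_fraction": volume_fraction}),
                          capture_output=True, text=True, check=True)
    return json.loads(proc.stdout)


if __name__ == "__main__":
    print(run_once(0.25))  # -> {'pressure': 0.625}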
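The `p3m.cpp` changes above replace loops over whole `Particle` objects with zipped ranges of individual properties (charge, force, unfolded position) built by `ParticlePropertyRange`. The short Python sketch below mirrors that idea with `zip` over NumPy arrays to show why the force-assignment kernel only needs `(charge, force)` pairs; the names are illustrative and do not correspond to the C++ API.

# Conceptual Python analogue of the zipped property ranges used in p3m.cpp:
# the kernel receives (charge, force) pairs instead of whole particle objects.
# Names are illustrative, not part of the ESPResSo API.
import numpy as np

charges = np.array([1.0, 0.0, -1.0])
forces = np.zeros((3, 3))
mesh_field = np.ones(3)      # stand-in for the interpolated mesh field
prefactor = 0.5


def assign_forces(charge_force_pairs):
    for q, f in charge_force_pairs:
        if q != 0.0:                          # uncharged particles are skipped
            f -= prefactor * q * mesh_field   # rows of `forces` are updated in place


assign_forces(zip(charges, forces))
print(forces)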