From f54e2ce045f35abe4b0cfa5b0002e4dfc74606cf Mon Sep 17 00:00:00 2001
From: Jinzhe Zeng
Date: Tue, 24 Aug 2021 18:33:33 -0400
Subject: [PATCH 1/2] add docstring for `EnerFitting`

---
 deepmd/fit/ener.py | 43 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 36 insertions(+), 7 deletions(-)

diff --git a/deepmd/fit/ener.py b/deepmd/fit/ener.py
index c24df42e7e..34be1dc4ee 100644
--- a/deepmd/fit/ener.py
+++ b/deepmd/fit/ener.py
@@ -14,17 +14,46 @@ from deepmd.env import GLOBAL_TF_FLOAT_PRECISION
 
 class EnerFitting ():
-    """Fitting the energy of the system. The force and the virial can also be trained.
+    r"""Fitting the energy of the system. The force and the virial can also be trained.
+
+    The potential energy :math:`E` is a fitting network function of the descriptor :math:`\mathcal{D}`:
+
+    .. math::
+        E(\mathcal{D}) = \mathcal{L}^{(n)} \circ \mathcal{L}^{(n-1)}
+        \circ \cdots \circ \mathcal{L}^{(1)} \circ \mathcal{L}^{(0)}
+
+    The first :math:`n` layers :math:`\mathcal{L}^{(0)}, \cdots, \mathcal{L}^{(n-1)}` is given by
+
+    .. math::
+        \mathbf{y}=\mathcal{L}(\mathbf{x};\mathbf{w},\mathbf{b})=
+            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b})
+
+    where :math:`\mathbf{x} \in \mathbb{R}^{N_1}` is the input vector and :math:`\mathbf{y} \in \mathbb{R}^{N_2}`
+    is the output vector. :math:`\mathbf{w} \in \mathbb{R}^{N_1 \times N_2}` and
+    :math:`\mathbf{b} \in \mathbb{R}^{N_2}` are weights and biases, respectively,
+    both of which are trainable if `trainable[i]` is `True`. :math:`\boldsymbol{\phi}`
+    is the activation function.
+
+    The last layer :math:`\mathcal{L}^{(n)}` is given by
+
+    .. math::
+        \mathbf{y}=\mathcal{L}^{(n)}(\mathbf{x};\mathbf{w},\mathbf{b})=
+            \mathbf{x}^T\mathbf{w}+\mathbf{b}
+
+    where :math:`\mathbf{x} \in \mathbb{R}^{N_{n-1}}` is the input vector and :math:`\mathbf{y} \in \mathbb{R}`
+    is the output scalar. :math:`\mathbf{w} \in \mathbb{R}^{N_{n-1}}` and
+    :math:`\mathbf{b} \in \mathbb{R}` are weights and bias, respectively,
+    both of which are trainable if `trainable[n]` is `True`.
 
     Parameters
     ----------
     descrpt
-            The descrptor
+            The descrptor :math:`\mathcal{D}`
     neuron
-            Number of neurons in each hidden layer of the fitting net
+            Number of neurons :math:`N` in each hidden layer of the fitting net
     resnet_dt
             Time-step `dt` in the resnet construction:
-            y = x + dt * \phi (Wx + b)
+            :math:`y = x + dt * \phi (Wx + b)`
     numb_fparam
             Number of frame parameter
     numb_aparam
@@ -35,14 +64,14 @@ class EnerFitting ():
             Force the total energy to zero. Useful for the charge fitting.
     trainable
             If the weights of fitting net are trainable.
-            Suppose that we have N_l hidden layers in the fitting net,
-            this list is of length N_l + 1, specifying if the hidden layers and the output layer are trainable.
+            Suppose that we have :math:`N_l` hidden layers in the fitting net,
+            this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
     seed
             Random seed for initializing the network parameters.
     atom_ener
             Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set.
     activation_function
-            The activation function in the embedding net. Supported options are {0}
+            The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are {0}
     precision
             The precision of the embedding net parameters. Supported options are {1}
     uniform_seed
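
The docstring added in the patch above describes a plain multilayer perceptron: n hidden layers y = phi(x^T w + b) followed by a linear output layer that maps the descriptor D to a scalar energy, with `trainable` holding one flag per hidden layer plus one for the output layer. Below is a minimal NumPy sketch of that forward pass; the layer widths, tanh activation, random weights, and descriptor size are illustrative assumptions rather than deepmd-kit defaults, and the real fitting net additionally supports the `resnet_dt` residual construction and the frame/atomic parameters listed among the constructor arguments.

    import numpy as np

    rng = np.random.default_rng(0)

    def fitting_net(descriptor, neuron=(240, 240, 240), phi=np.tanh):
        """Toy forward pass of the fitting network sketched in the docstring."""
        x = descriptor
        # hidden layers L^(0) ... L^(n-1): y = phi(x^T w + b)
        for width in neuron:
            w = rng.standard_normal((x.size, width)) / np.sqrt(x.size)
            b = np.zeros(width)
            x = phi(x @ w + b)
        # output layer L^(n): y = x^T w + b, a scalar energy
        w = rng.standard_normal(x.size) / np.sqrt(x.size)
        b = 0.0
        return float(x @ w + b)

    energy = fitting_net(rng.standard_normal(128))  # 128 is an arbitrary descriptor size
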
From 233b8ccb456c901f62fa5ed88fe83d27656e23b6 Mon Sep 17 00:00:00 2001
From: Jinzhe Zeng
Date: Tue, 24 Aug 2021 21:07:44 -0400
Subject: [PATCH 2/2] Apply suggestions from code review

Co-authored-by: Han Wang
---
 deepmd/fit/ener.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/deepmd/fit/ener.py b/deepmd/fit/ener.py
index 34be1dc4ee..37900a70d8 100644
--- a/deepmd/fit/ener.py
+++ b/deepmd/fit/ener.py
@@ -22,7 +22,7 @@ class EnerFitting ():
         E(\mathcal{D}) = \mathcal{L}^{(n)} \circ \mathcal{L}^{(n-1)}
         \circ \cdots \circ \mathcal{L}^{(1)} \circ \mathcal{L}^{(0)}
 
-    The first :math:`n` layers :math:`\mathcal{L}^{(0)}, \cdots, \mathcal{L}^{(n-1)}` is given by
+    The first :math:`n` hidden layers :math:`\mathcal{L}^{(0)}, \cdots, \mathcal{L}^{(n-1)}` are given by
 
     .. math::
         \mathbf{y}=\mathcal{L}(\mathbf{x};\mathbf{w},\mathbf{b})=
             \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b})
@@ -34,7 +34,7 @@ class EnerFitting ():
     both of which are trainable if `trainable[i]` is `True`. :math:`\boldsymbol{\phi}`
     is the activation function.
 
-    The last layer :math:`\mathcal{L}^{(n)}` is given by
+    The output layer :math:`\mathcal{L}^{(n)}` is given by
 
     .. math::
         \mathbf{y}=\mathcal{L}^{(n)}(\mathbf{x};\mathbf{w},\mathbf{b})=
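
For the `resnet_dt` argument documented in the first patch, the residual form quoted there, y = x + dt * phi(Wx + b), can be sketched as follows. The fixed dt value, tanh activation, and square weight matrix are illustrative assumptions for the formula as written in the docstring; they are not how deepmd-kit itself wires the residual connection.

    import numpy as np

    def resnet_layer(x, w, b, dt=0.1, phi=np.tanh):
        # y = x + dt * phi(W x + b); the skip connection requires the layer
        # to preserve the vector width, hence the square weight matrix below.
        return x + dt * phi(x @ w + b)

    x = np.ones(4)
    w = 0.5 * np.eye(4)
    b = np.zeros(4)
    print(resnet_layer(x, w, b))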