Commit

Reorganize repo (#33)
* Move source files into "src"

* Update the build system

* Fix the build script

* Update .gitignore

* Fix function name collision

* Compile SchNet kernels

* Build the test executables

* Enable testing

* Move the build documentation

* Fix the molecule path

* Update the documentation

* Create __init__.py

* Add an import test

* Update a test tolerance

* Use "static" to avoid function name collisions

* Clean up the docs
Raimondas Galvelis authored Sep 23, 2021
1 parent 3d0bab6 commit d4190d8
Showing 38 changed files with 102 additions and 106 deletions.
35 changes: 3 additions & 32 deletions .gitignore
@@ -1,32 +1,3 @@
# Prerequisites
*.d

# Compiled Object files
*.slo
*.lo
*.o
*.obj

# Precompiled Headers
*.gch
*.pch

# Compiled Dynamic libraries
*.so
*.dylib
*.dll

# Fortran module files
*.mod
*.smod

# Compiled Static libraries
*.lai
*.la
*.a
*.lib

# Executables
*.exe
*.out
*.app
.vscode
__pycache__
build
36 changes: 36 additions & 0 deletions CMakeLists.txt
@@ -0,0 +1,36 @@
cmake_minimum_required(VERSION 3.20 FATAL_ERROR)

set(NAME NNPOps)
set(LIBRARY ${NAME}PyTorch)
project(${NAME} LANGUAGES CXX CUDA)

find_package(Python REQUIRED)
find_package(PythonLibs REQUIRED)
find_package(Torch REQUIRED)
enable_testing()

add_library(${LIBRARY} SHARED src/ani/CpuANISymmetryFunctions.cpp
src/ani/CudaANISymmetryFunctions.cu
src/pytorch/SymmetryFunctions.cpp
src/schnet/CpuCFConv.cpp
src/schnet/CudaCFConv.cu)
target_include_directories(${LIBRARY} PRIVATE src/ani src/schnet)
target_link_libraries(${LIBRARY} ${TORCH_LIBRARIES} ${PYTHON_LIBRARIES})

set(TEST_PATHS src/ani/TestCpuANISymmetryFunctions.cpp
src/ani/TestCudaANISymmetryFunctions.cpp
src/schnet/TestCpuCFConv.cpp
src/schnet/TestCudaCFConv.cu)
foreach(TEST_PATH ${TEST_PATHS})
cmake_path(GET TEST_PATH STEM TEST_NAME)
add_executable(${TEST_NAME} ${TEST_PATH})
target_link_libraries(${TEST_NAME} ${LIBRARY})
add_test(${TEST_NAME} ${TEST_NAME})
endforeach()

add_test(TestSymmetryFunctions pytest ${CMAKE_SOURCE_DIR}/src/pytorch/TestSymmetryFunctions.py)

install(TARGETS ${LIBRARY} DESTINATION ${Python_SITEARCH}/${NAME})
install(FILES src/pytorch/__init__.py
src/pytorch/SymmetryFunctions.py
DESTINATION ${Python_SITEARCH}/${NAME})
44 changes: 43 additions & 1 deletion README.md
@@ -1,4 +1,4 @@
## NNPOps
# NNPOps

The goal of this repository is to promote the use of neural network potentials (NNPs)
by providing highly optimized, open source implementations of bottleneck operations
@@ -19,3 +19,45 @@ for optimum performance.
5. This code is designed for inference (running simulations), not training (creating
new potential functions). It computes gradients with respect to particle positions,
not model parameters.
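
To restate point 5 as a formula (an added clarification, not text from the original README): for a potential energy $E(\mathbf{r}_1, \dots, \mathbf{r}_N)$ over the particle positions, the operations return the forces

$$\mathbf{F}_i = -\frac{\partial E}{\partial \mathbf{r}_i},$$

i.e. the gradients needed to run a simulation, rather than $\partial E / \partial \theta$ for the model parameters $\theta$, which training would require.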

## Installation

### From source

#### Prerequisites

- *CUDA Toolkit* (https://developer.nvidia.com/cuda-downloads)
- *Miniconda* (https://docs.conda.io/en/latest/miniconda.html#linux-installers)

#### Build & install

- Get the source code
```bash
$ git clone https://github.com/openmm/NNPOps.git
```

- Set `CUDA_HOME`
```bash
$ export CUDA_HOME=/usr/local/cuda-11.2
```

- Create and activate a *Conda* environment
```bash
$ cd NNPOps
$ conda env create -n nnpops -f environment.yml
$ conda activate nnpops
```

- Configure, build, and install
```bash
$ mkdir build && cd build
$ cmake .. \
-DTorch_DIR=$CONDA_PREFIX/lib/python3.9/site-packages/torch/share/cmake/Torch \
-DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
$ make install
```

- Run the tests
```bash
$ ctest
```
6 changes: 3 additions & 3 deletions pytorch/environment.yml → environment.yml
@@ -1,12 +1,12 @@
name: nnpops
channels:
- conda-forge
dependencies:
- cmake
- gxx_linux-64
- gxx_linux-64 <=10
- make
- mdtraj
- nvcc_linux-64
- torchani 2.2
- pytest
- python 3.9
- pytorch 1.8.0
- pytorch-gpu 1.8.0
22 changes: 0 additions & 22 deletions pytorch/CMakeLists.txt

This file was deleted.

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
@@ -122,11 +122,11 @@ __device__ void computeDisplacement(float3 pos1, float3 pos2, float3& delta, flo
r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
}

__device__ float cutoffFunction(float r, float rc) {
static __device__ float cutoffFunction(float r, float rc) {
return 0.5f * cosf(Pi*r/rc) + 0.5f;
}

__device__ float cutoffDeriv(float r, float rc) {
static __device__ float cutoffDeriv(float r, float rc) {
return -(0.5f*Pi/rc) * sinf(Pi*r/rc);
}

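For reference, `cutoffFunction` and `cutoffDeriv` above implement the cosine cutoff and its analytic derivative (a restatement of the existing code, unchanged by this commit apart from the `static` qualifier):

$$f_C(r) = \tfrac{1}{2}\cos\!\left(\frac{\pi r}{r_c}\right) + \tfrac{1}{2}, \qquad \frac{d f_C}{d r} = -\frac{\pi}{2 r_c}\,\sin\!\left(\frac{\pi r}{r_c}\right),$$

which matches the two `return` expressions.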
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
40 changes: 0 additions & 40 deletions pytorch/README.md → src/pytorch/README.md
@@ -32,44 +32,4 @@ energy.backward()
forces = -positions.grad.clone()

print(energy, forces)
```

## Installation

### Prerequisites

- *Linux*
- Complete *CUDA Toolkit* (https://developer.nvidia.com/cuda-downloads)
- *Miniconda* (https://docs.conda.io/en/latest/miniconda.html#linux-installers)

### Build & install

- Get the source code
```bash
$ git clone https://github.com/openmm/NNPOps.git
```

- Create a *Conda* environment
```bash
$ cd NNPOps
$ conda create -f pytorch/environment.yml
$ conda activate nnpops
```

- Configure, build, and install
```bash
$ mkdir build
$ cd build
$ cmake ../pytorch \
-DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc \
-DCMAKE_CUDA_HOST_COMPILER=$CXX \
-DTorch_DIR=$CONDA_PREFIX/lib/python3.9/site-packages/torch/share/cmake/Torch \
-DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
$ make install
```
- Optional: run tests and benchmarks
```bash
$ cd ../pytorch
$ pytest TestSymmetryFunctions.py
$ python BenchmarkTorchANISymmetryFunctions.py
```
File renamed without changes.
File renamed without changes.
@@ -22,20 +22,27 @@
#

import mdtraj
import os
import pytest
import tempfile
import torch
import torchani

from NNPOps.SymmetryFunctions import TorchANISymmetryFunctions
molecules = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'molecules')

def test_import():
import NNPOps
import NNPOps.SymmetryFunctions

@pytest.mark.parametrize('deviceString', ['cpu', 'cuda'])
@pytest.mark.parametrize('molFile', ['1hvj', '1hvk', '2iuz', '3hkw', '3hky', '3lka', '3o99'])
def test_compare_with_native(deviceString, molFile):

from NNPOps.SymmetryFunctions import TorchANISymmetryFunctions

device = torch.device(deviceString)

mol = mdtraj.load(f'molecules/{molFile}_ligand.mol2')
mol = mdtraj.load(os.path.join(molecules, f'{molFile}_ligand.mol2'))
atomicNumbers = torch.tensor([[atom.element.atomic_number for atom in mol.top.atoms]], device=device)
atomicPositions = torch.tensor(mol.xyz * 10, dtype=torch.float32, requires_grad=True, device=device)

@@ -54,15 +61,17 @@ def test_compare_with_native(deviceString, molFile):
grad_error = torch.max(torch.abs((grad - grad_ref)/grad_ref))

assert energy_error < 5e-7
assert grad_error < 5e-3
assert grad_error < 7e-3

@pytest.mark.parametrize('deviceString', ['cpu', 'cuda'])
@pytest.mark.parametrize('molFile', ['1hvj', '1hvk', '2iuz', '3hkw', '3hky', '3lka', '3o99'])
def test_model_serialization(deviceString, molFile):

from NNPOps.SymmetryFunctions import TorchANISymmetryFunctions

device = torch.device(deviceString)

mol = mdtraj.load(f'molecules/{molFile}_ligand.mol2')
mol = mdtraj.load(os.path.join(molecules, f'{molFile}_ligand.mol2'))
atomicNumbers = torch.tensor([[atom.element.atomic_number for atom in mol.top.atoms]], device=device)
atomicPositions = torch.tensor(mol.xyz * 10, dtype=torch.float32, requires_grad=True, device=device)

Empty file added src/pytorch/__init__.py
Empty file.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
4 changes: 2 additions & 2 deletions schnet/CudaCFConv.cu → src/schnet/CudaCFConv.cu
@@ -260,11 +260,11 @@ const float* CudaCFConv::ensureOnDevice(const float* arg, float*& deviceMemory,
return (const float*) attrib.devicePointer;
}

__device__ float cutoffFunction(float r, float rc) {
static __device__ float cutoffFunction(float r, float rc) {
return 0.5f * cosf(Pi*r/rc) + 0.5f;
}

__device__ float cutoffDeriv(float r, float rc) {
static __device__ float cutoffDeriv(float r, float rc) {
return -(0.5f*Pi/rc) * sinf(Pi*r/rc);
}

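The `static` qualifiers added here and in CudaANISymmetryFunctions.cu above are what the commit messages call fixing a "function name collision": both .cu files define `cutoffFunction` and `cutoffDeriv`, and the new CMakeLists.txt now compiles both into the single `NNPOpsPyTorch` library. A minimal sketch of the pattern, with hypothetical file names and an inlined value of `Pi` (not code from the repository):

```cpp
// a.cu -- hypothetical translation unit A
static __device__ float cutoffFunction(float r, float rc) {
    // "static" gives the definition internal linkage, so the symbol
    // stays private to a.cu
    return 0.5f * cosf(3.14159265f * r / rc) + 0.5f;
}

// b.cu -- hypothetical translation unit B, linked into the same library
static __device__ float cutoffFunction(float r, float rc) {
    // an identically named definition is harmless here: each copy is
    // local to its own translation unit; without "static", linking the
    // two objects together can fail with a multiple-definition
    // (duplicate symbol) error
    return 0.5f * cosf(3.14159265f * r / rc) + 0.5f;
}
```

An unnamed namespace is the common C++ alternative for internal linkage; the one-word `static` keeps this change minimal.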
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
