
Initial Commit
Jo Schlemper committed May 30, 2017
0 parents · commit 1f8a5f8
Showing 35 changed files with 3,622 additions and 0 deletions.
9 changes: 9 additions & 0 deletions .gitignore
@@ -0,0 +1,9 @@
*.pyc
models
*~
*.tmp
*.orig
*.remote
*.base
deep_mri
*.#*
13 changes: 13 additions & 0 deletions LICENSE
@@ -0,0 +1,13 @@
Copyright 2017 Jo Schlemper

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
14 changes: 14 additions & 0 deletions README.rst
@@ -0,0 +1,14 @@
Deep Cascade of Convolutional Neural Networks for MR Image Reconstruction

Reconstruct MR images from their undersampled measurements using a Deep Cascade of Convolutional Neural Networks. This repository contains an implementation of DC-CNN using Theano and Lasagne, and a simple demo on a toy dataset borrowed from http://mridata.org.

Usage:

python main_2d.py --num_epoch 5 --batch_size 2


If you use this code in your work, please cite the following paper:

Schlemper, J., Caballero, J., Hajnal, J. V., Price, A., & Rueckert, D. (2017). A Deep Cascade of Convolutional Neural Networks for MR Image Reconstruction. Information Processing in Medical Imaging (IPMI), 2017

The paper is also available on arXiv:1703.00555.
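
For intuition, a toy NumPy sketch (illustrative only, not part of this
repository; the sampling mask and image size are assumptions) of the
zero-filled reconstruction from undersampled k-space that the cascade
takes as input and learns to de-alias::

    import numpy as np

    image = np.random.randn(128, 128)         # stand-in for a ground-truth MR slice
    k_full = np.fft.fft2(image)               # fully sampled k-space
    mask = np.random.rand(128, 128) < 0.33    # hypothetical ~3x random undersampling
    k_under = k_full * mask                   # retained measurements
    zero_filled = np.fft.ifft2(k_under).real  # aliased input to the CNN cascade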
Empty file added cascadenet/__init__.py
Empty file.
Empty file added cascadenet/network/__init__.py
Empty file.
10 changes: 10 additions & 0 deletions cascadenet/network/layers/__init__.py
@@ -0,0 +1,10 @@
from __future__ import print_function

from .input import *
from .simple import *
from .conv import *
from .pool import *
from .shape import *
from .fourier import *
from .data_consistency import *
from .helper import *
64 changes: 64 additions & 0 deletions cascadenet/network/layers/conv.py
@@ -0,0 +1,64 @@
import theano
import lasagne
from lasagne.layers import Layer, prelu
from .helper import ensure_set_name

if theano.config.device == 'cuda':
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    # (MaxPool2DDNNLayer could be imported here as MaxPool2DLayer)
else:
    from lasagne.layers import Conv2DLayer as ConvLayer


def Conv(incoming, num_filters, filter_size=3,
         stride=(1, 1), pad='same', W=lasagne.init.HeNormal(),
         b=None, nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
    """
    Overrides the default parameters for ConvLayer.
    """
    ensure_set_name('conv', kwargs)

    return ConvLayer(incoming, num_filters, filter_size, stride, pad, W=W, b=b,
                     nonlinearity=nonlinearity, **kwargs)


class ConvPrelu(Layer):
    def __init__(self, incoming, num_filters, filter_size=3, stride=(1, 1),
                 pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        # Enforce name
        ensure_set_name('conv_prelu', kwargs)

        super(ConvPrelu, self).__init__(incoming, **kwargs)
        self.conv = Conv(incoming, num_filters, filter_size, stride,
                         pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        self.prelu = prelu(self.conv, **kwargs)

        # Expose the parameters of both sub-layers through this layer
        self.params = self.conv.params.copy()
        self.params.update(self.prelu.params)

    def get_output_for(self, input, **kwargs):
        out_conv = self.conv.get_output_for(input)
        out_prelu = self.prelu.get_output_for(out_conv)
        return out_prelu

    def get_output_shape_for(self, input_shape, **kwargs):
        return self.conv.get_output_shape_for(input_shape)


class ConvAggr(Layer):
    def __init__(self, incoming, num_channels, filter_size=3, stride=(1, 1),
                 pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv_aggr', kwargs)
        super(ConvAggr, self).__init__(incoming, **kwargs)
        self.conv = Conv(incoming, num_channels, filter_size, stride, pad=pad,
                         W=W, b=b, nonlinearity=None, **kwargs)

        # copy params
        self.params = self.conv.params.copy()

    def get_output_for(self, input, **kwargs):
        return self.conv.get_output_for(input)

    def get_output_shape_for(self, input_shape):
        return self.conv.get_output_shape_for(input_shape)
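
A minimal usage sketch (hypothetical, not part of this commit) showing how these wrappers compose into one convolutional block of the cascade; the layer names and the 2-channel (real/imaginary) input shape are illustrative assumptions:

from lasagne.layers import InputLayer

net = InputLayer((None, 2, 128, 128), name='input')  # real/imaginary channels
net = ConvPrelu(net, num_filters=64, name='c1')      # 3x3 conv + PReLU
net = ConvPrelu(net, num_filters=64, name='c2')
net = ConvAggr(net, num_channels=2, name='aggr')     # project back to 2 channels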
107 changes: 107 additions & 0 deletions cascadenet/network/layers/data_consistency.py
@@ -0,0 +1,107 @@
import theano.tensor as T
from lasagne.layers import MergeLayer, get_output
from .fourier import FFT2Layer, FFTCLayer
from cascadenet.network.theano_extensions.fft_helper import fftshift, ifftshift
from cascadenet.network.theano_extensions.fft import fft, ifft


class DataConsistencyLayer(MergeLayer):
    '''
    Data consistency layer which infers the sampling mask from the nonzero
    entries of the k-space samples.
    '''

    def __init__(self, incomings, inv_noise_level=None, **kwargs):
        super(DataConsistencyLayer, self).__init__(incomings, **kwargs)
        self.inv_noise_level = inv_noise_level

    def get_output_for(self, inputs, **kwargs):
        '''
        Parameters
        ----------
        inputs: two 4D tensors; the first is the data, the second is the
            k-space samples

        Returns
        -------
        output: 4D tensor; the data input with entries replaced by the
            sampled values
        '''
        x = inputs[0]
        x_sampled = inputs[1]
        v = self.inv_noise_level
        if v:  # noisy case: weighted average of prediction and samples
            out = (x + v * x_sampled) / (1 + v)
        else:  # noiseless case: overwrite the sampled entries
            mask = T.set_subtensor(x_sampled[T.neq(x_sampled, 0).nonzero()], 1)
            out = (1 - mask) * x + x_sampled
        return out

    def get_output_shape_for(self, input_shapes, **kwargs):
        return input_shapes[0]


class DataConsistencyWithMaskLayer(MergeLayer):
    '''
    Data consistency layer which takes the sampling mask as an explicit input.
    '''

    def __init__(self, incomings, inv_noise_level=None, **kwargs):
        super(DataConsistencyWithMaskLayer, self).__init__(incomings, **kwargs)
        self.inv_noise_level = inv_noise_level

    def get_output_for(self, inputs, **kwargs):
        '''
        Parameters
        ----------
        inputs: three 4D tensors; the first is the data, the second is the
            sampling mask, the third is the k-space samples

        Returns
        -------
        output: 4D tensor; the data input with entries replaced by the
            sampled values
        '''
        x = inputs[0]
        mask = inputs[1]
        x_sampled = inputs[2]
        v = self.inv_noise_level
        if v:  # noisy case
            out = (x + v * x_sampled) / (1 + v)
        else:  # noiseless case
            out = (1 - mask) * x + x_sampled
        return out

    def get_output_shape_for(self, input_shapes, **kwargs):
        return input_shapes[0]


class DCLayer(MergeLayer):
    '''
    Data consistency layer which transforms the image to k-space, enforces
    consistency with the sampled values, and transforms back.
    '''
    def __init__(self, incomings, data_shape, inv_noise_level=None, **kwargs):
        if 'name' not in kwargs:
            kwargs['name'] = 'dc'

        super(DCLayer, self).__init__(incomings, **kwargs)
        self.inv_noise_level = inv_noise_level
        data, mask, sampled = incomings
        self.data = data
        self.mask = mask
        self.sampled = sampled
        self.dft2 = FFT2Layer(data, data_shape, name='dc_dft2')
        self.dc = DataConsistencyWithMaskLayer([self.dft2, mask, sampled],
                                               name='dc_consistency')
        self.idft2 = FFT2Layer(self.dc, data_shape, inv=True, name='dc_idft2')

    def get_output_for(self, inputs, **kwargs):
        x = inputs[0]
        mask = inputs[1]
        x_sampled = inputs[2]
        return get_output(self.idft2,
                          {self.data: x,
                           self.mask: mask,
                           self.sampled: x_sampled})

    def get_output_shape_for(self, input_shapes, **kwargs):
        return input_shapes[0]
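
For reference, a NumPy sketch (illustrative, not from the repository) of the update rule these layers implement, mirroring the noisy and noiseless branches above:

import numpy as np

def data_consistency(k_cnn, mask, k_sampled, v=None):
    # k_cnn: k-space of the CNN prediction; k_sampled: acquired samples
    if v is not None:  # noisy case: weighted average of prediction and samples
        return (k_cnn + v * k_sampled) / (1 + v)
    return (1 - mask) * k_cnn + k_sampled  # noiseless: overwrite sampled entries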
