This repository has been archived by the owner on Jul 10, 2021. It is now read-only.

Commit 81e9a46

Adding upscaling support in the convolution layers so it's easy to build convolutional autoencoders manually.

alexjc committed Nov 25, 2015
1 parent 8bf26e4 commit 81e9a46
Showing 4 changed files with 48 additions and 9 deletions.
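
To show what the commit enables, here is a minimal sketch of a convolutional autoencoder built with the new scale_factor parameter. It assumes the import aliases used in the project's tests (MLPR, C, L); the input shape, channel counts, and iteration count are illustrative, not part of the commit.

    import numpy
    from sknn.mlp import Regressor as MLPR, Convolution as C, Layer as L

    # Encoder: a 2x2 max-pool halves each spatial dimension.
    # Decoder: a 2x2 upscale (the new feature) restores the original size.
    nn = MLPR(
        layers=[
            C("Rectifier", channels=8, kernel_shape=(3,3), border_mode='same',
              pool_shape=(2,2)),
            C("Rectifier", channels=1, kernel_shape=(3,3), border_mode='same',
              scale_factor=(2,2)),
            L("Linear")],
        n_iter=10)

    X = numpy.random.uniform(size=(100, 32, 32, 1))
    nn.fit(X, X.reshape((100, -1)))  # train the network to reconstruct its input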
13 changes: 9 additions & 4 deletions sknn/backend/lasagne/mlp.py
@@ -93,7 +93,7 @@ def _create_trainer(self, params, cost):
            allow_input_downcast=True)
        return trainer, validator

    def _get_activation(self, l):
        nonlinearities = {'Rectifier': nl.rectify,
                          'Sigmoid': nl.sigmoid,
                          'Tanh': nl.tanh,
@@ -119,9 +119,14 @@ def _create_convolution_layer(self, name, layer, network):

        if layer.pool_shape != (1, 1):
            network = lasagne.layers.Pool2DLayer(
-            network,
-            pool_size=layer.pool_shape,
-            stride=layer.pool_shape)
+                network,
+                pool_size=layer.pool_shape,
+                stride=layer.pool_shape)
+
+        if layer.scale_factor != (1, 1):
+            network = lasagne.layers.Upscale2DLayer(
+                network,
+                scale_factor=layer.scale_factor)

        return network
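
For reference, Lasagne's Upscale2DLayer performs nearest-neighbour upscaling: each value in the feature map is repeated scale_factor times along each spatial axis. A plain-numpy illustration of the equivalent operation (not the Lasagne implementation itself):

    import numpy

    def upscale2d(x, scale_factor):
        # Repeat each element along the last two axes, so (2,2) turns an
        # NxM map into a 2Nx2M map made of 2x2 constant blocks.
        sy, sx = scale_factor
        return x.repeat(sy, axis=-2).repeat(sx, axis=-1)

    x = numpy.array([[1, 2],
                     [3, 4]])
    print(upscale2d(x, (2, 2)))
    # [[1 1 2 2]
    #  [1 1 2 2]
    #  [3 3 4 4]
    #  [3 3 4 4]]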

4 changes: 4 additions & 0 deletions sknn/mlp.py
@@ -77,6 +77,10 @@ def _create_specs(self, X, y=None):
                if l.border_mode == 'full':
                    res = (int((res[0] + l.kernel_shape[0]) / l.pool_shape[0]) - 1,
                           int((res[1] + l.kernel_shape[1]) / l.pool_shape[1]) - 1)
+
+                if l.scale_factor != (1, 1):
+                    res = (int(l.scale_factor[0] * res[0]), int(l.scale_factor[1] * res[1]))
+
                unit_count = numpy.prod(res) * l.channels
            else:
                unit_count = l.units
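
To make the shape bookkeeping concrete, here is the computation for the configuration exercised in the test further down, written out as standalone Python (the numbers come from that test; the variable names are illustrative):

    # A 32x32 input through a 1x1 kernel with border_mode='same' keeps its
    # spatial extent, and a pool_shape of (1,1) leaves it untouched.
    res = (32, 32)
    scale_factor = (2, 2)
    channels = 4

    res = (scale_factor[0] * res[0], scale_factor[1] * res[1])  # (64, 64)
    unit_count = res[0] * res[1] * channels                     # 16384 == 64 * 64 * 4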
21 changes: 16 additions & 5 deletions sknn/nn.py
@@ -170,15 +170,24 @@ class Convolution(Layer):
        but for `valid` (default) it will be smaller or equal.

    pool_shape: tuple of ints, optional
-        A two-dimensional tuple of integers corresponding to the pool size. This should be
-        square, for example `(2,2)` to reduce the size by half, or `(4,4)` to make the output
-        a quarter of the original.
+        A two-dimensional tuple of integers corresponding to the pool size for downsampling.
+        This should be square, for example `(2,2)` to reduce the size by half, or `(4,4)` to
+        make the output a quarter of the original.
+
+        Pooling is applied after the convolution and calculation of its activation.

    pool_type: str, optional
        Type of the pooling to be used; can be either `max` or `mean`. If a `pool_shape` is
        specified the default is to take the maximum value of all inputs that fall into this
        pool. Otherwise, the default is None and no pooling is used for performance.

+    scale_factor: tuple of ints, optional
+        A two-dimensional tuple of integers corresponding to the upscaling ratio. This should
+        be square, for example `(2,2)` to double the size, or `(4,4)` to make the output four
+        times the original.
+
+        Upscaling is applied after the convolution and calculation of its activation.
+
    weight_decay: float, optional
        The coefficient for L1 or L2 regularization of the weights. For example, a value of
        0.0001 is multiplied by the L1 or L2 weight decay equation.
@@ -208,6 +217,7 @@ def __init__(
            border_mode='valid',
            pool_shape=None,
            pool_type=None,
+            scale_factor=None,
            weight_decay=None,
            dropout=None,
            frozen=False):
@@ -229,11 +239,12 @@ def __init__(
            frozen=frozen)

        self.channels = channels
-        self.pool_shape = pool_shape or (1,1)
-        self.pool_type = pool_type or ('max' if pool_shape else None)
        self.kernel_shape = kernel_shape
        self.kernel_stride = kernel_stride or (1,1)
        self.border_mode = border_mode
+        self.pool_shape = pool_shape or (1,1)
+        self.pool_type = pool_type or ('max' if pool_shape else None)
+        self.scale_factor = scale_factor or (1,1)
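
A quick illustration of the defaults established above; the asserts simply restate the assignments in the diff (a sketch, assuming the import alias used in the project's tests):

    from sknn.mlp import Convolution as C

    conv = C("Rectifier", channels=4, kernel_shape=(3,3))
    assert conv.pool_shape == (1,1)      # no pooling unless requested
    assert conv.pool_type is None        # 'max' only when pool_shape is given
    assert conv.scale_factor == (1,1)    # no upscaling unless requested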


class NeuralNetwork(object):
19 changes: 19 additions & 0 deletions sknn/tests/test_conv.py
@@ -110,6 +110,16 @@ def test_PoolingMeanType(self):
            n_iter=1))


+class TestUpscaling(unittest.TestCase):
+
+    def test_Upscaling(self):
+        TestConvolution._run(self, MLPR(
+            layers=[
+                C("Rectifier", channels=4, kernel_shape=(2,2), scale_factor=(2,2)),
+                L("Linear")],
+            n_iter=1))


class TestConvolutionSpecs(unittest.TestCase):

def test_SmallSquareKernel(self):
@@ -180,6 +190,15 @@ def test_MultiLayerPooling(self):
        nn._initialize(a_in, a_out)
        assert_equal(nn.unit_counts, [1024, 900, 196, 16])

+    def test_Upscaling(self):
+        nn = MLPR(layers=[
+            C("Rectifier", channels=4, kernel_shape=(1,1), scale_factor=(2,2), border_mode='same'),
+            L("Linear", units=5)])
+
+        a_in = numpy.zeros((8,32,32,1))
+        nn._create_specs(a_in)
+        assert_equal(nn.unit_counts, [1024, 64 * 64 * 4, 5])
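
The expected counts follow directly from the spec computation added in sknn/mlp.py: the flattened 32x32x1 input gives 1024 units; the 1x1 'same' convolution keeps the 32x32 extent, which the (2,2) upscaling doubles to 64x64 across 4 channels (16,384 units); the output layer contributes 5.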


class TestActivationTypes(unittest.TestCase):

