✨ feat(layer_2d): Deconvolution2D (#42)
jean-francoisreboud committed Dec 31, 2022
1 parent 4640dea commit 96ab721
Showing 18 changed files with 2,851 additions and 56 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -4,6 +4,7 @@ All notable changes to this project will be documented in this file.

## [unreleased]

🪜 **layer_2d:** Deconvolution2D ([#42](https://github.com/owkin/GrAIdient/pull/42))\
🪜 **feat:** getDeltaWeightsGPU per sample API ([#41](https://github.com/owkin/GrAIdient/pull/41))\
🎉 **refactor!:** rebrand the framework ([#40](https://github.com/owkin/GrAIdient/pull/40))

4 changes: 2 additions & 2 deletions Sources/GrAIdient/Layer1D/Base/Layer1D.swift
@@ -15,12 +15,12 @@ open class Layer1D: Layer
/// Output buffer (result of the forward pass) used in the GPU execution context.
/// Shape ~ (batch, nbNeurons).
///
-public internal(set) var outs: MetalPrivateBuffer<Float>! = nil
+public var outs: MetalPrivateBuffer<Float>! = nil
///
/// Gradient buffer (result of the backward pass) used in the GPU execution context.
/// Shape ~ (batch, nbNeurons).
///
-public internal(set) var delta: MetalPrivateBuffer<Float>! = nil
+public var delta: MetalPrivateBuffer<Float>! = nil

/// Number of neurons.
public let nbNeurons: Int
4 changes: 2 additions & 2 deletions Sources/GrAIdient/Layer2D/Base/Layer2D.swift
@@ -15,12 +15,12 @@ open class Layer2D: Layer
/// Output buffer (result of the forward pass) used in the GPU execution context.
/// Shape ~ (batch, nbChannels, height, width).
///
-public internal(set) var outs: MetalPrivateBuffer<Float>! = nil
+public var outs: MetalPrivateBuffer<Float>! = nil
///
/// Gradient buffer (result of the backward pass) used in the GPU execution context.
/// Shape ~ (batch, nbChannels, height, width).
///
-public internal(set) var delta: MetalPrivateBuffer<Float>! = nil
+public var delta: MetalPrivateBuffer<Float>! = nil

/// Number of channels.
public let nbChannels: Int
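The shape comments above describe how Layer1D and Layer2D expose their flat GPU buffers, now readable and writable from outside the framework. As a point of reference, a minimal sketch of the flat offsets matching those shapes, assuming contiguous row-major storage (an assumption, not something stated in this diff):

```swift
// Sketch only, assuming contiguous row-major storage (an assumption, not
// taken from this diff): flat offsets matching the shapes documented above.

// Layer1D buffers: shape (batch, nbNeurons).
func index1D(elem: Int, neuron: Int, nbNeurons: Int) -> Int
{
    return elem * nbNeurons + neuron
}

// Layer2D buffers: shape (batch, nbChannels, height, width).
func index2D(elem: Int, depth: Int, i: Int, j: Int,
             nbChannels: Int, height: Int, width: Int) -> Int
{
    return ((elem * nbChannels + depth) * height + i) * width + j
}
```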
45 changes: 41 additions & 4 deletions Sources/GrAIdient/Layer2D/Convolution2D.swift
@@ -14,6 +14,9 @@ import MetalKit
///
/// This is the fundamental learning layer of a 2D model.
///
/// The implementation here corresponds to the half padding version of the link below:
/// https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
///
public class Convolution2D: BN2D
{
/// Downscale factor of the resolution (height and width).
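For context, the half padding scheme referenced in the new documentation keeps the spatial resolution at stride 1 and downscales it by the stride otherwise. A minimal sketch of that arithmetic (illustration only, not GrAIdient code), assuming an odd kernel size and padding `kernelSize / 2`:

```swift
// Illustration only, not GrAIdient code: output spatial size of a
// convolution with "half" (SAME) padding, i.e. padding = kernelSize / 2
// and an odd kernel, as described in the conv_arithmetic reference.
func halfPaddingConvOutput(input: Int, kernelSize: Int, stride: Int) -> Int
{
    let padding = kernelSize / 2
    // Standard convolution arithmetic: floor((in + 2p - k) / s) + 1.
    return (input + 2 * padding - kernelSize) / stride + 1
}

// A 3x3 kernel over a 32x32 grid:
// stride 1 -> 32 (resolution preserved), stride 2 -> 16 (downscaled).
print(halfPaddingConvOutput(input: 32, kernelSize: 3, stride: 2))
```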
@@ -391,6 +394,42 @@ public class Convolution2D: BN2D
params: params)
}

///
/// Create a layer with a 2D shape neural structure.
///
/// - Parameters:
/// - layerPrev: Previous layer that has been queued to the model.
/// - size: Size (height, width) of the weight kernels.
/// - nbChannels: Number of channels.
/// - stride: Downscale factor of the resolution (height and width).
/// - height: Height of the output grids.
/// - width: Width of the output grids.
/// - activation: The activation function.
/// - biases: Whether to update biases or not.
/// - bn: Whether to use batch normalization or not.
/// - params: Contextual parameters linking to the model.
///
init(layerPrev: Layer2D, size: Int, nbChannels: Int, stride: Int,
height: Int, width: Int,
activation: String?, biases: Bool, bn: Bool,
params: GrAI.Model.Params)
{
_stride = stride

nbWeights = nbChannels * layerPrev.nbChannels
weightWidth = size
weightHeight = size
_updateBiases = biases

super.init(layerPrev: layerPrev,
nbChannels: nbChannels,
height: height,
width: width,
activation: activation,
bn: bn,
params: params)
}

///
/// Decode from the disk.
///
@@ -1351,13 +1390,11 @@ public class Convolution2D: BN2D

if layerPrev.dirty
{
-neuronsPrev[depthPrev].get(i, j)!.v[elem].delta =
-    tmp
+neuronsPrev[depthPrev].get(i, j)!.v[elem].delta = tmp
}
else
{
-neuronsPrev[depthPrev].get(i, j)!.v[elem].delta +=
-    tmp
+neuronsPrev[depthPrev].get(i, j)!.v[elem].delta += tmp
}
}}
}}
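The `dirty` check above is the usual convention for writing gradients into the previous layer: the first contribution overwrites whatever is sitting in `delta`, later contributions accumulate into it. A standalone sketch of that pattern (illustration only, not the GrAIdient types):

```swift
// Illustration only, not GrAIdient types: the "dirty" gradient pattern
// used above, where the first backward contribution overwrites delta and
// subsequent contributions accumulate into it.
struct GradientSlot
{
    var dirty = true
    var delta = 0.0

    mutating func receive(_ contribution: Double)
    {
        if dirty
        {
            delta = contribution  // first writer: overwrite stale content
            dirty = false
        }
        else
        {
            delta += contribution // later writers: accumulate
        }
    }
}

var slot = GradientSlot()
slot.receive(0.5)  // delta == 0.5
slot.receive(0.25) // delta == 0.75
```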
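The new initializer takes the output height and width explicitly instead of deriving them from the previous layer, which is what a transposed convolution such as the Deconvolution2D layer introduced by this commit needs. For reference, the standard transposed-convolution size arithmetic from the conv_arithmetic material linked above (illustration only; whether Deconvolution2D computes its output size exactly this way is an assumption):

```swift
// Illustration only: standard transposed-convolution (deconvolution) output
// size with no output padding: out = (in - 1) * stride - 2 * padding + k.
func deconvOutput(input: Int, kernelSize: Int, stride: Int, padding: Int) -> Int
{
    return (input - 1) * stride - 2 * padding + kernelSize
}

// Upsampling a 16x16 grid with a 3x3 kernel, stride 2 and padding 1 gives 31;
// an extra output padding of 1 would be needed to reach exactly 32.
print(deconvOutput(input: 16, kernelSize: 3, stride: 2, padding: 1))
```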
