Skip to content

Commit

Permalink
📚 docs: PropertyListEncoder by default (#51)
Browse files Browse the repository at this point in the history
  • Loading branch information
jean-francoisreboud committed Feb 13, 2023
1 parent 0c1b18f commit 267b0fa
Show file tree
Hide file tree
Showing 5 changed files with 42 additions and 44 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ All notable changes to this project will be documented in this file.

## [unreleased]

📚 **docs:** PropertyListEncoder by default ([#51](https://github.com/owkin/GrAIdient/pull/51))\
🐛 **fix:** use buffers for neuron selection in SelectNeurons1D ([#50](https://github.com/owkin/GrAIdient/pull/50))\
🪜 **feat:** Softmax1D, DotProduct1D & Constant1D ([#49](https://github.com/owkin/GrAIdient/pull/49))\
🪜 **feat:** remove activation from layer ([#47](https://github.com/owkin/GrAIdient/pull/47))\
Expand Down
11 changes: 5 additions & 6 deletions Docs/Concepts/MODEL.md
Original file line number Diff line number Diff line change
Expand Up @@ -135,10 +135,9 @@ Let us imagine the two models have been trained and we want to save them
to the disk:

```swift
// Use JSONEncoder to encode a readable file
// or PropertyListEncoder to encode a binary file
// (takes less space on the disk).
let encoder = JSONEncoder()
// Use PropertyListEncoder to encode a binary file (readable with Xcode)
// or JSONEncoder to encode a json file (takes more space on the disk).
let encoder = PropertyListEncoder()

// Encode first model.
var data = try! encoder.encode(cnn)
Expand Down Expand Up @@ -166,8 +165,8 @@ let baseCNN = try! JSONDecoder().decode(
// Load the data of our second model from the disk.
data = try! Data(contentsOf: URL(fileURLWithPath: "/path/to/model2.plist"))
// Initialize a base model out of it.
// Use PropertyListDecoder instead to decode a binary file.
let baseClassifier = try! JSONDecoder().decode(
// Use JSONDecoder instead to decode a json file.
let baseClassifier = try! PropertyListDecoder().decode(
BaseModel.self, from: data
)

Expand Down
14 changes: 6 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,26 +24,24 @@ Ready for the grAIt descent? <br />

GrAIdient is a framework that exposes the graph of layers as its unique way to
design deep learning models. This "flattened" vision enforces
great understanding, control and reproducibility with the models human interact
with.
great understanding, control and reproducibility over these models.

Though deeply grounded to the data driven way of life, the goal is to challenge
Though deeply grounded to the data driven pipeline, the goal is to challenge
the very understanding of deep learning models and inject human intelligence
to go from black box models to white box models.

Let us find our grAI between them both! <br />
<br />

Key features:
## ✨ Key Features

- what you do is what you get
- flat design with direct access to the graph of layers and the backward pass
- run natively on Mac Intel GPU, eGPU, Mac M1, M2...
- compiled language
- gradient checking
- extendable design
- PyTorch interoperability
- compute gradients per batch & per sample (differential privacy)
- debugging at the neuron level
- gradients per batch & per sample (ready for differential privacy)
- debug at the neuron level

## 📦 Swift Package Manager

Expand Down
54 changes: 27 additions & 27 deletions Sources/GrAIdient/Core/Layer/LayerNormalization.swift
Original file line number Diff line number Diff line change
Expand Up @@ -148,12 +148,12 @@ public class BatchNormalization: BatchNormalizationBase
let _Ɛ: Double = 1e-5

///
/// Arrays of weights to scale the normalization result.
/// Array of weights to scale the normalization result.
/// Shape ~ (nbNeurons,).
///
var _Ɣ: WeightArrays! = nil
///
/// Arrays of biases to add to the normalization result.
/// Array of biases to add to the normalization result.
/// Shape ~ (nbNeurons,).
///
var _β: WeightArrays! = nil
Expand Down Expand Up @@ -521,15 +521,15 @@ public class BatchNormalization: BatchNormalizationBase
class BatchNormalizationGPU: BatchNormalizationBase
{
///
/// Buffers of weights to scale the normalization result.
/// Buffer of weights to scale the normalization result.
/// Shape ~ (nbNeurons,).
///
var _ƔBuffers: IWeightBuffers! = nil
var _Ɣ: IWeightBuffers! = nil
///
/// Buffers of biases to add to the normalization result.
/// Buffer of biases to add to the normalization result.
/// Shape ~ (nbNeurons,).
///
var _βBuffers: IWeightBuffers! = nil
var _β: IWeightBuffers! = nil

///
/// Buffer of averages of data for the different independent batch normalization units.
Expand Down Expand Up @@ -576,16 +576,16 @@ class BatchNormalizationGPU: BatchNormalizationBase
override var weights: [Float]
{
get {
if _ƔBuffers == nil
if _Ɣ == nil
{
return super.weights
}

MetalKernel.get.download([_βBuffers.w_p!, _ƔBuffers.w_p!])
MetalKernel.get.download([_β.w_p!, _Ɣ.w_p!])

var weightsTmp = [Float]()
weightsTmp += _ƔBuffers.w_p!.shared.array
weightsTmp += _βBuffers.w_p!.shared.array
weightsTmp += _Ɣ.w_p!.shared.array
weightsTmp += _β.w_p!.shared.array
return weightsTmp
}
set {
Expand Down Expand Up @@ -645,8 +645,8 @@ class BatchNormalizationGPU: BatchNormalizationBase
_sum1 = nil
_sum2 = nil

_ƔBuffers?.reset()
_βBuffers?.reset()
_Ɣ?.reset()
_β?.reset()
}

///
Expand All @@ -669,11 +669,11 @@ class BatchNormalizationGPU: BatchNormalizationBase
///
func initWeights()
{
_βBuffers = WeightBuffers(nbElems: _nbNeurons, deviceID: _deviceID)
_ƔBuffers = WeightBuffers(nbElems: _nbNeurons, deviceID: _deviceID)
_β = WeightBuffers(nbElems: _nbNeurons, deviceID: _deviceID)
_Ɣ = WeightBuffers(nbElems: _nbNeurons, deviceID: _deviceID)

let βPtr = _βBuffers.w_p!.shared.buffer
let ƔPtr = _ƔBuffers.w_p!.shared.buffer
let βPtr = _β.w_p!.shared.buffer
let ƔPtr = _Ɣ.w_p!.shared.buffer

if _weightsList.count == 0
{
Expand All @@ -693,7 +693,7 @@ class BatchNormalizationGPU: BatchNormalizationBase
_weightsList = []
}

MetalKernel.get.upload([_βBuffers.w_p!, _ƔBuffers.w_p!])
MetalKernel.get.upload([_β.w_p!, _Ɣ.w_p!])
}

/// Initialize stats in the GPU execution context.
Expand Down Expand Up @@ -848,8 +848,8 @@ class BatchNormalizationGPU: BatchNormalizationBase
let command = MetalKernel.get.createCommand(
"forwardBNConvTraining", deviceID: _deviceID
)
command.setBuffer(_βBuffers.w.metal, atIndex: 0)
command.setBuffer(_ƔBuffers.w.metal, atIndex: 1)
command.setBuffer(_β.w.metal, atIndex: 0)
command.setBuffer(_Ɣ.w.metal, atIndex: 1)
command.setBuffer(_μ.metal, atIndex: 2)
command.setBuffer(_σ2.metal, atIndex: 3)
command.setBytes(pNbChannels, atIndex: 4)
Expand Down Expand Up @@ -886,8 +886,8 @@ class BatchNormalizationGPU: BatchNormalizationBase
"forwardBNConvInference",
deviceID: _deviceID
)
command.setBuffer(_βBuffers.w.metal, atIndex: 0)
command.setBuffer(_ƔBuffers.w.metal, atIndex: 1)
command.setBuffer(_β.w.metal, atIndex: 0)
command.setBuffer(_Ɣ.w.metal, atIndex: 1)
command.setBuffer(_Eμ.metal, atIndex: 2)
command.setBuffer(_Eσ2.metal, atIndex: 3)
command.setBytes(pNbChannels, atIndex: 4)
Expand Down Expand Up @@ -939,15 +939,15 @@ class BatchNormalizationGPU: BatchNormalizationBase
)
command.setBuffer(layer.delta.metal, atIndex: 0)
command.setBuffer(_xHat.metal, atIndex: 1)
command.setBuffer(_ƔBuffers.w.metal, atIndex: 2)
command.setBuffer(_Ɣ.w.metal, atIndex: 2)
command.setBytes(pNbChannels, atIndex: 3)
command.setBytes(pNbBatch, atIndex: 4)
command.setBytes(pDimensions, atIndex: 5)
command.setBytes(pAccumulate, atIndex: 6)
command.setBuffer(_sum1.metal, atIndex: 7)
command.setBuffer(_sum2.metal, atIndex: 8)
command.setBuffer(_ƔBuffers.g.metal, atIndex: 9)
command.setBuffer(_βBuffers.g.metal, atIndex: 10)
command.setBuffer(_Ɣ.g.metal, atIndex: 9)
command.setBuffer(_β.g.metal, atIndex: 10)

command.dispatchThreads(_nbNeurons)
command.enqueue()
Expand All @@ -971,7 +971,7 @@ class BatchNormalizationGPU: BatchNormalizationBase
)
command.setBuffer(_σ2.metal, atIndex: 0)
command.setBuffer(_xHat.metal, atIndex: 1)
command.setBuffer(_ƔBuffers.w.metal, atIndex: 2)
command.setBuffer(_Ɣ.w.metal, atIndex: 2)
command.setBuffer(_sum1.metal, atIndex: 3)
command.setBuffer(_sum2.metal, atIndex: 4)
command.setBytes(pNbChannels, atIndex: 5)
Expand Down Expand Up @@ -1001,7 +1001,7 @@ class BatchNormalizationGPU: BatchNormalizationBase
let command = MetalKernel.get.createCommand(
"backwardBNConvInference", deviceID: _deviceID
)
command.setBuffer(_ƔBuffers.w.metal, atIndex: 0)
command.setBuffer(_Ɣ.w.metal, atIndex: 0)
command.setBuffer(_Eσ2.metal, atIndex: 1)
command.setBytes(pNbChannels, atIndex: 2)
command.setBytes(pNbBatch, atIndex: 3)
Expand All @@ -1019,6 +1019,6 @@ class BatchNormalizationGPU: BatchNormalizationBase
/// Get the weights in the GPU execution context.
func collectWeights() -> [IWeightBuffers]
{
return [_ƔBuffers, _βBuffers]
return [_Ɣ, _β]
}
}
6 changes: 3 additions & 3 deletions Tests/GrAIExamples/VGGExample.swift
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ final class VGGExample: XCTestCase

// Decode it as a base model
// (model where `layerPrev` links are not initialized).
let baseModel = try! JSONDecoder().decode(
let baseModel = try! PropertyListDecoder().decode(
BaseModel.self,
from: data
)
Expand Down Expand Up @@ -351,7 +351,7 @@ final class VGGExample: XCTestCase
XCTAssert(ratio < 60)

// Encode the model.
let encoder = JSONEncoder()
let encoder = PropertyListEncoder()
let data = try! encoder.encode(vgg)

// Save it to the disk.
Expand Down Expand Up @@ -486,7 +486,7 @@ final class VGGExample: XCTestCase
}

// Encode the trained model.
let encoder = JSONEncoder()
let encoder = PropertyListEncoder()
let data = try! encoder.encode(vgg)

// Save it to the disk.
Expand Down

0 comments on commit 267b0fa

Please sign in to comment.