
Commit

feat: multi models support with MKL-DNN backend (intel-analytics#2936)
* feat: multi models support with MKL-DNN backend
i8run committed Oct 26, 2019
1 parent e1a0f05 commit 5754718
Showing 5 changed files with 31 additions and 1 deletion.
@@ -284,6 +284,11 @@ trait MklDnnLayer extends AbstractModule[Activity, Activity, Float] with MklDnnM
}

override def setQuantize(value: Boolean): MklDnnLayer.this.type = this

def paramsMMap(): (Array[TensorMMap], Array[TensorMMap]) = {
// return empty weight and gradWeight arrays by default
(Array.empty[TensorMMap], Array.empty[TensorMMap])
}
}

/**
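For orientation (an illustrative sketch, not part of this commit): paramsMMap() hands callers the TensorMMap wrappers of each MKL-DNN layer rather than only the dense heap tensors, so a driver managing several model copies can reach the native weights. A minimal collection helper under that assumption; collectMMaps and its layers argument are hypothetical:

def collectMMaps(layers: Seq[MklDnnLayer]): (Array[TensorMMap], Array[TensorMMap]) = {
  // gather the (weights, gradWeights) TensorMMap pairs of every layer and flatten them
  val pairs = layers.map(_.paramsMMap())
  (pairs.flatMap(_._1).toArray, pairs.flatMap(_._2).toArray)
}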
@@ -294,6 +294,10 @@ class Linear(
(Array(weight.dense, bias.dense), Array(gradWeight.dense, gradBias.dense))
}

override def paramsMMap(): (Array[TensorMMap], Array[TensorMMap]) = {
(Array(weight, bias), Array(gradWeight, gradBias))
}

override def zeroGradParameters(): Unit = {
}

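A side note (illustrative only, not in the diff): the existing parameters() keeps returning the dense heap tensors, while the new paramsMMap() returns the TensorMMap wrappers around that same storage, so the two views stay in sync. A hedged sketch, assuming linear is an already-built mkldnn Linear:

val (denseWeights, _) = linear.parameters()   // Array[Tensor[Float]] on the heap
val (mappedWeights, _) = linear.paramsMMap()  // Array[TensorMMap] wrapping dense + native
// the first entry of both arrays refers to the same dense weight tensor
assert(mappedWeights.head.dense eq denseWeights.head)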
@@ -413,6 +413,10 @@ class SpatialBatchNormalization(
(Array(weightAndBias.dense), Array(gradWeightAndBias.dense))
}

override def paramsMMap(): (Array[TensorMMap], Array[TensorMMap]) = {
(Array(weightAndBias), Array(gradWeightAndBias))
}

override def getExtraParameter(): Array[Tensor[Float]] = {
if (needScale) {
runningMeanScaled.copy(runningMean.dense).div(scaleFactor)
@@ -641,6 +641,10 @@ class SpatialConvolution(

}

override def paramsMMap(): (Array[TensorMMap], Array[TensorMMap]) = {
(Array(weight, bias), Array(gradWeight, gradBias))
}

// we need not implement it, because the grad parameters will be cleaned by mkldnn
override def zeroGradParameters(): Unit = {
}
@@ -21,6 +21,8 @@ import com.intel.analytics.bigdl.nn.mkldnn.Phase.{InferencePhase, TrainingPhase}
import com.intel.analytics.bigdl.nn.mkldnn.Phase.{InferencePhase, TrainingPhase}
import com.intel.analytics.bigdl.tensor.{DnnTensor, FloatType, Tensor}

import scala.reflect.ClassTag

/**
* `TensorMMap` contains two tensors, dense and native, which are a map of each other.
* It's used in the layer which contains weights. For the weight, we should sync the
@@ -29,7 +31,7 @@ import com.intel.analytics.bigdl.tensor.{DnnTensor, FloatType, Tensor}
*
* @param _size the shape of Tensor, such as Array(4, 3, 224, 224)
*/
private[mkldnn] class TensorMMap(_size: Array[Int])(implicit owner: MemoryOwner)
private[bigdl] class TensorMMap(_size: Array[Int])(implicit owner: MemoryOwner)
extends Serializable {
// the dense weight on the heap is used by the optimizer and so on, and is exposed at the
// AbstractModule level.
@@ -115,4 +117,15 @@ private[mkldnn] class TensorMMap(_size: Array[Int])(implicit owner: MemoryOwner)
dense.size(index)
}

def release(): Unit = {
// free the native (DNN) tensor if it has been allocated
if (native != null) {
native.release()
}
}

def setNative(another: TensorMMap): Unit = {
// let this map's native tensor view another TensorMMap's native storage
if (native != null && another.native != null) {
native.set(another.native.asInstanceOf[Tensor[_]])
}
}
}
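Taken together, release() and setNative() are what let one model instance point its native weight storage at another instance's DNN tensors, which is the multi-model sharing this commit targets. A rough usage sketch; shareNativeWeights, master, and worker are hypothetical names, and the two layers are assumed to have identical parameter shapes:

def shareNativeWeights(master: MklDnnLayer, worker: MklDnnLayer): Unit = {
  val (masterWeights, _) = master.paramsMMap()
  val (workerWeights, _) = worker.paramsMMap()
  // after setNative, each worker native tensor views the master's native storage,
  // so both models read the same MKL-DNN weight memory
  workerWeights.zip(masterWeights).foreach { case (w, m) => w.setNative(m) }
}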
