From 45eabcd2b97c64e9861a12cd4d14556c33fa863d Mon Sep 17 00:00:00 2001
From: Xiao
Date: Mon, 30 Sep 2019 10:03:07 +0800
Subject: [PATCH] Onnx support: modify unsqueeze function (#2910)

* modify unsqueeze function

---
 .../analytics/bigdl/dllib/nn/Unsqueeze.scala  | 74 ++++++++++++-------
 .../bigdl/dllib/tensor/DenseTensor.scala      | 38 ++++++++++
 .../bigdl/dllib/tensor/DnnTensor.scala        |  1 +
 .../tensor/QuantizedTensorUnsupported.scala   | 10 +++
 .../bigdl/dllib/tensor/SparseTensor.scala     |  4 +
 .../analytics/bigdl/dllib/tensor/Tensor.scala |  9 +++
 .../bigdl/python/api/PythonBigDL.scala        |  4 +-
 .../bigdl/torch/MultiCriterionSpec.scala      |  4 +-
 .../analytics/bigdl/torch/TorchSpec.scala     |  1 -
 .../analytics/bigdl/torch/UnsqueezeSpec.scala |  2 +-
 10 files changed, 116 insertions(+), 31 deletions(-)

diff --git a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/nn/Unsqueeze.scala b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/nn/Unsqueeze.scala
index 1ed93ce4f4b..723c466e299 100644
--- a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/nn/Unsqueeze.scala
+++ b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/nn/Unsqueeze.scala
@@ -23,43 +23,55 @@ import com.intel.analytics.bigdl.tensor._
 import scala.reflect.ClassTag
 
 /**
- * Insert singleton dim (i.e., dimension 1) at position pos. For an input with dim = input.dim(),
+ * Insert singleton dims (i.e., dimensions of size 1) at the positions given in the array pos.
+ * For an input with dim = input.dim(),
  * there are dim + 1 possible positions to insert the singleton dimension.
+ * Dimension indices are 1-based. A zero or negative pos is interpreted as unsqueeze() applied
+ * at pos = pos + input.dim() + 1.
  *
- * @param pos The position will be insert singleton.
+ * @param pos The array of positions at which singleton dimensions are inserted.
  * @param numInputDims Optional. If in a batch model, set to the inputDims.
 */
@SerialVersionUID(- 5180889605872472241L)
class Unsqueeze[T: ClassTag](
-  val pos: Int,
+  val pos: Array[Int],
   var numInputDims: Int = Int.MinValue
 )(implicit ev: TensorNumeric[T]) extends AbstractModule[Tensor[_], Tensor[_], T] {
+
+  def this(pos: Int, numInputDims: Int)(implicit ev: TensorNumeric[T]) = {
+    this(Array(pos), numInputDims)
+  }
+
+  def this(pos: Int)(implicit ev: TensorNumeric[T]) = {
+    this(Array(pos))
+  }
 
   def setNumInputDims(numInputDims: Int): Unit = {
     this.numInputDims = numInputDims
   }
 
-  private def getActualPosition(input: Tensor[_]) : Int = {
-    val dim = if (pos <= 0) {
-      input.dim() + pos + 1
-    } else {
-      pos
-    }
-
-    // get valid dimension offset for batchMode (if any)
-    val inputDim = input.dim() // data batch dim
-    numInputDims = if (numInputDims != Int.MinValue) numInputDims else inputDim // feature map dim
-    val offsetDim = inputDim - numInputDims
-    require(offsetDim >= 0, "input feature map dim (numInputDims) must be <= input:dim()," +
-      s" input feature map dim ${numInputDims}, inputdim ${inputDim}")
-
-    // the actual position; clearer error message for batchMode (if any)
-    val actualPos = dim + offsetDim
-    require(actualPos >= 1 && actualPos <= (inputDim + 1), s"Invalid position: $pos. " +
-      s"input:dim() is $input, input feature map dim (numInputDims) is $numInputDims.")
-
-    actualPos
+  private def getActualPosition(input: Tensor[_]): Array[Int] = {
+    // Resolve into a fresh array so repeated forward calls never mutate `pos`.
+    val actualPos = new Array[Int](pos.length)
+    for (index <- 0 until pos.length) {
+      // dimension indices are 1-based; zero and negative positions wrap around
+      val dim = if (pos(index) <= 0) {
+        input.dim() + pos(index) + 1
+      } else {
+        pos(index)
+      }
+      // get valid dimension offset for batchMode (if any)
+      val inputDim = input.dim() // data batch dim
+      numInputDims = if (numInputDims != Int.MinValue) numInputDims else inputDim // feature map dim
+      val offsetDim = inputDim - numInputDims
+      require(offsetDim >= 0, "input feature map dim (numInputDims) must be <= input:dim()," +
+        s" input feature map dim ${numInputDims}, inputdim ${inputDim}")
+      // the actual position; clearer error message for batchMode (if any)
+      actualPos(index) = dim + offsetDim
+      require(actualPos(index) >= 1 && actualPos(index) <= (inputDim + 1),
+        s"Invalid position: ${pos(index)}. input:dim() is ${input.dim()}," +
+        s" input feature map dim (numInputDims) is $numInputDims.")
+    }
+    actualPos
   }
 
@@ -68,9 +80,8 @@ class Unsqueeze[T: ClassTag](
       output = input.emptyInstance()
     }
 
-    output
-      .asInstanceOf[Tensor[NumericWildcard]]
-      .addSingletonDimension(input.asInstanceOf[Tensor[NumericWildcard]], actualPos)
+    output.asInstanceOf[Tensor[NumericWildcard]]
+      .addMultiDimension(input.asInstanceOf[Tensor[NumericWildcard]], actualPos)
 
     output
   }
@@ -108,6 +119,17 @@ object Unsqueeze {
   def apply[@specialized(Float, Double) T: ClassTag](
     pos: Int,
     numInputDims: Int = Int.MinValue)(implicit ev: TensorNumeric[T]) : Unsqueeze[T] = {
-    new Unsqueeze[T](pos, numInputDims)
+    new Unsqueeze[T](Array(pos), numInputDims)
+  }
+
+  def apply[T: ClassTag](
+    posList: Array[Int],
+    numInputDims: Int)(implicit ev: TensorNumeric[T]) : Unsqueeze[T] = {
+    new Unsqueeze[T](posList, numInputDims)
+  }
+
+  def apply[T: ClassTag](
+    posList: Array[Int])(implicit ev: TensorNumeric[T]) : Unsqueeze[T] = {
+    new Unsqueeze[T](posList)
   }
 }
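To make the widened contract concrete, here is a short usage sketch (not part of the patch; it assumes positions are counted against the original input layout, which is what the compensation loop added to DenseTensor.addMultiDimension below implements):

    import com.intel.analytics.bigdl.nn.Unsqueeze
    import com.intel.analytics.bigdl.numeric.NumericDouble
    import com.intel.analytics.bigdl.tensor.Tensor

    val input = Tensor[Double](2, 3).rand()

    // Single-position form, behaviour unchanged: pos = 0 resolves to
    // input.dim() + 0 + 1 = 3, so the 2x3 input becomes 2x3x1.
    val single = new Unsqueeze[Double](0)
    println(single.forward(input).size().mkString("x")) // 2x3x1

    // Array form: slots 1 and 3 of the original 2x3 input. The second
    // position is shifted to 4 internally, so the result is 1x2x3x1.
    val multi = Unsqueeze[Double](Array(1, 3))
    println(multi.forward(input).size().mkString("x")) // 1x2x3x1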
diff --git a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DenseTensor.scala b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DenseTensor.scala
index 51762d04f05..666108e6892 100644
--- a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DenseTensor.scala
+++ b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DenseTensor.scala
@@ -2092,6 +2092,44 @@ private[tensor] class DenseTensor[@specialized T: ClassTag](
     this.set(t.storage(), t.storageOffset(), size, stride)
   }
 
+  override def addMultiDimension(t: Tensor[T], dims: Array[Int] = Array(1)): Tensor[T] = {
+    // Work on a copy so the caller's array survives repeated calls unchanged.
+    val ds = dims.clone()
+    // Once a singleton has been inserted at a smaller position, every following
+    // larger position shifts up by one.
+    for (i <- 0 until ds.length) {
+      for (j <- i + 1 until ds.length) {
+        if (ds(j) > ds(i)) {
+          ds(j) = ds(j) + 1
+        }
+      }
+    }
+    val temp = t.clone()
+    var size = new Array[Int](t.dim())
+    var stride = new Array[Int](t.dim())
+
+    for (i <- 0 until ds.length) {
+      require(ds(i) > 0 && ds(i) <= temp.dim() + 1, s"invalid dimension: ${ds(i)}. " +
+        s"Tensor is of ${temp.dim()} dimensions.")
+
+      size = new Array[Int](temp.dim() + 1)
+      stride = new Array[Int](temp.dim() + 1)
+      var d = 0
+      while (d < ds(i) - 1) {
+        size(d) = temp.size(d + 1)
+        stride(d) = temp.stride(d + 1)
+        d += 1
+      }
+      // the inserted singleton dimension
+      size(ds(i) - 1) = 1
+      stride(ds(i) - 1) = 1
+      d += 1
+      while (d < temp.dim + 1) {
+        size(d) = temp.size(d)
+        stride(d) = temp.stride(d)
+        d += 1
+      }
+      temp.set(temp.storage(), temp.storageOffset(), size, stride)
+    }
+    this.set(temp.storage(), temp.storageOffset(), size, stride)
+  }
+
   /**
    * Implements >= operator comparing each element in x with value
    *
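The size bookkeeping above can be reasoned about separately from tensor storage; a minimal sketch on plain arrays (a hypothetical helper, not part of the patch), mirroring the compensate-then-insert order used by addMultiDimension:

    // Computes the shape produced by inserting singletons at 1-based positions
    // of the original shape, compensating later positions for earlier inserts.
    def unsqueezedShape(shape: Array[Int], dims: Array[Int]): Array[Int] = {
      val ds = dims.clone()
      for (i <- ds.indices; j <- i + 1 until ds.length) {
        if (ds(j) > ds(i)) ds(j) += 1
      }
      var cur = shape
      for (d <- ds) {
        require(d > 0 && d <= cur.length + 1, s"invalid dimension: $d")
        cur = (cur.take(d - 1) :+ 1) ++ cur.drop(d - 1)
      }
      cur
    }

    // unsqueezedShape(Array(2, 3), Array(1, 3)) gives Array(1, 2, 3, 1)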
" + - s"input:dim() is $input, input feature map dim (numInputDims) is $numInputDims.") - - actualPos + pos } override def updateOutput(input: Tensor[_]): Tensor[_] = { @@ -68,9 +80,8 @@ class Unsqueeze[T: ClassTag]( output = input.emptyInstance() } - output - .asInstanceOf[Tensor[NumericWildcard]] - .addSingletonDimension(input.asInstanceOf[Tensor[NumericWildcard]], actualPos) + output.asInstanceOf[Tensor[NumericWildcard]] + .addMultiDimension(input.asInstanceOf[Tensor[NumericWildcard]], actualPos) output } @@ -108,6 +119,17 @@ object Unsqueeze { def apply[@specialized(Float, Double) T: ClassTag]( pos: Int, numInputDims: Int = Int.MinValue)(implicit ev: TensorNumeric[T]) : Unsqueeze[T] = { - new Unsqueeze[T](pos, numInputDims) + new Unsqueeze[T](Array(pos), numInputDims) + } + + def apply[T: ClassTag]( + posList: Array[Int], + numInputDims: Int)(implicit ev: TensorNumeric[T]) : Unsqueeze[T] = { + new Unsqueeze[T](posList, numInputDims) + } + + def apply[T: ClassTag]( + posList: Array[Int])(implicit ev: TensorNumeric[T]) : Unsqueeze[T] = { + new Unsqueeze[T](posList) } } diff --git a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DenseTensor.scala b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DenseTensor.scala index 51762d04f05..666108e6892 100644 --- a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DenseTensor.scala +++ b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DenseTensor.scala @@ -2092,6 +2092,44 @@ private[tensor] class DenseTensor[@specialized T: ClassTag]( this.set(t.storage(), t.storageOffset(), size, stride) } + override def addMultiDimension( t: Tensor[T], dims: Array[Int] = Array(1)): Tensor[T] = { + // increase 1 to the following pos after a previous smaller pos have one dimension inserted. + for (i <- 0 until dims.length) { + for (j <- i + 1 until dims.length) { + if (dims(j) > dims(i)) { + dims(j) = dims(j) + 1 + } + } + } + var temp = t.clone() + var size = new Array[Int](t.dim()) + var stride = new Array[Int](t.dim()) + + for ( i <- 0 until dims.length) { + require(dims(i) > 0 && dims(i) <= temp.dim() + 1, s"invalid dimension: ${dims(i)}. " + + s"Tensor is of ${temp.dim()} dimensions.") + + size = new Array[Int](temp.dim() + 1) + stride = new Array[Int](temp.dim() + 1) + var d = 0 + while (d < dims(i) - 1) { + size(d) = temp.size(d + 1) + stride(d) = temp.stride(d + 1) + d += 1 + } + size(dims(i) - 1) = 1 + stride(dims(i) - 1) = 1 + d += 1 + while (d < temp.dim + 1) { + size(d) = temp.size(d) + stride(d) = temp.stride(d) + d += 1 + } + temp.set(temp.storage(), temp.storageOffset(), size, stride) + } + this.set(temp.storage(), temp.storageOffset(), size, stride) + } + /** * Implements >= operator comparing each element in x with value * diff --git a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DnnTensor.scala b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DnnTensor.scala index c5aa0a2c40d..840652ef5c1 100644 --- a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DnnTensor.scala +++ b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DnnTensor.scala @@ -332,6 +332,7 @@ object DnnTensor { override def getType(): TensorDataType = ??? override def diff(other: Tensor[T], count: Int, reverse: Boolean): Boolean = ??? override def addSingletonDimension(t: Tensor[T], dim: Int): Tensor[T] = ??? + override def addMultiDimension(t: Tensor[T], dims: Array[Int]): Tensor[T] = ??? override def reshape(sizes: Array[Int]): Tensor[T] = ??? 
diff --git a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/SparseTensor.scala b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/SparseTensor.scala
index 51f4594b660..9970ab922ef 100644
--- a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/SparseTensor.scala
+++ b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/SparseTensor.scala
@@ -445,6 +445,10 @@ private[tensor] class SparseTensor[@specialized(Float, Double) T: ClassTag](
     throw new UnsupportedOperationException(s"SparseTensor: Unimplemented method")
   }
 
+  override def addMultiDimension(t: Tensor[T], dims: Array[Int]): Tensor[T] = {
+    throw new UnsupportedOperationException(s"SparseTensor: Unimplemented method")
+  }
+
   override def reshape(sizes: Array[Int]): Tensor[T] = {
     throw new UnsupportedOperationException(s"SparseTensor: Unimplemented method")
   }

diff --git a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/Tensor.scala b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/Tensor.scala
index 5a384a7d581..2a2d7e14da3 100644
--- a/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/Tensor.scala
+++ b/spark/dl/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/Tensor.scala
@@ -717,6 +717,15 @@ trait Tensor[T] extends Serializable with TensorMath[T] with Activity {
    */
  def addSingletonDimension(t: Tensor[T] = this, dim: Int = 1): Tensor[T]
 
+  /**
+   * View this tensor and insert singleton dimensions at the positions given in `dims`
+   *
+   * @param t source tensor
+   * @param dims the array of positions at which to insert singleton dimensions, default is [1]
+   * @return this
+   */
+  def addMultiDimension(t: Tensor[T] = this, dims: Array[Int] = Array(1)): Tensor[T]
+
   /**
    * create a new tensor without any change of the tensor
    *

diff --git a/spark/dl/src/main/scala/com/intel/analytics/bigdl/python/api/PythonBigDL.scala b/spark/dl/src/main/scala/com/intel/analytics/bigdl/python/api/PythonBigDL.scala
index 0c5c7e76ea4..bcc62e6ff82 100644
--- a/spark/dl/src/main/scala/com/intel/analytics/bigdl/python/api/PythonBigDL.scala
+++ b/spark/dl/src/main/scala/com/intel/analytics/bigdl/python/api/PythonBigDL.scala
@@ -1537,10 +1537,10 @@ class PythonBigDL[T: ClassTag](implicit ev: TensorNumeric[T]) extends Serializab
       ip)
   }
 
-  def createUnsqueeze(pos: Int,
+  def createUnsqueeze(pos: JList[Int],
     numInputDims: Int = Int.MinValue)
   : Unsqueeze[T] = {
-    Unsqueeze[T](pos,
+    Unsqueeze[T](pos.asScala.toArray,
       numInputDims)
   }
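The new trait method can also be exercised directly on a tensor; a minimal sketch, assuming the DenseTensor semantics above (the returned view is built over a clone of the source storage, so the source tensor is untouched):

    import com.intel.analytics.bigdl.numeric.NumericFloat
    import com.intel.analytics.bigdl.tensor.Tensor

    val t = Tensor[Float](4, 5).rand()
    // Insert singletons at slots 1 and 3 of the original 4x5 layout.
    val v = Tensor[Float]().addMultiDimension(t, Array(1, 3))
    println(v.size().mkString("x")) // 1x4x5x1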
diff --git a/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/MultiCriterionSpec.scala b/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/MultiCriterionSpec.scala
index c078742182d..8b0102ed507 100644
--- a/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/MultiCriterionSpec.scala
+++ b/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/MultiCriterionSpec.scala
@@ -18,6 +18,7 @@ package com.intel.analytics.bigdl.torch
 import com.intel.analytics.bigdl.nn._
 import com.intel.analytics.bigdl._
 import com.intel.analytics.bigdl.tensor.Tensor
+import com.intel.analytics.bigdl.utils.RandomGenerator.RNG
 
 import scala.util.Random
 
@@ -25,13 +26,14 @@ import scala.util.Random
 class MultiCriterionSpec extends TorchSpec {
   "A MultiCriterion Module " should "generate correct output and grad with Tensor input" in {
     torchCheck()
+    RNG.setSeed(10)
     val module = new MultiCriterion[Double]()
     val nll = new ClassNLLCriterion[Double]()
     val nll2 = new MSECriterion[Double]()
     module.add(nll, 0.5)
     module.add(nll2)
 
-    val input = Tensor[Double](5).apply1(e => Random.nextDouble())
+    val input = Tensor[Double](5).rand()
     val target = Tensor[Double](5)
     target(Array(1)) = 1
     target(Array(2)) = 2

diff --git a/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/TorchSpec.scala b/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/TorchSpec.scala
index 6d040a4b647..4689db0eb71 100644
--- a/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/TorchSpec.scala
+++ b/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/TorchSpec.scala
@@ -22,5 +22,4 @@ class TorchSpec extends FlatSpec with BeforeAndAfter with Matchers {
       cancel("Torch is not installed")
     }
   }
-
 }

diff --git a/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/UnsqueezeSpec.scala b/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/UnsqueezeSpec.scala
index ebf129d6807..99f8bd4dbee 100644
--- a/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/UnsqueezeSpec.scala
+++ b/spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/UnsqueezeSpec.scala
@@ -134,6 +134,6 @@ class UnsqueezeSpec extends TorchSpec {
   "A Unsqueeze(0)" should "generate correct output and grad" in {
     val layer = new Unsqueeze[Double](0)
     val input = Tensor[Double](2, 2).rand()
-    layer.forward(input).size() should be(Array(2, 2, 1))
+    layer.forward(input).size() should be (Array(2, 2, 1))
   }
 }
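A companion check for the multi-position path, in the same style as the specs above (a sketch under the same input-relative position assumption; not part of the patch):

    import com.intel.analytics.bigdl.nn.Unsqueeze
    import com.intel.analytics.bigdl.numeric.NumericDouble
    import com.intel.analytics.bigdl.tensor.Tensor

    val layer = Unsqueeze[Double](Array(1, 3))
    val input = Tensor[Double](2, 2).rand()
    // Slots 1 and 3 of the original 2x2 input: 2x2 -> 1x2x2 -> 1x2x2x1.
    assert(layer.forward(input).size().sameElements(Array(1, 2, 2, 1)))
    // The gradient flows back at the input's original shape.
    val gradInput = layer.backward(input, Tensor[Double](1, 2, 2, 1).rand())
    assert(gradInput.size().sameElements(input.size()))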