Onnx support: modify unsqueeze function (#2910)
* modify unsqueeze function
lingxiao1989 committed Sep 30, 2019
1 parent 95e3c47 commit 45eabcd
Showing 10 changed files with 116 additions and 31 deletions.
@@ -23,43 +23,55 @@ import com.intel.analytics.bigdl.tensor._
import scala.reflect.ClassTag

/**
- * Insert singleton dim (i.e., dimension 1) at position pos. For an input with dim = input.dim(),
+ * Insert singleton dim (i.e., dimension 1) at each position in the array pos.
+ * For an input with dim = input.dim(),
* there are dim + 1 possible positions to insert the singleton dimension.
* Dimension indices are 1-based. 0 and negative pos correspond to unsqueeze() applied at
* pos = pos + input.dim() + 1
*
- * @param pos The position will be insert singleton.
+ * @param pos The array of positions at which to insert singleton dimensions.
* @param numInputDims Optional. If in batch mode, set to the inputDims.
*/

@SerialVersionUID(- 5180889605872472241L)
class Unsqueeze[T: ClassTag](
- val pos: Int,
+ val pos: Array[Int],
var numInputDims: Int = Int.MinValue
)(implicit ev: TensorNumeric[T]) extends AbstractModule[Tensor[_], Tensor[_], T] {
+ def this(pos: Int, numInputDims: Int)(implicit ev: TensorNumeric[T]) = {
+   this(Array(pos), numInputDims)
+ }
+
+ def this(pos: Int)(implicit ev: TensorNumeric[T]) = {
+   this(Array(pos))
+ }

def setNumInputDims(numInputDims: Int): Unit = {
this.numInputDims = numInputDims
}

- private def getActualPosition(input: Tensor[_]) : Int = {
-   val dim = if (pos <= 0) {
-     input.dim() + pos + 1
-   } else {
-     pos
-   }
-
-   // get valid dimension offset for batchMode (if any)
-   val inputDim = input.dim() // data batch dim
-   numInputDims = if (numInputDims != Int.MinValue) numInputDims else inputDim // feature map dim
-   val offsetDim = inputDim - numInputDims
-   require(offsetDim >= 0, "input feature map dim (numInputDims) must be <= input:dim()," +
-     s" input feature map dim ${numInputDims}, inputdim ${inputDim}")
-
-   // the actual position; clearer error message for batchMode (if any)
-   val actualPos = dim + offsetDim
-   require(actualPos >= 1 && actualPos <= (inputDim + 1), s"Invalid position: $pos. " +
-     s"input:dim() is $input, input feature map dim (numInputDims) is $numInputDims.")
-
-   actualPos
+ private def getActualPosition(input: Tensor[_]) : Array[Int] = {
+   for (index <- 0 until pos.length) {
+     // dimension indices are 1-based
+     pos(index) = if (pos(index) <= 0) {
+       input.dim() + pos(index) + 1
+     } else {
+       pos(index)
+     }
+     // get valid dimension offset for batchMode (if any)
+     val inputDim = input.dim() // data batch dim
+     numInputDims = if (numInputDims != Int.MinValue) numInputDims else inputDim // feature map dim
+     val offsetDim = inputDim - numInputDims
+     require(offsetDim >= 0, "input feature map dim (numInputDims) must be <= input:dim()," +
+       s" input feature map dim ${numInputDims}, inputdim ${inputDim}")
+     // the actual position; clearer error message for batchMode (if any)
+     val actualPos = pos(index) + offsetDim
+     require(actualPos >= 1 && actualPos <= (inputDim + 1), s"Invalid position: ${pos(index)}. " +
+       s"input:dim() is $inputDim, input feature map dim (numInputDims) is $numInputDims.")
+     pos(index) = actualPos
+   }
+   pos
}

override def updateOutput(input: Tensor[_]): Tensor[_] = {
@@ -68,9 +80,8 @@ class Unsqueeze[T: ClassTag](
output = input.emptyInstance()
}

-   output
-     .asInstanceOf[Tensor[NumericWildcard]]
-     .addSingletonDimension(input.asInstanceOf[Tensor[NumericWildcard]], actualPos)
+   output.asInstanceOf[Tensor[NumericWildcard]]
+     .addMultiDimension(input.asInstanceOf[Tensor[NumericWildcard]], actualPos)

output
}
@@ -108,6 +119,17 @@ object Unsqueeze {
def apply[@specialized(Float, Double) T: ClassTag](
pos: Int,
numInputDims: Int = Int.MinValue)(implicit ev: TensorNumeric[T]) : Unsqueeze[T] = {
-   new Unsqueeze[T](pos, numInputDims)
+   new Unsqueeze[T](Array(pos), numInputDims)
}
+
+ def apply[T: ClassTag](
+     posList: Array[Int],
+     numInputDims: Int)(implicit ev: TensorNumeric[T]): Unsqueeze[T] = {
+   new Unsqueeze[T](posList, numInputDims)
+ }
+
+ def apply[T: ClassTag](
+     posList: Array[Int])(implicit ev: TensorNumeric[T]): Unsqueeze[T] = {
+   new Unsqueeze[T](posList)
+ }
}
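Note: with this change, Unsqueeze can be built either from a single position (the new auxiliary constructors keep existing call sites working) or from an array of positions. A minimal usage sketch; the expected shapes are derived from the insertion semantics above, not taken verbatim from the commit:

```scala
import com.intel.analytics.bigdl.nn.Unsqueeze
import com.intel.analytics.bigdl.tensor.Tensor

val in = Tensor[Float](4, 5).rand()

// Single position, same behavior as before this commit: 4x5 -> 4x1x5
val single = Unsqueeze[Float](2)
println(single.forward(in).size().mkString("x"))

// Array of positions, both relative to the original 4x5 shape:
// insert at 1 and at 3 -> 1x4x5x1 (the second insert gets shifted past the first)
val multi = Unsqueeze[Float](Array(1, 3))
println(multi.forward(in).size().mkString("x"))
```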
@@ -2092,6 +2092,44 @@ private[tensor] class DenseTensor[@specialized T: ClassTag](
this.set(t.storage(), t.storageOffset(), size, stride)
}

+ override def addMultiDimension(t: Tensor[T], dims: Array[Int] = Array(1)): Tensor[T] = {
+   // Shift each later position up by one for every earlier, smaller position:
+   // inserting a dimension at a lower index moves all subsequent positions.
+   for (i <- 0 until dims.length) {
+     for (j <- i + 1 until dims.length) {
+       if (dims(j) > dims(i)) {
+         dims(j) = dims(j) + 1
+       }
+     }
+   }
+   val temp = t.clone()
+   var size = new Array[Int](t.dim())
+   var stride = new Array[Int](t.dim())
+
+   for (i <- 0 until dims.length) {
+     require(dims(i) > 0 && dims(i) <= temp.dim() + 1, s"invalid dimension: ${dims(i)}. " +
+       s"Tensor is of ${temp.dim()} dimensions.")
+
+     size = new Array[Int](temp.dim() + 1)
+     stride = new Array[Int](temp.dim() + 1)
+     // Copy sizes/strides below the insertion point, splice in the singleton
+     // (size 1, stride 1), then copy the remaining dims shifted up by one.
+     var d = 0
+     while (d < dims(i) - 1) {
+       size(d) = temp.size(d + 1)
+       stride(d) = temp.stride(d + 1)
+       d += 1
+     }
+     size(dims(i) - 1) = 1
+     stride(dims(i) - 1) = 1
+     d += 1
+     while (d < temp.dim + 1) {
+       size(d) = temp.size(d)
+       stride(d) = temp.stride(d)
+       d += 1
+     }
+     temp.set(temp.storage(), temp.storageOffset(), size, stride)
+   }
+   this.set(temp.storage(), temp.storageOffset(), size, stride)
+ }

/**
* Implements >= operator comparing each element in x with value
*
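The position-shifting loop at the top of addMultiDimension is what makes every entry of dims refer to the original tensor's dimensions. A standalone sketch of just that adjustment (shiftDims is a hypothetical helper, not part of the commit):

```scala
// Hypothetical helper mirroring the shift performed inside addMultiDimension.
def shiftDims(dims: Array[Int]): Array[Int] = {
  val d = dims.clone()
  for (i <- d.indices; j <- i + 1 until d.length) {
    // an insert at a lower index pushes every later position up by one
    if (d(j) > d(i)) d(j) += 1
  }
  d
}

// For a 2x3 tensor, inserting at positions 1 and 3 of the original shape:
// shiftDims(Array(1, 3)) == Array(1, 4), giving a 1x2x3x1 result.
println(shiftDims(Array(1, 3)).mkString(", "))  // prints: 1, 4
```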
@@ -332,6 +332,7 @@ object DnnTensor {
override def getType(): TensorDataType = ???
override def diff(other: Tensor[T], count: Int, reverse: Boolean): Boolean = ???
override def addSingletonDimension(t: Tensor[T], dim: Int): Tensor[T] = ???
+ override def addMultiDimension(t: Tensor[T], dims: Array[Int]): Tensor[T] = ???
override def reshape(sizes: Array[Int]): Tensor[T] = ???
override def save(path: String, overWrite: Boolean): DnnTensorUnsupportOperations.this.type = ???
override def getTensorNumeric(): TensorNumeric[T] = ???
@@ -525,6 +525,16 @@ abstract class QuantizedTensorUnsupported[T: ClassTag] extends Tensor[T] {
override def addSingletonDimension(t: Tensor[T], dim: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)

+ /**
+  * view this.tensor and insert singleton dimensions at the positions given in `dims`
+  *
+  * @param t source tensor
+  * @param dims the array of positions at which to insert dimensions, default is [1]
+  * @return this
+  */
+ override def addMultiDimension(t: Tensor[T], dims: Array[Int]): Tensor[T] =
+   throw new UnsupportedOperationException(errorString)

/**
* create a new tensor without any change of the tensor
*
@@ -445,6 +445,10 @@ private[tensor] class SparseTensor[@specialized(Float, Double) T: ClassTag](
throw new UnsupportedOperationException(s"SparseTensor: Unimplemented method")
}

+ override def addMultiDimension(t: Tensor[T], dims: Array[Int]): Tensor[T] = {
+   throw new UnsupportedOperationException(s"SparseTensor: Unimplemented method")
+ }

override def reshape(sizes: Array[Int]): Tensor[T] = {
throw new UnsupportedOperationException(s"SparseTensor: Unimplemented method")
}
@@ -717,6 +717,15 @@ trait Tensor[T] extends Serializable with TensorMath[T] with Activity {
*/
def addSingletonDimension(t: Tensor[T] = this, dim: Int = 1): Tensor[T]

+ /**
+  * view this.tensor and insert singleton dimensions at the positions given in `dims`
+  *
+  * @param t source tensor
+  * @param dims the array of positions at which to insert dimensions, default is [1]
+  * @return this
+  */
+ def addMultiDimension(t: Tensor[T] = this, dims: Array[Int] = Array(1)): Tensor[T]

/**
* create a new tensor without any change of the tensor
*
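Since the trait gives both parameters defaults, the method can also be called directly against a fresh tensor. A short sketch using the DenseTensor implementation above, with the shape again derived from the shifting semantics rather than quoted from the commit:

```scala
import com.intel.analytics.bigdl.tensor.Tensor

val t = Tensor[Float](2, 3).rand()
// View t with singletons inserted at positions 1 and 3 of the original shape.
val v = Tensor[Float]().addMultiDimension(t, Array(1, 3))
println(v.size().mkString("x"))  // 1x2x3x1
```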
@@ -1537,10 +1537,10 @@ class PythonBigDL[T: ClassTag](implicit ev: TensorNumeric[T]) extends Serializab
ip)
}

- def createUnsqueeze(pos: Int,
+ def createUnsqueeze(pos: JList[Int],
numInputDims: Int = Int.MinValue)
: Unsqueeze[T] = {
-   Unsqueeze[T](pos,
+   Unsqueeze[T](pos.asScala.toArray,
numInputDims)
}

@@ -18,20 +18,22 @@ package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.tensor.Tensor
+ import com.intel.analytics.bigdl.utils.RandomGenerator.RNG

import scala.util.Random

@com.intel.analytics.bigdl.tags.Serial
class MultiCriterionSpec extends TorchSpec {
"A MultiCriterion Module " should "generate correct output and grad with Tensor input" in {
torchCheck()
+ RNG.setSeed(10)
val module = new MultiCriterion[Double]()
val nll = new ClassNLLCriterion[Double]()
val nll2 = new MSECriterion[Double]()
module.add(nll, 0.5)
module.add(nll2)

- val input = Tensor[Double](5).apply1(e => Random.nextDouble())
+ val input = Tensor[Double](5).rand()
val target = Tensor[Double](5)
target(Array(1)) = 1
target(Array(2)) = 2
@@ -22,5 +22,4 @@ class TorchSpec extends FlatSpec with BeforeAndAfter with Matchers {
cancel("Torch is not installed")
}
}
-
}
@@ -134,6 +134,6 @@ class UnsqueezeSpec extends TorchSpec {
"A Unsqueeze(0)" should "generate correct output and grad" in {
val layer = new Unsqueeze[Double](0)
val input = Tensor[Double](2, 2).rand()
- layer.forward(input).size() should be(Array(2, 2, 1))
+ layer.forward(input).size() should be (Array(2, 2, 1))
}
}

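The spec above still exercises only the single-position path. A sketch of a companion test for the new array form; note that getActualPosition and addMultiDimension adjust the pos array in place, so this calls forward only once, and the expected shape follows from the shifting semantics in DenseTensor:

```scala
"A Unsqueeze(Array(1, 3))" should "generate correct output shape" in {
  val layer = Unsqueeze[Double](Array(1, 3))
  val input = Tensor[Double](2, 2).rand()
  // Positions refer to the original 2x2 shape: 2x2 -> 1x2x2x1
  layer.forward(input).size() should be (Array(1, 2, 2, 1))
}
```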