
refactor sonnx examples
joddiy committed Jun 2, 2020
1 parent e88f1f8 commit e93770b
Showing 12 changed files with 641 additions and 538 deletions.
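
Across these files the refactor replaces the old sonnx.prepare(onnx_model, device=dev) plus Infer-wrapper pattern with a sonnx.SONNXModel subclass that is compiled once and then called directly. Below is a minimal sketch of the new pattern assembled from the diffs that follow; the model path and dummy input shape are placeholders rather than values from any of the examples:

import numpy as np
import onnx
from singa import device
from singa import tensor
from singa import sonnx


class MyModel(sonnx.SONNXModel):

    def __init__(self, onnx_model):
        super(MyModel, self).__init__(onnx_model)

    def forward(self, *x):
        # delegate to the ONNX graph wrapped by SONNXModel
        return super(MyModel, self).forward(*x)

    def train_one_batch(self, x, y):
        # the examples are inference-only, so training is left unimplemented
        pass


onnx_model = onnx.load("model.onnx")                  # placeholder path
dev = device.create_cuda_gpu()
dummy = np.zeros((1, 3, 224, 224), dtype=np.float32)  # placeholder shape
x = tensor.Tensor(device=dev, data=dummy)

m = MyModel(onnx_model)
m.compile([x], is_train=False, use_graph=True, sequential=True)
y = m.forward(*[x])[0]
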
59 changes: 26 additions & 33 deletions examples/onnx/arcface.py
@@ -24,10 +24,9 @@

from singa import device
from singa import tensor
from singa import autograd
from singa import sonnx
import onnx
from utils import download_model, update_batch_size, check_exist_or_download
from utils import download_model, check_exist_or_download

import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
@@ -54,18 +53,17 @@ def get_image():
return img1, img2


class Infer:
class MyModel(sonnx.SONNXModel):

def __init__(self, sg_ir):
self.sg_ir = sg_ir
for idx, tens in sg_ir.tensor_map.items():
# allow the tensors to be updated
tens.requires_grad = True
tens.stores_grad = True
sg_ir.tensor_map[idx] = tens
def __init__(self, onnx_model):
super(MyModel, self).__init__(onnx_model)

def forward(self, x):
return sg_ir.run([x])[0]
def forward(self, *x):
y = super(MyModel, self).forward(*x)
return y

def train_one_batch(self, x, y):
pass


if __name__ == "__main__":
@@ -78,35 +76,30 @@ def forward(self, x):
download_model(url)
onnx_model = onnx.load(model_path)

# set batch size
onnx_model = update_batch_size(onnx_model, 2)
# inference demo
logging.info("preprocessing...")
img1, img2 = get_image()
img1 = preprocess(img1)
img2 = preprocess(img2)
# sg_ir = sonnx.prepare(onnx_model) # run without graph
# y = sg_ir.run([img1, img2])

# prepare the model
logging.info("prepare model...")
logging.info("model compling...")
dev = device.create_cuda_gpu()
sg_ir = sonnx.prepare(onnx_model, device=dev)
autograd.training = False
model = Infer(sg_ir)
x = tensor.Tensor(device=dev, data=np.concatenate((img1, img2), axis=0))
m = MyModel(onnx_model)
m.compile([x], is_train=False, use_graph=True, sequential=True)

# verify the test dataset
# verify the test
# from utils import load_dataset
# inputs, ref_outputs = load_dataset(
# os.path.join('/tmp', 'resnet100', 'test_data_set_0'))
# inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'resnet100', 'test_data_set_0'))
# x_batch = tensor.Tensor(device=dev, data=inputs[0])
# outputs = model.forward(x_batch)
# outputs = sg_ir.run([x_batch])
# for ref_o, o in zip(ref_outputs, outputs):
# np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4)

# inference demo
logging.info("preprocessing...")
img1, img2 = get_image()
img1 = preprocess(img1)
img2 = preprocess(img2)

x_batch = tensor.Tensor(device=dev,
data=np.concatenate((img1, img2), axis=0))
logging.info("model running...")
y = model.forward(x_batch)
y = m.forward(*[x])[0]

logging.info("postprocessing...")
embedding = tensor.to_numpy(y)
@@ -120,4 +113,4 @@ def forward(self, x):
sim = np.dot(embedding1, embedding2.T)
# logging.info predictions
logging.info('Distance = %f' % (dist))
logging.info('Similarity = %f' % (sim))
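
For reference, the arcface postprocessing truncated above compares the two face embeddings computed for img1 and img2. A minimal sketch follows, assuming embedding1 and embedding2 are the two rows of the batch output; the squared-Euclidean distance is an assumption about the elided lines, while the dot-product similarity matches the visible one:

import numpy as np

def compare_embeddings(embedding1, embedding2):
    # squared Euclidean distance between the two embeddings (assumed formula)
    dist = np.sum(np.square(embedding1 - embedding2))
    # similarity via dot product, as in the visible diff line
    sim = np.dot(embedding1, embedding2.T)
    return dist, sim
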
50 changes: 23 additions & 27 deletions examples/onnx/bert/bert-squad.py
@@ -24,14 +24,13 @@
from singa import device
from singa import tensor
from singa import sonnx
from singa import autograd
import onnx
import tokenization
from run_onnx_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions

import sys
sys.path.append(os.path.dirname(__file__) + '/..')
from utils import download_model, update_batch_size, check_exist_or_download
from utils import download_model, check_exist_or_download

import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
@@ -54,15 +53,6 @@ def load_vocab():
return filename


class Infer:

def __init__(self, sg_ir):
self.sg_ir = sg_ir

def forward(self, x):
return sg_ir.run(x)


def preprocess():
vocab_file = load_vocab()
tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
@@ -96,6 +86,19 @@ def postprocess(eval_examples, extra_data, all_results):
print("The result is:", json.dumps(test_data, indent=2))


class MyModel(sonnx.SONNXModel):

def __init__(self, onnx_model):
super(MyModel, self).__init__(onnx_model)

def forward(self, *x):
y = super(MyModel, self).forward(*x)
return y

def train_one_batch(self, x, y):
pass


if __name__ == "__main__":

url = 'https://media.githubusercontent.com/media/onnx/models/master/text/machine_comprehension/bert-squad/model/bertsquad-10.tar.gz'
@@ -107,16 +110,12 @@ def postprocess(eval_examples, extra_data, all_results):
download_model(url)
onnx_model = onnx.load(model_path)

# set batch size
onnx_model = update_batch_size(onnx_model, batch_size)
dev = device.create_cuda_gpu()
autograd.training = False

# inference
logging.info("preprocessing...")
input_ids, input_mask, segment_ids, extra_data, eval_examples = preprocess()

sg_ir = None
m = None
dev = device.create_cuda_gpu()
n = len(input_ids)
bs = batch_size
all_results = []
@@ -132,23 +131,20 @@
input_ids[idx:idx + bs].astype(np.int32),
]

if sg_ir is None:
# prepare the model
logging.info("model is none, prepare model...")
sg_ir = sonnx.prepare(onnx_model,
device=dev,
init_inputs=inputs,
keep_initializers_as_inputs=False)
model = Infer(sg_ir)

x_batch = []
for inp in inputs:
tmp_tensor = tensor.from_numpy(inp)
tmp_tensor.to_device(dev)
x_batch.append(tmp_tensor)

# prepare the model
if m is None:
logging.info("model compling...")
m = MyModel(onnx_model)
# m.compile(x_batch, is_train=False, use_graph=True, sequential=True)

logging.info("model running for sample {}...".format(idx))
outputs = model.forward(x_batch)
outputs = m.forward(*x_batch)

logging.info("hanlde the result of sample {}...".format(idx))
result = []
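
The bert-squad example now builds its model lazily inside the batch loop and feeds each batch as singa tensors. A minimal sketch of that loop, assuming MyModel is the SONNXModel subclass shown earlier and batches is an illustrative list of per-step numpy input lists:

from singa import tensor

def run_batches(onnx_model, batches, dev):
    m = None
    all_outputs = []
    for inputs in batches:
        # move each numpy input onto the device as a singa tensor
        x_batch = []
        for inp in inputs:
            t = tensor.from_numpy(inp)
            t.to_device(dev)
            x_batch.append(t)
        # build the model on first use, as the refactored example does
        if m is None:
            m = MyModel(onnx_model)  # MyModel as in the sketch above
        all_outputs.append(m.forward(*x_batch))
    return all_outputs
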
51 changes: 23 additions & 28 deletions examples/onnx/fer_emotion.py
@@ -22,10 +22,9 @@

from singa import device
from singa import tensor
from singa import autograd
from singa import sonnx
import onnx
from utils import download_model, update_batch_size, check_exist_or_download
from utils import download_model, check_exist_or_download

import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
@@ -51,18 +50,17 @@ def get_image_labe():
return img, labels


class Infer:
class MyModel(sonnx.SONNXModel):

def __init__(self, sg_ir):
self.sg_ir = sg_ir
for idx, tens in sg_ir.tensor_map.items():
# allow the tensors to be updated
tens.requires_grad = True
tens.stores_grad = True
sg_ir.tensor_map[idx] = tens
def __init__(self, onnx_model):
super(MyModel, self).__init__(onnx_model)

def forward(self, x):
return sg_ir.run([x])[0]
def forward(self, *x):
y = super(MyModel, self).forward(*x)
return y

def train_one_batch(self, x, y):
pass


if __name__ == "__main__":
@@ -75,33 +73,30 @@ def forward(self, x):
download_model(url)
onnx_model = onnx.load(model_path)

# set batch size
onnx_model = update_batch_size(onnx_model, 1)
# inference
logging.info("preprocessing...")
img, labels = get_image_labe()
img = preprocess(img)
# sg_ir = sonnx.prepare(onnx_model) # run without graph
# y = sg_ir.run([img])

# prepare the model
logging.info("prepare model...")
logging.info("model compling...")
dev = device.create_cuda_gpu()
sg_ir = sonnx.prepare(onnx_model, device=dev)
autograd.training = False
model = Infer(sg_ir)
x = tensor.PlaceHolder(img.shape, device=dev)
m = MyModel(onnx_model)
m.compile([x], is_train=False, use_graph=True, sequential=True)

# verify the test
# from utils import load_dataset
# inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'emotion_ferplus', 'test_data_set_0'))
# x_batch = tensor.Tensor(device=dev, data=inputs[0])
# outputs = model.forward(x_batch)
# outputs = sg_ir.run([x_batch])
# for ref_o, o in zip(ref_outputs, outputs):
# np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4)

# inference
logging.info("preprocessing...")
img, labels = get_image_labe()
img = preprocess(img)

x_batch = tensor.Tensor(device=dev, data=img)

logging.info("model running...")
y = model.forward(x_batch)
x = tensor.Tensor(device=dev, data=img)
y = m.forward(*[x])[0]

logging.info("postprocessing...")
y = tensor.softmax(y)
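
Unlike arcface, the fer_emotion example compiles the graph against a tensor.PlaceHolder carrying only the input shape, then binds real data at forward time. A minimal sketch of that variant, again assuming the MyModel subclass and onnx_model from the first sketch; the input shape here is illustrative, not the model's actual one:

import numpy as np
from singa import device
from singa import tensor

dev = device.create_cuda_gpu()
img = np.zeros((1, 1, 64, 64), dtype=np.float32)   # illustrative shape only

# compile against a shape-only placeholder ...
x = tensor.PlaceHolder(img.shape, device=dev)
m = MyModel(onnx_model)                            # MyModel/onnx_model as above
m.compile([x], is_train=False, use_graph=True, sequential=True)

# ... then bind the real data at inference time
x = tensor.Tensor(device=dev, data=img)
y = m.forward(*[x])[0]
y = tensor.softmax(y)                              # per-class probabilities
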
