-
Notifications
You must be signed in to change notification settings - Fork 3
/
modelExporters.py
310 lines (259 loc) · 14.4 KB
/
modelExporters.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
from pysipfenn import Calculator
import torch
import onnx
import io
from tqdm import tqdm
class ONNXExporter:
    """Export models to the ONNX format (what they ship in by default) to allow (1) exporting modified pySIPFENN models,
    (2) simplify the models using ONNX optimizer, and (3) convert them to `FP16` precision, cutting the size in half.

    Note: Some of the dependencies (``onnxconverter_common`` and ``onnxsim``) are not installed by default. If you need them,
    you have to install pySIPFENN in `dev` mode like: ``pip install "pysipfenn[dev]"``, or like ``pip install -e ".[dev]"``.

    Args:
        calculator: A ``Calculator`` object with loaded models that has loaded PyTorch models (happens automatically
            when the ``autoLoad`` argument is kept to its default value of ``True`` when initializing the Calculator). During the
            initialization, the loaded PyTorch models are converted back to ONNX (in memory) to be then either adjusted or
            persisted to disk.

    Attributes:
        calculator: A Calculator object with ONNX loaded models.
        simplifiedDict: A boolean dictionary of models that have been simplified.
        fp16Dict: A boolean dictionary of models that have been converted to FP16.
    """

    def __init__(self, calculator: Calculator):
        """Initialize the ``ONNXExporter`` using a calculator object."""
        self.simplifiedDict = {model: False for model in calculator.loadedModels.keys()}
        self.fp16Dict = {model: False for model in calculator.loadedModels.keys()}
        self.calculator = calculator
        assert len(self.calculator.loadedModels) > 0, 'No models loaded in calculator. Nothing to export.'
        print(f'Initialized ONNXExporter with PyTorch models: '
              f'{list(self.calculator.loadedModels.keys())}'
              f'\n Converting to ONNX models...')
        for model in calculator.loadedModels:
            print(f'Converting {model} to ONNX')
            assert 'descriptor' in self.calculator.models[model], f'{model} does not have a descriptor. Cannot export.'
            descriptorUsed = self.calculator.models[model]['descriptor']
            # Input feature-vector length is fixed by the descriptor the model was trained on.
            if descriptorUsed == 'Ward2017':
                dLen = 271
            elif descriptorUsed == 'KS2022':
                dLen = 256
            else:
                raise NotImplementedError(f'ONNX export for {descriptorUsed} not implemented yet.')
            assert model in self.calculator.loadedModels, f'{model} not loaded in calculator. Nothing to export.'
            loadedModel = self.calculator.loadedModels[model]
            loadedModel.eval()
            inputs_tracer = torch.zeros(dLen, )
            # Models containing a dynamic dropout module take a second scalar input that toggles dropout mode.
            if 'OnnxDropoutDynamic()' in {str(module) for module in list(loadedModel._modules.values())}:
                inputs_tracer = (inputs_tracer, torch.zeros(1, ))
            # Export to an in-memory buffer, then reload it as an ONNX model object replacing the PyTorch one.
            temp = io.BytesIO()
            torch.onnx.export(
                loadedModel,
                inputs_tracer,
                temp,
                export_params=True,
                opset_version=16,
                do_constant_folding=True,
                input_names=[descriptorUsed],
                output_names=['property'],
            )
            temp.seek(0)
            self.calculator.loadedModels.update({
                model: onnx.load(temp)
            })
            del temp
        print(f'Initialized ONNXExporter with models: {list(self.calculator.loadedModels.keys())}')

    def simplify(self, model: str) -> None:
        """Simplify a loaded model using the ONNX optimizer.

        Args:
            model: The name of the model to simplify (must be loaded in the ``Calculator``).

        Returns:
            None

        Raises:
            ModuleNotFoundError: If the optional ``onnxsim`` dependency is not installed.
        """
        try:
            from onnxsim import simplify
        except ModuleNotFoundError:
            print('\n\nNote: Export Dependencies are not installed by default. If you need them, you have to install pySIPFENN in '
                  '`dev` mode like: `pip install "pysipfenn[dev]"`, or like `pip install -e ".[dev]"` (see pysipfenn.org)')
            # Re-raise: without onnxsim the simplify() call below would fail with a confusing NameError.
            raise
        print(f'Simplifying {model}')
        assert model in self.calculator.loadedModels, f'{model} not loaded in calculator. Nothing to simplify.'
        loadedModel = self.calculator.loadedModels[model]
        onnx_model_simp, check = simplify(loadedModel)
        assert check, "Simplified ONNX model could not be validated"
        self.calculator.loadedModels[model] = onnx_model_simp
        self.simplifiedDict[model] = True
        print(f'--> Simplified {model}', flush=True)

    def simplifyAll(self):
        """Simplify all loaded models with the simplify function."""
        for model in tqdm(self.calculator.loadedModels):
            self.simplify(model)
        print('***** Done simplifying all models! *****')

    def toFP16(self, model: str) -> None:
        """Convert a loaded model to FP16 precision.

        Args:
            model: The name of the model to convert to FP16 (must be loaded in the ``Calculator``).

        Returns:
            None

        Raises:
            ModuleNotFoundError: If the optional ``onnxconverter_common`` dependency is not installed.
        """
        try:
            from onnxconverter_common import float16
        except ModuleNotFoundError:
            print('\n\nNote: Export Dependencies are not installed by default. If you need them, you have to install pySIPFENN in '
                  '`dev` mode like: `pip install "pysipfenn[dev]"`, or like `pip install -e ".[dev]"` (see pysipfenn.org)')
            # Re-raise: without onnxconverter_common the conversion below would fail with a confusing NameError.
            raise
        print(f'Converting {model} to FP16')
        assert model in self.calculator.loadedModels, f'{model} not loaded in calculator. Nothing to convert to FP16.'
        loadedModel = self.calculator.loadedModels[model]
        # Convert to FP16
        onnx_model_fp16 = float16.convert_float_to_float16(loadedModel)
        self.calculator.loadedModels[model] = onnx_model_fp16
        self.fp16Dict[model] = True
        print(f'--> Converted {model} to FP16', flush=True)

    def toFP16All(self):
        """Convert all loaded models to FP16 precision with the toFP16 function."""
        for model in tqdm(self.calculator.loadedModels):
            self.toFP16(model)
        print('***** Done converting all models to FP16! *****')

    def export(self, model: str, append: str = '') -> None:
        """Export a loaded model to ``ONNX`` format. The file name records the processing applied
        (``_simplified`` / ``_fp16`` markers) followed by the optional ``append`` string.

        Args:
            model: The name of the model to export (must be loaded in the ``Calculator``).
            append: A string to append to the exported model name after the model name, simplification marker, and
                FP16 marker. Useful for adding a version number or other information to the exported model name.

        Returns:
            None
        """
        print(f'Exporting {model} to ONNX')
        assert model in self.calculator.loadedModels, f'{model} not loaded in calculator. Nothing to export.'
        loadedModel = self.calculator.loadedModels[model]
        name = f"{model}"
        if self.simplifiedDict[model]:
            name += '_simplified'
        if self.fp16Dict[model]:
            name += '_fp16'
        if append:
            name += f'_{append}'
        name += '.onnx'
        onnx.save(loadedModel, name)
        print(f'--> Exported as {name}', flush=True)

    def exportAll(self, append: str = '') -> None:
        """Export all loaded models to ``ONNX`` format with the export function. ``append`` string can be passed to the export
        function to append to the exported model name.
        """
        for model in tqdm(self.calculator.loadedModels):
            self.export(model, append=append)
        print('***** Done exporting all models! *****')
class TorchExporter:
    """Export models to the ``PyTorch PT`` format to allow for easy loading and inference in PyTorch in other projects.

    Args:
        calculator: A ``Calculator`` object with loaded models.

    Attributes:
        calculator: A ``Calculator`` object with loaded models.
    """

    def __init__(self, calculator: Calculator):
        """Initialize the TorchExporter with a calculator object that has loaded models."""
        self.calculator = calculator
        assert len(self.calculator.loadedModels) > 0, 'No models loaded in calculator. Nothing to export.'
        print(f'Initialized TorchExporter with models: {list(self.calculator.loadedModels.keys())}')

    def export(self, model: str, append: str = '') -> None:
        """Trace a loaded model in eval mode (no dropout) and save it as a ``PyTorch PT`` file in the
        current working directory.

        Args:
            model: The name of the model to export (must be loaded in the ``Calculator``) and it must have a descriptor
                (``Ward2017`` or ``KS2022``) defined in the ``Calculator.models`` dictionary created when the ``Calculator`` was
                initialized.
            append: A string to append to the exported model name after the model name. Useful for adding a version
                number or other information to the exported model name.

        Returns:
            None
        """
        print(f'Exporting {model} to PyTorch PT format')
        assert model in self.calculator.loadedModels, f'{model} not loaded in calculator. Nothing to export.'
        net = self.calculator.loadedModels[model]
        assert 'descriptor' in self.calculator.models[model], f'{model} does not have a descriptor. Cannot export.'
        descriptorUsed = self.calculator.models[model]['descriptor']
        # Input feature-vector length per supported descriptor.
        descriptorLengths = {'Ward2017': 271, 'KS2022': 256}
        if descriptorUsed not in descriptorLengths:
            raise NotImplementedError(f'TorchExporter export for {descriptorUsed} not implemented yet.')
        net.eval()
        tracerInput = torch.zeros(descriptorLengths[descriptorUsed], )
        # Models with a dynamic dropout module expect a second scalar input toggling dropout mode.
        if any(str(module) == 'OnnxDropoutDynamic()' for module in net._modules.values()):
            tracerInput = (tracerInput, torch.zeros(1, ))
        traced = torch.jit.trace(net, tracerInput)
        suffix = f'_{append}' if append else ''
        fileName = f'{model}{suffix}.pt'
        traced.save(fileName)
        print(f'--> Exported as {fileName}', flush=True)

    def exportAll(self, append: str = '') -> None:
        """Exports all loaded models to PyTorch PT format with the export function. `append` can be passed to the export
        function
        """
        for model in tqdm(self.calculator.loadedModels):
            self.export(model, append=append)
        print('***** Done exporting all models! *****')
class CoreMLExporter:
    """Export models to the ``CoreML`` format to allow for easy loading and inference in ``CoreML`` in other projects,
    particularly valuable for Apple devices, as pySIPFENN models can be run using the Neural Engine accelerator
    with minimal power consumption and neat optimizations.

    Note: Some of the dependencies (``coremltools``) are not installed by default. If you need them,
    you have to install pySIPFENN in `dev` mode like: ``pip install "pysipfenn[dev]"``, or like ``pip install -e ".[dev]"``.

    Args:
        calculator: A ``Calculator`` object with loaded models.

    Attributes:
        calculator: A ``Calculator`` object with loaded models.
    """

    def __init__(self, calculator: Calculator):
        """Initialize the CoreMLExporter with a calculator object that has loaded models."""
        self.calculator = calculator
        assert len(self.calculator.loadedModels) > 0, 'No models loaded in calculator. Nothing to export.'
        print(f'Initialized CoreMLExporter with models: {list(self.calculator.loadedModels.keys())}')

    def export(self, model: str, append: str = '') -> None:
        """Export a loaded model to ``CoreML`` format. Models will be saved as ``{model}.mlpackage`` in the current working
        directory. Models will be annotated with the feature vector name (``Ward2017`` or ``KS2022``) and the output will be
        named "property". The latter behavior will be adjusted in the future when model output name and unit will be
        added to the model JSON metadata.

        Args:
            model: The name of the model to export (must be loaded in the ``Calculator``) and it must have a descriptor
                (``Ward2017`` or ``KS2022``) defined in the ``calculator.models`` dictionary created when the ``Calculator`` was
                initialized.
            append: A string to append to the exported model name after the model name. Useful for adding a version
                number or other information to the exported model name.

        Returns:
            None

        Raises:
            ModuleNotFoundError: If the optional ``coremltools`` dependency is not installed.
        """
        try:
            import coremltools as ct
        except ModuleNotFoundError:
            print('\n\nNote: Export Dependencies are not installed by default. If you need them, you have to install pySIPFENN in '
                  '`dev` mode like: `pip install "pysipfenn[dev]"`, or like `pip install -e ".[dev]"` (see pysipfenn.org)')
            # Re-raise: without coremltools the conversion below would fail with a confusing NameError.
            raise
        print(f'Exporting {model} to CoreML')
        assert model in self.calculator.loadedModels, f'{model} not loaded in calculator. Nothing to export.'
        loadedModel = self.calculator.loadedModels[model]
        assert 'descriptor' in self.calculator.models[model], f'{model} does not have a descriptor. Cannot export.'
        descriptorUsed = self.calculator.models[model]['descriptor']
        # Input feature-vector length is fixed by the descriptor the model was trained on.
        if descriptorUsed == 'Ward2017':
            dLen = 271
        elif descriptorUsed == 'KS2022':
            dLen = 256
        else:
            raise NotImplementedError(f'CoreML export for {descriptorUsed} not implemented yet.')
        loadedModel.eval()
        inputs_converter = [ct.TensorType(name=descriptorUsed, shape=(dLen,))]
        inputs_tracer = torch.zeros(dLen,)
        # Models with a dynamic dropout module take a second scalar input controlling dropout mode;
        # it must be declared to both the tracer and the CoreML converter.
        if 'OnnxDropoutDynamic()' in {str(module) for module in list(loadedModel._modules.values())}:
            inputs_tracer = (inputs_tracer, torch.zeros(1,))
            inputs_converter.append(ct.TensorType(name='DropoutMode', shape=(1,)))
        tracedModel = torch.jit.trace(loadedModel, inputs_tracer)
        coreml_model = ct.convert(
            model=tracedModel,
            convert_to='mlprogram',
            inputs=inputs_converter,
            outputs=[ct.TensorType(name='property')]
        )
        name = f"{model}{f'_{append}' if append else ''}.mlpackage"
        coreml_model.save(name)
        print(f'--> Exported as {name}', flush=True)

    def exportAll(self, append: str = '') -> None:
        """Export all loaded models to ``CoreML`` format with the export function. ``append`` can be passed to the export
        function to append to all exported model names.
        """
        for model in tqdm(self.calculator.loadedModels):
            self.export(model, append=append)
        print('***** Done exporting all models! *****')