 import keras2onnx
 import json
 from os.path import dirname, abspath
+
 sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
 from test_utils import run_onnx_runtime
 from keras2onnx.proto import is_tensorflow_older_than
@@ -18,6 +19,6 @@
 enable_transformer_test = True
 
 
-@unittest.skipIf(is_tensorflow_older_than('2.1.0') or not enable_transformer_test,
+@unittest.skipIf(not enable_transformer_test,
                  "Need enable transformer test before Transformers conversion.")
 class TestTransformers(unittest.TestCase):
@@ -38,6 +39,18 @@ def _prepare_inputs(self, tokenizer):
         inputs_onnx = {k_: v_.numpy() for k_, v_ in inputs.items()}
         return text, inputs, inputs_onnx
 
+    def test_3layer_gpt2(self):
+        from transformers import GPT2Config, TFGPT2Model, BertTokenizer
+        keras2onnx.proto.keras.backend.set_learning_phase(0)
+        config = GPT2Config(n_layer=3)
+        model = TFGPT2Model(config)
+        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
+        inputs = tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='tf')
+        predictions = model.predict(inputs)
+        onnx_model = keras2onnx.convert_keras(model, model.name)
+        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
+
     def test_TFBertModel(self):
         from transformers import BertTokenizer, TFBertModel
         pretrained_weights = 'bert-base-uncased'
@@ -56,7 +69,9 @@ def test_TFBertForPreTraining(self):
         model = TFBertForPreTraining.from_pretrained(pretrained_weights)
         predictions = model.predict(inputs)
         onnx_model = keras2onnx.convert_keras(model, model.name)
-        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2, atol=1.e-4))
+        self.assertTrue(
+            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
+                             atol=1.e-4))
 
     def test_TFBertForMaskedLM(self):
         from transformers import BertTokenizer, TFBertForMaskedLM
@@ -66,7 +81,9 @@ def test_TFBertForMaskedLM(self):
         model = TFBertForMaskedLM.from_pretrained(pretrained_weights)
         predictions = model.predict(inputs)
         onnx_model = keras2onnx.convert_keras(model, model.name)
-        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2, atol=1.e-4))
+        self.assertTrue(
+            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
+                             atol=1.e-4))
 
     def test_TFBertForNextSentencePrediction(self):
         from transformers import BertTokenizer, TFBertForNextSentencePrediction
@@ -146,7 +163,9 @@ def test_TFXLMModel(self):
         model = TFXLMModel.from_pretrained(pretrained_weights)
         predictions = model.predict(inputs)
         onnx_model = keras2onnx.convert_keras(model, model.name)
-        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2, atol=1.e-4))
+        self.assertTrue(
+            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
+                             atol=1.e-4))
 
     def test_TFXLMWithLMHeadModel(self):
         from transformers import XLMTokenizer, TFXLMWithLMHeadModel
@@ -156,7 +175,9 @@ def test_TFXLMWithLMHeadModel(self):
         model = TFXLMWithLMHeadModel.from_pretrained(pretrained_weights)
         predictions = model.predict(inputs)
         onnx_model = keras2onnx.convert_keras(model, model.name)
-        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2, atol=1.e-4))
+        self.assertTrue(
+            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
+                             atol=1.e-4))
 
     def test_TFXLMForSequenceClassification(self):
         from transformers import XLMTokenizer, TFXLMForSequenceClassification
@@ -196,7 +217,9 @@ def test_TFDistilBertForMaskedLM(self):
         model = TFDistilBertForMaskedLM.from_pretrained(pretrained_weights)
         predictions = model.predict(inputs)
         onnx_model = keras2onnx.convert_keras(model, model.name)
-        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2, atol=1.e-4))
+        self.assertTrue(
+            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
+                             atol=1.e-4))
 
     def test_TFDistilBertForSequenceClassification(self):
         from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification
@@ -246,7 +269,9 @@ def test_TFRobertaForMaskedLM(self):
         model = TFRobertaForMaskedLM.from_pretrained(pretrained_weights)
         predictions = model.predict(inputs)
         onnx_model = keras2onnx.convert_keras(model, model.name)
-        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2, atol=1.e-4))
+        self.assertTrue(
+            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
+                             atol=1.e-4))
 
     def test_TFRobertaForSequenceClassification(self):
         from transformers import RobertaTokenizer, TFRobertaForSequenceClassification
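For reference, a minimal standalone sketch of the round trip these tests exercise: convert a small TF transformers model with keras2onnx and compare its output against ONNX Runtime within the rtol/atol values that appear above. `run_onnx_runtime` lives in `tests/test_utils.py` and is not shown in this diff, so the comparison below is an assumption about what it checks; the GPT2Tokenizer choice and the input-name matching are illustrative, not the helper's actual logic (the new test itself uses BertTokenizer via `_prepare_inputs`).

# Hypothetical sketch, not the repository's run_onnx_runtime helper.
import numpy as np
import onnxruntime
import keras2onnx
from transformers import GPT2Config, TFGPT2Model, GPT2Tokenizer

config = GPT2Config(n_layer=3)                      # small model, as in test_3layer_gpt2
model = TFGPT2Model(config)
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')

inputs = tokenizer.encode_plus("Converting a 3-layer GPT-2 model to ONNX.",
                               add_special_tokens=True, return_tensors='tf')
inputs_onnx = {k: v.numpy() for k, v in inputs.items()}
expected = model.predict(inputs)

onnx_model = keras2onnx.convert_keras(model, model.name)
sess = onnxruntime.InferenceSession(onnx_model.SerializeToString())

# Feed only the tensors the ONNX graph declares; input names are assumed to match
# the tokenizer's keys (input_ids, attention_mask, ...), which may not hold for
# every converter version.
feed = {i.name: inputs_onnx[i.name] for i in sess.get_inputs() if i.name in inputs_onnx}
actual = sess.run(None, feed)

# Compare the first output (last hidden state) within the tolerances used above.
np.testing.assert_allclose(expected[0], actual[0], rtol=1.e-2, atol=1.e-4)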