@@ -125,9 +125,11 @@ def create_full_integer_quantization(
125125 optimizations: A list of optimizations to apply when converting the model.
126126 If not set, use `[Optimize.DEFAULT]` by default.
127127 inference_input_type: Target data type of real-number input arrays. Used
128- only when `is_integer_only` is True. Must be `{tf.uint8, tf.int8}`.
128+ only when `is_integer_only` is True. For TensorFlow 2, it is set to
129+ `tf.float32`. For TensorFlow 1, it must be in `{tf.uint8, tf.int8}`.
129130 inference_output_type: Target data type of real-number output arrays. Used
130- only when `is_integer_only` is True. Must be `{tf.uint8, tf.int8}`.
131+ only when `is_integer_only` is True. For TensorFlow 2, it is set to
132+ `tf.float32`. For TensorFlow 1, it must be in `{tf.uint8, tf.int8}`.
131133 is_integer_only: If True, enforces full integer quantization for all ops
132134 including the input and output. If False, uses integer with float
133135 fallback (using default float input/output) that means to fully integer
@@ -143,13 +145,24 @@ def create_full_integer_quantization(
143145 representative_data=representative_data,
144146 quantization_steps=quantization_steps)
145147 else :
146- if inference_input_type != tf.int8 and inference_input_type != tf.uint8:
147- raise ValueError('For integer only quantization, `inference_input_type`'
148- 'should be tf.int8 or tf.uint8.')
149- if inference_output_type != tf.int8 and inference_output_type != tf.uint8:
150- raise ValueError(
151- 'For integer only quantization, '
152- '`inference_output_type` should be tf.int8 or tf.uint8.')
148+ if compat.get_tf_behavior() == 2:
149+ # TODO(b/153576655): Replicate inference_input_type and
150+ # inference_output_type flags in TFLiteConverterV2
151+ tf.compat.v1.logging.warning(
152+ 'For integer only quantization, `inference_input_type` and '
153+ '`inference_output_type` are set to tf.float32. Support for '
154+ 'tf.int8 and tf.uint8 will be added soon.')
155+ inference_input_type = tf.float32
156+ inference_output_type = tf.float32
157+ else:
158+ if inference_input_type not in [tf.uint8, tf.int8]:
159+ raise ValueError('For integer only quantization, '
160+ '`inference_input_type` '
161+ 'should be tf.int8 or tf.uint8.')
162+ if inference_output_type not in [tf.uint8, tf.int8]:
163+ raise ValueError('For integer only quantization, '
164+ '`inference_output_type` '
165+ 'should be tf.int8 or tf.uint8.')
153166
154167 return QuantizationConfig(
155168 optimizations,
0 commit comments