Searched refs:inttype (Results 1 – 6 of 6) sorted by relevance
/cmsis-nn-latest/Tests/UnitTest/

add_mul_settings.py
     87  inttype = "int16_t"
     90  inttype = "int8_t"
    130  self.generate_c_array("input1", input_data1, datatype=inttype)
    131  self.generate_c_array("input2", input_data2, datatype=inttype)
    134  datatype=inttype)

pooling_settings.py
     77  inttype = tf.int16
     80  inttype = tf.int8
    106  interpreter = self.convert_and_interpret(model, inttype, input_data)

softmax_settings.py
    120  inttype = tf.int16
    123  inttype = tf.int8
    154  … interpreter = self.convert_and_interpret(model, inttype, tf.expand_dims(input_data, axis=0))

fully_connected_settings.py
    140  inttype = tf.int16
    144  inttype = tf.int8
    249  self.convert_model(model, inttype)
    257  interpreter = self.interpret_model(input_data, inttype)

test_settings.py
    448  def convert_and_interpret(self, model, inttype, input_data=None, dataset_shape=None):
    452  self.convert_model(model, inttype, dataset_shape)
    453  return self.interpret_model(input_data, inttype)
    455  def convert_model(self, model, inttype, dataset_shape=None, int16x8_int32bias=False):
    480  converter.inference_input_type = inttype
    481  converter.inference_output_type = inttype
    488  def interpret_model(self, input_data, inttype):
    501  interpreter.set_tensor(input_details[0]["index"], tf.cast(input_data, inttype))

conv_settings.py
    207  inttype = tf.int16
    211  inttype = tf.int8
    389  self.convert_model(model, inttype, int16x8_int32bias=self.int16xint8_int32)
    391  interpreter = self.interpret_model(input_data, inttype)
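The hits share one pattern: each *_settings.py script picks inttype from the configured bit width (tf.int16 or tf.int8 for the TFLite side; the "int16_t"/"int8_t" strings in add_mul_settings.py for the generated C arrays), and test_settings.py then feeds that type to the TFLite converter's inference_input_type/inference_output_type and casts the input before set_tensor. Below is a minimal sketch of that flow, not the actual test_settings.py code; the function name convert_and_run, the representative_data argument, and the bits parameter are illustrative only, and the TF calls mirror the lines quoted above.

# Sketch of the convert_model()/interpret_model() pattern seen in the hits.
# Assumption: a Keras model and a small representative dataset are available.
import numpy as np
import tensorflow as tf


def convert_and_run(model, input_data, representative_data, bits=8):
    # Select the integer type, mirroring the int8/int16 branches
    # in the *_settings.py scripts.
    inttype = tf.int16 if bits == 16 else tf.int8

    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = lambda: (
        [sample.astype(np.float32)] for sample in representative_data
    )
    # Restrict to integer kernels for the chosen scheme (8-bit, or 16x8).
    if bits == 16:
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]
    else:
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    # Force fully integer input/output tensors, as convert_model() does
    # at lines 480-481 of test_settings.py.
    converter.inference_input_type = inttype
    converter.inference_output_type = inttype
    tflite_model = converter.convert()

    # Run the quantized model, casting the input to the chosen type
    # as interpret_model() does before set_tensor() (line 501).
    interpreter = tf.lite.Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.set_tensor(input_details[0]["index"], tf.cast(input_data, inttype))
    interpreter.invoke()
    return interpreter.get_tensor(output_details[0]["index"])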