# SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from test_settings import TestSettings
import tensorflow as tf
import tf_keras as keras


class SoftmaxSettings(TestSettings):
    softmax_input_integer_bits = 5

    def __init__(self,
                 dataset,
                 testtype,
                 regenerate_weights,
                 regenerate_input,
                 regenerate_biases,
                 schema_file,
                 x_in=5,
                 y_in=1,
                 randmin=TestSettings.INT8_MIN,
                 randmax=TestSettings.INT8_MAX,
                 int16xint8=False,
                 inInt8outInt16=False,
                 input_scale=0.003922,
                 input_zp=-128,
                 interpreter="tensorflow"):
        super().__init__(dataset,
                         testtype,
                         regenerate_weights,
                         regenerate_input,
                         regenerate_biases,
                         schema_file,
                         1,
                         1,
                         x_in,
                         y_in,
                         1,
                         1,
                         1,
                         1,
                         False,
                         randmin,
                         randmax,
                         int16xint8=int16xint8,
                         interpreter=interpreter)
        self.x_input = self.x_output = x_in
        self.y_input = self.y_output = y_in
        self.inInt8outInt16 = inInt8outInt16

        if self.inInt8outInt16 and self.is_int16xint8:
            raise RuntimeError("Specify input as either s8 or s16")

        if self.inInt8outInt16:
            self.input_scale = input_scale
            self.json_template = "TestCases/Common/Softmax/softmax_int8_to_int16_template.json"
            self.json_replacements = {
                "num_rows": self.y_input,
                "row_size": self.x_input,
                "input_scale": input_scale,
                "input_zp": input_zp
            }

    def calc_softmax_params(self):
        # Derive the fixed-point softmax parameters (input multiplier, input left
        # shift and diff_min) that are written to the generated C config header.
        if self.is_int16xint8:
            input_scale_beta_rescale = self.input_scale / (10.0 / 65535.0)
            (self.input_multiplier, self.input_left_shift) = self.quantize_scale(input_scale_beta_rescale)
        else:
            input_real_multiplier = min(self.input_scale * (1 << (31 - self.softmax_input_integer_bits)), (1 << 31) - 1)
            (self.input_multiplier, self.input_left_shift) = self.quantize_scale(input_real_multiplier)

            self.diff_min = ((1 << self.softmax_input_integer_bits) - 1) * \
                            (1 << (31 - self.softmax_input_integer_bits)) / \
                            (1 << self.input_left_shift)
            self.diff_min = math.floor(self.diff_min)

    def write_c_config_header(self) -> None:
        super().write_c_config_header(write_common_parameters=False)

        filename = self.config_data
        filepath = self.headers_dir + filename
        prefix = self.testdataset.upper()

        with open(filepath, "a") as f:
            f.write("#define {}_NUM_ROWS {}\n".format(prefix, self.y_input))
            f.write("#define {}_ROW_SIZE {}\n".format(prefix, self.x_input))
            f.write("#define {}_INPUT_MULT {}\n".format(prefix, self.input_multiplier))
            f.write("#define {}_INPUT_LEFT_SHIFT {}\n".format(prefix, self.input_left_shift))
            if not self.is_int16xint8:
                f.write("#define {}_DIFF_MIN {}\n".format(prefix, -self.diff_min))
            f.write("#define {}_DST_SIZE {}\n".format(prefix, self.x_output * self.y_output))

    def get_softmax_randomized_input_data(self, input_data, input_shape):
        # Generate or load saved input data unless hardcoded data is provided.
        if input_data is not None:
            input_data = tf.reshape(input_data, input_shape)
        else:
            input_data = self.get_randomized_data(input_shape,
                                                  self.inputs_table_file,
                                                  regenerate=self.regenerate_new_input)
        return input_data

    def generate_data(self, input_data=None, weights=None, biases=None) -> None:
        input_data = self.get_softmax_randomized_input_data(input_data, [self.y_input, self.x_input])

        if self.is_int16xint8:
            inttype = tf.int16
            datatype = "int16_t"
        else:
            inttype = tf.int8
            datatype = "int8_t"

        self.generate_c_array(self.input_data_file_prefix, input_data, datatype=datatype)

        # Generate reference.
        if self.inInt8outInt16:
            # Output is int16.
            datatype = "int16_t"

            # Keras does not support int8 input and int16 output for Softmax,
            # so a template json is used instead.
            generated_json = self.generate_json_from_template()
            self.flatc_generate_tflite(generated_json, self.schema_file)

            interpreter = self.Interpreter(model_path=str(self.model_path_tflite),
                                           experimental_op_resolver_type=self.OpResolverType.BUILTIN_REF)
            interpreter.allocate_tensors()
            all_layers_details = interpreter.get_tensor_details()
            input_layer = all_layers_details[0]
            output_layer = all_layers_details[1]

            interpreter.set_tensor(input_layer["index"], tf.cast(input_data, tf.int8))
            interpreter.invoke()
            output_data = interpreter.get_tensor(output_layer["index"])
        else:
            # Create a one-layer Keras model.
            model = keras.models.Sequential()
            input_shape = (self.y_input, self.x_input)
            model.add(keras.layers.Softmax(input_shape=input_shape))

            interpreter = self.convert_and_interpret(model, inttype, tf.expand_dims(input_data, axis=0))
            output_details = interpreter.get_output_details()
            interpreter.invoke()
            output_data = interpreter.get_tensor(output_details[0]["index"])

        self.calc_softmax_params()
        self.generate_c_array(self.output_data_file_prefix, output_data, datatype=datatype)

        self.write_c_config_header()
        self.write_c_header_wrapper()
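
# Illustrative usage, as a minimal sketch: the dataset name, schema path and
# shapes below are hypothetical placeholders, and real runs are presumably
# driven by the surrounding test-data generation tooling rather than by
# invoking this module directly.
#
#     softmax = SoftmaxSettings(dataset="softmax",
#                               testtype="softmax",
#                               regenerate_weights=False,
#                               regenerate_input=False,
#                               regenerate_biases=False,
#                               schema_file="path/to/schema.fbs",
#                               x_in=5,
#                               y_in=2)
#     softmax.generate_data()  # writes the input/output C arrays and the config header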