/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/micro_speech/main_functions.h"

#include "tensorflow/lite/micro/examples/micro_speech/audio_provider.h"
#include "tensorflow/lite/micro/examples/micro_speech/command_responder.h"
#include "tensorflow/lite/micro/examples/micro_speech/feature_provider.h"
#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
#include "tensorflow/lite/micro/examples/micro_speech/micro_features/model.h"
#include "tensorflow/lite/micro/examples/micro_speech/recognize_commands.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/schema/schema_generated.h"

// Globals, used for compatibility with Arduino-style sketches.
namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* model_input = nullptr;
FeatureProvider* feature_provider = nullptr;
RecognizeCommands* recognizer = nullptr;
int32_t previous_time = 0;

// Create an area of memory to use for input, output, and intermediate arrays.
// The size of this will depend on the model you're using, and may need to be
// determined by experimentation.
constexpr int kTensorArenaSize = 10 * 1024;
uint8_t tensor_arena[kTensorArenaSize];
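// One way to size this empirically (a suggestion, not part of the original
// example): after AllocateTensors() succeeds, interpreter->arena_used_bytes()
// reports how many bytes were actually consumed, so the arena can be trimmed
// to that value plus a small safety margin.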
int8_t feature_buffer[kFeatureElementCount];
int8_t* model_input_buffer = nullptr;
}  // namespace

// The name of this function is important for Arduino compatibility.
void setup() {
  tflite::InitializeTarget();

  // Set up logging. Google style is to avoid globals or statics because of
  // lifetime uncertainty, but since this has a trivial destructor it's okay.
  // NOLINTNEXTLINE(runtime-global-variables)
  static tflite::MicroErrorReporter micro_error_reporter;
  error_reporter = &micro_error_reporter;

  // Map the model into a usable data structure. This doesn't involve any
  // copying or parsing; it's a very lightweight operation.
  model = tflite::GetModel(g_model);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Model provided is schema version %d, not equal "
                         "to supported version %d.",
                         model->version(), TFLITE_SCHEMA_VERSION);
    return;
  }

  // Pull in only the operation implementations we need.
  // This relies on a complete list of all the ops needed by this graph.
  // An easier approach is simply to use the AllOpsResolver, but that would
  // incur some penalty in code space for op implementations that are not
  // needed by this graph.
  //
  // tflite::AllOpsResolver resolver;
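  // (AllOpsResolver is declared in tensorflow/lite/micro/all_ops_resolver.h.)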
  // NOLINTNEXTLINE(runtime-global-variables)
  static tflite::MicroMutableOpResolver<4> micro_op_resolver(error_reporter);
  if (micro_op_resolver.AddDepthwiseConv2D() != kTfLiteOk) {
    return;
  }
  if (micro_op_resolver.AddFullyConnected() != kTfLiteOk) {
    return;
  }
  if (micro_op_resolver.AddSoftmax() != kTfLiteOk) {
    return;
  }
  if (micro_op_resolver.AddReshape() != kTfLiteOk) {
    return;
  }
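  // Note: the MicroMutableOpResolver template argument (4 here) is the
  // maximum number of ops that can be registered, so it must grow to match
  // the number of Add*() calls if a different model needs more ops.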

  // Build an interpreter to run the model with.
  static tflite::MicroInterpreter static_interpreter(
      model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
  interpreter = &static_interpreter;

  // Allocate memory from the tensor_arena for the model's tensors.
  TfLiteStatus allocate_status = interpreter->AllocateTensors();
  if (allocate_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
    return;
  }

  // Get information about the memory area to use for the model's input.
  model_input = interpreter->input(0);
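  // With the default micro_features settings this is a [1, 1960] int8 tensor:
  // kFeatureSliceCount (49) slices of kFeatureSliceSize (40) values each, as
  // defined in micro_model_settings.h.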
  if ((model_input->dims->size != 2) || (model_input->dims->data[0] != 1) ||
      (model_input->dims->data[1] !=
       (kFeatureSliceCount * kFeatureSliceSize)) ||
      (model_input->type != kTfLiteInt8)) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Bad input tensor parameters in model");
    return;
  }
  model_input_buffer = model_input->data.int8;

  // Prepare to access the audio spectrograms from a microphone or other source
  // that will provide the inputs to the neural network.
  // NOLINTNEXTLINE(runtime-global-variables)
  static FeatureProvider static_feature_provider(kFeatureElementCount,
                                                 feature_buffer);
  feature_provider = &static_feature_provider;

  static RecognizeCommands static_recognizer(error_reporter);
  recognizer = &static_recognizer;

  previous_time = 0;
}

// The name of this function is important for Arduino compatibility.
void loop() {
  // Fetch the spectrogram for the current time.
  const int32_t current_time = LatestAudioTimestamp();
  int how_many_new_slices = 0;
  TfLiteStatus feature_status = feature_provider->PopulateFeatureData(
      error_reporter, previous_time, current_time, &how_many_new_slices);
  if (feature_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "Feature generation failed");
    return;
  }
  previous_time = current_time;
  // If no new audio samples have been received since last time, don't bother
  // running the network model.
  if (how_many_new_slices == 0) {
    return;
  }

  // Copy the feature buffer into the model's input tensor.
  for (int i = 0; i < kFeatureElementCount; i++) {
    model_input_buffer[i] = feature_buffer[i];
  }
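  // (A single memcpy of kFeatureElementCount bytes would be equivalent, since
  // both buffers are contiguous int8 arrays.)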

  // Run the model on the spectrogram input and make sure it succeeds.
  TfLiteStatus invoke_status = interpreter->Invoke();
  if (invoke_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed");
    return;
  }

  // Obtain a pointer to the output tensor.
  TfLiteTensor* output = interpreter->output(0);
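  // With the default model this holds one int8 score per category
  // (kCategoryCount = 4: "silence", "unknown", "yes", "no").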
  // Determine whether a command was recognized based on the inference output.
  const char* found_command = nullptr;
  uint8_t score = 0;
  bool is_new_command = false;
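  // ProcessLatestResults() applies simple averaging over a window of recent
  // inference results to smooth out noisy individual scores before deciding
  // whether a new command was heard.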
  TfLiteStatus process_status = recognizer->ProcessLatestResults(
      output, current_time, &found_command, &score, &is_new_command);
  if (process_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "RecognizeCommands::ProcessLatestResults() failed");
    return;
  }
  // Do something based on the recognized command. The default implementation
  // just prints to the error console, but you should replace this with your
  // own function for a real application.
  RespondToCommand(error_reporter, current_time, found_command, score,
                   is_new_command);
}