/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        IORunner.cpp
 * Description:  IORunner
 *
 *               Runner implementation for tests running on the device
 *               under test
 *
 * $Date:        20. June 2019
 * $Revision:    V1.0.0
 *
 * Target Processor: Cortex-M cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "Test.h"

#include <string>
#include <cstring>   // strncpy used for error details
#include <cstddef>
#include <cstdlib>
#include <cstdio>
#include <vector>    // std::vector used for test parameters
#include "IORunner.h"
#include "Error.h"
#include "Timing.h"
#include "arm_math_types.h"
#include "Calibrate.h"

#ifdef CORTEXA
#define CALIBNB 1
#else
#define CALIBNB 20
#endif
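
/* CALIBNB is the number of runs of the empty benchmark averaged to
   estimate the call overhead (see the calibration loop below); a single
   run is used on Cortex-A. */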

using namespace std;

namespace Client
{

      IORunner::IORunner(IO *io, PatternMgr *mgr, Testing::RunningMode runningMode):m_io(io), m_mgr(mgr)
      {
        /* Initialized to zero so that the EXTBENCH build (where getCycles
           is not called) does not accumulate an indeterminate value below. */
        volatile Testing::cycles_t current = 0;

        this->m_runningMode = runningMode;
        // Set running mode on PatternMgr.
        if (runningMode == Testing::kDumpOnly)
        {
          mgr->setDumpMode();
        }
        if (runningMode == Testing::kTestAndDump)
        {
          mgr->setTestAndDumpMode();
        }

        initCycleMeasurement();

/*

For calibration:

Calibration means, in this context, removing the overhead of calling
a C++ member function pointer from the cycle measurements.

*/
        Calibrate c((Testing::testID_t)0);
        Client::Suite *s=(Client::Suite *)&c;
        Client::test t = (Client::test)&Calibrate::empty;
        calibration = 0;

/*

For calibration, we measure the time it takes to call an empty benchmark
CALIBNB times (20 by default; 20 is an arbitrary value) and compute the
average.

This overhead is removed from the benchmark results in the Runner.

When an external trace is used for the cycles, the calibration is handled
by the Python post-processing script instead, because in that case the
calibration value can only be recovered by parsing the trace.

Otherwise, the calibration is measured below.

*/
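
/*
Illustrative sketch (comment only): with the internal counters, each
benchmark later reports

    cycles = getCycles() - calibration;

where calibration is the average over CALIBNB runs of the empty call
measured by the loop below, so only the cycles of the benchmarked code
itself remain.
*/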

/*

We want the overhead calibrated here to be the same as the overhead
of the real measurements done later.

So, to ensure the conditions are always the same, the instruction cache
and branch predictor are flushed.

*/
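/* On Cortex-A, BPIALL invalidates the branch predictor and ICIALLU
   invalidates the entire instruction cache; the DSB/ISB barriers ensure
   the invalidations have completed before execution continues. */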
#if defined(CORTEXA) && !defined(__GNUC_PYTHON__)
  __set_BPIALL(0);
  __DSB();
  __ISB();

  __set_ICIALLU(0);
  __DSB();
  __ISB();
#endif

/*

We always call the empty function once to ensure it is in the cache,
because that is how the real measurements are done (the benchmarked
code is warmed up before being measured).

*/
        if (!m_mgr->HasMemError())
        {
             (s->*t)();
        }

/*

We measure the cycles required for a measurement itself:
cycleMeasurementStart, getCycles and cycleMeasurementStop
should not be in the cache.

So, for the overhead we always get the value corresponding to
the measurement code not being in the cache, while for the benchmarked
code itself we get the value for the code in the cache.

*/


/*

EXTBENCH is set when benchmarking is done through external traces
instead of using internal counters.

Currently the post-processing scripts only support traces generated
from fast models.

*/
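
/*
startSection()/stopSection() (declared in Timing.h) bracket the measured
region for the EXTBENCH and CACHEANALYSIS builds; they are presumably
used by the post-processing scripts to locate that region in the trace.
*/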
#if defined(EXTBENCH)  || defined(CACHEANALYSIS)
        startSection();
#endif

        for(int i=0;i < CALIBNB;i++)
        {
          cycleMeasurementStart();
          if (!m_mgr->HasMemError())
          {
             (s->*t)();
          }
          #ifndef EXTBENCH
             current = getCycles();
          #endif
          calibration += current;
          cycleMeasurementStop();
        }
#if defined(EXTBENCH)  || defined(CACHEANALYSIS)
        stopSection();
#endif

#ifndef EXTBENCH
        calibration=calibration / CALIBNB;
#endif
      }

      // Testing.
      // In dump mode the failed assertions are ignored.
      // (The exception is still thrown, so asserts should be at the end
      // of a test and not in the middle.)
      IORunner::IORunner(IO *io, PatternMgr *mgr):m_io(io), m_mgr(mgr)
      {
        this->m_runningMode = Testing::kTestOnly;
      }

      IORunner::~IORunner()
      {

      }


      /** Read driver data to control execution of a suite
      */
      Testing::TestStatus IORunner::run(Suite *s)
      {
        Testing::TestStatus finalResult = Testing::kTestPassed;
        int nbTests = s->getNbTests();
        //int failedTests=0;
        Testing::errorID_t error=0;
        unsigned long line = 0;
        char details[200];
        volatile Testing::cycles_t cycles=0;
        Testing::nbParameters_t nbParams;

        // Read node identification (suite)
        m_io->ReadIdentification();
        // Read suite nb of parameters
        nbParams = m_io->ReadNbParameters();

        // Read list of patterns
        m_io->ReadPatternList();
        // Read list of output
        m_io->ReadOutputList();
        // Read list of parameters
        m_io->ReadParameterList(nbParams);

        // Iterate on tests
        for(int i=1; i <= nbTests; i++)
        {
            test t = s->getTest(i);
            Testing::TestStatus result = Testing::kTestPassed;
            error = UNKNOWN_ERROR;
            line = 0;
            cycles = 0;
            details[0]='\0';
            Testing::param_t *paramData=NULL;
            Testing::nbParameterEntries_t entries=0;
            std::vector<Testing::param_t> params(nbParams);
            bool canExecute=true;
            unsigned long  dataIndex=0;
            Testing::ParameterKind paramKind;

            // Read test identification (test ID)
            m_io->ReadTestIdentification();


            if (m_io->hasParam())
            {
               Testing::PatternID_t paramID=m_io->getParamID();
               paramData = m_io->ImportParams(paramID,entries,paramKind);
               dataIndex = 0;
            }
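            // Layout note: paramData is a flattened buffer of 'entries'
            // parameter sets, each nbParams values long; dataIndex selects
            // the current set in the loop below.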


            while(canExecute)
            {
              canExecute = false;

              if (m_io->hasParam() && paramData)
              {
                // Load new params
                for(unsigned long j=0; j < nbParams ; j++)
                {
                  params[j] = paramData[nbParams*dataIndex+j];
                }
                // Update condition for new execution
                dataIndex += 1;
                canExecute = dataIndex < entries;
              }
              // Execute test
              try {
                // Prepare memory for test
                // setUp will generally load patterns
                // and do specific initialization for the tests
                s->setUp(m_io->CurrentTestID(),params,m_mgr);

                // Run the test once to force the code into the cache.
                // (This warm-up is disabled by default in the suite.)
#if defined(CORTEXA) && !defined(__GNUC_PYTHON__)
  __set_BPIALL(0);
  __DSB();
  __ISB();

  __set_ICIALLU(0);
  __DSB();
  __ISB();
#endif

/* In cache analysis mode, we don't force the code to be in the cache. */
#if !defined(CACHEANALYSIS)
                if (s->isForcedInCache())
                {
                   if (!m_mgr->HasMemError())
                   {
                      (s->*t)();
                   }
                }
#endif
                // Run the test
                cycleMeasurementStart();

#if defined(EXTBENCH) || defined(CACHEANALYSIS)
                startSection();
#endif
                if (!m_mgr->HasMemError())
                {
                    (s->*t)();
                }

#if defined(EXTBENCH) || defined(CACHEANALYSIS)
                stopSection();
#endif

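                // Report net cycles: subtract the call overhead measured
                // during calibration in the constructor.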
#ifndef EXTBENCH
                cycles=getCycles();
                cycles=cycles-calibration;
#endif
                cycleMeasurementStop();
              }
              catch(Error &ex)
              {
                 cycleMeasurementStop();
                 // In dump-only mode the failures are ignored since the
                 // reference patterns are not loaded, so the comparisons
                 // would fail.
                 if (this->m_runningMode != Testing::kDumpOnly)
                 {
                    error = ex.errorID;
                    line = ex.lineNumber;
                    // Bounded copy to avoid overflowing the details buffer.
                    strncpy(details,ex.details,sizeof(details)-1);
                    details[sizeof(details)-1] = '\0';
                    result=Testing::kTestFailed;
                 }
              }
              catch (...) {
                cycleMeasurementStop();
                // In dump-only mode the failures are ignored since the
                // reference patterns are not loaded, so the comparisons
                // would fail.
                if (this->m_runningMode != Testing::kDumpOnly)
                {
                  result = Testing::kTestFailed;
                  error = UNKNOWN_ERROR;
                  line = 0;
                }
              }
              try {
                 // Clean memory after this test
                 // May dump output and do specific cleaning for a test
                 s->tearDown(m_io->CurrentTestID(),m_mgr);
              }
              catch(...)
              {

              }

              if (m_mgr->HasMemError())
              {
                /* We keep the current error if set.
                */
                if (result == Testing::kTestPassed)
                {
                  result = Testing::kTestFailed;
                  error = MEMORY_ALLOCATION_ERROR;
                  line = 0;
                }
              }

              // Free all memory of the memory manager so that the next
              // test starts in a clean and controlled state.
              m_mgr->freeAll();

              // Dump test status to output
              m_io->DispStatus(result,error,line,cycles);
              m_io->DispErrorDetails(details);
              m_io->DumpParams(params);
            }
            if (paramData)
            {
                if (paramKind == Testing::kDynamicBuffer)
                {
                  free(paramData);
                }
                paramData = NULL;
            }

            if (result == Testing::kTestFailed)
            {
              //failedTests ++;
              finalResult = Testing::kTestFailed;
            }
        }
        // Signal end of group processing to output
        m_io->EndGroup();
        return(finalResult);
      }

      /** Read driver data to control execution of a group
      */
      Testing::TestStatus IORunner::run(Group *g)
      {
        int nbTests = g->getNbContainer();
        //int failedTests=0;


        // Read Node identification
        m_io->ReadIdentification();


        Testing::TestStatus finalResult = Testing::kTestPassed;
        // Iterate on group elements
        for(int i=1; i <= nbTests; i++)
        {
            TestContainer *c = g->getContainer(i);
            if (c != NULL)
            {
                // Execute runner for this group
                Testing::TestStatus result = c->accept(this);

                if (result == Testing::kTestFailed)
                {
                   //failedTests ++;
                   finalResult = Testing::kTestFailed;
                }
            }

        }
        // Signal to output that processing of this group has finished.
        m_io->EndGroup();
        return(finalResult);
      }


}