/*
 * Copyright 2019-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

#include <array>
#include <queue>
#include <stdint.h> /* for uint8_t */
#include <stdlib.h>
#include <string>
#include <vector>

namespace InferenceProcess
{
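/*
 * DataPtr pairs a raw buffer pointer with the buffer's size in bytes.
 * invalidate() and clean() presumably perform the corresponding data cache
 * maintenance operations on the buffer; see the implementation for details.
 */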
struct DataPtr {
	void *data;
	size_t size;

	DataPtr(void *data = nullptr, size_t size = 0);

	void invalidate();
	void clean();
};

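/*
 * InferenceJob bundles everything needed to run one inference: a name for
 * logging, the network model buffer, the input and output buffers, and
 * expected output buffers that can be used to verify the result.
 */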
struct InferenceJob {
	std::string name;
	DataPtr networkModel;
	std::vector<DataPtr> input;
	std::vector<DataPtr> output;
	std::vector<DataPtr> expectedOutput;

	InferenceJob();
	InferenceJob(const std::string &name, const DataPtr &networkModel,
		     const std::vector<DataPtr> &input, const std::vector<DataPtr> &output,
		     const std::vector<DataPtr> &expectedOutput);

	void invalidate();
	void clean();
};

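/*
 * InferenceProcess runs InferenceJobs against a caller-provided tensor arena,
 * the scratch memory used by the inference runtime. The arena is referenced,
 * not owned, so it must outlive the InferenceProcess instance.
 */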
class InferenceProcess {
    public:
	InferenceProcess(uint8_t *_tensorArena, size_t _tensorArenaSize)
		: tensorArena(_tensorArena), tensorArenaSize(_tensorArenaSize)
	{
	}

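	/*
	 * Runs a single inference job using the tensor arena. The boolean
	 * return value reports the job's outcome; the exact convention is
	 * defined by the implementation.
	 */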
	bool runJob(InferenceJob &job);

    private:
	uint8_t *tensorArena;
	const size_t tensorArenaSize;
};
} /* namespace InferenceProcess */
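
/*
 * Illustrative usage sketch. Buffer names and sizes are hypothetical; the
 * real arena size and alignment requirements depend on the model and the
 * runtime configuration.
 *
 *   static uint8_t arena[0x20000] __attribute__((aligned(16)));
 *
 *   InferenceProcess::InferenceProcess process(arena, sizeof(arena));
 *
 *   InferenceProcess::DataPtr model(modelData, modelSize);
 *   std::vector<InferenceProcess::DataPtr> input{ { inputData, inputSize } };
 *   std::vector<InferenceProcess::DataPtr> output{ { outputData, outputSize } };
 *
 *   InferenceProcess::InferenceJob job("example", model, input, output, {});
 *   bool status = process.runJob(job);
 */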