// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

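/*
 * Userspace stand-ins for the kernel definitions that ptr_ring.h relies on.
 * They only need to be good enough for this test harness.
 */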
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x)    (__builtin_expect(!!(x), 0))
#define likely(x)    (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX        (~(size_t)0)
#define KMALLOC_MAX_SIZE SIZE_MAX
/* Kernel BUG_ON() fires when the condition is true; invert for assert(). */
#define BUG_ON(x) assert(!(x))

typedef pthread_spinlock_t  spinlock_t;

typedef int gfp_t;
#define __GFP_ZERO 0x1

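/*
 * Allocation shims: kmalloc() and friends map to cache-line-aligned
 * memalign()/free().  __GFP_ZERO is the only GFP flag that is honored.
 */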
static void *kmalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;

	if (gfp & __GFP_ZERO)
		memset(p, 0, size);
	return p;
}

static inline void *kzalloc(unsigned size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

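/* Like the kernel helper: fail n * size allocations that would overflow. */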
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

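/* kvmalloc's vmalloc fallback is unnecessary in userspace. */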
#define kvmalloc_array kmalloc_array
#define kvfree kfree

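/*
 * Locking shims: kernel spinlocks become pthread spinlocks.  There are no
 * interrupts or softirqs in this harness, so the _bh/_irq/_irqsave variants
 * can all take the plain lock.
 */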
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

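/* The data structure under test, compiled against the shims above. */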
#include "../../../include/linux/ptr_ring.h"

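/*
 * headcnt/tailcnt count successful add_inbuf() and get_buf() calls; see the
 * comment above get_buf() for why consumption has to be faked this way.
 */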
static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
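/* ring_size and param are provided by the shared test harness (main.h). */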
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}

/* guest side */
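/* Produce one buffer; count successes so get_buf() can fake consumption. */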
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that a future
 * add_inbuf will succeed, so fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

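/* Mirror the emptiness test used by get_buf() above. */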
bool used_empty(void)
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}

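/*
 * This test configuration never exercises the call/kick notification paths,
 * so the stubs below must not be reached.
 */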
void disable_call(void)
{
	assert(0);
}

bool enable_call(void)
{
	assert(0);
	return false; /* not reached unless asserts are disabled */
}

void kick_available(void)
{
	assert(0);
}

/* host side */
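/* As with call above: kick notification is never enabled in this test. */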
void disable_kick(void)
{
	assert(0);
}

bool enable_kick(void)
{
	assert(0);
	return false; /* not reached unless asserts are disabled */
}

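/* Consumer-side view: is anything queued in the ring? */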
bool avail_empty(void)
{
	return __ptr_ring_empty(&array);
}

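/* Consume one buffer; report whether there was one to consume. */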
bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

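/* Used-buffer notification is likewise unused here. */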
void call_used(void)
{
	assert(0);
}