// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

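/* Both queue and stack maps are backed by the same circular buffer:
 * head is the next slot to write, tail is the oldest element.  One
 * slot is always left unused (size = max_entries + 1) so that a full
 * buffer can be distinguished from an empty one.  A single raw
 * spinlock, taken with the irqsave variants, serializes all
 * producers and consumers.
 */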
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

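/* Full when advancing head by one (with wraparound) would catch up
 * with tail; at that point max_entries elements are stored.
 */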
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

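	/* allocate one extra element so that a full map can be told
	 * apart from an empty one (see queue_stack_map_is_full())
	 */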
	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

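/* Queue accessors read from tail, i.e. the oldest element (FIFO). */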
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

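/* Stack accessors read from head - 1, i.e. the newest element (LIFO). */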
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

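	/* head points at the next free slot, so the top of the stack is
	 * at head - 1.  When head is 0 the u32 subtraction wraps around
	 * and the index >= size check below resets it to the last slot.
	 */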
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

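/* Queue and stack maps have no keys, so per-key lookup, update and
 * delete are not supported; only push, pop and peek operate on them.
 */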
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

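/* The two map types share every operation except pop and peek, which
 * decide whether elements come out in FIFO (queue) or LIFO (stack)
 * order.
 */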
BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};

const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};