/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/crc64.h>

#include "closure.h"

#define PAGE_SECTORS		(PAGE_SIZE / 512)

struct closure;

#ifdef CONFIG_BCACHE_DEBUG

#define EBUG_ON(cond)		BUG_ON(cond)
#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)

#else /* DEBUG */

#define EBUG_ON(cond)		do { if (cond); } while (0)
#define atomic_dec_bug(v)	atomic_dec(v)
#define atomic_inc_bug(v, i)	atomic_inc(v)

#endif

#define DECLARE_HEAP(type, name)					\
	struct {							\
		size_t size, used;					\
		type *data;						\
	} name

#define init_heap(heap, _size, gfp)					\
({									\
	size_t _bytes;							\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	_bytes = (heap)->size * sizeof(*(heap)->data);			\
	(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
	(heap)->data;							\
})

#define free_heap(heap)							\
do {									\
	kvfree((heap)->data);						\
	(heap)->data = NULL;						\
} while (0)
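
/*
 * Illustrative usage sketch (not part of the original header); the heap
 * name "h" is hypothetical:
 *
 *	DECLARE_HEAP(int, h);
 *
 *	if (!init_heap(&h, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	free_heap(&h);
 */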

#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])

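/*
 * The cmp(l, r) convention used by the heap macros: return true when r may
 * sit above l in the heap, so a cmp of "<" yields a max-heap. Note the
 * historical naming quirk: heap_sift() moves the element at index i down
 * towards the leaves, while heap_sift_down() moves it up towards the root.
 */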
#define heap_sift(h, i, cmp)						\
do {									\
	size_t _r, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _r) {			\
		_r = _j * 2 + 1;					\
		if (_r + 1 < (h)->used &&				\
		    cmp((h)->data[_r], (h)->data[_r + 1]))		\
			_r++;						\
									\
		if (cmp((h)->data[_r], (h)->data[_j]))			\
			break;						\
		heap_swap(h, _r, _j);					\
	}								\
} while (0)

#define heap_sift_down(h, i, cmp)					\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;					\
		if (cmp((h)->data[i], (h)->data[p]))			\
			break;						\
		heap_swap(h, i, p);					\
		i = p;							\
	}								\
} while (0)

#define heap_add(h, d, cmp)						\
({									\
	bool _r = !heap_full(h);					\
	if (_r) {							\
		size_t _i = (h)->used++;				\
		(h)->data[_i] = d;					\
									\
		heap_sift_down(h, _i, cmp);				\
		heap_sift(h, _i, cmp);					\
	}								\
	_r;								\
})

#define heap_pop(h, d, cmp)						\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		(h)->used--;						\
		heap_swap(h, 0, (h)->used);				\
		heap_sift(h, 0, cmp);					\
	}								\
	_r;								\
})

#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)

#define heap_full(h)	((h)->used == (h)->size)

#define heap_empty(h)	((h)->used == 0)
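
/*
 * Usage sketch (illustrative, not from the original source): with the cmp
 * convention above, "<" keeps the largest element at the root.
 *
 *	#define max_cmp(l, r)	((l) < (r))
 *
 *	int v;
 *
 *	if (!heap_add(&h, 42, max_cmp))
 *		pr_warn("heap full\n");
 *
 *	while (heap_pop(&h, v, max_cmp))
 *		pr_info("next largest: %d\n", v);
 */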

#define DECLARE_FIFO(type, name)					\
	struct {							\
		size_t front, back, size, mask;				\
		type *data;						\
	} name

#define fifo_for_each(c, fifo, iter)					\
	for (iter = (fifo)->front;					\
	     c = (fifo)->data[iter], iter != (fifo)->back;		\
	     iter = (iter + 1) & (fifo)->mask)

#define __init_fifo(fifo, gfp)						\
({									\
	size_t _allocated_size, _bytes;					\
	BUG_ON(!(fifo)->size);						\
									\
	_allocated_size = roundup_pow_of_two((fifo)->size + 1);	\
	_bytes = _allocated_size * sizeof(*(fifo)->data);		\
									\
	(fifo)->mask = _allocated_size - 1;				\
	(fifo)->front = (fifo)->back = 0;				\
									\
	(fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
	(fifo)->data;							\
})

#define init_fifo_exact(fifo, _size, gfp)				\
({									\
	(fifo)->size = (_size);						\
	__init_fifo(fifo, gfp);						\
})

#define init_fifo(fifo, _size, gfp)					\
({									\
	(fifo)->size = (_size);						\
	if ((fifo)->size > 4)						\
		(fifo)->size = roundup_pow_of_two((fifo)->size) - 1;	\
	__init_fifo(fifo, gfp);						\
})

#define free_fifo(fifo)							\
do {									\
	kvfree((fifo)->data);						\
	(fifo)->data = NULL;						\
} while (0)

#define fifo_used(fifo)		(((fifo)->back - (fifo)->front) & (fifo)->mask)
#define fifo_free(fifo)		((fifo)->size - fifo_used(fifo))

#define fifo_empty(fifo)	(!fifo_used(fifo))
#define fifo_full(fifo)		(!fifo_free(fifo))

#define fifo_front(fifo)	((fifo)->data[(fifo)->front])
#define fifo_back(fifo)							\
	((fifo)->data[((fifo)->back - 1) & (fifo)->mask])

#define fifo_idx(fifo, p)	(((p) - &fifo_front(fifo)) & (fifo)->mask)

#define fifo_push_back(fifo, i)						\
({									\
	bool _r = !fifo_full((fifo));					\
	if (_r) {							\
		(fifo)->data[(fifo)->back++] = (i);			\
		(fifo)->back &= (fifo)->mask;				\
	}								\
	_r;								\
})

#define fifo_pop_front(fifo, i)						\
({									\
	bool _r = !fifo_empty((fifo));					\
	if (_r) {							\
		(i) = (fifo)->data[(fifo)->front++];			\
		(fifo)->front &= (fifo)->mask;				\
	}								\
	_r;								\
})

#define fifo_push_front(fifo, i)					\
({									\
	bool _r = !fifo_full((fifo));					\
	if (_r) {							\
		--(fifo)->front;					\
		(fifo)->front &= (fifo)->mask;				\
		(fifo)->data[(fifo)->front] = (i);			\
	}								\
	_r;								\
})

#define fifo_pop_back(fifo, i)						\
({									\
	bool _r = !fifo_empty((fifo));					\
	if (_r) {							\
		--(fifo)->back;						\
		(fifo)->back &= (fifo)->mask;				\
		(i) = (fifo)->data[(fifo)->back];			\
	}								\
	_r;								\
})

#define fifo_push(fifo, i)	fifo_push_back(fifo, (i))
#define fifo_pop(fifo, i)	fifo_pop_front(fifo, (i))
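
/*
 * Usage sketch (illustrative): a fifo of ints. Note that init_fifo()
 * adjusts the requested size to one less than a power of two, and
 * fifo_for_each() needs a caller-declared iterator.
 *
 *	DECLARE_FIFO(int, q);
 *	size_t iter;
 *	int v;
 *
 *	if (!init_fifo(&q, 8, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	fifo_push(&q, 1);
 *	fifo_for_each(v, &q, iter)
 *		pr_info("%d\n", v);
 *
 *	while (fifo_pop(&q, v))
 *		;
 *	free_fifo(&q);
 */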

#define fifo_swap(l, r)							\
do {									\
	swap((l)->front, (r)->front);					\
	swap((l)->back, (r)->back);					\
	swap((l)->size, (r)->size);					\
	swap((l)->mask, (r)->mask);					\
	swap((l)->data, (r)->data);					\
} while (0)

#define fifo_move(dest, src)						\
do {									\
	typeof(*((dest)->data)) _t;					\
	while (!fifo_full(dest) &&					\
	       fifo_pop(src, _t))					\
		fifo_push(dest, _t);					\
} while (0)

/*
 * Simple array based allocator - preallocates a number of elements and you can
 * never allocate more than that, also has no locking.
 *
 * Handy because if you know you only need a fixed number of elements you don't
 * have to worry about memory allocation failure, and sometimes a mempool isn't
 * what you want.
 *
 * We treat the free elements as entries in a singly linked list, and the
 * freelist as a stack - allocating and freeing push and pop off the freelist.
 */

#define DECLARE_ARRAY_ALLOCATOR(type, name, size)			\
	struct {							\
		type *freelist;						\
		type data[size];					\
	} name

#define array_alloc(array)						\
({									\
	typeof((array)->freelist) _ret = (array)->freelist;		\
									\
	if (_ret)							\
		(array)->freelist = *((typeof((array)->freelist) *) _ret);\
									\
	_ret;								\
})

#define array_free(array, ptr)						\
do {									\
	typeof((array)->freelist) _ptr = ptr;				\
									\
	*((typeof((array)->freelist) *) _ptr) = (array)->freelist;	\
	(array)->freelist = _ptr;					\
} while (0)

#define array_allocator_init(array)					\
do {									\
	typeof((array)->freelist) _i;					\
									\
	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *));	\
	(array)->freelist = NULL;					\
									\
	for (_i = (array)->data;					\
	     _i < (array)->data + ARRAY_SIZE((array)->data);		\
	     _i++)							\
		array_free(array, _i);					\
} while (0)

#define array_freelist_empty(array)	((array)->freelist == NULL)
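
/*
 * Usage sketch (hypothetical names): elements must be at least pointer
 * sized, because free elements double as freelist links.
 *
 *	DECLARE_ARRAY_ALLOCATOR(struct bio_vec, vec_pool, 16);
 *
 *	array_allocator_init(&vec_pool);
 *
 *	struct bio_vec *v = array_alloc(&vec_pool);
 *	if (v) {
 *		...
 *		array_free(&vec_pool, v);
 *	}
 */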

#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

int bch_strtoint_h(const char *cp, int *res);
int bch_strtouint_h(const char *cp, unsigned int *res);
int bch_strtoll_h(const char *cp, long long *res);
int bch_strtoull_h(const char *cp, unsigned long long *res);

static inline int bch_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtoint_h(cp, (int *) res);
#else
	return bch_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtouint_h(cp, (unsigned int *) res);
#else
	return bch_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res)						\
	(__builtin_types_compatible_p(typeof(*res), int)		\
	? bch_strtoint_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), long)		\
	? bch_strtol_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), long long)	\
	? bch_strtoll_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned int)	\
	? bch_strtouint_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned long)	\
	? bch_strtoul_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned long long)\
	? bch_strtoull_h(cp, (void *) res) : -EINVAL)
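
/*
 * Example (sketch): strtoi_h() dispatches on the type of *res at compile
 * time; the bch_strto*_h() parsers also accept human-readable size
 * suffixes such as "1M" (the suffix handling lives in util.c).
 *
 *	unsigned long v;
 *
 *	if (strtoi_h(buf, &v))
 *		return -EINVAL;
 */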

#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})
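
/*
 * Example (sketch, hypothetical field name): parse a sysfs write without
 * clobbering "var" on failure, clamping accepted values to a sane range.
 *
 *	int ret = strtoul_safe_clamp(buf, dc->writeback_percent, 0, 40);
 *
 *	if (ret)
 *		return ret;
 */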

#define snprint(buf, size, var)						\
	snprintf(buf, size,						\
		__builtin_types_compatible_p(typeof(var), int)		\
		     ? "%i\n" :						\
		__builtin_types_compatible_p(typeof(var), unsigned int)\
		     ? "%u\n" :						\
		__builtin_types_compatible_p(typeof(var), long)		\
		     ? "%li\n" :					\
		__builtin_types_compatible_p(typeof(var), unsigned long)\
		     ? "%lu\n" :					\
		__builtin_types_compatible_p(typeof(var), int64_t)	\
		     ? "%lli\n" :					\
		__builtin_types_compatible_p(typeof(var), uint64_t)	\
		     ? "%llu\n" :					\
		__builtin_types_compatible_p(typeof(var), const char *)\
		     ? "%s\n" : "%i\n", var)

ssize_t bch_hprint(char *buf, int64_t v);

bool bch_is_zero(const char *p, size_t n);
int bch_parse_uuid(const char *s, char *uuid);

struct time_stats {
	spinlock_t	lock;
	/*
	 * all fields are in nanoseconds, averages are ewmas stored left shifted
	 * by 8
	 */
	uint64_t	max_duration;
	uint64_t	average_duration;
	uint64_t	average_frequency;
	uint64_t	last;
};

void bch_time_stats_update(struct time_stats *stats, uint64_t time);

static inline unsigned int local_clock_us(void)
{
	return local_clock() >> 10;
}

#define NSEC_PER_ns			1L
#define NSEC_PER_us			NSEC_PER_USEC
#define NSEC_PER_ms			NSEC_PER_MSEC
#define NSEC_PER_sec			NSEC_PER_SEC

#define __print_time_stat(stats, name, stat, units)			\
	sysfs_print(name ## _ ## stat ## _ ## units,			\
		    div_u64((stats)->stat >> 8, NSEC_PER_ ## units))

#define sysfs_print_time_stats(stats, name,				\
			       frequency_units,				\
			       duration_units)				\
do {									\
	__print_time_stat(stats, name,					\
			  average_frequency, frequency_units);		\
	__print_time_stat(stats, name,					\
			  average_duration, duration_units);		\
	sysfs_print(name ## _ ## max_duration ## _ ## duration_units,	\
		    div_u64((stats)->max_duration,			\
			    NSEC_PER_ ## duration_units));		\
									\
	sysfs_print(name ## _last_ ## frequency_units, (stats)->last	\
		    ? div_s64(local_clock() - (stats)->last,		\
			      NSEC_PER_ ## frequency_units)		\
		    : -1LL);						\
} while (0)

#define sysfs_time_stats_attribute(name,				\
				   frequency_units,			\
				   duration_units)			\
	read_attribute(name ## _average_frequency_ ## frequency_units);\
	read_attribute(name ## _average_duration_ ## duration_units);	\
	read_attribute(name ## _max_duration_ ## duration_units);	\
	read_attribute(name ## _last_ ## frequency_units)

#define sysfs_time_stats_attribute_list(name,				\
					frequency_units,		\
					duration_units)			\
	&sysfs_ ## name ## _average_frequency_ ## frequency_units,	\
	&sysfs_ ## name ## _average_duration_ ## duration_units,	\
	&sysfs_ ## name ## _max_duration_ ## duration_units,		\
	&sysfs_ ## name ## _last_ ## frequency_units,

#define ewma_add(ewma, val, weight, factor)				\
({									\
	(ewma) *= (weight) - 1;						\
	(ewma) += (val) << factor;					\
	(ewma) /= (weight);						\
	(ewma) >> factor;						\
})
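
/*
 * Worked example (not from the original source): ewma_add() maintains a
 * fixed-point EWMA stored left-shifted by "factor" and evaluates to the
 * unshifted average. Starting from ewma == 0 with weight 8, factor 8 and
 * val 100: ewma becomes (0 * 7 + (100 << 8)) / 8 = 3200, and the macro
 * evaluates to 3200 >> 8 = 12; repeated calls converge towards 100.
 */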

struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	uint64_t		next;

	/*
	 * Rate at which we want to do work, in units per second
	 * The units here correspond to the units passed to bch_next_delay()
	 */
	atomic_long_t		rate;
};

static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
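
/*
 * Usage sketch (illustrative, modelled on the writeback path): account the
 * work just done and sleep for whatever delay bch_next_delay() hands back.
 *
 *	uint64_t delay = bch_next_delay(&dc->writeback_rate, sectors);
 *
 *	while (delay)
 *		delay = schedule_timeout_interruptible(delay);
 */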

#define __DIV_SAFE(n, d, zero)						\
({									\
	typeof(n) _n = (n);						\
	typeof(d) _d = (d);						\
	_d ? _n / _d : zero;						\
})

#define DIV_SAFE(n, d)	__DIV_SAFE(n, d, 0)

#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

#define RB_INSERT(root, new, member, cmp)				\
({									\
	__label__ dup;							\
	struct rb_node **n = &(root)->rb_node, *parent = NULL;		\
	typeof(new) this;						\
	int res, ret = -1;						\
									\
	while (*n) {							\
		parent = *n;						\
		this = container_of(*n, typeof(*(new)), member);	\
		res = cmp(new, this);					\
		if (!res)						\
			goto dup;					\
		n = res < 0						\
			? &(*n)->rb_left				\
			: &(*n)->rb_right;				\
	}								\
									\
	rb_link_node(&(new)->member, parent, n);			\
	rb_insert_color(&(new)->member, root);				\
	ret = 0;							\
dup:									\
	ret;								\
})
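
/*
 * Usage sketch (hypothetical struct and cmp): RB_INSERT() evaluates to 0
 * on success and -1 when an equal key is already present.
 *
 *	struct item {
 *		struct rb_node	node;
 *		uint64_t	key;
 *	};
 *
 *	static int item_cmp(struct item *l, struct item *r)
 *	{
 *		return l->key < r->key ? -1 : l->key > r->key ? 1 : 0;
 *	}
 *
 *	if (RB_INSERT(&root, i, node, item_cmp))
 *		pr_warn("duplicate key\n");
 */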

#define RB_SEARCH(root, search, member, cmp)				\
({									\
	struct rb_node *n = (root)->rb_node;				\
	typeof(&(search)) this, ret = NULL;				\
	int res;							\
									\
	while (n) {							\
		this = container_of(n, typeof(search), member);		\
		res = cmp(&(search), this);				\
		if (!res) {						\
			ret = this;					\
			break;						\
		}							\
		n = res < 0						\
			? n->rb_left					\
			: n->rb_right;					\
	}								\
	ret;								\
})

#define RB_GREATER(root, search, member, cmp)				\
({									\
	struct rb_node *n = (root)->rb_node;				\
	typeof(&(search)) this, ret = NULL;				\
	int res;							\
									\
	while (n) {							\
		this = container_of(n, typeof(search), member);		\
		res = cmp(&(search), this);				\
		if (res < 0) {						\
			ret = this;					\
			n = n->rb_left;					\
		} else							\
			n = n->rb_right;				\
	}								\
	ret;								\
})

#define RB_FIRST(root, type, member)					\
	container_of_or_null(rb_first(root), type, member)

#define RB_LAST(root, type, member)					\
	container_of_or_null(rb_last(root), type, member)

#define RB_NEXT(ptr, member)						\
	container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)

#define RB_PREV(ptr, member)						\
	container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
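
/*
 * Traversal sketch (illustrative, reusing the hypothetical struct item
 * above): walk the tree in order via the container_of-based helpers.
 *
 *	struct item *i;
 *
 *	for (i = RB_FIRST(&root, struct item, node);
 *	     i;
 *	     i = RB_NEXT(i, node))
 *		pr_info("%llu\n", i->key);
 */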

static inline uint64_t bch_crc64(const void *p, size_t len)
{
	uint64_t crc = 0xffffffffffffffffULL;

	crc = crc64_be(crc, p, len);
	return crc ^ 0xffffffffffffffffULL;
}

static inline uint64_t bch_crc64_update(uint64_t crc,
					const void *p,
					size_t len)
{
	crc = crc64_be(crc, p, len);
	return crc;
}

/* Does linear interpolation between powers of two */
static inline unsigned int fract_exp_two(unsigned int x,
					 unsigned int fract_bits)
{
	unsigned int fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;

	return x;
}
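
/*
 * Worked example (not from the original source): fract_exp_two(x, n)
 * approximates 2^(x / 2^n). With fract_bits = 2, x = 6 encodes 2^1.5:
 * fract = 2, the integer part gives 1 << 1 = 2, and the interpolation adds
 * (2 * 2) >> 2 = 1, so the result is 3 (the true value is ~2.83).
 */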

void bch_bio_map(struct bio *bio, void *base);
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#endif /* _BCACHE_UTIL_H */