// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions. We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations. We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}
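
/*
 * Illustrative mapping (editor's sketch, not part of the original header):
 * what kmem_flags_convert() produces for each XFS-private flag, given the
 * logic above.
 *
 *	kmem_flags_convert(0)
 *		== GFP_KERNEL | __GFP_NOWARN
 *	kmem_flags_convert(KM_NOFS)
 *		== (GFP_KERNEL | __GFP_NOWARN) & ~__GFP_FS
 *	kmem_flags_convert(KM_MAYFAIL | KM_ZERO)
 *		== GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL | __GFP_ZERO
 */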

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);

static inline void
kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}
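
/*
 * Illustrative usage (editor's sketch; the caller, buffer, and size are
 * hypothetical):
 *
 *	char *buf = kmem_zalloc(512, KM_NOFS | KM_MAYFAIL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kmem_free(buf);
 *
 * KM_MAYFAIL permits the allocation to fail instead of retrying forever;
 * kmem_free() (i.e. kvfree()) accepts both kmalloc and vmalloc addresses.
 */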

/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}
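
/*
 * Illustrative zone lifecycle (editor's sketch; "struct xfs_foo" and the
 * zone name are hypothetical):
 *
 *	kmem_zone_t *zone = kmem_zone_init(sizeof(struct xfs_foo), "xfs_foo");
 *	struct xfs_foo *p = kmem_zone_zalloc(zone, KM_NOFS);
 *	...
 *	kmem_zone_free(zone, p);
 *	kmem_zone_destroy(zone);	(all objects must be freed first)
 */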

static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
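
/*
 * Illustrative note (editor's sketch): kmem_to_page() lets a caller look
 * up the backing page without knowing which allocator produced "addr".
 * kmem_alloc_large() may fall back to vmalloc, whose addresses need a
 * page-table walk (vmalloc_to_page()), while kmalloc addresses translate
 * through the linear map (virt_to_page()).
 *
 *	void *buf = kmem_alloc_large(size, KM_NOFS);	(hypothetical caller)
 *	struct page *page = kmem_to_page(buf);
 */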

#endif /* __XFS_SUPPORT_KMEM_H__ */