/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi;	/* Highest allocated pfn */
	unsigned long	pfn_lo;	/* Lowest allocated pfn */
};
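
/*
 * Note: both bounds are inclusive. As an illustrative example, a two-page
 * allocation starting at pfn 0x100 is stored as pfn_lo = 0x100 and
 * pfn_hi = 0x101, so iova_size() below returns 0x101 - 0x100 + 1 = 2.
 */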

struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};
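
/*
 * Illustrative note (the caching policy lives in iova.c, not this header):
 * there is one rcache per size order, so with IOVA_RANGE_CACHE_MAX_SIZE == 6
 * only ranges of up to 2^5 == 32 pages take the cached path; for example, a
 * 16-page range would be served from rcaches[4], while larger ranges always
 * fall back to the rbtree.
 */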

struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (* iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (* iova_entry_dtor)(unsigned long data);

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long data;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
	spinlock_t lock;
};
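
/*
 * Illustrative sketch of the ring semantics (the real logic lives in
 * iova.c): head chases tail through entries[], so the queue is empty when
 * head == tail and full when advancing tail would catch head again:
 *
 *	empty = fq->head == fq->tail;
 *	full  = ((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head;
 */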

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached_node;	/* Save last allocated node */
	struct rb_node	*cached32_node; /* Save last 32-bit allocated node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct iova_fq __percpu *fq;	/* Flush Queue */

	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
						   have been started */

	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
						   have been finished */

	struct timer_list fq_timer;		/* Timer to regularly empty the
						   flush-queues */
	atomic_t fq_timer_on;			/* 1 when timer is active, 0
						   when not */
};
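
/*
 * Illustrative setup sketch (the granule and start_pfn values here are
 * assumptions, not requirements): a domain with a 4 KiB granule that never
 * hands out pfn 0 would be initialised as
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, SZ_4K, 1);
 *
 * where SZ_4K comes from <linux/sizes.h>. The granule must be a power of
 * two, since iova_shift() below relies on __ffs(granule).
 */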

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}
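
/*
 * Worked example for the helpers above, assuming a 4 KiB granule
 * (granule == 0x1000):
 *
 *	iova_shift(iovad)			returns 12
 *	iova_mask(iovad)			returns 0xfff
 *	iova_pfn(iovad, 0x12345678)		returns 0x12345
 *	iova_offset(iovad, 0x12345678)		returns 0x678
 *	iova_align(iovad, 0x1001)		returns 0x2000
 */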

#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
			unsigned long limit_pfn,
			bool size_aligned);
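
/*
 * Illustrative pairing sketch (not a prescribed pattern): a caller that
 * needs one size-aligned page below the 32-bit boundary might do
 *
 *	struct iova *new_iova;
 *
 *	new_iova = alloc_iova(iovad, 1, DMA_BIT_MASK(32) >> iova_shift(iovad),
 *			      true);
 *	if (!new_iova)
 *		return -ENOMEM;
 *	...
 *	__free_iova(iovad, new_iova);
 */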
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
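
/*
 * Illustrative fast-path sketch (nr_pages and limit_pfn are hypothetical
 * caller variables): alloc_iova_fast() returns a pfn (0 on failure) rather
 * than a struct iova, and should be paired with free_iova_fast() using the
 * same size so the range lands back in the matching rcache bin:
 *
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(iovad, nr_pages, limit_pfn, true);
 *	if (!pfn)
 *		goto out_err;
 *	...
 *	free_iova_fast(iovad, pfn, nr_pages);
 */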
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
		      unsigned long start_pfn);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
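
/*
 * Illustrative deferred-unmap sketch (my_flush_cb is a hypothetical driver
 * callback; the entry destructor may be NULL when no per-entry data is
 * queued): a driver that opts into flush queues installs its TLB-flush
 * callback once, then queues freed ranges instead of freeing synchronously:
 *
 *	if (init_iova_flush_queue(iovad, my_flush_cb, NULL))
 *		... fall back to synchronous free_iova_fast() ...
 *
 *	queue_iova(iovad, pfn, nr_pages, 0);
 */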
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad,
			      unsigned long pfn, unsigned long pages,
			      unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif

#endif