/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS		32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)	(((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)	(((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x)	(((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)
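
/*
 * Illustrative example (not part of the driver): parsing a raw range
 * capability value with the macros above. 'range' is assumed to have been
 * read from the capability block at MMIO_RANGE_OFFSET.
 *
 *	u8 bus       = MMIO_GET_BUS(range);
 *	u8 first_dev = MMIO_GET_FD(range);
 *	u8 last_dev  = MMIO_GET_LD(range);
 */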

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000



/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)
#define FEATURE_EPHSUP		(1ULL<<50)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Currently, hardware only implements up to 16-bit PASIDs
 * even though the spec allows up to 20 bits.
 */
#define PASID_MASK		0x0000ffff
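
/*
 * Illustrative sketch (not a definitive part of the driver): decoding the
 * PASID width from a raw extended-feature value 'efr' and clamping it to
 * the 16-bit limit noted above. The field encodes PASmax, where the largest
 * usable PASID is 2^(PASmax + 1) - 1.
 *
 *	u64 pasmax    = (efr & FEATURE_PASID_MASK) >> FEATURE_PASID_SHIFT;
 *	u32 max_pasid = (1 << (pasmax + 1)) - 1;
 *
 *	max_pasid &= PASID_MASK;
 */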

/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_TYPE_INV_PPR_REQ	0x9
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK	0xffff
#define EVENT_DOMID_SHIFT	0
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10
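
/*
 * Illustrative sketch: decoding one 16-byte event log entry, assuming it
 * has been read into four u32 words 'event[0..3]'.
 *
 *	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 *	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 *	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 *	u64 addr  = ((u64)event[3] << 32) | event[2];
 *
 * A type of EVENT_TYPE_IO_FAULT, for instance, would identify an I/O page
 * fault reported for device 'devid'.
 */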

/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPFLOG_EN	0x0dULL
#define CONTROL_PPFINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL
#define CONTROL_GA_EN		0x11ULL
#define CONTROL_GAM_EN		0x19ULL
#define CONTROL_GALOG_EN	0x1CULL
#define CONTROL_GAINT_EN	0x1DULL
#define CONTROL_XT_EN		0x32ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6
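
/*
 * Illustrative sketch (not the driver's actual helper): the CONTROL_* values
 * above are bit positions within the 64-bit control register at
 * MMIO_CONTROL_OFFSET, so enabling a feature is a read-modify-write of that
 * register. 'iommu' is assumed to carry the mapped MMIO base.
 *
 *	u64 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 *	ctrl |= (1ULL << CONTROL_EVT_LOG_EN);
 *	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 */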

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_PPR		0x34
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED	1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01
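
/*
 * Illustrative sketch: filling a struct amd_iommu_fault (defined further
 * down) from one 16-byte PPR log entry, assuming the entry has been read
 * into two u64 words 'raw[0..1]'. Note that the PASID is split across two
 * fields of the first word and is reassembled by PPR_PASID().
 *
 *	if (PPR_REQ_TYPE(raw[0]) == PPR_REQ_FAULT) {
 *		fault.address   = raw[1];
 *		fault.pasid     = PPR_PASID(raw[0]);
 *		fault.device_id = PPR_DEVID(raw[0]);
 *		fault.tag       = PPR_TAG(raw[0]);
 *		fault.flags     = PPR_FLAGS(raw[0]);
 *	}
 */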

/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		(u32)(x & 0xffffffffULL)
#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1
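
/*
 * Illustrative sketch: decoding one 8-byte GA log entry 'entry' (already
 * read from the log ring). A GA_GUEST_NR request carries the device ID and
 * the guest tag identifying an interrupt that arrived while the target
 * vCPU was not running.
 *
 *	if (GA_REQ_TYPE(entry) == GA_GUEST_NR) {
 *		u16 devid = GA_DEVID(entry);
 *		u32 tag   = GA_TAG(entry);
 *		(forward 'tag' to the registered GA log notifier)
 *	}
 */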

/* Bit value definition for dte irq remapping fields */
#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
#define DTE_IRQ_TABLE_LEN_MASK	(0xfULL << 1)
#define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
#define DTE_IRQ_TABLE_LEN	(8ULL << 1)
#define DTE_IRQ_REMAP_ENABLE	1ULL

#define PAGE_MODE_NONE		0x00
#define PAGE_MODE_1_LEVEL	0x01
#define PAGE_MODE_2_LEVEL	0x02
#define PAGE_MODE_3_LEVEL	0x03
#define PAGE_MODE_4_LEVEL	0x04
#define PAGE_MODE_5_LEVEL	0x05
#define PAGE_MODE_6_LEVEL	0x06

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
				(0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
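
/*
 * Worked example for the PM_* helpers above (values chosen purely for
 * illustration): each level resolves 9 bits of the IO virtual address on
 * top of the 12-bit page offset, so for iova = 0x12345678000
 *
 *	PM_LEVEL_SHIFT(1)       = 21
 *	PM_LEVEL_INDEX(1, iova) = (iova >> 21) & 0x1ff = 0x02b
 *	PM_LEVEL_INDEX(0, iova) = (iova >> 12) & 0x1ff = 0x078
 *
 * and a level-1 PDE built with PM_LEVEL_PDE() carries the next-level table
 * address plus the mode bits 9-11 and the PR/IR/IW permission bits.
 */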

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)	\
		(((address) | ((pagesize) - 1)) &	\
		 (~(pagesize >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
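
/*
 * Worked example for the page-size encoding above, using a 2 MiB mapping:
 *
 *	PAGE_SIZE_LEVEL(0x200000)     = (21 - 12) / 9        = 1
 *	PAGE_SIZE_PTE_COUNT(0x200000) = 1 << ((21 - 12) % 9) = 1
 *	PAGE_SIZE_PTE(addr, 0x200000) sets address bits 12-19 and clears
 *	bit 20, so PTE_PAGE_SIZE() finds the first zero bit at position 20
 *	and returns 1 << 21 = 0x200000 again.
 *
 * A 32 KiB mapping, by contrast, stays at level 0 and needs
 * PAGE_SIZE_PTE_COUNT(0x8000) = 8 identical PTEs.
 */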

/*
 * Takes a page-table level and returns the default page-size for this level
 */
#define PTE_LEVEL_PAGE_SIZE(level)	\
	(1ULL << (12 + (9 * (level))))

/*
 * Bit value definition for I/O PTE fields
 */
#define IOMMU_PTE_PR	(1ULL << 0)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

/*
 * Bit value definition for DTE fields
 */
#define DTE_FLAG_V	(1ULL << 0)
#define DTE_FLAG_TV	(1ULL << 1)
#define DTE_FLAG_IR	(1ULL << 61)
#define DTE_FLAG_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(1ULL << 32)
#define DTE_FLAG_GV	(1ULL << 55)
#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)
#define DEV_DOMID_MASK	0xffffULL

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0xfffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL
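
/*
 * Illustrative sketch: the guest-CR3 table pointer does not fit into one
 * contiguous DTE field, so its physical address 'gcr3' is split into three
 * pieces that land in two of the four DTE quadwords ('dte' being a
 * struct dev_table_entry, defined further down):
 *
 *	dte.data[DTE_GCR3_INDEX_A] |= DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
 *	dte.data[DTE_GCR3_INDEX_B] |= DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
 *	dte.data[DTE_GCR3_INDEX_C] |= DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
 *
 * together with DTE_FLAG_GV and the GLX level encoded via DTE_GLX_SHIFT.
 */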

#define IOMMU_PAGE_MASK   (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB   24
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR     27

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_XTSUP_SHIFT	0
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT	2
#define IOMMU_EFR_GASUP_SHIFT	7

#define MAX_DOMAIN_ID 65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			printk(KERN_INFO "AMD-Vi: " format, ## arg); \
	} while(0);
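
/*
 * Usage sketch: DUMP_printk() only emits output when the amd_iommu_dump
 * command-line option is set, e.g.
 *
 *	DUMP_printk("device entry for devid %04x is valid\n", devid);
 */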

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

#define MAX_IRQS_PER_TABLE	256
#define IRQ_TABLE_ALIGNMENT	128

struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;	/* IO virtual address of the fault */
	u32 pasid;	/* Address space identifier */
	u16 device_id;	/* Originating PCI device id */
	u16 tag;	/* PPR tag */
	u16 flags;	/* Fault flags */

};


struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head list;	/* for list of all protection domains */
	struct list_head dev_list; /* List of all devices in this domain */
	struct iommu_domain domain; /* generic domain handle used by
				       iommu core code */
	spinlock_t lock;	/* mostly used to lock the page table */
	struct mutex api_lock;	/* protect page tables in the iommu-api path */
	u16 id;			/* the domain id written to the device table */
	int mode;		/* paging mode (0-6 levels) */
	u64 *pt_root;		/* page table root pointer */
	int glx;		/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;		/* Guest CR3 table */
	unsigned long flags;	/* flags to find out type of domain */
	bool updated;		/* complete domain flush required */
	unsigned dev_cnt;	/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	raw_spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	u32 cmd_buf_head;
	u32 cmd_buf_tail;

	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if one, we need to send a completion wait command */
	bool need_sync;

	/* Handle for IOMMU core code */
	struct iommu_device iommu;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif

	u32 flags;
	volatile u64 __aligned(8) cmd_sem;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */
	struct dentry *debugfs;
#endif
};

static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
	struct iommu_device *iommu = dev_to_iommu_device(dev);

	return container_of(iommu, struct amd_iommu, iommu);
}

#define ACPIHID_UID_LEN 256
#define ACPIHID_HID_LEN 9

struct acpihid_map_entry {
	struct list_head list;
	u8 uid[ACPIHID_UID_LEN];
	u8 hid[ACPIHID_HID_LEN];
	u16 devid;
	u16 root_devid;
	bool cmd_line;
	struct iommu_group *group;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	struct list_head list;		  /* For domain->dev_list */
	struct llist_node dev_data_list;  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	u16 devid;			  /* PCI Device ID */
	u16 alias;			  /* Alias Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	bool passthrough;		  /* Device is identity mapped */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	u32 errata;			  /* Bitmap for errata to apply */
	bool use_vapic;			  /* Enable device to use vapic mode */
	bool defer_attach;

	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
extern struct list_head acpihid_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/*
 * Declarations for the global list of all protection domains
 */
extern spinlock_t amd_iommu_pd_lock;
extern struct list_head amd_iommu_pd_list;

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (inclusive) */
	u16 devid_start;
	/* end device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor IDs to device IDs. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern bool amd_iommu_unmap_flush;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to supporting only
	 * legacy interrupt remapping via 128-bit IRTE.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};

#define AMD_IOMMU_GUEST_IR_GA(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC || \
					 x == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define APICID_TO_IRTE_DEST_LO(x)    (x & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)    ((x >> 24) & 0xff)
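
/*
 * Illustrative sketch: with x2APIC the destination APIC ID can be wider than
 * the 8-bit field of the legacy IRTE, so the 128-bit (GA) format splits it
 * across the low and high halves of the entry ('irte' being a struct
 * irte_ga, defined below):
 *
 *	irte.lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
 *	irte.hi.fields.destination       = APICID_TO_IRTE_DEST_HI(dest_apicid);
 */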

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination	: 8;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};

struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table */
};

struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;	/* Pointer to union irte or struct irte_ga */
	void *ref;	/* Pointer to the actual irte */
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, u32, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */