/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _UAPIVFIO_H
#define _UAPIVFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION	0


/* Kernel & User level defines for VFIO IOCTLs. */

/* Extensions */

#define VFIO_TYPE1_IOMMU		1
#define VFIO_SPAPR_TCE_IOMMU		2
#define VFIO_TYPE1v2_IOMMU		3
/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU		4

/* Check if EEH is supported */
#define VFIO_EEH			5

/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU	6	/* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU		7

/*
 * The No-IOMMU IOMMU offers no translation or isolation for devices and
 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
 * code will taint the host kernel and should be used with extreme caution.
 */
#define VFIO_NOIOMMU_IOMMU		8

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace.  We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */

#define VFIO_TYPE	(';')
#define VFIO_BASE	100

/*
 * For extension of INFO ioctls, VFIO makes use of a capability chain
 * designed after PCI/e capabilities.  A flag bit indicates whether
 * this capability chain is supported and a field defined in the fixed
 * structure defines the offset of the first capability in the chain.
 * This field is only valid when the corresponding bit in the flags
 * bitmap is set.  This offset field is relative to the start of the
 * INFO buffer, as is the next field within each capability header.
 * The id within the header is a shared address space per INFO ioctl,
 * while the version field is specific to the capability id.  The
 * contents following the header are specific to the capability id.
 */
struct vfio_info_cap_header {
	__u16	id;		/* Identifies capability */
	__u16	version;	/* Version specific to the capability ID */
	__u32	next;		/* Offset of next capability */
};

/*
 * Callers of INFO ioctls passing insufficiently sized buffers will see
 * the capability chain flag bit set, a zero value for the first capability
 * offset (if available within the provided argsz), and argsz will be
 * updated to report the necessary buffer size.  For compatibility, the
 * INFO ioctl will not report error in this case, but the capability chain
 * will not be available.
 */
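
/*
 * For illustration only: a minimal userspace sketch of walking such a
 * capability chain.  'buf' is assumed to be the complete INFO buffer
 * returned by the kernel and 'cap_offset' the first-capability offset
 * from the fixed structure; error handling is omitted.
 *
 *	static struct vfio_info_cap_header *
 *	vfio_find_cap(void *buf, __u32 cap_offset, __u16 id)
 *	{
 *		while (cap_offset) {
 *			struct vfio_info_cap_header *hdr =
 *				(void *)((char *)buf + cap_offset);
 *
 *			if (hdr->id == id)
 *				return hdr;
 *			cap_offset = hdr->next;	// 0 terminates the chain
 *		}
 *		return NULL;
 *	}
 */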

/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API.  This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION		_IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION		_IO(VFIO_TYPE, VFIO_BASE + 1)

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type.  The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type.  A group must be set to this file descriptor before this
 * ioctl is available.  The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU			_IO(VFIO_TYPE, VFIO_BASE + 2)
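
/*
 * For illustration only: a minimal sketch of the container-level
 * handshake these three ioctls provide; error handling is simplified
 * and VFIO_SET_IOMMU only succeeds once a group has been attached.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	static int open_container(void)
 *	{
 *		int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *		// Refuse to run against an incompatible API version.
 *		if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *			return -1;
 *
 *		// Make sure the IOMMU model we want is available.
 *		if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
 *			return -1;
 *
 *		// VFIO_SET_IOMMU comes later, after a group is attached
 *		// with VFIO_GROUP_SET_CONTAINER.
 *		return container;
 *	}
 */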

/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *						struct vfio_group_status)
 *
 * Retrieve information about the group.  Fills in provided
 * struct vfio_group_status.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
	__u32	argsz;
	__u32	flags;
#define VFIO_GROUP_FLAGS_VIABLE		(1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET	(1 << 1)
};
#define VFIO_GROUP_GET_STATUS		_IO(VFIO_TYPE, VFIO_BASE + 3)

/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided.  Groups may only belong to a single
 * container.  Containers may, at their discretion, support multiple
 * groups.  Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 4)

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container.  This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state.  All device file descriptors must be released
 * prior to calling this interface.  When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string.  The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry.  The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD	_IO(VFIO_TYPE, VFIO_BASE + 6)
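
/*
 * For illustration only: a sketch of the group-level flow, continuing
 * the container sketch above.  The group number and device name are
 * hypothetical; error handling is omitted.
 *
 *	static int get_device_fd(int container)
 *	{
 *		struct vfio_group_status status = { .argsz = sizeof(status) };
 *		int group = open("/dev/vfio/26", O_RDWR);
 *
 *		// All devices in the group must be bound to vfio drivers.
 *		ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *		if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *			return -1;
 *
 *		// Attaching the group unlocks the container ioctls...
 *		ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *
 *		// ...including VFIO_SET_IOMMU.
 *		ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 *
 *		// Name as listed in the group's sysfs devices directory.
 *		return ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 *	}
 */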

/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *						struct vfio_device_info)
 *
 * Retrieve information about the device.  Fills in provided
 * struct vfio_device_info.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_FLAGS_RESET	(1 << 0)	/* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI	(1 << 1)	/* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)	/* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)	/* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW	(1 << 4)	/* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP	(1 << 5)	/* vfio-ap device */
#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6)	/* vfio-fsl-mc device */
#define VFIO_DEVICE_FLAGS_CAPS	(1 << 7)	/* Info supports caps */
	__u32	num_regions;	/* Max region index + 1 */
	__u32	num_irqs;	/* Max IRQ index + 1 */
	__u32   cap_offset;	/* Offset within info struct of first cap */
};
#define VFIO_DEVICE_GET_INFO		_IO(VFIO_TYPE, VFIO_BASE + 7)

/*
 * A vendor driver using the mediated device framework should provide a
 * device_api attribute in the supported type attribute groups.  The device
 * API string should be one of the following, corresponding to the device
 * flags in the vfio_device_info structure.
 */

#define VFIO_DEVICE_API_PCI_STRING		"vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING		"vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING		"vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING		"vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING		"vfio-ap"

/*
 * The following capabilities are unique to s390 zPCI devices.  Their contents
 * are further-defined in vfio_zdev.h
 */
#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE		1
#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP		2
#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL		3
#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP		4

/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *				       struct vfio_region_info)
 *
 * Retrieve information about a device region.  Caller provides
 * struct vfio_region_info with index value set.  Caller sets argsz.
 * Implementation of region mapping is bus driver specific.  This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space).  Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_REGION_INFO_FLAG_READ	(1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE	(1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP	(1 << 2) /* Region supports mmap */
#define VFIO_REGION_INFO_FLAG_CAPS	(1 << 3) /* Info supports caps */
	__u32	index;		/* Region index */
	__u32	cap_offset;	/* Offset within info struct of first cap */
	__u64	size;		/* Region size (bytes) */
	__u64	offset;		/* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO	_IO(VFIO_TYPE, VFIO_BASE + 8)
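
/*
 * For illustration only: a sketch of the two-call argsz pattern used by
 * INFO ioctls that support capability chains; error handling is omitted.
 *
 *	static struct vfio_region_info *get_region_info(int device, __u32 index)
 *	{
 *		struct vfio_region_info *info = calloc(1, sizeof(*info));
 *
 *		info->argsz = sizeof(*info);
 *		info->index = index;
 *		ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info);
 *
 *		// argsz was bumped: enlarge the buffer and call again so
 *		// the kernel can append the capability chain.
 *		if (info->argsz > sizeof(*info)) {
 *			__u32 argsz = info->argsz;
 *
 *			info = realloc(info, argsz);
 *			memset(info, 0, argsz);
 *			info->argsz = argsz;
 *			info->index = index;
 *			ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info);
 *		}
 *		return info;
 *	}
 */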

/*
 * The sparse mmap capability allows finer granularity of specifying areas
 * within a region with mmap support.  When specified, the user should only
 * mmap the offset ranges specified by the areas array.  mmaps outside of the
 * areas specified may fail (such as the range covering a PCI MSI-X table) or
 * may result in improper device behavior.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP	1

struct vfio_region_sparse_mmap_area {
	__u64	offset;	/* Offset of mmap'able area within region */
	__u64	size;	/* Size of mmap'able area */
};

struct vfio_region_info_cap_sparse_mmap {
	struct vfio_info_cap_header header;
	__u32	nr_areas;
	__u32	reserved;
	struct vfio_region_sparse_mmap_area areas[];
};
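
/*
 * For illustration only: a sketch of honoring the sparse mmap areas,
 * using vfio_find_cap() from the capability chain sketch above.
 *
 *	static void mmap_sparse_areas(int device, struct vfio_region_info *info)
 *	{
 *		struct vfio_info_cap_header *hdr;
 *		struct vfio_region_info_cap_sparse_mmap *sparse;
 *		__u32 i;
 *
 *		if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS))
 *			return;
 *
 *		hdr = vfio_find_cap(info, info->cap_offset,
 *				    VFIO_REGION_INFO_CAP_SPARSE_MMAP);
 *		if (!hdr)
 *			return;
 *
 *		sparse = (void *)hdr;
 *		for (i = 0; i < sparse->nr_areas; i++)
 *			mmap(NULL, sparse->areas[i].size,
 *			     PROT_READ | PROT_WRITE, MAP_SHARED, device,
 *			     info->offset + sparse->areas[i].offset);
 *	}
 */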

/*
 * The device specific type capability allows regions unique to a specific
 * device or class of devices to be exposed.  This helps solve the problem for
 * vfio bus drivers of defining which region indexes correspond to which region
 * on the device, without needing to resort to static indexes, as done by
 * vfio-pci.  For instance, if we were to go back in time, we might remove
 * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
 * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
 * make a "VGA" device specific type to describe the VGA access space.  This
 * means that non-VGA devices wouldn't need to waste this index, and thus the
 * address space associated with it due to implementation of device file
 * descriptor offsets in vfio-pci.
 *
 * The current implementation is now part of the user ABI, so we can't use this
 * for VGA, but there are other upcoming use cases, such as opregions for Intel
 * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
 * use this for future additions.
 *
 * The structure below defines version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_TYPE	2

struct vfio_region_info_cap_type {
	struct vfio_info_cap_header header;
	__u32 type;	/* global per bus driver */
	__u32 subtype;	/* type specific */
};

/*
 * List of region types, global per bus driver.
 * If you introduce a new type, please add it here.
 */

/* PCI region type containing a PCI vendor part */
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE	(1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK	(0xffff)
#define VFIO_REGION_TYPE_GFX                    (1)
#define VFIO_REGION_TYPE_CCW			(2)
#define VFIO_REGION_TYPE_MIGRATION              (3)

/* sub-types for VFIO_REGION_TYPE_PCI_* */

/* 8086 vendor PCI sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION	(1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG	(2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG	(3)

/* 10de vendor PCI sub-types */
/*
 * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
 */
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM	(1)

/* 1014 vendor PCI sub-types */
/*
 * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
 * to do TLB invalidation on a GPU.
 */
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD	(1)

/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID            (1)

/**
 * struct vfio_region_gfx_edid - EDID region layout.
 *
 * Set display link state and EDID blob.
 *
 * The EDID blob has monitor information such as brand, name, serial
 * number, physical size, supported video modes and more.
 *
 * This special region allows userspace (typically QEMU) to set a virtual
 * EDID for the virtual monitor, which allows a flexible display
 * configuration.
 *
 * For the EDID blob spec look here:
 *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
 *
 * On linux systems you can find the EDID blob in sysfs:
 *    /sys/class/drm/${card}/${connector}/edid
 *
 * You can use the edid-decode utility (comes with xorg-x11-utils) to
 * decode the EDID blob.
 *
 * @edid_offset: location of the edid blob, relative to the
 *               start of the region (readonly).
 * @edid_max_size: max size of the edid blob (readonly).
 * @edid_size: actual edid size (read/write).
 * @link_state: display link state (read/write).
 * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
 * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
 * @max_xres: max display width (0 == no limitation, readonly).
 * @max_yres: max display height (0 == no limitation, readonly).
 *
 * EDID update protocol:
 *   (1) set link-state to down.
 *   (2) update edid blob and size.
 *   (3) set link-state to up.
 */
struct vfio_region_gfx_edid {
	__u32 edid_offset;
	__u32 edid_max_size;
	__u32 edid_size;
	__u32 max_xres;
	__u32 max_yres;
	__u32 link_state;
#define VFIO_DEVICE_GFX_LINK_STATE_UP    1
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
};
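
/*
 * For illustration only: a sketch of the update protocol above, driven
 * with pread()/pwrite() against the device fd.  'region_offset' is
 * assumed to be the EDID region's offset as reported by
 * VFIO_DEVICE_GET_REGION_INFO; error handling is omitted.
 *
 *	static void update_edid(int device, off_t region_offset,
 *				const void *blob, __u32 blob_size)
 *	{
 *		struct vfio_region_gfx_edid hdr;
 *		__u32 state;
 *		off_t link = region_offset +
 *			     offsetof(struct vfio_region_gfx_edid, link_state);
 *
 *		pread(device, &hdr, sizeof(hdr), region_offset);
 *		if (blob_size > hdr.edid_max_size)
 *			return;
 *
 *		// (1) link down
 *		state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *		pwrite(device, &state, sizeof(state), link);
 *
 *		// (2) update blob and size
 *		pwrite(device, blob, blob_size, region_offset + hdr.edid_offset);
 *		pwrite(device, &blob_size, sizeof(blob_size), region_offset +
 *		       offsetof(struct vfio_region_gfx_edid, edid_size));
 *
 *		// (3) link up
 *		state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *		pwrite(device, &state, sizeof(state), link);
 *	}
 */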

/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD	(1)
#define VFIO_REGION_SUBTYPE_CCW_SCHIB		(2)
#define VFIO_REGION_SUBTYPE_CCW_CRW		(3)

/* sub-types for VFIO_REGION_TYPE_MIGRATION */
#define VFIO_REGION_SUBTYPE_MIGRATION           (1)

/*
 * The structure vfio_device_migration_info is placed at the 0th offset of
 * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related
 * migration information. Field accesses from this structure are only supported
 * at their native width and alignment. Otherwise, the result is undefined and
 * vendor drivers should return an error.
 *
 * device_state: (read/write)
 *      - The user application writes to this field to inform the vendor driver
 *        about the device state to be transitioned to.
 *      - The vendor driver should take the necessary actions to change the
 *        device state. After successful transition to a given state, the
 *        vendor driver should return success on write(device_state, state)
 *        system call. If the device state transition fails, the vendor driver
 *        should return an appropriate -errno for the fault condition.
 *      - On the user application side, if the device state transition fails,
 *        that is, if write(device_state, state) returns an error, read
 *        device_state again to determine the current state of the device from
 *        the vendor driver.
 *      - The vendor driver should return the previous state of the device
 *        unless the vendor driver has encountered an internal error, in which
 *        case the vendor driver may report the device_state
 *        VFIO_DEVICE_STATE_ERROR.
 *      - The user application must use the device reset ioctl to recover the
 *        device from VFIO_DEVICE_STATE_ERROR state. If the device is
 *        indicated to be in a valid device state by reading device_state, the
 *        user application may attempt to transition the device to any valid
 *        state reachable from the current state or terminate itself.
 *
 *      device_state consists of 3 bits:
 *      - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear,
 *        it indicates the _STOP state. When the device state is changed to
 *        _STOP, the driver should stop the device before write() returns.
 *      - If bit 1 is set, it indicates the _SAVING state, which means that the
 *        driver should start gathering device state information that will be
 *        provided to the VFIO user application to save the device's state.
 *      - If bit 2 is set, it indicates the _RESUMING state, which means that
 *        the driver should prepare to resume the device. Data provided through
 *        the migration region should be used to resume the device.
 *      Bits 3 - 31 are reserved for future use. To preserve them, the user
 *      application should perform a read-modify-write operation on this
 *      field when modifying the specified bits.
 *
 *  +------- _RESUMING
 *  |+------ _SAVING
 *  ||+----- _RUNNING
 *  |||
 *  000b => Device Stopped, not saving or resuming
 *  001b => Device running, which is the default state
 *  010b => Stop the device & save the device state, stop-and-copy state
 *  011b => Device running and save the device state, pre-copy state
 *  100b => Device stopped and the device state is resuming
 *  101b => Invalid state
 *  110b => Error state
 *  111b => Invalid state
 *
 * State transitions:
 *
 *              _RESUMING  _RUNNING    Pre-copy    Stop-and-copy   _STOP
 *                (100b)     (001b)     (011b)        (010b)       (000b)
 * 0. Running or default state
 *                             |
 *
 * 1. Normal Shutdown (optional)
 *                             |------------------------------------->|
 *
 * 2. Save the state or suspend
 *                             |------------------------->|---------->|
 *
 * 3. Save the state during live migration
 *                             |----------->|------------>|---------->|
 *
 * 4. Resuming
 *                  |<---------|
 *
 * 5. Resumed
 *                  |--------->|
 *
 * 0. Default state of VFIO device is _RUNNING when the user application starts.
 * 1. During normal shutdown of the user application, the user application may
 *    optionally change the VFIO device state from _RUNNING to _STOP. This
 *    transition is optional. The vendor driver must support this transition but
 *    must not require it.
 * 2. When the user application saves state or suspends the application, the
 *    device state transitions from _RUNNING to stop-and-copy and then to _STOP.
 *    On state transition from _RUNNING to stop-and-copy, the driver must stop
 *    the device, save the device state and send it to the application through
 *    the migration region. The sequence to be followed for such transition is
 *    given below.
 * 3. In live migration of user application, the state transitions from _RUNNING
 *    to pre-copy, to stop-and-copy, and to _STOP.
 *    On state transition from _RUNNING to pre-copy, the driver should start
 *    gathering the device state while the application is still running and send
 *    the device state data to application through the migration region.
 *    On state transition from pre-copy to stop-and-copy, the driver must stop
 *    the device, save the device state and send it to the user application
 *    through the migration region.
 *    Vendor drivers must support the pre-copy state even for implementations
 *    where no data is provided to the user before the stop-and-copy state. The
 *    user must not be required to consume all migration data before the device
 *    transitions to a new state, including the stop-and-copy state.
 *    The sequence to be followed for above two transitions is given below.
 * 4. To start the resuming phase, the device state should be transitioned from
 *    the _RUNNING to the _RESUMING state.
 *    In the _RESUMING state, the driver should use the device state data
 *    received through the migration region to resume the device.
 * 5. After providing saved device data to the driver, the application should
 *    change the state from _RESUMING to _RUNNING.
 *
 * reserved:
 *      Reads on this field return zero and writes are ignored.
 *
 * pending_bytes: (read only)
 *      The number of pending bytes still to be migrated from the vendor driver.
 *
 * data_offset: (read only)
 *      The user application should read data_offset field from the migration
 *      region. The user application should read the device data from this
 *      offset within the migration region during the _SAVING state or write
 *      the device data during the _RESUMING state. See below for details of
 *      sequence to be followed.
 *
 * data_size: (read/write)
 *      The user application should read data_size to get the size in bytes of
 *      the data copied in the migration region during the _SAVING state and
 *      write the size in bytes of the data copied in the migration region
 *      during the _RESUMING state.
 *
 * The format of the migration region is as follows:
 *  ------------------------------------------------------------------
 * |vfio_device_migration_info|    data section                      |
 * |                          |     ///////////////////////////////  |
 * ------------------------------------------------------------------
 *   ^                              ^
 *  offset 0-trapped part        data_offset
 *
 * The structure vfio_device_migration_info is always followed by the data
 * section in the region, so data_offset will always be nonzero. The offset
 * from where the data is copied is decided by the kernel driver. The data
 * section can be trapped, mmapped, or partitioned, depending on how the kernel
 * driver defines the data section. The data section partition can be defined
 * as mapped by the sparse mmap capability. If mmapped, data_offset must be
 * page aligned, whereas the initial section, which contains the
 * vfio_device_migration_info structure, might not end at a page-aligned
 * offset. The user is not required to access the data through mmap regardless
 * of the mmap capabilities of the region.
 * The vendor driver should determine whether and how to partition the data
 * section. The vendor driver should return data_offset accordingly.
 *
 * The sequence to be followed while in pre-copy state and stop-and-copy state
 * is as follows:
 * a. Read pending_bytes, indicating the start of a new iteration to get device
 *    data. Repeated read on pending_bytes at this stage should have no side
 *    effects.
 *    If pending_bytes == 0, the user application should not iterate to get data
 *    for that device.
 *    If pending_bytes > 0, perform the following steps.
 * b. Read data_offset, indicating that the vendor driver should make data
 *    available through the data section. The vendor driver should return from
 *    this read operation only after data is available from (region +
 *    data_offset) to (region + data_offset + data_size).
 * c. Read data_size, which is the amount of data in bytes available through
 *    the migration region.
 *    Read on data_offset and data_size should return the offset and size of
 *    the current buffer if the user application reads data_offset and
 *    data_size more than once here.
 * d. Read data_size bytes of data from (region + data_offset) from the
 *    migration region.
 * e. Process the data.
 * f. Read pending_bytes, which indicates that the data from the previous
 *    iteration has been read. If pending_bytes > 0, go to step b.
 *
 * The user application can transition from the _SAVING|_RUNNING
 * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the
 * number of pending bytes. The user application should iterate in _SAVING
 * (stop-and-copy) until pending_bytes is 0.
 *
 * The sequence to be followed while _RESUMING device state is as follows:
 * While data for this device is available, repeat the following steps:
 * a. Read data_offset from where the user application should write data.
 * b. Write migration data starting at the migration region + data_offset for
 *    the length determined by data_size from the migration source.
 * c. Write data_size, which indicates to the vendor driver that data is
 *    written in the migration region. The vendor driver must return from this
 *    write operation only after consuming the data. The vendor driver should
 *    apply the user-provided migration region data to the device resume state.
 *
 * If an error occurs during the above sequences, the vendor driver can return
 * an error code for the next read() or write() operation, which will terminate
 * the loop. The user application should then take the next necessary action,
 * for example, failing migration or terminating the user application.
 *
 * For the user application, data is opaque. The user application should write
 * data in the same order as the data is received, using the same transaction
 * sizes as at the source.
 */

struct vfio_device_migration_info {
	__u32 device_state;         /* VFIO device state */
#define VFIO_DEVICE_STATE_STOP      (0)
#define VFIO_DEVICE_STATE_RUNNING   (1 << 0)
#define VFIO_DEVICE_STATE_SAVING    (1 << 1)
#define VFIO_DEVICE_STATE_RESUMING  (1 << 2)
#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_RUNNING | \
				     VFIO_DEVICE_STATE_SAVING |  \
				     VFIO_DEVICE_STATE_RESUMING)

#define VFIO_DEVICE_STATE_VALID(state) \
	(state & VFIO_DEVICE_STATE_RESUMING ? \
	(state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1)

#define VFIO_DEVICE_STATE_IS_ERROR(state) \
	((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \
					      VFIO_DEVICE_STATE_RESUMING))

#define VFIO_DEVICE_STATE_SET_ERROR(state) \
	((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_SAVING | \
					     VFIO_DEVICE_STATE_RESUMING)

	__u32 reserved;
	__u64 pending_bytes;
	__u64 data_offset;
	__u64 data_size;
};
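
/*
 * For illustration only: a sketch of the pre-copy/stop-and-copy read
 * sequence (steps a-f above), assuming the data section is accessed with
 * pread() rather than mmap, that 'region_offset' is the migration
 * region's device fd offset, and that the usual libc headers are in
 * scope; error handling is omitted.
 *
 *	#define MIG_FIELD(f) offsetof(struct vfio_device_migration_info, f)
 *
 *	static void save_iterate(int device, off_t region_offset)
 *	{
 *		__u64 pending, data_offset, data_size;
 *		void *buf;
 *
 *		for (;;) {
 *			// a. read pending_bytes to start an iteration
 *			pread(device, &pending, sizeof(pending),
 *			      region_offset + MIG_FIELD(pending_bytes));
 *			if (!pending)
 *				break;
 *
 *			// b./c. driver stages data, reports where and how much
 *			pread(device, &data_offset, sizeof(data_offset),
 *			      region_offset + MIG_FIELD(data_offset));
 *			pread(device, &data_size, sizeof(data_size),
 *			      region_offset + MIG_FIELD(data_size));
 *
 *			// d./e. read and process the staged data
 *			buf = malloc(data_size);
 *			pread(device, buf, data_size,
 *			      region_offset + data_offset);
 *			free(buf);	// a real client sends it to the target
 *
 *			// f. the next pending_bytes read closes the iteration
 *		}
 *	}
 */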

/*
 * The MSI-X mappable capability informs the user that the MSI-X data of a
 * BAR can be mmapped, which allows direct access to non-MSI-X registers
 * that happen to be within the same system page.
 *
 * Even though userspace gets direct access to the MSI-X data, the existing
 * VFIO_DEVICE_SET_IRQS interface must still be used for MSI-X configuration.
 */
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE	3

/*
 * Capability with compressed real address (aka SSA - small system address)
 * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
 * and by the userspace to associate a NVLink bridge with a GPU.
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT	4

struct vfio_region_info_cap_nvlink2_ssatgt {
	struct vfio_info_cap_header header;
	__u64 tgt;
};

/*
 * Capability with an NVLink link speed. The value is read by
 * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
 * property in the device tree. The value is fixed in the hardware
 * and failing to provide the correct value results in the link
 * not working with no indication from the driver why.
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD	5

struct vfio_region_info_cap_nvlink2_lnkspd {
	struct vfio_info_cap_header header;
	__u32 link_speed;
	__u32 __pad;
};

/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *				    struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ.  Caller provides
 * struct vfio_irq_info with index value set.  Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific.  Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks.  Zero count IRQ blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flag indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts.  This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are set up as a set and new subindexes cannot be enabled without first
 * disabling the entire index.  This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down setup and incrementally increase the vectors as each is enabled.
 */
struct vfio_irq_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_IRQ_INFO_EVENTFD		(1 << 0)
#define VFIO_IRQ_INFO_MASKABLE		(1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED	(1 << 2)
#define VFIO_IRQ_INFO_NORESIZE		(1 << 3)
	__u32	index;		/* IRQ index */
	__u32	count;		/* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO	_IO(VFIO_TYPE, VFIO_BASE + 9)

/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts.  Caller provides
 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided.  If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 * to trigger for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (i.e. simulate hardware triggering).
 *
 * Setting an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device.  Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
	__u32	argsz;
	__u32	flags;
#define VFIO_IRQ_SET_DATA_NONE		(1 << 0) /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL		(1 << 1) /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD	(1 << 2) /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK	(1 << 3) /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK	(1 << 4) /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER	(1 << 5) /* Trigger interrupt */
	__u32	index;
	__u32	start;
	__u32	count;
	__u8	data[];
};
#define VFIO_DEVICE_SET_IRQS		_IO(VFIO_TYPE, VFIO_BASE + 10)

#define VFIO_IRQ_SET_DATA_TYPE_MASK	(VFIO_IRQ_SET_DATA_NONE | \
					 VFIO_IRQ_SET_DATA_BOOL | \
					 VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK	(VFIO_IRQ_SET_ACTION_MASK | \
					 VFIO_IRQ_SET_ACTION_UNMASK | \
					 VFIO_IRQ_SET_ACTION_TRIGGER)
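
/*
 * For illustration only: a sketch of enabling a block of MSI-X vectors
 * with eventfd signaling through this ioctl.  The caller is assumed to
 * supply 'nvec' already-created eventfds; error handling is omitted.
 *
 *	static int enable_msix(int device, __s32 *eventfds, __u32 nvec)
 *	{
 *		size_t sz = sizeof(struct vfio_irq_set) + nvec * sizeof(__s32);
 *		struct vfio_irq_set *set = calloc(1, sz);
 *		int ret;
 *
 *		set->argsz = sz;
 *		set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			     VFIO_IRQ_SET_ACTION_TRIGGER;
 *		set->index = VFIO_PCI_MSIX_IRQ_INDEX;	// defined below
 *		set->start = 0;
 *		set->count = nvec;
 *		memcpy(set->data, eventfds, nvec * sizeof(__s32));
 *
 *		ret = ioctl(device, VFIO_DEVICE_SET_IRQS, set);
 *		free(set);
 *		return ret;
 *	}
 */
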
/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET		_IO(VFIO_TYPE, VFIO_BASE + 11)

/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping.  Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
	VFIO_PCI_BAR0_REGION_INDEX,
	VFIO_PCI_BAR1_REGION_INDEX,
	VFIO_PCI_BAR2_REGION_INDEX,
	VFIO_PCI_BAR3_REGION_INDEX,
	VFIO_PCI_BAR4_REGION_INDEX,
	VFIO_PCI_BAR5_REGION_INDEX,
	VFIO_PCI_ROM_REGION_INDEX,
	VFIO_PCI_CONFIG_REGION_INDEX,
	/*
	 * Expose VGA regions defined for PCI base class 03, subclass 00.
	 * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
	 * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
	 * range is found at its identity mapped offset from the region
	 * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
	 * between described ranges are unimplemented.
	 */
	VFIO_PCI_VGA_REGION_INDEX,
	VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
				 /* device specific cap to define content. */
};

enum {
	VFIO_PCI_INTX_IRQ_INDEX,
	VFIO_PCI_MSI_IRQ_INDEX,
	VFIO_PCI_MSIX_IRQ_INDEX,
	VFIO_PCI_ERR_IRQ_INDEX,
	VFIO_PCI_REQ_IRQ_INDEX,
	VFIO_PCI_NUM_IRQS
};

/*
 * The vfio-ccw bus driver makes use of the following fixed region and
 * IRQ index mapping. Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
	VFIO_CCW_CONFIG_REGION_INDEX,
	VFIO_CCW_NUM_REGIONS
};

enum {
	VFIO_CCW_IO_IRQ_INDEX,
	VFIO_CCW_CRW_IRQ_INDEX,
	VFIO_CCW_NUM_IRQS
};

/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *					      struct vfio_pci_hot_reset_info)
 *
 * Return: 0 on success, -errno on failure:
 *	-ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
 */
struct vfio_pci_dependent_device {
	__u32	group_id;
	__u16	segment;
	__u8	bus;
	__u8	devfn; /* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
	__u32	argsz;
	__u32	flags;
	__u32	count;
	struct vfio_pci_dependent_device	devices[];
};

#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *				    struct vfio_pci_hot_reset)
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
	__u32	argsz;
	__u32	flags;
	__u32	count;
	__s32	group_fds[];
};

#define VFIO_DEVICE_PCI_HOT_RESET	_IO(VFIO_TYPE, VFIO_BASE + 13)

/**
 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
 *                                    struct vfio_device_query_gfx_plane)
 *
 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
 *
 * flags supported:
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
 *   to ask whether the mdev supports dma-buf: returns 0 on support,
 *   -EINVAL if dma-buf is not supported.
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
 *   to ask whether the mdev supports region: returns 0 on support,
 *   -EINVAL if region is not supported.
 * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
 *   with each call to query the plane info.
 * - Others are invalid and return -EINVAL.
 *
 * Note:
 * 1. The plane could be disabled by the guest. In that case, success will
 *    be returned with zero-initialized drm_format, size, width and height
 *    fields.
 * 2. x_hot/y_hot are set to 0xFFFFFFFF if no hotspot information is
 *    available.
 *
 * Return: 0 on success, -errno on other failure.
 */
struct vfio_device_gfx_plane_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
	/* in */
	__u32 drm_plane_type;	/* type of plane: DRM_PLANE_TYPE_* */
	/* out */
	__u32 drm_format;	/* drm format of plane */
	__u64 drm_format_mod;   /* tiled mode */
	__u32 width;	/* width of plane */
	__u32 height;	/* height of plane */
	__u32 stride;	/* stride of plane */
	__u32 size;	/* size of plane in bytes, page aligned */
	__u32 x_pos;	/* horizontal position of cursor plane */
	__u32 y_pos;	/* vertical position of cursor plane */
	__u32 x_hot;    /* horizontal position of cursor hotspot */
	__u32 y_hot;    /* vertical position of cursor hotspot */
	union {
		__u32 region_index;	/* region index */
		__u32 dmabuf_id;	/* dma-buf id */
	};
};

#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)

/**
 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
 *
 * Return a new dma-buf file descriptor for an exposed guest framebuffer
 * described by the provided dmabuf_id.  The dmabuf_id is returned from
 * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
 */

#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)

/**
 * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
 *                              struct vfio_device_ioeventfd)
 *
 * Perform a write to the device at the specified device fd offset, with
 * the specified data and width when the provided eventfd is triggered.
 * vfio bus drivers may not support this for all regions, for all widths,
 * or at all.  vfio-pci currently only enables support for BAR regions,
 * excluding the MSI-X vector table.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_ioeventfd {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_IOEVENTFD_8		(1 << 0) /* 1-byte write */
#define VFIO_DEVICE_IOEVENTFD_16	(1 << 1) /* 2-byte write */
#define VFIO_DEVICE_IOEVENTFD_32	(1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64	(1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK	(0xf)
	__u64	offset;			/* device fd offset of write */
	__u64	data;			/* data to be written */
	__s32	fd;			/* -1 for de-assignment */
};

#define VFIO_DEVICE_IOEVENTFD		_IO(VFIO_TYPE, VFIO_BASE + 16)
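
/*
 * For illustration only: a sketch of arming an ioeventfd for a 4-byte
 * doorbell write at a hypothetical BAR offset; error handling is omitted.
 *
 *	#include <sys/eventfd.h>
 *
 *	static int arm_doorbell(int device, __u64 doorbell_offset)
 *	{
 *		struct vfio_device_ioeventfd ioeventfd = {
 *			.argsz	= sizeof(ioeventfd),
 *			.flags	= VFIO_DEVICE_IOEVENTFD_32,	// 4-byte write
 *			.offset	= doorbell_offset,	// device fd offset
 *			.data	= 0x1,			// value to write
 *			.fd	= eventfd(0, 0),
 *		};
 *
 *		// The kernel writes 'data' at 'offset' each time the
 *		// eventfd is signaled.
 *		return ioctl(device, VFIO_DEVICE_IOEVENTFD, &ioeventfd);
 *	}
 */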

/**
 * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *			       struct vfio_device_feature)
 *
 * Get, set, or probe feature data of the device.  The feature is selected
 * using the FEATURE_MASK portion of the flags field.  Support for a feature
 * can be probed by setting both the FEATURE_MASK and PROBE bits.  A probe
 * may optionally include the GET and/or SET bits to determine read vs write
 * access of the feature respectively.  Probing a feature will return success
 * if the feature is supported and all of the optionally indicated GET/SET
 * methods are supported.  The format of the data portion of the structure is
 * specific to the given feature.  The data portion is not required for
 * probing.  GET and SET are mutually exclusive, except for use with PROBE.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_feature {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_FEATURE_MASK	(0xffff) /* 16-bit feature index */
#define VFIO_DEVICE_FEATURE_GET		(1 << 16) /* Get feature into data[] */
#define VFIO_DEVICE_FEATURE_SET		(1 << 17) /* Set feature from data[] */
#define VFIO_DEVICE_FEATURE_PROBE	(1 << 18) /* Probe feature support */
	__u8	data[];
};

#define VFIO_DEVICE_FEATURE		_IO(VFIO_TYPE, VFIO_BASE + 17)

/*
 * Provide support for setting a PCI VF Token, which is used as a shared
 * secret between PF and VF drivers.  This feature may only be set on a
 * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
 * open VFs.  Data provided when setting this feature is a 16-byte array
 * (__u8 b[16]), representing a UUID.
 */
#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN	(0)
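
/*
 * For illustration only: a sketch of probing and then setting the VF
 * token feature on a PF device fd; the UUID bytes come from the caller
 * and error handling is abbreviated.
 *
 *	static int set_vf_token(int device, const __u8 uuid[16])
 *	{
 *		size_t sz = sizeof(struct vfio_device_feature) + 16;
 *		struct vfio_device_feature *feature = calloc(1, sz);
 *		int ret;
 *
 *		feature->argsz = sz;
 *
 *		// Probe that the feature exists and is settable.
 *		feature->flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
 *				 VFIO_DEVICE_FEATURE_SET |
 *				 VFIO_DEVICE_FEATURE_PROBE;
 *		ret = ioctl(device, VFIO_DEVICE_FEATURE, feature);
 *
 *		if (!ret) {
 *			// Set the token for real; no PROBE, data required.
 *			feature->flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
 *					 VFIO_DEVICE_FEATURE_SET;
 *			memcpy(feature->data, uuid, 16);
 *			ret = ioctl(device, VFIO_DEVICE_FEATURE, feature);
 *		}
 *		free(feature);
 *		return ret;
 *	}
 */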

/* -------- API for Type1 VFIO IOMMU -------- */

/**
 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12,
 *			      struct vfio_iommu_type1_info)
 *
 * Retrieve information about the IOMMU object. Fills in provided
 * struct vfio_iommu_type1_info. Caller sets argsz.
 *
 * XXX Should we do these by CHECK_EXTENSION too?
 */
struct vfio_iommu_type1_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)	/* supported page sizes info */
#define VFIO_IOMMU_INFO_CAPS	(1 << 1)	/* Info supports caps */
	__u64	iova_pgsizes;	/* Bitmap of supported page sizes */
	__u32   cap_offset;	/* Offset within info struct of first cap */
};

/*
 * The IOVA capability allows reporting the valid IOVA range(s),
 * excluding any non-relaxable reserved regions exposed by
 * devices attached to the container. Any DMA map attempt
 * outside the valid IOVA range will return an error.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE  1

struct vfio_iova_range {
	__u64	start;
	__u64	end;
};

struct vfio_iommu_type1_info_cap_iova_range {
	struct	vfio_info_cap_header header;
	__u32	nr_iovas;
	__u32	reserved;
	struct	vfio_iova_range iova_ranges[];
};

/*
 * The migration capability allows reporting supported features for migration.
 *
 * The structures below define version 1 of this capability.
 *
 * The existence of this capability indicates that the IOMMU kernel driver
 * supports dirty page logging.
 *
 * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for dirty
 * page logging.
 * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
 * size in bytes that can be used by user applications when getting the dirty
 * bitmap.
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2

struct vfio_iommu_type1_info_cap_migration {
	struct	vfio_info_cap_header header;
	__u32	flags;
	__u64	pgsize_bitmap;
	__u64	max_dirty_bitmap_size;		/* in bytes */
};

/*
 * The DMA available capability allows reporting the current number of
 * simultaneously outstanding DMA mappings that are allowed.
 *
 * The structure below defines version 1 of this capability.
 *
 * avail: specifies the current number of outstanding DMA mappings allowed.
 */
#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3

struct vfio_iommu_type1_info_dma_avail {
	struct	vfio_info_cap_header header;
	__u32	avail;
};

#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)
 *
 * Map process virtual addresses to IO virtual addresses using the
 * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
 */
struct vfio_iommu_type1_dma_map {
	__u32	argsz;
	__u32	flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0)		/* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)	/* writable from device */
	__u64	vaddr;				/* Process virtual address */
	__u64	iova;				/* IO virtual address */
	__u64	size;				/* Size of mapping (bytes) */
};

#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
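
/*
 * For illustration only: a sketch of backing an IOVA range with fresh
 * anonymous memory through this ioctl; error handling is omitted.
 *
 *	#include <sys/mman.h>
 *
 *	static int map_dma(int container, __u64 iova, __u64 size)
 *	{
 *		struct vfio_iommu_type1_dma_map map = {
 *			.argsz = sizeof(map),
 *			.flags = VFIO_DMA_MAP_FLAG_READ |
 *				 VFIO_DMA_MAP_FLAG_WRITE,
 *			.iova  = iova,
 *			.size  = size,
 *		};
 *		void *vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		map.vaddr = (__u64)(unsigned long)vaddr;
 *		return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *	}
 */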

struct vfio_bitmap {
	__u64        pgsize;	/* page size for bitmap in bytes */
	__u64        size;	/* in bytes */
	__u64 __user *data;	/* one bit per page */
};

/**
 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
 *							struct vfio_dma_unmap)
 *
 * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
 * Caller sets argsz.  The actual unmapped size is returned in the size
 * field.  No guarantee is made to the user that arbitrary unmaps of iova
 * or size different from those used in the original mapping call will
 * succeed.
 * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
 * before unmapping IO virtual addresses. When this flag is set, the user must
 * provide a struct vfio_bitmap in data[]. The user must provide zeroed memory
 * via vfio_bitmap.data and its size in the vfio_bitmap.size field. A bit in
 * the bitmap represents one page, of the user-provided page size in the
 * vfio_bitmap.pgsize field, consecutively starting from the iova offset. A
 * set bit indicates that the page at that offset from iova is dirty. A bitmap
 * of the pages in the range of the unmapped size is returned in the
 * user-provided vfio_bitmap.data.
 */
struct vfio_iommu_type1_dma_unmap {
	__u32	argsz;
	__u32	flags;
#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
	__u64	iova;				/* IO virtual address */
	__u64	size;				/* Size of mapping (bytes) */
	__u8    data[];
};

#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
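
/*
 * For illustration only: a sketch of unmapping a range while collecting
 * its dirty bitmap, assuming 4 KiB tracking granularity; error handling
 * is omitted.
 *
 *	static int unmap_get_dirty(int container, __u64 iova, __u64 size)
 *	{
 *		size_t sz = sizeof(struct vfio_iommu_type1_dma_unmap) +
 *			    sizeof(struct vfio_bitmap);
 *		struct vfio_iommu_type1_dma_unmap *unmap = calloc(1, sz);
 *		struct vfio_bitmap *bitmap = (struct vfio_bitmap *)unmap->data;
 *		int ret;
 *
 *		unmap->argsz = sz;
 *		unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
 *		unmap->iova = iova;
 *		unmap->size = size;
 *
 *		bitmap->pgsize = 4096;			// one bit per 4K page
 *		bitmap->size = ((size / 4096) + 7) / 8;	// bitmap bytes
 *		bitmap->data = calloc(1, bitmap->size);
 *
 *		ret = ioctl(container, VFIO_IOMMU_UNMAP_DMA, unmap);
 *		// ...consume bitmap->data, then free both allocations...
 *		return ret;
 *	}
 */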

/*
 * IOCTLs to enable/disable IOMMU container usage.
 * No parameters are supported.
 */
#define VFIO_IOMMU_ENABLE	_IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE	_IO(VFIO_TYPE, VFIO_BASE + 16)

/**
 * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *                                     struct vfio_iommu_type1_dirty_bitmap)
 * This IOCTL is used for dirty page logging.
 * The caller should set the flag depending on which operation to perform,
 * as detailed below:
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set
 * instructs the IOMMU driver to log pages that are dirtied or potentially
 * dirtied by the device; designed to be used when a migration is in progress.
 * Dirty pages are logged until logging is disabled by the user application by
 * calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set
 * instructs the IOMMU driver to stop logging dirtied pages.
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
 * returns the dirty pages bitmap for the IOMMU container for a given IOVA
 * range. The user must specify the IOVA range and the pgsize through the
 * structure vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This
 * interface supports getting a bitmap of the smallest supported pgsize only
 * and can be modified in the future to get a bitmap of any specified supported
 * pgsize. The user must provide a zeroed memory area for the bitmap memory and
 * specify its size in bitmap.size. One bit is used to represent one page
 * consecutively starting from the iova offset. The user should provide the
 * page size in the bitmap.pgsize field. A bit set in the bitmap indicates that
 * the page at that offset from iova is dirty. The caller must set argsz to a
 * value including the size of structure vfio_iommu_type1_dirty_bitmap_get,
 * but excluding the size of the actual bitmap. If dirty pages logging is not
 * enabled, an error will be returned.
 *
 * Only one of the flags _START, _STOP and _GET_BITMAP may be specified at a
 * time.
 *
 */
struct vfio_iommu_type1_dirty_bitmap {
	__u32        argsz;
	__u32        flags;
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START	(1 << 0)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP	(1 << 1)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP	(1 << 2)
	__u8         data[];
};

struct vfio_iommu_type1_dirty_bitmap_get {
	__u64              iova;	/* IO virtual address */
	__u64              size;	/* Size of iova range */
	struct vfio_bitmap bitmap;
};

#define VFIO_IOMMU_DIRTY_PAGES             _IO(VFIO_TYPE, VFIO_BASE + 17)
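
/*
 * For illustration only: a sketch of starting dirty page tracking and
 * later fetching the bitmap for one IOVA range (granularity again assumed
 * to be 4 KiB); error handling is omitted.
 *
 *	static void get_dirty_bitmap(int container, __u64 iova, __u64 size)
 *	{
 *		size_t sz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
 *			    sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
 *		struct vfio_iommu_type1_dirty_bitmap start = {
 *			.argsz = sizeof(start),
 *			.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
 *		};
 *		struct vfio_iommu_type1_dirty_bitmap *dbitmap = calloc(1, sz);
 *		struct vfio_iommu_type1_dirty_bitmap_get *range;
 *
 *		ioctl(container, VFIO_IOMMU_DIRTY_PAGES, &start);
 *
 *		dbitmap->argsz = sz;	// excludes the bitmap memory itself
 *		dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
 *
 *		range = (void *)dbitmap->data;
 *		range->iova = iova;
 *		range->size = size;
 *		range->bitmap.pgsize = 4096;
 *		range->bitmap.size = ((size / 4096) + 7) / 8;
 *		range->bitmap.data = calloc(1, range->bitmap.size);
 *
 *		ioctl(container, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
 *		// ...consume range->bitmap.data; _STOP when migration ends...
 *	}
 */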

/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*
 * The SPAPR TCE DDW info struct provides the information about
 * the details of Dynamic DMA window capability.
 *
 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
 * @max_dynamic_windows_supported tells the maximum number of windows
 * which the platform can create.
 * @levels tells the maximum number of levels in multi-level IOMMU tables;
 * this allows splitting a table into smaller chunks which reduces
 * the amount of physically contiguous memory required for the table.
 */
struct vfio_iommu_spapr_tce_ddw_info {
	__u64 pgsizes;			/* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 levels;
};

/*
 * The SPAPR TCE info struct provides the information about the PCI bus
 * address ranges available for DMA; these values are programmed into
 * the hardware so the guest has to know that information.
 *
 * The DMA 32 bit window start is an absolute PCI bus address.
 * The IOVA addresses passed via the map/unmap ioctls are absolute PCI bus
 * addresses too, so the window works as a filter rather than an offset
 * for IOVA addresses.
 *
 * Flags supported:
 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
 *   (DDW) support is present. @ddw is only supported when DDW is present.
 */
struct vfio_iommu_spapr_tce_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_IOMMU_SPAPR_INFO_DDW	(1 << 0)	/* DDW supported */
	__u32 dma32_window_start;	/* 32 bit window start (bytes) */
	__u32 dma32_window_size;	/* 32 bit window size (bytes) */
	struct vfio_iommu_spapr_tce_ddw_info ddw;
};

#define VFIO_IOMMU_SPAPR_TCE_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)

/*
 * EEH PE operation struct provides ways to:
 * - enable/disable EEH functionality;
 * - unfreeze IO/DMA for frozen PE;
 * - read PE state;
 * - reset PE;
 * - configure PE;
 * - inject EEH error.
 */
struct vfio_eeh_pe_err {
	__u32 type;
	__u32 func;
	__u64 addr;
	__u64 mask;
};

struct vfio_eeh_pe_op {
	__u32 argsz;
	__u32 flags;
	__u32 op;
	union {
		struct vfio_eeh_pe_err err;
	};
};

#define VFIO_EEH_PE_DISABLE		0	/* Disable EEH functionality */
#define VFIO_EEH_PE_ENABLE		1	/* Enable EEH functionality  */
#define VFIO_EEH_PE_UNFREEZE_IO		2	/* Enable IO for frozen PE   */
#define VFIO_EEH_PE_UNFREEZE_DMA	3	/* Enable DMA for frozen PE  */
#define VFIO_EEH_PE_GET_STATE		4	/* PE state retrieval        */
#define  VFIO_EEH_PE_STATE_NORMAL	0	/* PE in functional state    */
#define  VFIO_EEH_PE_STATE_RESET	1	/* PE reset in progress      */
#define  VFIO_EEH_PE_STATE_STOPPED	2	/* Stopped DMA and IO        */
#define  VFIO_EEH_PE_STATE_STOPPED_DMA	4	/* Stopped DMA only          */
#define  VFIO_EEH_PE_STATE_UNAVAIL	5	/* State unavailable         */
#define VFIO_EEH_PE_RESET_DEACTIVATE	5	/* Deassert PE reset         */
#define VFIO_EEH_PE_RESET_HOT		6	/* Assert hot reset          */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL	7	/* Assert fundamental reset  */
#define VFIO_EEH_PE_CONFIGURE		8	/* PE configuration          */
#define VFIO_EEH_PE_INJECT_ERR		9	/* Inject EEH error          */

#define VFIO_EEH_PE_OP			_IO(VFIO_TYPE, VFIO_BASE + 21)

/**
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
 *
 * Registers user space memory where DMA is allowed. It pins
 * user pages and does the locked memory accounting so
 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
 * get faster.
 */
struct vfio_iommu_spapr_register_memory {
	__u32	argsz;
	__u32	flags;
	__u64	vaddr;				/* Process virtual address */
	__u64	size;				/* Size of mapping (bytes) */
};
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 17)

/**
 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
 *
 * Unregisters user space memory registered with
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
 * Uses vfio_iommu_spapr_register_memory for parameters.
 */
#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 18)

/**
 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
 *
 * Creates an additional TCE table and programs it (sets a new DMA window)
 * to every IOMMU group in the container. It receives page shift, window
 * size and number of levels in the TCE table being created.
 *
 * It allocates and returns the PCI bus offset of the new DMA window.
 */
struct vfio_iommu_spapr_tce_create {
	__u32 argsz;
	__u32 flags;
	/* in */
	__u32 page_shift;
	__u32 __resv1;
	__u64 window_size;
	__u32 levels;
	__u32 __resv2;
	/* out */
	__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_CREATE	_IO(VFIO_TYPE, VFIO_BASE + 19)

/**
 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
 *
 * Unprograms a TCE table from all groups in the container and destroys it.
 * It receives a PCI bus offset as a window id.
 */
struct vfio_iommu_spapr_tce_remove {
	__u32 argsz;
	__u32 flags;
	/* in */
	__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_REMOVE	_IO(VFIO_TYPE, VFIO_BASE + 20)

/* ***************************************************************** */

#endif /* _UAPIVFIO_H */