
Searched refs:guest (Results 1 – 25 of 188) sorted by relevance


/Linux-v5.4/tools/virtio/ringtest/
virtio_ring_0_9.c
41 struct guest { struct
52 } guest; argument
78 guest.avail_idx = 0; in alloc_ring()
79 guest.kicked_avail_idx = -1; in alloc_ring()
80 guest.last_used_idx = 0; in alloc_ring()
83 guest.free_head = 0; in alloc_ring()
89 guest.num_free = ring_size; in alloc_ring()
107 if (!guest.num_free) in add_inbuf()
111 head = (ring_size - 1) & (guest.avail_idx++); in add_inbuf()
113 head = guest.free_head; in add_inbuf()
[all …]
ring.c
59 struct guest { struct
65 } guest; argument
92 guest.avail_idx = 0; in alloc_ring()
93 guest.kicked_avail_idx = -1; in alloc_ring()
94 guest.last_used_idx = 0; in alloc_ring()
103 guest.num_free = ring_size; in alloc_ring()
116 if (!guest.num_free) in add_inbuf()
119 guest.num_free--; in add_inbuf()
120 head = (ring_size - 1) & (guest.avail_idx++); in add_inbuf()
145 unsigned head = (ring_size - 1) & guest.last_used_idx; in get_buf()
[all …]
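The two ringtest hits above outline the guest-side bookkeeping state used by the virtio ring benchmark. Below is a minimal sketch reconstructed only from the fields and initial values visible in these hits; the field types, ordering, and RING_SIZE value are assumptions, and the real layout in tools/virtio/ringtest may differ.

	/* Sketch based on the fields shown in the hits above; types are assumed. */
	#include <stdio.h>

	#define RING_SIZE 256 /* hypothetical power-of-two ring size for the example */

	struct guest {
		unsigned short avail_idx;        /* next available index to publish */
		unsigned short kicked_avail_idx; /* avail index at the last kick */
		unsigned short last_used_idx;    /* last used entry consumed */
		unsigned short free_head;        /* head of the free descriptor list */
		unsigned short num_free;         /* descriptors still free */
	} guest;

	/* Mirrors the initialization visible in the alloc_ring() hits. */
	static void guest_reset(unsigned ring_size)
	{
		guest.avail_idx = 0;
		guest.kicked_avail_idx = -1;
		guest.last_used_idx = 0;
		guest.free_head = 0;
		guest.num_free = ring_size;
	}

	int main(void)
	{
		guest_reset(RING_SIZE);
		/* Index masking as seen in the add_inbuf() hits: requires a power-of-two ring. */
		unsigned head = (RING_SIZE - 1) & (guest.avail_idx++);
		printf("first head = %u, free = %u\n", head, guest.num_free);
		return 0;
	}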
/Linux-v5.4/drivers/misc/cxl/
of.c
88 afu->guest->handle = addr; in read_phys_addr()
91 afu->guest->p2n_phys += addr; in read_phys_addr()
92 afu->guest->p2n_size = size; in read_phys_addr()
133 if (read_handle(afu_np, &afu->guest->handle)) in cxl_of_read_afu_handle()
135 pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle); in cxl_of_read_afu_handle()
190 read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints); in cxl_of_read_afu_properties()
191 afu->irqs_max = afu->guest->max_ints; in cxl_of_read_afu_properties()
269 pr_devel("AFU handle: %#llx\n", afu->guest->handle); in cxl_of_read_afu_properties()
271 afu->guest->p2n_phys, afu->guest->p2n_size); in cxl_of_read_afu_properties()
301 adapter->guest->irq_avail = kcalloc(nranges, sizeof(struct irq_avail), in read_adapter_irq_config()
[all …]
guest.c
111 rc = cxl_h_collect_vpd_adapter(adapter->guest->handle, in guest_collect_vpd()
114 rc = cxl_h_collect_vpd(afu->guest->handle, 0, in guest_collect_vpd()
152 return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); in guest_get_irq_info()
180 rc = cxl_h_read_error_state(afu->guest->handle, &state); in afu_read_error_state()
197 rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); in guest_slice_irq_err()
208 rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); in guest_slice_irq_err()
222 for (i = 0; i < adapter->guest->irq_nranges; i++) { in irq_alloc_range()
223 cur = &adapter->guest->irq_avail[i]; in irq_alloc_range()
246 for (i = 0; i < adapter->guest->irq_nranges; i++) { in irq_free_range()
247 cur = &adapter->guest->irq_avail[i]; in irq_free_range()
[all …]
flash.c
190 unwa->unit_address = cpu_to_be64(adapter->guest->handle); in update_devicetree()
244 header->vendor = cpu_to_be16(adapter->guest->vendor); in handle_image()
245 header->device = cpu_to_be16(adapter->guest->device); in handle_image()
246 header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor); in handle_image()
247 header->subsystem = cpu_to_be16(adapter->guest->subsystem); in handle_image()
311 rc = fct(adapter->guest->handle, virt_to_phys(le), entries, in handle_image()
334 cxl_h_reset_adapter(adapter->guest->handle); in transfer_image()
343 cxl_h_reset_adapter(adapter->guest->handle); in transfer_image()
352 cxl_h_reset_adapter(adapter->guest->handle); in transfer_image()
506 cxl_h_reset_adapter(adapter->guest->handle); in device_close()
[all …]
/Linux-v5.4/Documentation/arm64/
perf.txt
26 For the guest this attribute will exclude EL1. Please note that EL2 is
27 never counted within a guest.
40 guest/host transitions.
42 For the guest this attribute has no effect. Please note that EL2 is
43 never counted within a guest.
49 These attributes exclude the KVM host and guest, respectively.
54 The KVM guest may run at EL0 (userspace) and EL1 (kernel).
58 must enable/disable counting on the entry and exit to the guest. This is
62 exiting the guest we disable/enable the event as appropriate based on the
66 for exclude_host. Upon entering and exiting the guest we modify the event
[all …]
/Linux-v5.4/arch/mips/include/asm/
cpu-features.h
623 #define cpu_guest_has_conf1 (cpu_data[0].guest.conf & (1 << 1))
626 #define cpu_guest_has_conf2 (cpu_data[0].guest.conf & (1 << 2))
629 #define cpu_guest_has_conf3 (cpu_data[0].guest.conf & (1 << 3))
632 #define cpu_guest_has_conf4 (cpu_data[0].guest.conf & (1 << 4))
635 #define cpu_guest_has_conf5 (cpu_data[0].guest.conf & (1 << 5))
638 #define cpu_guest_has_conf6 (cpu_data[0].guest.conf & (1 << 6))
641 #define cpu_guest_has_conf7 (cpu_data[0].guest.conf & (1 << 7))
644 #define cpu_guest_has_fpu (cpu_data[0].guest.options & MIPS_CPU_FPU)
647 #define cpu_guest_has_watch (cpu_data[0].guest.options & MIPS_CPU_WATCH)
650 #define cpu_guest_has_contextconfig (cpu_data[0].guest.options & MIPS_CPU_CTXTC)
[all …]
/Linux-v5.4/Documentation/virt/kvm/
mmu.txt
5 for presenting a standard x86 mmu to the guest, while translating guest
10 - correctness: the guest should not be able to determine that it is running
14 - security: the guest must not be able to touch host memory not assigned
19 - integration: Linux memory management code must be in control of guest memory
22 - dirty tracking: report writes to guest memory to enable live migration
34 gfn guest frame number
35 gpa guest physical address
36 gva guest virtual address
37 ngpa nested guest physical address
38 ngva nested guest virtual address
[all …]
amd-memory-encryption.rst
50 The SEV guest key management is handled by a separate processor called the AMD
53 encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
71 context. To create the encryption context, user must provide a guest policy,
82 __u32 policy; /* guest's policy */
84 … __u64 dh_uaddr; /* userspace address pointing to the guest owner's PDH key */
87 … __u64 session_addr; /* userspace address which points to the guest session information */
100 of the memory contents that can be sent to the guest owner as an attestation
120 data encrypted by the KVM_SEV_LAUNCH_UPDATE_DATA command. The guest owner may
121 wait to provide the guest with confidential information until it can verify the
122 measurement. Since the guest owner knows the initial contents of the guest at
[all …]
hypercalls.txt
48 Purpose: Trigger guest exit so that the host can check for pending
62 Purpose: Expose hypercall availability to the guest. On x86 platforms, cpuid
71 Purpose: To enable communication between the hypervisor and guest there is a
73 The guest can map this shared page to access its supervisor register through
81 Usage example : A vcpu of a paravirtualized guest that is busywaiting in guest
86 same guest can wakeup the sleeping vcpu by issuing KVM_HC_KICK_CPU hypercall,
95 Purpose: Hypercall used to synchronize host and guest clocks.
98 a0: guest physical address where host copies
115 * tsc: guest TSC value used to calculate sec/nsec pair
118 The hypercall lets a guest compute a precise timestamp across
[all …]
cpuid.rst
9 A guest running on a kvm host, can check some of its features using
12 a guest.
65 KVM_FEATURE_PV_UNHALT 7 guest checks this feature bit
69 KVM_FEATURE_PV_TLB_FLUSH 9 guest checks this feature bit
77 KVM_FEATURE_PV_SEND_IPI 11 guest checks this feature bit
85 KVM_FEATURE_PV_SCHED_YIELD 13 guest checks this feature bit
89 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side
103 KVM_HINTS_REALTIME 0 guest checks this feature bit to
msr.txt
19 in guest RAM. This memory is expected to hold a copy of the following
33 version: guest has to check version before and after grabbing
53 guest RAM, plus an enable bit in bit 0. This memory is expected to hold
75 version: guest has to check version before and after grabbing
110 coordinated between the guest and the hypervisor. Availability
120 | | guest vcpu has been paused by
168 64 byte memory area which must be in guest RAM and must be
184 fault guest must reset the reason to 0 before it does
190 be used to notify a guest when missing page becomes
201 type 1 was, but guest should not rely on that.
[all …]
review-checklist.txt
29 10. User/kernel interfaces and guest/host interfaces must be 64-bit clean
33 11. New guest visible features must either be documented in a hardware manual
37 host/guest memory must be unshared to prevent the host from writing to
38 guest memory that the guest has not reserved for this purpose.
/Linux-v5.4/tools/perf/Documentation/
perf-kvm.txt
6 perf-kvm - Tool to trace/measure kvm guest os
11 'perf kvm' [--host] [--guest] [--guestmount=<path>
14 'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
23 a performance counter profile of guest os in realtime
28 default behavior of perf kvm as --guest, so if neither --host nor --guest
29 is input, the perf data file name is perf.data.guest. If --host is input,
31 perf.data.host, please input --host --no-guest. The behaviors are shown as
33 Default('') -> perf.data.guest
35 --guest -> perf.data.guest
36 --host --guest -> perf.data.kvm
[all …]
/Linux-v5.4/tools/testing/vsock/
README
3 These tests exercise net/vmw_vsock/ host<->guest sockets for VMware, KVM, and
15 3. Install the kernel and tests inside the guest.
16 4. Boot the guest and ensure that the AF_VSOCK transport is enabled.
20 # host=server, guest=client
24 (guest)# $TEST_BINARY --mode=client \
29 # host=client, guest=server
30 (guest)# $TEST_BINARY --mode=server \
/Linux-v5.4/Documentation/ABI/testing/
sysfs-hypervisor-xen
6 Type of guest:
7 "Xen": standard guest type on arm
8 "HVM": fully virtualized guest (x86)
9 "PV": paravirtualized guest (x86)
10 "PVH": fully virtualized guest without legacy emulation (x86)
20 "self" -- The guest can profile itself
21 "hv" -- The guest can profile itself and, if it is
23 "all" -- The guest can profile itself, the hypervisor
/Linux-v5.4/Documentation/virt/kvm/arm/
psci.txt
3 and power-off to the guest.
9 This means that a guest booted on two different versions of KVM can
11 a given guest is tied to a particular PSCI revision (unlikely), or if
13 blue to an unsuspecting guest.
34 offered by KVM to the guest via a HVC call. The workaround is described
39 guest is unknown.
41 available to the guest and required for the mitigation.
43 is available to the guest, but it is not needed on this VCPU.
47 offered by KVM to the guest via a HVC call. The workaround is described
/Linux-v5.4/drivers/staging/unisys/Documentation/
overview.txt
10 allow guest partitions on the same server to share devices that would
20 commonly referred to as "guest drivers" or "client drivers". All drivers
21 except visorbus expose a device of a specific usable class to the Linux guest
27 with each guest partition sharing that device through an area of shared memory
32 Each virtual device requires exactly 1 dedicated channel, which the guest
40 because this requires no specific support in the guest partitions, it will
45 guest, the already-existing efifb Linux driver is used to provide guest
47 provide a guest graphics console are for keyboard and mouse (via visorinput).
86 special control channel called the "controlvm channel" (each guest partition
197 clientpartition handle identifying the guest (client) side
[all …]
/Linux-v5.4/arch/x86/xen/
Kconfig
7 bool "Xen guest support"
19 bool "Xen PV guest support"
26 Support running as a Xen PV guest.
38 Support running as a Xen PV Dom0 guest.
41 bool "Xen PVHVM guest support"
45 Support running as a Xen PVHVM guest.
78 bool "Support for running as a Xen PVH guest"
/Linux-v5.4/Documentation/ABI/stable/
sysfs-hypervisor-xen
33 Space separated list of supported guest system types. Each type
38 <major>: major guest interface version
39 <minor>: minor guest interface version
41 "x86_32": 32 bit x86 guest without PAE
42 "x86_32p": 32 bit x86 guest with PAE
43 "x86_64": 64 bit x86 guest
44 "armv7l": 32 bit arm guest
45 "aarch64": 64 bit arm guest
61 Features the Xen hypervisor supports for the guest as defined
93 UUID of the guest as known to the Xen hypervisor.
/Linux-v5.4/tools/virtio/virtio-trace/
README
4 Trace agent is a user tool for sending trace data of a guest to a Host in low
48 For example, if a guest use three CPUs, the names are
83 example, if a guest use three CPUs, chardev names should be trace-path-cpu0,
86 3) Boot the guest
87 You can find some chardev in /dev/virtio-ports/ in the guest.
93 0) Build trace agent in a guest
96 1) Enable ftrace in the guest
100 2) Run trace agent in the guest
104 option, trace data are output via stdout in the guest.
109 the guest will stop by specification of chardev in QEMU. This blocking mode may
[all …]
/Linux-v5.4/tools/kvm/kvm_stat/
kvm_stat.txt
16 state transitions such as guest mode entry and exit.
18 This tool is useful for observing guest behavior from the host perspective.
40 *g*:: filter by guest name/PID
46 *p*:: filter by guest name/PID
87 -g<guest>::
88 --guest=<guest_name>::
89 limit statistics to one virtual machine (guest name)
/Linux-v5.4/Documentation/filesystems/
virtiofs.rst
4 virtiofs: virtio-fs host<->guest shared file system
12 VIRTIO "virtio-fs" device for guest<->host file system sharing. It allows a
13 guest to mount a directory that has been exported on the host.
22 expose the storage network to the guest. The virtio-fs device was designed to
26 guest and host to increase performance and provide semantics that are not
35 guest# mount -t virtiofs myfs /mnt
44 client. The guest acts as the FUSE client while the host acts as the FUSE
49 response portion of the buffer is filled in by the host and the guest handles
/Linux-v5.4/drivers/staging/unisys/Documentation/ABI/
sysfs-platform-visorchipset
29 the previous guest boot) has no effect.
37 this guest. Setting the flag will cause the guest to boot from
48 action to perform on the next guest boot-up. The meaning of the
50 commission the guest.
58 guest, and triggered by a udev event. The support script is
60 PF device is being recovered in another guest.
77 guest, and triggered by a udev event. The support script is
79 PF device is being recovered in another guest.
/Linux-v5.4/arch/powerpc/kvm/
Kconfig
58 Support running unmodified book3s_32 guest kernels
74 Support running unmodified book3s_64 and book3s_32 guest kernels
89 Support running unmodified book3s_64 guest kernels in
95 guest operating systems will run at full hardware speed
108 Support running guest kernels in virtual machines on processors
110 guest in user mode (problem state) and emulating all
123 Calculate time taken for each vcpu in the real-mode guest entry,
124 exit, and interrupt handling code, plus time spent in the guest
126 in the guest. The total, minimum and maximum times in nanoseconds
154 Support running unmodified E500 guest kernels in virtual machines on
[all …]