/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 */

#ifndef _KVM_PPC_BOOK3S_XIVE_H
#define _KVM_PPC_BOOK3S_XIVE_H

#ifdef CONFIG_KVM_XICS
#include "book3s_xics.h"

/*
 * The XIVE interrupt source numbers are within the range 0 to
 * KVMPPC_XICS_NR_IRQS.
 */
#define KVMPPC_XIVE_FIRST_IRQ	0
#define KVMPPC_XIVE_NR_IRQS	KVMPPC_XICS_NR_IRQS

/*
 * State for one guest IRQ source.
 *
 * For each guest source we allocate a HW interrupt in the XIVE
 * which we use for all SW triggers. It will be unused for
 * pass-through, but it's easier to keep it around, as the same
 * guest interrupt can alternately be emulated or passed through
 * if a physical device is hot-unplugged and replaced with an
 * emulated one.
 *
 * This state structure is very similar to the XICS one, with
 * additional XIVE-specific tracking.
 */
struct kvmppc_xive_irq_state {
	bool valid;			/* Interrupt entry is valid */

	u32 number;			/* Guest IRQ number */
	u32 ipi_number;			/* XIVE IPI HW number */
	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
	u32 pt_number;			/* XIVE pass-through number if any */
	struct xive_irq_data *pt_data;	/* XIVE pass-through associated data */

	/* Targeting as set by guest */
	u8 guest_priority;		/* Guest set priority */
	u8 saved_priority;		/* Saved priority when masking */

	/* Actual targeting */
	u32 act_server;			/* Actual server */
	u8 act_priority;		/* Actual priority */

	/* Various state bits */
	bool in_eoi;			/* Synchronize with H_EOI */
	bool old_p;			/* P bit state when masking */
	bool old_q;			/* Q bit state when masking */
	bool lsi;			/* Level-sensitive interrupt */
	bool asserted;			/* Only for emulated LSI: current state */

	/* Saved for migration state */
	bool in_queue;
	bool saved_p;
	bool saved_q;
	u8 saved_scan_prio;

	/* Xive native */
	u32 eisn;			/* Guest Effective IRQ number */
};

/* Select the "right" interrupt (IPI vs. passthrough) */
static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
					  u32 *out_hw_irq,
					  struct xive_irq_data **out_xd)
{
	if (state->pt_number) {
		if (out_hw_irq)
			*out_hw_irq = state->pt_number;
		if (out_xd)
			*out_xd = state->pt_data;
	} else {
		if (out_hw_irq)
			*out_hw_irq = state->ipi_number;
		if (out_xd)
			*out_xd = &state->ipi_data;
	}
}

/*
 * This corresponds to an "ICS" in XICS terminology; we use it
 * as a means to break up source information into multiple structures.
 */
struct kvmppc_xive_src_block {
	arch_spinlock_t lock;
	u16 id;
	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};

struct kvmppc_xive;

struct kvmppc_xive_ops {
	int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
};

struct kvmppc_xive {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct dentry *dentry;

	/* VP block associated with the VM */
	u32 vp_base;

	/* Blocks of sources */
	struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
	u32 max_sbid;

	/*
	 * For state save, we lazily scan the queues on the first interrupt
	 * being migrated. We don't have a clean way to reset those flags,
	 * so we keep track of the number of valid sources and how many of
	 * them have been migrated, and reset when all of them have been
	 * processed.
	 */
	u32 src_count;
	u32 saved_src_count;

	/*
	 * Some irqs are delayed on restore until the source is created;
	 * keep track here of how many of them there are.
	 */
	u32 delayed_irqs;

	/* Which queues (priorities) are in use by the guest */
	u8 qmap;

	/* Queue orders */
	u32 q_order;
	u32 q_page_order;

	/* Flags */
	u8 single_escalation;

	/* Number of entries in the VP block */
	u32 nr_servers;

	struct kvmppc_xive_ops *ops;
	struct address_space *mapping;
	struct mutex mapping_lock;
	struct mutex lock;
};

#define KVMPPC_XIVE_Q_COUNT	8

struct kvmppc_xive_vcpu {
	struct kvmppc_xive *xive;
	struct kvm_vcpu *vcpu;
	bool valid;

	/* Server number. This is the HW CPU ID from a guest perspective */
	u32 server_num;

	/*
	 * HW VP corresponding to this VCPU. This is the base of the VP
	 * block plus the server number.
	 */
	u32 vp_id;
	u32 vp_chip_id;
	u32 vp_cam;

	/* IPI used for sending ... IPIs */
	u32 vp_ipi;
	struct xive_irq_data vp_ipi_data;

	/* Local emulation state */
	uint8_t cppr;		/* guest CPPR */
	uint8_t hw_cppr;	/* Hardware CPPR */
	uint8_t mfrr;
	uint8_t pending;

	/* Each VP has 8 queues though we only provision some */
	struct xive_q queues[KVMPPC_XIVE_Q_COUNT];
	u32 esc_virq[KVMPPC_XIVE_Q_COUNT];
	char *esc_virq_names[KVMPPC_XIVE_Q_COUNT];

	/* Stash a delayed irq on restore from migration (see set_icp) */
	u32 delayed_irq;

	/* Stats */
	u64 stat_rm_h_xirr;
	u64 stat_rm_h_ipoll;
	u64 stat_rm_h_cppr;
	u64 stat_rm_h_eoi;
	u64 stat_rm_h_ipi;
	u64 stat_vm_h_xirr;
	u64 stat_vm_h_ipoll;
	u64 stat_vm_h_cppr;
	u64 stat_vm_h_eoi;
	u64 stat_vm_h_ipi;
};

static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
			return vcpu;
	}
	return NULL;
}

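/*
 * A guest IRQ number is split into a source block id (upper bits) and a
 * source index within that block (lower bits). Return the block, or NULL
 * if the block id is out of range; optionally hand back the source index.
 */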
static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
								    u32 irq, u16 *source)
{
	u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
	u16 src = irq & KVMPPC_XICS_SRC_MASK;

	if (source)
		*source = src;
	if (bid > KVMPPC_XICS_MAX_ICS_ID)
		return NULL;
	return xive->src_blocks[bid];
}

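/*
 * Map a guest server number to its HW VP id: the base of the VM's VP
 * block plus the packed vcpu id for that server.
 */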
static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
{
	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}

static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
			return true;
	}
	return false;
}

/*
 * The mapping between guest priorities and host priorities is as
 * follows.
 *
 * Guest requests for priorities 0...6 are honored. A guest request
 * for anything higher results in a priority of 6 being applied.
 *
 * A similar mapping is done for CPPR values.
 */
static inline u8 xive_prio_from_guest(u8 prio)
{
	if (prio == 0xff || prio < 6)
		return prio;
	return 6;
}

static inline u8 xive_prio_to_guest(u8 prio)
{
	return prio;
}

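/*
 * Read the next entry from an event queue page. Bit 31 of each 32-bit
 * entry is the generation (toggle) bit: if it matches *toggle, the entry
 * is stale and the queue is considered empty, so return 0. Otherwise
 * advance *idx modulo the queue mask, flip *toggle when the index wraps,
 * and return the low 31 bits of the entry (the event data).
 */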
static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
{
	u32 cur;

	if (!qpage)
		return 0;
	cur = be32_to_cpup(qpage + *idx);
	if ((cur >> 31) == *toggle)
		return 0;
	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		(*toggle) ^= 1;
	return cur & 0x7fffffff;
}

extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
			 unsigned long mfrr);
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			      unsigned long mfrr);
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Common Xive routines for XICS-over-XIVE and XIVE native
 */
void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu);
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu);
struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
	struct kvmppc_xive *xive, int irq);
void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb);
int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation);
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
				    struct kvmppc_xive_vcpu *xc, int irq);
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr);

#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XIVE_H */