/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#ifndef _KVM_PPC_BOOK3S_XIVE_H
#define _KVM_PPC_BOOK3S_XIVE_H

#ifdef CONFIG_KVM_XICS
#include "book3s_xics.h"

/*
 * State for one guest irq source.
 *
 * For each guest source we allocate a HW interrupt in the XIVE
 * which we use for all SW triggers. It will be unused for
 * pass-through, but it's easier to keep it around since the same
 * guest interrupt can alternatively be emulated or passed through
 * if a physical device is hot-unplugged and replaced with an
 * emulated one.
 *
 * This state structure is very similar to the XICS one, with
 * additional XIVE-specific tracking.
 */
struct kvmppc_xive_irq_state {
	bool valid;			/* Interrupt entry is valid */

	u32 number;			/* Guest IRQ number */
	u32 ipi_number;			/* XIVE IPI HW number */
	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
	u32 pt_number;			/* XIVE pass-through number, if any */
	struct xive_irq_data *pt_data;	/* XIVE pass-through associated data */

	/* Targeting as set by guest */
	u8 guest_priority;		/* Guest-set priority */
	u8 saved_priority;		/* Saved priority when masking */

	/* Actual targeting */
	u32 act_server;			/* Actual server */
	u8 act_priority;		/* Actual priority */

	/* Various state bits */
	bool in_eoi;			/* Synchronize with H_EOI */
	bool old_p;			/* P bit state when masking */
	bool old_q;			/* Q bit state when masking */
	bool lsi;			/* Level-sensitive interrupt */
	bool asserted;			/* Only for emulated LSI: current state */

	/* Saved for migration state */
	bool in_queue;
	bool saved_p;
	bool saved_q;
	u8 saved_scan_prio;
};

/* Select the "right" interrupt (IPI vs. pass-through) */
static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
					  u32 *out_hw_irq,
					  struct xive_irq_data **out_xd)
{
	if (state->pt_number) {
		if (out_hw_irq)
			*out_hw_irq = state->pt_number;
		if (out_xd)
			*out_xd = state->pt_data;
	} else {
		if (out_hw_irq)
			*out_hw_irq = state->ipi_number;
		if (out_xd)
			*out_xd = &state->ipi_data;
	}
}
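
/*
 * Illustrative sketch: a typical caller resolves the currently active
 * HW interrupt before touching its ESB.  example_mask_source() is a
 * hypothetical helper; the real mask/unmask paths in book3s_xive.c
 * follow this same pattern.
 */
static inline void example_mask_source(struct kvmppc_xive_irq_state *state)
{
	u32 hw_num;
	struct xive_irq_data *xd;

	/* Resolve the active HW interrupt: pass-through if attached, else IPI */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * A real caller would now program the ESB for (hw_num, xd),
	 * e.g. setting PQ to 01 to mask the source.
	 */
	(void)hw_num;
	(void)xd;
}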

/*
 * This corresponds to an "ICS" in XICS terminology; we use it
 * as a means of breaking up source information into multiple
 * structures.
 */
struct kvmppc_xive_src_block {
	arch_spinlock_t lock;
	u16 id;
	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};


struct kvmppc_xive {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct dentry *dentry;

	/* VP block associated with the VM */
	u32 vp_base;

	/* Blocks of sources */
	struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
	u32 max_sbid;

	/*
	 * For state save, we lazily scan the queues on the first
	 * interrupt being migrated. We don't have a clean way to reset
	 * those flags, so we keep track of the number of valid sources
	 * and how many of them have been migrated, and reset once all
	 * of them have been processed (see the sketch after this
	 * structure).
	 */
	u32 src_count;
	u32 saved_src_count;

	/*
	 * Some irqs are delayed on restore until the source is created;
	 * keep a count of them here.
	 */
	u32 delayed_irqs;

	/* Which queues (priorities) are in use by the guest */
	u8 qmap;

	/* Queue orders */
	u32 q_order;
	u32 q_page_order;

	/* Flags */
	u8 single_escalation;
};
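
/*
 * Illustrative sketch of the lazy pre-save scan described above: the
 * first source fetched during migration triggers a one-time queue
 * scan, and the counters detect when the last valid source has been
 * read so the scan state can be reset for a later pass.  The actual
 * get-source path in book3s_xive.c follows this shape.
 */
static inline void example_save_one_source(struct kvmppc_xive *xive)
{
	/* First source being saved: capture queue contents once */
	if (xive->saved_src_count == 0) {
		/* ... scan all provisioned queues here ... */
	}

	xive->saved_src_count++;

	/* Last source saved: reset the scan state for the next pass */
	if (xive->saved_src_count == xive->src_count)
		xive->saved_src_count = 0;
}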

#define KVMPPC_XIVE_Q_COUNT	8

struct kvmppc_xive_vcpu {
	struct kvmppc_xive *xive;
	struct kvm_vcpu *vcpu;
	bool valid;

	/* Server number. This is the HW CPU ID from a guest perspective */
	u32 server_num;

	/*
	 * HW VP corresponding to this VCPU. This is the base of the VP
	 * block plus the server number.
	 */
	u32 vp_id;
	u32 vp_chip_id;
	u32 vp_cam;

	/* IPI used for sending ... IPIs */
	u32 vp_ipi;
	struct xive_irq_data vp_ipi_data;

	/* Local emulation state */
	uint8_t cppr;	/* Guest CPPR */
	uint8_t hw_cppr;	/* Hardware CPPR */
	uint8_t mfrr;
	uint8_t pending;

	/* Each VP has 8 queues, though we only provision some */
	struct xive_q queues[KVMPPC_XIVE_Q_COUNT];
	u32 esc_virq[KVMPPC_XIVE_Q_COUNT];
	char *esc_virq_names[KVMPPC_XIVE_Q_COUNT];

	/* Stash a delayed irq on restore from migration (see set_icp) */
	u32 delayed_irq;

	/* Stats */
	u64 stat_rm_h_xirr;
	u64 stat_rm_h_ipoll;
	u64 stat_rm_h_cppr;
	u64 stat_rm_h_eoi;
	u64 stat_rm_h_ipi;
	u64 stat_vm_h_xirr;
	u64 stat_vm_h_ipoll;
	u64 stat_vm_h_cppr;
	u64 stat_vm_h_eoi;
	u64 stat_vm_h_ipi;
};

static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
			return vcpu;
	}
	return NULL;
}

static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
								    u32 irq, u16 *source)
{
	u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
	u16 src = irq & KVMPPC_XICS_SRC_MASK;

	if (source)
		*source = src;
	if (bid > KVMPPC_XICS_MAX_ICS_ID)
		return NULL;
	return xive->src_blocks[bid];
}
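
/*
 * Illustrative sketch: resolving a guest IRQ number to its per-source
 * state combines the block lookup above with an index into irq_state[].
 * example_find_state() is a hypothetical helper; the ioctl paths in
 * book3s_xive.c open-code these same steps.
 */
static inline struct kvmppc_xive_irq_state *
example_find_state(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	u16 src;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return NULL;

	/* Entries are only meaningful once marked valid */
	if (!sb->irq_state[src].valid)
		return NULL;
	return &sb->irq_state[src];
}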

/*
 * Mapping between guest priorities and host priorities is as
 * follows.
 *
 * Guest requests for priorities 0...6 are honored. A guest request
 * for anything higher results in a priority of 6 being applied.
 *
 * A similar mapping is done for CPPR values.
 */
static inline u8 xive_prio_from_guest(u8 prio)
{
	if (prio == 0xff || prio < 6)
		return prio;
	return 6;
}
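
/*
 * For example, with the mapping above: a guest priority of 5 is kept
 * as 5, a guest priority of 7 (or 0x80) is clamped to 6, and 0xff
 * (masked) passes through unchanged.
 */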

static inline u8 xive_prio_to_guest(u8 prio)
{
	return prio;
}

static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
{
	u32 cur;

	if (!qpage)
		return 0;
	cur = be32_to_cpup(qpage + *idx);

	/* Check the valid bit (31) against the current toggle polarity */
	if ((cur >> 31) == *toggle)
		return 0;

	/* Consume the entry; flip the toggle when the queue wraps */
	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		(*toggle) ^= 1;

	return cur & 0x7fffffff;
}
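
/*
 * Illustrative sketch: peeking for a pending interrupt across the
 * provisioned queues, highest priority (lowest number) first.  Using
 * local copies of idx/toggle keeps the peek side-effect free; the
 * real scan (with CPPR and masking handling) lives in
 * book3s_xive_template.c.
 */
static inline u32 example_peek_queues(struct kvmppc_xive_vcpu *xc)
{
	int prio;

	for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];
		u32 idx = q->idx;
		u32 toggle = q->toggle;
		u32 hirq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);

		if (hirq)
			return hirq;
	}
	return 0;
}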

/* Real-mode hcall handlers */
extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
			 unsigned long mfrr);
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Virtual-mode counterparts, reached through function pointers so that
 * built-in real-mode code can fall back to handlers living in the
 * (possibly modular) KVM code.
 */
extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			      unsigned long mfrr);
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);

#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XIVE_H */