/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
 * Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

#if 0
#define ioapic_debug(fmt, arg...)	printk(KERN_WARNING fmt, ##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
		bool line_status);

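/*
 * Indirect register read.  The guest selects a register by writing its
 * index to IOREGSEL and then reads the value through the IOWIN window.
 * Each 64-bit redirection table entry is exposed as two 32-bit halves;
 * bit 0 of ioregsel picks the high or low word.
 */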
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			if (redir_index < IOAPIC_NUM_PINS)
				redir_content =
					ioapic->redirtbl[redir_index].bits;
			else
				redir_content = ~0ULL;

			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

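/*
 * Recompute the RTC EOI-tracking state for one vCPU: if the vCPU is a
 * destination of the RTC pin, dest_map and pending_eoi are updated to
 * match whether the RTC vector is still pending in that vCPU's APIC.
 */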
static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
				 e->fields.dest_mode))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, dest_map->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, dest_map->map);
		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, dest_map->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
		__rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(vcpu->vcpu_id,
			       ioapic->rtc_status.dest_map.map)) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}

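/*
 * Core pin-assertion logic for a single redirection entry.  The
 * tracepoint at the end records a return value of 0 as a coalesced
 * (dropped) interrupt, e.g. a repeated edge that was never delivered
 * or an RTC interrupt whose previous instance has not been acked yet.
 */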
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
		int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has been already ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge) {
		ioapic->irr_delivered &= ~mask;
		if (old_irr == ioapic->irr) {
			ret = 0;
			goto out;
		}
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

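/*
 * Re-assert every pin set in the saved IRR.  RTC EOI tracking is reset
 * first and rebuilt afterwards so that pending_eoi ends up consistent
 * with the restored redirection table and vCPU state.
 */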
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}

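/*
 * Collect, for this vCPU, the vectors whose EOIs the IOAPIC needs to
 * observe: level-triggered entries, entries with an ack notifier, and
 * the tracked RTC pin, whenever the vCPU is a possible destination or
 * already has the vector pending.
 */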
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, dest_map->map))
		__set_bit(dest_map->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			if (kvm_apic_match_dest(vcpu, NULL, 0,
				     e->fields.dest_id, e->fields.dest_mode) ||
			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
				__set_bit(e->fields.vector,
					  ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (!ioapic_in_kernel(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

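/*
 * Indirect register write via the IOWIN window.  Redirection table
 * updates preserve the read-only remote_irr and delivery_status fields,
 * fire mask notifiers when the mask bit changes, and immediately
 * service a level-triggered pin that is still asserted in the IRR.
 */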
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	int old_remote_irr, old_delivery_status;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		/* Preserve read-only fields */
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;

		/*
		 * Some OSes (Linux, Xen) assume that Remote IRR bit will
		 * be cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered. This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;

		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		kvm_make_scan_ioapic_request(ioapic->kvm);
		break;
	}
}

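/*
 * Deliver one pin to the local APIC(s).  Returns -1 without delivering
 * if the entry is masked or a level-triggered interrupt is still
 * awaiting its EOI (remote_irr set); otherwise returns the result of
 * kvm_irq_delivery_to_apic().
 */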
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask ||
	    (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
	     entry->fields.remote_irr))
		return -1;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that it is only called if it is >= zero, namely
		 * if rtc_irq_check_coalesced returns false.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;

	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}

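/*
 * Delayed re-injection worker: retry every level-triggered pin that is
 * still asserted and no longer waiting for an EOI.  Scheduled from
 * __kvm_ioapic_update_eoi() when an EOI storm is detected.
 */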
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000

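/*
 * Handle an EOI broadcast from a vCPU.  For level-triggered entries
 * matching the vector, remote_irr is cleared and, if the pin is still
 * asserted, the interrupt is delivered again.  After
 * IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT consecutive re-deliveries of the same
 * pin, the next one is deferred to the delayed worker above, throttling
 * interrupt storms from guests that never deassert the line.
 */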
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	/* RTC special handling */
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    vector == dest_map->vectors[vcpu->vcpu_id])
		rtc_irq_eoi(ioapic, vcpu);

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		/*
		 * We drop the lock while calling the ack notifiers because
		 * ack notifier callbacks for assigned devices call into the
		 * IOAPIC recursively.  Since remote_irr is cleared only
		 * after the notifiers return, if the same vector is
		 * delivered while the lock is dropped it is put into the
		 * irr and delivered once the ack notifier returns.
		 */
		spin_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		spin_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
		    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
			++ioapic->irq_eoi[i];
			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
				/*
				 * Real hardware does not deliver the interrupt
				 * immediately during eoi broadcast, and this
				 * lets a buggy guest make slow progress
				 * even if it does not correctly handle a
				 * level-triggered interrupt.  Emulate this
				 * behavior if we detect an interrupt storm.
				 */
				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
				ioapic->irq_eoi[i] = 0;
				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
			} else {
				ioapic_service(ioapic, i, false);
			}
		} else {
			ioapic->irq_eoi[i] = 0;
		}
	}
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return (addr >= ioapic->base_address &&
		addr < ioapic->base_address + IOAPIC_MEM_LENGTH);
}

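/*
 * MMIO read handler registered on KVM_MMIO_BUS.  Only IOREGSEL and the
 * IOWIN window are backed by state; reads of other offsets return zero.
 */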
static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

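/*
 * MMIO write handler.  The written value is narrowed to the access size
 * (8-byte writes only use the low 32 bits) and routed either to
 * IOREGSEL or, through the window register, to ioapic_write_indirect().
 */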
static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			     gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void*)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8  *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

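/*
 * Save/restore interface.  irr_delivered marks edge-triggered
 * interrupts that were already delivered but still have their IRR bit
 * set; masking it out of the saved IRR keeps kvm_set_ioapic() from
 * re-injecting them via kvm_ioapic_inject_all().
 */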
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}