Lines matching refs: apicd
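These are the references to apicd, the per-interrupt struct apic_chip_data, in the x86 vector management code (arch/x86/kernel/apic/vector.c). As a reading aid, here is a sketch of the structure implied by the fields touched in the hits below; the exact types and field order in the tree may differ:

        struct apic_chip_data {
                struct irq_cfg    hw_irq_cfg;   /* vector + dest_apicid exposed via irqd_cfg() */
                unsigned int      vector;       /* currently programmed vector */
                unsigned int      prev_vector;  /* old vector while a move is pending */
                unsigned int      cpu;          /* current target CPU */
                unsigned int      prev_cpu;     /* old target CPU while a move is pending */
                unsigned int      irq;          /* Linux irq number, used for tracing */
                struct hlist_node clist;        /* entry in the per-CPU cleanup list */
                unsigned int      move_in_progress : 1,
                                  is_managed       : 1,
                                  can_reserve      : 1,
                                  has_reserved     : 1;
        };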
91 struct apic_chip_data *apicd = apic_chip_data(irqd); in irqd_cfg() local
93 return apicd ? &apicd->hw_irq_cfg : NULL; in irqd_cfg()
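Nearly every hit below goes through apic_chip_data(irqd) first. A minimal sketch of that accessor, assuming the usual hierarchical irqdomain layout where the vector domain sits at the bottom of the irq_data chain and owns chip_data:

        static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
        {
                if (!irqd)
                        return NULL;

                /* walk down to the vector domain's irq_data, which owns apicd */
                while (irqd->parent_data)
                        irqd = irqd->parent_data;

                return irqd->chip_data;
        }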
104 struct apic_chip_data *apicd; in alloc_apic_chip_data() local
106 apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node); in alloc_apic_chip_data()
107 if (apicd) in alloc_apic_chip_data()
108 INIT_HLIST_NODE(&apicd->clist); in alloc_apic_chip_data()
109 return apicd; in alloc_apic_chip_data()
112 static void free_apic_chip_data(struct apic_chip_data *apicd) in free_apic_chip_data() argument
114 kfree(apicd); in free_apic_chip_data()
120 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_update_irq_cfg() local
124 apicd->hw_irq_cfg.vector = vector; in apic_update_irq_cfg()
125 apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu); in apic_update_irq_cfg()
128 apicd->hw_irq_cfg.dest_apicid); in apic_update_irq_cfg()
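apic_update_irq_cfg() is the one place where the hardware-visible configuration in hw_irq_cfg is rewritten. A hedged reconstruction around the referenced lines; the lockdep assertion and the effective-affinity update are assumptions, not shown in the hits:

        static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
                                        unsigned int cpu)
        {
                struct apic_chip_data *apicd = apic_chip_data(irqd);

                lockdep_assert_held(&vector_lock);

                apicd->hw_irq_cfg.vector = vector;
                apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
                irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
                trace_vector_config(irqd->irq, vector, cpu,
                                    apicd->hw_irq_cfg.dest_apicid);
        }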
134 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_update_vector() local
140 trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, in apic_update_vector()
141 apicd->cpu); in apic_update_vector()
149 apicd->prev_vector = 0; in apic_update_vector()
150 if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) in apic_update_vector()
160 if (cpu_online(apicd->cpu)) { in apic_update_vector()
161 apicd->move_in_progress = true; in apic_update_vector()
162 apicd->prev_vector = apicd->vector; in apic_update_vector()
163 apicd->prev_cpu = apicd->cpu; in apic_update_vector()
165 irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector, in apic_update_vector()
170 apicd->vector = newvec; in apic_update_vector()
171 apicd->cpu = newcpu; in apic_update_vector()
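The hits in apic_update_vector() are the core of the move bookkeeping: if the interrupt already had a vector on a still-online CPU, that vector is parked in prev_vector/prev_cpu and move_in_progress is set so it is released lazily by the cleanup IPI; if the old CPU is offline (or no usable vector existed), the matrix entry can be dropped immediately. A condensed, hedged sketch of that tail, where managed stands for irqd_affinity_is_managed(irqd):

        apicd->prev_vector = 0;
        if (apicd->vector && apicd->vector != MANAGED_IRQ_SHUTDOWN_VECTOR) {
                if (cpu_online(apicd->cpu)) {
                        /* old CPU still alive: defer release to the cleanup IPI */
                        apicd->move_in_progress = true;
                        apicd->prev_vector = apicd->vector;
                        apicd->prev_cpu = apicd->cpu;
                } else {
                        /* old CPU gone: release the matrix entry right away */
                        irq_matrix_free(vector_matrix, apicd->cpu,
                                        apicd->vector, managed);
                }
        }
        apicd->vector = newvec;
        apicd->cpu = newcpu;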
186 struct apic_chip_data *apicd = apic_chip_data(irqd); in reserve_managed_vector() local
191 apicd->is_managed = true; in reserve_managed_vector()
200 struct apic_chip_data *apicd = apic_chip_data(irqd); in reserve_irq_vector_locked() local
203 apicd->can_reserve = true; in reserve_irq_vector_locked()
204 apicd->has_reserved = true; in reserve_irq_vector_locked()
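reserve_managed_vector() and reserve_irq_vector_locked() only record intent: a reservation counts a placeholder slot in the matrix and flags the descriptor, while the real vector is allocated at activation time. A hedged sketch of the non-managed case:

        /* hedged sketch of reserve_irq_vector_locked() */
        irq_matrix_reserve(vector_matrix);      /* account a global placeholder slot */
        apicd->can_reserve = true;
        apicd->has_reserved = true;
        /* the irq stays parked (no real vector) until x86_vector_activate() */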
223 struct apic_chip_data *apicd = apic_chip_data(irqd); in assign_vector_locked() local
224 bool resvd = apicd->has_reserved; in assign_vector_locked()
225 unsigned int cpu = apicd->cpu; in assign_vector_locked()
226 int vector = apicd->vector; in assign_vector_locked()
244 if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist)) in assign_vector_locked()
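assign_vector_locked() ties the pieces together: keep the current vector if the new destination mask still covers apicd->cpu, refuse to allocate while a previous move is unfinished (line 244), and otherwise pull a fresh vector from the matrix and publish it. A hedged sketch of that flow; the irq_matrix_alloc() call is an assumption based on the generic matrix allocator:

        if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
                return 0;               /* current vector is still usable */

        /* a pending move or queued cleanup must be finished first */
        if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
                return -EBUSY;

        vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);
        return 0;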
311 struct apic_chip_data *apicd = apic_chip_data(irqd); in assign_managed_vector() local
317 if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) in assign_managed_vector()
331 struct apic_chip_data *apicd = apic_chip_data(irqd); in clear_irq_vector() local
333 unsigned int vector = apicd->vector; in clear_irq_vector()
340 trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector, in clear_irq_vector()
341 apicd->prev_cpu); in clear_irq_vector()
343 per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
344 irq_matrix_free(vector_matrix, apicd->cpu, vector, managed); in clear_irq_vector()
345 apicd->vector = 0; in clear_irq_vector()
348 vector = apicd->prev_vector; in clear_irq_vector()
352 per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
353 irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed); in clear_irq_vector()
354 apicd->prev_vector = 0; in clear_irq_vector()
355 apicd->move_in_progress = 0; in clear_irq_vector()
356 hlist_del_init(&apicd->clist); in clear_irq_vector()
361 struct apic_chip_data *apicd = apic_chip_data(irqd); in x86_vector_deactivate() local
364 trace_vector_deactivate(irqd->irq, apicd->is_managed, in x86_vector_deactivate()
365 apicd->can_reserve, false); in x86_vector_deactivate()
368 if (!apicd->is_managed && !apicd->can_reserve) in x86_vector_deactivate()
371 if (apicd->has_reserved) in x86_vector_deactivate()
376 if (apicd->can_reserve) in x86_vector_deactivate()
385 struct apic_chip_data *apicd = apic_chip_data(irqd); in activate_reserved() local
390 apicd->has_reserved = false; in activate_reserved()
399 apicd->can_reserve = false; in activate_reserved()
442 struct apic_chip_data *apicd = apic_chip_data(irqd); in x86_vector_activate() local
446 trace_vector_activate(irqd->irq, apicd->is_managed, in x86_vector_activate()
447 apicd->can_reserve, reserve); in x86_vector_activate()
450 if (!apicd->can_reserve && !apicd->is_managed) in x86_vector_activate()
456 else if (apicd->is_managed) in x86_vector_activate()
458 else if (apicd->has_reserved) in x86_vector_activate()
467 struct apic_chip_data *apicd = apic_chip_data(irqd); in vector_free_reserved_and_managed() local
469 trace_vector_teardown(irqd->irq, apicd->is_managed, in vector_free_reserved_and_managed()
470 apicd->has_reserved); in vector_free_reserved_and_managed()
472 if (apicd->has_reserved) in vector_free_reserved_and_managed()
474 if (apicd->is_managed) in vector_free_reserved_and_managed()
481 struct apic_chip_data *apicd; in x86_vector_free_irqs() local
492 apicd = irqd->chip_data; in x86_vector_free_irqs()
495 free_apic_chip_data(apicd); in x86_vector_free_irqs()
501 struct apic_chip_data *apicd) in vector_configure_legacy() argument
506 apicd->vector = ISA_IRQ_VECTOR(virq); in vector_configure_legacy()
507 apicd->cpu = 0; in vector_configure_legacy()
516 apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu); in vector_configure_legacy()
519 apicd->can_reserve = true; in vector_configure_legacy()
532 struct apic_chip_data *apicd; in x86_vector_alloc_irqs() local
548 apicd = alloc_apic_chip_data(node); in x86_vector_alloc_irqs()
549 if (!apicd) { in x86_vector_alloc_irqs()
554 apicd->irq = virq + i; in x86_vector_alloc_irqs()
556 irqd->chip_data = apicd; in x86_vector_alloc_irqs()
567 if (!vector_configure_legacy(virq + i, irqd, apicd)) in x86_vector_alloc_irqs()
575 free_apic_chip_data(apicd); in x86_vector_alloc_irqs()
591 struct apic_chip_data apicd; in x86_vector_debug_show() local
613 memcpy(&apicd, irqd->chip_data, sizeof(apicd)); in x86_vector_debug_show()
616 seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector); in x86_vector_debug_show()
617 seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu); in x86_vector_debug_show()
618 if (apicd.prev_vector) { in x86_vector_debug_show()
619 seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector); in x86_vector_debug_show()
620 seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu); in x86_vector_debug_show()
622 seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0); in x86_vector_debug_show()
623 seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0); in x86_vector_debug_show()
624 seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0); in x86_vector_debug_show()
625 seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0); in x86_vector_debug_show()
626 seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist)); in x86_vector_debug_show()
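Note that x86_vector_debug_show() copies the chip data onto the stack (line 613) instead of printing through the live pointer, so the printed fields form a consistent snapshot. A minimal sketch of that pattern, assuming the copy is taken under vector_lock:

        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        memcpy(&apicd, irqd->chip_data, sizeof(apicd));
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        /* ... seq_printf() the snapshot without holding the lock ... */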
772 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_set_affinity() local
784 (apicd->is_managed || apicd->can_reserve)) in apic_set_affinity()
803 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_retrigger_irq() local
807 apic->send_IPI(apicd->cpu, apicd->vector); in apic_retrigger_irq()
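apic_retrigger_irq() simply resends the interrupt to its current target by IPI. A minimal sketch, assuming the send happens under vector_lock so cpu and vector cannot change mid-flight:

        raw_spin_lock_irqsave(&vector_lock, flags);
        apic->send_IPI(apicd->cpu, apicd->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);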
834 static void free_moved_vector(struct apic_chip_data *apicd) in free_moved_vector() argument
836 unsigned int vector = apicd->prev_vector; in free_moved_vector()
837 unsigned int cpu = apicd->prev_cpu; in free_moved_vector()
838 bool managed = apicd->is_managed; in free_moved_vector()
848 trace_vector_free_moved(apicd->irq, cpu, vector, managed); in free_moved_vector()
851 hlist_del_init(&apicd->clist); in free_moved_vector()
852 apicd->prev_vector = 0; in free_moved_vector()
853 apicd->move_in_progress = 0; in free_moved_vector()
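free_moved_vector() undoes the prev_vector/prev_cpu bookkeeping once the old vector is known to be idle. The hits show the trace, the hlist removal and the flag resets; the matrix release and the per-CPU table reset in between are assumptions based on the surrounding code:

        irq_matrix_free(vector_matrix, cpu, vector, managed);
        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
        hlist_del_init(&apicd->clist);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;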
859 struct apic_chip_data *apicd; in smp_irq_move_cleanup_interrupt() local
866 hlist_for_each_entry_safe(apicd, tmp, clhead, clist) { in smp_irq_move_cleanup_interrupt()
867 unsigned int irr, vector = apicd->prev_vector; in smp_irq_move_cleanup_interrupt()
883 free_moved_vector(apicd); in smp_irq_move_cleanup_interrupt()
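The cleanup IPI handler walks this CPU's cleanup_list and releases each queued prev_vector, but only if that vector is no longer pending in the local APIC's IRR; if it is, the interrupt fired on the old vector after the move, so the cleanup is re-triggered and retried. A hedged sketch of the per-entry check (the IRR register arithmetic follows the usual 32-bits-per-register layout):

        irr = apic_read(APIC_IRR + (vector / 32) * 0x10);
        if (irr & (1U << (vector % 32))) {
                /* still pending on the old vector, retry the cleanup later */
                apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                continue;
        }
        free_moved_vector(apicd);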
890 static void __send_cleanup_vector(struct apic_chip_data *apicd) in __send_cleanup_vector() argument
895 apicd->move_in_progress = 0; in __send_cleanup_vector()
896 cpu = apicd->prev_cpu; in __send_cleanup_vector()
898 hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu)); in __send_cleanup_vector()
901 apicd->prev_vector = 0; in __send_cleanup_vector()
908 struct apic_chip_data *apicd; in send_cleanup_vector() local
910 apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); in send_cleanup_vector()
911 if (apicd->move_in_progress) in send_cleanup_vector()
912 __send_cleanup_vector(apicd); in send_cleanup_vector()
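__send_cleanup_vector() is the producer side of that list: when the previous CPU is still online, the apicd is hooked onto that CPU's cleanup_list and the cleanup IPI is sent; if the CPU went away, prev_vector is simply dropped (line 901) because the matrix entry was already released on hot-unplug. A hedged sketch; the lock and the IPI vector name are assumptions:

        raw_spin_lock(&vector_lock);
        apicd->move_in_progress = 0;
        cpu = apicd->prev_cpu;
        if (cpu_online(cpu)) {
                hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
                apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                apicd->prev_vector = 0;
        }
        raw_spin_unlock(&vector_lock);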
917 struct apic_chip_data *apicd; in __irq_complete_move() local
919 apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); in __irq_complete_move()
920 if (likely(!apicd->move_in_progress)) in __irq_complete_move()
923 if (vector == apicd->vector && apicd->cpu == smp_processor_id()) in __irq_complete_move()
924 __send_cleanup_vector(apicd); in __irq_complete_move()
937 struct apic_chip_data *apicd; in irq_force_complete_move() local
956 apicd = apic_chip_data(irqd); in irq_force_complete_move()
957 if (!apicd) in irq_force_complete_move()
963 vector = apicd->prev_vector; in irq_force_complete_move()
982 if (apicd->move_in_progress) { in irq_force_complete_move()
1018 free_moved_vector(apicd); in irq_force_complete_move()
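irq_force_complete_move() is the CPU-offline counterpart: the dying CPU can no longer service the cleanup IPI, so a pending prev_vector is released synchronously. A condensed, hedged sketch of the flow around the referenced lines; the label name and the omitted sanity checks are assumptions:

        raw_spin_lock(&vector_lock);
        apicd = apic_chip_data(irqd);
        if (!apicd)
                goto unlock;

        vector = apicd->prev_vector;    /* nothing to do if no move is pending */
        if (!vector)
                goto unlock;

        /* ... checks that the interrupt is not still in flight on this CPU ... */
        free_moved_vector(apicd);
unlock:
        raw_spin_unlock(&vector_lock);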