// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
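
/*
 * Illustrative usage sketch (not part of this file): a caller that wants a
 * callback run from hardirq context declares an irq_work and queues it.
 * The names example_func and example_work below are hypothetical; the
 * DEFINE_IRQ_WORK() helper comes from <linux/irq_work.h>.
 *
 *	static void example_func(struct irq_work *work)
 *	{
 *		pr_info("running from hardirq context\n");
 *	}
 *
 *	static DEFINE_IRQ_WORK(example_work, example_func);
 *
 *	// Safe from NMI and any other context:
 *	irq_work_queue(&example_work);
 */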

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <linux/kasan.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing smp_mb() in irq_work_single() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Architectures without a dedicated irq_work interrupt fall back
	 * to processing pending work from the timer tick.
	 */
}
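
/*
 * Informational note: architectures that do implement this typically send a
 * self-IPI so the pending work runs from a hardirq as soon as possible; x86,
 * for instance, uses a dedicated irq_work vector for this.
 */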

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from the next tick if any */
	if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
		if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
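
/*
 * Illustrative sketch of a "lazy" irq_work (hypothetical names): lazy work
 * is normally deferred to the next timer tick instead of raising the
 * irq_work interrupt, assuming IRQ_WORK_INIT_LAZY() from <linux/irq_work.h>.
 *
 *	static void example_lazy_func(struct irq_work *work)
 *	{
 *		pr_info("deferred to the next tick\n");
 *	}
 *
 *	static struct irq_work example_lazy_work =
 *		IRQ_WORK_INIT_LAZY(example_lazy_func);
 *
 *	irq_work_queue(&example_lazy_work);
 */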

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	kasan_record_aux_stack(work);

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
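
/*
 * Illustrative sketch (hypothetical names example_remote_work, example_func
 * and target_cpu): run a callback from hardirq context on another CPU.
 *
 *	static DEFINE_IRQ_WORK(example_remote_work, example_func);
 *
 *	// Cannot be used from NMI context when targeting a remote CPU:
 *	irq_work_queue_on(&example_remote_work, target_cpu);
 */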


bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

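	/*
	 * The tick is only needed when there is lazy work to run, or when
	 * there is raised work on an architecture without a dedicated
	 * irq_work interrupt to handle it.
	 */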
	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit; after this point the @work can be re-used.
	 * The PENDING bit acts as a lock, and we own it, so we can clear it
	 * without atomic ops.
	 */
	flags = atomic_read(&work->node.a_flags);
	flags &= ~IRQ_WORK_PENDING;
	atomic_set(&work->node.a_flags, flags);

	/*
	 * See irq_work_claim().
	 */
	smp_mb();

	lockdep_irq_work_enter(flags);
	work->func(work);
	lockdep_irq_work_exit(flags);

	/*
	 * Clear the BUSY bit, if set, and return to the free state if no-one
	 * else claimed it meanwhile.
	 */
	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
}
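
/*
 * Informational sketch of the a_flags lifecycle (CSD type bits omitted),
 * based on irq_work_claim() and irq_work_single() above:
 *
 *	0				free, may be claimed
 *	IRQ_WORK_PENDING | BUSY		claimed and queued (irq_work_claim())
 *	IRQ_WORK_BUSY			callback running, may be claimed again
 *	0				callback finished, not re-claimed
 */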

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

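/*
 * Informational note: called from the timer tick. On architectures without a
 * dedicated irq_work interrupt this also processes the raised list; the lazy
 * list is always run from here.
 */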
void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (irq_work_is_busy(work))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
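
/*
 * Illustrative teardown pattern (hypothetical @obj) for an object that embeds
 * an irq_work: wait for a possibly running callback to finish before freeing
 * the memory.
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 */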