// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>

#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#define IRQ_MATRIX_SIZE	(BITS_TO_LONGS(IRQ_MATRIX_BITS))

/* Per CPU view of the matrix: allocation state of the local vector space */
struct cpumap {
	unsigned int		available;		/* Free bits in the allocation area */
	unsigned int		allocated;		/* Bits allocated on this CPU */
	unsigned int		managed;		/* Bits reserved for managed interrupts */
	unsigned int		managed_allocated;	/* Managed bits actually allocated */
	bool			initialized;		/* Map was initialized once */
	bool			online;			/* CPU map is online */
	unsigned long		alloc_map[IRQ_MATRIX_SIZE];
	unsigned long		managed_map[IRQ_MATRIX_SIZE];
};

/* Global matrix state plus the per CPU maps */
struct irq_matrix {
	unsigned int		matrix_bits;		/* Usable bits in the maps */
	unsigned int		alloc_start;		/* First bit of the allocation area */
	unsigned int		alloc_end;		/* First invalid bit after the area */
	unsigned int		alloc_size;		/* Size of the allocation area */
	unsigned int		global_available;	/* Sum of available bits on online CPUs */
	unsigned int		global_reserved;	/* Reserved, but not yet allocated bits */
	unsigned int		systembits_inalloc;	/* System bits inside the allocation area */
	unsigned int		total_allocated;	/* Allocated bits across all CPUs */
	unsigned int		online_maps;		/* Number of online per CPU maps */
	struct cpumap __percpu	*maps;			/* Per CPU maps */
	unsigned long		scratch_map[IRQ_MATRIX_SIZE];	/* Temporary search bitmap */
	unsigned long		system_map[IRQ_MATRIX_SIZE];	/* Bits occupied by system vectors */
};

#define CREATE_TRACE_POINTS
#include <trace/events/irq_matrix.h>

/**
 * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it
 * @matrix_bits:	Number of matrix bits, must be <= IRQ_MATRIX_BITS
 * @alloc_start:	From which bit the allocation search starts
 * @alloc_end:		At which bit the allocation search ends, i.e. the
 *			first invalid bit
 */
__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
					   unsigned int alloc_start,
					   unsigned int alloc_end)
{
	struct irq_matrix *m;

	if (matrix_bits > IRQ_MATRIX_BITS)
		return NULL;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return NULL;

	m->matrix_bits = matrix_bits;
	m->alloc_start = alloc_start;
	m->alloc_end = alloc_end;
	m->alloc_size = alloc_end - alloc_start;
	m->maps = alloc_percpu(*m->maps);
	if (!m->maps) {
		kfree(m);
		return NULL;
	}
	return m;
}
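
/*
 * Example (illustrative sketch, not part of this file): an architecture
 * typically allocates one matrix at early boot and keeps a global pointer
 * to it, roughly the way the x86 vector allocator does.  MY_NR_VECTORS and
 * MY_FIRST_DYNAMIC_VECTOR are placeholder constants, not real kernel
 * symbols.
 *
 *	static struct irq_matrix *vector_matrix;
 *
 *	void __init my_arch_init_vector_matrix(void)
 *	{
 *		vector_matrix = irq_alloc_matrix(MY_NR_VECTORS,
 *						 MY_FIRST_DYNAMIC_VECTOR,
 *						 MY_NR_VECTORS);
 *		BUG_ON(!vector_matrix);
 *	}
 */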

/**
 * irq_matrix_online - Bring the local CPU matrix online
 * @m:	Matrix pointer
 */
void irq_matrix_online(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(cm->online);

	if (!cm->initialized) {
		cm->available = m->alloc_size;
		cm->available -= cm->managed + m->systembits_inalloc;
		cm->initialized = true;
	}
	m->global_available += cm->available;
	cm->online = true;
	m->online_maps++;
	trace_irq_matrix_online(m);
}

/**
 * irq_matrix_offline - Bring the local CPU matrix offline
 * @m:	Matrix pointer
 */
void irq_matrix_offline(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Update the global available size */
	m->global_available -= cm->available;
	cm->online = false;
	m->online_maps--;
	trace_irq_matrix_offline(m);
}
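
/*
 * Example (illustrative sketch, not part of this file): the online/offline
 * calls operate on the matrix view of the calling CPU, so an architecture
 * would invoke them from its CPU startup/teardown path while holding
 * whatever lock serializes its vector accounting.  The lock and function
 * names below are placeholders; vector_matrix is the placeholder pointer
 * from the allocation sketch above.
 *
 *	static DEFINE_RAW_SPINLOCK(my_vector_lock);
 *
 *	void my_arch_cpu_online(void)
 *	{
 *		raw_spin_lock(&my_vector_lock);
 *		irq_matrix_online(vector_matrix);
 *		raw_spin_unlock(&my_vector_lock);
 *	}
 *
 *	void my_arch_cpu_offline(void)
 *	{
 *		raw_spin_lock(&my_vector_lock);
 *		irq_matrix_offline(vector_matrix);
 *		raw_spin_unlock(&my_vector_lock);
 *	}
 */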

static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
				      unsigned int num, bool managed)
{
	unsigned int area, start = m->alloc_start;
	unsigned int end = m->alloc_end;

	/*
	 * Merge the system map and the per CPU managed and allocation maps
	 * into the scratch map, then search the combined map for a free
	 * area of @num bits.
	 */
	bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
	bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
	area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
	if (area >= end)
		return area;
	if (managed)
		bitmap_set(cm->managed_map, area, num);
	else
		bitmap_set(cm->alloc_map, area, num);
	return area;
}

/* Find the best CPU which has the lowest vector allocation count */
static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
					 const struct cpumask *msk)
{
	unsigned int cpu, best_cpu, maxavl = 0;
	struct cpumap *cm;

	best_cpu = UINT_MAX;

	for_each_cpu(cpu, msk) {
		cm = per_cpu_ptr(m->maps, cpu);

		if (!cm->online || cm->available <= maxavl)
			continue;

		best_cpu = cpu;
		maxavl = cm->available;
	}
	return best_cpu;
}

/* Find the best CPU which has the lowest number of managed IRQs allocated */
static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
						 const struct cpumask *msk)
{
	unsigned int cpu, best_cpu, allocated = UINT_MAX;
	struct cpumap *cm;

	best_cpu = UINT_MAX;

	for_each_cpu(cpu, msk) {
		cm = per_cpu_ptr(m->maps, cpu);

		if (!cm->online || cm->managed_allocated > allocated)
			continue;

		best_cpu = cpu;
		allocated = cm->managed_allocated;
	}
	return best_cpu;
}

/**
 * irq_matrix_assign_system - Assign system wide entry in the matrix
 * @m:		Matrix pointer
 * @bit:	Which bit to reserve
 * @replace:	Replace an already allocated vector with a system
 *		vector at the same bit position.
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the
 * early boot process, then the chance to survive is about zero.
 * If this happens when the system is live, it's not much better.
 */
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
			      bool replace)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(bit > m->matrix_bits);
	BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

	set_bit(bit, m->system_map);
	if (replace) {
		BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
		cm->allocated--;
		m->total_allocated--;
	}
	if (bit >= m->alloc_start && bit < m->alloc_end)
		m->systembits_inalloc++;

	trace_irq_matrix_assign_system(bit, m);
}
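
/*
 * Example (illustrative sketch, not part of this file): system vectors are
 * typically assigned early during boot, while only the boot CPU's map can
 * be online, so every per CPU map later excludes them from its allocation
 * area.  MY_TIMER_VECTOR is a placeholder, not a real kernel symbol.
 *
 *	void __init my_arch_assign_system_vectors(void)
 *	{
 *		irq_matrix_assign_system(vector_matrix, MY_TIMER_VECTOR, false);
 *	}
 */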

/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
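
/*
 * Example (illustrative sketch, not part of this file): when a managed
 * interrupt is set up, one bit is reserved on every CPU in its affinity
 * mask so that a vector is guaranteed to be available wherever the
 * interrupt may later be activated.  On failure the function has already
 * cleaned up the partially established reservations.  The function name is
 * a placeholder.
 *
 *	int my_setup_managed_irq(const struct cpumask *affinity)
 *	{
 *		return irq_matrix_reserve_managed(vector_matrix, affinity);
 *	}
 */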

/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs
 *
 * This removes one not yet allocated managed interrupt bit per CPU in
 * @msk from the map. It does not matter which one, because the managed
 * interrupts free their allocation when they shut down. If not, the
 * accounting is screwed, but all that can be done at this point is to
 * warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get the managed bits which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);

		cm->managed--;
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}

/**
 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	Which CPUs to search in
 * @mapped_cpu:	Pointer to store the CPU for which the irq was allocated
 */
int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
			     unsigned int *mapped_cpu)
{
	unsigned int bit, cpu, end = m->alloc_end;
	struct cpumap *cm;

	if (cpumask_empty(msk))
		return -EINVAL;

	cpu = matrix_find_best_cpu_managed(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	end = m->alloc_end;
	/* Get the managed bits which are not allocated */
	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
	bit = find_first_bit(m->scratch_map, end);
	if (bit >= end)
		return -ENOSPC;
	set_bit(bit, cm->alloc_map);
	cm->allocated++;
	cm->managed_allocated++;
	m->total_allocated++;
	*mapped_cpu = cpu;
	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
	return bit;
}
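
/*
 * Example (illustrative sketch, not part of this file): on activation a
 * managed interrupt turns one of its reserved bits into a real allocation
 * on the best suited CPU; on shutdown the bit goes back to reserved via
 * irq_matrix_free() with managed == true.  The names below are
 * placeholders.
 *
 *	int my_activate_managed(const struct cpumask *affinity,
 *				unsigned int *cpu)
 *	{
 *		return irq_matrix_alloc_managed(vector_matrix, affinity, cpu);
 *	}
 *
 *	void my_shutdown_managed(unsigned int cpu, unsigned int bit)
 *	{
 *		irq_matrix_free(vector_matrix, cpu, bit, true);
 *	}
 */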

/**
 * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
 * @m:		Matrix pointer
 * @bit:	Which bit to mark
 *
 * This should only be used to mark preallocated vectors
 */
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;
	if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
		return;
	cm->allocated++;
	m->total_allocated++;
	cm->available--;
	m->global_available--;
	trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}

/**
 * irq_matrix_reserve - Reserve interrupts
 * @m:	Matrix pointer
 *
 * This is merely a bookkeeping call. It increments the number of globally
 * reserved interrupt bits without actually allocating them. This allows
 * interrupt descriptors to be set up without assigning low level resources
 * to them. The actual allocation happens when the interrupt gets activated.
 */
void irq_matrix_reserve(struct irq_matrix *m)
{
	if (m->global_reserved == m->global_available)
		pr_warn("Interrupt reservation exceeds available resources\n");

	m->global_reserved++;
	trace_irq_matrix_reserve(m);
}

/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m:	Matrix pointer
 *
 * This is merely a bookkeeping call. It decrements the number of globally
 * reserved interrupt bits. This is used to undo irq_matrix_reserve() when
 * the interrupt was never in use, i.e. no real vector was ever allocated,
 * which otherwise would have undone the reservation.
 */
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
	m->global_reserved--;
	trace_irq_matrix_remove_reserved(m);
}
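
/*
 * Example (illustrative sketch, not part of this file): the reservation
 * pair brackets the lifetime of an interrupt descriptor that never gets
 * activated.  A later irq_matrix_alloc() with reserved == true consumes
 * the reservation instead.  The function names are placeholders.
 *
 *	void my_setup_irq_desc(void)
 *	{
 *		irq_matrix_reserve(vector_matrix);
 *	}
 *
 *	void my_teardown_unused_irq_desc(void)
 *	{
 *		irq_matrix_remove_reserved(vector_matrix);
 *	}
 */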

/**
 * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	Which CPUs to search in
 * @reserved:	Allocate previously reserved interrupts
 * @mapped_cpu:	Pointer to store the CPU for which the irq was allocated
 */
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
		     bool reserved, unsigned int *mapped_cpu)
{
	unsigned int cpu, bit;
	struct cpumap *cm;

	/*
	 * Not required in theory, but matrix_find_best_cpu() uses
	 * for_each_cpu() which ignores the cpumask on UP.
	 */
	if (cpumask_empty(msk))
		return -EINVAL;

	cpu = matrix_find_best_cpu(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	bit = matrix_alloc_area(m, cm, 1, false);
	if (bit >= m->alloc_end)
		return -ENOSPC;
	cm->allocated++;
	cm->available--;
	m->total_allocated++;
	m->global_available--;
	if (reserved)
		m->global_reserved--;
	*mapped_cpu = cpu;
	trace_irq_matrix_alloc(bit, cpu, m, cm);
	return bit;
}
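
/*
 * Example (illustrative sketch, not part of this file): allocating a
 * regular vector for a previously reserved interrupt, and releasing it
 * again.  The bit number returned by irq_matrix_alloc() and the CPU it was
 * allocated on both have to be remembered for the later free.  The
 * function names are placeholders.
 *
 *	int my_activate_irq(const struct cpumask *affinity,
 *			    unsigned int *cpu)
 *	{
 *		return irq_matrix_alloc(vector_matrix, affinity, true, cpu);
 *	}
 *
 *	void my_deactivate_irq(unsigned int cpu, unsigned int bit)
 *	{
 *		irq_matrix_free(vector_matrix, cpu, bit, false);
 *	}
 */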

/**
 * irq_matrix_free - Free allocated interrupt in the matrix
 * @m:		Matrix pointer
 * @cpu:	Which CPU map needs to be updated
 * @bit:	The bit to remove
 * @managed:	If true, the interrupt is managed and not accounted
 *		as available.
 */
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
		     unsigned int bit, bool managed)
{
	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;

	if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
		return;

	cm->allocated--;
	if (managed)
		cm->managed_allocated--;

	if (cm->online)
		m->total_allocated--;

	if (!managed) {
		cm->available++;
		if (cm->online)
			m->global_available++;
	}
	trace_irq_matrix_free(bit, cpu, m, cm);
}

/**
 * irq_matrix_available - Get the number of globally available irqs
 * @m:		Pointer to the matrix to query
 * @cpudown:	If true, the local CPU is about to go down, adjust
 *		the number of available irqs accordingly
 */
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	if (!cpudown)
		return m->global_available;
	return m->global_available - cm->available;
}

/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:	Pointer to the matrix to query
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}

/**
 * irq_matrix_allocated - Get the number of allocated irqs on the local CPU
 * @m:	Pointer to the matrix to search
 *
 * This returns the number of allocated irqs on the local CPU.
 */
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	return cm->allocated;
}
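
/*
 * Example (illustrative sketch, not part of this file): the query helpers
 * are meant for policy decisions such as CPU hotplug checks, e.g. refusing
 * to offline a CPU when the vectors it holds could not be migrated to the
 * remaining CPUs.  The sketch assumes it runs on the CPU that is about to
 * go down; the function name is a placeholder.
 *
 *	bool my_cpu_can_go_offline(void)
 *	{
 *		return irq_matrix_allocated(vector_matrix) <=
 *		       irq_matrix_available(vector_matrix, true);
 *	}
 */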

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note, this is a lockless snapshot.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed,
			   cm->managed_allocated, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif