// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>

#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#define IRQ_MATRIX_SIZE	(BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))

struct cpumap {
	unsigned int		available;
	unsigned int		allocated;
	unsigned int		managed;
	bool			initialized;
	bool			online;
	unsigned long		alloc_map[IRQ_MATRIX_SIZE];
	unsigned long		managed_map[IRQ_MATRIX_SIZE];
};

struct irq_matrix {
	unsigned int		matrix_bits;
	unsigned int		alloc_start;
	unsigned int		alloc_end;
	unsigned int		alloc_size;
	unsigned int		global_available;
	unsigned int		global_reserved;
	unsigned int		systembits_inalloc;
	unsigned int		total_allocated;
	unsigned int		online_maps;
	struct cpumap __percpu	*maps;
	unsigned long		scratch_map[IRQ_MATRIX_SIZE];
	unsigned long		system_map[IRQ_MATRIX_SIZE];
};

#define CREATE_TRACE_POINTS
#include <trace/events/irq_matrix.h>

/**
 * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it
 * @matrix_bits: Number of matrix bits, must be <= IRQ_MATRIX_BITS
 * @alloc_start: From which bit the allocation search starts
 * @alloc_end: At which bit the allocation search ends, i.e. the first
 *	       invalid bit
 */
__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
					   unsigned int alloc_start,
					   unsigned int alloc_end)
{
	struct irq_matrix *m;

	if (matrix_bits > IRQ_MATRIX_BITS)
		return NULL;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return NULL;

	m->matrix_bits = matrix_bits;
	m->alloc_start = alloc_start;
	m->alloc_end = alloc_end;
	m->alloc_size = alloc_end - alloc_start;
	m->maps = alloc_percpu(*m->maps);
	if (!m->maps) {
		kfree(m);
		return NULL;
	}
	return m;
}
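
/*
 * Example: a minimal sketch of how an architecture's vector allocator
 * might create the matrix at early boot. The vector_matrix name and the
 * NR_VECTORS/FIRST_EXTERNAL_VECTOR/FIRST_SYSTEM_VECTOR bounds are
 * illustrative (modeled on x86), not defined in this file:
 *
 *	static struct irq_matrix *vector_matrix;
 *
 *	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
 *					 FIRST_SYSTEM_VECTOR);
 *	BUG_ON(!vector_matrix);
 */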

/**
 * irq_matrix_online - Bring the local CPU matrix online
 * @m: Matrix pointer
 */
void irq_matrix_online(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(cm->online);

	if (!cm->initialized) {
		cm->available = m->alloc_size;
		cm->available -= cm->managed + m->systembits_inalloc;
		cm->initialized = true;
	}
	m->global_available += cm->available;
	cm->online = true;
	m->online_maps++;
	trace_irq_matrix_online(m);
}

/**
 * irq_matrix_offline - Bring the local CPU matrix offline
 * @m: Matrix pointer
 */
void irq_matrix_offline(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Update the global available size */
	m->global_available -= cm->available;
	cm->online = false;
	m->online_maps--;
	trace_irq_matrix_offline(m);
}
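
/*
 * Example: a hypothetical sketch of wiring these into CPU hotplug,
 * assuming a vector_matrix pointer as above; the callback names are
 * illustrative. Both callbacks must run on the CPU which goes up or
 * down, as the matrix operates on this_cpu_ptr() internally:
 *
 *	static int my_cpu_online(unsigned int cpu)
 *	{
 *		irq_matrix_online(vector_matrix);
 *		return 0;
 *	}
 *
 *	static int my_cpu_offline(unsigned int cpu)
 *	{
 *		irq_matrix_offline(vector_matrix);
 *		return 0;
 *	}
 */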

static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
				      unsigned int num, bool managed)
{
	unsigned int area, start = m->alloc_start;
	unsigned int end = m->alloc_end;

	bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
	bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
	area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
	if (area >= end)
		return area;
	if (managed)
		bitmap_set(cm->managed_map, area, num);
	else
		bitmap_set(cm->alloc_map, area, num);
	return area;
}

/* Find the best CPU, i.e. the one with the largest number of available vectors */
static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
					 const struct cpumask *msk)
{
	unsigned int cpu, best_cpu, maxavl = 0;
	struct cpumap *cm;

	best_cpu = UINT_MAX;

	for_each_cpu(cpu, msk) {
		cm = per_cpu_ptr(m->maps, cpu);

		if (!cm->online || cm->available <= maxavl)
			continue;

		best_cpu = cpu;
		maxavl = cm->available;
	}
	return best_cpu;
}

/**
 * irq_matrix_assign_system - Assign system wide entry in the matrix
 * @m: Matrix pointer
 * @bit: Which bit to reserve
 * @replace: Replace an already allocated vector with a system
 *	     vector at the same bit position.
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the
 * early boot process, then the chance to survive is about zero.
 * If this happens when the system is live, it's not much better.
 */
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
			      bool replace)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(bit > m->matrix_bits);
	BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

	set_bit(bit, m->system_map);
	if (replace) {
		BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
		cm->allocated--;
		m->total_allocated--;
	}
	if (bit >= m->alloc_start && bit < m->alloc_end)
		m->systembits_inalloc++;

	trace_irq_matrix_assign_system(bit, m);
}
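
/*
 * Example: a hypothetical sketch of claiming system-wide bits during
 * early boot, before secondary CPUs come online. The system_vectors
 * bitmap and the bounds are illustrative (modeled on how x86 marks its
 * system vectors), not defined in this file:
 *
 *	unsigned int vector = FIRST_SYSTEM_VECTOR;
 *
 *	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
 *		irq_matrix_assign_system(vector_matrix, vector, false);
 */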

/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m: Matrix pointer
 * @msk: On which CPUs the bits should be reserved
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
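
/*
 * Example: a hypothetical sketch of the managed lifecycle. A caller
 * would typically reserve one bit per CPU in the interrupt's affinity
 * mask when the descriptor is created and drop the reservation when the
 * descriptor is destroyed; irqd is illustrative here, only
 * irq_data_get_affinity_mask() is a real helper:
 *
 *	if (irq_matrix_reserve_managed(vector_matrix,
 *				       irq_data_get_affinity_mask(irqd)) < 0)
 *		return -ENOSPC;
 *	...
 *	irq_matrix_remove_managed(vector_matrix,
 *				  irq_data_get_affinity_mask(irqd));
 */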

/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m: Matrix pointer
 * @msk: On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs.
 *
 * This removes unallocated managed interrupts from the map. It does
 * not matter which one, because the managed interrupts free their
 * allocation when they shut down. If not, the accounting is screwed,
 * but all that can be done at this point is to warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get a managed bit which is not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);

		cm->managed--;
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}

/**
 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
 * @m: Matrix pointer
 * @msk: Which CPUs to search in
 * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
 */
int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
			     unsigned int *mapped_cpu)
{
	unsigned int bit, cpu, end = m->alloc_end;
	struct cpumap *cm;

	if (cpumask_empty(msk))
		return -EINVAL;

	cpu = matrix_find_best_cpu(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	/* Get a managed bit which is not allocated */
	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
	bit = find_first_bit(m->scratch_map, end);
	if (bit >= end)
		return -ENOSPC;
	set_bit(bit, cm->alloc_map);
	cm->allocated++;
	m->total_allocated++;
	*mapped_cpu = cpu;
	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
	return bit;
}
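
/*
 * Example: a hypothetical sketch of activating a previously reserved
 * managed interrupt; the returned bit and the chosen CPU would then be
 * programmed into the interrupt chip. vector_matrix and affinity_mask
 * are illustrative names from the sketches above:
 *
 *	unsigned int cpu;
 *	int vector;
 *
 *	vector = irq_matrix_alloc_managed(vector_matrix, affinity_mask, &cpu);
 *	if (vector < 0)
 *		return vector;
 *	// program the vector/cpu pair into the hardware
 */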

/**
 * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
 * @m: Matrix pointer
 * @bit: Which bit to mark
 *
 * This should only be used to mark preallocated vectors
 */
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;
	if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
		return;
	cm->allocated++;
	m->total_allocated++;
	cm->available--;
	m->global_available--;
	trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}

/**
 * irq_matrix_reserve - Reserve interrupts
 * @m: Matrix pointer
 *
 * This is merely a bookkeeping call. It increments the number of globally
 * reserved interrupt bits w/o actually allocating them. This allows
 * interrupt descriptors to be set up w/o assigning low level resources
 * to them. The actual allocation happens when the interrupt gets activated.
 */
void irq_matrix_reserve(struct irq_matrix *m)
{
	if (m->global_reserved <= m->global_available &&
	    m->global_reserved + 1 > m->global_available)
		pr_warn("Interrupt reservation exceeds available resources\n");

	m->global_reserved++;
	trace_irq_matrix_reserve(m);
}

/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m: Matrix pointer
 *
 * This is merely a bookkeeping call. It decrements the number of globally
 * reserved interrupt bits. This is used to undo irq_matrix_reserve() when
 * the interrupt was never in use and no real vector was allocated, which
 * would have undone the reservation on its own.
 */
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
	m->global_reserved--;
	trace_irq_matrix_remove_reserved(m);
}
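
/*
 * Example: a hypothetical sketch of reservation mode. At descriptor
 * setup time only bookkeeping happens; the real bit is taken at
 * activation by passing reserved=true to irq_matrix_alloc(), which
 * drops the reservation again. Names outside this file are illustrative:
 *
 *	irq_matrix_reserve(vector_matrix);		// irq created
 *	...
 *	vector = irq_matrix_alloc(vector_matrix, cpu_online_mask,
 *				  true, &cpu);		// irq activated
 */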

/**
 * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
 * @m: Matrix pointer
 * @msk: Which CPUs to search in
 * @reserved: Allocate previously reserved interrupts
 * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
 */
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
		     bool reserved, unsigned int *mapped_cpu)
{
	unsigned int cpu, bit;
	struct cpumap *cm;

	cpu = matrix_find_best_cpu(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	bit = matrix_alloc_area(m, cm, 1, false);
	if (bit >= m->alloc_end)
		return -ENOSPC;
	cm->allocated++;
	cm->available--;
	m->total_allocated++;
	m->global_available--;
	if (reserved)
		m->global_reserved--;
	*mapped_cpu = cpu;
	trace_irq_matrix_alloc(bit, cpu, m, cm);
	return bit;
}
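
/*
 * Example: a hypothetical sketch of pairing irq_matrix_alloc() with
 * irq_matrix_free() when an interrupt is torn down or moved; the
 * vector/cpu pair is whatever the caller remembered from allocation:
 *
 *	vector = irq_matrix_alloc(vector_matrix, cpu_online_mask,
 *				  false, &cpu);
 *	if (vector < 0)
 *		return vector;
 *	...
 *	irq_matrix_free(vector_matrix, cpu, vector, false);
 */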

/**
 * irq_matrix_free - Free allocated interrupt in the matrix
 * @m: Matrix pointer
 * @cpu: Which CPU map needs to be updated
 * @bit: The bit to remove
 * @managed: If true, the interrupt is managed and not accounted
 *	     as available.
 */
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
		     unsigned int bit, bool managed)
{
	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;

	clear_bit(bit, cm->alloc_map);
	cm->allocated--;

	if (cm->online)
		m->total_allocated--;

	if (!managed) {
		cm->available++;
		if (cm->online)
			m->global_available++;
	}
	trace_irq_matrix_free(bit, cpu, m, cm);
}

/**
 * irq_matrix_available - Get the number of globally available irqs
 * @m: Pointer to the matrix to query
 * @cpudown: If true, the local CPU is about to go down, adjust
 *	     the number of available irqs accordingly
 */
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	if (!cpudown)
		return m->global_available;
	return m->global_available - cm->available;
}

/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m: Pointer to the matrix to query
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}

/**
 * irq_matrix_allocated - Get the number of allocated irqs on the local CPU
 * @m: Pointer to the matrix to search
 *
 * This returns the number of allocated irqs.
 */
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	return cm->allocated;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf: Pointer to the seq_file to print to
 * @m: Pointer to the matrix allocator
 * @ind: Indentation for the print format
 *
 * Note, this is a lockless snapshot.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif