arch/x86/kernel/apic/vector.c
/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif
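/*
 * The per-CPU cleanup_list above holds apic_chip_data entries whose
 * previous (pre-move) vector still has to be released on that CPU. It
 * is drained in smp_irq_move_cleanup_interrupt() when the CPU receives
 * IRQ_MOVE_CLEANUP_VECTOR; see free_moved_vector() below.
 */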

void lock_vector_lock(void)
{
	/*
	 * Used so that the set of online CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}

static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/* Setup the vector move, if required */
	if (apicd->vector && cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
	} else {
		apicd->prev_vector = 0;
	}

	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

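/*
 * vector_irq is the per-CPU vector to irq_desc translation table used
 * by the low level interrupt entry code. Publishing the descriptor
 * under vector_lock makes the new vector usable on @newcpu; the old
 * vector, if any, stays installed until the move cleanup releases it.
 */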
static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	if (vector > 0)
		apic_update_vector(irqd, vector, cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	return vector;
}

static int assign_vector_locked(struct irq_data *irqd,
				const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector = allocate_vector(irqd, dest);

	if (vector < 0)
		return vector;

	apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node == NUMA_NO_NODE)
		goto all;
	/* Try the intersection of @affmsk and node mask */
	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the node mask */
	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
		return 0;
all:
	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

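/*
 * Search order above: the affinity mask restricted to the home node,
 * the full node mask, the affinity mask restricted to online CPUs,
 * and finally the full online mask. Note that node locality is tried
 * before the plain affinity mask, so the vector can end up on a node
 * local CPU outside the requested affinity.
 */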
static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

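/*
 * Three allocation policies: managed interrupts get a reservation in
 * their affinity mask so a vector is guaranteed at activation time,
 * interrupts with an explicit target mask get a vector immediately,
 * and everything else gets a lightweight global reservation which is
 * turned into a real vector on first activation.
 */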
static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
	cpu = cpumask_first(vector_searchmask);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret)
		apicd->has_reserved = false;
	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered. Handle
	 * it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool early)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, early);

	/* Nothing to do for fixed assigned vectors */
	if (!apicd->can_reserve && !apicd->is_managed)
		return 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (early || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

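/*
 * Activation resolves the reservation made at allocation time: early
 * activation and shutdown managed interrupts keep the shutdown vector,
 * managed interrupts allocate from their reserved set, and reserved
 * non-managed interrupts get a real vector assigned here.
 */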
static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

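/*
 * Legacy PIC interrupts come in with their ISA vector preassigned. If
 * such an interrupt is already activated, e.g. the timer, it must keep
 * its vector; otherwise the vector is released here and reallocated
 * through the normal policy when the caller sees realloc == true.
 */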
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
			   struct irq_data *irqd, int ind)
{
	unsigned int cpu, vector, prev_cpu, prev_vector;
	struct apic_chip_data *apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	apicd = irqd->chip_data;
	if (!apicd) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = apicd->cpu;
	vector = apicd->vector;
	prev_cpu = apicd->prev_cpu;
	prev_vector = apicd->prev_vector;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	seq_printf(m, "%*sVector: %5u\n", ind, "", vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu);
	if (prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu);
	}
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign_system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

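/*
 * System vectors, e.g. the spurious or reschedule vector, are excluded
 * from allocation on every CPU, while assigned legacy vectors are
 * merely accounted as in use. For example, the PIT timer on ISA irq 0
 * occupies ISA_IRQ_VECTOR(0) = 0x30 on all CPUs.
 */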
void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, so mark the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

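/*
 * The matrix allocator manages the per-CPU vector space in the range
 * [FIRST_EXTERNAL_VECTOR, FIRST_SYSTEM_VECTOR), i.e. the vectors
 * available for device interrupts between the CPU exception area
 * below and the system vectors above.
 */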
#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

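/*
 * Only ISA irqs which are still handled by the PIC, i.e. not taken
 * over by the IOAPIC, end up with a descriptor here; everything else
 * in the legacy vector space is marked unused on the incoming CPU.
 */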
/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exceptions are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non-legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int err;

	/*
	 * Core code can call here for inactive interrupts. For inactive
	 * interrupts which use managed or reservation mode there is no
	 * point in going through the vector assignment right now as the
	 * activation will assign a vector which fits the destination
	 * cpumask. Let the core code store the destination mask and be
	 * done with it.
	 */
	if (!irqd_is_activated(irqd) &&
	    (apicd->is_managed || apicd->can_reserve))
		return IRQ_SET_MASK_OK;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

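/*
 * Returning IRQ_SET_MASK_OK, rather than IRQ_SET_MASK_OK_NOCOPY,
 * tells the core code to copy @dest into the irq descriptor's
 * affinity mask after a successful vector assignment.
 */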
#else
# define apic_set_affinity	NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	irq_move_irq(irqd);
	ack_APIC_irq();
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * This should never happen. Managed interrupts are not
	 * migrated except on CPU down, which does not involve the
	 * cleanup vector. But try to keep the accounting correct
	 * nevertheless.
	 */
	WARN_ON_ONCE(managed);

	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

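/*
 * Example timeline of a vector move: an interrupt is reaffined from
 * CPU0 to CPU1, apic_update_vector() records prev_cpu/prev_vector,
 * the next interrupt arrives on CPU1 and triggers the cleanup IPI to
 * CPU0, and free_moved_vector() finally returns the old vector on
 * CPU0 to the matrix allocator.
 */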
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered in the APIC's IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}

static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

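/*
 * The vector number of the interrupt currently being handled is
 * recovered from pt_regs: the entry code stores ~vector in orig_ax,
 * so ~get_irq_regs()->orig_ax yields the original vector.
 */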
/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following set_affinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non-issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent the actual interrupt move from running out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

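/*
 * Example: booting with "apic=debug show_lapic=all" dumps the local
 * APIC registers of every online CPU at late init, at debug loglevel;
 * "show_lapic=2" limits the dump to the first two CPUs.
 */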
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);