/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
        .list = LIST_HEAD_INIT(node_affinity.list),
        .lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
        "SDMA",
        "RCVCTXT",
        "GENERAL",
        "OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

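/* Reset a cpu_mask_set: clear both CPU masks and the generation counter. */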
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
        cpumask_clear(&set->mask);
        cpumask_clear(&set->used);
        set->gen = 0;
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
        int possible, curr_cpu, i, ht;

        cpumask_clear(&node_affinity.real_cpu_mask);

        /* Start with cpu online mask as the real cpu mask */
        cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

        /*
         * Remove HT cores from the real cpu mask. Do this in two steps below.
         */
        possible = cpumask_weight(&node_affinity.real_cpu_mask);
        ht = cpumask_weight(topology_sibling_cpumask(
                                cpumask_first(&node_affinity.real_cpu_mask)));
        /*
         * Step 1. Skip over the first N HT siblings and use them as the
         * "real" cores. Assumes that HT cores are not enumerated in
         * succession (except in the single core case).
         */
        curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
        for (i = 0; i < possible / ht; i++)
                curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
        /*
         * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
         * skip any gaps.
         */
        for (; i < possible; i++) {
                cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
                curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
        }
}

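/*
 * Build the global affinity state at driver load: snapshot the online
 * CPU/NUMA topology, construct the non-HT "real" CPU mask, and count the
 * number of HFI devices present on each NUMA node.
 */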
int node_affinity_init(void)
{
        int node;
        struct pci_dev *dev = NULL;
        const struct pci_device_id *ids = hfi1_pci_tbl;

        cpumask_clear(&node_affinity.proc.used);
        cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

        node_affinity.proc.gen = 0;
        node_affinity.num_core_siblings =
                cpumask_weight(topology_sibling_cpumask(
                        cpumask_first(&node_affinity.proc.mask)
                        ));
        node_affinity.num_possible_nodes = num_possible_nodes();
        node_affinity.num_online_nodes = num_online_nodes();
        node_affinity.num_online_cpus = num_online_cpus();

        /*
         * The real cpu mask is part of the affinity struct but it has to be
         * initialized early. It is needed to calculate the number of user
         * contexts in set_up_context_variables().
         */
        init_real_cpu_mask();

        hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
                                     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
        if (!hfi1_per_node_cntr)
                return -ENOMEM;

        while (ids->vendor) {
                dev = NULL;
                while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
                        node = pcibus_to_node(dev->bus);
                        if (node < 0)
                                node = numa_node_id();

                        hfi1_per_node_cntr[node]++;
                }
                ids++;
        }

        return 0;
}

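/*
 * Tear down the global affinity state at driver unload: free every
 * per-node affinity entry and the per-node device counters.
 */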
void node_affinity_destroy(void)
{
        struct list_head *pos, *q;
        struct hfi1_affinity_node *entry;

        mutex_lock(&node_affinity.lock);
        list_for_each_safe(pos, q, &node_affinity.list) {
                entry = list_entry(pos, struct hfi1_affinity_node,
                                   list);
                list_del(pos);
                kfree(entry);
        }
        mutex_unlock(&node_affinity.lock);
        kfree(hfi1_per_node_cntr);
}

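/*
 * Allocate and initialize an affinity entry for the given NUMA node.
 * The caller is responsible for adding it to the global list.
 */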
static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
        struct hfi1_affinity_node *entry;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;
        entry->node = node;
        INIT_LIST_HEAD(&entry->list);

        return entry;
}

/*
 * It appends an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
        list_add_tail(&entry->list, &node_affinity.list);
}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
        struct list_head *pos;
        struct hfi1_affinity_node *entry;

        list_for_each(pos, &node_affinity.list) {
                entry = list_entry(pos, struct hfi1_affinity_node, list);
                if (entry->node == node)
                        return entry;
        }

        return NULL;
}

/*
 * Interrupt affinity.
 *
 * non-rcv avail gets a default mask that
 * starts as possible cpus with threads reset
 * and each rcv avail reset.
 *
 * rcv avail gets node relative 1 wrapping back
 * to the node relative 1 as necessary.
 *
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
        int node = pcibus_to_node(dd->pcidev->bus);
        struct hfi1_affinity_node *entry;
        const struct cpumask *local_mask;
        int curr_cpu, possible, i;

        if (node < 0)
                node = numa_node_id();
        dd->node = node;

        local_mask = cpumask_of_node(dd->node);
        if (cpumask_first(local_mask) >= nr_cpu_ids)
                local_mask = topology_core_cpumask(0);

        mutex_lock(&node_affinity.lock);
        entry = node_affinity_lookup(dd->node);

        /*
         * If this is the first time this NUMA node's affinity is used,
         * create an entry in the global affinity structure and initialize it.
         */
        if (!entry) {
                entry = node_affinity_allocate(node);
                if (!entry) {
                        dd_dev_err(dd,
                                   "Unable to allocate global affinity node\n");
                        mutex_unlock(&node_affinity.lock);
                        return -ENOMEM;
                }
                init_cpu_mask_set(&entry->def_intr);
                init_cpu_mask_set(&entry->rcv_intr);
                cpumask_clear(&entry->general_intr_mask);
                /* Use the "real" cpu mask of this node as the default */
                cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
                            local_mask);

                /* fill in the receive list */
                possible = cpumask_weight(&entry->def_intr.mask);
                curr_cpu = cpumask_first(&entry->def_intr.mask);

                if (possible == 1) {
                        /* only one CPU, everyone will use it */
                        cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
                        cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
                } else {
                        /*
                         * The general/control context will be the first CPU in
                         * the default list, so it is removed from the default
                         * list and added to the general interrupt list.
                         */
                        cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
                        cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
                        curr_cpu = cpumask_next(curr_cpu,
                                                &entry->def_intr.mask);

                        /*
                         * Remove the remaining kernel receive queues from
                         * the default list and add them to the receive list.
                         */
                        for (i = 0;
                             i < (dd->n_krcv_queues - 1) *
                                  hfi1_per_node_cntr[dd->node];
                             i++) {
                                cpumask_clear_cpu(curr_cpu,
                                                  &entry->def_intr.mask);
                                cpumask_set_cpu(curr_cpu,
                                                &entry->rcv_intr.mask);
                                curr_cpu = cpumask_next(curr_cpu,
                                                        &entry->def_intr.mask);
                                if (curr_cpu >= nr_cpu_ids)
                                        break;
                        }

                        /*
                         * If there ends up being 0 CPU cores leftover for SDMA
                         * engines, use the same CPU cores as general/control
                         * context.
                         */
                        if (cpumask_weight(&entry->def_intr.mask) == 0)
                                cpumask_copy(&entry->def_intr.mask,
                                             &entry->general_intr_mask);
                }

                node_affinity_add_tail(entry);
        }
        mutex_unlock(&node_affinity.lock);
        return 0;
}

/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
        struct sdma_engine *sde = msix->arg;
        struct hfi1_devdata *dd = sde->dd;
        struct hfi1_affinity_node *entry;
        struct cpu_mask_set *set;
        int i, old_cpu;

        if (cpu > num_online_cpus() || cpu == sde->cpu)
                return;

        mutex_lock(&node_affinity.lock);
        entry = node_affinity_lookup(dd->node);
        if (!entry)
                goto unlock;

        old_cpu = sde->cpu;
        sde->cpu = cpu;
        cpumask_clear(&msix->mask);
        cpumask_set_cpu(cpu, &msix->mask);
        dd_dev_dbg(dd, "IRQ vector: %u, type %s engine %u -> cpu: %d\n",
                   msix->msix.vector, irq_type_names[msix->type],
                   sde->this_idx, cpu);
        irq_set_affinity_hint(msix->msix.vector, &msix->mask);

        /*
         * Set the new cpu in the hfi1_affinity_node and clean
         * the old cpu if it is not used by any other IRQ
         */
        set = &entry->def_intr;
        cpumask_set_cpu(cpu, &set->mask);
        cpumask_set_cpu(cpu, &set->used);
        for (i = 0; i < dd->num_msix_entries; i++) {
                struct hfi1_msix_entry *other_msix;

                other_msix = &dd->msix_entries[i];
                if (other_msix->type != IRQ_SDMA || other_msix == msix)
                        continue;

                if (cpumask_test_cpu(old_cpu, &other_msix->mask))
                        goto unlock;
        }
        cpumask_clear_cpu(old_cpu, &set->mask);
        cpumask_clear_cpu(old_cpu, &set->used);
unlock:
        mutex_unlock(&node_affinity.lock);
}

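/*
 * irq_affinity_notify callback: forward the first CPU in the new mask to
 * the SDMA affinity update above.
 */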
static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
{
        int cpu = cpumask_first(mask);
        struct hfi1_msix_entry *msix = container_of(notify,
                                                    struct hfi1_msix_entry,
                                                    notify);

        /* Only one CPU configuration supported currently */
        hfi1_update_sdma_affinity(msix, cpu);
}

static void hfi1_irq_notifier_release(struct kref *ref)
{
        /*
         * This is required by affinity notifier. We don't have anything to
         * free here.
         */
}

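/*
 * Register an affinity notifier so that user changes made through the
 * /proc/irq interface are reflected in the SDMA engine's CPU assignment.
 */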
static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
        struct irq_affinity_notify *notify = &msix->notify;

        notify->irq = msix->msix.vector;
        notify->notify = hfi1_irq_notifier_notify;
        notify->release = hfi1_irq_notifier_release;

        if (irq_set_affinity_notifier(notify->irq, notify))
                pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
                       notify->irq);
}

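/* Unregister the notifier installed by hfi1_setup_sdma_notifier(). */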
static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
        struct irq_affinity_notify *notify = &msix->notify;

        if (irq_set_affinity_notifier(notify->irq, NULL))
                pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
                       notify->irq);
}

/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
                            struct hfi1_msix_entry *msix)
{
        int ret;
        cpumask_var_t diff;
        struct hfi1_affinity_node *entry;
        struct cpu_mask_set *set = NULL;
        struct sdma_engine *sde = NULL;
        struct hfi1_ctxtdata *rcd = NULL;
        char extra[64];
        int cpu = -1;

        extra[0] = '\0';
        cpumask_clear(&msix->mask);

        ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
        if (!ret)
                return -ENOMEM;

        entry = node_affinity_lookup(dd->node);

        switch (msix->type) {
        case IRQ_SDMA:
                sde = (struct sdma_engine *)msix->arg;
                scnprintf(extra, 64, "engine %u", sde->this_idx);
                set = &entry->def_intr;
                break;
        case IRQ_GENERAL:
                cpu = cpumask_first(&entry->general_intr_mask);
                break;
        case IRQ_RCVCTXT:
                rcd = (struct hfi1_ctxtdata *)msix->arg;
                if (rcd->ctxt == HFI1_CTRL_CTXT)
                        cpu = cpumask_first(&entry->general_intr_mask);
                else
                        set = &entry->rcv_intr;
                scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
                break;
        default:
                dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
                /* Don't leak the scratch cpumask on the error path */
                free_cpumask_var(diff);
                return -EINVAL;
        }

        /*
         * The general and control contexts are placed on a particular
         * CPU, which is set above. Skip accounting for it. Everything else
         * finds its CPU here.
         */
        if (cpu == -1 && set) {
                if (cpumask_equal(&set->mask, &set->used)) {
                        /*
                         * We've used up all the CPUs, bump up the generation
                         * and reset the 'used' map
                         */
                        set->gen++;
                        cpumask_clear(&set->used);
                }
                cpumask_andnot(diff, &set->mask, &set->used);
                cpu = cpumask_first(diff);
                cpumask_set_cpu(cpu, &set->used);
        }

        cpumask_set_cpu(cpu, &msix->mask);
        dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
                    msix->msix.vector, irq_type_names[msix->type],
                    extra, cpu);
        irq_set_affinity_hint(msix->msix.vector, &msix->mask);

        if (msix->type == IRQ_SDMA) {
                sde->cpu = cpu;
                hfi1_setup_sdma_notifier(msix);
        }

        free_cpumask_var(diff);
        return 0;
}

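/* Locked wrapper around get_irq_affinity(). */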
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
        int ret;

        mutex_lock(&node_affinity.lock);
        ret = get_irq_affinity(dd, msix);
        mutex_unlock(&node_affinity.lock);
        return ret;
}

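/*
 * Undo the CPU accounting done by hfi1_get_irq_affinity() and clear the
 * affinity hint for this MSI-X vector.
 */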
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
                           struct hfi1_msix_entry *msix)
{
        struct cpu_mask_set *set = NULL;
        struct hfi1_ctxtdata *rcd;
        struct hfi1_affinity_node *entry;

        mutex_lock(&node_affinity.lock);
        entry = node_affinity_lookup(dd->node);

        switch (msix->type) {
        case IRQ_SDMA:
                set = &entry->def_intr;
                hfi1_cleanup_sdma_notifier(msix);
                break;
        case IRQ_GENERAL:
                /* Don't do accounting for general contexts */
                break;
        case IRQ_RCVCTXT:
                rcd = (struct hfi1_ctxtdata *)msix->arg;
                /* Don't do accounting for control contexts */
                if (rcd->ctxt != HFI1_CTRL_CTXT)
                        set = &entry->rcv_intr;
                break;
        default:
                mutex_unlock(&node_affinity.lock);
                return;
        }

        if (set) {
                cpumask_andnot(&set->used, &set->used, &msix->mask);
                if (cpumask_empty(&set->used) && set->gen) {
                        set->gen--;
                        cpumask_copy(&set->used, &set->mask);
                }
        }

        irq_set_affinity_hint(msix->msix.vector, NULL);
        cpumask_clear(&msix->mask);
        mutex_unlock(&node_affinity.lock);
}

/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
                                struct hfi1_affinity_node_list *affinity)
{
        int possible, curr_cpu, i;
        uint num_cores_per_socket = node_affinity.num_online_cpus /
                                        affinity->num_core_siblings /
                                                node_affinity.num_online_nodes;

        cpumask_copy(hw_thread_mask, &affinity->proc.mask);
        if (affinity->num_core_siblings > 0) {
                /* Removing other siblings not needed for now */
                possible = cpumask_weight(hw_thread_mask);
                curr_cpu = cpumask_first(hw_thread_mask);
                for (i = 0;
                     i < num_cores_per_socket * node_affinity.num_online_nodes;
                     i++)
                        curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

                for (; i < possible; i++) {
                        cpumask_clear_cpu(curr_cpu, hw_thread_mask);
                        curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
                }

                /* Identifying correct HW threads within physical cores */
                cpumask_shift_left(hw_thread_mask, hw_thread_mask,
                                   num_cores_per_socket *
                                   node_affinity.num_online_nodes *
                                   hw_thread_no);
        }
}

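/*
 * Select a CPU for a user process that is opening a context on the device
 * attached to NUMA node 'node'. Returns the chosen CPU, or -1 if no CPU is
 * assigned; the caller releases it later with hfi1_put_proc_affinity().
 */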
int hfi1_get_proc_affinity(int node)
{
        int cpu = -1, ret, i;
        struct hfi1_affinity_node *entry;
        cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
        const struct cpumask *node_mask,
                *proc_mask = tsk_cpus_allowed(current);
        struct hfi1_affinity_node_list *affinity = &node_affinity;
        struct cpu_mask_set *set = &affinity->proc;

        /*
         * check whether process/context affinity has already
         * been set
         */
        if (cpumask_weight(proc_mask) == 1) {
                hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
                          current->pid, current->comm,
                          cpumask_pr_args(proc_mask));
                /*
                 * Mark the pre-set CPU as used. This is atomic so we don't
                 * need the lock
                 */
                cpu = cpumask_first(proc_mask);
                cpumask_set_cpu(cpu, &set->used);
                goto done;
        } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
                hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
                          current->pid, current->comm,
                          cpumask_pr_args(proc_mask));
                goto done;
        }

        /*
         * The process does not have a preset CPU affinity so find one to
         * recommend using the following algorithm:
         *
         * For each user process that is opening a context on HFI Y:
         *  a) If all cores are filled, reinitialize the bitmask
         *  b) Fill real cores first, then HT cores (First set of HT
         *     cores on all physical cores, then second set of HT core,
         *     and, so on) in the following order:
         *
         *     1. Same NUMA node as HFI Y and not running an IRQ
         *        handler
         *     2. Same NUMA node as HFI Y and running an IRQ handler
         *     3. Different NUMA node to HFI Y and not running an IRQ
         *        handler
         *     4. Different NUMA node to HFI Y and running an IRQ
         *        handler
         *  c) Mark core as filled in the bitmask. As user processes are
         *     done, clear cores from the bitmask.
         */

        ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
        if (!ret)
                goto done;
        ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
        if (!ret)
                goto free_diff;
        ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
        if (!ret)
                goto free_hw_thread_mask;
        ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
        if (!ret)
                goto free_available_mask;

        mutex_lock(&affinity->lock);
        /*
         * If we've used all available HW threads, clear the mask and start
         * overloading.
         */
        if (cpumask_equal(&set->mask, &set->used)) {
                set->gen++;
                cpumask_clear(&set->used);
        }

        /*
         * If NUMA node has CPUs used by interrupt handlers, include them in the
         * interrupt handler mask.
         */
        entry = node_affinity_lookup(node);
        if (entry) {
                cpumask_copy(intrs_mask, (entry->def_intr.gen ?
                                          &entry->def_intr.mask :
                                          &entry->def_intr.used));
                cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
                                                    &entry->rcv_intr.mask :
                                                    &entry->rcv_intr.used));
                cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
        }
        hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
                  cpumask_pr_args(intrs_mask));

        cpumask_copy(hw_thread_mask, &set->mask);

        /*
         * If HT cores are enabled, identify which HW threads within the
         * physical cores should be used.
         */
        if (affinity->num_core_siblings > 0) {
                for (i = 0; i < affinity->num_core_siblings; i++) {
                        find_hw_thread_mask(i, hw_thread_mask, affinity);

                        /*
                         * If there's at least one available core for this HW
                         * thread number, stop looking for a core.
                         *
                         * diff will always be not empty at least once in this
                         * loop as the used mask gets reset when
                         * (set->mask == set->used) before this loop.
                         */
                        cpumask_andnot(diff, hw_thread_mask, &set->used);
                        if (!cpumask_empty(diff))
                                break;
                }
        }
        hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
                  cpumask_pr_args(hw_thread_mask));

        node_mask = cpumask_of_node(node);
        hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
                  cpumask_pr_args(node_mask));

        /* Get cpumask of available CPUs on preferred NUMA */
        cpumask_and(available_mask, hw_thread_mask, node_mask);
        cpumask_andnot(available_mask, available_mask, &set->used);
        hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
                  cpumask_pr_args(available_mask));

        /*
         * At first, we don't want to place processes on the same
         * CPUs as interrupt handlers. Then, CPUs running interrupt
         * handlers are used.
         *
         * 1) If diff is not empty, then there are CPUs not running
         *    interrupt handlers available, so diff gets copied
         *    over to available_mask.
         * 2) If diff is empty, then all CPUs not running interrupt
         *    handlers are taken, so available_mask contains all
         *    available CPUs running interrupt handlers.
         * 3) If available_mask is empty, then all CPUs on the
         *    preferred NUMA node are taken, so other NUMA nodes are
         *    used for process assignments using the same method as
         *    the preferred NUMA node.
         */
        cpumask_andnot(diff, available_mask, intrs_mask);
        if (!cpumask_empty(diff))
                cpumask_copy(available_mask, diff);

        /* If we don't have CPUs on the preferred node, use other NUMA nodes */
        if (cpumask_empty(available_mask)) {
                cpumask_andnot(available_mask, hw_thread_mask, &set->used);
                /* Excluding preferred NUMA cores */
                cpumask_andnot(available_mask, available_mask, node_mask);
                hfi1_cdbg(PROC,
                          "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
                          cpumask_pr_args(available_mask));

                /*
                 * At first, we don't want to place processes on the same
                 * CPUs as interrupt handlers.
                 */
                cpumask_andnot(diff, available_mask, intrs_mask);
                if (!cpumask_empty(diff))
                        cpumask_copy(available_mask, diff);
        }
        hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
                  cpumask_pr_args(available_mask));

        cpu = cpumask_first(available_mask);
        if (cpu >= nr_cpu_ids) /* empty */
                cpu = -1;
        else
                cpumask_set_cpu(cpu, &set->used);

        mutex_unlock(&affinity->lock);
        hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

        free_cpumask_var(intrs_mask);
free_available_mask:
        free_cpumask_var(available_mask);
free_hw_thread_mask:
        free_cpumask_var(hw_thread_mask);
free_diff:
        free_cpumask_var(diff);
done:
        return cpu;
}

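/*
 * Return a CPU obtained from hfi1_get_proc_affinity() to the pool of CPUs
 * available for future process assignments.
 */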
void hfi1_put_proc_affinity(int cpu)
{
        struct hfi1_affinity_node_list *affinity = &node_affinity;
        struct cpu_mask_set *set = &affinity->proc;

        if (cpu < 0)
                return;

        mutex_lock(&affinity->lock);
        cpumask_clear_cpu(cpu, &set->used);
        hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
        if (cpumask_empty(&set->used) && set->gen) {
                set->gen--;
                cpumask_copy(&set->used, &set->mask);
        }
        mutex_unlock(&affinity->lock);
}