]>
Commit | Line | Data |
---|---|---|
5e385a6e CH |
1 | |
2 | #include <linux/interrupt.h> | |
3 | #include <linux/kernel.h> | |
4 | #include <linux/slab.h> | |
5 | #include <linux/cpu.h> | |
6 | ||
34c3d981 TG |
7 | static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, |
8 | int cpus_per_vec) | |
9 | { | |
10 | const struct cpumask *siblmsk; | |
11 | int cpu, sibl; | |
12 | ||
13 | for ( ; cpus_per_vec > 0; ) { | |
14 | cpu = cpumask_first(nmsk); | |
15 | ||
16 | /* Should not happen, but I'm too lazy to think about it */ | |
17 | if (cpu >= nr_cpu_ids) | |
18 | return; | |
19 | ||
20 | cpumask_clear_cpu(cpu, nmsk); | |
21 | cpumask_set_cpu(cpu, irqmsk); | |
22 | cpus_per_vec--; | |
23 | ||
24 | /* If the cpu has siblings, use them first */ | |
25 | siblmsk = topology_sibling_cpumask(cpu); | |
26 | for (sibl = -1; cpus_per_vec > 0; ) { | |
27 | sibl = cpumask_next(sibl, siblmsk); | |
28 | if (sibl >= nr_cpu_ids) | |
29 | break; | |
30 | if (!cpumask_test_and_clear_cpu(sibl, nmsk)) | |
31 | continue; | |
32 | cpumask_set_cpu(sibl, irqmsk); | |
33 | cpus_per_vec--; | |
34 | } | |
35 | } | |
36 | } | |
37 | ||
38 | static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk) | |
39 | { | |
c0af5243 | 40 | int n, nodes = 0; |
34c3d981 TG |
41 | |
42 | /* Calculate the number of nodes in the supplied affinity mask */ | |
c0af5243 | 43 | for_each_online_node(n) { |
34c3d981 TG |
44 | if (cpumask_intersects(mask, cpumask_of_node(n))) { |
45 | node_set(n, *nodemsk); | |
46 | nodes++; | |
47 | } | |
48 | } | |
49 | return nodes; | |
50 | } | |
51 | ||
/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the masks pointer or NULL if allocation failed.
 */
struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
	int n, nodes, cpus_per_vec, extra_vecs, curvec;
	/* affv = number of vectors that actually get spread over CPUs */
	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
	/* last_affv = index of the first post vector (one past the spread range) */
	int last_affv = affv + affd->pre_vectors;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct cpumask *masks;
	cpumask_var_t nmsk;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto out;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(masks + curvec, irq_default_affinity);

	/* Stabilize the cpumasks */
	get_online_cpus();
	nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);

	/*
	 * If the number of nodes in the mask is greater than or equal the
	 * number of vectors we just spread the vectors across the nodes.
	 */
	if (affv <= nodes) {
		/* One node's whole CPU mask per vector; stop when spread range is full */
		for_each_node_mask(n, nodemsk) {
			cpumask_copy(masks + curvec, cpumask_of_node(n));
			if (++curvec == last_affv)
				break;
		}
		goto done;
	}

	/* More vectors than nodes: distribute the remaining vectors per node */
	for_each_node_mask(n, nodemsk) {
		int ncpus, v, vecs_to_assign, vecs_per_node;

		/* Spread the vectors per node */
		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));

		/* Calculate the number of cpus per vector */
		ncpus = cpumask_weight(nmsk);
		vecs_to_assign = min(vecs_per_node, ncpus);

		/* Account for rounding errors */
		extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

		for (v = 0; curvec < last_affv && v < vecs_to_assign;
		     curvec++, v++) {
			cpus_per_vec = ncpus / vecs_to_assign;

			/* Account for extra vectors to compensate rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				--extra_vecs;
			}
			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
		}

		if (curvec >= last_affv)
			break;
		/* Fewer nodes remain, so later nodes get a larger share */
		--nodes;
	}

done:
	put_online_cpus();

	/* Fill out vectors at the end that don't need affinity */
	for (; curvec < nvecs; curvec++)
		cpumask_copy(masks + curvec, irq_default_affinity);
out:
	free_cpumask_var(nmsk);
	return masks;
}
140 | ||
141 | /** | |
212bd846 CH |
142 | * irq_calc_affinity_vectors - Calculate the optimal number of vectors |
143 | * @maxvec: The maximum number of vectors available | |
144 | * @affd: Description of the affinity requirements | |
34c3d981 | 145 | */ |
212bd846 | 146 | int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) |
34c3d981 | 147 | { |
212bd846 CH |
148 | int resv = affd->pre_vectors + affd->post_vectors; |
149 | int vecs = maxvec - resv; | |
150 | int cpus; | |
34c3d981 TG |
151 | |
152 | /* Stabilize the cpumasks */ | |
153 | get_online_cpus(); | |
212bd846 | 154 | cpus = cpumask_weight(cpu_online_mask); |
34c3d981 | 155 | put_online_cpus(); |
212bd846 CH |
156 | |
157 | return min(cpus, vecs) + resv; | |
34c3d981 | 158 | } |