]> git.proxmox.com Git - mirror_ovs.git/blob - lib/ovs-numa.c
ovsdb-idl: Fix memleak when reinserting tracked orphan rows.
[mirror_ovs.git] / lib / ovs-numa.c
1 /*
2 * Copyright (c) 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "ovs-numa.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #ifdef __linux__
23 #include <dirent.h>
24 #include <stddef.h>
25 #include <string.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #endif /* __linux__ */
29
30 #include "hash.h"
31 #include "openvswitch/hmap.h"
32 #include "openvswitch/list.h"
33 #include "ovs-thread.h"
34 #include "openvswitch/vlog.h"
35 #include "util.h"
36
37 VLOG_DEFINE_THIS_MODULE(ovs_numa);
38
39 /* ovs-numa module
40 * ===============
41 *
42 * This module stores the affinity information of numa nodes and cpu cores.
43 * It also provides functions to bookkeep the pin of threads on cpu cores.
44 *
45 * It is assumed that the numa node ids and cpu core ids all start from 0 and
46 * range continuously. So, for example, if 'ovs_numa_get_n_cores()' returns N,
47 * user can assume core ids from 0 to N-1 are all valid and there is a
48 * 'struct cpu_core' for each id.
49 *
50 * NOTE, this module should only be used by the main thread.
51 *
52 * NOTE, the assumption above will fail when cpu hotplug is used. In that
53 * case ovs-numa will not function correctly. For now, add a TODO entry
54 * for addressing it in the future.
55 *
56 * TODO: Fix ovs-numa when cpu hotplug is used.
57 */
58
59 #define MAX_NUMA_NODES 128
60
61 /* numa node. */
62 struct numa_node {
63 struct hmap_node hmap_node; /* In the 'all_numa_nodes'. */
64 struct ovs_list cores; /* List of cpu cores on the numa node. */
65 int numa_id; /* numa node id. */
66 };
67
68 /* Cpu core on a numa node. */
69 struct cpu_core {
70 struct hmap_node hmap_node;/* In the 'all_cpu_cores'. */
71 struct ovs_list list_node; /* In 'numa_node->cores' list. */
72 struct numa_node *numa; /* numa node containing the core. */
73 unsigned core_id; /* Core id. */
74 };
75
76 /* Contains all 'struct numa_node's. */
77 static struct hmap all_numa_nodes = HMAP_INITIALIZER(&all_numa_nodes);
78 /* Contains all 'struct cpu_core's. */
79 static struct hmap all_cpu_cores = HMAP_INITIALIZER(&all_cpu_cores);
80 /* True if numa node and core info are correctly extracted. */
81 static bool found_numa_and_core;
82 /* True if the module was initialized with dummy options. In this case, the
83 * module must not interact with the actual cpus/nodes in the system. */
84 static bool dummy_numa = false;
85 /* If 'dummy_numa' is true, contains a copy of the dummy numa configuration
86 * parameter */
87 static char *dummy_config;
88
89 static struct numa_node *get_numa_by_numa_id(int numa_id);
90
91 #ifdef __linux__
/* Returns true if every character of 'str' is a decimal digit (an empty
 * string qualifies).  Returns false otherwise. */
static bool
contain_all_digits(const char *str)
{
    for (const char *p = str; *p; p++) {
        if (!isdigit((unsigned char) *p)) {
            return false;
        }
    }
    return true;
}
98 #endif /* __linux__ */
99
100 static struct numa_node *
101 insert_new_numa_node(int numa_id)
102 {
103 struct numa_node *n = xzalloc(sizeof *n);
104
105 hmap_insert(&all_numa_nodes, &n->hmap_node, hash_int(numa_id, 0));
106 ovs_list_init(&n->cores);
107 n->numa_id = numa_id;
108
109 return n;
110 }
111
112 static struct cpu_core *
113 insert_new_cpu_core(struct numa_node *n, unsigned core_id)
114 {
115 struct cpu_core *c = xzalloc(sizeof *c);
116
117 hmap_insert(&all_cpu_cores, &c->hmap_node, hash_int(core_id, 0));
118 ovs_list_insert(&n->cores, &c->list_node);
119 c->core_id = core_id;
120 c->numa = n;
121
122 return c;
123 }
124
125 /* Has the same effect as discover_numa_and_core(), but instead of
126 * reading sysfs entries, extracts the info from the global variable
127 * 'dummy_config', which is set with ovs_numa_set_dummy().
128 *
129 * 'dummy_config' lists the numa_ids of each CPU separated by a comma, e.g.
130 * - "0,0,0,0": four cores on numa socket 0.
131 * - "0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1": 16 cores on two numa sockets.
132 * - "0,0,0,0,1,1,1,1": 8 cores on two numa sockets.
133 *
134 * The different numa ids must be consecutives or the function will abort. */
135 static void
136 discover_numa_and_core_dummy(void)
137 {
138 char *conf = xstrdup(dummy_config);
139 char *id, *saveptr = NULL;
140 unsigned i = 0;
141 long max_numa_id = 0;
142
143 for (id = strtok_r(conf, ",", &saveptr); id;
144 id = strtok_r(NULL, ",", &saveptr)) {
145 struct hmap_node *hnode;
146 struct numa_node *n;
147 long numa_id;
148
149 numa_id = strtol(id, NULL, 10);
150 if (numa_id < 0 || numa_id >= MAX_NUMA_NODES) {
151 VLOG_WARN("Invalid numa node %ld", numa_id);
152 continue;
153 }
154
155 max_numa_id = MAX(max_numa_id, numa_id);
156
157 hnode = hmap_first_with_hash(&all_numa_nodes, hash_int(numa_id, 0));
158
159 if (hnode) {
160 n = CONTAINER_OF(hnode, struct numa_node, hmap_node);
161 } else {
162 n = insert_new_numa_node(numa_id);
163 }
164
165 insert_new_cpu_core(n, i);
166
167 i++;
168 }
169
170 free(conf);
171
172 if (max_numa_id + 1 != hmap_count(&all_numa_nodes)) {
173 ovs_fatal(0, "dummy numa contains non consecutive numa ids");
174 }
175 }
176
/* Discovers all numa nodes and the corresponding cpu cores.
 * Constructs the 'struct numa_node' and 'struct cpu_core'.
 * On non-Linux platforms this is a no-op: nothing is discovered. */
static void
discover_numa_and_core(void)
{
#ifdef __linux__
    int i;
    DIR *dir;
    bool numa_supported = true;

    /* Check if NUMA supported on this system. */
    dir = opendir("/sys/devices/system/node");

    /* ENOENT means the kernel exposes no numa topology at all; any other
     * opendir() failure is simply ignored here and handled per-node in the
     * loop below. */
    if (!dir && errno == ENOENT) {
        numa_supported = false;
    }
    if (dir) {
        closedir(dir);
    }

    for (i = 0; i < MAX_NUMA_NODES; i++) {
        char* path;

        if (numa_supported) {
            /* Constructs the path to node /sys/devices/system/nodeX. */
            path = xasprintf("/sys/devices/system/node/node%d", i);
        } else {
            /* No numa support: scan the flat cpu directory once and treat
             * every cpu as belonging to node 0 (i == 0 on this pass). */
            path = xasprintf("/sys/devices/system/cpu/");
        }

        dir = opendir(path);

        /* Creates 'struct numa_node' if the 'dir' is non-null. */
        if (dir) {
            struct numa_node *n;
            struct dirent *subdir;

            n = insert_new_numa_node(i);

            while ((subdir = readdir(dir)) != NULL) {
                /* Only entries named "cpu<N>" are cores; the all-digits
                 * check skips e.g. "cpufreq" or "cpuidle". */
                if (!strncmp(subdir->d_name, "cpu", 3)
                    && contain_all_digits(subdir->d_name + 3)) {
                    unsigned core_id;

                    core_id = strtoul(subdir->d_name + 3, NULL, 10);
                    insert_new_cpu_core(n, core_id);
                }
            }
            closedir(dir);
        } else if (errno != ENOENT) {
            VLOG_WARN("opendir(%s) failed (%s)", path,
                      ovs_strerror(errno));
        }

        free(path);
        /* Node ids are assumed consecutive, so stop at the first missing
         * node directory; in the non-numa fallback one pass is enough. */
        if (!dir || !numa_supported) {
            break;
        }
    }
#endif /* __linux__ */
}
238
239 /* Gets 'struct cpu_core' by 'core_id'. */
240 static struct cpu_core*
241 get_core_by_core_id(unsigned core_id)
242 {
243 struct cpu_core *core;
244
245 HMAP_FOR_EACH_WITH_HASH (core, hmap_node, hash_int(core_id, 0),
246 &all_cpu_cores) {
247 if (core->core_id == core_id) {
248 return core;
249 }
250 }
251
252 return NULL;
253 }
254
255 /* Gets 'struct numa_node' by 'numa_id'. */
256 static struct numa_node*
257 get_numa_by_numa_id(int numa_id)
258 {
259 struct numa_node *numa;
260
261 HMAP_FOR_EACH_WITH_HASH (numa, hmap_node, hash_int(numa_id, 0),
262 &all_numa_nodes) {
263 if (numa->numa_id == numa_id) {
264 return numa;
265 }
266 }
267
268 return NULL;
269 }
270
271 \f
/* Initializes the numa module.  Safe to call multiple times; discovery
 * runs exactly once. */
void
ovs_numa_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        const struct numa_node *n;

        /* ovs_numa_set_dummy(), if used, must already have been called at
         * this point. */
        if (dummy_numa) {
            discover_numa_and_core_dummy();
        } else {
            discover_numa_and_core();
        }

        HMAP_FOR_EACH(n, hmap_node, &all_numa_nodes) {
            VLOG_INFO("Discovered %"PRIuSIZE" CPU cores on NUMA node %d",
                      ovs_list_size(&n->cores), n->numa_id);
        }

        VLOG_INFO("Discovered %"PRIuSIZE" NUMA nodes and %"PRIuSIZE" CPU cores",
                  hmap_count(&all_numa_nodes), hmap_count(&all_cpu_cores));

        /* The query functions below only report data when discovery found
         * at least one node and one core. */
        if (hmap_count(&all_numa_nodes) && hmap_count(&all_cpu_cores)) {
            found_numa_and_core = true;
        }

        ovsthread_once_done(&once);
    }
}
302
303 /* Extracts the numa node and core info from the 'config'. This is useful for
304 * testing purposes. The function must be called once, before ovs_numa_init().
305 *
306 * The format of 'config' is explained in the comment above
307 * discover_numa_and_core_dummy().*/
308 void
309 ovs_numa_set_dummy(const char *config)
310 {
311 dummy_numa = true;
312 ovs_assert(config);
313 free(dummy_config);
314 dummy_config = xstrdup(config);
315 }
316
317 bool
318 ovs_numa_numa_id_is_valid(int numa_id)
319 {
320 return found_numa_and_core && numa_id < ovs_numa_get_n_numas();
321 }
322
323 bool
324 ovs_numa_core_id_is_valid(unsigned core_id)
325 {
326 return found_numa_and_core && core_id < ovs_numa_get_n_cores();
327 }
328
329 /* Returns the number of numa nodes. */
330 int
331 ovs_numa_get_n_numas(void)
332 {
333 return found_numa_and_core ? hmap_count(&all_numa_nodes)
334 : OVS_NUMA_UNSPEC;
335 }
336
337 /* Returns the number of cpu cores. */
338 int
339 ovs_numa_get_n_cores(void)
340 {
341 return found_numa_and_core ? hmap_count(&all_cpu_cores)
342 : OVS_CORE_UNSPEC;
343 }
344
345 /* Given 'core_id', returns the corresponding numa node id. Returns
346 * OVS_NUMA_UNSPEC if 'core_id' is invalid. */
347 int
348 ovs_numa_get_numa_id(unsigned core_id)
349 {
350 struct cpu_core *core = get_core_by_core_id(core_id);
351
352 if (core) {
353 return core->numa->numa_id;
354 }
355
356 return OVS_NUMA_UNSPEC;
357 }
358
359 /* Returns the number of cpu cores on numa node. Returns OVS_CORE_UNSPEC
360 * if 'numa_id' is invalid. */
361 int
362 ovs_numa_get_n_cores_on_numa(int numa_id)
363 {
364 struct numa_node *numa = get_numa_by_numa_id(numa_id);
365
366 if (numa) {
367 return ovs_list_size(&numa->cores);
368 }
369
370 return OVS_CORE_UNSPEC;
371 }
372
373 static struct ovs_numa_dump *
374 ovs_numa_dump_create(void)
375 {
376 struct ovs_numa_dump *dump = xmalloc(sizeof *dump);
377
378 hmap_init(&dump->cores);
379 hmap_init(&dump->numas);
380
381 return dump;
382 }
383
384 static void
385 ovs_numa_dump_add(struct ovs_numa_dump *dump, int numa_id, int core_id)
386 {
387 struct ovs_numa_info_core *c = xzalloc(sizeof *c);
388 struct ovs_numa_info_numa *n;
389
390 c->numa_id = numa_id;
391 c->core_id = core_id;
392 hmap_insert(&dump->cores, &c->hmap_node, hash_2words(numa_id, core_id));
393
394 HMAP_FOR_EACH_WITH_HASH (n, hmap_node, hash_int(numa_id, 0),
395 &dump->numas) {
396 if (n->numa_id == numa_id) {
397 n->n_cores++;
398 return;
399 }
400 }
401
402 n = xzalloc(sizeof *n);
403 n->numa_id = numa_id;
404 n->n_cores = 1;
405 hmap_insert(&dump->numas, &n->hmap_node, hash_int(numa_id, 0));
406 }
407
408 /* Given the 'numa_id', returns dump of all cores on the numa node. */
409 struct ovs_numa_dump *
410 ovs_numa_dump_cores_on_numa(int numa_id)
411 {
412 struct ovs_numa_dump *dump = ovs_numa_dump_create();
413 struct numa_node *numa = get_numa_by_numa_id(numa_id);
414
415 if (numa) {
416 struct cpu_core *core;
417
418 LIST_FOR_EACH (core, list_node, &numa->cores) {
419 ovs_numa_dump_add(dump, numa->numa_id, core->core_id);
420 }
421 }
422
423 return dump;
424 }
425
426 struct ovs_numa_dump *
427 ovs_numa_dump_cores_with_cmask(const char *cmask)
428 {
429 struct ovs_numa_dump *dump = ovs_numa_dump_create();
430 int core_id = 0;
431 int end_idx;
432
433 /* Ignore leading 0x. */
434 end_idx = 0;
435 if (!strncmp(cmask, "0x", 2) || !strncmp(cmask, "0X", 2)) {
436 end_idx = 2;
437 }
438
439 for (int i = strlen(cmask) - 1; i >= end_idx; i--) {
440 char hex = cmask[i];
441 int bin;
442
443 bin = hexit_value(hex);
444 if (bin == -1) {
445 VLOG_WARN("Invalid cpu mask: %c", cmask[i]);
446 bin = 0;
447 }
448
449 for (int j = 0; j < 4; j++) {
450 if ((bin >> j) & 0x1) {
451 struct cpu_core *core = get_core_by_core_id(core_id);
452
453 if (core) {
454 ovs_numa_dump_add(dump,
455 core->numa->numa_id,
456 core->core_id);
457 }
458 }
459
460 core_id++;
461 }
462 }
463
464 return dump;
465 }
466
467 struct ovs_numa_dump *
468 ovs_numa_dump_n_cores_per_numa(int cores_per_numa)
469 {
470 struct ovs_numa_dump *dump = ovs_numa_dump_create();
471 const struct numa_node *n;
472
473 HMAP_FOR_EACH (n, hmap_node, &all_numa_nodes) {
474 const struct cpu_core *core;
475 int i = 0;
476
477 LIST_FOR_EACH (core, list_node, &n->cores) {
478 if (i++ >= cores_per_numa) {
479 break;
480 }
481
482 ovs_numa_dump_add(dump, core->numa->numa_id, core->core_id);
483 }
484 }
485
486 return dump;
487 }
488
489 bool
490 ovs_numa_dump_contains_core(const struct ovs_numa_dump *dump,
491 int numa_id, unsigned core_id)
492 {
493 struct ovs_numa_info_core *core;
494
495 HMAP_FOR_EACH_WITH_HASH (core, hmap_node, hash_2words(numa_id, core_id),
496 &dump->cores) {
497 if (core->core_id == core_id && core->numa_id == numa_id) {
498 return true;
499 }
500 }
501
502 return false;
503 }
504
505 size_t
506 ovs_numa_dump_count(const struct ovs_numa_dump *dump)
507 {
508 return hmap_count(&dump->cores);
509 }
510
511 void
512 ovs_numa_dump_destroy(struct ovs_numa_dump *dump)
513 {
514 struct ovs_numa_info_core *c;
515 struct ovs_numa_info_numa *n;
516
517 if (!dump) {
518 return;
519 }
520
521 HMAP_FOR_EACH_POP (c, hmap_node, &dump->cores) {
522 free(c);
523 }
524
525 HMAP_FOR_EACH_POP (n, hmap_node, &dump->numas) {
526 free(n);
527 }
528
529 hmap_destroy(&dump->cores);
530 hmap_destroy(&dump->numas);
531
532 free(dump);
533 }
534
/* Returns a dump of the discovered cores the current thread is allowed to
 * run on, or NULL if the information is unavailable: dummy numa in use,
 * non-Linux platform, pthread error, or no discovered core in the
 * affinity set.  Caller owns the returned dump. */
struct ovs_numa_dump *
ovs_numa_thread_getaffinity_dump(void)
{
    if (dummy_numa) {
        /* Nothing to do. */
        return NULL;
    }

#ifndef __linux__
    /* Affinity is queried via pthread_getaffinity_np(), which is
     * Linux-specific. */
    return NULL;
#else
    struct ovs_numa_dump *dump;
    const struct numa_node *n;
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    err = pthread_getaffinity_np(pthread_self(), sizeof cpuset, &cpuset);
    if (err) {
        VLOG_ERR("Thread getaffinity error: %s", ovs_strerror(err));
        return NULL;
    }

    dump = ovs_numa_dump_create();

    /* Keep only discovered cores that appear in the thread's affinity
     * set. */
    HMAP_FOR_EACH (n, hmap_node, &all_numa_nodes) {
        const struct cpu_core *core;

        LIST_FOR_EACH (core, list_node, &n->cores) {
            if (CPU_ISSET(core->core_id, &cpuset)) {
                ovs_numa_dump_add(dump, core->numa->numa_id, core->core_id);
            }
        }
    }

    /* Report an empty intersection as NULL so callers need only one
     * check. */
    if (!ovs_numa_dump_count(dump)) {
        ovs_numa_dump_destroy(dump);
        return NULL;
    }
    return dump;
#endif /* __linux__ */
}
577
/* Pins the current thread to the cores listed in 'dump'.  Returns 0 on
 * success or when there is nothing to do (NULL 'dump' or dummy numa), a
 * pthread error code on failure, or EOPNOTSUPP on non-Linux platforms. */
int
ovs_numa_thread_setaffinity_dump(const struct ovs_numa_dump *dump)
{
    if (!dump || dummy_numa) {
        /* Nothing to do. */
        return 0;
    }

#ifdef __linux__
    const struct ovs_numa_info_core *core;
    cpu_set_t cpuset;
    int err;

    /* Build the cpu set from every core recorded in 'dump'. */
    CPU_ZERO(&cpuset);
    FOR_EACH_CORE_ON_DUMP (core, dump) {
        CPU_SET(core->core_id, &cpuset);
    }
    err = pthread_setaffinity_np(pthread_self(), sizeof cpuset, &cpuset);
    if (err) {
        VLOG_ERR("Thread setaffinity error: %s", ovs_strerror(err));
        return err;
    }

    return 0;
#else /* !__linux__ */
    return EOPNOTSUPP;
#endif /* __linux__ */
}
606
607 int ovs_numa_thread_setaffinity_core(unsigned core_id)
608 {
609 const struct cpu_core *core = get_core_by_core_id(core_id);
610 struct ovs_numa_dump *affinity = ovs_numa_dump_create();
611 int ret = EINVAL;
612
613 if (core) {
614 ovs_numa_dump_add(affinity, core->numa->numa_id, core->core_id);
615 ret = ovs_numa_thread_setaffinity_dump(affinity);
616 }
617
618 ovs_numa_dump_destroy(affinity);
619 return ret;
620 }