/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>	/* memset, memcpy, strncmp, strsep, strerror */
#include <sys/un.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <dirent.h>
#include <errno.h>
#include <limits.h>	/* PATH_MAX */

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/select.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_log.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>

#include <libvirt/libvirt.h>

#include "channel_manager.h"
#include "channel_commands.h"
#include "channel_monitor.h"


#define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1

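/*
 * Iterate over the set bits of a 64-bit mask in increasing bit order: the
 * statement that follows the macro runs once for every bit that is set,
 * with 'i' holding the bit index. The mask argument is consumed (cleared)
 * as the loop advances and iteration stops once all set bits are cleared.
 */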
#define ITERATIVE_BITMASK_CHECK_64(mask_u64b, i) \
		for (i = 0; mask_u64b; mask_u64b &= ~(1ULL << i++)) \
		if ((mask_u64b >> i) & 1) \

/* Global pointer to libvirt connection */
static virConnectPtr global_vir_conn_ptr;

static unsigned char *global_cpumaps;
static virVcpuInfo *global_vircpuinfo;
static size_t global_maplen;

static unsigned global_n_host_cpus;

/*
 * Represents a single Virtual Machine
 */
struct virtual_machine_info {
	char name[CHANNEL_MGR_MAX_NAME_LEN];
	rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
	struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
	uint64_t channel_mask;
	uint8_t num_channels;
	enum vm_status status;
	virDomainPtr domainPtr;
	virDomainInfo info;
	rte_spinlock_t config_spinlock;
	LIST_ENTRY(virtual_machine_info) vms_info;
};

LIST_HEAD(, virtual_machine_info) vm_list_head;

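/*
 * Look up a tracked VM by name in the global VM list.
 * Returns the matching entry, or NULL if the name is unknown.
 */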
static struct virtual_machine_info *
find_domain_by_name(const char *name)
{
	struct virtual_machine_info *info;
	LIST_FOREACH(info, &vm_list_head, vms_info) {
		if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
			return info;
	}
	return NULL;
}

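/*
 * Refresh the cached vCPU-to-pCPU affinity masks for a VM. For an active
 * domain the live pinning is read via virDomainGetVcpus(); for an inactive
 * domain the persistent configuration is used instead. Each vCPU's mask of
 * usable host CPUs is stored atomically in vm_info->pcpu_mask[].
 * Returns 0 on success, -1 on error.
 */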
static int
update_pcpus_mask(struct virtual_machine_info *vm_info)
{
	virVcpuInfoPtr cpuinfo;
	unsigned i, j;
	int n_vcpus;
	uint64_t mask;

	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);

	if (!virDomainIsActive(vm_info->domainPtr)) {
		n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr,
				vm_info->info.nrVirtCpu, global_cpumaps, global_maplen,
				VIR_DOMAIN_AFFECT_CONFIG);
		if (n_vcpus < 0) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
					"inactive VM '%s'\n", vm_info->name);
			return -1;
		}
		goto update_pcpus;
	}

	memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)*
			CHANNEL_CMDS_MAX_CPUS);

	cpuinfo = global_vircpuinfo;

	n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo,
			CHANNEL_CMDS_MAX_CPUS, global_cpumaps, global_maplen);
	if (n_vcpus < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
				"active VM '%s'\n", vm_info->name);
		return -1;
	}
update_pcpus:
	if (n_vcpus >= CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUs(%d) is out of range "
				"0...%d\n", n_vcpus, CHANNEL_CMDS_MAX_CPUS-1);
		return -1;
	}
	if (n_vcpus != vm_info->info.nrVirtCpu) {
		RTE_LOG(INFO, CHANNEL_MANAGER, "Updating the number of vCPUs for VM '%s'"
				" from %d -> %d\n", vm_info->name, vm_info->info.nrVirtCpu,
				n_vcpus);
		vm_info->info.nrVirtCpu = n_vcpus;
	}
	for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
		mask = 0;
		for (j = 0; j < global_n_host_cpus; j++) {
			if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
				mask |= 1ULL << j;
			}
		}
		rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
	}
	return 0;
}

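/*
 * Pin a vCPU of the named VM to the host CPUs given in core_mask
 * (bit N set == host CPU N), applying the change to both the live domain
 * and its persistent configuration, and update the cached affinity mask.
 * Returns 0 on success, -1 on error.
 */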
int
set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
{
	unsigned i = 0;
	int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
	struct virtual_machine_info *vm_info;
	uint64_t mask = core_mask;

	if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
				vcpu, CHANNEL_CMDS_MAX_CPUS-1);
		return -1;
	}

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}

	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
				"mask(0x%"PRIx64") for VM '%s', VM is not active\n",
				vcpu, core_mask, vm_info->name);
		return -1;
	}

	if (vcpu >= vm_info->info.nrVirtCpu) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds the assigned number of "
				"vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu);
		return -1;
	}
	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS * global_maplen);
	ITERATIVE_BITMASK_CHECK_64(mask, i) {
		VIR_USE_CPU(global_cpumaps, i);
		if (i >= global_n_host_cpus) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
					"number of CPUs(%u)\n", i, global_n_host_cpus);
			return -1;
		}
	}
	if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
			global_maplen, flags) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
				"mask(0x%"PRIx64") for VM '%s'\n", vcpu, core_mask,
				vm_info->name);
		return -1;
	}
	rte_atomic64_set(&vm_info->pcpu_mask[vcpu], core_mask);
	return 0;
}

int
set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
{
	uint64_t mask = 1ULL << core_num;

	return set_pcpus_mask(vm_name, vcpu, mask);
}

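/*
 * Return the cached host-CPU affinity mask for a vCPU of the VM that owns
 * the given channel. The mask is read atomically; no bounds check is done
 * on vcpu, so callers must pass a valid index.
 */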
uint64_t
get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
{
	struct virtual_machine_info *vm_info =
		(struct virtual_machine_info *)chan_info->priv_info;
	return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
}

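/*
 * Check, under the VM's config spinlock, whether a channel number is
 * already present in the VM's channel mask.
 * Returns 1 if the channel exists, otherwise 0.
 */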
static inline int
channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
{
	rte_spinlock_lock(&(vm_info->config_spinlock));
	if (vm_info->channel_mask & (1ULL << channel_num)) {
		rte_spinlock_unlock(&(vm_info->config_spinlock));
		return 1;
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

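/*
 * Create an AF_UNIX stream socket, switch it to non-blocking mode and
 * connect it to the channel's socket path. A connect() that returns
 * EINPROGRESS is given up to two seconds to complete via select().
 * Returns 0 on success, -1 on failure.
 */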
static int
open_non_blocking_channel(struct channel_info *info)
{
	int ret, flags;
	struct sockaddr_un sock_addr;
	fd_set soc_fd_set;
	struct timeval tv;

	info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (info->fd == -1) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
				strerror(errno),
				info->channel_path);
		return -1;
	}
	sock_addr.sun_family = AF_UNIX;
	memcpy(&sock_addr.sun_path, info->channel_path,
			strlen(info->channel_path)+1);

	/* Get current flags */
	flags = fcntl(info->fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for "
				"'%s'\n", strerror(errno), info->channel_path);
		/* Report failure so the caller does not treat the channel as usable */
		return -1;
	}
	/* Set to Non Blocking */
	flags |= O_NONBLOCK;
	if (fcntl(info->fd, F_SETFL, flags) < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) setting non-blocking "
				"socket for '%s'\n", strerror(errno), info->channel_path);
		return -1;
	}
	ret = connect(info->fd, (struct sockaddr *)&sock_addr,
			sizeof(sock_addr));
	if (ret < 0) {
		/* ECONNREFUSED error is given when VM is not active */
		if (errno == ECONNREFUSED) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "VM is not active or has not "
					"activated its endpoint to channel %s\n",
					info->channel_path);
			return -1;
		}
		/* Wait up to tv_sec for the connect to complete if in progress */
		else if (errno == EINPROGRESS) {
			tv.tv_sec = 2;
			tv.tv_usec = 0;
			FD_ZERO(&soc_fd_set);
			FD_SET(info->fd, &soc_fd_set);
			/* select() returns 0 on timeout and -1 on error; only a
			 * positive return means the socket became writable. */
			if (select(info->fd+1, NULL, &soc_fd_set, NULL, &tv) <= 0) {
				RTE_LOG(WARNING, CHANNEL_MANAGER, "Timeout or error on channel "
						"'%s'\n", info->channel_path);
				return -1;
			}
		} else {
			/* Any other error */
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) connecting socket"
					" for '%s'\n", strerror(errno), info->channel_path);
			return -1;
		}
	}
	return 0;
}

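/*
 * Connect a newly allocated channel, register it with the channel monitor's
 * epoll loop and, under the VM's config spinlock, record it in the VM's
 * channel array and mask before marking it CONNECTED.
 * Returns 0 on success, -1 on failure.
 */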
static int
setup_channel_info(struct virtual_machine_info **vm_info_dptr,
		struct channel_info **chan_info_dptr, unsigned channel_num)
{
	struct channel_info *chan_info = *chan_info_dptr;
	struct virtual_machine_info *vm_info = *vm_info_dptr;

	chan_info->channel_num = channel_num;
	chan_info->priv_info = (void *)vm_info;
	chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
	if (open_non_blocking_channel(chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
				"'%s' for VM '%s'\n",
				chan_info->channel_path, vm_info->name);
		return -1;
	}
	if (add_channel_to_monitor(&chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
				"'%s' to epoll ctl for VM '%s'\n",
				chan_info->channel_path, vm_info->name);
		return -1;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->num_channels++;
	vm_info->channel_mask |= 1ULL << channel_num;
	vm_info->channels[channel_num] = chan_info;
	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

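/*
 * Scan CHANNEL_MGR_SOCKET_PATH for sockets named "<vm_name>.<channel_num>"
 * belonging to the given VM and set up every channel that is not already
 * registered. Returns the number of channels enabled, 0 if the VM is
 * unknown or inactive, or -1 if the socket directory cannot be opened.
 */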
int
add_all_channels(const char *vm_name)
{
	DIR *d;
	struct dirent *dir;
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info;
	char *token, *remaining, *tail_ptr;
	char socket_name[PATH_MAX];
	unsigned channel_num;
	int num_channels_enabled = 0;

	/* verify VM exists */
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' not found"
				" during channel discovery\n", vm_name);
		return 0;
	}
	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
		return 0;
	}
	d = opendir(CHANNEL_MGR_SOCKET_PATH);
	if (d == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error opening directory '%s': %s\n",
				CHANNEL_MGR_SOCKET_PATH, strerror(errno));
		return -1;
	}
	while ((dir = readdir(d)) != NULL) {
		if (!strncmp(dir->d_name, ".", 1) ||
				!strncmp(dir->d_name, "..", 2))
			continue;

		snprintf(socket_name, sizeof(socket_name), "%s", dir->d_name);
		remaining = socket_name;
		/* Extract vm_name from "<vm_name>.<channel_num>" */
		token = strsep(&remaining, ".");
		if (remaining == NULL)
			continue;
		if (strncmp(vm_name, token, CHANNEL_MGR_MAX_NAME_LEN))
			continue;

		/* remaining should contain only <channel_num> */
		errno = 0;
		channel_num = (unsigned)strtol(remaining, &tail_ptr, 0);
		if ((errno != 0) || (remaining[0] == '\0') ||
				tail_ptr == NULL || (*tail_ptr != '\0')) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Malformed channel name "
					"'%s' found, it should be in the form of "
					"'<guest_name>.<channel_num>(decimal)'\n",
					dir->d_name);
			continue;
		}
		if (channel_num >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is "
					"greater than max allowable: %d, skipping '%s%s'\n",
					channel_num, CHANNEL_CMDS_MAX_VM_CHANNELS-1,
					CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			continue;
		}
		/* if channel has not been added previously */
		if (channel_exists(vm_info, channel_num))
			continue;

		chan_info = rte_malloc(NULL, sizeof(*chan_info),
				RTE_CACHE_LINE_SIZE);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
					"channel '%s%s'\n", CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			continue;
		}

		snprintf(chan_info->channel_path,
				sizeof(chan_info->channel_path), "%s%s",
				CHANNEL_MGR_SOCKET_PATH, dir->d_name);

		if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) {
			rte_free(chan_info);
			continue;
		}

		num_channels_enabled++;
	}
	closedir(d);
	return num_channels_enabled;
}

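/*
 * Set up an explicit list of channel numbers for the named VM. Channels
 * that are out of range, already registered or whose socket path does not
 * exist are skipped. Returns the number of channels enabled, or 0 if the
 * VM is unknown or inactive.
 */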
int
add_channels(const char *vm_name, unsigned *channel_list,
		unsigned len_channel_list)
{
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info;
	char socket_path[PATH_MAX];
	unsigned i;
	int num_channels_enabled = 0;

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}

	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
		return 0;
	}

	for (i = 0; i < len_channel_list; i++) {

		if (channel_list[i] >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range "
					"0...%d\n", channel_list[i],
					CHANNEL_CMDS_MAX_VM_CHANNELS-1);
			continue;
		}
		if (channel_exists(vm_info, channel_list[i])) {
			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel already exists, skipping "
					"'%s.%u'\n", vm_name, channel_list[i]);
			continue;
		}

		snprintf(socket_path, sizeof(socket_path), "%s%s.%u",
				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
		errno = 0;
		if (access(socket_path, F_OK) < 0) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
					"%s\n", socket_path, strerror(errno));
			continue;
		}
		chan_info = rte_malloc(NULL, sizeof(*chan_info),
				RTE_CACHE_LINE_SIZE);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
					"channel '%s'\n", socket_path);
			continue;
		}
		snprintf(chan_info->channel_path,
				sizeof(chan_info->channel_path), "%s%s.%u",
				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
		if (setup_channel_info(&vm_info, &chan_info, channel_list[i]) < 0) {
			rte_free(chan_info);
			continue;
		}
		num_channels_enabled++;
	}
	return num_channels_enabled;
}

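/*
 * Close a channel's socket, clear its bit in the owning VM's channel mask
 * (under the config spinlock) and free the channel_info structure.
 */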
int
remove_channel(struct channel_info **chan_info_dptr)
{
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info = *chan_info_dptr;

	close(chan_info->fd);

	vm_info = (struct virtual_machine_info *)chan_info->priv_info;

	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->channel_mask &= ~(1ULL << chan_info->channel_num);
	vm_info->num_channels--;
	rte_spinlock_unlock(&(vm_info->config_spinlock));

	rte_free(chan_info);
	return 0;
}

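/*
 * Set every registered channel of the named VM to the given status.
 * Only CONNECTED and DISABLED are accepted.
 * Returns the number of channels whose status was changed.
 */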
int
set_channel_status_all(const char *vm_name, enum channel_status status)
{
	struct virtual_machine_info *vm_info;
	unsigned i;
	uint64_t mask;
	int num_channels_changed = 0;

	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
				"disabled: Unable to change status for VM '%s'\n", vm_name);
		/* Reject unsupported status values instead of applying them */
		return 0;
	}
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to change channel status: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}

	rte_spinlock_lock(&(vm_info->config_spinlock));
	mask = vm_info->channel_mask;
	ITERATIVE_BITMASK_CHECK_64(mask, i) {
		vm_info->channels[i]->status = status;
		num_channels_changed++;
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return num_channels_changed;
}

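/*
 * Set the status of the listed channel numbers of the named VM, skipping
 * channels that are not registered. Only CONNECTED and DISABLED are
 * accepted. Returns the number of channels whose status was changed.
 */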
int
set_channel_status(const char *vm_name, unsigned *channel_list,
		unsigned len_channel_list, enum channel_status status)
{
	struct virtual_machine_info *vm_info;
	unsigned i;
	int num_channels_changed = 0;

	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
				"disabled: Unable to change status for VM '%s'\n", vm_name);
		/* Reject unsupported status values instead of applying them */
		return 0;
	}
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to change channel status: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}
	for (i = 0; i < len_channel_list; i++) {
		if (channel_exists(vm_info, channel_list[i])) {
			rte_spinlock_lock(&(vm_info->config_spinlock));
			vm_info->channels[channel_list[i]]->status = status;
			rte_spinlock_unlock(&(vm_info->config_spinlock));
			num_channels_changed++;
		}
	}
	return num_channels_changed;
}

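/*
 * Fill a caller-supplied struct vm_info snapshot for the named VM: activity
 * status, per-channel details, vCPU count and the cached vCPU affinity
 * masks. Returns 0 on success, -1 if the VM is not known.
 */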
int
get_info_vm(const char *vm_name, struct vm_info *info)
{
	struct virtual_machine_info *vm_info;
	unsigned i, channel_num = 0;
	uint64_t mask;

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}
	info->status = CHANNEL_MGR_VM_ACTIVE;
	if (!virDomainIsActive(vm_info->domainPtr))
		info->status = CHANNEL_MGR_VM_INACTIVE;

	rte_spinlock_lock(&(vm_info->config_spinlock));

	mask = vm_info->channel_mask;
	ITERATIVE_BITMASK_CHECK_64(mask, i) {
		info->channels[channel_num].channel_num = i;
		memcpy(info->channels[channel_num].channel_path,
				vm_info->channels[i]->channel_path, UNIX_PATH_MAX);
		info->channels[channel_num].status = vm_info->channels[i]->status;
		info->channels[channel_num].fd = vm_info->channels[i]->fd;
		channel_num++;
	}

	info->num_channels = channel_num;
	info->num_vcpus = vm_info->info.nrVirtCpu;
	rte_spinlock_unlock(&(vm_info->config_spinlock));

	memcpy(info->name, vm_info->name, sizeof(vm_info->name));
	for (i = 0; i < info->num_vcpus; i++) {
		info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
	}
	return 0;
}

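/*
 * Start managing a libvirt domain: look it up by name, allocate and
 * initialise its virtual_machine_info (including the cached vCPU pinning)
 * and insert it at the head of the global VM list.
 * Returns 0 on success, -1 on error.
 */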
int
add_vm(const char *vm_name)
{
	struct virtual_machine_info *new_domain;
	virDomainPtr dom_ptr;
	int i;

	if (find_domain_by_name(vm_name) != NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add VM: VM '%s' "
				"already exists\n", vm_name);
		return -1;
	}

	if (global_vir_conn_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "No connection to hypervisor exists\n");
		return -1;
	}
	dom_ptr = virDomainLookupByName(global_vir_conn_ptr, vm_name);
	if (dom_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error on VM lookup with libvirt: "
				"VM '%s' not found\n", vm_name);
		return -1;
	}

	new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain),
			RTE_CACHE_LINE_SIZE);
	if (new_domain == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to allocate memory for VM "
				"info\n");
		return -1;
	}
	new_domain->domainPtr = dom_ptr;
	if (virDomainGetInfo(new_domain->domainPtr, &new_domain->info) != 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get libvirt VM info\n");
		rte_free(new_domain);
		return -1;
	}
	if (new_domain->info.nrVirtCpu > CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error: the number of virtual CPUs(%u) is "
				"greater than allowable(%d)\n", new_domain->info.nrVirtCpu,
				CHANNEL_CMDS_MAX_CPUS);
		rte_free(new_domain);
		return -1;
	}

	for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
		rte_atomic64_init(&new_domain->pcpu_mask[i]);
	}
	if (update_pcpus_mask(new_domain) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
		rte_free(new_domain);
		return -1;
	}
	strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
	new_domain->name[sizeof(new_domain->name) - 1] = '\0';
	new_domain->channel_mask = 0;
	new_domain->num_channels = 0;

	if (!virDomainIsActive(dom_ptr))
		new_domain->status = CHANNEL_MGR_VM_INACTIVE;
	else
		new_domain->status = CHANNEL_MGR_VM_ACTIVE;

	rte_spinlock_init(&(new_domain->config_spinlock));
	LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
	return 0;
}

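/*
 * Stop managing the named VM and free its bookkeeping structure. Fails if
 * the VM still has registered channels. Returns 0 on success, -1 on error.
 */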
int
remove_vm(const char *vm_name)
{
	struct virtual_machine_info *vm_info = find_domain_by_name(vm_name);

	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM: VM '%s' "
				"not found\n", vm_name);
		return -1;
	}
	rte_spinlock_lock(&vm_info->config_spinlock);
	if (vm_info->num_channels != 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM '%s', there are "
				"%"PRIu8" channels still active\n",
				vm_name, vm_info->num_channels);
		rte_spinlock_unlock(&vm_info->config_spinlock);
		return -1;
	}
	LIST_REMOVE(vm_info, vms_info);
	rte_spinlock_unlock(&vm_info->config_spinlock);
	rte_free(vm_info);
	return 0;
}

static void
disconnect_hypervisor(void)
{
	if (global_vir_conn_ptr != NULL) {
		virConnectClose(global_vir_conn_ptr);
		global_vir_conn_ptr = NULL;
	}
}

static int
connect_hypervisor(const char *path)
{
	if (global_vir_conn_ptr != NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error connecting to %s, connection "
				"already established\n", path);
		return -1;
	}
	global_vir_conn_ptr = virConnectOpen(path);
	if (global_vir_conn_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error: failed to open connection to "
				"hypervisor '%s'\n", path);
		return -1;
	}
	return 0;
}

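/*
 * Initialise the channel manager: connect to the hypervisor at 'path'
 * (a libvirt URI, e.g. "qemu:///system"), allocate the global CPU map and
 * vCPU info buffers and record the number of host CPUs (clamped to
 * CHANNEL_CMDS_MAX_CPUS). Returns 0 on success, -1 on error.
 *
 * A minimal calling sequence might look like the following sketch; the VM
 * name and channel number below are purely illustrative:
 *
 *	if (channel_manager_init("qemu:///system") < 0)
 *		return -1;
 *	add_vm("ubuntu-vm");             // start tracking the libvirt domain
 *	add_all_channels("ubuntu-vm");   // or: unsigned c = 0; add_channels("ubuntu-vm", &c, 1);
 *	...
 *	channel_manager_exit();          // tears down channels, VMs and the connection
 */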
int
channel_manager_init(const char *path)
{
	virNodeInfo info;

	LIST_INIT(&vm_list_head);
	if (connect_hypervisor(path) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
		return -1;
	}

	global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);

	global_vircpuinfo = rte_zmalloc(NULL, sizeof(*global_vircpuinfo) *
			CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
	if (global_vircpuinfo == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
		goto error;
	}
	global_cpumaps = rte_zmalloc(NULL, CHANNEL_CMDS_MAX_CPUS * global_maplen,
			RTE_CACHE_LINE_SIZE);
	if (global_cpumaps == NULL) {
		goto error;
	}

	if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
		goto error;
	}

	global_n_host_cpus = (unsigned)info.cpus;

	if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
				"maximum of %u. No cores over %u should be used.\n",
				global_n_host_cpus, CHANNEL_CMDS_MAX_CPUS,
				CHANNEL_CMDS_MAX_CPUS - 1);
		global_n_host_cpus = CHANNEL_CMDS_MAX_CPUS;
	}

	return 0;
error:
	disconnect_hypervisor();
	return -1;
}

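/*
 * Tear down the channel manager: for every tracked VM, unregister and close
 * all of its channels, free the VM structures, release the global buffers
 * and close the hypervisor connection.
 */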
void
channel_manager_exit(void)
{
	unsigned i;
	uint64_t mask;
	struct virtual_machine_info *vm_info;

	/* Drain the list with LIST_FIRST() rather than LIST_FOREACH(): each node
	 * is freed inside the loop, so it must not be dereferenced afterwards to
	 * find its successor. */
	while ((vm_info = LIST_FIRST(&vm_list_head)) != NULL) {

		rte_spinlock_lock(&(vm_info->config_spinlock));

		mask = vm_info->channel_mask;
		ITERATIVE_BITMASK_CHECK_64(mask, i) {
			remove_channel_from_monitor(vm_info->channels[i]);
			close(vm_info->channels[i]->fd);
			rte_free(vm_info->channels[i]);
		}
		rte_spinlock_unlock(&(vm_info->config_spinlock));

		LIST_REMOVE(vm_info, vms_info);
		rte_free(vm_info);
	}

	rte_free(global_cpumaps);
	rte_free(global_vircpuinfo);
	disconnect_hypervisor();
}