  Copyright (C) 2010 - 2020 Proxmox Server Solutions GmbH

  This program is free software: you can redistribute it and/or modify
  it under the terms of the GNU Affero General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU Affero General Public License for more details.

  You should have received a copy of the GNU Affero General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.

  Author: Dietmar Maurer <dietmar@proxmox.com>
21 #define G_LOG_DOMAIN "status"
25 #endif /* HAVE_CONFIG_H */
32 #include <sys/syslog.h>
34 #include <rrd_client.h>
38 #include "cfs-utils.h"
43 #define KVSTORE_CPG_GROUP_NAME "pve_kvstore_v1"
46 KVSTORE_MESSAGE_UPDATE
= 1,
47 KVSTORE_MESSAGE_UPDATE_COMPLETE
= 2,
48 KVSTORE_MESSAGE_LOG
= 3,
// Monotonic counter used to stamp each vminfo entry with a version
// (incremented in vmlist_hash_insert_vm); protected by the file-level mutex.
static uint32_t vminfo_version_counter;
79 static memdb_change_t memdb_change_array
[] = {
80 { .path
= "corosync.conf" },
81 { .path
= "corosync.conf.new" },
82 { .path
= "storage.cfg" },
83 { .path
= "user.cfg" },
84 { .path
= "domains.cfg" },
85 { .path
= "priv/shadow.cfg" },
86 { .path
= "priv/acme/plugins.cfg" },
87 { .path
= "priv/tfa.cfg" },
88 { .path
= "priv/token.cfg" },
89 { .path
= "priv/ipam.db" },
90 { .path
= "datacenter.cfg" },
91 { .path
= "vzdump.cron" },
92 { .path
= "jobs.cfg" },
93 { .path
= "ha/crm_commands" },
94 { .path
= "ha/manager_status" },
95 { .path
= "ha/resources.cfg" },
96 { .path
= "ha/groups.cfg" },
97 { .path
= "ha/fence.cfg" },
98 { .path
= "status.cfg" },
99 { .path
= "replication.cfg" },
100 { .path
= "ceph.conf" },
101 { .path
= "sdn/vnets.cfg" },
102 { .path
= "sdn/zones.cfg" },
103 { .path
= "sdn/controllers.cfg" },
104 { .path
= "sdn/subnets.cfg" },
105 { .path
= "sdn/ipams.cfg" },
106 { .path
= "sdn/dns.cfg" },
107 { .path
= "sdn/.running-config" },
108 { .path
= "virtual-guest/cpu-models.conf" },
109 { .path
= "firewall/cluster.fw" },
119 cfs_clinfo_t
*clinfo
;
120 uint32_t clinfo_version
;
123 uint32_t vmlist_version
;
130 GHashTable
*memdb_changes
;
132 clusterlog_t
*clusterlog
;
135 static cfs_status_t cfs_status
;
147 uint32_t cman_version
;
149 GHashTable
*nodes_byid
;
150 GHashTable
*nodes_byname
;
154 g_int32_hash (gconstpointer v
)
156 return *(const uint32_t *) v
;
160 g_int32_equal (gconstpointer v1
,
163 return *((const uint32_t*) v1
) == *((const uint32_t*) v2
);
166 static void vminfo_free(vminfo_t
*vminfo
)
168 g_return_if_fail(vminfo
!= NULL
);
170 if (vminfo
->nodename
)
171 g_free(vminfo
->nodename
);
177 static const char *vminfo_type_to_string(vminfo_t
*vminfo
)
179 if (vminfo
->vmtype
== VMTYPE_QEMU
) {
181 } else if (vminfo
->vmtype
== VMTYPE_OPENVZ
) {
182 // FIXME: remove openvz stuff for 7.x
184 } else if (vminfo
->vmtype
== VMTYPE_LXC
) {
191 static const char *vminfo_type_to_path_type(vminfo_t
*vminfo
)
193 if (vminfo
->vmtype
== VMTYPE_QEMU
) {
194 return "qemu-server"; // special case..
196 return vminfo_type_to_string(vminfo
);
200 int vminfo_to_path(vminfo_t
*vminfo
, GString
*path
)
202 g_return_val_if_fail(vminfo
!= NULL
, -1);
203 g_return_val_if_fail(path
!= NULL
, -1);
205 if (!vminfo
->nodename
)
208 const char *type
= vminfo_type_to_path_type(vminfo
);
209 g_string_printf(path
, "/nodes/%s/%s/%u.conf", vminfo
->nodename
, type
, vminfo
->vmid
);
214 void cfs_clnode_destroy(
215 cfs_clnode_t
*clnode
)
217 g_return_if_fail(clnode
!= NULL
);
220 g_hash_table_destroy(clnode
->kvhash
);
223 g_free(clnode
->name
);
228 cfs_clnode_t
*cfs_clnode_new(
233 g_return_val_if_fail(name
!= NULL
, NULL
);
235 cfs_clnode_t
*clnode
= g_new0(cfs_clnode_t
, 1);
239 clnode
->name
= g_strdup(name
);
240 clnode
->nodeid
= nodeid
;
241 clnode
->votes
= votes
;
246 gboolean
cfs_clinfo_destroy(
247 cfs_clinfo_t
*clinfo
)
249 g_return_val_if_fail(clinfo
!= NULL
, FALSE
);
251 if (clinfo
->cluster_name
)
252 g_free(clinfo
->cluster_name
);
254 if (clinfo
->nodes_byname
)
255 g_hash_table_destroy(clinfo
->nodes_byname
);
257 if (clinfo
->nodes_byid
)
258 g_hash_table_destroy(clinfo
->nodes_byid
);
265 cfs_clinfo_t
*cfs_clinfo_new(
266 const char *cluster_name
,
267 uint32_t cman_version
)
269 g_return_val_if_fail(cluster_name
!= NULL
, NULL
);
271 cfs_clinfo_t
*clinfo
= g_new0(cfs_clinfo_t
, 1);
275 clinfo
->cluster_name
= g_strdup(cluster_name
);
276 clinfo
->cman_version
= cman_version
;
278 if (!(clinfo
->nodes_byid
= g_hash_table_new_full(
279 g_int32_hash
, g_int32_equal
, NULL
,
280 (GDestroyNotify
)cfs_clnode_destroy
)))
283 if (!(clinfo
->nodes_byname
= g_hash_table_new(g_str_hash
, g_str_equal
)))
289 cfs_clinfo_destroy(clinfo
);
294 gboolean
cfs_clinfo_add_node(
295 cfs_clinfo_t
*clinfo
,
296 cfs_clnode_t
*clnode
)
298 g_return_val_if_fail(clinfo
!= NULL
, FALSE
);
299 g_return_val_if_fail(clnode
!= NULL
, FALSE
);
301 g_hash_table_replace(clinfo
->nodes_byid
, &clnode
->nodeid
, clnode
);
302 g_hash_table_replace(clinfo
->nodes_byname
, clnode
->name
, clnode
);
308 cfs_create_memberlist_msg(
311 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
313 g_mutex_lock (&mutex
);
315 g_string_append_printf(str
,"{\n");
319 cfs_clinfo_t
*clinfo
= cfs_status
.clinfo
;
321 if (clinfo
&& clinfo
->nodes_byid
)
322 nodecount
= g_hash_table_size(clinfo
->nodes_byid
);
325 g_string_append_printf(str
, "\"nodename\": \"%s\",\n", cfs
.nodename
);
326 g_string_append_printf(str
, "\"version\": %u,\n", cfs_status
.clinfo_version
);
328 g_string_append_printf(str
, "\"cluster\": { ");
329 g_string_append_printf(str
, "\"name\": \"%s\", \"version\": %d, "
330 "\"nodes\": %d, \"quorate\": %d ",
331 clinfo
->cluster_name
, clinfo
->cman_version
,
332 nodecount
, cfs_status
.quorate
);
334 g_string_append_printf(str
,"},\n");
335 g_string_append_printf(str
,"\"nodelist\": {\n");
337 GHashTable
*ht
= clinfo
->nodes_byid
;
341 g_hash_table_iter_init (&iter
, ht
);
344 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
345 cfs_clnode_t
*node
= (cfs_clnode_t
*)value
;
346 if (i
) g_string_append_printf(str
, ",\n");
349 g_string_append_printf(str
, " \"%s\": { \"id\": %d, \"online\": %d",
350 node
->name
, node
->nodeid
, node
->online
);
353 char *ip
= (char *)g_hash_table_lookup(cfs_status
.iphash
, node
->name
);
355 g_string_append_printf(str
, ", \"ip\": \"%s\"", ip
);
358 g_string_append_printf(str
, "}");
361 g_string_append_printf(str
,"\n }\n");
363 g_string_append_printf(str
, "\"nodename\": \"%s\",\n", cfs
.nodename
);
364 g_string_append_printf(str
, "\"version\": %u\n", cfs_status
.clinfo_version
);
367 g_string_append_printf(str
,"}\n");
369 g_mutex_unlock (&mutex
);
375 kventry_free(kventry_t
*entry
)
377 g_return_if_fail(entry
!= NULL
);
385 kventry_hash_new(void)
387 return g_hash_table_new_full(g_str_hash
, g_str_equal
, NULL
,
388 (GDestroyNotify
)kventry_free
);
392 rrdentry_free(rrdentry_t
*entry
)
394 g_return_if_fail(entry
!= NULL
);
402 rrdentry_hash_new(void)
404 return g_hash_table_new_full(g_str_hash
, g_str_equal
, NULL
,
405 (GDestroyNotify
)rrdentry_free
);
409 cfs_cluster_log_dump(GString
*str
, const char *user
, guint max_entries
)
411 clusterlog_dump(cfs_status
.clusterlog
, str
, user
, max_entries
);
415 cfs_cluster_log(clog_entry_t
*entry
)
417 g_return_if_fail(entry
!= NULL
);
419 clusterlog_insert(cfs_status
.clusterlog
, entry
);
421 if (cfs_status
.kvstore
) {
423 iov
[0].iov_base
= (char *)entry
;
424 iov
[0].iov_len
= clog_entry_size(entry
);
426 if (dfsm_is_initialized(cfs_status
.kvstore
))
427 dfsm_send_message(cfs_status
.kvstore
, KVSTORE_MESSAGE_LOG
, iov
, 1);
431 void cfs_status_init(void)
433 g_mutex_lock (&mutex
);
435 cfs_status
.start_time
= time(NULL
);
437 cfs_status
.vmlist
= vmlist_hash_new();
439 cfs_status
.kvhash
= kventry_hash_new();
441 cfs_status
.rrdhash
= rrdentry_hash_new();
443 cfs_status
.iphash
= g_hash_table_new_full(g_str_hash
, g_str_equal
, g_free
, g_free
);
445 cfs_status
.memdb_changes
= g_hash_table_new(g_str_hash
, g_str_equal
);
447 for (int i
= 0; i
< G_N_ELEMENTS(memdb_change_array
); i
++) {
448 g_hash_table_replace(cfs_status
.memdb_changes
,
449 memdb_change_array
[i
].path
,
450 &memdb_change_array
[i
]);
453 cfs_status
.clusterlog
= clusterlog_new();
456 clusterlog_add(cfs_status
.clusterlog
, "root", "cluster", getpid(),
457 LOG_INFO
, "starting cluster log");
459 g_mutex_unlock (&mutex
);
462 void cfs_status_cleanup(void)
464 g_mutex_lock (&mutex
);
466 cfs_status
.clinfo_version
++;
468 if (cfs_status
.clinfo
) {
469 cfs_clinfo_destroy(cfs_status
.clinfo
);
470 cfs_status
.clinfo
= NULL
;
473 if (cfs_status
.vmlist
) {
474 g_hash_table_destroy(cfs_status
.vmlist
);
475 cfs_status
.vmlist
= NULL
;
478 if (cfs_status
.kvhash
) {
479 g_hash_table_destroy(cfs_status
.kvhash
);
480 cfs_status
.kvhash
= NULL
;
483 if (cfs_status
.rrdhash
) {
484 g_hash_table_destroy(cfs_status
.rrdhash
);
485 cfs_status
.rrdhash
= NULL
;
488 if (cfs_status
.iphash
) {
489 g_hash_table_destroy(cfs_status
.iphash
);
490 cfs_status
.iphash
= NULL
;
493 if (cfs_status
.clusterlog
)
494 clusterlog_destroy(cfs_status
.clusterlog
);
496 g_mutex_unlock (&mutex
);
499 void cfs_status_set_clinfo(
500 cfs_clinfo_t
*clinfo
)
502 g_return_if_fail(clinfo
!= NULL
);
504 g_mutex_lock (&mutex
);
506 cfs_status
.clinfo_version
++;
508 cfs_clinfo_t
*old
= cfs_status
.clinfo
;
510 cfs_status
.clinfo
= clinfo
;
512 cfs_message("update cluster info (cluster name %s, version = %d)",
513 clinfo
->cluster_name
, clinfo
->cman_version
);
516 if (old
&& old
->nodes_byid
&& clinfo
->nodes_byid
) {
518 GHashTable
*ht
= clinfo
->nodes_byid
;
522 g_hash_table_iter_init (&iter
, ht
);
524 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
525 cfs_clnode_t
*node
= (cfs_clnode_t
*)value
;
526 cfs_clnode_t
*oldnode
;
527 if ((oldnode
= g_hash_table_lookup(old
->nodes_byid
, key
))) {
528 node
->online
= oldnode
->online
;
529 node
->kvhash
= oldnode
->kvhash
;
530 oldnode
->kvhash
= NULL
;
537 cfs_clinfo_destroy(old
);
540 g_mutex_unlock (&mutex
);
544 dump_kvstore_versions(
547 const char *nodename
)
549 g_return_if_fail(kvhash
!= NULL
);
550 g_return_if_fail(str
!= NULL
);
551 g_return_if_fail(nodename
!= NULL
);
553 GHashTable
*ht
= kvhash
;
557 g_string_append_printf(str
, "\"%s\": {\n", nodename
);
559 g_hash_table_iter_init (&iter
, ht
);
562 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
563 kventry_t
*entry
= (kventry_t
*)value
;
564 if (i
) g_string_append_printf(str
, ",\n");
566 g_string_append_printf(str
,"\"%s\": %u", entry
->key
, entry
->version
);
569 g_string_append_printf(str
, "}\n");
573 cfs_create_version_msg(GString
*str
)
575 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
577 g_mutex_lock (&mutex
);
579 g_string_append_printf(str
,"{\n");
581 g_string_append_printf(str
, "\"starttime\": %lu,\n", (unsigned long)cfs_status
.start_time
);
583 g_string_append_printf(str
, "\"clinfo\": %u,\n", cfs_status
.clinfo_version
);
585 g_string_append_printf(str
, "\"vmlist\": %u,\n", cfs_status
.vmlist_version
);
587 for (int i
= 0; i
< G_N_ELEMENTS(memdb_change_array
); i
++) {
588 g_string_append_printf(str
, "\"%s\": %u,\n",
589 memdb_change_array
[i
].path
,
590 memdb_change_array
[i
].version
);
593 g_string_append_printf(str
, "\"kvstore\": {\n");
595 dump_kvstore_versions(str
, cfs_status
.kvhash
, cfs
.nodename
);
597 cfs_clinfo_t
*clinfo
= cfs_status
.clinfo
;
599 if (clinfo
&& clinfo
->nodes_byid
) {
600 GHashTable
*ht
= clinfo
->nodes_byid
;
604 g_hash_table_iter_init (&iter
, ht
);
606 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
607 cfs_clnode_t
*node
= (cfs_clnode_t
*)value
;
610 g_string_append_printf(str
, ",\n");
611 dump_kvstore_versions(str
, node
->kvhash
, node
->name
);
615 g_string_append_printf(str
,"}\n");
617 g_string_append_printf(str
,"}\n");
619 g_mutex_unlock (&mutex
);
625 vmlist_hash_new(void)
627 return g_hash_table_new_full(g_int_hash
, g_int_equal
, NULL
,
628 (GDestroyNotify
)vminfo_free
);
632 vmlist_hash_insert_vm(
636 const char *nodename
,
639 g_return_val_if_fail(vmlist
!= NULL
, FALSE
);
640 g_return_val_if_fail(nodename
!= NULL
, FALSE
);
641 g_return_val_if_fail(vmid
!= 0, FALSE
);
642 // FIXME: remove openvz stuff for 7.x
643 g_return_val_if_fail(vmtype
== VMTYPE_QEMU
|| vmtype
== VMTYPE_OPENVZ
||
644 vmtype
== VMTYPE_LXC
, FALSE
);
646 if (!replace
&& g_hash_table_lookup(vmlist
, &vmid
)) {
647 cfs_critical("detected duplicate VMID %d", vmid
);
651 vminfo_t
*vminfo
= g_new0(vminfo_t
, 1);
654 vminfo
->vmtype
= vmtype
;
655 vminfo
->nodename
= g_strdup(nodename
);
657 vminfo
->version
= ++vminfo_version_counter
;
659 g_hash_table_replace(vmlist
, &vminfo
->vmid
, vminfo
);
668 const char *nodename
)
670 g_return_if_fail(cfs_status
.vmlist
!= NULL
);
671 g_return_if_fail(nodename
!= NULL
);
672 g_return_if_fail(vmid
!= 0);
673 // FIXME: remove openvz stuff for 7.x
674 g_return_if_fail(vmtype
== VMTYPE_QEMU
|| vmtype
== VMTYPE_OPENVZ
||
675 vmtype
== VMTYPE_LXC
);
677 cfs_debug("vmlist_register_vm: %s/%u %d", nodename
, vmid
, vmtype
);
679 g_mutex_lock (&mutex
);
681 cfs_status
.vmlist_version
++;
683 vmlist_hash_insert_vm(cfs_status
.vmlist
, vmtype
, vmid
, nodename
, TRUE
);
685 g_mutex_unlock (&mutex
);
689 vmlist_different_vm_exists(
692 const char *nodename
)
694 g_return_val_if_fail(cfs_status
.vmlist
!= NULL
, FALSE
);
695 g_return_val_if_fail(vmid
!= 0, FALSE
);
697 gboolean res
= FALSE
;
699 g_mutex_lock (&mutex
);
702 if ((vminfo
= (vminfo_t
*)g_hash_table_lookup(cfs_status
.vmlist
, &vmid
))) {
703 if (!(vminfo
->vmtype
== vmtype
&& strcmp(vminfo
->nodename
, nodename
) == 0))
706 g_mutex_unlock (&mutex
);
715 g_return_val_if_fail(cfs_status
.vmlist
!= NULL
, FALSE
);
716 g_return_val_if_fail(vmid
!= 0, FALSE
);
718 g_mutex_lock (&mutex
);
720 gpointer res
= g_hash_table_lookup(cfs_status
.vmlist
, &vmid
);
722 g_mutex_unlock (&mutex
);
731 g_return_if_fail(cfs_status
.vmlist
!= NULL
);
732 g_return_if_fail(vmid
!= 0);
734 g_mutex_lock (&mutex
);
736 cfs_status
.vmlist_version
++;
738 g_hash_table_remove(cfs_status
.vmlist
, &vmid
);
740 g_mutex_unlock (&mutex
);
743 void cfs_status_set_vmlist(
746 g_return_if_fail(vmlist
!= NULL
);
748 g_mutex_lock (&mutex
);
750 cfs_status
.vmlist_version
++;
752 if (cfs_status
.vmlist
)
753 g_hash_table_destroy(cfs_status
.vmlist
);
755 cfs_status
.vmlist
= vmlist
;
757 g_mutex_unlock (&mutex
);
761 cfs_create_vmlist_msg(GString
*str
)
763 g_return_val_if_fail(cfs_status
.vmlist
!= NULL
, -EINVAL
);
764 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
766 g_mutex_lock (&mutex
);
768 g_string_append_printf(str
,"{\n");
770 GHashTable
*ht
= cfs_status
.vmlist
;
772 guint count
= g_hash_table_size(ht
);
775 g_string_append_printf(str
,"\"version\": %u\n", cfs_status
.vmlist_version
);
777 g_string_append_printf(str
,"\"version\": %u,\n", cfs_status
.vmlist_version
);
779 g_string_append_printf(str
,"\"ids\": {\n");
784 g_hash_table_iter_init (&iter
, ht
);
787 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
788 vminfo_t
*vminfo
= (vminfo_t
*)value
;
789 const char *type
= vminfo_type_to_string(vminfo
);
792 g_string_append_printf(str
, ",\n");
795 g_string_append_printf(str
,"\"%u\": { \"node\": \"%s\", \"type\": \"%s\", \"version\": %u }",
796 vminfo
->vmid
, vminfo
->nodename
, type
, vminfo
->version
);
799 g_string_append_printf(str
,"}\n");
801 g_string_append_printf(str
,"\n}\n");
803 g_mutex_unlock (&mutex
);
808 // checks if a config line starts with the given prop. if yes, writes a '\0'
809 // at the end of the value, and returns the pointer where the value starts
810 // note: line[line_end] needs to be guaranteed a null byte
812 _get_property_value_from_line(char *line
, size_t line_len
, const char *prop
, size_t prop_len
)
814 if (line_len
<= prop_len
+ 1) return NULL
;
816 if (line
[prop_len
] == ':' && memcmp(line
, prop
, prop_len
) == 0) { // found
817 char *v_start
= &line
[prop_len
+ 1];
818 char *v_end
= &line
[line_len
- 1];
820 // drop initial value whitespaces here already
821 while (v_start
< v_end
&& *v_start
&& isspace(*v_start
)) v_start
++;
823 if (!*v_start
) return NULL
;
825 while (v_end
> v_start
&& isspace(*v_end
)) v_end
--;
826 if (v_end
< &line
[line_len
- 1]) {
836 // checks the conf for lines starting with the given props and
837 // writes the pointers into the correct positions into the 'found' array
838 // afterwards, without initial whitespace(s), we only deal with the format
839 // restriction imposed by our perl VM config parser, main reference is
840 // PVE::QemuServer::parse_vm_config this allows to be very fast and still
842 // main restrictions used for our advantage is the properties match regex:
843 // ($line =~ m/^([a-z][a-z_]*\d*):\s*(.+?)\s*$/) from parse_vm_config
844 // currently we only look at the current configuration in place, i.e., *no*
845 // snapshot and *no* pending changes
847 // min..max is the char range of the first character of the given props,
848 // so that we can return early when checking the line
849 // note: conf must end with a newline
851 _get_property_values(char **found
, char *conf
, int conf_size
, const char **props
, uint8_t num_props
, char min
, char max
)
853 const char *const conf_end
= conf
+ conf_size
;
855 size_t remaining_size
= conf_size
;
858 if (conf_size
== 0) {
862 char *next_newline
= memchr(conf
, '\n', conf_size
);
863 if (next_newline
== NULL
) {
864 return; // valid property lines end with \n, but none in the config
866 *next_newline
= '\0';
868 while (line
!= NULL
) {
869 if (!line
[0]) goto next
;
871 // snapshot or pending section start, but nothing found yet -> not found
872 if (line
[0] == '[') return;
873 // continue early if line does not begin with the min/max char of the properties
874 if (line
[0] < min
|| line
[0] > max
) goto next
;
876 size_t line_len
= next_newline
- line
;
877 for (uint8_t i
= 0; i
< num_props
; i
++) {
878 char * value
= _get_property_value_from_line(line
, line_len
, props
[i
], strlen(props
[i
]));
880 count
+= (found
[i
] != NULL
) & 0x1; // count newly found lines
884 if (count
== num_props
) {
888 line
= next_newline
+ 1;
889 remaining_size
= conf_end
- line
;
890 next_newline
= memchr(line
, '\n', remaining_size
);
891 if (next_newline
== NULL
) {
892 return; // valid property lines end with \n, but none in the config
894 *next_newline
= '\0';
901 _g_str_append_kv_jsonescaped(GString
*str
, const char *k
, const char *v
)
903 g_string_append_printf(str
, "\"%s\": \"", k
);
906 if (*v
== '\\' || *v
== '"') {
907 g_string_append_c(str
, '\\');
909 g_string_append_c(str
, *v
);
912 g_string_append_c(str
, '"');
916 _print_found_properties(
928 _get_property_values(values
, conf
, size
, props
, num_props
, min
, max
);
931 for (uint8_t i
= 0; i
< num_props
; i
++) {
932 if (values
[i
] == NULL
) {
936 g_string_append_c(str
, ',');
939 g_string_append_printf(str
, ",\n");
943 g_string_append_printf(str
, "\"%u\":{", vmid
);
946 _g_str_append_kv_jsonescaped(str
, props
[i
], values
[i
]);
950 g_string_append_c(str
, '}');
957 cfs_create_guest_conf_properties_msg(GString
*str
, memdb_t
*memdb
, const char **props
, uint8_t num_props
, uint32_t vmid
)
959 g_return_val_if_fail(cfs_status
.vmlist
!= NULL
, -EINVAL
);
960 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
962 // Prelock &memdb->mutex in order to enable the usage of memdb_read_nolock
963 // to prevent Deadlocks as in #2553
964 g_mutex_lock (&memdb
->mutex
);
965 g_mutex_lock (&mutex
);
967 g_string_printf(str
, "{\n");
969 GHashTable
*ht
= cfs_status
.vmlist
;
972 GString
*path
= NULL
;
974 char **values
= calloc(num_props
, sizeof(char*));
975 char min
= 'z', max
= 'a';
977 for (uint8_t i
= 0; i
< num_props
; i
++) {
978 if (props
[i
][0] > max
) {
982 if (props
[i
][0] < min
) {
987 if (!g_hash_table_size(ht
)) {
991 if ((path
= g_string_sized_new(256)) == NULL
) {
997 vminfo_t
*vminfo
= (vminfo_t
*) g_hash_table_lookup(cfs_status
.vmlist
, &vmid
);
998 if (vminfo
== NULL
) goto enoent
;
1000 if (!vminfo_to_path(vminfo
, path
)) goto err
;
1002 // use memdb_read_nolock because lock is handled here
1003 int size
= memdb_read_nolock(memdb
, path
->str
, &tmp
);
1004 if (tmp
== NULL
) goto err
;
1006 // conf needs to be newline terminated
1007 if (((char *)tmp
)[size
- 1] != '\n') {
1008 gpointer
new = realloc(tmp
, size
+ 1);
1009 if (new == NULL
) goto err
;
1011 ((char *)tmp
)[size
++] = '\n';
1013 _print_found_properties(str
, tmp
, size
, props
, num_props
, vmid
, values
, min
, max
, 1);
1015 GHashTableIter iter
;
1016 g_hash_table_iter_init (&iter
, ht
);
1018 gpointer key
, value
;
1020 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
1021 vminfo_t
*vminfo
= (vminfo_t
*)value
;
1023 if (!vminfo_to_path(vminfo
, path
)) goto err
;
1025 g_free(tmp
); // no-op if already null
1027 // use memdb_read_nolock because lock is handled here
1028 int size
= memdb_read_nolock(memdb
, path
->str
, &tmp
);
1029 if (tmp
== NULL
) continue;
1031 // conf needs to be newline terminated
1032 if (((char *)tmp
)[size
- 1] != '\n') {
1033 gpointer
new = realloc(tmp
, size
+ 1);
1034 if (new == NULL
) continue;
1036 ((char *)tmp
)[size
++] = '\n';
1039 memset(values
, 0, sizeof(char*)*num_props
); // reset array
1040 first
= _print_found_properties(str
, tmp
, size
, props
, num_props
,
1041 vminfo
->vmid
, values
, min
, max
, first
);
1048 g_string_free(path
, TRUE
);
1050 g_string_append_printf(str
,"\n}\n");
1051 g_mutex_unlock (&mutex
);
1052 g_mutex_unlock (&memdb
->mutex
);
1063 cfs_create_guest_conf_property_msg(GString
*str
, memdb_t
*memdb
, const char *prop
, uint32_t vmid
)
1065 return cfs_create_guest_conf_properties_msg(str
, memdb
, &prop
, 1, vmid
);
1069 record_memdb_change(const char *path
)
1071 g_return_if_fail(cfs_status
.memdb_changes
!= 0);
1075 if ((ce
= (memdb_change_t
*)g_hash_table_lookup(cfs_status
.memdb_changes
, path
))) {
1081 record_memdb_reload(void)
1083 for (int i
= 0; i
< G_N_ELEMENTS(memdb_change_array
); i
++) {
1084 memdb_change_array
[i
].version
++;
1095 g_return_val_if_fail(kvhash
!= NULL
, FALSE
);
1096 g_return_val_if_fail(key
!= NULL
, FALSE
);
1097 g_return_val_if_fail(data
!= NULL
, FALSE
);
1101 g_hash_table_remove(kvhash
, key
);
1102 } else if ((entry
= (kventry_t
*)g_hash_table_lookup(kvhash
, key
))) {
1103 g_free(entry
->data
);
1104 entry
->data
= g_memdup(data
, len
);
1108 kventry_t
*entry
= g_new0(kventry_t
, 1);
1110 entry
->key
= g_strdup(key
);
1111 entry
->data
= g_memdup(data
, len
);
1114 g_hash_table_replace(kvhash
, entry
->key
, entry
);
// RRD schema for per-node statistics. NULL-terminated: callers compute the
// argument count as (element count - 1) to skip the terminator.
static const char *rrd_def_node[] = {
    "DS:loadavg:GAUGE:120:0:U",
    "DS:maxcpu:GAUGE:120:0:U",
    "DS:cpu:GAUGE:120:0:U",
    "DS:iowait:GAUGE:120:0:U",
    "DS:memtotal:GAUGE:120:0:U",
    "DS:memused:GAUGE:120:0:U",
    "DS:swaptotal:GAUGE:120:0:U",
    "DS:swapused:GAUGE:120:0:U",
    "DS:roottotal:GAUGE:120:0:U",
    "DS:rootused:GAUGE:120:0:U",
    "DS:netin:DERIVE:120:0:U",
    "DS:netout:DERIVE:120:0:U",

    "RRA:AVERAGE:0.5:1:70", // 1 min avg - one hour
    "RRA:AVERAGE:0.5:30:70", // 30 min avg - one day
    "RRA:AVERAGE:0.5:180:70", // 3 hour avg - one week
    "RRA:AVERAGE:0.5:720:70", // 12 hour avg - one month
    "RRA:AVERAGE:0.5:10080:70", // 7 day avg - one year

    "RRA:MAX:0.5:1:70", // 1 min max - one hour
    "RRA:MAX:0.5:30:70", // 30 min max - one day
    "RRA:MAX:0.5:180:70", // 3 hour max - one week
    "RRA:MAX:0.5:720:70", // 12 hour max - one month
    "RRA:MAX:0.5:10080:70", // 7 day max - one year

    NULL,
};
// RRD schema for per-guest (VM/CT) statistics. NULL-terminated: callers
// compute the argument count as (element count - 1) to skip the terminator.
static const char *rrd_def_vm[] = {
    "DS:maxcpu:GAUGE:120:0:U",
    "DS:cpu:GAUGE:120:0:U",
    "DS:maxmem:GAUGE:120:0:U",
    "DS:mem:GAUGE:120:0:U",
    "DS:maxdisk:GAUGE:120:0:U",
    "DS:disk:GAUGE:120:0:U",
    "DS:netin:DERIVE:120:0:U",
    "DS:netout:DERIVE:120:0:U",
    "DS:diskread:DERIVE:120:0:U",
    "DS:diskwrite:DERIVE:120:0:U",

    "RRA:AVERAGE:0.5:1:70", // 1 min avg - one hour
    "RRA:AVERAGE:0.5:30:70", // 30 min avg - one day
    "RRA:AVERAGE:0.5:180:70", // 3 hour avg - one week
    "RRA:AVERAGE:0.5:720:70", // 12 hour avg - one month
    "RRA:AVERAGE:0.5:10080:70", // 7 day avg - one year

    "RRA:MAX:0.5:1:70", // 1 min max - one hour
    "RRA:MAX:0.5:30:70", // 30 min max - one day
    "RRA:MAX:0.5:180:70", // 3 hour max - one week
    "RRA:MAX:0.5:720:70", // 12 hour max - one month
    "RRA:MAX:0.5:10080:70", // 7 day max - one year

    NULL,
};
// RRD schema for per-storage statistics. NULL-terminated: callers compute
// the argument count as (element count - 1) to skip the terminator.
static const char *rrd_def_storage[] = {
    "DS:total:GAUGE:120:0:U",
    "DS:used:GAUGE:120:0:U",

    "RRA:AVERAGE:0.5:1:70", // 1 min avg - one hour
    "RRA:AVERAGE:0.5:30:70", // 30 min avg - one day
    "RRA:AVERAGE:0.5:180:70", // 3 hour avg - one week
    "RRA:AVERAGE:0.5:720:70", // 12 hour avg - one month
    "RRA:AVERAGE:0.5:10080:70", // 7 day avg - one year

    "RRA:MAX:0.5:1:70", // 1 min max - one hour
    "RRA:MAX:0.5:30:70", // 30 min max - one day
    "RRA:MAX:0.5:180:70", // 3 hour max - one week
    "RRA:MAX:0.5:720:70", // 12 hour max - one month
    "RRA:MAX:0.5:10080:70", // 7 day max - one year

    NULL,
};
1192 #define RRDDIR "/var/lib/rrdcached/db"
1196 const char *filename
,
1198 const char *rrddef
[])
1200 /* start at day boundary */
1203 struct tm
*ltm
= localtime(&ctime
);
1209 if (rrd_create_r(filename
, 60, timelocal(ltm
), argcount
, rrddef
)) {
1210 cfs_message("RRD create error %s: %s", filename
, rrd_get_error());
1214 static inline const char *
1220 while (*data
&& found
< count
) {
1233 g_return_if_fail(key
!= NULL
);
1234 g_return_if_fail(data
!= NULL
);
1235 g_return_if_fail(len
> 0);
1236 g_return_if_fail(len
< 4096);
1238 static const char *rrdcsock
= "unix:/var/run/rrdcached.sock";
1241 if (rrdc_connect(rrdcsock
) != 0)
1244 char *filename
= NULL
;
1248 if (strncmp(key
, "pve2-node/", 10) == 0) {
1249 const char *node
= key
+ 10;
1253 if (strchr(node
, '/') != NULL
)
1256 if (strlen(node
) < 1)
1259 filename
= g_strdup_printf(RRDDIR
"/%s", key
);
1261 if (!g_file_test(filename
, G_FILE_TEST_EXISTS
)) {
1263 mkdir(RRDDIR
"/pve2-node", 0755);
1264 int argcount
= sizeof(rrd_def_node
)/sizeof(void*) - 1;
1265 create_rrd_file(filename
, argcount
, rrd_def_node
);
1268 } else if ((strncmp(key
, "pve2-vm/", 8) == 0) ||
1269 (strncmp(key
, "pve2.3-vm/", 10) == 0)) {
1272 if (strncmp(key
, "pve2-vm/", 8) == 0) {
1280 if (strchr(vmid
, '/') != NULL
)
1283 if (strlen(vmid
) < 1)
1286 filename
= g_strdup_printf(RRDDIR
"/%s/%s", "pve2-vm", vmid
);
1288 if (!g_file_test(filename
, G_FILE_TEST_EXISTS
)) {
1290 mkdir(RRDDIR
"/pve2-vm", 0755);
1291 int argcount
= sizeof(rrd_def_vm
)/sizeof(void*) - 1;
1292 create_rrd_file(filename
, argcount
, rrd_def_vm
);
1295 } else if (strncmp(key
, "pve2-storage/", 13) == 0) {
1296 const char *node
= key
+ 13;
1298 const char *storage
= node
;
1299 while (*storage
&& *storage
!= '/')
1302 if (*storage
!= '/' || ((storage
- node
) < 1))
1307 if (strchr(storage
, '/') != NULL
)
1310 if (strlen(storage
) < 1)
1313 filename
= g_strdup_printf(RRDDIR
"/%s", key
);
1315 if (!g_file_test(filename
, G_FILE_TEST_EXISTS
)) {
1317 mkdir(RRDDIR
"/pve2-storage", 0755);
1319 char *dir
= g_path_get_dirname(filename
);
1323 int argcount
= sizeof(rrd_def_storage
)/sizeof(void*) - 1;
1324 create_rrd_file(filename
, argcount
, rrd_def_storage
);
1331 const char *dp
= skip
? rrd_skip_data(data
, skip
) : data
;
1333 const char *update_args
[] = { dp
, NULL
};
1337 if ((status
= rrdc_update(filename
, 1, update_args
)) != 0) {
1338 cfs_message("RRDC update error %s: %d", filename
, status
);
1341 if (rrd_update_r(filename
, NULL
, 1, update_args
) != 0) {
1342 cfs_message("RRD update error %s: %s", filename
, rrd_get_error());
1348 if (rrd_update_r(filename
, NULL
, 1, update_args
) != 0) {
1349 cfs_message("RRD update error %s: %s", filename
, rrd_get_error());
1360 cfs_critical("RRD update error: unknown/wrong key %s", key
);
1370 rrdentry_t
*entry
= (rrdentry_t
*)value
;
1371 uint32_t ctime
= GPOINTER_TO_UINT(user_data
);
1373 int diff
= ctime
- entry
->time
;
1375 /* remove everything older than 5 minutes */
1378 return (diff
> expire
) ? TRUE
: FALSE
;
1381 static char *rrd_dump_buf
= NULL
;
1382 static time_t rrd_dump_last
= 0;
1385 cfs_rrd_dump(GString
*str
)
1389 g_mutex_lock (&mutex
);
1392 if (rrd_dump_buf
&& (ctime
- rrd_dump_last
) < 2) {
1393 g_string_assign(str
, rrd_dump_buf
);
1394 g_mutex_unlock (&mutex
);
1398 /* remove old data */
1399 g_hash_table_foreach_remove(cfs_status
.rrdhash
, rrd_entry_is_old
,
1400 GUINT_TO_POINTER(ctime
));
1402 g_string_set_size(str
, 0);
1404 GHashTableIter iter
;
1405 gpointer key
, value
;
1407 g_hash_table_iter_init (&iter
, cfs_status
.rrdhash
);
1409 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
1410 rrdentry_t
*entry
= (rrdentry_t
*)value
;
1411 g_string_append(str
, key
);
1412 g_string_append(str
, ":");
1413 g_string_append(str
, entry
->data
);
1414 g_string_append(str
, "\n");
1417 g_string_append_c(str
, 0); // never return undef
1419 rrd_dump_last
= ctime
;
1421 g_free(rrd_dump_buf
);
1422 rrd_dump_buf
= g_strdup(str
->str
);
1424 g_mutex_unlock (&mutex
);
1430 const char *nodename
,
1434 g_return_val_if_fail(iphash
!= NULL
, FALSE
);
1435 g_return_val_if_fail(nodename
!= NULL
, FALSE
);
1436 g_return_val_if_fail(ip
!= NULL
, FALSE
);
1437 g_return_val_if_fail(len
> 0, FALSE
);
1438 g_return_val_if_fail(len
< 256, FALSE
);
1439 g_return_val_if_fail(ip
[len
-1] == 0, FALSE
);
1441 char *oldip
= (char *)g_hash_table_lookup(iphash
, nodename
);
1443 if (!oldip
|| (strcmp(oldip
, ip
) != 0)) {
1444 cfs_status
.clinfo_version
++;
1445 g_hash_table_replace(iphash
, g_strdup(nodename
), g_strdup(ip
));
1453 GHashTable
*rrdhash
,
1458 g_return_val_if_fail(rrdhash
!= NULL
, FALSE
);
1459 g_return_val_if_fail(key
!= NULL
, FALSE
);
1460 g_return_val_if_fail(data
!= NULL
, FALSE
);
1461 g_return_val_if_fail(len
> 0, FALSE
);
1462 g_return_val_if_fail(len
< 4096, FALSE
);
1463 g_return_val_if_fail(data
[len
-1] == 0, FALSE
);
1466 if ((entry
= (rrdentry_t
*)g_hash_table_lookup(rrdhash
, key
))) {
1467 g_free(entry
->data
);
1468 entry
->data
= g_memdup(data
, len
);
1470 entry
->time
= time(NULL
);
1472 rrdentry_t
*entry
= g_new0(rrdentry_t
, 1);
1474 entry
->key
= g_strdup(key
);
1475 entry
->data
= g_memdup(data
, len
);
1477 entry
->time
= time(NULL
);
1479 g_hash_table_replace(rrdhash
, entry
->key
, entry
);
1482 update_rrd_data(key
, data
, len
);
1488 kvstore_send_update_message(
1494 if (!dfsm_is_initialized(dfsm
))
1497 struct iovec iov
[2];
1500 g_strlcpy(name
, key
, sizeof(name
));
1502 iov
[0].iov_base
= &name
;
1503 iov
[0].iov_len
= sizeof(name
);
1505 iov
[1].iov_base
= (char *)data
;
1506 iov
[1].iov_len
= len
;
1508 if (dfsm_send_message(dfsm
, KVSTORE_MESSAGE_UPDATE
, iov
, 2) == CS_OK
)
1514 static clog_entry_t
*
1515 kvstore_parse_log_message(
1519 g_return_val_if_fail(msg
!= NULL
, NULL
);
1521 if (msg_len
< sizeof(clog_entry_t
)) {
1522 cfs_critical("received short log message (%zu < %zu)", msg_len
, sizeof(clog_entry_t
));
1526 clog_entry_t
*entry
= (clog_entry_t
*)msg
;
1528 uint32_t size
= sizeof(clog_entry_t
) + entry
->node_len
+
1529 entry
->ident_len
+ entry
->tag_len
+ entry
->msg_len
;
1531 if (msg_len
!= size
) {
1532 cfs_critical("received log message with wrong size (%zu != %u)", msg_len
, size
);
1536 char *msgptr
= entry
->data
;
1538 if (*((char *)msgptr
+ entry
->node_len
- 1)) {
1539 cfs_critical("unterminated string in log message");
1542 msgptr
+= entry
->node_len
;
1544 if (*((char *)msgptr
+ entry
->ident_len
- 1)) {
1545 cfs_critical("unterminated string in log message");
1548 msgptr
+= entry
->ident_len
;
1550 if (*((char *)msgptr
+ entry
->tag_len
- 1)) {
1551 cfs_critical("unterminated string in log message");
1554 msgptr
+= entry
->tag_len
;
1556 if (*((char *)msgptr
+ entry
->msg_len
- 1)) {
1557 cfs_critical("unterminated string in log message");
/*
 * Validate and split a KVSTORE_MESSAGE_UPDATE payload into its key and
 * data parts.
 *
 * Counterpart of kvstore_send_update_message(): the first 256 bytes
 * hold the NUL-terminated key, the payload follows at offset 256.
 * On success *key/*data point INTO the caller supplied buffer (no
 * copy) and *len receives the payload length; returns FALSE for short
 * messages or an unterminated key.
 */
static gboolean
kvstore_parse_update_message(
	const void *msg,
	size_t msg_len,
	const char **key,
	gconstpointer *data,
	guint32 *len)
{
	g_return_val_if_fail(msg != NULL, FALSE);
	g_return_val_if_fail(key != NULL, FALSE);
	g_return_val_if_fail(data != NULL, FALSE);
	g_return_val_if_fail(len != NULL, FALSE);

	if (msg_len < 256) {
		cfs_critical("received short kvstore message (%zu < 256)", msg_len);
		return FALSE;
	}

	/* test if key is null terminated */
	int i = 0;
	for (i = 0; i < 256; i++)
		if (((char *)msg)[i] == 0)
			break;

	if (i == 256)
		return FALSE;

	*len = msg_len - 256;
	*key = msg;
	*data = (char *) msg + 256;

	return TRUE;
}
/*
 * Append the status value stored under `key` for node `nodename` to
 * `str`.
 *
 * A NULL or empty nodename, or our own node name, selects the local
 * kvstore hash; otherwise the per-node hash from the cluster info is
 * used (if that node is known).
 *
 * Returns 0 on success, -EINVAL on bad arguments; any other failure
 * (unknown node or key) leaves `str` untouched and returns the initial
 * error value.  Thread-safe: all lookups happen under `mutex`.
 */
int
cfs_create_status_msg(
	GString *str,
	const char *nodename,
	const char *key)
{
	g_return_val_if_fail(str != NULL, -EINVAL);
	g_return_val_if_fail(key != NULL, -EINVAL);

	int res = -ENOENT;

	GHashTable *kvhash = NULL;

	g_mutex_lock (&mutex);

	if (!nodename || !nodename[0] || !strcmp(nodename, cfs.nodename)) {
		/* local node — use our own kvstore */
		kvhash = cfs_status.kvhash;
	} else if (cfs_status.clinfo && cfs_status.clinfo->nodes_byname) {
		/* remote node — use the per-node hash, if the node is known */
		cfs_clnode_t *clnode;
		if ((clnode = g_hash_table_lookup(cfs_status.clinfo->nodes_byname, nodename)))
			kvhash = clnode->kvhash;
	}

	kventry_t *entry;
	if (kvhash && (entry = (kventry_t *)g_hash_table_lookup(kvhash, key))) {
		g_string_append_len(str, entry->data, entry->len);
		res = 0;
	}

	g_mutex_unlock (&mutex);

	return res;
}
1639 g_return_val_if_fail(key
!= NULL
, FALSE
);
1640 g_return_val_if_fail(data
!= NULL
, FALSE
);
1641 g_return_val_if_fail(cfs_status
.kvhash
!= NULL
, FALSE
);
1643 if (len
> CFS_MAX_STATUS_SIZE
)
1646 g_mutex_lock (&mutex
);
1650 if (strncmp(key
, "rrd/", 4) == 0) {
1651 res
= rrdentry_hash_set(cfs_status
.rrdhash
, key
+ 4, data
, len
);
1652 } else if (!strcmp(key
, "nodeip")) {
1653 res
= nodeip_hash_set(cfs_status
.iphash
, cfs
.nodename
, data
, len
);
1655 res
= kventry_hash_set(cfs_status
.kvhash
, key
, data
, len
);
1657 g_mutex_unlock (&mutex
);
1659 if (cfs_status
.kvstore
)
1660 kvstore_send_update_message(cfs_status
.kvstore
, key
, data
, len
);
1662 return res
? 0 : -ENOMEM
;
/*
 * Record a status update received from another cluster node.
 *
 * Mirrors cfs_status_set() for remote data: "rrd/" keys update the
 * shared RRD hash, "nodeip" the IP hash, everything else the per-node
 * kvstore hash (created lazily).  Updates from unknown nodes, or
 * before cluster info is available, are ignored on purpose.
 *
 * Thread-safe: the whole update happens under `mutex`.
 */
static gboolean
cfs_kvstore_node_set(
	uint32_t nodeid,
	const char *key,
	gconstpointer data,
	size_t len)
{
	g_return_val_if_fail(nodeid != 0, FALSE);
	g_return_val_if_fail(key != NULL, FALSE);
	g_return_val_if_fail(data != NULL, FALSE);

	gboolean res = TRUE;

	g_mutex_lock (&mutex);

	if (!cfs_status.clinfo || !cfs_status.clinfo->nodes_byid)
		goto ret; /* ignore */

	/* hash is keyed by nodeid (uint32_t), so pass its address */
	cfs_clnode_t *clnode = g_hash_table_lookup(cfs_status.clinfo->nodes_byid, &nodeid);
	if (!clnode)
		goto ret; /* ignore */

	cfs_debug("got node %d status update %s", nodeid, key);

	if (strncmp(key, "rrd/", 4) == 0) {
		rrdentry_hash_set(cfs_status.rrdhash, key + 4, data, len);
	} else if (!strcmp(key, "nodeip")) {
		nodeip_hash_set(cfs_status.iphash, clnode->name, data, len);
	} else {
		/* per-node kvstore hash is created on first use */
		if (!clnode->kvhash) {
			if (!(clnode->kvhash = kventry_hash_new())) {
				goto ret; /*ignore */
			}
		}

		kventry_hash_set(clnode->kvhash, key, data, len);
	}
ret:
	g_mutex_unlock (&mutex);

	return res;
}
/*
 * Re-broadcast our complete local kvstore to the cluster.
 *
 * Iterates over every entry in cfs_status.kvhash and sends it via
 * kvstore_send_update_message() (best effort — send failures are not
 * reported; `res` stays TRUE).
 *
 * Thread-safe: iteration happens under `mutex`.
 */
static gboolean
cfs_kvstore_sync(void)
{
	g_return_val_if_fail(cfs_status.kvhash != NULL, FALSE);
	g_return_val_if_fail(cfs_status.kvstore != NULL, FALSE);

	gboolean res = TRUE;

	g_mutex_lock (&mutex);

	GHashTable *ht = cfs_status.kvhash;
	GHashTableIter iter;
	gpointer key, value;

	g_hash_table_iter_init (&iter, ht);

	while (g_hash_table_iter_next (&iter, &key, &value)) {
		kventry_t *entry = (kventry_t *)value;
		kvstore_send_update_message(cfs_status.kvstore, entry->key, entry->data, entry->len);
	}

	g_mutex_unlock (&mutex);

	return res;
}
/*
 * dfsm callback: dispatch an incoming kvstore CPG message.
 *
 * KVSTORE_MESSAGE_UPDATE  -> parse and store under the sender's node,
 * KVSTORE_MESSAGE_LOG     -> parse and insert into the cluster log,
 * anything else           -> logged as an error.
 * Messages from our own node are ignored (we already applied them
 * locally in cfs_status_set()).
 *
 * NOTE(review): signature and the *res_ptr epilogue reconstructed from
 * fragments — confirm against dfsm.h's dfsm_deliver_fn typedef.
 */
static int
dfsm_deliver(
	dfsm_t *dfsm,
	gpointer data,
	int *res_ptr,
	uint32_t nodeid,
	uint32_t pid,
	uint16_t msg_type,
	uint32_t msg_time,
	const void *msg,
	size_t msg_len)
{
	g_return_val_if_fail(dfsm != NULL, -1);
	g_return_val_if_fail(msg != NULL, -1);
	g_return_val_if_fail(res_ptr != NULL, -1);

	/* ignore message for ourself */
	if (dfsm_nodeid_is_local(dfsm, nodeid, pid))
		goto ret;

	if (msg_type == KVSTORE_MESSAGE_UPDATE) {
		const char *key;
		gconstpointer data;	/* shadows the unused callback arg */
		guint32 len;
		if (kvstore_parse_update_message(msg, msg_len, &key, &data, &len)) {
			cfs_kvstore_node_set(nodeid, key, data, len);
		} else {
			cfs_critical("cant parse update message");
		}
	} else if (msg_type == KVSTORE_MESSAGE_LOG) {
		cfs_message("received log"); // fixme: remove
		const clog_entry_t *entry;
		if ((entry = kvstore_parse_log_message(msg, msg_len))) {
			clusterlog_insert(cfs_status.clusterlog, entry);
		} else {
			cfs_critical("cant parse log message");
		}
	} else {
		cfs_critical("received unknown message type %d\n", msg_type);
	}

ret:
	*res_ptr = 0;
	return 0;
}
1788 const struct cpg_address
*member_list
,
1789 size_t member_list_entries
)
1791 g_return_if_fail(dfsm
!= NULL
);
1792 g_return_if_fail(member_list
!= NULL
);
1794 cfs_debug("enter %s", __func__
);
1796 g_mutex_lock (&mutex
);
1798 cfs_clinfo_t
*clinfo
= cfs_status
.clinfo
;
1800 if (clinfo
&& clinfo
->nodes_byid
) {
1802 GHashTable
*ht
= clinfo
->nodes_byid
;
1803 GHashTableIter iter
;
1804 gpointer key
, value
;
1806 g_hash_table_iter_init (&iter
, ht
);
1808 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
1809 cfs_clnode_t
*node
= (cfs_clnode_t
*)value
;
1810 node
->online
= FALSE
;
1813 for (int i
= 0; i
< member_list_entries
; i
++) {
1815 if ((node
= g_hash_table_lookup(clinfo
->nodes_byid
, &member_list
[i
].nodeid
))) {
1816 node
->online
= TRUE
;
1820 cfs_status
.clinfo_version
++;
1823 g_mutex_unlock (&mutex
);
1830 unsigned int *res_len
)
1832 g_return_val_if_fail(dfsm
!= NULL
, NULL
);
1834 gpointer msg
= clusterlog_get_state(cfs_status
.clusterlog
, res_len
);
/*
 * dfsm callback: this state machine never sends verbose update
 * messages during sync, so receiving one indicates a protocol error.
 */
static int
dfsm_process_update(
	dfsm_t *dfsm,
	gpointer data,
	dfsm_sync_info_t *syncinfo,
	uint32_t nodeid,
	uint32_t pid,
	const void *msg,
	size_t msg_len)
{
	cfs_critical("%s: received unexpected update message", __func__);

	return -1;
}
/*
 * dfsm callback: merge the cluster-log state collected from all nodes
 * during sync.
 *
 * Builds an array with one clog_base_t pointer per node (NULL for
 * entries whose state failed the size check), remembers our own index,
 * and hands everything to clusterlog_merge().  Afterwards the local
 * kvstore is re-broadcast via cfs_kvstore_sync().
 *
 * NOTE(review): `clog` is a VLA sized by node_count — fine for small
 * clusters, but worth confirming node_count is bounded.
 */
static int
dfsm_process_state_update(
	dfsm_t *dfsm,
	gpointer data,
	dfsm_sync_info_t *syncinfo)
{
	g_return_val_if_fail(dfsm != NULL, -1);
	g_return_val_if_fail(syncinfo != NULL, -1);

	clog_base_t *clog[syncinfo->node_count];

	int local_index = -1;
	for (int i = 0; i < syncinfo->node_count; i++) {
		dfsm_node_info_t *ni = &syncinfo->nodes[i];
		clog[i] = NULL;

		if (syncinfo->local == ni)
			local_index = i;

		/* accept state only if its size matches the embedded log size */
		clog_base_t *base = (clog_base_t *)ni->state;
		if (ni->state_len > 8 && ni->state_len == clog_size(base)) {
			clog[i] = ni->state;
		} else {
			cfs_critical("received log with wrong size %u", ni->state_len);
		}
	}

	if (!clusterlog_merge(cfs_status.clusterlog, clog, syncinfo->node_count, local_index)) {
		cfs_critical("unable to merge log files");
	}

	/* re-announce our local key/value state after the merge */
	cfs_kvstore_sync();

	return 1;
}
/*
 * dfsm callback: commit phase — nothing to do for the kvstore, the
 * state was already applied in dfsm_process_state_update().
 *
 * NOTE(review): return value reconstructed from fragments — confirm
 * against dfsm.h's dfsm_commit_fn contract.
 */
static int
dfsm_commit(
	dfsm_t *dfsm,
	gpointer data,
	dfsm_sync_info_t *syncinfo)
{
	g_return_val_if_fail(dfsm != NULL, -1);
	g_return_val_if_fail(syncinfo != NULL, -1);

	return 1;
}
/*
 * dfsm callback: invoked once the kvstore dfsm is fully synced.
 *
 * Publishes our node IP ("nodeip" key) to the cluster via
 * cfs_status_set(), which also broadcasts it to the other members.
 * Falls back to cfs.ip when the IP hash has no entry for us yet.
 */
static void
dfsm_synced(dfsm_t *dfsm)
{
	g_return_if_fail(dfsm != NULL);

	char *ip = (char *)g_hash_table_lookup(cfs_status.iphash, cfs.nodename);
	if (!ip)
		ip = cfs.ip;	/* NOTE(review): fallback reconstructed from fragments */

	cfs_status_set("nodeip", ip, strlen(ip) + 1);
}
1919 dfsm_sync_info_t
*syncinfo
)
/* dfsm callback table for the kvstore CPG group (KVSTORE_CPG_GROUP_NAME). */
static dfsm_callbacks_t kvstore_dfsm_callbacks = {
	.dfsm_deliver_fn = dfsm_deliver,
	.dfsm_confchg_fn = dfsm_confchg,
	.dfsm_get_state_fn = dfsm_get_state,
	.dfsm_process_state_update_fn = dfsm_process_state_update,
	.dfsm_process_update_fn = dfsm_process_update,
	.dfsm_commit_fn = dfsm_commit,
	.dfsm_cleanup_fn = dfsm_cleanup,
	.dfsm_synced_fn = dfsm_synced,
};
1937 cfs_status_dfsm_new(void)
1939 g_mutex_lock (&mutex
);
1941 cfs_status
.kvstore
= dfsm_new(NULL
, KVSTORE_CPG_GROUP_NAME
, G_LOG_DOMAIN
,
1942 0, &kvstore_dfsm_callbacks
);
1943 g_mutex_unlock (&mutex
);
1945 return cfs_status
.kvstore
;
1949 cfs_is_quorate(void)
1951 g_mutex_lock (&mutex
);
1952 gboolean res
= cfs_status
.quorate
;
1953 g_mutex_unlock (&mutex
);
1963 g_mutex_lock (&mutex
);
1965 uint32_t prev_quorate
= cfs_status
.quorate
;
1966 cfs_status
.quorate
= quorate
;
1968 if (!prev_quorate
&& cfs_status
.quorate
) {
1970 cfs_message("node has quorum");
1973 if (prev_quorate
&& !cfs_status
.quorate
) {
1975 cfs_message("node lost quorum");
1978 g_mutex_unlock (&mutex
);