2 Copyright (C) 2010 Proxmox Server Solutions GmbH
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU Affero General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU Affero General Public License for more details.
14 You should have received a copy of the GNU Affero General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>.
17 Author: Dietmar Maurer <dietmar@proxmox.com>
21 #define G_LOG_DOMAIN "status"
25 #endif /* HAVE_CONFIG_H */
32 #include <sys/syslog.h>
34 #include <rrd_client.h>
38 #include "cfs-utils.h"
43 #define KVSTORE_CPG_GROUP_NAME "pve_kvstore_v1"
// Message types exchanged over the kvstore CPG/DFSM group (see
// dfsm_send_message() call sites below). Values are part of the wire
// protocol — do not renumber.
46 KVSTORE_MESSAGE_UPDATE
= 1,
47 KVSTORE_MESSAGE_UPDATE_COMPLETE
= 2,
48 KVSTORE_MESSAGE_LOG
= 3,
// Monotonically increasing counter; each newly inserted vminfo gets
// ++vminfo_version_counter as its version (see vmlist_hash_insert_vm).
// NOTE(review): appears to be protected by the file-global mutex at all
// use sites visible here — confirm before relying on that.
51 static uint32_t vminfo_version_counter
;
// Tracked configuration paths: each entry carries a per-path version
// counter that record_memdb_change() bumps on writes and
// cfs_create_version_msg() exposes, so clients can cheaply poll for
// changes to these specific files.
79 static memdb_change_t memdb_change_array
[] = {
80 { .path
= "corosync.conf" },
81 { .path
= "corosync.conf.new" },
82 { .path
= "storage.cfg" },
83 { .path
= "user.cfg" },
84 { .path
= "domains.cfg" },
85 { .path
= "priv/shadow.cfg" },
86 { .path
= "priv/acme/plugins.cfg" },
87 { .path
= "priv/tfa.cfg" },
88 { .path
= "priv/token.cfg" },
89 { .path
= "datacenter.cfg" },
90 { .path
= "vzdump.cron" },
91 { .path
= "ha/crm_commands" },
92 { .path
= "ha/manager_status" },
93 { .path
= "ha/resources.cfg" },
94 { .path
= "ha/groups.cfg" },
95 { .path
= "ha/fence.cfg" },
96 { .path
= "status.cfg" },
97 { .path
= "replication.cfg" },
98 { .path
= "ceph.conf" },
99 { .path
= "sdn/vnets.cfg" },
100 { .path
= "sdn/zones.cfg" },
101 { .path
= "sdn/controllers.cfg" },
102 { .path
= "virtual-guest/cpu-models.conf" },
112 cfs_clinfo_t
*clinfo
;
113 uint32_t clinfo_version
;
116 uint32_t vmlist_version
;
123 GHashTable
*memdb_changes
;
125 clusterlog_t
*clusterlog
;
128 static cfs_status_t cfs_status
;
140 uint32_t cman_version
;
142 GHashTable
*nodes_byid
;
143 GHashTable
*nodes_byname
;
147 g_int32_hash (gconstpointer v
)
149 return *(const uint32_t *) v
;
153 g_int32_equal (gconstpointer v1
,
156 return *((const uint32_t*) v1
) == *((const uint32_t*) v2
);
159 static void vminfo_free(vminfo_t
*vminfo
)
161 g_return_if_fail(vminfo
!= NULL
);
163 if (vminfo
->nodename
)
164 g_free(vminfo
->nodename
);
170 static const char *vminfo_type_to_string(vminfo_t
*vminfo
)
172 if (vminfo
->vmtype
== VMTYPE_QEMU
) {
174 } else if (vminfo
->vmtype
== VMTYPE_OPENVZ
) {
176 } else if (vminfo
->vmtype
== VMTYPE_LXC
) {
183 static const char *vminfo_type_to_path_type(vminfo_t
*vminfo
)
185 if (vminfo
->vmtype
== VMTYPE_QEMU
) {
186 return "qemu-server"; // special case..
188 return vminfo_type_to_string(vminfo
);
192 int vminfo_to_path(vminfo_t
*vminfo
, GString
*path
)
194 g_return_val_if_fail(vminfo
!= NULL
, -1);
195 g_return_val_if_fail(path
!= NULL
, -1);
197 if (!vminfo
->nodename
)
200 const char *type
= vminfo_type_to_path_type(vminfo
);
201 g_string_printf(path
, "/nodes/%s/%s/%u.conf", vminfo
->nodename
, type
, vminfo
->vmid
);
206 void cfs_clnode_destroy(
207 cfs_clnode_t
*clnode
)
209 g_return_if_fail(clnode
!= NULL
);
212 g_hash_table_destroy(clnode
->kvhash
);
215 g_free(clnode
->name
);
220 cfs_clnode_t
*cfs_clnode_new(
225 g_return_val_if_fail(name
!= NULL
, NULL
);
227 cfs_clnode_t
*clnode
= g_new0(cfs_clnode_t
, 1);
231 clnode
->name
= g_strdup(name
);
232 clnode
->nodeid
= nodeid
;
233 clnode
->votes
= votes
;
238 gboolean
cfs_clinfo_destroy(
239 cfs_clinfo_t
*clinfo
)
241 g_return_val_if_fail(clinfo
!= NULL
, FALSE
);
243 if (clinfo
->cluster_name
)
244 g_free(clinfo
->cluster_name
);
246 if (clinfo
->nodes_byname
)
247 g_hash_table_destroy(clinfo
->nodes_byname
);
249 if (clinfo
->nodes_byid
)
250 g_hash_table_destroy(clinfo
->nodes_byid
);
257 cfs_clinfo_t
*cfs_clinfo_new(
258 const char *cluster_name
,
259 uint32_t cman_version
)
261 g_return_val_if_fail(cluster_name
!= NULL
, NULL
);
263 cfs_clinfo_t
*clinfo
= g_new0(cfs_clinfo_t
, 1);
267 clinfo
->cluster_name
= g_strdup(cluster_name
);
268 clinfo
->cman_version
= cman_version
;
270 if (!(clinfo
->nodes_byid
= g_hash_table_new_full(
271 g_int32_hash
, g_int32_equal
, NULL
,
272 (GDestroyNotify
)cfs_clnode_destroy
)))
275 if (!(clinfo
->nodes_byname
= g_hash_table_new(g_str_hash
, g_str_equal
)))
281 cfs_clinfo_destroy(clinfo
);
286 gboolean
cfs_clinfo_add_node(
287 cfs_clinfo_t
*clinfo
,
288 cfs_clnode_t
*clnode
)
290 g_return_val_if_fail(clinfo
!= NULL
, FALSE
);
291 g_return_val_if_fail(clnode
!= NULL
, FALSE
);
293 g_hash_table_replace(clinfo
->nodes_byid
, &clnode
->nodeid
, clnode
);
294 g_hash_table_replace(clinfo
->nodes_byname
, clnode
->name
, clnode
);
300 cfs_create_memberlist_msg(
303 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
305 g_mutex_lock (&mutex
);
307 g_string_append_printf(str
,"{\n");
311 cfs_clinfo_t
*clinfo
= cfs_status
.clinfo
;
313 if (clinfo
&& clinfo
->nodes_byid
)
314 nodecount
= g_hash_table_size(clinfo
->nodes_byid
);
317 g_string_append_printf(str
, "\"nodename\": \"%s\",\n", cfs
.nodename
);
318 g_string_append_printf(str
, "\"version\": %u,\n", cfs_status
.clinfo_version
);
320 g_string_append_printf(str
, "\"cluster\": { ");
321 g_string_append_printf(str
, "\"name\": \"%s\", \"version\": %d, "
322 "\"nodes\": %d, \"quorate\": %d ",
323 clinfo
->cluster_name
, clinfo
->cman_version
,
324 nodecount
, cfs_status
.quorate
);
326 g_string_append_printf(str
,"},\n");
327 g_string_append_printf(str
,"\"nodelist\": {\n");
329 GHashTable
*ht
= clinfo
->nodes_byid
;
333 g_hash_table_iter_init (&iter
, ht
);
336 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
337 cfs_clnode_t
*node
= (cfs_clnode_t
*)value
;
338 if (i
) g_string_append_printf(str
, ",\n");
341 g_string_append_printf(str
, " \"%s\": { \"id\": %d, \"online\": %d",
342 node
->name
, node
->nodeid
, node
->online
);
345 char *ip
= (char *)g_hash_table_lookup(cfs_status
.iphash
, node
->name
);
347 g_string_append_printf(str
, ", \"ip\": \"%s\"", ip
);
350 g_string_append_printf(str
, "}");
353 g_string_append_printf(str
,"\n }\n");
355 g_string_append_printf(str
, "\"nodename\": \"%s\",\n", cfs
.nodename
);
356 g_string_append_printf(str
, "\"version\": %u\n", cfs_status
.clinfo_version
);
359 g_string_append_printf(str
,"}\n");
361 g_mutex_unlock (&mutex
);
367 kventry_free(kventry_t
*entry
)
369 g_return_if_fail(entry
!= NULL
);
377 kventry_hash_new(void)
379 return g_hash_table_new_full(g_str_hash
, g_str_equal
, NULL
,
380 (GDestroyNotify
)kventry_free
);
384 rrdentry_free(rrdentry_t
*entry
)
386 g_return_if_fail(entry
!= NULL
);
394 rrdentry_hash_new(void)
396 return g_hash_table_new_full(g_str_hash
, g_str_equal
, NULL
,
397 (GDestroyNotify
)rrdentry_free
);
401 cfs_cluster_log_dump(GString
*str
, const char *user
, guint max_entries
)
403 clusterlog_dump(cfs_status
.clusterlog
, str
, user
, max_entries
);
407 cfs_cluster_log(clog_entry_t
*entry
)
409 g_return_if_fail(entry
!= NULL
);
411 clusterlog_insert(cfs_status
.clusterlog
, entry
);
413 if (cfs_status
.kvstore
) {
415 iov
[0].iov_base
= (char *)entry
;
416 iov
[0].iov_len
= clog_entry_size(entry
);
418 if (dfsm_is_initialized(cfs_status
.kvstore
))
419 dfsm_send_message(cfs_status
.kvstore
, KVSTORE_MESSAGE_LOG
, iov
, 1);
423 void cfs_status_init(void)
425 g_mutex_lock (&mutex
);
427 cfs_status
.start_time
= time(NULL
);
429 cfs_status
.vmlist
= vmlist_hash_new();
431 cfs_status
.kvhash
= kventry_hash_new();
433 cfs_status
.rrdhash
= rrdentry_hash_new();
435 cfs_status
.iphash
= g_hash_table_new_full(g_str_hash
, g_str_equal
, g_free
, g_free
);
437 cfs_status
.memdb_changes
= g_hash_table_new(g_str_hash
, g_str_equal
);
439 for (int i
= 0; i
< G_N_ELEMENTS(memdb_change_array
); i
++) {
440 g_hash_table_replace(cfs_status
.memdb_changes
,
441 memdb_change_array
[i
].path
,
442 &memdb_change_array
[i
]);
445 cfs_status
.clusterlog
= clusterlog_new();
448 clusterlog_add(cfs_status
.clusterlog
, "root", "cluster", getpid(),
449 LOG_INFO
, "starting cluster log");
451 g_mutex_unlock (&mutex
);
454 void cfs_status_cleanup(void)
456 g_mutex_lock (&mutex
);
458 cfs_status
.clinfo_version
++;
460 if (cfs_status
.clinfo
) {
461 cfs_clinfo_destroy(cfs_status
.clinfo
);
462 cfs_status
.clinfo
= NULL
;
465 if (cfs_status
.vmlist
) {
466 g_hash_table_destroy(cfs_status
.vmlist
);
467 cfs_status
.vmlist
= NULL
;
470 if (cfs_status
.kvhash
) {
471 g_hash_table_destroy(cfs_status
.kvhash
);
472 cfs_status
.kvhash
= NULL
;
475 if (cfs_status
.rrdhash
) {
476 g_hash_table_destroy(cfs_status
.rrdhash
);
477 cfs_status
.rrdhash
= NULL
;
480 if (cfs_status
.iphash
) {
481 g_hash_table_destroy(cfs_status
.iphash
);
482 cfs_status
.iphash
= NULL
;
485 if (cfs_status
.clusterlog
)
486 clusterlog_destroy(cfs_status
.clusterlog
);
488 g_mutex_unlock (&mutex
);
491 void cfs_status_set_clinfo(
492 cfs_clinfo_t
*clinfo
)
494 g_return_if_fail(clinfo
!= NULL
);
496 g_mutex_lock (&mutex
);
498 cfs_status
.clinfo_version
++;
500 cfs_clinfo_t
*old
= cfs_status
.clinfo
;
502 cfs_status
.clinfo
= clinfo
;
504 cfs_message("update cluster info (cluster name %s, version = %d)",
505 clinfo
->cluster_name
, clinfo
->cman_version
);
508 if (old
&& old
->nodes_byid
&& clinfo
->nodes_byid
) {
510 GHashTable
*ht
= clinfo
->nodes_byid
;
514 g_hash_table_iter_init (&iter
, ht
);
516 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
517 cfs_clnode_t
*node
= (cfs_clnode_t
*)value
;
518 cfs_clnode_t
*oldnode
;
519 if ((oldnode
= g_hash_table_lookup(old
->nodes_byid
, key
))) {
520 node
->online
= oldnode
->online
;
521 node
->kvhash
= oldnode
->kvhash
;
522 oldnode
->kvhash
= NULL
;
529 cfs_clinfo_destroy(old
);
532 g_mutex_unlock (&mutex
);
536 dump_kvstore_versions(
539 const char *nodename
)
541 g_return_if_fail(kvhash
!= NULL
);
542 g_return_if_fail(str
!= NULL
);
543 g_return_if_fail(nodename
!= NULL
);
545 GHashTable
*ht
= kvhash
;
549 g_string_append_printf(str
, "\"%s\": {\n", nodename
);
551 g_hash_table_iter_init (&iter
, ht
);
554 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
555 kventry_t
*entry
= (kventry_t
*)value
;
556 if (i
) g_string_append_printf(str
, ",\n");
558 g_string_append_printf(str
,"\"%s\": %u", entry
->key
, entry
->version
);
561 g_string_append_printf(str
, "}\n");
565 cfs_create_version_msg(GString
*str
)
567 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
569 g_mutex_lock (&mutex
);
571 g_string_append_printf(str
,"{\n");
573 g_string_append_printf(str
, "\"starttime\": %lu,\n", (unsigned long)cfs_status
.start_time
);
575 g_string_append_printf(str
, "\"clinfo\": %u,\n", cfs_status
.clinfo_version
);
577 g_string_append_printf(str
, "\"vmlist\": %u,\n", cfs_status
.vmlist_version
);
579 for (int i
= 0; i
< G_N_ELEMENTS(memdb_change_array
); i
++) {
580 g_string_append_printf(str
, "\"%s\": %u,\n",
581 memdb_change_array
[i
].path
,
582 memdb_change_array
[i
].version
);
585 g_string_append_printf(str
, "\"kvstore\": {\n");
587 dump_kvstore_versions(str
, cfs_status
.kvhash
, cfs
.nodename
);
589 cfs_clinfo_t
*clinfo
= cfs_status
.clinfo
;
591 if (clinfo
&& clinfo
->nodes_byid
) {
592 GHashTable
*ht
= clinfo
->nodes_byid
;
596 g_hash_table_iter_init (&iter
, ht
);
598 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
599 cfs_clnode_t
*node
= (cfs_clnode_t
*)value
;
602 g_string_append_printf(str
, ",\n");
603 dump_kvstore_versions(str
, node
->kvhash
, node
->name
);
607 g_string_append_printf(str
,"}\n");
609 g_string_append_printf(str
,"}\n");
611 g_mutex_unlock (&mutex
);
617 vmlist_hash_new(void)
619 return g_hash_table_new_full(g_int_hash
, g_int_equal
, NULL
,
620 (GDestroyNotify
)vminfo_free
);
624 vmlist_hash_insert_vm(
628 const char *nodename
,
631 g_return_val_if_fail(vmlist
!= NULL
, FALSE
);
632 g_return_val_if_fail(nodename
!= NULL
, FALSE
);
633 g_return_val_if_fail(vmid
!= 0, FALSE
);
634 g_return_val_if_fail(vmtype
== VMTYPE_QEMU
|| vmtype
== VMTYPE_OPENVZ
||
635 vmtype
== VMTYPE_LXC
, FALSE
);
637 if (!replace
&& g_hash_table_lookup(vmlist
, &vmid
)) {
638 cfs_critical("detected duplicate VMID %d", vmid
);
642 vminfo_t
*vminfo
= g_new0(vminfo_t
, 1);
645 vminfo
->vmtype
= vmtype
;
646 vminfo
->nodename
= g_strdup(nodename
);
648 vminfo
->version
= ++vminfo_version_counter
;
650 g_hash_table_replace(vmlist
, &vminfo
->vmid
, vminfo
);
659 const char *nodename
)
661 g_return_if_fail(cfs_status
.vmlist
!= NULL
);
662 g_return_if_fail(nodename
!= NULL
);
663 g_return_if_fail(vmid
!= 0);
664 g_return_if_fail(vmtype
== VMTYPE_QEMU
|| vmtype
== VMTYPE_OPENVZ
||
665 vmtype
== VMTYPE_LXC
);
667 cfs_debug("vmlist_register_vm: %s/%u %d", nodename
, vmid
, vmtype
);
669 g_mutex_lock (&mutex
);
671 cfs_status
.vmlist_version
++;
673 vmlist_hash_insert_vm(cfs_status
.vmlist
, vmtype
, vmid
, nodename
, TRUE
);
675 g_mutex_unlock (&mutex
);
679 vmlist_different_vm_exists(
682 const char *nodename
)
684 g_return_val_if_fail(cfs_status
.vmlist
!= NULL
, FALSE
);
685 g_return_val_if_fail(vmid
!= 0, FALSE
);
687 gboolean res
= FALSE
;
689 g_mutex_lock (&mutex
);
692 if ((vminfo
= (vminfo_t
*)g_hash_table_lookup(cfs_status
.vmlist
, &vmid
))) {
693 if (!(vminfo
->vmtype
== vmtype
&& strcmp(vminfo
->nodename
, nodename
) == 0))
696 g_mutex_unlock (&mutex
);
705 g_return_val_if_fail(cfs_status
.vmlist
!= NULL
, FALSE
);
706 g_return_val_if_fail(vmid
!= 0, FALSE
);
708 g_mutex_lock (&mutex
);
710 gpointer res
= g_hash_table_lookup(cfs_status
.vmlist
, &vmid
);
712 g_mutex_unlock (&mutex
);
721 g_return_if_fail(cfs_status
.vmlist
!= NULL
);
722 g_return_if_fail(vmid
!= 0);
724 g_mutex_lock (&mutex
);
726 cfs_status
.vmlist_version
++;
728 g_hash_table_remove(cfs_status
.vmlist
, &vmid
);
730 g_mutex_unlock (&mutex
);
733 void cfs_status_set_vmlist(
736 g_return_if_fail(vmlist
!= NULL
);
738 g_mutex_lock (&mutex
);
740 cfs_status
.vmlist_version
++;
742 if (cfs_status
.vmlist
)
743 g_hash_table_destroy(cfs_status
.vmlist
);
745 cfs_status
.vmlist
= vmlist
;
747 g_mutex_unlock (&mutex
);
751 cfs_create_vmlist_msg(GString
*str
)
753 g_return_val_if_fail(cfs_status
.vmlist
!= NULL
, -EINVAL
);
754 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
756 g_mutex_lock (&mutex
);
758 g_string_append_printf(str
,"{\n");
760 GHashTable
*ht
= cfs_status
.vmlist
;
762 guint count
= g_hash_table_size(ht
);
765 g_string_append_printf(str
,"\"version\": %u\n", cfs_status
.vmlist_version
);
767 g_string_append_printf(str
,"\"version\": %u,\n", cfs_status
.vmlist_version
);
769 g_string_append_printf(str
,"\"ids\": {\n");
774 g_hash_table_iter_init (&iter
, ht
);
777 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
778 vminfo_t
*vminfo
= (vminfo_t
*)value
;
779 const char *type
= vminfo_type_to_string(vminfo
);
782 g_string_append_printf(str
, ",\n");
785 g_string_append_printf(str
,"\"%u\": { \"node\": \"%s\", \"type\": \"%s\", \"version\": %u }",
786 vminfo
->vmid
, vminfo
->nodename
, type
, vminfo
->version
);
789 g_string_append_printf(str
,"}\n");
791 g_string_append_printf(str
,"\n}\n");
793 g_mutex_unlock (&mutex
);
798 // checks the conf for a line starting with '$prop:' and returns the value
799 // afterwards, without initial whitespace(s), we only deal with the format
800 // restriction imposed by our perl VM config parser, main reference is
801 // PVE::QemuServer::parse_vm_config this allows to be very fast and still
803 // main restrictions used for our advantage is the properties match regex:
804 // ($line =~ m/^([a-z][a-z_]*\d*):\s*(.+?)\s*$/) from parse_vm_config
805 // currently we only look at the current configuration in place, i.e., *no*
806 // snapshots and *no* pending changes
808 _get_property_value(char *conf
, int conf_size
, const char *prop
, int prop_len
)
810 const char *const conf_end
= conf
+ conf_size
;
812 size_t remaining_size
;
814 char *next_newline
= memchr(conf
, '\n', conf_size
);
815 if (next_newline
== NULL
) {
816 return NULL
; // valid property lines end with \n, but none in the config
818 *next_newline
= '\0';
820 while (line
!= NULL
) {
821 if (!line
[0]) goto next
;
823 // snapshot or pending section start, but nothing found yet -> not found
824 if (line
[0] == '[') return NULL
;
825 // properties start with /^[a-z]/, so continue early if not
826 if (line
[0] < 'a' || line
[0] > 'z') goto next
;
828 int line_len
= strlen(line
);
829 if (line_len
<= prop_len
+ 1) goto next
;
831 if (line
[prop_len
] == ':' && memcmp(line
, prop
, prop_len
) == 0) { // found
832 char *v_start
= &line
[prop_len
+ 1];
834 // drop initial value whitespaces here already
835 while (*v_start
&& isspace(*v_start
)) v_start
++;
837 if (!*v_start
) return NULL
;
839 char *v_end
= &line
[line_len
- 1];
840 while (v_end
> v_start
&& isspace(*v_end
)) v_end
--;
846 line
= next_newline
+ 1;
847 remaining_size
= conf_end
- line
;
848 if (remaining_size
<= prop_len
) {
851 next_newline
= memchr(line
, '\n', remaining_size
);
852 if (next_newline
== NULL
) {
853 return NULL
; // valid property lines end with \n, but none in the config
855 *next_newline
= '\0';
858 return NULL
; // not found
862 _g_str_append_kv_jsonescaped(GString
*str
, const char *k
, const char *v
)
864 g_string_append_printf(str
, "\"%s\": \"", k
);
867 if (*v
== '\\' || *v
== '"') {
868 g_string_append_c(str
, '\\');
870 g_string_append_c(str
, *v
);
873 g_string_append_c(str
, '"');
877 cfs_create_guest_conf_property_msg(GString
*str
, memdb_t
*memdb
, const char *prop
, uint32_t vmid
)
879 g_return_val_if_fail(cfs_status
.vmlist
!= NULL
, -EINVAL
);
880 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
882 int prop_len
= strlen(prop
);
884 GString
*path
= NULL
;
886 // Prelock &memdb->mutex in order to enable the usage of memdb_read_nolock
887 // to prevent Deadlocks as in #2553
888 g_mutex_lock (&memdb
->mutex
);
889 g_mutex_lock (&mutex
);
891 g_string_printf(str
,"{\n");
893 GHashTable
*ht
= cfs_status
.vmlist
;
895 if (!g_hash_table_size(ht
)) {
899 path
= g_string_sized_new(256);
901 vminfo_t
*vminfo
= (vminfo_t
*) g_hash_table_lookup(cfs_status
.vmlist
, &vmid
);
902 if (vminfo
== NULL
) goto enoent
;
904 if (!vminfo_to_path(vminfo
, path
)) goto err
;
906 // use memdb_read_nolock because lock is handled here
907 int size
= memdb_read_nolock(memdb
, path
->str
, &tmp
);
908 if (tmp
== NULL
) goto err
;
909 if (size
<= prop_len
) goto ret
;
911 char *val
= _get_property_value(tmp
, size
, prop
, prop_len
);
912 if (val
== NULL
) goto ret
;
914 g_string_append_printf(str
, "\"%u\":{", vmid
);
915 _g_str_append_kv_jsonescaped(str
, prop
, val
);
916 g_string_append_c(str
, '}');
920 g_hash_table_iter_init (&iter
, ht
);
924 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
925 vminfo_t
*vminfo
= (vminfo_t
*)value
;
927 if (!vminfo_to_path(vminfo
, path
)) goto err
;
929 g_free(tmp
); // no-op if already null
931 // use memdb_read_nolock because lock is handled here
932 int size
= memdb_read_nolock(memdb
, path
->str
, &tmp
);
933 if (tmp
== NULL
|| size
<= prop_len
) continue;
935 char *val
= _get_property_value(tmp
, size
, prop
, prop_len
);
936 if (val
== NULL
) continue;
938 if (!first
) g_string_append_printf(str
, ",\n");
941 g_string_append_printf(str
, "\"%u\":{", vminfo
->vmid
);
942 _g_str_append_kv_jsonescaped(str
, prop
, val
);
943 g_string_append_c(str
, '}');
949 g_string_free(path
, TRUE
);
951 g_string_append_printf(str
,"\n}\n");
952 g_mutex_unlock (&mutex
);
953 g_mutex_unlock (&memdb
->mutex
);
964 record_memdb_change(const char *path
)
966 g_return_if_fail(cfs_status
.memdb_changes
!= 0);
970 if ((ce
= (memdb_change_t
*)g_hash_table_lookup(cfs_status
.memdb_changes
, path
))) {
976 record_memdb_reload(void)
978 for (int i
= 0; i
< G_N_ELEMENTS(memdb_change_array
); i
++) {
979 memdb_change_array
[i
].version
++;
990 g_return_val_if_fail(kvhash
!= NULL
, FALSE
);
991 g_return_val_if_fail(key
!= NULL
, FALSE
);
992 g_return_val_if_fail(data
!= NULL
, FALSE
);
996 g_hash_table_remove(kvhash
, key
);
997 } else if ((entry
= (kventry_t
*)g_hash_table_lookup(kvhash
, key
))) {
999 entry
->data
= g_memdup(data
, len
);
1003 kventry_t
*entry
= g_new0(kventry_t
, 1);
1005 entry
->key
= g_strdup(key
);
1006 entry
->data
= g_memdup(data
, len
);
1009 g_hash_table_replace(kvhash
, entry
->key
, entry
);
// RRD schema for per-node statistics: data sources (GAUGE/DERIVE, 120s
// heartbeat) plus AVERAGE and MAX archives at increasing consolidation
// intervals. Passed to create_rrd_file()/rrd_create_r().
1015 static const char *rrd_def_node
[] = {
1016 "DS:loadavg:GAUGE:120:0:U",
1017 "DS:maxcpu:GAUGE:120:0:U",
1018 "DS:cpu:GAUGE:120:0:U",
1019 "DS:iowait:GAUGE:120:0:U",
1020 "DS:memtotal:GAUGE:120:0:U",
1021 "DS:memused:GAUGE:120:0:U",
1022 "DS:swaptotal:GAUGE:120:0:U",
1023 "DS:swapused:GAUGE:120:0:U",
1024 "DS:roottotal:GAUGE:120:0:U",
1025 "DS:rootused:GAUGE:120:0:U",
1026 "DS:netin:DERIVE:120:0:U",
1027 "DS:netout:DERIVE:120:0:U",
1029 "RRA:AVERAGE:0.5:1:70", // 1 min avg - one hour
1030 "RRA:AVERAGE:0.5:30:70", // 30 min avg - one day
1031 "RRA:AVERAGE:0.5:180:70", // 3 hour avg - one week
1032 "RRA:AVERAGE:0.5:720:70", // 12 hour avg - one month
1033 "RRA:AVERAGE:0.5:10080:70", // 7 day avg - one year
1035 "RRA:MAX:0.5:1:70", // 1 min max - one hour
1036 "RRA:MAX:0.5:30:70", // 30 min max - one day
1037 "RRA:MAX:0.5:180:70", // 3 hour max - one week
1038 "RRA:MAX:0.5:720:70", // 12 hour max - one month
1039 "RRA:MAX:0.5:10080:70", // 7 day max - one year
// RRD schema for per-guest (VM/CT) statistics; archives mirror the
// node schema above.
1043 static const char *rrd_def_vm
[] = {
1044 "DS:maxcpu:GAUGE:120:0:U",
1045 "DS:cpu:GAUGE:120:0:U",
1046 "DS:maxmem:GAUGE:120:0:U",
1047 "DS:mem:GAUGE:120:0:U",
1048 "DS:maxdisk:GAUGE:120:0:U",
1049 "DS:disk:GAUGE:120:0:U",
1050 "DS:netin:DERIVE:120:0:U",
1051 "DS:netout:DERIVE:120:0:U",
1052 "DS:diskread:DERIVE:120:0:U",
1053 "DS:diskwrite:DERIVE:120:0:U",
1055 "RRA:AVERAGE:0.5:1:70", // 1 min avg - one hour
1056 "RRA:AVERAGE:0.5:30:70", // 30 min avg - one day
1057 "RRA:AVERAGE:0.5:180:70", // 3 hour avg - one week
1058 "RRA:AVERAGE:0.5:720:70", // 12 hour avg - one month
1059 "RRA:AVERAGE:0.5:10080:70", // 7 day avg - one year
1061 "RRA:MAX:0.5:1:70", // 1 min max - one hour
1062 "RRA:MAX:0.5:30:70", // 30 min max - one day
1063 "RRA:MAX:0.5:180:70", // 3 hour max - one week
1064 "RRA:MAX:0.5:720:70", // 12 hour max - one month
1065 "RRA:MAX:0.5:10080:70", // 7 day max - one year
// RRD schema for per-storage statistics (total/used bytes); archives
// mirror the node schema above.
1069 static const char *rrd_def_storage
[] = {
1070 "DS:total:GAUGE:120:0:U",
1071 "DS:used:GAUGE:120:0:U",
1073 "RRA:AVERAGE:0.5:1:70", // 1 min avg - one hour
1074 "RRA:AVERAGE:0.5:30:70", // 30 min avg - one day
1075 "RRA:AVERAGE:0.5:180:70", // 3 hour avg - one week
1076 "RRA:AVERAGE:0.5:720:70", // 12 hour avg - one month
1077 "RRA:AVERAGE:0.5:10080:70", // 7 day avg - one year
1079 "RRA:MAX:0.5:1:70", // 1 min max - one hour
1080 "RRA:MAX:0.5:30:70", // 30 min max - one day
1081 "RRA:MAX:0.5:180:70", // 3 hour max - one week
1082 "RRA:MAX:0.5:720:70", // 12 hour max - one month
1083 "RRA:MAX:0.5:10080:70", // 7 day max - one year
1087 #define RRDDIR "/var/lib/rrdcached/db"
1091 const char *filename
,
1093 const char *rrddef
[])
1095 /* start at day boundary */
1098 struct tm
*ltm
= localtime(&ctime
);
1104 if (rrd_create_r(filename
, 60, timelocal(ltm
), argcount
, rrddef
)) {
1105 cfs_message("RRD create error %s: %s", filename
, rrd_get_error());
1109 static inline const char *
1115 while (*data
&& found
< count
) {
1128 g_return_if_fail(key
!= NULL
);
1129 g_return_if_fail(data
!= NULL
);
1130 g_return_if_fail(len
> 0);
1131 g_return_if_fail(len
< 4096);
1133 static const char *rrdcsock
= "unix:/var/run/rrdcached.sock";
1136 if (rrdc_connect(rrdcsock
) != 0)
1139 char *filename
= NULL
;
1143 if (strncmp(key
, "pve2-node/", 10) == 0) {
1144 const char *node
= key
+ 10;
1148 if (strchr(node
, '/') != NULL
)
1151 if (strlen(node
) < 1)
1154 filename
= g_strdup_printf(RRDDIR
"/%s", key
);
1156 if (!g_file_test(filename
, G_FILE_TEST_EXISTS
)) {
1158 mkdir(RRDDIR
"/pve2-node", 0755);
1159 int argcount
= sizeof(rrd_def_node
)/sizeof(void*) - 1;
1160 create_rrd_file(filename
, argcount
, rrd_def_node
);
1163 } else if ((strncmp(key
, "pve2-vm/", 8) == 0) ||
1164 (strncmp(key
, "pve2.3-vm/", 10) == 0)) {
1167 if (strncmp(key
, "pve2-vm/", 8) == 0) {
1175 if (strchr(vmid
, '/') != NULL
)
1178 if (strlen(vmid
) < 1)
1181 filename
= g_strdup_printf(RRDDIR
"/%s/%s", "pve2-vm", vmid
);
1183 if (!g_file_test(filename
, G_FILE_TEST_EXISTS
)) {
1185 mkdir(RRDDIR
"/pve2-vm", 0755);
1186 int argcount
= sizeof(rrd_def_vm
)/sizeof(void*) - 1;
1187 create_rrd_file(filename
, argcount
, rrd_def_vm
);
1190 } else if (strncmp(key
, "pve2-storage/", 13) == 0) {
1191 const char *node
= key
+ 13;
1193 const char *storage
= node
;
1194 while (*storage
&& *storage
!= '/')
1197 if (*storage
!= '/' || ((storage
- node
) < 1))
1202 if (strchr(storage
, '/') != NULL
)
1205 if (strlen(storage
) < 1)
1208 filename
= g_strdup_printf(RRDDIR
"/%s", key
);
1210 if (!g_file_test(filename
, G_FILE_TEST_EXISTS
)) {
1212 mkdir(RRDDIR
"/pve2-storage", 0755);
1214 char *dir
= g_path_get_dirname(filename
);
1218 int argcount
= sizeof(rrd_def_storage
)/sizeof(void*) - 1;
1219 create_rrd_file(filename
, argcount
, rrd_def_storage
);
1226 const char *dp
= skip
? rrd_skip_data(data
, skip
) : data
;
1228 const char *update_args
[] = { dp
, NULL
};
1232 if ((status
= rrdc_update(filename
, 1, update_args
)) != 0) {
1233 cfs_message("RRDC update error %s: %d", filename
, status
);
1236 if (rrd_update_r(filename
, NULL
, 1, update_args
) != 0) {
1237 cfs_message("RRD update error %s: %s", filename
, rrd_get_error());
1243 if (rrd_update_r(filename
, NULL
, 1, update_args
) != 0) {
1244 cfs_message("RRD update error %s: %s", filename
, rrd_get_error());
1255 cfs_critical("RRD update error: unknown/wrong key %s", key
);
1265 rrdentry_t
*entry
= (rrdentry_t
*)value
;
1266 uint32_t ctime
= GPOINTER_TO_UINT(user_data
);
1268 int diff
= ctime
- entry
->time
;
1270 /* remove everything older than 5 minutes */
1273 return (diff
> expire
) ? TRUE
: FALSE
;
1276 static char *rrd_dump_buf
= NULL
;
1277 static time_t rrd_dump_last
= 0;
1280 cfs_rrd_dump(GString
*str
)
1284 g_mutex_lock (&mutex
);
1287 if (rrd_dump_buf
&& (ctime
- rrd_dump_last
) < 2) {
1288 g_string_assign(str
, rrd_dump_buf
);
1289 g_mutex_unlock (&mutex
);
1293 /* remove old data */
1294 g_hash_table_foreach_remove(cfs_status
.rrdhash
, rrd_entry_is_old
,
1295 GUINT_TO_POINTER(ctime
));
1297 g_string_set_size(str
, 0);
1299 GHashTableIter iter
;
1300 gpointer key
, value
;
1302 g_hash_table_iter_init (&iter
, cfs_status
.rrdhash
);
1304 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
1305 rrdentry_t
*entry
= (rrdentry_t
*)value
;
1306 g_string_append(str
, key
);
1307 g_string_append(str
, ":");
1308 g_string_append(str
, entry
->data
);
1309 g_string_append(str
, "\n");
1312 g_string_append_c(str
, 0); // never return undef
1314 rrd_dump_last
= ctime
;
1316 g_free(rrd_dump_buf
);
1317 rrd_dump_buf
= g_strdup(str
->str
);
1319 g_mutex_unlock (&mutex
);
1325 const char *nodename
,
1329 g_return_val_if_fail(iphash
!= NULL
, FALSE
);
1330 g_return_val_if_fail(nodename
!= NULL
, FALSE
);
1331 g_return_val_if_fail(ip
!= NULL
, FALSE
);
1332 g_return_val_if_fail(len
> 0, FALSE
);
1333 g_return_val_if_fail(len
< 256, FALSE
);
1334 g_return_val_if_fail(ip
[len
-1] == 0, FALSE
);
1336 char *oldip
= (char *)g_hash_table_lookup(iphash
, nodename
);
1338 if (!oldip
|| (strcmp(oldip
, ip
) != 0)) {
1339 cfs_status
.clinfo_version
++;
1340 g_hash_table_replace(iphash
, g_strdup(nodename
), g_strdup(ip
));
1348 GHashTable
*rrdhash
,
1353 g_return_val_if_fail(rrdhash
!= NULL
, FALSE
);
1354 g_return_val_if_fail(key
!= NULL
, FALSE
);
1355 g_return_val_if_fail(data
!= NULL
, FALSE
);
1356 g_return_val_if_fail(len
> 0, FALSE
);
1357 g_return_val_if_fail(len
< 4096, FALSE
);
1358 g_return_val_if_fail(data
[len
-1] == 0, FALSE
);
1361 if ((entry
= (rrdentry_t
*)g_hash_table_lookup(rrdhash
, key
))) {
1362 g_free(entry
->data
);
1363 entry
->data
= g_memdup(data
, len
);
1365 entry
->time
= time(NULL
);
1367 rrdentry_t
*entry
= g_new0(rrdentry_t
, 1);
1369 entry
->key
= g_strdup(key
);
1370 entry
->data
= g_memdup(data
, len
);
1372 entry
->time
= time(NULL
);
1374 g_hash_table_replace(rrdhash
, entry
->key
, entry
);
1377 update_rrd_data(key
, data
, len
);
1383 kvstore_send_update_message(
1389 if (!dfsm_is_initialized(dfsm
))
1392 struct iovec iov
[2];
1395 g_strlcpy(name
, key
, sizeof(name
));
1397 iov
[0].iov_base
= &name
;
1398 iov
[0].iov_len
= sizeof(name
);
1400 iov
[1].iov_base
= (char *)data
;
1401 iov
[1].iov_len
= len
;
1403 if (dfsm_send_message(dfsm
, KVSTORE_MESSAGE_UPDATE
, iov
, 2) == CS_OK
)
1409 static clog_entry_t
*
1410 kvstore_parse_log_message(
1414 g_return_val_if_fail(msg
!= NULL
, NULL
);
1416 if (msg_len
< sizeof(clog_entry_t
)) {
1417 cfs_critical("received short log message (%zu < %zu)", msg_len
, sizeof(clog_entry_t
));
1421 clog_entry_t
*entry
= (clog_entry_t
*)msg
;
1423 uint32_t size
= sizeof(clog_entry_t
) + entry
->node_len
+
1424 entry
->ident_len
+ entry
->tag_len
+ entry
->msg_len
;
1426 if (msg_len
!= size
) {
1427 cfs_critical("received log message with wrong size (%zu != %u)", msg_len
, size
);
1431 char *msgptr
= entry
->data
;
1433 if (*((char *)msgptr
+ entry
->node_len
- 1)) {
1434 cfs_critical("unterminated string in log message");
1437 msgptr
+= entry
->node_len
;
1439 if (*((char *)msgptr
+ entry
->ident_len
- 1)) {
1440 cfs_critical("unterminated string in log message");
1443 msgptr
+= entry
->ident_len
;
1445 if (*((char *)msgptr
+ entry
->tag_len
- 1)) {
1446 cfs_critical("unterminated string in log message");
1449 msgptr
+= entry
->tag_len
;
1451 if (*((char *)msgptr
+ entry
->msg_len
- 1)) {
1452 cfs_critical("unterminated string in log message");
1460 kvstore_parse_update_message(
1464 gconstpointer
*data
,
1467 g_return_val_if_fail(msg
!= NULL
, FALSE
);
1468 g_return_val_if_fail(key
!= NULL
, FALSE
);
1469 g_return_val_if_fail(data
!= NULL
, FALSE
);
1470 g_return_val_if_fail(len
!= NULL
, FALSE
);
1472 if (msg_len
< 256) {
1473 cfs_critical("received short kvstore message (%zu < 256)", msg_len
);
1477 /* test if key is null terminated */
1479 for (i
= 0; i
< 256; i
++)
1480 if (((char *)msg
)[i
] == 0)
1487 *len
= msg_len
- 256;
1489 *data
= (char *) msg
+ 256;
1495 cfs_create_status_msg(
1497 const char *nodename
,
1500 g_return_val_if_fail(str
!= NULL
, -EINVAL
);
1501 g_return_val_if_fail(key
!= NULL
, -EINVAL
);
1505 GHashTable
*kvhash
= NULL
;
1507 g_mutex_lock (&mutex
);
1509 if (!nodename
|| !nodename
[0] || !strcmp(nodename
, cfs
.nodename
)) {
1510 kvhash
= cfs_status
.kvhash
;
1511 } else if (cfs_status
.clinfo
&& cfs_status
.clinfo
->nodes_byname
) {
1512 cfs_clnode_t
*clnode
;
1513 if ((clnode
= g_hash_table_lookup(cfs_status
.clinfo
->nodes_byname
, nodename
)))
1514 kvhash
= clnode
->kvhash
;
1518 if (kvhash
&& (entry
= (kventry_t
*)g_hash_table_lookup(kvhash
, key
))) {
1519 g_string_append_len(str
, entry
->data
, entry
->len
);
1523 g_mutex_unlock (&mutex
);
1534 g_return_val_if_fail(key
!= NULL
, FALSE
);
1535 g_return_val_if_fail(data
!= NULL
, FALSE
);
1536 g_return_val_if_fail(cfs_status
.kvhash
!= NULL
, FALSE
);
1538 if (len
> CFS_MAX_STATUS_SIZE
)
1541 g_mutex_lock (&mutex
);
1545 if (strncmp(key
, "rrd/", 4) == 0) {
1546 res
= rrdentry_hash_set(cfs_status
.rrdhash
, key
+ 4, data
, len
);
1547 } else if (!strcmp(key
, "nodeip")) {
1548 res
= nodeip_hash_set(cfs_status
.iphash
, cfs
.nodename
, data
, len
);
1550 res
= kventry_hash_set(cfs_status
.kvhash
, key
, data
, len
);
1552 g_mutex_unlock (&mutex
);
1554 if (cfs_status
.kvstore
)
1555 kvstore_send_update_message(cfs_status
.kvstore
, key
, data
, len
);
1557 return res
? 0 : -ENOMEM
;
1561 cfs_kvstore_node_set(
1567 g_return_val_if_fail(nodeid
!= 0, FALSE
);
1568 g_return_val_if_fail(key
!= NULL
, FALSE
);
1569 g_return_val_if_fail(data
!= NULL
, FALSE
);
1571 g_mutex_lock (&mutex
);
1573 if (!cfs_status
.clinfo
|| !cfs_status
.clinfo
->nodes_byid
)
1574 goto ret
; /* ignore */
1576 cfs_clnode_t
*clnode
= g_hash_table_lookup(cfs_status
.clinfo
->nodes_byid
, &nodeid
);
1578 goto ret
; /* ignore */
1580 cfs_debug("got node %d status update %s", nodeid
, key
);
1582 if (strncmp(key
, "rrd/", 4) == 0) {
1583 rrdentry_hash_set(cfs_status
.rrdhash
, key
+ 4, data
, len
);
1584 } else if (!strcmp(key
, "nodeip")) {
1585 nodeip_hash_set(cfs_status
.iphash
, clnode
->name
, data
, len
);
1587 if (!clnode
->kvhash
) {
1588 if (!(clnode
->kvhash
= kventry_hash_new())) {
1589 goto ret
; /*ignore */
1593 kventry_hash_set(clnode
->kvhash
, key
, data
, len
);
1597 g_mutex_unlock (&mutex
);
1603 cfs_kvstore_sync(void)
1605 g_return_val_if_fail(cfs_status
.kvhash
!= NULL
, FALSE
);
1606 g_return_val_if_fail(cfs_status
.kvstore
!= NULL
, FALSE
);
1608 gboolean res
= TRUE
;
1610 g_mutex_lock (&mutex
);
1612 GHashTable
*ht
= cfs_status
.kvhash
;
1613 GHashTableIter iter
;
1614 gpointer key
, value
;
1616 g_hash_table_iter_init (&iter
, ht
);
1618 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
1619 kventry_t
*entry
= (kventry_t
*)value
;
1620 kvstore_send_update_message(cfs_status
.kvstore
, entry
->key
, entry
->data
, entry
->len
);
1623 g_mutex_unlock (&mutex
);
1640 g_return_val_if_fail(dfsm
!= NULL
, -1);
1641 g_return_val_if_fail(msg
!= NULL
, -1);
1642 g_return_val_if_fail(res_ptr
!= NULL
, -1);
1644 /* ignore message for ourself */
1645 if (dfsm_nodeid_is_local(dfsm
, nodeid
, pid
))
1648 if (msg_type
== KVSTORE_MESSAGE_UPDATE
) {
1652 if (kvstore_parse_update_message(msg
, msg_len
, &key
, &data
, &len
)) {
1653 cfs_kvstore_node_set(nodeid
, key
, data
, len
);
1655 cfs_critical("cant parse update message");
1657 } else if (msg_type
== KVSTORE_MESSAGE_LOG
) {
1658 cfs_message("received log"); // fixme: remove
1659 const clog_entry_t
*entry
;
1660 if ((entry
= kvstore_parse_log_message(msg
, msg_len
))) {
1661 clusterlog_insert(cfs_status
.clusterlog
, entry
);
1663 cfs_critical("cant parse log message");
1666 cfs_critical("received unknown message type %d\n", msg_type
);
1683 const struct cpg_address
*member_list
,
1684 size_t member_list_entries
)
1686 g_return_if_fail(dfsm
!= NULL
);
1687 g_return_if_fail(member_list
!= NULL
);
1689 cfs_debug("enter %s", __func__
);
1691 g_mutex_lock (&mutex
);
1693 cfs_clinfo_t
*clinfo
= cfs_status
.clinfo
;
1695 if (clinfo
&& clinfo
->nodes_byid
) {
1697 GHashTable
*ht
= clinfo
->nodes_byid
;
1698 GHashTableIter iter
;
1699 gpointer key
, value
;
1701 g_hash_table_iter_init (&iter
, ht
);
1703 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
1704 cfs_clnode_t
*node
= (cfs_clnode_t
*)value
;
1705 node
->online
= FALSE
;
1708 for (int i
= 0; i
< member_list_entries
; i
++) {
1710 if ((node
= g_hash_table_lookup(clinfo
->nodes_byid
, &member_list
[i
].nodeid
))) {
1711 node
->online
= TRUE
;
1715 cfs_status
.clinfo_version
++;
1718 g_mutex_unlock (&mutex
);
1725 unsigned int *res_len
)
1727 g_return_val_if_fail(dfsm
!= NULL
, NULL
);
1729 gpointer msg
= clusterlog_get_state(cfs_status
.clusterlog
, res_len
);
1735 dfsm_process_update(
1738 dfsm_sync_info_t
*syncinfo
,
1744 cfs_critical("%s: received unexpected update message", __func__
);
1750 dfsm_process_state_update(
1753 dfsm_sync_info_t
*syncinfo
)
1755 g_return_val_if_fail(dfsm
!= NULL
, -1);
1756 g_return_val_if_fail(syncinfo
!= NULL
, -1);
1758 clog_base_t
*clog
[syncinfo
->node_count
];
1760 int local_index
= -1;
1761 for (int i
= 0; i
< syncinfo
->node_count
; i
++) {
1762 dfsm_node_info_t
*ni
= &syncinfo
->nodes
[i
];
1765 if (syncinfo
->local
== ni
)
1768 clog_base_t
*base
= (clog_base_t
*)ni
->state
;
1769 if (ni
->state_len
> 8 && ni
->state_len
== clog_size(base
)) {
1770 clog
[i
] = ni
->state
;
1772 cfs_critical("received log with wrong size %u", ni
->state_len
);
1777 if (!clusterlog_merge(cfs_status
.clusterlog
, clog
, syncinfo
->node_count
, local_index
)) {
1778 cfs_critical("unable to merge log files");
1790 dfsm_sync_info_t
*syncinfo
)
1792 g_return_val_if_fail(dfsm
!= NULL
, -1);
1793 g_return_val_if_fail(syncinfo
!= NULL
, -1);
1799 dfsm_synced(dfsm_t
*dfsm
)
1801 g_return_if_fail(dfsm
!= NULL
);
1803 char *ip
= (char *)g_hash_table_lookup(cfs_status
.iphash
, cfs
.nodename
);
1807 cfs_status_set("nodeip", ip
, strlen(ip
) + 1);
1814 dfsm_sync_info_t
*syncinfo
)
/* Callback table wiring the generic dfsm engine to the kvstore
 * handlers above; passed to dfsm_new() in cfs_status_dfsm_new(). */
static dfsm_callbacks_t kvstore_dfsm_callbacks = {
    .dfsm_deliver_fn = dfsm_deliver,   /* incoming kvstore/log messages */
    .dfsm_confchg_fn = dfsm_confchg,   /* CPG membership changes */

    /* state-sync protocol hooks */
    .dfsm_get_state_fn = dfsm_get_state,
    .dfsm_process_state_update_fn = dfsm_process_state_update,
    .dfsm_process_update_fn = dfsm_process_update,
    .dfsm_commit_fn = dfsm_commit,
    .dfsm_cleanup_fn = dfsm_cleanup,
    .dfsm_synced_fn = dfsm_synced,
};
1832 cfs_status_dfsm_new(void)
1834 g_mutex_lock (&mutex
);
1836 cfs_status
.kvstore
= dfsm_new(NULL
, KVSTORE_CPG_GROUP_NAME
, G_LOG_DOMAIN
,
1837 0, &kvstore_dfsm_callbacks
);
1838 g_mutex_unlock (&mutex
);
1840 return cfs_status
.kvstore
;
1844 cfs_is_quorate(void)
1846 g_mutex_lock (&mutex
);
1847 gboolean res
= cfs_status
.quorate
;
1848 g_mutex_unlock (&mutex
);
1858 g_mutex_lock (&mutex
);
1860 uint32_t prev_quorate
= cfs_status
.quorate
;
1861 cfs_status
.quorate
= quorate
;
1863 if (!prev_quorate
&& cfs_status
.quorate
) {
1865 cfs_message("node has quorum");
1868 if (prev_quorate
&& !cfs_status
.quorate
) {
1870 cfs_message("node lost quorum");
1873 g_mutex_unlock (&mutex
);