compress.c \
crypto.c \
handle.c \
+ handle_api.c \
host.c \
links.c \
links_acl.c \
return 0;
}
-int compress_cfg(
+static int compress_cfg(
knet_handle_t knet_h,
struct knet_handle_compress_cfg *knet_handle_compress_cfg)
{
return err;
}
+int knet_handle_compress(knet_handle_t knet_h, struct knet_handle_compress_cfg *knet_handle_compress_cfg)
+{
+ int savederrno = 0;
+ int err = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (!knet_handle_compress_cfg) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ compress_fini(knet_h, 0);
+ err = compress_cfg(knet_h, knet_handle_compress_cfg);
+ savederrno = errno;
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ errno = err ? savederrno : 0;
+ return err;
+}
+
int knet_get_compress_list(struct knet_compress_info *compress_list, size_t *compress_list_entries)
{
int err = 0;
#include "internals.h"
-int compress_cfg(
- knet_handle_t knet_h,
- struct knet_handle_compress_cfg *knet_handle_compress_cfg);
-
int compress_init(
knet_handle_t knet_h);
return err;
}
-int crypto_use_config(
+static int crypto_use_config(
knet_handle_t knet_h,
uint8_t config_num)
{
return 0;
}
-int crypto_init(
+static int crypto_init(
knet_handle_t knet_h,
struct knet_handle_crypto_cfg *knet_handle_crypto_cfg,
uint8_t config_num)
return;
}
+static int _knet_handle_crypto_set_config(knet_handle_t knet_h,
+ struct knet_handle_crypto_cfg *knet_handle_crypto_cfg,
+ uint8_t config_num,
+ uint8_t force)
+{
+ int savederrno = 0;
+ int err = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (!knet_handle_crypto_cfg) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if ((config_num < 1) || (config_num > KNET_MAX_CRYPTO_INSTANCES)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ if ((knet_h->crypto_in_use_config == config_num) && (!force)) {
+ savederrno = EBUSY;
+ err = -1;
+ goto exit_unlock;
+ }
+
+ if ((!strncmp("none", knet_handle_crypto_cfg->crypto_model, 4)) ||
+ ((!strncmp("none", knet_handle_crypto_cfg->crypto_cipher_type, 4)) &&
+ (!strncmp("none", knet_handle_crypto_cfg->crypto_hash_type, 4)))) {
+ crypto_fini(knet_h, config_num);
+ log_debug(knet_h, KNET_SUB_CRYPTO, "crypto config %u is not enabled", config_num);
+ err = 0;
+ goto exit_unlock;
+ }
+
+ if (knet_handle_crypto_cfg->private_key_len < KNET_MIN_KEY_LEN) {
+ log_debug(knet_h, KNET_SUB_CRYPTO, "private key len too short for config %u (min %d): %u",
+ config_num, KNET_MIN_KEY_LEN, knet_handle_crypto_cfg->private_key_len);
+ savederrno = EINVAL;
+ err = -1;
+ goto exit_unlock;
+ }
+
+ if (knet_handle_crypto_cfg->private_key_len > KNET_MAX_KEY_LEN) {
+ log_debug(knet_h, KNET_SUB_CRYPTO, "private key len too long for config %u (max %d): %u",
+ config_num, KNET_MAX_KEY_LEN, knet_handle_crypto_cfg->private_key_len);
+ savederrno = EINVAL;
+ err = -1;
+ goto exit_unlock;
+ }
+
+ err = crypto_init(knet_h, knet_handle_crypto_cfg, config_num);
+
+ if (err) {
+ err = -2;
+ savederrno = errno;
+ }
+
+exit_unlock:
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ errno = err ? savederrno : 0;
+ return err;
+}
+
+int knet_handle_crypto_set_config(knet_handle_t knet_h,
+ struct knet_handle_crypto_cfg *knet_handle_crypto_cfg,
+ uint8_t config_num)
+{
+ return _knet_handle_crypto_set_config(knet_h, knet_handle_crypto_cfg, config_num, 0);
+}
+
+int knet_handle_crypto_rx_clear_traffic(knet_handle_t knet_h,
+ uint8_t value)
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (value > KNET_CRYPTO_RX_DISALLOW_CLEAR_TRAFFIC) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ knet_h->crypto_only = value;
+ if (knet_h->crypto_only) {
+ log_debug(knet_h, KNET_SUB_CRYPTO, "Only crypto traffic allowed for RX");
+ } else {
+ log_debug(knet_h, KNET_SUB_CRYPTO, "Both crypto and clear traffic allowed for RX");
+ }
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ return 0;
+}
+
+int knet_handle_crypto_use_config(knet_handle_t knet_h,
+ uint8_t config_num)
+{
+ int savederrno = 0;
+ int err = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (config_num > KNET_MAX_CRYPTO_INSTANCES) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ err = crypto_use_config(knet_h, config_num);
+ savederrno = errno;
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ errno = err ? savederrno : 0;
+ return err;
+}
+
int knet_get_crypto_list(struct knet_crypto_info *crypto_list, size_t *crypto_list_entries)
{
int err = 0;
errno = 0;
return err;
}
+
+/*
+ * compatibility wrapper for 1.x releases
+ */
+int knet_handle_crypto(knet_handle_t knet_h, struct knet_handle_crypto_cfg *knet_handle_crypto_cfg)
+{
+ int err = 0;
+ uint8_t value;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ value = knet_h->crypto_only;
+ /*
+ * configure crypto in slot 1
+ */
+ err = _knet_handle_crypto_set_config(knet_h, knet_handle_crypto_cfg, 1, 1);
+ if (err < 0) {
+ return err;
+ }
+
+ if ((!strncmp("none", knet_handle_crypto_cfg->crypto_model, 4)) ||
+ ((!strncmp("none", knet_handle_crypto_cfg->crypto_cipher_type, 4)) &&
+ (!strncmp("none", knet_handle_crypto_cfg->crypto_hash_type, 4)))) {
+ err = knet_handle_crypto_rx_clear_traffic(knet_h, KNET_CRYPTO_RX_ALLOW_CLEAR_TRAFFIC);
+ if (err < 0) {
+ return err;
+ }
+
+ /*
+ * start using clear traffic
+ */
+ err = knet_handle_crypto_use_config(knet_h, 0);
+ if (err < 0) {
+ err = knet_handle_crypto_rx_clear_traffic(knet_h, value);
+ if (err < 0) {
+ /*
+ * force attempt or things will go bad
+ */
+ knet_h->crypto_only = value;
+ }
+ }
+ return err;
+ } else {
+ err = knet_handle_crypto_rx_clear_traffic(knet_h, KNET_CRYPTO_RX_DISALLOW_CLEAR_TRAFFIC);
+ if (err < 0) {
+ return err;
+ }
+
+ /*
+ * start using crypto traffic
+ */
+ err = knet_handle_crypto_use_config(knet_h, 1);
+ if (err < 0) {
+ err = knet_handle_crypto_rx_clear_traffic(knet_h, value);
+ if (err < 0) {
+ /*
+ * force attempt or things will go bad
+ */
+ knet_h->crypto_only = value;
+ }
+ }
+ return err;
+ }
+}
unsigned char *buf_out,
ssize_t *buf_out_len);
-int crypto_use_config (
- knet_handle_t knet_h,
- uint8_t config_num);
-
-int crypto_init(
- knet_handle_t knet_h,
- struct knet_handle_crypto_cfg *knet_handle_crypto_cfg,
- uint8_t config_num);
-
void crypto_fini(
knet_handle_t knet_h,
uint8_t config_num);
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
-#include <sys/uio.h>
#include <math.h>
#include <sys/time.h>
#include <sys/resource.h>
errno = 0;
return 0;
}
-
-int knet_handle_enable_sock_notify(knet_handle_t knet_h,
- void *sock_notify_fn_private_data,
- void (*sock_notify_fn) (
- void *private_data,
- int datafd,
- int8_t channel,
- uint8_t tx_rx,
- int error,
- int errorno))
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (!sock_notify_fn) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- knet_h->sock_notify_fn_private_data = sock_notify_fn_private_data;
- knet_h->sock_notify_fn = sock_notify_fn;
- log_debug(knet_h, KNET_SUB_HANDLE, "sock_notify_fn enabled");
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- return 0;
-}
-
-int knet_handle_add_datafd(knet_handle_t knet_h, int *datafd, int8_t *channel)
-{
- int err = 0, savederrno = 0;
- int i;
- struct epoll_event ev;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (datafd == NULL) {
- errno = EINVAL;
- return -1;
- }
-
- if (channel == NULL) {
- errno = EINVAL;
- return -1;
- }
-
- if (*channel >= KNET_DATAFD_MAX) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- if (!knet_h->sock_notify_fn) {
- log_err(knet_h, KNET_SUB_HANDLE, "Adding datafd requires sock notify callback enabled!");
- savederrno = EINVAL;
- err = -1;
- goto out_unlock;
- }
-
- if (*datafd > 0) {
- for (i = 0; i < KNET_DATAFD_MAX; i++) {
- if ((knet_h->sockfd[i].in_use) && (knet_h->sockfd[i].sockfd[0] == *datafd)) {
- log_err(knet_h, KNET_SUB_HANDLE, "requested datafd: %d already exist in index: %d", *datafd, i);
- savederrno = EEXIST;
- err = -1;
- goto out_unlock;
- }
- }
- }
-
- /*
- * auto allocate a channel
- */
- if (*channel < 0) {
- for (i = 0; i < KNET_DATAFD_MAX; i++) {
- if (!knet_h->sockfd[i].in_use) {
- *channel = i;
- break;
- }
- }
- if (*channel < 0) {
- savederrno = EBUSY;
- err = -1;
- goto out_unlock;
- }
- } else {
- if (knet_h->sockfd[*channel].in_use) {
- savederrno = EBUSY;
- err = -1;
- goto out_unlock;
- }
- }
-
- knet_h->sockfd[*channel].is_created = 0;
- knet_h->sockfd[*channel].is_socket = 0;
- knet_h->sockfd[*channel].has_error = 0;
-
- if (*datafd > 0) {
- int sockopt;
- socklen_t sockoptlen = sizeof(sockopt);
-
- if (_fdset_cloexec(*datafd)) {
- savederrno = errno;
- err = -1;
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to set CLOEXEC on datafd: %s",
- strerror(savederrno));
- goto out_unlock;
- }
-
- if (_fdset_nonblock(*datafd)) {
- savederrno = errno;
- err = -1;
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to set NONBLOCK on datafd: %s",
- strerror(savederrno));
- goto out_unlock;
- }
-
- knet_h->sockfd[*channel].sockfd[0] = *datafd;
- knet_h->sockfd[*channel].sockfd[1] = 0;
-
- if (!getsockopt(knet_h->sockfd[*channel].sockfd[0], SOL_SOCKET, SO_TYPE, &sockopt, &sockoptlen)) {
- knet_h->sockfd[*channel].is_socket = 1;
- }
- } else {
- if (_init_socketpair(knet_h, knet_h->sockfd[*channel].sockfd)) {
- savederrno = errno;
- err = -1;
- goto out_unlock;
- }
-
- knet_h->sockfd[*channel].is_created = 1;
- knet_h->sockfd[*channel].is_socket = 1;
- *datafd = knet_h->sockfd[*channel].sockfd[0];
- }
-
- memset(&ev, 0, sizeof(struct epoll_event));
- ev.events = EPOLLIN;
- ev.data.fd = knet_h->sockfd[*channel].sockfd[knet_h->sockfd[*channel].is_created];
-
- if (epoll_ctl(knet_h->send_to_links_epollfd,
- EPOLL_CTL_ADD, knet_h->sockfd[*channel].sockfd[knet_h->sockfd[*channel].is_created], &ev)) {
- savederrno = errno;
- err = -1;
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to add datafd %d to linkfd epoll pool: %s",
- knet_h->sockfd[*channel].sockfd[knet_h->sockfd[*channel].is_created], strerror(savederrno));
- if (knet_h->sockfd[*channel].is_created) {
- _close_socketpair(knet_h, knet_h->sockfd[*channel].sockfd);
- }
- goto out_unlock;
- }
-
- knet_h->sockfd[*channel].in_use = 1;
-
-out_unlock:
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-int knet_handle_remove_datafd(knet_handle_t knet_h, int datafd)
-{
- int err = 0, savederrno = 0;
- int8_t channel = -1;
- int i;
- struct epoll_event ev;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (datafd <= 0) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- for (i = 0; i < KNET_DATAFD_MAX; i++) {
- if ((knet_h->sockfd[i].in_use) &&
- (knet_h->sockfd[i].sockfd[0] == datafd)) {
- channel = i;
- break;
- }
- }
-
- if (channel < 0) {
- savederrno = EINVAL;
- err = -1;
- goto out_unlock;
- }
-
- if (!knet_h->sockfd[channel].has_error) {
- memset(&ev, 0, sizeof(struct epoll_event));
-
- if (epoll_ctl(knet_h->send_to_links_epollfd,
- EPOLL_CTL_DEL, knet_h->sockfd[channel].sockfd[knet_h->sockfd[channel].is_created], &ev)) {
- savederrno = errno;
- err = -1;
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to del datafd %d from linkfd epoll pool: %s",
- knet_h->sockfd[channel].sockfd[0], strerror(savederrno));
- goto out_unlock;
- }
- }
-
- if (knet_h->sockfd[channel].is_created) {
- _close_socketpair(knet_h, knet_h->sockfd[channel].sockfd);
- }
-
- memset(&knet_h->sockfd[channel], 0, sizeof(struct knet_sock));
-
-out_unlock:
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-int knet_handle_get_datafd(knet_handle_t knet_h, const int8_t channel, int *datafd)
-{
- int err = 0, savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if ((channel < 0) || (channel >= KNET_DATAFD_MAX)) {
- errno = EINVAL;
- return -1;
- }
-
- if (datafd == NULL) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- if (!knet_h->sockfd[channel].in_use) {
- savederrno = EINVAL;
- err = -1;
- goto out_unlock;
- }
-
- *datafd = knet_h->sockfd[channel].sockfd[0];
-
-out_unlock:
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-int knet_handle_get_channel(knet_handle_t knet_h, const int datafd, int8_t *channel)
-{
- int err = 0, savederrno = 0;
- int i;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (datafd <= 0) {
- errno = EINVAL;
- return -1;
- }
-
- if (channel == NULL) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- *channel = -1;
-
- for (i = 0; i < KNET_DATAFD_MAX; i++) {
- if ((knet_h->sockfd[i].in_use) &&
- (knet_h->sockfd[i].sockfd[0] == datafd)) {
- *channel = i;
- break;
- }
- }
-
- if (*channel < 0) {
- savederrno = EINVAL;
- err = -1;
- goto out_unlock;
- }
-
-out_unlock:
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-int knet_handle_enable_filter(knet_handle_t knet_h,
- void *dst_host_filter_fn_private_data,
- int (*dst_host_filter_fn) (
- void *private_data,
- const unsigned char *outdata,
- ssize_t outdata_len,
- uint8_t tx_rx,
- knet_node_id_t this_host_id,
- knet_node_id_t src_node_id,
- int8_t *channel,
- knet_node_id_t *dst_host_ids,
- size_t *dst_host_ids_entries))
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- knet_h->dst_host_filter_fn_private_data = dst_host_filter_fn_private_data;
- knet_h->dst_host_filter_fn = dst_host_filter_fn;
- if (knet_h->dst_host_filter_fn) {
- log_debug(knet_h, KNET_SUB_HANDLE, "dst_host_filter_fn enabled");
- } else {
- log_debug(knet_h, KNET_SUB_HANDLE, "dst_host_filter_fn disabled");
- }
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- errno = 0;
- return 0;
-}
-
-int knet_handle_setfwd(knet_handle_t knet_h, unsigned int enabled)
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (enabled > 1) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- if (enabled) {
- knet_h->enabled = enabled;
- log_debug(knet_h, KNET_SUB_HANDLE, "Data forwarding is enabled");
- } else {
- /*
- * notify TX and RX threads to flush the queues
- */
- if (set_thread_flush_queue(knet_h, KNET_THREAD_TX, KNET_THREAD_QUEUE_FLUSH) < 0) {
- log_debug(knet_h, KNET_SUB_HANDLE, "Unable to request queue flushing for TX thread");
- }
- if (set_thread_flush_queue(knet_h, KNET_THREAD_RX, KNET_THREAD_QUEUE_FLUSH) < 0) {
- log_debug(knet_h, KNET_SUB_HANDLE, "Unable to request queue flushing for RX thread");
- }
- }
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- /*
- * when disabling data forward, we need to give time to TX and RX
- * to flush the queues.
- *
- * the TX thread is the main leader here. When there is no more
- * data in the TX queue, we will also close traffic for RX.
- */
- if (!enabled) {
- /*
- * this usleep might be unnecessary, but wait_all_threads_flush_queue
- * adds extra locking delay.
- *
- * allow all threads to run free without extra locking interference
- * and then we switch to a more active wait in case the scheduler
- * has decided to delay one thread or another
- */
- usleep(KNET_THREADS_TIMERES * 2);
- wait_all_threads_flush_queue(knet_h);
-
- /*
- * all threads have done flushing the queue, we can stop data forwarding
- */
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
- knet_h->enabled = enabled;
- log_debug(knet_h, KNET_SUB_HANDLE, "Data forwarding is disabled");
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- }
-
- errno = 0;
- return 0;
-}
-
-int knet_handle_enable_access_lists(knet_handle_t knet_h, unsigned int enabled)
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (enabled > 1) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- knet_h->use_access_lists = enabled;
-
- if (enabled) {
- log_debug(knet_h, KNET_SUB_HANDLE, "Links access lists are enabled");
- } else {
- log_debug(knet_h, KNET_SUB_HANDLE, "Links access lists are disabled");
- }
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- errno = 0;
- return 0;
-}
-
-int knet_handle_pmtud_getfreq(knet_handle_t knet_h, unsigned int *interval)
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (!interval) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- *interval = knet_h->pmtud_interval;
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- errno = 0;
- return 0;
-}
-
-int knet_handle_pmtud_setfreq(knet_handle_t knet_h, unsigned int interval)
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if ((!interval) || (interval > 86400)) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- knet_h->pmtud_interval = interval;
- log_debug(knet_h, KNET_SUB_HANDLE, "PMTUd interval set to: %u seconds", interval);
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- errno = 0;
- return 0;
-}
-
-int knet_handle_enable_pmtud_notify(knet_handle_t knet_h,
- void *pmtud_notify_fn_private_data,
- void (*pmtud_notify_fn) (
- void *private_data,
- unsigned int data_mtu))
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- knet_h->pmtud_notify_fn_private_data = pmtud_notify_fn_private_data;
- knet_h->pmtud_notify_fn = pmtud_notify_fn;
- if (knet_h->pmtud_notify_fn) {
- log_debug(knet_h, KNET_SUB_HANDLE, "pmtud_notify_fn enabled");
- } else {
- log_debug(knet_h, KNET_SUB_HANDLE, "pmtud_notify_fn disabled");
- }
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- errno = 0;
- return 0;
-}
-
-int knet_handle_pmtud_set(knet_handle_t knet_h,
- unsigned int iface_mtu)
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (iface_mtu > KNET_PMTUD_SIZE_V4) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_PMTUD, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- log_info(knet_h, KNET_SUB_PMTUD, "MTU manually set to: %u", iface_mtu);
-
- knet_h->manual_mtu = iface_mtu;
-
- force_pmtud_run(knet_h, KNET_SUB_PMTUD, 0);
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- errno = 0;
- return 0;
-}
-
-int knet_handle_pmtud_get(knet_handle_t knet_h,
- unsigned int *data_mtu)
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (!data_mtu) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- *data_mtu = knet_h->data_mtu;
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- errno = 0;
- return 0;
-}
-
-static int _knet_handle_crypto_set_config(knet_handle_t knet_h,
- struct knet_handle_crypto_cfg *knet_handle_crypto_cfg,
- uint8_t config_num,
- uint8_t force)
-{
- int savederrno = 0;
- int err = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (!knet_handle_crypto_cfg) {
- errno = EINVAL;
- return -1;
- }
-
- if ((config_num < 1) || (config_num > KNET_MAX_CRYPTO_INSTANCES)) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- if ((knet_h->crypto_in_use_config == config_num) && (!force)) {
- savederrno = EBUSY;
- err = -1;
- goto exit_unlock;
- }
-
- if ((!strncmp("none", knet_handle_crypto_cfg->crypto_model, 4)) ||
- ((!strncmp("none", knet_handle_crypto_cfg->crypto_cipher_type, 4)) &&
- (!strncmp("none", knet_handle_crypto_cfg->crypto_hash_type, 4)))) {
- crypto_fini(knet_h, config_num);
- log_debug(knet_h, KNET_SUB_CRYPTO, "crypto config %u is not enabled", config_num);
- err = 0;
- goto exit_unlock;
- }
-
- if (knet_handle_crypto_cfg->private_key_len < KNET_MIN_KEY_LEN) {
- log_debug(knet_h, KNET_SUB_CRYPTO, "private key len too short for config %u (min %d): %u",
- config_num, KNET_MIN_KEY_LEN, knet_handle_crypto_cfg->private_key_len);
- savederrno = EINVAL;
- err = -1;
- goto exit_unlock;
- }
-
- if (knet_handle_crypto_cfg->private_key_len > KNET_MAX_KEY_LEN) {
- log_debug(knet_h, KNET_SUB_CRYPTO, "private key len too long for config %u (max %d): %u",
- config_num, KNET_MAX_KEY_LEN, knet_handle_crypto_cfg->private_key_len);
- savederrno = EINVAL;
- err = -1;
- goto exit_unlock;
- }
-
- err = crypto_init(knet_h, knet_handle_crypto_cfg, config_num);
-
- if (err) {
- err = -2;
- savederrno = errno;
- }
-
-exit_unlock:
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-int knet_handle_crypto_set_config(knet_handle_t knet_h,
- struct knet_handle_crypto_cfg *knet_handle_crypto_cfg,
- uint8_t config_num)
-{
- return _knet_handle_crypto_set_config(knet_h, knet_handle_crypto_cfg, config_num, 0);
-}
-
-int knet_handle_crypto_rx_clear_traffic(knet_handle_t knet_h,
- uint8_t value)
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (value > KNET_CRYPTO_RX_DISALLOW_CLEAR_TRAFFIC) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- knet_h->crypto_only = value;
- if (knet_h->crypto_only) {
- log_debug(knet_h, KNET_SUB_CRYPTO, "Only crypto traffic allowed for RX");
- } else {
- log_debug(knet_h, KNET_SUB_CRYPTO, "Both crypto and clear traffic allowed for RX");
- }
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- return 0;
-}
-
-int knet_handle_crypto_use_config(knet_handle_t knet_h,
- uint8_t config_num)
-{
- int savederrno = 0;
- int err = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (config_num > KNET_MAX_CRYPTO_INSTANCES) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- err = crypto_use_config(knet_h, config_num);
- savederrno = errno;
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-/*
- * compatibility wrapper for 1.x releases
- */
-int knet_handle_crypto(knet_handle_t knet_h, struct knet_handle_crypto_cfg *knet_handle_crypto_cfg)
-{
- int err = 0;
- uint8_t value;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- value = knet_h->crypto_only;
- /*
- * configure crypto in slot 1
- */
- err = _knet_handle_crypto_set_config(knet_h, knet_handle_crypto_cfg, 1, 1);
- if (err < 0) {
- return err;
- }
-
- if ((!strncmp("none", knet_handle_crypto_cfg->crypto_model, 4)) ||
- ((!strncmp("none", knet_handle_crypto_cfg->crypto_cipher_type, 4)) &&
- (!strncmp("none", knet_handle_crypto_cfg->crypto_hash_type, 4)))) {
- err = knet_handle_crypto_rx_clear_traffic(knet_h, KNET_CRYPTO_RX_ALLOW_CLEAR_TRAFFIC);
- if (err < 0) {
- return err;
- }
-
- /*
- * start using clear traffic
- */
- err = knet_handle_crypto_use_config(knet_h, 0);
- if (err < 0) {
- err = knet_handle_crypto_rx_clear_traffic(knet_h, value);
- if (err < 0) {
- /*
- * force attempt or things will go bad
- */
- knet_h->crypto_only = value;
- }
- }
- return err;
- } else {
- err = knet_handle_crypto_rx_clear_traffic(knet_h, KNET_CRYPTO_RX_DISALLOW_CLEAR_TRAFFIC);
- if (err < 0) {
- return err;
- }
-
- /*
- * start using crypto traffic
- */
- err = knet_handle_crypto_use_config(knet_h, 1);
- if (err < 0) {
- err = knet_handle_crypto_rx_clear_traffic(knet_h, value);
- if (err < 0) {
- /*
- * force attempt or things will go bad
- */
- knet_h->crypto_only = value;
- }
- }
- return err;
- }
-}
-
-int knet_handle_compress(knet_handle_t knet_h, struct knet_handle_compress_cfg *knet_handle_compress_cfg)
-{
- int savederrno = 0;
- int err = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (!knet_handle_compress_cfg) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- compress_fini(knet_h, 0);
- err = compress_cfg(knet_h, knet_handle_compress_cfg);
- savederrno = errno;
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-ssize_t knet_recv(knet_handle_t knet_h, char *buff, const size_t buff_len, const int8_t channel)
-{
- int savederrno = 0;
- ssize_t err = 0;
- struct iovec iov_in;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff == NULL) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff_len <= 0) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff_len > KNET_MAX_PACKET_SIZE) {
- errno = EINVAL;
- return -1;
- }
-
- if (channel < 0) {
- errno = EINVAL;
- return -1;
- }
-
- if (channel >= KNET_DATAFD_MAX) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- if (!knet_h->sockfd[channel].in_use) {
- savederrno = EINVAL;
- err = -1;
- goto out_unlock;
- }
-
- memset(&iov_in, 0, sizeof(iov_in));
- iov_in.iov_base = (void *)buff;
- iov_in.iov_len = buff_len;
-
- err = readv(knet_h->sockfd[channel].sockfd[0], &iov_in, 1);
- savederrno = errno;
-
-out_unlock:
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-ssize_t knet_send(knet_handle_t knet_h, const char *buff, const size_t buff_len, const int8_t channel)
-{
- int savederrno = 0;
- ssize_t err = 0;
- struct iovec iov_out[1];
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff == NULL) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff_len <= 0) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff_len > KNET_MAX_PACKET_SIZE) {
- errno = EINVAL;
- return -1;
- }
-
- if (channel < 0) {
- errno = EINVAL;
- return -1;
- }
-
- if (channel >= KNET_DATAFD_MAX) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- if (!knet_h->sockfd[channel].in_use) {
- savederrno = EINVAL;
- err = -1;
- goto out_unlock;
- }
-
- memset(iov_out, 0, sizeof(iov_out));
-
- iov_out[0].iov_base = (void *)buff;
- iov_out[0].iov_len = buff_len;
-
- err = writev(knet_h->sockfd[channel].sockfd[0], iov_out, 1);
- savederrno = errno;
-
-out_unlock:
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- errno = err ? savederrno : 0;
- return err;
-}
-
-int knet_handle_get_stats(knet_handle_t knet_h, struct knet_handle_stats *stats, size_t struct_size)
-{
- int err = 0, savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (!stats) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- savederrno = pthread_mutex_lock(&knet_h->handle_stats_mutex);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get mutex lock: %s",
- strerror(savederrno));
- err = -1;
- goto out_unlock;
- }
-
- if (struct_size > sizeof(struct knet_handle_stats)) {
- struct_size = sizeof(struct knet_handle_stats);
- }
-
- memmove(stats, &knet_h->stats, struct_size);
-
- /*
- * TX crypt stats only count the data packets sent, so add in the ping/pong/pmtud figures
- * RX is OK as it counts them before they are sorted.
- */
-
- stats->tx_crypt_packets += knet_h->stats_extra.tx_crypt_ping_packets +
- knet_h->stats_extra.tx_crypt_pong_packets +
- knet_h->stats_extra.tx_crypt_pmtu_packets +
- knet_h->stats_extra.tx_crypt_pmtu_reply_packets;
-
- /* Tell the caller our full size in case they have an old version */
- stats->size = sizeof(struct knet_handle_stats);
-
-out_unlock:
- pthread_mutex_unlock(&knet_h->handle_stats_mutex);
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- return err;
-}
-
-int knet_handle_clear_stats(knet_handle_t knet_h, int clear_option)
-{
- int savederrno = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (clear_option != KNET_CLEARSTATS_HANDLE_ONLY &&
- clear_option != KNET_CLEARSTATS_HANDLE_AND_LINK) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = get_global_wrlock(knet_h);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- memset(&knet_h->stats, 0, sizeof(struct knet_handle_stats));
- memset(&knet_h->stats_extra, 0, sizeof(struct knet_handle_stats_extra));
- if (clear_option == KNET_CLEARSTATS_HANDLE_AND_LINK) {
- _link_clear_stats(knet_h);
- }
-
- pthread_rwlock_unlock(&knet_h->global_rwlock);
- return 0;
-}
--- /dev/null
+/*
+ * Copyright (C) 2020 Red Hat, Inc. All rights reserved.
+ *
+ * Authors: Fabio M. Di Nitto <fabbione@kronosnet.org>
+ * Federico Simoncelli <fsimon@kronosnet.org>
+ *
+ * This software licensed under LGPL-2.0+
+ */
+
+#include "config.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <pthread.h>
+#include <sys/uio.h>
+
+#include "internals.h"
+#include "crypto.h"
+#include "links.h"
+#include "common.h"
+#include "transport_common.h"
+#include "logging.h"
+
+/*
+ * knet_handle_enable_sock_notify
+ *
+ * Register the callback used to report datafd errors to the caller.
+ * A NULL sock_notify_fn is rejected with EINVAL (the callback cannot be
+ * cleared through this API).  Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_enable_sock_notify(knet_handle_t knet_h,
+ void *sock_notify_fn_private_data,
+ void (*sock_notify_fn) (
+ void *private_data,
+ int datafd,
+ int8_t channel,
+ uint8_t tx_rx,
+ int error,
+ int errorno))
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (!sock_notify_fn) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ /* callback pointers are only ever swapped under the global write lock */
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ knet_h->sock_notify_fn_private_data = sock_notify_fn_private_data;
+ knet_h->sock_notify_fn = sock_notify_fn;
+ log_debug(knet_h, KNET_SUB_HANDLE, "sock_notify_fn enabled");
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ return 0;
+}
+
+/*
+ * knet_handle_add_datafd
+ *
+ * Attach a data fd to the handle.  On input, *datafd > 0 means "use this
+ * caller-supplied fd"; *datafd <= 0 means "create a socketpair for me and
+ * return one end in *datafd".  Likewise *channel < 0 requests automatic
+ * channel allocation, otherwise the requested slot must be free.
+ * Requires the sock_notify callback to be registered first.
+ * Returns 0 on success, -1 and errno (EINVAL/EEXIST/EBUSY/...) on error.
+ */
+int knet_handle_add_datafd(knet_handle_t knet_h, int *datafd, int8_t *channel)
+{
+ int err = 0, savederrno = 0;
+ int i;
+ struct epoll_event ev;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (datafd == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (channel == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (*channel >= KNET_DATAFD_MAX) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ if (!knet_h->sock_notify_fn) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Adding datafd requires sock notify callback enabled!");
+ savederrno = EINVAL;
+ err = -1;
+ goto out_unlock;
+ }
+
+ /* reject a caller-supplied fd that is already registered on any channel */
+ if (*datafd > 0) {
+ for (i = 0; i < KNET_DATAFD_MAX; i++) {
+ if ((knet_h->sockfd[i].in_use) && (knet_h->sockfd[i].sockfd[0] == *datafd)) {
+ log_err(knet_h, KNET_SUB_HANDLE, "requested datafd: %d already exist in index: %d", *datafd, i);
+ savederrno = EEXIST;
+ err = -1;
+ goto out_unlock;
+ }
+ }
+ }
+
+ /*
+ * auto allocate a channel
+ */
+ if (*channel < 0) {
+ for (i = 0; i < KNET_DATAFD_MAX; i++) {
+ if (!knet_h->sockfd[i].in_use) {
+ *channel = i;
+ break;
+ }
+ }
+ if (*channel < 0) {
+ /* no free slot found: *channel was never assigned above */
+ savederrno = EBUSY;
+ err = -1;
+ goto out_unlock;
+ }
+ } else {
+ if (knet_h->sockfd[*channel].in_use) {
+ savederrno = EBUSY;
+ err = -1;
+ goto out_unlock;
+ }
+ }
+
+ knet_h->sockfd[*channel].is_created = 0;
+ knet_h->sockfd[*channel].is_socket = 0;
+ knet_h->sockfd[*channel].has_error = 0;
+
+ if (*datafd > 0) {
+ int sockopt;
+ socklen_t sockoptlen = sizeof(sockopt);
+
+ if (_fdset_cloexec(*datafd)) {
+ savederrno = errno;
+ err = -1;
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to set CLOEXEC on datafd: %s",
+ strerror(savederrno));
+ goto out_unlock;
+ }
+
+ if (_fdset_nonblock(*datafd)) {
+ savederrno = errno;
+ err = -1;
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to set NONBLOCK on datafd: %s",
+ strerror(savederrno));
+ goto out_unlock;
+ }
+
+ knet_h->sockfd[*channel].sockfd[0] = *datafd;
+ knet_h->sockfd[*channel].sockfd[1] = 0;
+
+ /* SO_TYPE succeeding tells a socket apart from a pipe/other fd */
+ if (!getsockopt(knet_h->sockfd[*channel].sockfd[0], SOL_SOCKET, SO_TYPE, &sockopt, &sockoptlen)) {
+ knet_h->sockfd[*channel].is_socket = 1;
+ }
+ } else {
+ if (_init_socketpair(knet_h, knet_h->sockfd[*channel].sockfd)) {
+ savederrno = errno;
+ err = -1;
+ goto out_unlock;
+ }
+
+ knet_h->sockfd[*channel].is_created = 1;
+ knet_h->sockfd[*channel].is_socket = 1;
+ *datafd = knet_h->sockfd[*channel].sockfd[0];
+ }
+
+ /*
+ * epoll watches sockfd[is_created]: index 0 (the caller's fd) for a
+ * user-supplied fd, index 1 (knet's end of the socketpair) when the
+ * pair was created here and sockfd[0] was handed back to the caller.
+ */
+ memset(&ev, 0, sizeof(struct epoll_event));
+ ev.events = EPOLLIN;
+ ev.data.fd = knet_h->sockfd[*channel].sockfd[knet_h->sockfd[*channel].is_created];
+
+ if (epoll_ctl(knet_h->send_to_links_epollfd,
+ EPOLL_CTL_ADD, knet_h->sockfd[*channel].sockfd[knet_h->sockfd[*channel].is_created], &ev)) {
+ savederrno = errno;
+ err = -1;
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to add datafd %d to linkfd epoll pool: %s",
+ knet_h->sockfd[*channel].sockfd[knet_h->sockfd[*channel].is_created], strerror(savederrno));
+ if (knet_h->sockfd[*channel].is_created) {
+ /* only tear down fds we created; never close the caller's fd */
+ _close_socketpair(knet_h, knet_h->sockfd[*channel].sockfd);
+ }
+ goto out_unlock;
+ }
+
+ knet_h->sockfd[*channel].in_use = 1;
+
+out_unlock:
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ errno = err ? savederrno : 0;
+ return err;
+}
+
+/*
+ * knet_handle_remove_datafd
+ *
+ * Detach a previously added datafd: remove it from the epoll pool (skipped
+ * if the channel is already flagged has_error), close the socketpair if knet
+ * created it, and clear the channel slot.  EINVAL if datafd is not found.
+ * Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_remove_datafd(knet_handle_t knet_h, int datafd)
+{
+ int err = 0, savederrno = 0;
+ int8_t channel = -1;
+ int i;
+ struct epoll_event ev;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (datafd <= 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ /* map the fd back to its channel slot */
+ for (i = 0; i < KNET_DATAFD_MAX; i++) {
+ if ((knet_h->sockfd[i].in_use) &&
+ (knet_h->sockfd[i].sockfd[0] == datafd)) {
+ channel = i;
+ break;
+ }
+ }
+
+ if (channel < 0) {
+ savederrno = EINVAL;
+ err = -1;
+ goto out_unlock;
+ }
+
+ if (!knet_h->sockfd[channel].has_error) {
+ memset(&ev, 0, sizeof(struct epoll_event));
+
+ if (epoll_ctl(knet_h->send_to_links_epollfd,
+ EPOLL_CTL_DEL, knet_h->sockfd[channel].sockfd[knet_h->sockfd[channel].is_created], &ev)) {
+ savederrno = errno;
+ err = -1;
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to del datafd %d from linkfd epoll pool: %s",
+ knet_h->sockfd[channel].sockfd[0], strerror(savederrno));
+ goto out_unlock;
+ }
+ }
+
+ /* only close fds knet created; a caller-supplied fd stays open */
+ if (knet_h->sockfd[channel].is_created) {
+ _close_socketpair(knet_h, knet_h->sockfd[channel].sockfd);
+ }
+
+ memset(&knet_h->sockfd[channel], 0, sizeof(struct knet_sock));
+
+out_unlock:
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ errno = err ? savederrno : 0;
+ return err;
+}
+
+/*
+ * knet_handle_get_datafd
+ *
+ * Look up the datafd bound to a channel.  EINVAL if the channel is out of
+ * range or not in use.  Read-only: takes the global read lock.
+ * Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_get_datafd(knet_handle_t knet_h, const int8_t channel, int *datafd)
+{
+ int err = 0, savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if ((channel < 0) || (channel >= KNET_DATAFD_MAX)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (datafd == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ if (!knet_h->sockfd[channel].in_use) {
+ savederrno = EINVAL;
+ err = -1;
+ goto out_unlock;
+ }
+
+ /* sockfd[0] is always the caller-visible end */
+ *datafd = knet_h->sockfd[channel].sockfd[0];
+
+out_unlock:
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ errno = err ? savederrno : 0;
+ return err;
+}
+
+/*
+ * knet_handle_get_channel
+ *
+ * Reverse lookup: find the channel a datafd is bound to.  EINVAL if the fd
+ * is not registered.  Read-only: takes the global read lock.
+ * Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_get_channel(knet_handle_t knet_h, const int datafd, int8_t *channel)
+{
+ int err = 0, savederrno = 0;
+ int i;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (datafd <= 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (channel == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ *channel = -1;
+
+ for (i = 0; i < KNET_DATAFD_MAX; i++) {
+ if ((knet_h->sockfd[i].in_use) &&
+ (knet_h->sockfd[i].sockfd[0] == datafd)) {
+ *channel = i;
+ break;
+ }
+ }
+
+ if (*channel < 0) {
+ savederrno = EINVAL;
+ err = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ errno = err ? savederrno : 0;
+ return err;
+}
+
+/*
+ * knet_handle_enable_filter
+ *
+ * Set or clear (NULL is accepted here) the destination-host filter callback
+ * used to steer packets to hosts/channels.  Returns 0 on success, -1 and
+ * errno on error.
+ */
+int knet_handle_enable_filter(knet_handle_t knet_h,
+ void *dst_host_filter_fn_private_data,
+ int (*dst_host_filter_fn) (
+ void *private_data,
+ const unsigned char *outdata,
+ ssize_t outdata_len,
+ uint8_t tx_rx,
+ knet_node_id_t this_host_id,
+ knet_node_id_t src_node_id,
+ int8_t *channel,
+ knet_node_id_t *dst_host_ids,
+ size_t *dst_host_ids_entries))
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ knet_h->dst_host_filter_fn_private_data = dst_host_filter_fn_private_data;
+ knet_h->dst_host_filter_fn = dst_host_filter_fn;
+ if (knet_h->dst_host_filter_fn) {
+ log_debug(knet_h, KNET_SUB_HANDLE, "dst_host_filter_fn enabled");
+ } else {
+ log_debug(knet_h, KNET_SUB_HANDLE, "dst_host_filter_fn disabled");
+ }
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ errno = 0;
+ return 0;
+}
+
+/*
+ * knet_handle_setfwd
+ *
+ * Enable (1) or disable (0) data forwarding.  Enabling is immediate;
+ * disabling first asks the TX/RX threads to flush their queues, drops the
+ * lock so they can run, waits for the flush, then re-acquires the write
+ * lock to flip the flag.  Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_setfwd(knet_handle_t knet_h, unsigned int enabled)
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (enabled > 1) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ if (enabled) {
+ knet_h->enabled = enabled;
+ log_debug(knet_h, KNET_SUB_HANDLE, "Data forwarding is enabled");
+ } else {
+ /*
+ * notify TX and RX threads to flush the queues
+ */
+ if (set_thread_flush_queue(knet_h, KNET_THREAD_TX, KNET_THREAD_QUEUE_FLUSH) < 0) {
+ log_debug(knet_h, KNET_SUB_HANDLE, "Unable to request queue flushing for TX thread");
+ }
+ if (set_thread_flush_queue(knet_h, KNET_THREAD_RX, KNET_THREAD_QUEUE_FLUSH) < 0) {
+ log_debug(knet_h, KNET_SUB_HANDLE, "Unable to request queue flushing for RX thread");
+ }
+ }
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ /*
+ * when disabling data forward, we need to give time to TX and RX
+ * to flush the queues.
+ *
+ * the TX thread is the main leader here. When there is no more
+ * data in the TX queue, we will also close traffic for RX.
+ */
+ if (!enabled) {
+ /*
+ * this usleep might be unnecessary, but wait_all_threads_flush_queue
+ * adds extra locking delay.
+ *
+ * allow all threads to run free without extra locking interference
+ * and then we switch to a more active wait in case the scheduler
+ * has decided to delay one thread or another
+ */
+ usleep(KNET_THREADS_TIMERES * 2);
+ wait_all_threads_flush_queue(knet_h);
+
+ /*
+ * all threads have done flushing the queue, we can stop data forwarding
+ */
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+ knet_h->enabled = enabled;
+ log_debug(knet_h, KNET_SUB_HANDLE, "Data forwarding is disabled");
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ }
+
+ errno = 0;
+ return 0;
+}
+
+/*
+ * knet_handle_get_stats
+ *
+ * Copy handle-level statistics into the caller's buffer.  struct_size is
+ * clamped down to sizeof(struct knet_handle_stats) so newer callers against
+ * an older library are safe.  Returns 0 on success, -1 on error.
+ */
+int knet_handle_get_stats(knet_handle_t knet_h, struct knet_handle_stats *stats, size_t struct_size)
+{
+ int err = 0, savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (!stats) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ /* stats and stats_extra are updated under this mutex by the data paths */
+ savederrno = pthread_mutex_lock(&knet_h->handle_stats_mutex);
+ if (savederrno) {
+ /* NOTE(review): errno is not set from savederrno on this path — confirm intended */
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get mutex lock: %s",
+ strerror(savederrno));
+ err = -1;
+ goto out_unlock;
+ }
+
+ if (struct_size > sizeof(struct knet_handle_stats)) {
+ struct_size = sizeof(struct knet_handle_stats);
+ }
+
+ memmove(stats, &knet_h->stats, struct_size);
+
+ /*
+ * TX crypt stats only count the data packets sent, so add in the ping/pong/pmtud figures
+ * RX is OK as it counts them before they are sorted.
+ */
+
+ /*
+ * NOTE(review): the two writes below are not bounded by struct_size;
+ * presumably callers always pass a buffer covering at least these
+ * fields — confirm against the public API contract.
+ */
+ stats->tx_crypt_packets += knet_h->stats_extra.tx_crypt_ping_packets +
+ knet_h->stats_extra.tx_crypt_pong_packets +
+ knet_h->stats_extra.tx_crypt_pmtu_packets +
+ knet_h->stats_extra.tx_crypt_pmtu_reply_packets;
+
+ /* Tell the caller our full size in case they have an old version */
+ stats->size = sizeof(struct knet_handle_stats);
+
+out_unlock:
+ pthread_mutex_unlock(&knet_h->handle_stats_mutex);
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ return err;
+}
+
+/*
+ * knet_handle_clear_stats
+ *
+ * Zero the handle statistics; with KNET_CLEARSTATS_HANDLE_AND_LINK also
+ * clear the per-link counters.  Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_clear_stats(knet_handle_t knet_h, int clear_option)
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (clear_option != KNET_CLEARSTATS_HANDLE_ONLY &&
+ clear_option != KNET_CLEARSTATS_HANDLE_AND_LINK) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ memset(&knet_h->stats, 0, sizeof(struct knet_handle_stats));
+ memset(&knet_h->stats_extra, 0, sizeof(struct knet_handle_stats_extra));
+ if (clear_option == KNET_CLEARSTATS_HANDLE_AND_LINK) {
+ _link_clear_stats(knet_h);
+ }
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ return 0;
+}
+
+/*
+ * knet_handle_enable_access_lists
+ *
+ * Toggle (0/1) link access-list enforcement for the whole handle.
+ * Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_enable_access_lists(knet_handle_t knet_h, unsigned int enabled)
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (enabled > 1) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ knet_h->use_access_lists = enabled;
+
+ if (enabled) {
+ log_debug(knet_h, KNET_SUB_HANDLE, "Links access lists are enabled");
+ } else {
+ log_debug(knet_h, KNET_SUB_HANDLE, "Links access lists are disabled");
+ }
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ errno = 0;
+ return 0;
+}
return NULL;
}
+
+/*
+ * knet_handle_pmtud_getfreq
+ *
+ * Read the current PMTUd polling interval (seconds) into *interval.
+ * Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_pmtud_getfreq(knet_handle_t knet_h, unsigned int *interval)
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (!interval) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ *interval = knet_h->pmtud_interval;
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ errno = 0;
+ return 0;
+}
+
+/*
+ * knet_handle_pmtud_setfreq
+ *
+ * Set the PMTUd polling interval in seconds; valid range is 1..86400
+ * (one day).  Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_pmtud_setfreq(knet_handle_t knet_h, unsigned int interval)
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if ((!interval) || (interval > 86400)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ knet_h->pmtud_interval = interval;
+ log_debug(knet_h, KNET_SUB_HANDLE, "PMTUd interval set to: %u seconds", interval);
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ errno = 0;
+ return 0;
+}
+
+/*
+ * knet_handle_enable_pmtud_notify
+ *
+ * Set or clear (NULL accepted) the callback invoked when the discovered
+ * data MTU changes.  Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_enable_pmtud_notify(knet_handle_t knet_h,
+ void *pmtud_notify_fn_private_data,
+ void (*pmtud_notify_fn) (
+ void *private_data,
+ unsigned int data_mtu))
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = get_global_wrlock(knet_h);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ knet_h->pmtud_notify_fn_private_data = pmtud_notify_fn_private_data;
+ knet_h->pmtud_notify_fn = pmtud_notify_fn;
+ if (knet_h->pmtud_notify_fn) {
+ log_debug(knet_h, KNET_SUB_HANDLE, "pmtud_notify_fn enabled");
+ } else {
+ log_debug(knet_h, KNET_SUB_HANDLE, "pmtud_notify_fn disabled");
+ }
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ errno = 0;
+ return 0;
+}
+
+/*
+ * knet_handle_pmtud_set
+ *
+ * Manually pin the interface MTU (0 re-enables automatic discovery, any
+ * value above KNET_PMTUD_SIZE_V4 is rejected) and kick a PMTUd run.
+ * Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_pmtud_set(knet_handle_t knet_h,
+ unsigned int iface_mtu)
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (iface_mtu > KNET_PMTUD_SIZE_V4) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ /*
+ * NOTE(review): this takes the READ lock yet writes manual_mtu below,
+ * unlike the other setters which take the write lock — confirm this
+ * is intentional (e.g. to avoid blocking against the PMTUd thread).
+ */
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_PMTUD, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ log_info(knet_h, KNET_SUB_PMTUD, "MTU manually set to: %u", iface_mtu);
+
+ knet_h->manual_mtu = iface_mtu;
+
+ force_pmtud_run(knet_h, KNET_SUB_PMTUD, 0);
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ errno = 0;
+ return 0;
+}
+
+/*
+ * knet_handle_pmtud_get
+ *
+ * Read the currently discovered data MTU into *data_mtu.
+ * Returns 0 on success, -1 and errno on error.
+ */
+int knet_handle_pmtud_get(knet_handle_t knet_h,
+ unsigned int *data_mtu)
+{
+ int savederrno = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (!data_mtu) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ *data_mtu = knet_h->data_mtu;
+
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ errno = 0;
+ return 0;
+}
return NULL;
}
+
+/*
+ * knet_recv
+ *
+ * Read up to buff_len bytes of decoded data from a channel's datafd via
+ * readv().  Returns the readv() result (bytes read, or -1 with errno),
+ * or -1/EINVAL on bad arguments or an unused channel.
+ */
+ssize_t knet_recv(knet_handle_t knet_h, char *buff, const size_t buff_len, const int8_t channel)
+{
+ int savederrno = 0;
+ ssize_t err = 0;
+ struct iovec iov_in;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (buff == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ /* buff_len is size_t (unsigned): this check only rejects 0 */
+ if (buff_len <= 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (buff_len > KNET_MAX_PACKET_SIZE) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (channel < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (channel >= KNET_DATAFD_MAX) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ if (!knet_h->sockfd[channel].in_use) {
+ savederrno = EINVAL;
+ err = -1;
+ goto out_unlock;
+ }
+
+ memset(&iov_in, 0, sizeof(iov_in));
+ iov_in.iov_base = (void *)buff;
+ iov_in.iov_len = buff_len;
+
+ /* read the caller-visible end of the channel */
+ err = readv(knet_h->sockfd[channel].sockfd[0], &iov_in, 1);
+ savederrno = errno;
+
+out_unlock:
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ /* a 0-byte read reports errno = 0 */
+ errno = err ? savederrno : 0;
+ return err;
+}
return err;
}
-int knet_send_sync(knet_handle_t knet_h, const char *buff, const size_t buff_len, const int8_t channel)
-{
- int savederrno = 0, err = 0;
-
- if (!knet_h) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff == NULL) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff_len <= 0) {
- errno = EINVAL;
- return -1;
- }
-
- if (buff_len > KNET_MAX_PACKET_SIZE) {
- errno = EINVAL;
- return -1;
- }
-
- if (channel < 0) {
- errno = EINVAL;
- return -1;
- }
-
- if (channel >= KNET_DATAFD_MAX) {
- errno = EINVAL;
- return -1;
- }
-
- savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_TX, "Unable to get read lock: %s",
- strerror(savederrno));
- errno = savederrno;
- return -1;
- }
-
- if (!knet_h->sockfd[channel].in_use) {
- savederrno = EINVAL;
- err = -1;
- goto out;
- }
-
- savederrno = pthread_mutex_lock(&knet_h->tx_mutex);
- if (savederrno) {
- log_err(knet_h, KNET_SUB_TX, "Unable to get TX mutex lock: %s",
- strerror(savederrno));
- err = -1;
- goto out;
- }
-
- knet_h->recv_from_sock_buf->kh_type = KNET_HEADER_TYPE_DATA;
- memmove(knet_h->recv_from_sock_buf->khp_data_userdata, buff, buff_len);
- err = _parse_recv_from_sock(knet_h, buff_len, channel, 1);
- savederrno = errno;
-
- pthread_mutex_unlock(&knet_h->tx_mutex);
-
-out:
- pthread_rwlock_unlock(&knet_h->global_rwlock);
-
- errno = err ? savederrno : 0;
- return err;
-}
-
static void _handle_send_to_links(knet_handle_t knet_h, struct msghdr *msg, int sockfd, int8_t channel, int type)
{
ssize_t inlen = 0;
return NULL;
}
+
+/*
+ * knet_send_sync
+ *
+ * Synchronous send: bypass the datafd and inject the buffer straight into
+ * the TX path (under the TX mutex) so the caller learns the delivery result
+ * immediately.  Returns the _parse_recv_from_sock() result, or -1 and errno
+ * on bad arguments or lock failure.
+ */
+int knet_send_sync(knet_handle_t knet_h, const char *buff, const size_t buff_len, const int8_t channel)
+{
+ int savederrno = 0, err = 0;
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (buff == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (buff_len <= 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (buff_len > KNET_MAX_PACKET_SIZE) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (channel < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (channel >= KNET_DATAFD_MAX) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_TX, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ if (!knet_h->sockfd[channel].in_use) {
+ savederrno = EINVAL;
+ err = -1;
+ goto out;
+ }
+
+ /* recv_from_sock_buf is shared with the TX thread; serialize access */
+ savederrno = pthread_mutex_lock(&knet_h->tx_mutex);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_TX, "Unable to get TX mutex lock: %s",
+ strerror(savederrno));
+ err = -1;
+ goto out;
+ }
+
+ knet_h->recv_from_sock_buf->kh_type = KNET_HEADER_TYPE_DATA;
+ memmove(knet_h->recv_from_sock_buf->khp_data_userdata, buff, buff_len);
+ /* final arg 1 = synchronous transmission */
+ err = _parse_recv_from_sock(knet_h, buff_len, channel, 1);
+ savederrno = errno;
+
+ pthread_mutex_unlock(&knet_h->tx_mutex);
+
+out:
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+
+ errno = err ? savederrno : 0;
+ return err;
+}
+
+/*
+ * knet_send
+ *
+ * Asynchronous send: write the buffer into the channel's datafd via
+ * writev(); the TX thread picks it up from the other end.  Returns the
+ * writev() result (bytes queued, or -1 with errno), or -1/EINVAL on bad
+ * arguments or an unused channel.
+ */
+ssize_t knet_send(knet_handle_t knet_h, const char *buff, const size_t buff_len, const int8_t channel)
+{
+ int savederrno = 0;
+ ssize_t err = 0;
+ struct iovec iov_out[1];
+
+ if (!knet_h) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (buff == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (buff_len <= 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (buff_len > KNET_MAX_PACKET_SIZE) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (channel < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (channel >= KNET_DATAFD_MAX) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
+ if (savederrno) {
+ log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
+ strerror(savederrno));
+ errno = savederrno;
+ return -1;
+ }
+
+ if (!knet_h->sockfd[channel].in_use) {
+ savederrno = EINVAL;
+ err = -1;
+ goto out_unlock;
+ }
+
+ memset(iov_out, 0, sizeof(iov_out));
+
+ iov_out[0].iov_base = (void *)buff;
+ iov_out[0].iov_len = buff_len;
+
+ /* write to the caller-visible end; the TX thread reads the other side */
+ err = writev(knet_h->sockfd[channel].sockfd[0], iov_out, 1);
+ savederrno = errno;
+
+out_unlock:
+ pthread_rwlock_unlock(&knet_h->global_rwlock);
+ errno = err ? savederrno : 0;
+ return err;
+}