goto restart;
}
-static int _handle_check_pmtud(knet_handle_t knet_h, struct knet_host *dst_host, struct knet_link *dst_link, unsigned int *min_mtu)
+static int _handle_check_pmtud(knet_handle_t knet_h, struct knet_host *dst_host, struct knet_link *dst_link, unsigned int *min_mtu, int force_run)
{
uint8_t saved_valid_pmtud;
unsigned int saved_pmtud;
struct timespec clock_now;
unsigned long long diff_pmtud, interval;
- interval = knet_h->pmtud_interval * 1000000000llu; /* nanoseconds */
+ if (!force_run) {
+ interval = knet_h->pmtud_interval * 1000000000llu; /* nanoseconds */
- if (clock_gettime(CLOCK_MONOTONIC, &clock_now) != 0) {
- log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get monotonic clock");
- return 0;
- }
+ if (clock_gettime(CLOCK_MONOTONIC, &clock_now) != 0) {
+ log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get monotonic clock");
+ return 0;
+ }
- timespec_diff(dst_link->pmtud_last, clock_now, &diff_pmtud);
+ timespec_diff(dst_link->pmtud_last, clock_now, &diff_pmtud);
- if (diff_pmtud < interval) {
- *min_mtu = dst_link->status.mtu;
- return dst_link->has_valid_mtu;
+ if (diff_pmtud < interval) {
+ *min_mtu = dst_link->status.mtu;
+ return dst_link->has_valid_mtu;
+ }
}
switch (dst_link->dst_addr.ss_family) {
unsigned int min_mtu, have_mtu;
unsigned int lower_mtu;
int link_has_mtu;
+ int force_run = 0;
knet_h->data_mtu = KNET_PMTUD_MIN_MTU_V4 - KNET_HEADER_ALL_SIZE - knet_h->sec_header_size;
continue;
}
knet_h->pmtud_abort = 0;
+ knet_h->pmtud_running = 1;
+ force_run = knet_h->pmtud_forcerun;
+ knet_h->pmtud_forcerun = 0;
pthread_mutex_unlock(&knet_h->pmtud_mutex);
+ if (force_run) {
+ log_debug(knet_h, KNET_SUB_PMTUD, "PMTUd request to rerun has been received");
+ }
+
if (pthread_rwlock_rdlock(&knet_h->global_rwlock) != 0) {
log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get read lock");
continue;
(dst_link->status.dynconnected != 1)))
continue;
- link_has_mtu = _handle_check_pmtud(knet_h, dst_host, dst_link, &min_mtu);
+ link_has_mtu = _handle_check_pmtud(knet_h, dst_host, dst_link, &min_mtu, force_run);
if (errno == EDEADLK) {
goto out_unlock;
}
}
out_unlock:
pthread_rwlock_unlock(&knet_h->global_rwlock);
+ if (pthread_mutex_lock(&knet_h->pmtud_mutex) != 0) {
+ log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
+ } else {
+ knet_h->pmtud_running = 0;
+ pthread_mutex_unlock(&knet_h->pmtud_mutex);
+ }
}
return NULL;
case 1: /* local source (EMSGSIZE) */
if (sock_err->ee_errno == EMSGSIZE) {
if (pthread_mutex_lock(&knet_h->kmtu_mutex) != 0) {
+ log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Unable to get mutex lock");
knet_h->kernel_mtu = 0;
+ break;
} else {
knet_h->kernel_mtu = sock_err->ee_info;
pthread_mutex_unlock(&knet_h->kmtu_mutex);
}
+
+ /*
+ * we can only try to take a lock here. This part of the code
+ * can be invoked by any thread, including PMTUd that is already
+ * holding a lock at that stage.
+ * If PMTUd is holding the lock, most likely it is already running
+ * and we don't need to notify it back.
+ */
+ if (!pthread_mutex_trylock(&knet_h->pmtud_mutex)) {
+ if (!knet_h->pmtud_running) {
+ log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Notifying PMTUd to rerun");
+ knet_h->pmtud_forcerun = 1;
+ }
+ pthread_mutex_unlock(&knet_h->pmtud_mutex);
+ }
}
/*
* those errors are way too noisy