/* Returns non-zero if every mbuf belonging to 'mp' has been returned to
 * the mempool, i.e. the mempool is safe to consider for freeing.
 * Caller must hold dpdk_mp_mutex. */
static int
dpdk_mp_full(const struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex)
{
    /* At this point we want to know if all the mbufs are back
     * in the mempool. rte_mempool_full() is not atomic but it's
     * the best available and as we are no longer requesting mbufs
     * from the mempool, it means mbufs will not move from
     * 'mempool ring' --> 'mempool cache'. In rte_mempool_full()
     * the ring is counted before caches, so we won't get false
     * positives in this use case and we handle false negatives.
     *
     * If future implementations of rte_mempool_full() were to change
     * it could be possible for a false positive. Even that would
     * likely be ok, as there are additional checks during mempool
     * freeing but it would make things racey.
     */
    return rte_mempool_full(mp);
}
/* Free unused mempools. */