+static int
+dpdk_mp_full(const struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex)
+{
+    unsigned count_before;
+
+    /* rte_mempool_full() is not guaranteed to be atomic: mbufs may migrate
+     * from a mempool cache to the mempool ring while it executes.  Nothing
+     * takes mbufs from this mempool at this point, so we can get a stable
+     * answer by sampling the ring count up front and requiring that it is
+     * unchanged after the fullness check.
+     */
+    count_before = rte_mempool_ops_get_count(mp);
+
+    return rte_mempool_full(mp)
+           && rte_mempool_ops_get_count(mp) == count_before;
+}
+
+/* Reclaim mempools that no longer have any users.  A mempool is released
+ * only when its refcount is zero and every mbuf has been returned to it
+ * (the pool is full), so no in-flight packet can still reference it. */
+static void
+dpdk_mp_sweep(void) OVS_REQUIRES(dpdk_mp_mutex)
+{
+    struct dpdk_mp *entry, *next_entry;
+
+    LIST_FOR_EACH_SAFE (entry, next_entry, list_node, &dpdk_mp_list) {
+        if (entry->refcount || !dpdk_mp_full(entry->mp)) {
+            continue;
+        }
+        ovs_list_remove(&entry->list_node);
+        rte_mempool_free(entry->mp);
+        rte_free(entry);
+    }
+}
+