IB/hfi1: Don't remove RB entry when not needed.
author    Sebastian Sanchez <sebastian.sanchez@intel.com>
          Fri, 26 May 2017 12:35:12 +0000 (05:35 -0700)
committer Doug Ledford <dledford@redhat.com>
          Tue, 27 Jun 2017 20:56:33 +0000 (16:56 -0400)
An RB tree is used for the SDMA pinning cache. Cache
entries are extracted from and reinserted into the
tree when their address range changes. However, if
the address range for an entry doesn't change,
deleting the entry from the RB tree is not necessary.

This affects performance since the tree needs to be
rebalanced for each reinsertion, and this happens in
the hot path. Optimize the RB search by not removing
entries when removal isn't needed.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/hfi1/mmu_rb.c
drivers/infiniband/hw/hfi1/mmu_rb.h
drivers/infiniband/hw/hfi1/user_sdma.c

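The key change is the new hfi1_mmu_rb_remove_unless_exact() helper: on an
exact address-range hit the cached entry is left in the tree (and on the LRU
list) and the caller only takes another reference, while an inexact hit is
still removed so the buffer can be re-pinned and reinserted. The stand-alone
sketch below illustrates that pattern outside the driver; the struct and
function names are illustrative, a flat array stands in for the interval RB
tree, and locking is omitted.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the driver's cache entry (illustrative only). */
struct cache_node {
	unsigned long addr;
	unsigned long len;
	int refcount;
	bool in_cache;
};

#define NR_NODES 4
static struct cache_node cache[NR_NODES] = {
	{ .addr = 0x1000, .len = 0x2000, .refcount = 1, .in_cache = true },
	{ .addr = 0x8000, .len = 0x1000, .refcount = 1, .in_cache = true },
};

/* Find a cached entry overlapping [addr, addr + len). */
static struct cache_node *cache_search(unsigned long addr, unsigned long len)
{
	for (int i = 0; i < NR_NODES; i++) {
		struct cache_node *n = &cache[i];

		/* overlap test, like the interval-tree search */
		if (n->in_cache && addr < n->addr + n->len &&
		    n->addr < addr + len)
			return n;
	}
	return NULL;
}

/*
 * Same contract as the new helper: *out gets the matching entry (if any);
 * the return value says whether it was removed.  An exact-range hit stays
 * cached, so no removal/reinsertion (and no rebalancing) is needed.
 */
static bool remove_unless_exact(unsigned long addr, unsigned long len,
				struct cache_node **out)
{
	struct cache_node *n = cache_search(addr, len);
	bool removed = false;

	if (n && !(n->addr == addr && n->len == len)) {
		n->in_cache = false;	/* caller re-pins and reinserts */
		removed = true;
	}
	*out = n;
	return removed;
}

int main(void)
{
	struct cache_node *n;

	/* Exact hit: keep the entry cached and just take a reference. */
	if (!remove_unless_exact(0x1000, 0x2000, &n) && n)
		n->refcount++;
	printf("exact hit kept in cache, refcount now %d\n", n->refcount);

	/* Inexact hit: pull the entry so a larger range can be pinned. */
	if (remove_unless_exact(0x8000, 0x3000, &n))
		printf("inexact hit removed, old len 0x%lx\n", n->len);

	return 0;
}

Returning a bool alongside the looked-up node, rather than just the node as
hfi1_mmu_rb_extract() did, is what lets the caller in pin_vector_pages()
distinguish "reuse as-is" from "removed, re-pin", as the last hunk of the
diff shows.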
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index ccbf52c8ff6f037a485060e9d78f66c3b7fe79e6..d41fd87a39f26a2d18070754d1b579039018463e 100644
@@ -217,21 +217,27 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
        return node;
 }
 
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
-                                       unsigned long addr, unsigned long len)
+bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+                                    unsigned long addr, unsigned long len,
+                                    struct mmu_rb_node **rb_node)
 {
        struct mmu_rb_node *node;
        unsigned long flags;
+       bool ret = false;
 
        spin_lock_irqsave(&handler->lock, flags);
        node = __mmu_rb_search(handler, addr, len);
        if (node) {
+               if (node->addr == addr && node->len == len)
+                       goto unlock;
                __mmu_int_rb_remove(node, &handler->root);
                list_del(&node->list); /* remove from LRU list */
+               ret = true;
        }
+unlock:
        spin_unlock_irqrestore(&handler->lock, flags);
-
-       return node;
+       *rb_node = node;
+       return ret;
 }
 
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 754f6ebf13fb1ac61d42dee6f2a31c726d98d42e..f04cec1e99d11a2d1edb2640667a9156ccca3828 100644
@@ -81,7 +81,8 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
 void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
                        struct mmu_rb_node *mnode);
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
-                                       unsigned long addr, unsigned long len);
+bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+                                    unsigned long addr, unsigned long len,
+                                    struct mmu_rb_node **rb_node);
 
 #endif /* _HFI1_MMU_RB_H */
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 16fd519216dc19a6eede103f55f127bd3fd7eff2..79450cf2a3d5417d9c20c70e1417b5715ae29002 100644
@@ -1165,14 +1165,23 @@ static int pin_vector_pages(struct user_sdma_request *req,
        struct hfi1_user_sdma_pkt_q *pq = req->pq;
        struct sdma_mmu_node *node = NULL;
        struct mmu_rb_node *rb_node;
-
-       rb_node = hfi1_mmu_rb_extract(pq->handler,
-                                     (unsigned long)iovec->iov.iov_base,
-                                     iovec->iov.iov_len);
-       if (rb_node)
+       bool extracted;
+
+       extracted =
+               hfi1_mmu_rb_remove_unless_exact(pq->handler,
+                                               (unsigned long)
+                                               iovec->iov.iov_base,
+                                               iovec->iov.iov_len, &rb_node);
+       if (rb_node) {
                node = container_of(rb_node, struct sdma_mmu_node, rb);
-       else
-               rb_node = NULL;
+               if (!extracted) {
+                       atomic_inc(&node->refcount);
+                       iovec->pages = node->pages;
+                       iovec->npages = node->npages;
+                       iovec->node = node;
+                       return 0;
+               }
+       }
 
        if (!node) {
                node = kzalloc(sizeof(*node), GFP_KERNEL);