IB/mlx4: Fix creation of kernel QP with max number of send s/g entries
author    Roland Dreier <rolandd@cisco.com>
          Tue, 20 May 2008 21:00:02 +0000 (14:00 -0700)
committer Roland Dreier <rolandd@cisco.com>
          Tue, 20 May 2008 21:00:02 +0000 (14:00 -0700)
When creating a kernel QP where the consumer asked for a send queue
with lots of scatter/gather entries, set_kernel_sq_size() incorrectly
returned an error if the send queue stride was larger than the
hardware's maximum send work request descriptor size.  This is not a
problem; the only issue is to make sure that the actual descriptors
used do not overflow the maximum descriptor size, so check this instead.
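
For illustration, a minimal standalone sketch (not the driver code; the
segment sizes and the 1008-byte limit are hypothetical stand-ins) of how
a power-of-two stride can exceed the hardware descriptor limit even
though the descriptor itself fits:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's segment sizes; the real
 * values come from sizeof(struct mlx4_wqe_data_seg) and
 * sizeof(struct mlx4_wqe_inline_seg). */
#define DATA_SEG_SZ    16
#define INLINE_SEG_SZ   4

static int max_int(int a, int b) { return a > b ? a : b; }

/* The actual descriptor size: this is what must fit in max_sq_desc_sz. */
static int sq_desc_size(int max_send_sge, int max_inline_data, int overhead)
{
        return max_int(max_send_sge * DATA_SEG_SZ,
                       max_inline_data + INLINE_SEG_SZ) + overhead;
}

int main(void)
{
        int max_sq_desc_sz = 1008;        /* hypothetical hardware limit */
        int s = sq_desc_size(32, 0, 64);  /* 32 s/g entries, 64 bytes overhead */
        int stride = 1;

        while (stride < s)                /* roundup_pow_of_two(s) */
                stride <<= 1;

        /* Here s = 576 and stride = 1024: the old check rejected the QP
         * because the stride exceeds the limit, even though the
         * descriptor itself fits comfortably. */
        printf("desc size = %d, stride = %d, limit = %d\n",
               s, stride, max_sq_desc_sz);
        printf("old check fails: %s, new check fails: %s\n",
               stride > max_sq_desc_sz ? "yes" : "no",
               s > max_sq_desc_sz ? "yes" : "no");
        return 0;
}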

Clamp the returned max_send_sge value to be no bigger than what
query_device returns for the max_sge to avoid confusing hapless users,
even if the hardware is capable of handling a few more s/g entries.
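
A minimal standalone sketch of the clamping, with hypothetical cap
values; the real code in the last hunk takes the min against
dev->dev->caps.max_sq_sg and max_rq_sg, mirroring what mlx4's
query_device reports as max_sge:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        /* Hypothetical values: what the SQ layout could hold vs. the
         * device-wide limits reported via query_device's max_sge. */
        int sq_max_gs = 34;   /* qp->sq.max_gs after sizing the queue */
        int max_sq_sg = 32;   /* device cap for send s/g entries */
        int max_rq_sg = 32;   /* device cap for receive s/g entries */

        /* Never advertise more s/g entries than query_device does. */
        int max_send_sge = min_int(sq_max_gs,
                                   min_int(max_sq_sg, max_rq_sg));

        printf("advertised max_send_sge = %d\n", max_send_sge);
        return 0;
}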

This bug caused NFS/RDMA mounts to fail when the server adapter used
the mlx4 driver.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/mlx4/qp.c

index cec030e118d1b25722c0f6af12e8bb557879d5f7..a80df22deae8672a79de7e212014d765d72cc3f4 100644
@@ -333,6 +333,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
                send_wqe_overhead(type, qp->flags);
 
+       if (s > dev->dev->caps.max_sq_desc_sz)
+               return -EINVAL;
+
        /*
         * Hermon supports shrinking WQEs, such that a single work
         * request can include multiple units of 1 << wqe_shift.  This
@@ -372,9 +375,6 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
 
        for (;;) {
-               if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz)
-                       return -EINVAL;
-
                qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
 
                /*
@@ -395,7 +395,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                ++qp->sq.wqe_shift;
        }
 
-       qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
+       qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
+                            (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
                         send_wqe_overhead(type, qp->flags)) /
                sizeof (struct mlx4_wqe_data_seg);
 
@@ -411,7 +412,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 
        cap->max_send_wr  = qp->sq.max_post =
                (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
-       cap->max_send_sge = qp->sq.max_gs;
+       cap->max_send_sge = min(qp->sq.max_gs,
+                               min(dev->dev->caps.max_sq_sg,
+                                   dev->dev->caps.max_rq_sg));
        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;