/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

static void rvt_rc_timeout(struct timer_list *t);
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01: 0.01 */
	20,     /* 02: 0.02 */
	30,     /* 03: 0.03 */
	40,     /* 04: 0.04 */
	60,     /* 05: 0.06 */
	80,     /* 06: 0.08 */
	120,    /* 07: 0.12 */
	160,    /* 08: 0.16 */
	240,    /* 09: 0.24 */
	320,    /* 0A: 0.32 */
	480,    /* 0B: 0.48 */
	640,    /* 0C: 0.64 */
	960,    /* 0D: 0.96 */
	1280,   /* 0E: 1.28 */
	1920,   /* 0F: 1.92 */
	2560,   /* 10: 2.56 */
	3840,   /* 11: 3.84 */
	5120,   /* 12: 5.12 */
	7680,   /* 13: 7.68 */
	10240,  /* 14: 10.24 */
	15360,  /* 15: 15.36 */
	20480,  /* 16: 20.48 */
	30720,  /* 17: 30.72 */
	40960,  /* 18: 40.96 */
	61440,  /* 19: 61.44 */
	81920,  /* 1A: 81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
	/* assume that the boot CPU value is universal for all CPUs */
	return boot_cpu_data.x86_cache_size;
}
/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
	/*
	 * Use the only available X64 cacheless copy.  Add a __user cast
	 * to quiet sparse.  The src argument is already in the kernel so
	 * there are no security issues.  The extra fault recovery machinery
	 * is not invoked.
	 */
	__copy_user_nocache(dst, (void __user *)src, n, 0);
}
void rvt_wss_exit(struct rvt_dev_info *rdi)
{
	struct rvt_wss *wss = rdi->wss;

	if (!wss)
		return;

	/* coded to handle partially initialized and repeat callers */
	kfree(wss->entries);
	wss->entries = NULL;
	kfree(rdi->wss);
	rdi->wss = NULL;
}
/**
 * rvt_wss_init - Init wss data structures
 * @rdi: rvt dev struct
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
	unsigned int wss_threshold = rdi->dparms.wss_threshold;
	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;
	struct rvt_wss *wss;
	int node = rdi->dparms.node;

	if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
		rdi->wss = NULL;
		return 0;
	}

	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
	if (!rdi->wss)
		return -ENOMEM;
	wss = rdi->wss;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;

	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;

	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size.  LLC size is in KiB.
	 */
	llc_size = rvt_wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss->pages_mask = table_bits - 1;
	wss->num_entries = table_bits / BITS_PER_LONG;

	wss->threshold = (llc_bits * wss_threshold) / 100;
	if (wss->threshold == 0)
		wss->threshold = 1;

	wss->clean_period = wss_clean_period;
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
				    GFP_KERNEL, node);
	if (!wss->entries) {
		rvt_wss_exit(rdi);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss->clean_counter)) {
		/*
		 * Set, not add, the clean period.  This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance.  Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss->clean_counter, wss->clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry.  The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss->clean_entry) - 1)
			& (wss->num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss->entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss->total_count);
	}
}
/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);

	wss_advance_clean_counter(wss);
}
/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
	return atomic_read(&wss->total_count) >= wss->threshold;
}
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what verbs needs; let them use
	 * our QPN table.  No need for two.  Let's go ahead and mark the
	 * bitmaps for those.  The reserved range must be *after* the range
	 * which verbs uses.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}
/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
				   sizeof(*rdi->qp_dev->qp_table),
				   GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}
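/*
 * Example (hypothetical driver sketch): the callbacks checked above must
 * be wired up before calling rvt_driver_qp_init().  The mydrv_* names on
 * the right-hand side are made-up driver functions, not rdmavt symbols.
 *
 *	rdi->driver_f.free_all_qps = mydrv_free_all_qps;
 *	rdi->driver_f.qp_priv_alloc = mydrv_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = mydrv_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = mydrv_notify_qp_reset;
 *	rdi->driver_f.notify_restart_rc = mydrv_notify_restart_rc;
 *	ret = rvt_driver_qp_init(rdi);
 */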
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}
/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.  It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
/**
 * rvt_clear_mr_refs - Drop held MR references
 * @qp: rvt qp data structure
 * @clr_sends: whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_swqe(wqe);

			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe - the send wqe
 * @lkey - the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}
/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp - the rvt_qp
 * @lkey - the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
		return true;
	return false;
}
/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp - the qp
 * @lkey - the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}
/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp - the qp
 * @lkey - the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				    rcu_dereference_protected(qp->next,
				    lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		rvt_put_qp(qp);
	}
}
/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset
 * path holds the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
	atomic_set(&qp->s_reserved_used, 0);
}
/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
	__must_hold(&qp->s_lock)
	__must_hold(&qp->s_hlock)
	__must_hold(&qp->r_lock)
{
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		rvt_stop_rc_timers(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		rvt_del_timers_sync(qp);
		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
		/*
		 * Let the driver do any tear down or re-init it needs to for
		 * a qp that has been reset
		 */
		rdi->driver_f.notify_qp_reset(qp);
	}
	rvt_init_qp(rdi, qp, type);
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
}
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	size_t sqsize;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge >
		    rdi->dparms.props.max_recv_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
	sqsize =
		init_attr->cap.max_send_wr + 1 +
		rdi->dparms.reserved_operations;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		/* fall through */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
				  rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kcalloc_node(rvt_max_atomic(rdi),
					     sizeof(*qp->s_ack_queue),
					     GFP_KERNEL,
					     rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}
		/* initialize timers needed for rc qp */
		timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		qp->s_rnr_timer.function = rvt_rc_rnr_retry;
		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
		if (IS_ERR(priv)) {
			ret = priv;
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = vzalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}
		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_init_qp(rdi, qp, init_attr->qp_type);
		if (rdi->driver_f.qp_priv_init) {
			err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
			if (err) {
				ret = ERR_PTR(err);
				goto bail_rq_wq;
			}
		}
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;
	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;
	/*
	 * We have our QP and it's good, now keep track of what types of
	 * opcodes can be processed on this QP.  We do this by keeping
	 * track of what the 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;
bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	if (!qp->ip)
		vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (READ_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	rvt_get_qp(qp);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	int opa_ah;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;
	if (attr_mask & IB_QP_AV) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}
	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}
	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;
	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}
	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}
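/*
 * Example (hypothetical ULP-side sketch): rvt_modify_qp() is reached
 * through the ib_modify_qp() core verb, e.g. to take a fresh RC QP from
 * RESET to INIT.  Field values are illustrative only.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */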
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	rdma_destroy_ah_attr(&qp->remote_ah_attr);
	rdma_destroy_ah_attr(&qp->alt_ah_attr);
	kfree(qp);
	return 0;
}
/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1 -
		rdi->dparms.reserved_operations;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num =
		rdma_ah_get_port_num(&qp->alt_ah_attr);
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		  const struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
				!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++)
				wqe->sg_list[i] = wr->sg_list[i];
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_wmb();
			wq->head = next;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}
/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp - the qp
 * @post_parms - the post send table for the driver
 * @wr - the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr.  Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Returns a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
	struct rvt_qp *qp,
	const struct rvt_operation_params *post_parms,
	const struct ib_send_wr *wr)
{
	int len;

	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
		return -EINVAL;
	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
		return -EINVAL;
	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
	    (wr->num_sge == 0 ||
	     wr->sg_list[0].length < sizeof(u64) ||
	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
	    !qp->s_max_rd_atomic)
		return -EINVAL;
	len = post_parms[wr->opcode].length;

	/* UD specific */
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
		len = sizeof(struct ib_ud_wr);
	}

	return len;
}
/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: is reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, the qp->s_avail
 * may be changed.
 *
 * The return value is zero or a -ENOMEM.
 */
static inline int rvt_qp_is_avail(
	struct rvt_qp *qp,
	struct rvt_dev_info *rdi,
	bool reserved_op)
{
	u32 slast;
	u32 avail;
	u32 reserved_used;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	reserved_used = atomic_read(&qp->s_reserved_used);
	if (unlikely(reserved_op)) {
		/* see rvt_qp_wqe_unreserve() */
		smp_mb__before_atomic();
		if (reserved_used >= rdi->dparms.reserved_operations)
			return -ENOMEM;
		return 0;
	}
	/* non-reserved operations */
	if (likely(qp->s_avail))
		return 0;
	slast = READ_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		avail = qp->s_size - (qp->s_head - slast);
	else
		avail = slast - qp->s_head;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	reserved_used = atomic_read(&qp->s_reserved_used);
	avail = avail - 1 -
		(rdi->dparms.reserved_operations - reserved_used);
	/* ensure we don't assign a negative s_avail */
	if ((s32)avail <= 0)
		return -ENOMEM;
	qp->s_avail = avail;
	if (WARN_ON(qp->s_avail >
		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
		rvt_pr_err(rdi,
			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
			   qp->s_head, qp->s_tail, qp->s_cur,
			   qp->s_acked, qp->s_last);
	return 0;
}
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   const struct ib_send_wr *wr,
			   bool *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;
	size_t cplen;
	bool reserved_op;
	int local_ops_delayed = 0;

	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
	if (ret < 0)
		return ret;
	cplen = ret;

	/*
	 * Local operations include fast register and local invalidate.
	 * Fast register needs to be processed immediately because the
	 * registered lkey may be used by following work requests and the
	 * lkey needs to be valid at the time those requests are posted.
	 * Local invalidate can be processed immediately if fencing is
	 * not required and no previous local invalidate ops are pending.
	 * Signaled local operations that have been processed immediately
	 * need to have requests with "completion only" flags set posted
	 * to the send queue in order to generate completions.
	 */
	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
		switch (wr->opcode) {
		case IB_WR_REG_MR:
			ret = rvt_fast_reg_mr(qp,
					      reg_wr(wr)->mr,
					      reg_wr(wr)->key,
					      reg_wr(wr)->access);
			if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
				return ret;
			break;
		case IB_WR_LOCAL_INV:
			if ((wr->send_flags & IB_SEND_FENCE) ||
			    atomic_read(&qp->local_ops_pending)) {
				local_ops_delayed = 1;
			} else {
				ret = rvt_invalidate_rkey(
					qp, wr->ex.invalidate_rkey);
				if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
					return ret;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	reserved_op = rdi->post_parms[wr->opcode].flags &
			RVT_OPERATION_USE_RESERVE;
	/* check for avail */
	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
	if (ret)
		return ret;
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	/* cplen has length from above */
	memcpy(&wqe->wr, wr, cplen);

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		struct rvt_sge *last_sge = NULL;

		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;

			if (length == 0)
				continue;
			ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
					  &wr->sg_list[i], acc);
			if (unlikely(ret < 0))
				goto bail_inval_free;
			wqe->length += length;
			if (ret)
				last_sge = &wqe->sg_list[j];
			j += ret;
		}
		wqe->wr.num_sge = j;
	}

	/*
	 * Calculate and set SWQE PSN values prior to handing it off
	 * to the driver's check routine.  This gives the driver the
	 * opportunity to adjust PSN values based on internal checks.
	 */
	log_pmtu = qp->log_pmtu;
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

		log_pmtu = ah->log_pmtu;
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}

	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
		if (local_ops_delayed)
			atomic_inc(&qp->local_ops_pending);
		else
			wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
		wqe->ssn = 0;
		wqe->psn = 0;
		wqe->lpsn = 0;
	} else {
		wqe->ssn = qp->s_ssn++;
		wqe->psn = qp->s_next_psn;
		wqe->lpsn = wqe->psn +
				(wqe->length ?
					((wqe->length - 1) >> log_pmtu) :
					0);
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.setup_wqe) {
		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
		if (ret < 0)
			goto bail_inval_free_ref;
	}

	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
		qp->s_next_psn = wqe->lpsn + 1;

	if (unlikely(reserved_op)) {
		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
		rvt_qp_wqe_reserve(qp, wqe);
	} else {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		qp->s_avail--;
	}
	trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
	smp_wmb(); /* see request builders */
	qp->s_head = next;

	return 0;

bail_inval_free_ref:
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	bool call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty, and we only have a single WR then just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		nreq++;
	}
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		/*
		 * Only call do_send if there is exactly one packet, and the
		 * driver said it was ok.
		 */
		if (nreq == 1 && call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}
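/*
 * Example (hypothetical ULP-side sketch): posting one signaled RDMA WRITE
 * through the ib_post_send() core verb.  "sge", "raddr", and "rkey" are
 * assumed to describe registered local and remote buffers.
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = { .wr_id = 2, .sg_list = &sge, .num_sge = 1,
 *			.opcode = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED },
 *		.remote_addr = raddr, .rkey = rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */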
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	return 1;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	return 0;
}
/**
 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
EXPORT_SYMBOL(rvt_get_rwqe);
/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_comm_est);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_rc_error);
/*
 * rvt_rnr_tbl_to_usec - return usec from an index into ib_rvt_rnr_table
 * @index - the index
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);

static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				  IB_AETH_CREDIT_MASK];
}
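
/*
 * Worked example (illustrative): for aeth = 0x9a000000 with the usual
 * IB_AETH_CREDIT_SHIFT of 24 and IB_AETH_CREDIT_MASK of 0x1f, the RNR
 * timeout code is (0x9a000000 >> 24) & 0x1f = 0x1a, so
 * rvt_aeth_to_usec() returns ib_rvt_rnr_table[0x1a] = 81920 usec
 * (81.92 msec), matching the table at the top of this file.
 */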
/**
 * rvt_add_retry_timer - add/start a retry timer
 * @qp - the QP
 *
 * add a retry timer on the QP
 */
void rvt_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer);
/**
 * rvt_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @aeth - aeth of RNR timeout, simulated aeth for loopback
 *
 * add an rnr timer on the QP
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	trace_rvt_rnrnak_add(qp, to);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
/**
 * rvt_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		trace_rvt_rnrnak_stop(qp, 0);
	}
}
/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);
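
/*
 * Usage sketch (illustrative, based on how QP teardown is expected to
 * behave): stop the timers under s_lock first, then wait for any
 * running handlers outside the lock, since rvt_rc_timeout() itself
 * takes r_lock and s_lock.
 */
#if 0	/* example only, never compiled */
	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rc_timers(qp);		/* clears RVT_S_TIMER | RVT_S_WAIT_RNR */
	spin_unlock_irqrestore(&qp->s_lock, flags);
	rvt_del_timers_sync(qp);	/* wait for handlers to finish */
#endif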
/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
	struct rvt_qp *qp = from_timer(qp, t, s_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_timer for RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	trace_rvt_rnrnak_timeout(qp, 0);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
 * rvt_qp_iter_init - initialize an iterator for QP iteration
 * @rdi: rvt devinfo
 * @v: u64 value
 * @cb: user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);
/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter - the iterator
 *
 * Fine grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);
/**
 * rvt_qp_iter - iterate all QPs
 * @rdi - rvt devinfo
 * @v - a 64 bit value
 * @cb - a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
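
/*
 * Usage sketch (illustrative): moving every RC QP on a device to the
 * error state with rvt_qp_iter().  The callback name and the use of
 * @v to carry an ib_wc_status are hypothetical.
 */
#if 0	/* example only, never compiled */
static void rvt_example_error_cb(struct rvt_qp *qp, u64 v)
{
	if (qp->ibqp.qp_type == IB_QPT_RC)
		rvt_rc_error(qp, (enum ib_wc_status)v);
}

	/* caller: */
	rvt_qp_iter(rdi, (u64)IB_WC_WR_FLUSH_ERR, rvt_example_error_cb);
#endif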
/*
 * This should be called with s_lock held.
 */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	trace_rvt_qp_send_completion(qp, wqe, last);
	if (++last >= qp->s_size)
		last = 0;
	trace_rvt_qp_send_completion(qp, wqe, last);
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     rdi->wc_opcode[wqe->wr.opcode],
			     status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
EXPORT_SYMBOL(rvt_send_complete);
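
/*
 * Usage sketch (illustrative): a driver's send engine retires the WQE
 * at s_last while holding s_lock, as rvt_ruc_loopback() below does in
 * its flush_send path.
 */
#if 0	/* example only, never compiled */
	spin_lock_irqsave(&qp->s_lock, flags);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
	spin_unlock_irqrestore(&qp->s_lock, flags);
#endif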
/**
 * rvt_copy_sge - copy data to SGE memory
 * @qp: associated QP
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int i;
	bool in_last = false;
	bool cacheless_copy = false;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	struct rvt_wss *wss = rdi->wss;
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;

	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(wss, sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(wss, (sge->vaddr + PAGE_SIZE));

			cacheless_copy = wss_exceeds_threshold(wss);
		} else {
			wss_advance_clean_counter(wss);
		}
	}

	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = false;
			in_last = true;
		}
	}

again:
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = false;
		in_last = true;
		length = 8;
		goto again;
	}
}
EXPORT_SYMBOL(rvt_copy_sge);
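
/*
 * Configuration note (illustrative): the copy strategy above is chosen
 * per device via rdi->dparms.sge_copy_mode, typically set by the driver
 * before registration.  The RVT_SGE_COPY_* values come from rdmavt's
 * headers; the assignments below are a sketch of driver init code, not
 * a fixed API sequence.
 */
#if 0	/* example only, never compiled */
	/* always memcpy() */
	rdi->dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
	/* or: cacheless copy for transfers >= PAGE_SIZE */
	rdi->dparms.sge_copy_mode = RVT_SGE_COPY_CACHELESS;
	/* or: decide per transfer using the working-set estimator */
	rdi->dparms.sge_copy_mode = RVT_SGE_COPY_ADAPTIVE;
#endif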
/**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI
 * Note that although we are single threaded due to the send engine, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void rvt_ruc_loopback(struct rvt_qp *sqp)
{
	struct rvt_ibport *rvp =  NULL;
	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();
	rvp = rdi->ports[sqp->port_num - 1];

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		rvp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
			     len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	rvt_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	rvp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvt_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_ruc_loopback);
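
/*
 * Worked example (illustrative) for the atomic path above: with
 * IB_WR_ATOMIC_FETCH_AND_ADD, compare_add = 5 and a target value of 10,
 * atomic64_add_return() yields 15, so the requester's buffer receives
 * 15 - 5 = 10, i.e. the value before the add, matching IB fetch-and-add
 * semantics.  For IB_WR_ATOMIC_CMP_AND_SWP, cmpxchg() likewise returns
 * the prior value, swapping in wqe->atomic_wr.swap only when the target
 * equalled compare_add.
 */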