/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "sdma.h"
#define BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE - 1)
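/*
 * Worked example (a sketch, assuming 4 KiB pages): each qpn_map page
 * then covers PAGE_SIZE * BITS_PER_BYTE = 4096 * 8 = 32768 QPNs, and
 * BITS_PER_PAGE_MASK is 0x7fff.
 */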
static unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
static void flush_tx_list(struct hfi1_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
static inline unsigned mk_qpn(struct hfi1_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}
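/*
 * Worked example for mk_qpn() (still assuming 4 KiB pages): the third
 * map page (map == &qpt->map[2]) with off == 5 yields
 * 2 * 32768 + 5 = 65541.
 */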
/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};
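/*
 * The 5-bit AETH credit code indexes this table, so credit counts are
 * quantized to the roughly exponential values above; hfi1_compute_aeth()
 * below picks the largest code whose entry does not exceed the actual
 * RWQE count.
 */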
static void get_map_page(struct hfi1_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	int ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.   It gets re-assigned below
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no bits at shift and below */
		WARN_ON(offset & (dd->qos_shift - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
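/*
 * Illustration of the QOS stride (assuming dd->qos_shift == 2):
 * init_qpn_table() below sets qpt->incr = 1 << 2 = 4, so the scan
 * above walks offsets in strides of four (e.g. 5, 9, 13, ...),
 * flipping only bit 0 when the search wraps around the map pages.
 */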
static void free_qpn(struct hfi1_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(ibp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);

		qp->next = dev->qp_dev->qp_table[n];
		rcu_assign_pointer(dev->qp_dev->qp_table[n], qp);
		trace_hfi1_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
}
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp[0],
			lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->qp[1],
			lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp[1], NULL);
	} else {
		struct hfi1_qp *q;
		struct hfi1_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qp_dev->qpt_lock)))
					!= NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				 rcu_dereference_protected(qp->next,
				 lockdep_is_held(&dev->qp_dev->qpt_lock)));
				removed = 1;
				trace_hfi1_qpremove(qp, n);
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
/**
 * free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned free_all_qps(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct hfi1_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		if (!hfi1_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	if (!dev->qp_dev)
		goto bail;
	spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_dev->qp_table[n],
			lockdep_is_held(&dev->qp_dev->qpt_lock));
		RCU_INIT_POINTER(dev->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
				lockdep_is_held(&dev->qp_dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
	synchronize_rcu();
bail:
	return qp_inuse;
}
/**
 * reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	iowait_init(
		&qp->s_iowait,
		1,
		hfi1_do_send,
		iowait_sleep,
		iowait_wakeup);
	qp->s_flags &= HFI1_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	clear_ahg(qp);
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
		hfi1_put_ss(&qp->s_rdma_read_sge);

	hfi1_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct hfi1_sge *sge = &wqe->sg_list[i];

				hfi1_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			hfi1_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct hfi1_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			hfi1_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * hfi1_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
		qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
		list_del_init(&qp->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			hfi1_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		hfi1_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct hfi1_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
static void flush_tx_list(struct hfi1_qp *qp)
{
	while (!list_empty(&qp->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&qp->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}
static void flush_iowait(struct hfi1_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&qp->s_iowait.list)) {
		list_del_init(&qp->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}
static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}
/*
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 *
 * The actual flag used to determine "8k MTU" will change and is currently
 * unknown.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val = opa_mtu_enum_to_int((int)mtu);

	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}
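/*
 * Usage sketch: verbs_mtu_enum_to_int(dev, IB_MTU_4096) falls through
 * to ib_mtu_enum_to_int() and returns 4096, while an OPA-extended
 * value such as OPA_MTU_8192 is handled above and returns 8192.
 */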
/**
 * hfi1_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair who's attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_udata *udata)
{
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */
	struct hfi1_devdata *dd;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
			goto inval;
		if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
			goto inval;
		if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > HFI1_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu.  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		int mtu, pidx = qp->port_num - 1;

		dd = dd_from_dev(dev);
		mtu = verbs_mtu_enum_to_int(ibqp->device, attr->path_mtu);
		if (mtu == -1)
			goto inval;

		if (mtu > dd->pport[pidx].ibmtu)
			pmtu = mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
		else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > HFI1_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			flush_iowait(qp);
			qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_iowait.iowork);
			del_timer_sync(&qp->s_timer);
			iowait_sdma_drain(&qp->s_iowait);
			flush_tx_list(qp);
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);

			reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~HFI1_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & PSN_MODIFY_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & PSN_MODIFY_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
			qp->s_flags |= HFI1_S_AHG_CLEAR;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		struct hfi1_ibport *ibp;
		u8 sc, vl;
		u32 mtu;

		dd = dd_from_dev(dev);
		ibp = &dd->pport[qp->port_num - 1].ibport_data;

		sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
		vl = sc_to_vlt(dd, sc);

		mtu = verbs_mtu_enum_to_int(ibqp->device, pmtu);
		if (vl < PER_VL_SEND_CONTEXTS)
			mtu = min_t(u32, mtu, dd->vld[vl].mtu);
		pmtu = mtu_to_enum(mtu, OPA_MTU_8192);

		qp->path_mtu = pmtu;
		qp->pmtu = mtu;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}
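/*
 * Note on the IB_QP_TIMEOUT math above: the local ACK timeout is
 * 4.096 usec * 2^timeout, so e.g. qp->timeout == 14 gives
 * (4096UL * (1UL << 14)) / 1000UL ~= 67108 usec, i.e. about 67 ms
 * per retry before usecs_to_jiffies() rounds it to jiffies.
 */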
int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct hfi1_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = mask_psn(qp->r_psn);
	attr->sq_psn = mask_psn(qp->s_next_psn);
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
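/*
 * The caps reported above are one less than the allocated ring sizes:
 * hfi1_create_qp() sizes s_size and r_rq.size as max_*_wr + 1 so a
 * full circular queue can be distinguished from an empty one.
 */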
/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct hfi1_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct hfi1_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads are
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
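/*
 * Worked example: with head - tail == 100 RWQEs outstanding, the
 * binary search settles on credit code 13 (credit_table[13] == 96),
 * the largest entry not exceeding the actual credit count.
 */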
/**
 * hfi1_create_qp - create a queue pair for a device
 * @ibpd: the protection domain who's device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
			     struct ib_qp_init_attr *init_attr,
			     struct ib_udata *udata)
{
	struct hfi1_qp *qp;
	int err;
	struct hfi1_swqe *swq = NULL;
	struct hfi1_ibdev *dev;
	struct hfi1_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > hfi1_max_sges ||
	    init_attr->cap.max_send_wr > hfi1_max_qp_wrs ||
	    init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > hfi1_max_sges ||
		    init_attr->cap.max_recv_wr > hfi1_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct hfi1_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct hfi1_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct hfi1_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct hfi1_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = HFI1_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		reset_qp(qp, init_attr->qp_type);

		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See hfi1_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct hfi1_rwq) + qp->r_rq.size * sz;

			qp->ip = hfi1_create_mmap_info(dev, s,
						       ibpd->uobject->context,
						       qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == hfi1_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and its good, now keep track of what types of opcodes
	 * can be processed on this QP. We do this by keeping track of what the
	 * 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, hfi1_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
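/*
 * On the allowed_ops values above: IB opcodes place the transport in
 * the three high-order bits, so (assuming the usual 0xe0 value for
 * OPCODE_QP_MASK) RC, UC and UD QPs accept opcodes in the 0x00, 0x20
 * and 0x60 ranges respectively.
 */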
/**
 * hfi1_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int hfi1_destroy_qp(struct ib_qp *ibqp)
{
	struct hfi1_qp *qp = to_iqp(ibqp);
	struct hfi1_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		flush_iowait(qp);
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);
		cancel_work_sync(&qp->s_iowait.iowork);
		del_timer_sync(&qp->s_timer);
		iowait_sdma_drain(&qp->s_iowait);
		flush_tx_list(qp);
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_lock);
		clear_mr_refs(qp, 1);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	/* all user's cleaned up, mark it available */
	free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, hfi1_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt)
{
	u32 offset, qpn, i;
	struct qpn_map *map;
	int ret = 0;

	spin_lock_init(&qpt->lock);

	qpt->last = 0;
	qpt->incr = 1 << dd->qos_shift;

	/* insure we don't assign QPs from KDETH 64K window */
	qpn = kdeth_qp << 16;
	qpt->nmaps = qpn / BITS_PER_PAGE;
	/* This should always be zero */
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpt->nmaps];
	dd_dev_info(dd, "Reserving QPNs for KDETH window from 0x%x to 0x%x\n",
		    qpn, qpn + 65535);
	for (i = 0; i < 65536; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}
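/*
 * The loop above sets 65536 bits starting at kdeth_qp << 16, i.e. it
 * reserves the entire 64K block of QPNs whose upper bits match the
 * KDETH prefix, so verbs QPs can never collide with that window.
 */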
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct hfi1_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long) qpt->map[i].page);
}
/**
 * hfi1_get_credit - flush the send work queue of a QP
 * @qp: the qp who's send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= HFI1_S_UNLIMITED_CREDIT;
			if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}
void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct hfi1_qp *qp;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&qp->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->n_dmawait++;
			qp->s_flags |= HFI1_S_WAIT_DMA_DESC;
			list_add_tail(&qp->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~HFI1_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}
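/*
 * The sdma_progress() re-check under iowait_lock above closes a race:
 * if the engine freed descriptors between the failed submit and taking
 * the lock, the request is pulled back off tx_head and -EAGAIN asks
 * the caller to retry immediately instead of sleeping indefinitely.
 */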
static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC);
}
int hfi1_qp_init(struct hfi1_ibdev *dev)
{
	struct hfi1_devdata *dd = dd_from_dev(dev);
	int i;
	int ret = -ENOMEM;

	/* allocate parent object */
	dev->qp_dev = kzalloc(sizeof(*dev->qp_dev), GFP_KERNEL);
	if (!dev->qp_dev)
		goto nomem;
	/* allocate hash table */
	dev->qp_dev->qp_table_size = hfi1_qp_table_size;
	dev->qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size);
	dev->qp_dev->qp_table =
		kmalloc(dev->qp_dev->qp_table_size *
				sizeof(*dev->qp_dev->qp_table),
			GFP_KERNEL);
	if (!dev->qp_dev->qp_table)
		goto nomem;
	for (i = 0; i < dev->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(dev->qp_dev->qp_table[i], NULL);
	spin_lock_init(&dev->qp_dev->qpt_lock);
	/* initialize qpn map */
	ret = init_qpn_table(dd, &dev->qp_dev->qpn_table);
	if (ret)
		goto nomem;
	return ret;
nomem:
	if (dev->qp_dev) {
		kfree(dev->qp_dev->qp_table);
		free_qpn_table(&dev->qp_dev->qpn_table);
		kfree(dev->qp_dev);
	}
	return ret;
}
void hfi1_qp_exit(struct hfi1_ibdev *dev)
{
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u32 qps_inuse;

	qps_inuse = free_all_qps(dd);
	if (qps_inuse)
		dd_dev_err(dd, "QP memory leak! %u still in use\n",
			   qps_inuse);

	kfree(dev->qp_dev->qp_table);
	free_qpn_table(&dev->qp_dev->qpn_table);
}
/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}
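/*
 * Engine selection here mirrors alloc_qpn(): QPNs are handed out in
 * strides of 1 << dd->qos_shift, and shifting the QPN down by
 * qos_shift recovers the per-QP index fed to sdma_select_engine_sc()
 * along with the 5-bit SC.
 */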
struct qp_iter {
	struct hfi1_ibdev *dev;
	struct hfi1_qp *qp;
	int specials;
	int n;
};
struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->ibdev.phys_port_cnt * 2;
	if (qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}
int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct hfi1_qp *pqp = iter->qp;
	struct hfi1_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as an additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..dev->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 *
	 */
	for (; n < dev->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->qp[0]);
				else
					qp = rcu_dereference(ibp->qp[1]);
			} else {
				qp = rcu_dereference(
					dev->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};
static int qp_idle(struct hfi1_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}
void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct hfi1_swqe *wqe;
	struct hfi1_qp *qp = iter->qp;
	struct sdma_engine *sde;

	sde = qp_to_sdma_engine(qp, qp->s_sc);
	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_iowait.sdma_busy),
		   !list_empty(&qp->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry_cnt,
		   qp->s_retry,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0);
}
void qp_comm_est(struct hfi1_qp *qp)
{
	qp->r_flags |= HFI1_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}