/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "sdma.h"

#define BITS_PER_PAGE		(PAGE_SIZE * BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
static unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
static void flush_tx_list(struct hfi1_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
static inline unsigned mk_qpn(struct hfi1_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}
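
/*
 * Worked example (assuming a 4 KiB PAGE_SIZE, so BITS_PER_PAGE == 32768):
 * a bit at offset 100 in the third map page (map - qpt->map == 2) encodes
 * QPN 2 * 32768 + 100 == 65636.
 */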
/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};
static void get_map_page(struct hfi1_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.  It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no bits at shift and below */
		WARN_ON(offset & (dd->qos_shift - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
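
/*
 * Worked example (illustrative): with dd->qos_shift == 2, qpt->incr is 4,
 * so successive allocations walk the bitmap in strides of 4 while the
 * "| ((qpt->last & 1) ^ 1)" term flips bit 0 on wrap.  Starting from
 * last == 16 this scans 20, 24, 28, ... and wraps to 5, 9, 13, ...
 */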
static void free_qpn(struct hfi1_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(ibp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);

		qp->next = dev->qp_dev->qp_table[n];
		rcu_assign_pointer(dev->qp_dev->qp_table[n], qp);
		trace_hfi1_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
}
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp[0],
			lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->qp[1],
			lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp[1], NULL);
	} else {
		struct hfi1_qp *q;
		struct hfi1_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qp_dev->qpt_lock)))
					!= NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				 rcu_dereference_protected(qp->next,
				 lockdep_is_held(&dev->qp_dev->qpt_lock)));
				removed = 1;
				trace_hfi1_qpremove(qp, n);
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
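
/*
 * For context, a minimal sketch of the RCU reader side that the
 * synchronize_rcu() above pairs with.  This is not the driver's actual
 * lookup routine (that lives in the verbs code); the name below is
 * illustrative only, so the sketch is compiled out.
 */
#if 0
static struct hfi1_qp *example_lookup_qpn(struct hfi1_ibport *ibp,
					  struct hfi1_ibdev *dev, u32 qpn)
{
	struct hfi1_qp *qp = NULL;

	rcu_read_lock();
	if (qpn <= 1) {
		/* QP0/QP1 live in per-port slots, not the hash table */
		qp = rcu_dereference(ibp->qp[qpn]);
	} else {
		u32 n = qpn_hash(dev->qp_dev, qpn);

		for (qp = rcu_dereference(dev->qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	if (qp)
		atomic_inc(&qp->refcount);
	rcu_read_unlock();
	return qp;
}
#endif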
/**
 * free_all_qps - check for QPs still in use
 * @dd: the device data
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned free_all_qps(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct hfi1_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		if (!hfi1_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	if (!dev->qp_dev)
		goto bail;
	spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_dev->qp_table[n],
			lockdep_is_held(&dev->qp_dev->qpt_lock));
		RCU_INIT_POINTER(dev->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
				lockdep_is_held(&dev->qp_dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
	synchronize_rcu();
bail:
	return qp_inuse;
}
/**
 * reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	iowait_init(
		&qp->s_iowait,
		1,
		hfi1_do_send,
		iowait_sleep,
		iowait_wakeup);
	qp->s_flags &= HFI1_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	clear_ahg(qp);
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
		hfi1_put_ss(&qp->s_rdma_read_sge);

	hfi1_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct hfi1_sge *sge = &wqe->sg_list[i];

				hfi1_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			hfi1_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct hfi1_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			hfi1_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * hfi1_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
		qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
		list_del_init(&qp->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			hfi1_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		hfi1_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct hfi1_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
static void flush_tx_list(struct hfi1_qp *qp)
{
	while (!list_empty(&qp->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&qp->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}
static void flush_iowait(struct hfi1_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&qp->s_iowait.list)) {
		list_del_init(&qp->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}
static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}
/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 *
 * The actual flag used to determine "8k MTU" will change and is currently
 * unknown.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val = opa_mtu_enum_to_int((int)mtu);

	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}
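
/*
 * Example (illustrative): IB_MTU_4096 is not an OPA extended value, so
 * opa_mtu_enum_to_int() returns -1 and the standard ib_mtu_enum_to_int()
 * maps it to 4096; OPA_MTU_8192 short-circuits to 8192.
 */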
/**
 * hfi1_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_udata *udata)
{
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */
	struct hfi1_devdata *dd = dd_from_dev(dev);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		u8 sc;

		if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
			goto inval;
		if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		u8 sc;

		if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
			goto inval;
		if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= hfi1_get_npkeys(dd))
			goto inval;
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= hfi1_get_npkeys(dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > HFI1_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		int mtu, pidx = qp->port_num - 1;

		dd = dd_from_dev(dev);
		mtu = verbs_mtu_enum_to_int(ibqp->device, attr->path_mtu);
		if (mtu == -1)
			goto inval;

		if (mtu > dd->pport[pidx].ibmtu)
			pmtu = mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
		else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > HFI1_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			flush_iowait(qp);
			qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_iowait.iowork);
			del_timer_sync(&qp->s_timer);
			iowait_sdma_drain(&qp->s_iowait);
			flush_tx_list(qp);
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			clear_ahg(qp);
			reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~HFI1_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & PSN_MODIFY_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & PSN_MODIFY_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
		qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
			qp->s_flags |= HFI1_S_AHG_CLEAR;
			qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
			qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		struct hfi1_ibport *ibp;
		u8 sc, vl;
		u32 mtu;

		dd = dd_from_dev(dev);
		ibp = &dd->pport[qp->port_num - 1].ibport_data;

		sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
		vl = sc_to_vlt(dd, sc);

		mtu = verbs_mtu_enum_to_int(ibqp->device, pmtu);
		if (vl < PER_VL_SEND_CONTEXTS)
			mtu = min_t(u32, mtu, dd->vld[vl].mtu);
		pmtu = mtu_to_enum(mtu, OPA_MTU_8192);

		qp->path_mtu = pmtu;
		qp->pmtu = mtu;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}
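
/*
 * A minimal caller-side sketch (illustrative, not from this file): a ULP
 * moves a QP to INIT through the core ib_modify_qp() entry point, which
 * lands here.  The helper name and values are hypothetical, so the sketch
 * is compiled out.
 */
#if 0
static int example_qp_to_init(struct ib_qp *qp, u8 port, u16 pkey_index)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.port_num	 = port,
		.pkey_index	 = pkey_index,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS);
}
#endif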
int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct hfi1_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = mask_psn(qp->r_psn);
	attr->sq_psn = mask_psn(qp->s_next_psn);
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct hfi1_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct hfi1_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads are
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
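
/*
 * Worked example (illustrative): with r_rq.size == 8, head == 5 and
 * tail == 2, credits == 3 RWQEs; the binary search selects index 3
 * (credit_table[3] == 3), so the AETH credit field carries the code 3.
 */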
/**
 * hfi1_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
			     struct ib_qp_init_attr *init_attr,
			     struct ib_udata *udata)
{
	struct hfi1_qp *qp;
	int err;
	struct hfi1_swqe *swq = NULL;
	struct hfi1_ibdev *dev;
	struct hfi1_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > hfi1_max_sges ||
	    init_attr->cap.max_send_wr > hfi1_max_qp_wrs ||
	    init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > hfi1_max_sges ||
		    init_attr->cap.max_recv_wr > hfi1_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct hfi1_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct hfi1_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct hfi1_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct hfi1_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = HFI1_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		reset_qp(qp, init_attr->qp_type);

		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See hfi1_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct hfi1_rwq) + qp->r_rq.size * sz;

			qp->ip = hfi1_create_mmap_info(dev, s,
						       ibpd->uobject->context,
						       qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == hfi1_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of opcodes
	 * can be processed on this QP. We do this by keeping track of what the
	 * 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, hfi1_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
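
/*
 * Caller-side sketch (illustrative): the core allocates through
 * ib_create_qp(), which dispatches to hfi1_create_qp() above.  The helper
 * name and capacity values are arbitrary examples, so the sketch is
 * compiled out.
 */
#if 0
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.qp_type	= IB_QPT_RC,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.send_cq	= cq,
		.recv_cq	= cq,
		.cap = {
			.max_send_wr	= 128,
			.max_recv_wr	= 128,
			.max_send_sge	= 2,
			.max_recv_sge	= 2,
		},
	};

	return ib_create_qp(pd, &init_attr);
}
#endif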
/**
 * hfi1_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int hfi1_destroy_qp(struct ib_qp *ibqp)
{
	struct hfi1_qp *qp = to_iqp(ibqp);
	struct hfi1_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		flush_iowait(qp);
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);
		cancel_work_sync(&qp->s_iowait.iowork);
		del_timer_sync(&qp->s_timer);
		iowait_sdma_drain(&qp->s_iowait);
		flush_tx_list(qp);
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_lock);
		clear_mr_refs(qp, 1);
		clear_ahg(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, hfi1_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt)
{
	u32 offset, qpn, i;
	struct qpn_map *map;
	int ret = 0;

	spin_lock_init(&qpt->lock);

	qpt->last = 0;
	qpt->incr = 1 << dd->qos_shift;

	/* ensure we don't assign QPs from KDETH 64K window */
	qpn = kdeth_qp << 16;
	qpt->nmaps = qpn / BITS_PER_PAGE;
	/* This should always be zero */
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpt->nmaps];
	dd_dev_info(dd, "Reserving QPNs for KDETH window from 0x%x to 0x%x\n",
		qpn, qpn + 65535);
	for (i = 0; i < 65536; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		qpn++;
		if (offset == BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct hfi1_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long) qpt->map[i].page);
}
/**
 * hfi1_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= HFI1_S_UNLIMITED_CREDIT;
			if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}
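
/*
 * Worked example (illustrative): an AETH credit code of 5 indexes
 * credit_table[5] == 6, so the new limit (LSN) becomes MSN + 6; the
 * invalid code HFI1_AETH_CREDIT_INVAL instead switches the QP to
 * unlimited credit.
 */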
void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct hfi1_qp *qp;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&qp->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->n_dmawait++;
			qp->s_flags |= HFI1_S_WAIT_DMA_DESC;
			list_add_tail(&qp->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~HFI1_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}
static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC);
}
int hfi1_qp_init(struct hfi1_ibdev *dev)
{
	struct hfi1_devdata *dd = dd_from_dev(dev);
	int i;
	int ret = -ENOMEM;

	/* allocate parent object */
	dev->qp_dev = kzalloc(sizeof(*dev->qp_dev), GFP_KERNEL);
	if (!dev->qp_dev)
		goto nomem;
	/* allocate hash table */
	dev->qp_dev->qp_table_size = hfi1_qp_table_size;
	dev->qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size);
	dev->qp_dev->qp_table =
		kmalloc(dev->qp_dev->qp_table_size *
				sizeof(*dev->qp_dev->qp_table),
			GFP_KERNEL);
	if (!dev->qp_dev->qp_table)
		goto nomem;
	for (i = 0; i < dev->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(dev->qp_dev->qp_table[i], NULL);
	spin_lock_init(&dev->qp_dev->qpt_lock);
	/* initialize qpn map */
	ret = init_qpn_table(dd, &dev->qp_dev->qpn_table);
	if (ret)
		goto nomem;
	return ret;
nomem:
	if (dev->qp_dev) {
		kfree(dev->qp_dev->qp_table);
		free_qpn_table(&dev->qp_dev->qpn_table);
		kfree(dev->qp_dev);
	}
	return ret;
}
void hfi1_qp_exit(struct hfi1_ibdev *dev)
{
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u32 qps_inuse;

	qps_inuse = free_all_qps(dd);
	if (qps_inuse)
		dd_dev_err(dd, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (dev->qp_dev) {
		kfree(dev->qp_dev->qp_table);
		free_qpn_table(&dev->qp_dev->qpn_table);
		kfree(dev->qp_dev);
	}
}
/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
		break;
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}
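
/*
 * Example (illustrative): a user RC QP with qp_num 0x104 and
 * dd->qos_shift == 2 selects an engine from bucket 0x41 (0x104 >> 2)
 * and the given 5-bit SC; SMI QPs never use SDMA (NULL return above).
 */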
struct qp_iter {
	struct hfi1_ibdev *dev;
	struct hfi1_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->ibdev.phys_port_cnt * 2;
	if (qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}
int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct hfi1_qp *pqp = iter->qp;
	struct hfi1_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..dev->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 *
	 */
	for (; n < dev->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->qp[0]);
				else
					qp = rcu_dereference(ibp->qp[1]);
			} else {
				qp = rcu_dereference(
					dev->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};
static int qp_idle(struct hfi1_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}
void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct hfi1_swqe *wqe;
	struct hfi1_qp *qp = iter->qp;
	struct sdma_engine *sde;

	sde = qp_to_sdma_engine(qp, qp->s_sc);
	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_iowait.sdma_busy),
		   !list_empty(&qp->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry_cnt,
		   qp->s_retry,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0);
}
void qp_comm_est(struct hfi1_qp *qp)
{
	qp->r_flags |= HFI1_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct hfi1_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= HFI1_S_AHG_CLEAR;
	qp->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}