/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "sdma.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

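/*
 * Note: mk_qpn() is the inverse of the decomposition used by free_qpn()
 * below: a QPN is split into a bitmap-page index and a bit offset, so
 * qpn == page_index * RVT_BITS_PER_PAGE + off.  For example, assuming
 * the usual rdmavt layout of RVT_BITS_PER_PAGE == PAGE_SIZE * 8 (32768
 * bits on 4K-page systems), QPN 32770 lives at bit 2 of map page 1.
 */
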
/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};

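/*
 * Note on the encoding: the 5-bit AETH credit code indexes this table,
 * giving a roughly exponential scale (powers of two interleaved with
 * 1.5x steps) so a wide credit range fits in 5 bits of header.  Code
 * 0x1f (HFI1_AETH_CREDIT_INVAL) is deliberately absent: it means "no
 * credit information" (see hfi1_compute_aeth() and hfi1_get_credit()
 * below).
 */
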
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(ibp->rvp.qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = qpn_hash(dev->rdi.qp_dev, qp->ibqp.qp_num);

		qp->next = dev->rdi.qp_dev->qp_table[n];
		rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);
		trace_hfi1_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
}

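/*
 * Note: QPs 0 and 1 (the SMI/GSI special QPs) are stored in the per-port
 * ibp->rvp.qp[] array rather than hashed, which is why insert_qp() above
 * and remove_qp() below special-case qp_num <= 1.
 */
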
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u32 n = qpn_hash(dev->rdi.qp_dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->rvp.qp[0],
			lockdep_is_held(
				&dev->rdi.qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->rvp.qp[1],
			lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->rdi.qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)))
					!= NULL;
		     qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)));
				removed = 1;
				trace_hfi1_qpremove(qp, n);
				break;
			}
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

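/*
 * Locking note: writers of the QP lookup structures serialize on
 * qpt_lock while lookups on the receive path are RCU readers, so
 * remove_qp() must call synchronize_rcu() before the table's reference
 * on the QP can be dropped.
 */
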
static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		hfi1_put_ss(&qp->s_rdma_read_sge);

	hfi1_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * hfi1_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		hfi1_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 *
 * The actual flag used to determine "8k MTU" will change and is currently
 * unknown.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val = opa_mtu_enum_to_int((int)mtu);

	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

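/*
 * Example of the intended mapping (a sketch): IB_MTU_4096 is not an OPA
 * extension, so it falls through to ib_mtu_enum_to_int() and yields 4096,
 * while OPA_MTU_8192 and OPA_MTU_10240 are caught by
 * opa_mtu_enum_to_int() above and yield 8192 and 10240 respectively.
 */
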
/**
 * hfi1_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_udata *udata)
{
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct rvt_qp *qp = to_iqp(ibqp);
	struct hfi1_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */
	struct hfi1_devdata *dd = dd_from_dev(dev);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		u8 sc;

		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		u8 sc;

		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= hfi1_get_npkeys(dd))
			goto inval;
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= hfi1_get_npkeys(dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > HFI1_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		int mtu, pidx = qp->port_num - 1;

		dd = dd_from_dev(dev);
		mtu = verbs_mtu_enum_to_int(ibqp->device, attr->path_mtu);
		if (mtu == -1)
			goto inval;

		if (mtu > dd->pport[pidx].ibmtu)
			pmtu = mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
		else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > HFI1_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			flush_iowait(qp);
			qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&priv->s_iowait.iowork);
			del_timer_sync(&qp->s_timer);
			iowait_sdma_drain(&priv->s_iowait);
			flush_tx_list(qp);
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			clear_ahg(qp);
			rvt_reset_qp(&dev->rdi, qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & PSN_MODIFY_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & PSN_MODIFY_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
			qp->s_flags |= RVT_S_AHG_CLEAR;
			priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
			priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		struct hfi1_ibport *ibp;
		u8 sc, vl;
		u32 mtu;

		dd = dd_from_dev(dev);
		ibp = &dd->pport[qp->port_num - 1].ibport_data;

		sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
		vl = sc_to_vlt(dd, sc);

		mtu = verbs_mtu_enum_to_int(ibqp->device, pmtu);
		if (vl < PER_VL_SEND_CONTEXTS)
			mtu = min_t(u32, mtu, dd->vld[vl].mtu);
		pmtu = mtu_to_enum(mtu, OPA_MTU_8192);

		qp->path_mtu = pmtu;
		qp->pmtu = mtu;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = mask_psn(qp->r_psn);
	attr->sq_psn = mask_psn(qp->s_next_psn);
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads is
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * hfi1_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int hfi1_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		flush_iowait(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);
		cancel_work_sync(&priv->s_iowait.iowork);
		del_timer_sync(&qp->s_timer);
		iowait_sdma_drain(&priv->s_iowait);
		flush_tx_list(qp);
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_lock);
		clear_mr_refs(qp, 1);
		clear_ahg(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}

/**
 * hfi1_get_credit - process the flow control credit in an AETH
 * @qp: the qp whose send credit limit to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}

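/*
 * Worked example (assuming the usual 24-bit MSN arithmetic): an AETH
 * carrying credit code 0x10 grants credit_table[0x10] == 256 RWQEs, so
 * the new limit becomes s_lsn = MSN + 256 (mod 2^24); a sender blocked
 * on RVT_S_WAIT_SSN_CREDIT is rescheduled only if that limit advanced.
 */
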
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

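/*
 * Note: the atomic_dec_and_test() above pairs with the atomic_inc()
 * taken in iowait_sleep() when the QP is queued on an engine's dmawait
 * list; that reference keeps the QP alive until it has been woken.
 */
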
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

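/*
 * Return convention for iowait_sleep(): -EBUSY means the txreq was saved
 * on the wait list and the QP parked until descriptors free up, so the
 * caller must not retry now; -EAGAIN means the ring made progress while
 * iowait_lock was being taken, so the caller should simply resubmit.
 */
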
static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
	if (qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_iowait.sdma_busy),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry_cnt,
		   qp->timeout,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0);
}

void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}

	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		if (!hfi1_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

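/*
 * Note: despite its name, free_all_qps() frees nothing here; it only
 * reports how many QPs this driver still considers in use (special QPs
 * and multicast attachments), presumably so the rdmavt core can account
 * for them during teardown.
 */
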
void notify_qp_reset(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_init(
		&priv->s_iowait,
		1,
		hfi1_do_send,
		iowait_sleep,
		iowait_wakeup);
	priv->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
1256 | } |