/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static struct workqueue_struct *reg_workq;

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

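/*
 * The qps/stags/eps debugfs files below share a snapshot pattern: their
 * ->open walks the relevant idr under the device lock and formats
 * everything into a vmalloc'd buffer, and debugfs_read() then serves byte
 * ranges out of that buffer. The file contents are a point-in-time
 * snapshot taken at open time, not live state. (The stats and wr_log
 * files use seq_file instead.)
 */
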
void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	le.poll_host_time = ktime_get();
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}

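/*
 * Ring-indexing note: wr_log_size is always a power of two
 * (1 << c4iw_wr_log_size_order), so "& (wr_log_size - 1)" is a cheap
 * modulo and the atomic counter may wrap freely. With the default size
 * order of 12, for example, the ring holds 4096 entries and the mask is
 * 0xfff. Concurrent loggers can overwrite each other's slots when the
 * ring wraps, which is acceptable for a best-effort timing log.
 */
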
static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	ktime_t prev_time;
	struct wr_log_entry *lep;
	int prev_time_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_time_set) {
				prev_time_set = 1;
				prev_time = lep->poll_host_time;
			}
			seq_printf(seq, "%04u: nsec %llu qid %u opcode "
				   "%u %s 0x%x host_wr_delta nsec %llu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 prev_time)),
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
							"msn" : "wrid",
				   lep->wr_id,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 lep->post_host_time)),
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_time = lep->poll_host_time;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

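/*
 * ts2ns() converts raw SGE timestamp ticks to nanoseconds using the core
 * clock period: cclk_ps is picoseconds per tick, so ticks * cclk_ps gives
 * picoseconds and div64_u64(..., 1000) yields nanoseconds. The output thus
 * mixes two clock domains: host ktime deltas (host_wr_delta) and SGE-clock
 * deltas (post_poll_delta_ns, cqe_poll_delta_ns).
 */
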
static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = wr_log_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = wr_log_clear,
};

static struct sockaddr_in zero_sin = {
	.sin_family = AF_INET,
};

static struct sockaddr_in6 zero_sin6 = {
	.sin6_family = AF_INET6,
};

static void set_ep_sin_addrs(struct c4iw_ep *ep,
			     struct sockaddr_in **lsin,
			     struct sockaddr_in **rsin,
			     struct sockaddr_in **m_lsin,
			     struct sockaddr_in **m_rsin)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin = (struct sockaddr_in *)&ep->com.local_addr;
	*m_rsin = (struct sockaddr_in *)&ep->com.remote_addr;
	if (id) {
		*lsin = (struct sockaddr_in *)&id->local_addr;
		*rsin = (struct sockaddr_in *)&id->remote_addr;
	} else {
		*lsin = &zero_sin;
		*rsin = &zero_sin;
	}
}

static void set_ep_sin6_addrs(struct c4iw_ep *ep,
			      struct sockaddr_in6 **lsin6,
			      struct sockaddr_in6 **rsin6,
			      struct sockaddr_in6 **m_lsin6,
			      struct sockaddr_in6 **m_rsin6)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	*m_rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
	if (id) {
		*lsin6 = (struct sockaddr_in6 *)&id->local_addr;
		*rsin6 = (struct sockaddr_in6 *)&id->remote_addr;
	} else {
		*lsin6 = &zero_sin6;
		*rsin6 = &zero_sin6;
	}
}

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		struct c4iw_ep *ep = qp->ep;

		if (ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin;
			struct sockaddr_in *rsin;
			struct sockaddr_in *m_lsin;
			struct sockaddr_in *m_rsin;

			set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u %s id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->srq ? "srq" : "rq",
				      qp->srq ? qp->srq->idx : qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(m_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(m_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6;
			struct sockaddr_in6 *rsin6;
			struct sockaddr_in6 *m_lsin6;
			struct sockaddr_in6 *m_rsin6;

			set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
					  &m_rsin6);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(m_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(m_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

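/*
 * Note: dump_qp() is an idr_for_each() callback, and idr_for_each() stops
 * iterating as soon as a callback returns nonzero. Returning 1 when
 * space == 0 is how the dump bails out once the snapshot buffer is full,
 * and the "if (cc < space)" check discards a final entry that snprintf()
 * had to truncate. dump_stag(), dump_ep() and dump_listen_ep() below
 * follow the same convention.
 */
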
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 180;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}

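/*
 * The buffer sizing above is a heuristic: count the QPs under the lock,
 * assume at most ~180 bytes of formatted text per QP, then re-walk the
 * idr to format them. count starts at 1 so the buffer is never
 * zero-sized, and any QPs added between the two walks are absorbed by
 * dump_qp()'s space check rather than overflowing the buffer.
 */
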
static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id<<8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		pr_info("%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "     SRQS: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
		   dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.srqt.max = 0;
	dev->rdev.stats.srqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin;
		struct sockaddr_in *rsin;
		struct sockaddr_in *m_lsin;
		struct sockaddr_in *m_rsin;

		set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(m_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6;
		struct sockaddr_in6 *rsin6;
		struct sockaddr_in6 *m_lsin6;
		struct sockaddr_in6 *m_rsin6;

		set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(m_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *m_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

static void setup_debugfs(struct c4iw_dev *devp)
{
	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR,
					 devp->debugfs_root, (void *)devp,
					 &wr_log_debugfs_fops, 4096);
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;
	unsigned int factor;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density! Eventually
	 * we might need to support this but for now fail the open. Also the
	 * cqid and qpid range must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	/* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
	if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
		pr_err("%s: unsupported sge host page size %u\n",
		       pci_name(rdev->lldi.pdev),
		       rdev->lldi.sge_host_page_size);
		return -EINVAL;
	}

	factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
	rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
	rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;

	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
		 rdev->lldi.vr->pbl.start,
		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
		 rdev->lldi.vr->rq.size,
		 rdev->lldi.vr->qp.start,
		 rdev->lldi.vr->qp.size,
		 rdev->lldi.vr->cq.start,
		 rdev->lldi.vr->cq.size,
		 rdev->lldi.vr->srq.size);
	pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
		 &rdev->lldi.pdev->resource[2],
		 rdev->lldi.db_reg, rdev->lldi.gts_reg,
		 rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.srqt.total = rdev->lldi.vr->srq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev),
				 T4_MAX_NUM_PD, rdev->lldi.vr->srq.size);
	if (err) {
		pr_err("error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		pr_err("error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		pr_err("error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		pr_err("error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
	rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support;

	if (c4iw_wr_log) {
		rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
				       sizeof(*rdev->wr_log),
				       GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		}
	}

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page_and_wr_log;
	}

	rdev->status_page->db_off = 0;

	init_completion(&rdev->rqt_compl);
	init_completion(&rdev->pbl_compl);
	kref_init(&rdev->rqt_kref);
	kref_init(&rdev->pbl_kref);

	return 0;
err_free_status_page_and_wr_log:
	if (c4iw_wr_log && rdev->wr_log)
		kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}

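/*
 * The unwind ladder above mirrors the allocation order: each goto label
 * undoes exactly the steps that succeeded before the failure (resource
 * tables, then the PBL pool, the RQT pool, the OCQP pool, and finally the
 * status page and wr_log), so a failure at any point releases everything
 * acquired so far and nothing more.
 */
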
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	wait_for_completion(&rdev->pbl_compl);
	wait_for_completion(&rdev->rqt_compl);
	c4iw_ocqp_pool_destroy(rdev);
	destroy_workqueue(rdev->free_workq);
	c4iw_destroy_resource(&rdev->resource);
}

void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
	idr_destroy(&ctx->dev->cqidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
	idr_destroy(&ctx->dev->qpidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
	idr_destroy(&ctx->dev->mmidr);
	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	pr_debug("c4iw_dev %p\n", ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		pr_info("%s: RDMA not supported on this device\n",
			pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device\n",
			pci_name(infop->pdev));

	devp = ib_alloc_device(c4iw_dev, ibdev);
	if (!devp) {
		pr_err("Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
		 devp->rdev.lldi.sge_ingpadboundary,
		 devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_egrstatuspagesize / 64;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err("Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err("Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
		 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
		 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		pr_err("Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	init_waitqueue_head(&devp->wait);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
		 pci_name(ctx->lldi.pdev),
		 ctx->lldi.nchan, ctx->lldi.nrxq,
		 ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			be64_to_cpu(*rsp),
			be64_to_cpu(*(__force __be64 *)gl->va),
			gl->tot_len);

		goto nomem;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	pr_debug("new_state %u\n", new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				pr_err("%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}

			INIT_WORK(&ctx->reg_work, c4iw_register_device);
			queue_work(reg_workq, &ctx->reg_work);
		}
		break;
	case CXGB4_STATE_DOWN:
		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_FATAL_ERROR:
	case CXGB4_STATE_START_RECOVERY:
		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

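/*
 * Doorbell flow-control overview (a summary of the code below, not from
 * the original source): the LLD notifies us via c4iw_uld_control() when
 * the hardware doorbell FIFO fills, empties, or drops doorbells.
 * stop_queues() moves the device from NORMAL to STOPPED and tells the
 * queues to stop ringing doorbells; resume_queues() drains the backlog in
 * DB_FC_RESUME_SIZE chunks through the FLOW_CONTROL state back to NORMAL;
 * recover_queues() handles the DB_DROP case by entering RECOVERY and
 * resyncing every QP's producer indices with the hardware.
 */
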
static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		pr_err("%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
	if (!qp_list.qps) {
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state*/
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		pr_warn("%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 511,
	.ciq = true,
	.lro = false,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

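/*
 * These hooks are how the cxgb4 low-level driver hands this ULD its
 * devices and traffic: .add is called once per adapter, .rx_handler for
 * ingress CPL messages, .state_change for adapter up/down/fatal events,
 * and .control for doorbell flow-control notifications. Registration
 * happens in c4iw_init_module() below via
 * cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info).
 */
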
void _c4iw_free_wr_wait(struct kref *kref)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
	pr_debug("Free wr_wait %p\n", wr_waitp);
	kfree(wr_waitp);
}

struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
	if (wr_waitp) {
		kref_init(&wr_waitp->kref);
		pr_debug("wr_wait %p\n", wr_waitp);
	}
	return wr_waitp;
}

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);

	reg_workq = create_singlethread_workqueue("Register_iWARP_device");
	if (!reg_workq) {
		pr_err("Failed creating workqueue to register iwarp device\n");
		return -ENOMEM;
	}

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	flush_workqueue(reg_workq);
	destroy_workqueue(reg_workq);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);