/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

/* registered cxgb4 netlink callbacks */
static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	getnstimeofday(&le.poll_host_ts);
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}
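
/*
 * The log is a lock-free ring: atomic_inc_return() hands each writer a
 * unique slot, and masking with (wr_log_size - 1) wraps it. Old entries
 * are simply overwritten, so wr_log_show() below may observe a mix of
 * wrap generations; the valid flag gates which entries get printed.
 */
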
static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	struct timespec prev_ts = {0, 0};
	struct wr_log_entry *lep;
	int prev_ts_set = 0;
	int idx, end;

#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_ts_set) {
				prev_ts_set = 1;
				prev_ts = lep->poll_host_ts;
			}
			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_nsec,
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
						  "msn" : "wrid",
				   lep->wr_id,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_nsec,
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_ts = lep->poll_host_ts;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = wr_log_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = wr_log_clear,
};
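
/*
 * Usage sketch (assuming the standard debugfs mount and DRV_NAME's
 * usual value of "iw_cxgb4"): reading
 * /sys/kernel/debug/iw_cxgb4/<pci-id>/wr_log dumps the timing entries,
 * and writing anything to the file clears them via wr_log_clear().
 */
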
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(mapped_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(mapped_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in6 *mapped_lsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in6 *mapped_rsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(mapped_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(mapped_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

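/*
 * The open handlers below use a count-then-dump pattern: one idr walk
 * counts the objects so a buffer can be sized (128 bytes per QP here),
 * then a second walk formats the entries. The lock is dropped between
 * the two walks, so the dump callbacks guard against overflow and
 * return nonzero to stop the walk once the buffer fills.
 */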
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id<<8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}
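
/*
 * dump_stag() shifts the idr id left by 8 because the idr stores only
 * the upper bits of the stag (the low byte is the key); id<<8 rebuilds
 * a full stag index for cxgb4_read_tpte(). This reading of the stag
 * layout is an assumption based on how the ids are used here.
 */
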
static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}
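
/*
 * Note that stats_clear() intentionally resets only the high-water
 * marks, failure counts and event counters; the total and current
 * allocation counts are left alone since they reflect live state.
 */
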
static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(mapped_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(mapped_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops,
					 4096);
	return 0;
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
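
/*
 * Worked example for the doorbell math in c4iw_rdev_open() below,
 * assuming 4 KB pages: with udb_density = 1 (one user doorbell page
 * per qp), qpshift is PAGE_SHIFT - ilog2(1) = 12 and qpmask is 0, so
 * qpid << qpshift yields a page-aligned doorbell offset for that qp.
 */
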
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		pr_err(MOD "error allocating status page\n");
		err = -ENOMEM;
		goto err4;
	}

	if (c4iw_wr_log) {
		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
				       sizeof(*rdev->wr_log), GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		} else {
			pr_err(MOD "error allocating wr_log. Logging disabled\n");
		}
	}

	rdev->status_page->db_off = 0;

	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
	     __func__, devp->rdev.lldi.sge_ingpadboundary,
	     devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (is_t5(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}
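
/*
 * ULD lifecycle: c4iw_uld_add() below only captures the lld info and
 * queues a context on uld_ctx_list; the actual c4iw_alloc() and
 * c4iw_register_device() work is deferred until cxgb4 reports
 * CXGB4_STATE_UP through c4iw_uld_state_change().
 */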
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

, const __be64
*rsp
,
1100 const struct pkt_gl
*gl
)
1102 struct uld_ctx
*ctx
= handle
;
1103 struct c4iw_dev
*dev
= ctx
->dev
;
1104 struct sk_buff
*skb
;
1108 /* omit RSS and rsp_ctrl at end of descriptor */
1109 unsigned int len
= 64 - sizeof(struct rsp_ctrl
) - 8;
1111 skb
= alloc_skb(256, GFP_ATOMIC
);
1114 __skb_put(skb
, len
);
1115 skb_copy_to_linear_data(skb
, &rsp
[1], len
);
1116 } else if (gl
== CXGB4_MSG_AN
) {
1117 const struct rsp_ctrl
*rc
= (void *)rsp
;
1119 u32 qid
= be32_to_cpu(rc
->pldbuflen_qid
);
1120 c4iw_ev_handler(dev
, qid
);
1122 } else if (unlikely(*(u8
*)rsp
!= *(u8
*)gl
->va
)) {
1123 if (recv_rx_pkt(dev
, gl
, rsp
))
1126 pr_info("%s: unexpected FL contents at %p, " \
1127 "RSS %#llx, FL %#llx, len %u\n",
1128 pci_name(ctx
->lldi
.pdev
), gl
->va
,
1129 (unsigned long long)be64_to_cpu(*rsp
),
1130 (unsigned long long)be64_to_cpu(
1131 *(__force __be64
*)gl
->va
),
1136 skb
= cxgb4_pktgl_to_skb(gl
, 128, 128);
1141 opcode
= *(u8
*)rsp
;
1142 if (c4iw_handlers
[opcode
]) {
1143 c4iw_handlers
[opcode
](dev
, skb
);
1145 pr_info("%s no handler opcode 0x%x...\n", __func__
,
1155 static int c4iw_uld_state_change(void *handle
, enum cxgb4_state new_state
)
1157 struct uld_ctx
*ctx
= handle
;
1159 PDBG("%s new_state %u\n", __func__
, new_state
);
1160 switch (new_state
) {
1161 case CXGB4_STATE_UP
:
1162 printk(KERN_INFO MOD
"%s: Up\n", pci_name(ctx
->lldi
.pdev
));
1166 ctx
->dev
= c4iw_alloc(&ctx
->lldi
);
1167 if (IS_ERR(ctx
->dev
)) {
1169 "%s: initialization failed: %ld\n",
1170 pci_name(ctx
->lldi
.pdev
),
1175 ret
= c4iw_register_device(ctx
->dev
);
1178 "%s: RDMA registration failed: %d\n",
1179 pci_name(ctx
->lldi
.pdev
), ret
);
1184 case CXGB4_STATE_DOWN
:
1185 printk(KERN_INFO MOD
"%s: Down\n",
1186 pci_name(ctx
->lldi
.pdev
));
1190 case CXGB4_STATE_START_RECOVERY
:
1191 printk(KERN_INFO MOD
"%s: Fatal Error\n",
1192 pci_name(ctx
->lldi
.pdev
));
1194 struct ib_event event
;
1196 ctx
->dev
->rdev
.flags
|= T4_FATAL_ERROR
;
1197 memset(&event
, 0, sizeof event
);
1198 event
.event
= IB_EVENT_DEVICE_FATAL
;
1199 event
.device
= &ctx
->dev
->ibdev
;
1200 ib_dispatch_event(&event
);
1204 case CXGB4_STATE_DETACH
:
1205 printk(KERN_INFO MOD
"%s: Detach\n",
1206 pci_name(ctx
->lldi
.pdev
));
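
/*
 * Doorbell flow-control state machine, driven by c4iw_uld_control()
 * at the bottom of this section: DB_FULL stops the queues (db_state
 * STOPPED), DB_EMPTY resumes them in DB_FC_RESUME_SIZE chunks through
 * FLOW_CONTROL back to NORMAL, and DB_DROP runs full recovery
 * (RECOVERY) because doorbell writes were lost.
 */
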
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	struct c4iw_qp **qps;
	unsigned idx;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}
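
/*
 * After a DB_DROP the hardware's producer indices are stale, so
 * recover_lost_dbs() pushes each qp's host SQ/RQ pidx back to hardware
 * with cxgb4_sync_txq_pidx(), waiting for the doorbell fifo to drain
 * between qps.
 */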
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state*/
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
			    c4iw_nl_cb_table))
		pr_err("%s[%u]: Failed to add netlink callback\n"
		       , __func__, __LINE__);

	err = iwpm_init(RDMA_NL_C4IW);
	if (err) {
		pr_err("port mapper initialization failed with %d\n", err);
		ibnl_remove_client(RDMA_NL_C4IW);
		c4iw_cm_term();
		debugfs_remove_recursive(c4iw_debugfs_root);
		return err;
	}

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	iwpm_exit(RDMA_NL_C4IW);
	ibnl_remove_client(RDMA_NL_C4IW);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);