/*
 * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"
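
/* Iterate over a ULD's rx queues: the offload rxqs followed by the ciqs */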
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}
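
/*
 * ULD MSI-X vectors are tracked in a small bitmap: a queue claims an index
 * with get_msix_idx_from_bmap() (-ENOSPC once all vectors are in use) and
 * returns it with free_msix_idx_in_bmap() at teardown.
 */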

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD. All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
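
/*
 * Allocate @nq ULD response queues (and their free lists) starting at
 * index @offset of rxq_info->uldrxq, spreading them evenly across the
 * adapter's ports. Used once for the rxqs and once for the ciqs.
 */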
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info,
			  unsigned int nq, unsigned int offset, bool lro)
{
	struct sge *s = &adap->sge;
	struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
	unsigned short *ids = rxq_info->rspq_id + offset;
	unsigned int per_chan = nq / adap->params.nports;
	unsigned int bmap_idx = 0;
	int i, err, msi_idx;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i + offset] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq + offset;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}

	/* We need to free rxq also in case of ciq allocation failure */
	if (offset) {
		i = offset;
		q = rxq_info->uldrxq;
		for ( ; i; i--, q++) {
			if (q->rspq.desc)
				free_rspq_fl(adap, &q->rspq,
					     q->fl.size ? &q->fl : NULL);
		}
	}
	return err;
}
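
/*
 * Allocate a ULD's complete queue set: the offload rxqs at the start of
 * rxq_info->uldrxq and the ciqs right after them. For RDMA, also point
 * the firmware control-queue completions at the new response queues.
 */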
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
		!alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
				rxq_info->nrxq, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}
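
/*
 * Release a ULD's queue set. For RDMA, first reset the control-queue
 * completion-IQ binding (CMPLIQID 0) that setup_sge_queues_uld() set up.
 */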
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}

static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler) {
		cxgb_busy_poll_init_lock(q);
		napi_enable(&q->napi);
	}

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}
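
/*
 * Quiesce a response queue: disable NAPI, then keep trying to take the
 * busy-poll lock until it succeeds, which guarantees no poller still
 * owns the queue.
 */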
static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler) {
		napi_disable(&q->napi);
		local_bh_disable();
		while (!cxgb_poll_lock_napi(q))
			mdelay(1);
		local_bh_enable();
	}
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}
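
/*
 * The t4_uld_mem_* helpers are called by the core cxgb4 driver at adapter
 * probe/remove time to manage the per-adapter ULD state arrays.
 */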
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	return 0;
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

void t4_uld_clean_up(struct adapter *adap)
{
	struct sge_uld_rxq_info *rxq_info;
	unsigned int i;

	if (!adap->uld)
		return;
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;
		rxq_info = adap->sge.uld_rxq_info[i];
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, i);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, i);
		free_sge_queues_uld(adap, i);
		free_queues_uld(adap, i);
	}
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
}

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type. Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	int ret = 0;
	unsigned int adap_idx = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		adap->uld[type] = *p;
		uld_attach(adap, type);
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
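
/*
 * Usage (a minimal sketch, not part of this file): an upper-layer driver
 * such as iw_cxgb4 or cxgb4i fills in a struct cxgb4_uld_info and registers
 * it from its module init. The my_uld_* names and the .nrxq/.rxq_size
 * values below are hypothetical placeholders for the ULD's own callbacks
 * and sizing:
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.nrxq = MAX_ULD_QSETS,
 *		.rxq_size = 1024,
 *		.add = my_uld_add,
 *		.rx_handler = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	static int __init my_uld_init_module(void)
 *	{
 *		return cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *	}
 *
 *	static void __exit my_uld_exit_module(void)
 *	{
 *		cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *	}
 */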

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);