/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define DRV_NAME	"cxgbit"
#define DRV_VERSION	"1.0.0-ko"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include "cxgbit.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

LIST_HEAD(cdev_list_head);
/* cdev list lock */
DEFINE_MUTEX(cdev_list_lock);

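/*
 * kref release callback, invoked when the last reference to a
 * cxgbit_device is dropped: release the DDP page-pod manager and
 * free the device structure itself.
 */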
void _cxgbit_free_cdev(struct kref *kref)
{
        struct cxgbit_device *cdev;

        cdev = container_of(kref, struct cxgbit_device, kref);

        cxgbi_ppm_release(cdev2ppm(cdev));
        kfree(cdev);
}

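/*
 * Compute the maximum data segment length (MDSL) this device can
 * handle: bounded by the adapter's iSCSI I/O length, the ULP2 packet
 * limit, 8KB, and what fits in the skb fragment array.
 */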
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
        mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
                     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
        mdsl = min_t(u32, mdsl, 8192);
        mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

        cdev->mdsl = mdsl;
}

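/*
 * cxgb4 ULD "add" callback. T4 adapters are rejected; for newer chips
 * a per-adapter cxgbit_device is allocated, DDP and ISO support are
 * probed, and the device is linked on the global cdev list.
 */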
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
        struct cxgbit_device *cdev;

        if (is_t4(lldi->adapter_type))
                return ERR_PTR(-ENODEV);

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return ERR_PTR(-ENOMEM);

        kref_init(&cdev->kref);

        cdev->lldi = *lldi;

        cxgbit_set_mdsl(cdev);

        if (cxgbit_ddp_init(cdev) < 0) {
                kfree(cdev);
                return ERR_PTR(-EINVAL);
        }

        if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
                pr_info("cdev %s ddp init failed\n",
                        pci_name(lldi->pdev));

        if (lldi->fw_vers >= 0x10d2b00)
                set_bit(CDEV_ISO_ENABLE, &cdev->flags);

        spin_lock_init(&cdev->cskq.lock);
        INIT_LIST_HEAD(&cdev->cskq.list);

        mutex_lock(&cdev_list_lock);
        list_add_tail(&cdev->list, &cdev_list_head);
        mutex_unlock(&cdev_list_lock);

        pr_info("cdev %s added for iSCSI target transport\n",
                pci_name(lldi->pdev));

        return cdev;
}

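/*
 * Ask every connection on this device to close: queue an empty skb on
 * each socket's rx queue and wake its rx thread, which takes the
 * zero-length skb as a signal to tear the connection down.
 */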
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
        struct cxgbit_sock *csk;
        struct sk_buff *skb;
        bool wakeup_thread = false;

        spin_lock_bh(&cdev->cskq.lock);
        list_for_each_entry(csk, &cdev->cskq.list, list) {
                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        continue;

                spin_lock_bh(&csk->rxq.lock);
                __skb_queue_tail(&csk->rxq, skb);
                if (skb_queue_len(&csk->rxq) == 1)
                        wakeup_thread = true;
                spin_unlock_bh(&csk->rxq.lock);

                if (wakeup_thread) {
                        wake_up(&csk->waitq);
                        wakeup_thread = false;
                }
        }
        spin_unlock_bh(&cdev->cskq.lock);
}

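/*
 * Detach a device from the driver. If no connections remain, the cdev
 * is unlinked and its reference dropped immediately; otherwise the
 * open connections are asked to close and teardown finishes when the
 * last one goes away.
 */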
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
        bool free_cdev = false;

        spin_lock_bh(&cdev->cskq.lock);
        if (list_empty(&cdev->cskq.list))
                free_cdev = true;
        spin_unlock_bh(&cdev->cskq.lock);

        if (free_cdev) {
                mutex_lock(&cdev_list_lock);
                list_del(&cdev->list);
                mutex_unlock(&cdev_list_lock);

                cxgbit_put_cdev(cdev);
        } else {
                cxgbit_close_conn(cdev);
        }
}

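/* cxgb4 ULD state-change callback: track adapter UP/RECOVERY/DOWN/DETACH. */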
static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
        struct cxgbit_device *cdev = handle;

        switch (state) {
        case CXGB4_STATE_UP:
                set_bit(CDEV_STATE_UP, &cdev->flags);
                pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_START_RECOVERY:
                clear_bit(CDEV_STATE_UP, &cdev->flags);
                cxgbit_close_conn(cdev);
                pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_DOWN:
                pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_DETACH:
                clear_bit(CDEV_STATE_UP, &cdev->flags);
                pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
                cxgbit_detach_cdev(cdev);
                break;
        default:
                pr_info("cdev %s unknown state %d.\n",
                        pci_name(cdev->lldi.pdev), state);
                break;
        }
        return 0;
}

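/*
 * Decode the DDP validity bits reported by hardware for a received
 * PDU: record header/data digest errors, log pad errors, and mark the
 * payload as directly placed (DDP'd) when the DDP bit is set and no
 * inline data was delivered.
 */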
static void
cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
                      u32 ddpvld)
{
        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
                pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
                pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
        }

        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
                pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
                pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
        }

        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
                pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);

        if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
            (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
                pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
        }
}

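/*
 * Fold a CPL_RX_ISCSI_DDP completion (no payload gather list) into the
 * current LRO skb: record the data digest and PDU length, and mark the
 * PDU complete if its header has already been seen.
 */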
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
                                                lro_cb->pdu_idx);
        struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

        cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));

        pdu_cb->flags |= PDUCBF_RX_STATUS;
        pdu_cb->ddigest = ntohl(cpl->ulp_crc);
        pdu_cb->pdulen = ntohs(cpl->len);

        if (pdu_cb->flags & PDUCBF_RX_HDR)
                pdu_cb->complete = true;

        lro_cb->pdu_totallen += pdu_cb->pdulen;
        lro_cb->complete = true;
        lro_cb->pdu_idx++;
}

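/*
 * Append the pages of a free-list gather list to the LRO skb's frag
 * array, skipping 'offset' bytes of CPL header in the first fragment.
 */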
static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
                  unsigned int offset)
{
        u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
        u8 i;

        /* usually there's just one frag */
        __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
                             gl->frags[0].offset + offset,
                             gl->frags[0].size - offset);
        for (i = 1; i < gl->nfrags; i++)
                __skb_fill_page_desc(skb, skb_frag_idx + i,
                                     gl->frags[i].page,
                                     gl->frags[i].offset,
                                     gl->frags[i].size);

        skb_shinfo(skb)->nr_frags += gl->nfrags;

        /* get a reference to the last page, we don't own it */
        get_page(gl->frags[gl->nfrags - 1].page);
}

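/*
 * Fold a CPL carrying a payload gather list into the current LRO skb.
 * CPL_ISCSI_HDR carries a PDU header, CPL_ISCSI_DATA a payload
 * segment, and CPL_RX_ISCSI_CMP a combined header-plus-completion;
 * per-PDU state is tracked in the skb's LRO control block.
 */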
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
                                                lro_cb->pdu_idx);
        u32 len, offset;

        if (op == CPL_ISCSI_HDR) {
                struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

                offset = sizeof(struct cpl_iscsi_hdr);
                pdu_cb->flags |= PDUCBF_RX_HDR;
                pdu_cb->seq = ntohl(cpl->seq);
                len = ntohs(cpl->len);
                pdu_cb->hdr = gl->va + offset;
                pdu_cb->hlen = len;
                pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

                if (unlikely(gl->nfrags > 1))
                        cxgbit_skcb_flags(skb) = 0;

                lro_cb->complete = false;
        } else if (op == CPL_ISCSI_DATA) {
                struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

                offset = sizeof(struct cpl_iscsi_data);
                pdu_cb->flags |= PDUCBF_RX_DATA;
                len = ntohs(cpl->len);
                pdu_cb->dlen = len;
                pdu_cb->doffset = lro_cb->offset;
                pdu_cb->nr_dfrags = gl->nfrags;
                pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
                lro_cb->complete = false;
        } else {
                struct cpl_rx_iscsi_cmp *cpl;

                cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
                offset = sizeof(struct cpl_rx_iscsi_cmp);
                pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
                len = be16_to_cpu(cpl->len);
                pdu_cb->hdr = gl->va + offset;
                pdu_cb->hlen = len;
                pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
                pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
                pdu_cb->pdulen = ntohs(cpl->len);

                if (unlikely(gl->nfrags > 1))
                        cxgbit_skcb_flags(skb) = 0;

                cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
                                      be32_to_cpu(cpl->ddpvld));

                if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
                        pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
                        pdu_cb->complete = true;
                } else if (pdu_cb->flags & PDUCBF_RX_DATA) {
                        pdu_cb->complete = true;
                }

                lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
                lro_cb->complete = true;
                lro_cb->pdu_idx++;
        }

        cxgbit_copy_frags(skb, gl, offset);

        pdu_cb->frags += gl->nfrags;
        lro_cb->offset += len;
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;
}

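/*
 * Allocate a fresh skb to aggregate PDUs for one connection. The
 * linear area is zeroed since the LRO and per-PDU control blocks live
 * there; the csk reference taken here is dropped when the skb is
 * flushed.
 */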
static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
                    const __be64 *rsp, struct napi_struct *napi)
{
        struct sk_buff *skb;
        struct cxgbit_lro_cb *lro_cb;

        skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);

        if (unlikely(!skb))
                return NULL;

        memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);

        cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;

        lro_cb = cxgbit_skb_lro_cb(skb);

        cxgbit_get_csk(csk);

        lro_cb->csk = csk;

        return skb;
}

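/*
 * Hand a completed LRO skb to the connection's rx thread, waking it
 * only on the empty-to-non-empty queue transition.
 */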
static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        bool wakeup_thread = false;

        spin_lock(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        if (skb_queue_len(&csk->rxq) == 1)
                wakeup_thread = true;
        spin_unlock(&csk->rxq.lock);

        if (wakeup_thread)
                wake_up(&csk->waitq);
}

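/*
 * End the LRO session for one skb: unlink it from the manager's queue,
 * deliver it to the owning connection, and drop the csk reference
 * taken when the session was started.
 */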
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_sock *csk = lro_cb->csk;

        csk->lro_skb = NULL;

        __skb_unlink(skb, &lro_mgr->lroq);
        cxgbit_queue_lro_skb(csk, skb);

        cxgbit_put_csk(csk);

        lro_mgr->lro_pkts++;
        lro_mgr->lro_session_cnt--;
}

static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
        struct sk_buff *skb;

        while ((skb = skb_peek(&lro_mgr->lroq)))
                cxgbit_lro_flush(lro_mgr, skb);
}

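/*
 * Aggregate one ingress CPL into the connection's LRO skb, starting a
 * new session (flushing old ones if the session table is full) when
 * needed. The current skb is flushed first if adding this packet would
 * overflow the frag array, the flush length, or the per-skb PDU limit.
 * Returns 0 if the packet was consumed, -1 otherwise.
 */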
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
                   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
                   struct napi_struct *napi)
{
        struct sk_buff *skb;
        struct cxgbit_lro_cb *lro_cb;

        if (!csk) {
                pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
                goto out;
        }

        if (csk->lro_skb)
                goto add_packet;

start_lro:
        if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
                cxgbit_uld_lro_flush(lro_mgr);
                goto start_lro;
        }

        skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
        if (unlikely(!skb))
                goto out;

        csk->lro_skb = skb;

        __skb_queue_tail(&lro_mgr->lroq, skb);
        lro_mgr->lro_session_cnt++;

add_packet:
        skb = csk->lro_skb;
        lro_cb = cxgbit_skb_lro_cb(skb);

        if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
            MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
            (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
                cxgbit_lro_flush(lro_mgr, skb);
                goto start_lro;
        }

        if (gl)
                cxgbit_lro_add_packet_gl(skb, op, gl);
        else
                cxgbit_lro_add_packet_rsp(skb, op, rsp);

        lro_mgr->lro_merged++;

        return 0;

out:
        return -1;
}

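/*
 * Main ingress entry point from cxgb4. Connection-scoped CPLs are
 * looked up by tid; iSCSI data-path opcodes are fed through the LRO
 * machinery, everything else is copied into a plain skb and dispatched
 * to the matching handler in cxgbit_cplhandlers[].
 */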
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
                          const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
                          struct napi_struct *napi)
{
        struct cxgbit_device *cdev = hndl;
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct cpl_tx_data *rpl = NULL;
        struct cxgbit_sock *csk = NULL;
        unsigned int tid = 0;
        struct sk_buff *skb;
        unsigned int op = *(u8 *)rsp;
        bool lro_flush = true;

        switch (op) {
        case CPL_ISCSI_HDR:
        case CPL_ISCSI_DATA:
        case CPL_RX_ISCSI_CMP:
        case CPL_RX_ISCSI_DDP:
        case CPL_FW4_ACK:
                lro_flush = false;
                /* fall through */
        case CPL_ABORT_RPL_RSS:
        case CPL_PASS_ESTABLISH:
        case CPL_PEER_CLOSE:
        case CPL_CLOSE_CON_RPL:
        case CPL_ABORT_REQ_RSS:
        case CPL_SET_TCB_RPL:
        case CPL_RX_DATA:
                rpl = gl ? (struct cpl_tx_data *)gl->va :
                           (struct cpl_tx_data *)(rsp + 1);
                tid = GET_TID(rpl);
                csk = lookup_tid(lldi->tids, tid);
                break;
        default:
                break;
        }

        if (csk && csk->lro_skb && lro_flush)
                cxgbit_lro_flush(lro_mgr, csk->lro_skb);

        if (!gl) {
                unsigned int len;

                if (op == CPL_RX_ISCSI_DDP) {
                        if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
                                                napi))
                                return 0;
                }

                len = 64 - sizeof(struct rsp_ctrl) - 8;
                skb = napi_alloc_skb(napi, len);
                if (!skb)
                        goto nomem;
                __skb_put(skb, len);
                skb_copy_to_linear_data(skb, &rsp[1], len);
        } else {
                if (unlikely(op != *(u8 *)gl->va)) {
                        pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
                                gl->va, be64_to_cpu(*rsp),
                                get_unaligned_be64(gl->va),
                                gl->tot_len);
                        return 0;
                }

                if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
                    (op == CPL_RX_ISCSI_CMP)) {
                        if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
                                                napi))
                                return 0;
                }

#define RX_PULL_LEN 128
                skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
                if (unlikely(!skb))
                        goto nomem;
        }

        rpl = (struct cpl_tx_data *)skb->data;
        op = rpl->ot.opcode;
        cxgbit_skcb_rx_opcode(skb) = op;

        pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
                 cdev, op, rpl->ot.opcode_tid,
                 ntohl(rpl->ot.opcode_tid), skb);

        if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
                cxgbit_cplhandlers[op](cdev, skb);
        } else {
                pr_err("No handler for opcode 0x%x.\n", op);
                __kfree_skb(skb);
        }
        return 0;
nomem:
        pr_err("%s OOM bailing out.\n", __func__);
        return 1;
}

#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
        struct dcb_app_type dcb_app;
        struct work_struct work;
};

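/*
 * Apply a new DCB priority to every connection matching the given port
 * and local port number: sockets whose priority changed are nudged
 * with an empty skb, using the same wakeup mechanism as
 * cxgbit_close_conn().
 */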
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
                           u8 dcb_priority, u16 port_num)
{
        struct cxgbit_sock *csk;
        struct sk_buff *skb;
        u16 local_port;
        bool wakeup_thread = false;

        spin_lock_bh(&cdev->cskq.lock);
        list_for_each_entry(csk, &cdev->cskq.list, list) {
                if (csk->port_id != port_id)
                        continue;

                if (csk->com.local_addr.ss_family == AF_INET6) {
                        struct sockaddr_in6 *sock_in6;

                        sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
                        local_port = ntohs(sock_in6->sin6_port);
                } else {
                        struct sockaddr_in *sock_in;

                        sock_in = (struct sockaddr_in *)&csk->com.local_addr;
                        local_port = ntohs(sock_in->sin_port);
                }

                if (local_port != port_num)
                        continue;

                if (csk->dcb_priority == dcb_priority)
                        continue;

                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        continue;

                spin_lock(&csk->rxq.lock);
                __skb_queue_tail(&csk->rxq, skb);
                if (skb_queue_len(&csk->rxq) == 1)
                        wakeup_thread = true;
                spin_unlock(&csk->rxq.lock);

                if (wakeup_thread) {
                        wake_up(&csk->waitq);
                        wakeup_thread = false;
                }
        }
        spin_unlock_bh(&cdev->cskq.lock);
}

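/*
 * Deferred handler for DCB app-table events: extract the iSCSI
 * priority (IEEE encoding gives it directly; CEE reports a bitmap, so
 * ffs() picks the lowest set bit), map the ifindex to a cxgbit device
 * and port, and push the new priority to affected connections.
 */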
static void cxgbit_dcb_workfn(struct work_struct *work)
{
        struct cxgbit_dcb_work *dcb_work;
        struct net_device *ndev;
        struct cxgbit_device *cdev = NULL;
        struct dcb_app_type *iscsi_app;
        u8 priority, port_id = 0xff;

        dcb_work = container_of(work, struct cxgbit_dcb_work, work);
        iscsi_app = &dcb_work->dcb_app;

        if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
                if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
                        goto out;

                priority = iscsi_app->app.priority;

        } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
                if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
                        goto out;

                if (!iscsi_app->app.priority)
                        goto out;

                priority = ffs(iscsi_app->app.priority) - 1;
        } else {
                goto out;
        }

        pr_debug("priority for ifid %d is %u\n",
                 iscsi_app->ifindex, priority);

        ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);

        if (!ndev)
                goto out;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_device(ndev, &port_id);

        dev_put(ndev);

        if (!cdev) {
                mutex_unlock(&cdev_list_lock);
                goto out;
        }

        cxgbit_update_dcb_priority(cdev, port_id, priority,
                                   iscsi_app->app.protocol);
        mutex_unlock(&cdev_list_lock);
out:
        kfree(dcb_work);
}

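/*
 * DCB event notifier: runs in atomic context, so copy the event and
 * defer the real work to a workqueue.
 */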
static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
                       void *data)
{
        struct cxgbit_dcb_work *dcb_work;
        struct dcb_app_type *dcb_app = data;

        dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
        if (!dcb_work)
                return NOTIFY_DONE;

        dcb_work->dcb_app = *dcb_app;
        INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
        schedule_work(&dcb_work->work);
        return NOTIFY_OK;
}
#endif

static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
        return TARGET_PROT_NORMAL;
}

static struct iscsit_transport cxgbit_transport = {
        .name			= DRV_NAME,
        .transport_type		= ISCSI_CXGBIT,
        .rdma_shutdown		= false,
        .priv_size		= sizeof(struct cxgbit_cmd),
        .owner			= THIS_MODULE,
        .iscsit_setup_np	= cxgbit_setup_np,
        .iscsit_accept_np	= cxgbit_accept_np,
        .iscsit_free_np		= cxgbit_free_np,
        .iscsit_free_conn	= cxgbit_free_conn,
        .iscsit_get_login_rx	= cxgbit_get_login_rx,
        .iscsit_put_login_tx	= cxgbit_put_login_tx,
        .iscsit_immediate_queue	= iscsit_immediate_queue,
        .iscsit_response_queue	= iscsit_response_queue,
        .iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
        .iscsit_queue_data_in	= iscsit_queue_rsp,
        .iscsit_queue_status	= iscsit_queue_rsp,
        .iscsit_xmit_pdu	= cxgbit_xmit_pdu,
        .iscsit_get_r2t_ttt	= cxgbit_get_r2t_ttt,
        .iscsit_get_rx_pdu	= cxgbit_get_rx_pdu,
        .iscsit_validate_params	= cxgbit_validate_params,
        .iscsit_release_cmd	= cxgbit_release_cmd,
        .iscsit_aborted_task	= iscsit_aborted_task,
        .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};

static struct cxgb4_uld_info cxgbit_uld_info = {
        .name		= DRV_NAME,
        .nrxq		= MAX_ULD_QSETS,
        .ntxq		= MAX_ULD_QSETS,
        .rxq_size	= 1024,
        .lro		= true,
        .add		= cxgbit_uld_add,
        .state_change	= cxgbit_uld_state_change,
        .lro_rx_handler	= cxgbit_uld_lro_rx_handler,
        .lro_flush	= cxgbit_uld_lro_flush,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
        .notifier_call = cxgbit_dcbevent_notify,
};
#endif

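/*
 * Module init: register with cxgb4 as the iSCSI target ULD and with
 * the iSCSI target core as a transport. The BUILD_BUG_ON ensures the
 * driver's private control block fits inside skb->cb.
 */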
static int __init cxgbit_init(void)
{
        cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
        iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
        pr_info("%s dcb enabled.\n", DRV_NAME);
        register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
                     sizeof(union cxgbit_skb_cb));
        return 0;
}

static void __exit cxgbit_exit(void)
{
        struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
        unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
        mutex_lock(&cdev_list_lock);
        list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
                list_del(&cdev->list);
                cxgbit_put_cdev(cdev);
        }
        mutex_unlock(&cdev_list_lock);
        iscsit_unregister_transport(&cxgbit_transport);
        cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}

module_init(cxgbit_init);
module_exit(cxgbit_exit);

MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");