]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/cxgbi/libcxgbi.h
cxgb3i,cxgb4i,libcxgbi: remove iSCSI DDP support
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / cxgbi / libcxgbi.h
CommitLineData
9ba682f0 1/*
2 * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
3 *
1149a5ed 4 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
9ba682f0 5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#ifndef __LIBCXGBI_H__
15#define __LIBCXGBI_H__
16
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/debugfs.h>
21#include <linux/list.h>
22#include <linux/netdevice.h>
23#include <linux/if_vlan.h>
24#include <linux/scatterlist.h>
25#include <linux/skbuff.h>
26#include <linux/vmalloc.h>
27#include <scsi/scsi_device.h>
28#include <scsi/libiscsi_tcp.h>
29
/*
 * Debug-message categories.  Each value selects one bit of the module
 * parameter "dbg_level"; see the log_debug() macro below.
 */
enum cxgbi_dbg_flag {
	CXGBI_DBG_ISCSI,	/* iSCSI layer */
	CXGBI_DBG_DDP,		/* direct data placement */
	CXGBI_DBG_TOE,		/* TCP offload engine */
	CXGBI_DBG_SOCK,		/* offloaded connection state */

	CXGBI_DBG_PDU_TX,	/* PDU transmit path */
	CXGBI_DBG_PDU_RX,	/* PDU receive path */
	CXGBI_DBG_DEV,		/* device registration/lookup */
};

/* Emit a pr_info() only when one of the bits in @level is set in dbg_level. */
#define log_debug(level, fmt, ...)	\
	do {	\
		if (dbg_level & (level)) \
			pr_info(fmt, ##__VA_ARGS__); \
	} while (0)

/*
 * Log a message prefixed with a source/destination socket address pair
 * (IPv4 or IPv6), only when CXGBI_DBG_SOCK logging is enabled.  The early
 * "break" exits the enclosing do/while(0), skipping the pr_info().
 */
#define pr_info_ipaddr(fmt_trail,					\
			addr1, addr2, args_trail...)			\
do {									\
	if (!((1 << CXGBI_DBG_SOCK) & dbg_level))			\
		break;							\
	pr_info("%pISpc - %pISpc, " fmt_trail,				\
		addr1, addr2, args_trail);				\
} while (0)

/* max. connections per adapter */
#define	CXGBI_MAX_CONN		16384

/* always allocate rooms for AHS */
#define SKB_TX_ISCSI_PDU_HEADER_MAX	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)

#define	ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8)*/

/*
 * align pdu size to multiple of 512 for better performance
 * (note: this rounds n DOWN to a multiple of 512)
 */
#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0)

/* iSCSI ULP (upper layer protocol) mode number used by the hardware */
#define ULP2_MODE_ISCSI		2

/* largest packet the ULP2 engine handles; payload is what remains after
 * subtracting the maximum non-payload PDU overhead
 */
#define ULP2_MAX_PKT_SIZE	16224
#define ULP2_MAX_PDU_PAYLOAD	\
	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
75
/*
 * For iscsi connections HW may inserts digest bytes into the pdu. Those digest
 * bytes are not sent by the host but are part of the TCP payload and therefore
 * consume TCP sequence space.
 *
 * Indexed by the 2-bit digest submode: [0] none, [1] header digest only,
 * [2] data digest only, [3] both.
 */
static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };

/* Number of extra TCP-sequence bytes the HW inserts for digest @submode. */
static inline unsigned int cxgbi_ulp_extra_len(int submode)
{
	unsigned int idx = submode & 3;

	return ulp2_extra_len[idx];
}
86
/*
 * Bit positions of status fields in the CPL_RX_DDP status word the
 * hardware reports with each received pdu.
 */
#define CPL_RX_DDP_STATUS_DDP_SHIFT	16 /* ddp'able */
#define CPL_RX_DDP_STATUS_PAD_SHIFT	19 /* pad error */
#define CPL_RX_DDP_STATUS_HCRC_SHIFT	20 /* hcrc error */
#define CPL_RX_DDP_STATUS_DCRC_SHIFT	21 /* dcrc error */

/* number of ddp page sizes supported */
#define DDP_PGIDX_MAX		4

/*
 * sge_opaque_hdr -
 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
 * and for which we must reserve space.
 */
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * struct cxgbi_sock - state of one offloaded iSCSI TCP connection.
 *
 * Lifetime is reference-counted through @refcnt (cxgbi_sock_get()/put();
 * released by cxgbi_sock_free()).  @callback_lock protects @user_data.
 */
struct cxgbi_sock {
	struct cxgbi_device *cdev;	/* owning adapter */

	int tid;			/* hw connection tid */
	int atid;			/* active-open tid (see CTPF_HAS_ATID) */
	unsigned long flags;		/* enum cxgbi_sock_flags bits */
	unsigned int mtu;
	unsigned short rss_qid;
	unsigned short txq_idx;
	unsigned short advmss;
	unsigned int tx_chan;
	unsigned int rx_chan;
	unsigned int mss_idx;
	unsigned int smac_idx;
	unsigned char port_id;
	int wr_max_cred;		/* max tx work-request credits */
	int wr_cred;			/* currently available credits */
	int wr_una_cred;		/* credits not yet acked by hw */
	unsigned char hcrc_len;		/* header digest length */
	unsigned char dcrc_len;		/* data digest length */

	void *l2t;			/* opaque L2 table entry */
	struct sk_buff *wr_pending_head;	/* un-acked WR list, linked */
	struct sk_buff *wr_pending_tail;	/* via cxgbi_skcb_tx_wr_next */
	struct sk_buff *cpl_close;	/* pre-allocated control messages */
	struct sk_buff *cpl_abort_req;
	struct sk_buff *cpl_abort_rpl;
	struct sk_buff *skb_ulp_lhdr;	/* pdu header being assembled */
	spinlock_t lock;
	struct kref refcnt;
	unsigned int state;		/* enum cxgbi_sock_states */
	unsigned int csk_family;	/* AF_INET or AF_INET6 */
	union {
		struct sockaddr_in saddr;
		struct sockaddr_in6 saddr6;
	};
	union {
		struct sockaddr_in daddr;
		struct sockaddr_in6 daddr6;
	};
	struct dst_entry *dst;
	struct sk_buff_head receive_queue;
	struct sk_buff_head write_queue;
	struct timer_list retry_timer;
	int err;
	rwlock_t callback_lock;		/* protects user_data */
	void *user_data;

	/* tcp sequence-space bookkeeping */
	u32 rcv_nxt;
	u32 copied_seq;
	u32 rcv_wup;
	u32 snd_nxt;
	u32 snd_una;
	u32 write_seq;
	u32 snd_win;
	u32 rcv_win;
};
161
/*
 * connection states
 * NOTE: cxgbi_sock_is_closing() relies on this ordering -- every state at
 * or after CTP_ACTIVE_CLOSE counts as "closing".
 */
enum cxgbi_sock_states{
	CTP_CLOSED,
	CTP_CONNECTING,
	CTP_ACTIVE_OPEN,
	CTP_ESTABLISHED,
	CTP_ACTIVE_CLOSE,
	CTP_PASSIVE_CLOSE,
	CTP_CLOSE_WAIT_1,
	CTP_CLOSE_WAIT_2,
	CTP_ABORTING,
};

/*
 * Connection flags -- many to track some close related events.
 */
enum cxgbi_sock_flags {
	CTPF_ABORT_RPL_RCVD,	/*received one ABORT_RPL_RSS message */
	CTPF_ABORT_REQ_RCVD,	/*received one ABORT_REQ_RSS message */
	CTPF_ABORT_RPL_PENDING,	/* expecting an abort reply */
	CTPF_TX_DATA_SENT,	/* already sent a TX_DATA WR */
	CTPF_ACTIVE_CLOSE_NEEDED,/* need to be closed */
	CTPF_HAS_ATID,		/* reserved atid */
	CTPF_HAS_TID,		/* reserved hw tid */
	CTPF_OFFLOAD_DOWN,	/* offload function off */
};

/* Receive-path info kept in the skb control block. */
struct cxgbi_skb_rx_cb {
	__u32 ddigest;		/* data digest */
	__u32 pdulen;		/* total pdu length */
};

/* Transmit-path info kept in the skb control block. */
struct cxgbi_skb_tx_cb {
	void *l2t;			/* L2 table entry */
	struct sk_buff *wr_next;	/* next skb on the pending-WR list */
};

/* flag bits for cxgbi_skb_cb.flags */
enum cxgbi_skcb_flags {
	SKCBF_TX_NEED_HDR,	/* packet needs a header */
	SKCBF_RX_COALESCED,	/* received whole pdu */
	SKCBF_RX_HDR,		/* received pdu header */
	SKCBF_RX_DATA,		/* received pdu payload */
	SKCBF_RX_STATUS,	/* received ddp status */
	SKCBF_RX_DATA_DDPD,	/* pdu payload ddp'd */
	SKCBF_RX_HCRC_ERR,	/* header digest error */
	SKCBF_RX_DCRC_ERR,	/* data digest error */
	SKCBF_RX_PAD_ERR,	/* padding byte error */
};

/*
 * Driver-private per-skb state, overlaid on skb->cb[] via CXGBI_SKB_CB().
 * An skb is either on the rx or the tx path, hence the union.
 */
struct cxgbi_skb_cb {
	unsigned char ulp_mode;
	unsigned long flags;		/* enum cxgbi_skcb_flags */
	unsigned int seq;		/* tcp sequence number */
	union {
		struct cxgbi_skb_rx_cb rx;
		struct cxgbi_skb_tx_cb tx;
	};
};

/* accessors for the control block and its fields */
#define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
#define cxgbi_skcb_flags(skb)		(CXGBI_SKB_CB(skb)->flags)
#define cxgbi_skcb_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->ulp_mode)
#define cxgbi_skcb_tcp_seq(skb)		(CXGBI_SKB_CB(skb)->seq)
#define cxgbi_skcb_rx_ddigest(skb)	(CXGBI_SKB_CB(skb)->rx.ddigest)
#define cxgbi_skcb_rx_pdulen(skb)	(CXGBI_SKB_CB(skb)->rx.pdulen)
#define cxgbi_skcb_tx_wr_next(skb)	(CXGBI_SKB_CB(skb)->tx.wr_next)
230
231static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
232 enum cxgbi_skcb_flags flag)
233{
234 __set_bit(flag, &(cxgbi_skcb_flags(skb)));
235}
236
237static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
238 enum cxgbi_skcb_flags flag)
239{
240 __clear_bit(flag, &(cxgbi_skcb_flags(skb)));
241}
242
84944d8c
KX
243static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
244 enum cxgbi_skcb_flags flag)
9ba682f0 245{
246 return test_bit(flag, &(cxgbi_skcb_flags(skb)));
247}
248
/* Set @flag on the connection (non-atomic) and log the transition. */
static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
					enum cxgbi_sock_flags flag)
{
	__set_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, bit %d.\n",
		csk, csk->state, csk->flags, flag);
}

/* Clear @flag on the connection (non-atomic) and log the transition. */
static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
					enum cxgbi_sock_flags flag)
{
	__clear_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, bit %d.\n",
		csk, csk->state, csk->flags, flag);
}
266
267static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
268 enum cxgbi_sock_flags flag)
269{
270 if (csk == NULL)
271 return 0;
272 return test_bit(flag, &csk->flags);
273}
274
/* Move the connection to @state (enum cxgbi_sock_states), logging both
 * the old and the new state.
 */
static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, state -> %u.\n",
		csk, csk->state, csk->flags, state);
	csk->state = state;
}
282
283static inline void cxgbi_sock_free(struct kref *kref)
284{
285 struct cxgbi_sock *csk = container_of(kref,
286 struct cxgbi_sock,
287 refcnt);
288 if (csk) {
289 log_debug(1 << CXGBI_DBG_SOCK,
290 "free csk 0x%p, state %u, flags 0x%lx\n",
291 csk, csk->state, csk->flags);
292 kfree(csk);
293 }
294}
295
/* Drop a reference; frees the csk via cxgbi_sock_free() on the last put.
 * @fn is the caller's name, used only in the debug log.
 */
static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, put csk 0x%p, ref %u-1.\n",
		fn, csk, atomic_read(&csk->refcnt.refcount));
	kref_put(&csk->refcnt, cxgbi_sock_free);
}
#define cxgbi_sock_put(csk)	__cxgbi_sock_put(__func__, csk)

/* Take an additional reference on @csk. */
static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, get csk 0x%p, ref %u+1.\n",
		fn, csk, atomic_read(&csk->refcnt.refcount));
	kref_get(&csk->refcnt);
}
#define cxgbi_sock_get(csk)	__cxgbi_sock_get(__func__, csk)

/* True for any state at or past CTP_ACTIVE_CLOSE (relies on enum order). */
static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk)
{
	return csk->state >= CTP_ACTIVE_CLOSE;
}

static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk)
{
	return csk->state == CTP_ESTABLISHED;
}
323
324static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk)
325{
326 struct sk_buff *skb;
327
328 while ((skb = __skb_dequeue(&csk->write_queue)))
329 __kfree_skb(skb);
330}
331
/*
 * Compute the smallest TCP window-scale shift (capped at 14, per RFC 1323)
 * such that a 16-bit window field can cover @win bytes.
 */
static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
{
	unsigned int shift;

	for (shift = 0; shift < 14; shift++)
		if ((65535U << shift) >= win)
			break;
	return shift;
}
340
24d3f95a 341static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
9ba682f0 342{
9ba682f0 343 struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);
344
345 if (skb) {
346 __skb_put(skb, wrlen);
347 memset(skb->head, 0, wrlen + dlen);
348 } else
24d3f95a 349 pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
9ba682f0 350 return skb;
351}
352
353
/*
 * The number of WRs needed for an skb depends on the number of fragments
 * in the skb and whether it has any payload in its main body. This maps the
 * length of the gather list represented by an skb into the # of necessary WRs.
 * The extra two fragments are for iscsi bhs and payload padding.
 */
#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)

/* Empty the pending work-request list (does not free the skbs). */
static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk)
{
	csk->wr_pending_head = csk->wr_pending_tail = NULL;
}
366
/* Append @skb to the tail of the pending (not-yet-acked) work-request list. */
static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
					struct sk_buff *skb)
{
	cxgbi_skcb_tx_wr_next(skb) = NULL;
	/*
	 * We want to take an extra reference since both us and the driver
	 * need to free the packet before it's really freed. We know there's
	 * just one user currently so we use atomic_set rather than skb_get
	 * to avoid the atomic op.
	 */
	atomic_set(&skb->users, 2);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}
385
386static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk)
387{
388 int n = 0;
389 const struct sk_buff *skb = csk->wr_pending_head;
390
391 while (skb) {
392 n += skb->csum;
393 skb = cxgbi_skcb_tx_wr_next(skb);
394 }
395 return n;
396}
397
/* Return the head of the pending work-request list without removing it. */
static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk)
{
	return csk->wr_pending_head;
}
402
403static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk)
404{
405 struct sk_buff *skb = csk->wr_pending_head;
406
407 if (likely(skb)) {
408 csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb);
409 cxgbi_skcb_tx_wr_next(skb) = NULL;
410 }
411 return skb;
412}
413
/* connection state-machine helpers implemented in the common library */
void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *);
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *);
void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *);
void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int);
void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *);
void cxgbi_sock_closed(struct cxgbi_sock *);
void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int);
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *);
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *);
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32);
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int,
				int);
unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int);
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *);
428
/* Host adapter state, one per net-device port. */
struct cxgbi_hba {
	struct net_device *ndev;
	struct net_device *vdev;	/* vlan dev */
	struct Scsi_Host *shost;
	struct cxgbi_device *cdev;	/* owning adapter */
	__be32 ipv4addr;
	unsigned char port_id;
};

/* Source-port allocation map for outgoing connections; @lock protects it. */
struct cxgbi_ports_map {
	unsigned int max_connect;
	unsigned int used;
	unsigned short sport_base;	/* first source-port number */
	spinlock_t lock;
	unsigned int next;		/* next index to try */
	struct cxgbi_sock **port_csk;	/* presumably one slot per port; NULL
					 * when free -- verify in libcxgbi.c */
};

/* cxgbi_device.flags bits */
#define CXGBI_FLAG_DEV_T3		0x1
#define CXGBI_FLAG_DEV_T4		0x2
#define CXGBI_FLAG_ADAPTER_RESET	0x4
#define CXGBI_FLAG_IPV4_SET		0x10

/*
 * struct cxgbi_device - one Chelsio adapter (T3 or T4).
 *
 * The function pointers are filled in by the hw-specific driver and
 * provide the per-chip connection and ddp operations.
 */
struct cxgbi_device {
	struct list_head list_head;
	struct list_head rcu_node;	/* membership in rcu device list */
	unsigned int flags;		/* CXGBI_FLAG_* */
	struct net_device **ports;
	void *lldev;			/* lower-level driver handle */
	struct cxgbi_hba **hbas;
	const unsigned short *mtus;	/* hw-supported mtu table */
	unsigned char nmtus;
	unsigned char nports;
	struct pci_dev *pdev;
	struct dentry *debugfs_root;
	struct iscsi_transport *itp;

	unsigned int pfvf;
	unsigned int rx_credit_thres;
	unsigned int skb_tx_rsvd;
	unsigned int skb_rx_extra;	/* for msg coalesced mode */
	unsigned int tx_max_size;
	unsigned int rx_max_size;
	struct cxgbi_ports_map pmap;

	void (*dev_ddp_cleanup)(struct cxgbi_device *);
	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
				unsigned int, int, int, int);
	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
				unsigned int, int, bool);

	void (*csk_release_offload_resources)(struct cxgbi_sock *);
	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
	u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
	int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
	void (*csk_send_abort_req)(struct cxgbi_sock *);
	void (*csk_send_close_req)(struct cxgbi_sock *);
	int (*csk_alloc_cpls)(struct cxgbi_sock *);
	int (*csk_init_act_open)(struct cxgbi_sock *);

	void *dd_data;			/* hw-driver private area */
};
#define cxgbi_cdev_priv(cdev)	((cdev)->dd_data)

/* Per-connection glue between libcxgbi and libiscsi. */
struct cxgbi_conn {
	struct cxgbi_endpoint *cep;
	struct iscsi_conn *iconn;
	struct cxgbi_hba *chba;
	u32 task_idx_bits;
};

/* Ties an iscsi endpoint to its offloaded socket and hba. */
struct cxgbi_endpoint {
	struct cxgbi_conn *cconn;
	struct cxgbi_hba *chba;
	struct cxgbi_sock *csk;
};
504
/* max number of 512-byte fragments a full-size pdu payload can span */
#define MAX_PDU_FRAGS	((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)

/* Per-task transmit state, stored in the libiscsi task's dd_data area. */
struct cxgbi_task_data {
	unsigned short nr_frags;
	struct page_frag frags[MAX_PDU_FRAGS];
	struct sk_buff *skb;
	unsigned int offset;
	unsigned int count;
	unsigned int sgoffset;
};
/* cxgbi per-task data lives directly after the iscsi_tcp task data */
#define iscsi_task_cxgbi_data(task) \
	((task)->dd_data + sizeof(struct iscsi_tcp_task))

/*
 * Allocate a zeroed buffer of @size bytes: try kmalloc first (without
 * allocation-failure warnings), fall back to vmalloc.  Pair with
 * cxgbi_free_big_mem().
 */
static inline void *cxgbi_alloc_big_mem(unsigned int size,
					gfp_t gfp)
{
	void *p = kzalloc(size, gfp | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);

	return p;
}

/* Free a cxgbi_alloc_big_mem() buffer; kvfree() handles both allocators. */
static inline void cxgbi_free_big_mem(void *addr)
{
	kvfree(addr);
}
532
533static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
534{
535 if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
536 chba->ipv4addr = ipaddr;
537 else
538 pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n",
539 chba->ndev->name);
540}
541
/* adapter registration and lookup */
struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
void cxgbi_device_unregister(struct cxgbi_device *);
void cxgbi_device_unregister_all(unsigned int flag);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
						     int *);
int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
			struct scsi_host_template *,
			struct scsi_transport_template *);
void cxgbi_hbas_remove(struct cxgbi_device *);

/* source-port map management */
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
			unsigned int max_conn);
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev);

/* pdu rx/tx entry points */
void cxgbi_conn_tx_open(struct cxgbi_sock *);
void cxgbi_conn_pdu_ready(struct cxgbi_sock *);
int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8);
int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int);
int cxgbi_conn_xmit_pdu(struct iscsi_task *);

void cxgbi_cleanup_task(struct iscsi_task *task);

/* iscsi transport template callbacks */
umode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
			enum iscsi_param, char *, int);
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
			struct iscsi_cls_conn *, u64, int);
void cxgbi_destroy_session(struct iscsi_cls_session *);
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *,
			u16, u16, u32);
int cxgbi_set_host_param(struct Scsi_Host *,
			enum iscsi_host_param, char *, int);
int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *);
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *,
			struct sockaddr *, int);
int cxgbi_ep_poll(struct iscsi_endpoint *, int);
void cxgbi_ep_disconnect(struct iscsi_endpoint *);

/* transport/library init and teardown */
int cxgbi_iscsi_init(struct iscsi_transport *,
			struct scsi_transport_template **);
void cxgbi_iscsi_cleanup(struct iscsi_transport *,
			struct scsi_transport_template **);
void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *);

/* ddp setup and teardown */
int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int,
			unsigned int, unsigned int);
int cxgbi_ddp_cleanup(struct cxgbi_device *);
void cxgbi_ddp_page_size_factor(int *);
9ba682f0 594#endif /*__LIBCXGBI_H__*/