1 | /* | |
2 | * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management | |
3 | * | |
4 | * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved. | |
5 | * | |
6 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
7 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
8 | * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this | |
9 | * release for licensing terms and conditions. | |
10 | * | |
11 | * Written by: Dimitris Michailidis (dm@chelsio.com) | |
12 | * Karen Xie (kxie@chelsio.com) | |
13 | */ | |
14 | ||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | |
16 | ||
17 | #include <linux/version.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/moduleparam.h> | |
20 | #include <scsi/scsi_host.h> | |
21 | ||
22 | #include "common.h" | |
23 | #include "t3_cpl.h" | |
24 | #include "t3cdev.h" | |
25 | #include "cxgb3_defs.h" | |
26 | #include "cxgb3_ctl_defs.h" | |
27 | #include "cxgb3_offload.h" | |
28 | #include "firmware_exports.h" | |
29 | #include "cxgb3i.h" | |
30 | ||
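/*
 * Note: dbg_level is declared before the libcxgbi.h include below because
 * the debug logging macros pulled in from that header appear to reference
 * this per-driver flag.
 */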
31 | static unsigned int dbg_level; | |
32 | #include "../libcxgbi.h" | |
33 | ||
34 | #define DRV_MODULE_NAME "cxgb3i" | |
35 | #define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver" | |
36 | #define DRV_MODULE_VERSION "2.0.0" | |
37 | #define DRV_MODULE_RELDATE "Jun. 2010" | |
38 | ||
39 | static char version[] = | |
40 | DRV_MODULE_DESC " " DRV_MODULE_NAME | |
41 | " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | |
42 | ||
43 | MODULE_AUTHOR("Chelsio Communications, Inc."); | |
44 | MODULE_DESCRIPTION(DRV_MODULE_DESC); | |
45 | MODULE_VERSION(DRV_MODULE_VERSION); | |
46 | MODULE_LICENSE("GPL"); | |
47 | ||
48 | module_param(dbg_level, uint, 0644); | |
49 | MODULE_PARM_DESC(dbg_level, "debug flag (default=0)"); | |
50 | ||
51 | static int cxgb3i_rcv_win = 256 * 1024; | |
52 | module_param(cxgb3i_rcv_win, int, 0644); | |
53 | MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)"); | |
54 | ||
55 | static int cxgb3i_snd_win = 128 * 1024; | |
56 | module_param(cxgb3i_snd_win, int, 0644); | |
57 | MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)"); | |
58 | ||
59 | static int cxgb3i_rx_credit_thres = 10 * 1024; | |
60 | module_param(cxgb3i_rx_credit_thres, int, 0644); | |
61 | MODULE_PARM_DESC(cxgb3i_rx_credit_thres, | |
62 | "RX credits return threshold in bytes (default=10KB)"); | |
63 | ||
64 | static unsigned int cxgb3i_max_connect = 8 * 1024; | |
65 | module_param(cxgb3i_max_connect, uint, 0644); | |
66 | MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)"); | |
67 | ||
68 | static unsigned int cxgb3i_sport_base = 20000; | |
69 | module_param(cxgb3i_sport_base, uint, 0644); | |
70 | MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)"); | |
71 | ||
72 | static void cxgb3i_dev_open(struct t3cdev *); | |
73 | static void cxgb3i_dev_close(struct t3cdev *); | |
74 | static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32); | |
75 | ||
76 | static struct cxgb3_client t3_client = { | |
77 | .name = DRV_MODULE_NAME, | |
78 | .handlers = cxgb3i_cpl_handlers, | |
79 | .add = cxgb3i_dev_open, | |
80 | .remove = cxgb3i_dev_close, | |
81 | .event_handler = cxgb3i_dev_event_handler, | |
82 | }; | |
83 | ||
84 | static struct scsi_host_template cxgb3i_host_template = { | |
85 | .module = THIS_MODULE, | |
86 | .name = DRV_MODULE_NAME, | |
87 | .proc_name = DRV_MODULE_NAME, | |
88 | .can_queue = CXGB3I_SCSI_HOST_QDEPTH, | |
89 | .queuecommand = iscsi_queuecommand, | |
90 | .change_queue_depth = iscsi_change_queue_depth, | |
91 | .sg_tablesize = SG_ALL, | |
92 | .max_sectors = 0xFFFF, | |
93 | .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, | |
94 | .eh_abort_handler = iscsi_eh_abort, | |
95 | .eh_device_reset_handler = iscsi_eh_device_reset, | |
96 | .eh_target_reset_handler = iscsi_eh_recover_target, | |
97 | .target_alloc = iscsi_target_alloc, | |
98 | .use_clustering = DISABLE_CLUSTERING, | |
99 | .this_id = -1, | |
100 | }; | |
101 | ||
102 | static struct iscsi_transport cxgb3i_iscsi_transport = { | |
103 | .owner = THIS_MODULE, | |
104 | .name = DRV_MODULE_NAME, | |
105 | /* owner and name should be set already */ | |
106 | .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | |
107 | | CAP_DATADGST | CAP_DIGEST_OFFLOAD | | |
108 | CAP_PADDING_OFFLOAD, | |
109 | .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | | |
110 | ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | | |
111 | ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | | |
112 | ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | | |
113 | ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | | |
114 | ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | | |
115 | ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | | |
116 | ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | | |
117 | ISCSI_PERSISTENT_ADDRESS | | |
118 | ISCSI_TARGET_NAME | ISCSI_TPGT | | |
119 | ISCSI_USERNAME | ISCSI_PASSWORD | | |
120 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | | |
121 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | | |
122 | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | | |
123 | ISCSI_PING_TMO | ISCSI_RECV_TMO | | |
124 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, | |
125 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | | |
126 | ISCSI_HOST_INITIATOR_NAME | | |
127 | ISCSI_HOST_NETDEV_NAME, | |
128 | .get_host_param = cxgbi_get_host_param, | |
129 | .set_host_param = cxgbi_set_host_param, | |
130 | /* session management */ | |
131 | .create_session = cxgbi_create_session, | |
132 | .destroy_session = cxgbi_destroy_session, | |
133 | .get_session_param = iscsi_session_get_param, | |
134 | /* connection management */ | |
135 | .create_conn = cxgbi_create_conn, | |
136 | .bind_conn = cxgbi_bind_conn, | |
137 | .destroy_conn = iscsi_tcp_conn_teardown, | |
138 | .start_conn = iscsi_conn_start, | |
139 | .stop_conn = iscsi_conn_stop, | |
140 | .get_conn_param = cxgbi_get_conn_param, | |
141 | .set_param = cxgbi_set_conn_param, | |
142 | .get_stats = cxgbi_get_conn_stats, | |
143 | /* pdu xmit req from user space */ | |
144 | .send_pdu = iscsi_conn_send_pdu, | |
145 | /* task */ | |
146 | .init_task = iscsi_tcp_task_init, | |
147 | .xmit_task = iscsi_tcp_task_xmit, | |
148 | .cleanup_task = cxgbi_cleanup_task, | |
149 | /* pdu */ | |
150 | .alloc_pdu = cxgbi_conn_alloc_pdu, | |
151 | .init_pdu = cxgbi_conn_init_pdu, | |
152 | .xmit_pdu = cxgbi_conn_xmit_pdu, | |
153 | .parse_pdu_itt = cxgbi_parse_pdu_itt, | |
154 | /* TCP connect/disconnect */ | |
155 | .ep_connect = cxgbi_ep_connect, | |
156 | .ep_poll = cxgbi_ep_poll, | |
157 | .ep_disconnect = cxgbi_ep_disconnect, | |
158 | /* Error recovery timeout call */ | |
159 | .session_recovery_timedout = iscsi_session_recovery_timedout, | |
160 | }; | |
161 | ||
162 | static struct scsi_transport_template *cxgb3i_stt; | |
163 | ||
164 | /* | |
165 | * CPL (Chelsio Protocol Language) defines a message passing interface between | |
166 | * the host driver and Chelsio asic. | |
167 | * The section below implements the CPLs related to iSCSI TCP connection | |
168 | * open/close/abort and data send/receive. | |
169 | */ | |
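/*
 * Direction convention used in the comments below: "host ->" marks a CPL
 * message this driver builds and sends to the adapter, while "-> host" marks
 * a CPL message delivered by the adapter to one of the handlers registered
 * in cxgb3i_cpl_handlers[] near the bottom of this file.
 */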
170 | ||
171 | static int push_tx_frames(struct cxgbi_sock *csk, int req_completion); | |
172 | ||
173 | static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, | |
174 | const struct l2t_entry *e) | |
175 | { | |
176 | unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win); | |
177 | struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; | |
178 | ||
179 | skb->priority = CPL_PRIORITY_SETUP; | |
180 | ||
181 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | |
182 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid)); | |
183 | req->local_port = csk->saddr.sin_port; | |
184 | req->peer_port = csk->daddr.sin_port; | |
185 | req->local_ip = csk->saddr.sin_addr.s_addr; | |
186 | req->peer_ip = csk->daddr.sin_addr.s_addr; | |
187 | ||
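/*
 * opt0h/opt0l carry the connection options: keep-alive, TCAM bypass,
 * receive window scale, MSS index, L2T entry and TX channel in the high
 * word; ULP mode (iSCSI) and the advertised receive buffer size
 * (cxgb3i_rcv_win in 1KB units, hence the >> 10) in the low word.
 */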
188 | req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS | | |
189 | V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) | | |
190 | V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx)); | |
191 | req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) | | |
192 | V_RCV_BUFSIZ(cxgb3i_rcv_win>>10)); | |
193 | ||
194 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
195 | "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n", | |
196 | csk, csk->state, csk->flags, csk->atid, | |
197 | &req->local_ip, ntohs(req->local_port), | |
198 | &req->peer_ip, ntohs(req->peer_port), | |
199 | csk->mss_idx, e->idx, e->smt_idx); | |
200 | ||
201 | l2t_send(csk->cdev->lldev, skb, csk->l2t); | |
202 | } | |
203 | ||
204 | static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb) | |
205 | { | |
206 | cxgbi_sock_act_open_req_arp_failure(NULL, skb); | |
207 | } | |
208 | ||
209 | /* | |
210 | * CPL connection close request: host -> | |
211 | * | |
212 | * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to | |
213 | * the write queue (i.e., after any unsent TX data). | |
214 | */ | |
215 | static void send_close_req(struct cxgbi_sock *csk) | |
216 | { | |
217 | struct sk_buff *skb = csk->cpl_close; | |
218 | struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; | |
219 | unsigned int tid = csk->tid; | |
220 | ||
221 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
222 | "csk 0x%p,%u,0x%lx,%u.\n", | |
223 | csk, csk->state, csk->flags, csk->tid); | |
224 | ||
225 | csk->cpl_close = NULL; | |
226 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); | |
227 | req->wr.wr_lo = htonl(V_WR_TID(tid)); | |
228 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); | |
229 | req->rsvd = htonl(csk->write_seq); | |
230 | ||
231 | cxgbi_sock_skb_entail(csk, skb); | |
232 | if (csk->state >= CTP_ESTABLISHED) | |
233 | push_tx_frames(csk, 1); | |
234 | } | |
235 | ||
236 | /* | |
237 | * CPL connection abort request: host -> | |
238 | * | |
239 | * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs | |
240 | * for the same connection and also that we do not try to send a message | |
241 | * after the connection has closed. | |
242 | */ | |
243 | static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb) | |
244 | { | |
245 | struct cpl_abort_req *req = cplhdr(skb); | |
246 | ||
247 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
248 | "t3dev 0x%p, tid %u, skb 0x%p.\n", | |
249 | tdev, GET_TID(req), skb); | |
250 | req->cmd = CPL_ABORT_NO_RST; | |
251 | cxgb3_ofld_send(tdev, skb); | |
252 | } | |
253 | ||
254 | static void send_abort_req(struct cxgbi_sock *csk) | |
255 | { | |
256 | struct sk_buff *skb = csk->cpl_abort_req; | |
257 | struct cpl_abort_req *req; | |
258 | ||
259 | if (unlikely(csk->state == CTP_ABORTING || !skb)) | |
260 | return; | |
261 | cxgbi_sock_set_state(csk, CTP_ABORTING); | |
262 | cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); | |
263 | /* Purge the send queue so we don't send anything after an abort. */ | |
264 | cxgbi_sock_purge_write_queue(csk); | |
265 | ||
266 | csk->cpl_abort_req = NULL; | |
267 | req = (struct cpl_abort_req *)skb->head; | |
268 | skb->priority = CPL_PRIORITY_DATA; | |
269 | set_arp_failure_handler(skb, abort_arp_failure); | |
270 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); | |
271 | req->wr.wr_lo = htonl(V_WR_TID(csk->tid)); | |
272 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); | |
273 | req->rsvd0 = htonl(csk->snd_nxt); | |
274 | req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); | |
275 | req->cmd = CPL_ABORT_SEND_RST; | |
276 | ||
277 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
278 | "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", | |
279 | csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, | |
280 | req->rsvd1); | |
281 | ||
282 | l2t_send(csk->cdev->lldev, skb, csk->l2t); | |
283 | } | |
284 | ||
285 | /* | |
286 | * CPL connection abort reply: host -> | |
287 | * | |
288 | * Send an ABORT_RPL message in response to the ABORT_REQ received. | |
289 | */ | |
290 | static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) | |
291 | { | |
292 | struct sk_buff *skb = csk->cpl_abort_rpl; | |
293 | struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; | |
294 | ||
295 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
296 | "csk 0x%p,%u,0x%lx,%u, status %d.\n", | |
297 | csk, csk->state, csk->flags, csk->tid, rst_status); | |
298 | ||
299 | csk->cpl_abort_rpl = NULL; | |
300 | skb->priority = CPL_PRIORITY_DATA; | |
301 | rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); | |
302 | rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid)); | |
303 | OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); | |
304 | rpl->cmd = rst_status; | |
305 | cxgb3_ofld_send(csk->cdev->lldev, skb); | |
306 | } | |
307 | ||
308 | /* | |
309 | * CPL connection rx data ack: host -> | |
310 | * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of | |
311 | * credits sent. | |
312 | */ | |
313 | static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) | |
314 | { | |
315 | struct sk_buff *skb; | |
316 | struct cpl_rx_data_ack *req; | |
317 | u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); | |
318 | ||
319 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, | |
320 | "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n", | |
321 | csk, csk->state, csk->flags, csk->tid, credits, dack); | |
322 | ||
323 | skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); | |
324 | if (!skb) { | |
325 | pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); | |
326 | return 0; | |
327 | } | |
328 | req = (struct cpl_rx_data_ack *)skb->head; | |
329 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | |
330 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); | |
331 | req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) | | |
332 | V_RX_CREDITS(credits)); | |
333 | skb->priority = CPL_PRIORITY_ACK; | |
334 | cxgb3_ofld_send(csk->cdev->lldev, skb); | |
335 | return credits; | |
336 | } | |
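/*
 * send_rx_credits() is installed as cdev->csk_send_rx_credits in
 * cxgb3i_ofld_init() below; the common libcxgbi code is expected to call it
 * once enough received data has been consumed (cdev->rx_credit_thres is set
 * to cxgb3i_rx_credit_thres for that purpose).
 */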
337 | ||
338 | /* | |
339 | * CPL connection tx data: host -> | |
340 | * | |
341 | * Send iscsi PDU via TX_DATA CPL message. Returns the number of | |
342 | * credits sent. | |
343 | * Each TX_DATA consumes work request credits (WRs), so we need to keep track of | |
344 | * how many we've used so far and how many are pending (i.e., not yet acked by T3). | |
345 | */ | |
346 | ||
347 | static unsigned int wrlen __read_mostly; | |
348 | static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; | |
349 | ||
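/*
 * skb_wrs[i] is the number of work requests needed to send an skb with i
 * scatter-gather fragments (the SGL takes roughly 1.5 flits, i.e. 12 bytes,
 * per fragment plus 3 flits for the tx_data_wr header, and a single WR can
 * hold at most wr_len flits). wrlen caches the WR size in bytes for the
 * small-payload shortcut in push_tx_frames().
 */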
350 | static void init_wr_tab(unsigned int wr_len) | |
351 | { | |
352 | int i; | |
353 | ||
354 | if (skb_wrs[1]) /* already initialized */ | |
355 | return; | |
356 | for (i = 1; i < SKB_WR_LIST_SIZE; i++) { | |
357 | int sgl_len = (3 * i) / 2 + (i & 1); | |
358 | ||
359 | sgl_len += 3; | |
360 | skb_wrs[i] = (sgl_len <= wr_len | |
361 | ? 1 : 1 + (sgl_len - 2) / (wr_len - 1)); | |
362 | } | |
363 | wrlen = wr_len * 8; | |
364 | } | |
365 | ||
366 | static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, | |
367 | int len, int req_completion) | |
368 | { | |
369 | struct tx_data_wr *req; | |
370 | struct l2t_entry *l2t = csk->l2t; | |
371 | ||
372 | skb_reset_transport_header(skb); | |
373 | req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req)); | |
374 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | | |
375 | (req_completion ? F_WR_COMPL : 0)); | |
376 | req->wr_lo = htonl(V_WR_TID(csk->tid)); | |
377 | /* len includes the length of any HW ULP additions */ | |
378 | req->len = htonl(len); | |
379 | /* V_TX_ULP_SUBMODE sets both the mode and submode */ | |
380 | req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) | | |
381 | V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1))); | |
382 | req->sndseq = htonl(csk->snd_nxt); | |
383 | req->param = htonl(V_TX_PORT(l2t->smt_idx)); | |
384 | ||
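/*
 * The first TX_DATA_WR on a connection additionally carries one-time setup:
 * F_TX_INIT, the CPU/RSS queue index (csk->rss_qid) and the send buffer
 * size (in 32KB units); CTPF_TX_DATA_SENT is then set so this block runs
 * only once per connection.
 */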
385 | if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { | |
386 | req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT | | |
387 | V_TX_CPU_IDX(csk->rss_qid)); | |
388 | /* sendbuffer is in units of 32KB. */ | |
389 | req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15)); | |
390 | cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); | |
391 | } | |
392 | } | |
393 | ||
394 | /** | |
395 | * push_tx_frames -- start transmit | |
396 | * @csk: the offloaded connection | |
397 | * @req_completion: request wr_ack or not | |
398 | * | |
399 | * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a | |
400 | * connection's send queue and sends them on to T3. Must be called with the | |
401 | * connection's lock held. Returns the amount of send buffer space that was | |
402 | * freed as a result of sending queued data to T3. | |
403 | */ | |
404 | ||
405 | static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb) | |
406 | { | |
407 | kfree_skb(skb); | |
408 | } | |
409 | ||
410 | static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) | |
411 | { | |
412 | int total_size = 0; | |
413 | struct sk_buff *skb; | |
414 | ||
415 | if (unlikely(csk->state < CTP_ESTABLISHED || | |
416 | csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { | |
417 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, | |
418 | "csk 0x%p,%u,0x%lx,%u, in closing state.\n", | |
419 | csk, csk->state, csk->flags, csk->tid); | |
420 | return 0; | |
421 | } | |
422 | ||
423 | while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { | |
424 | int len = skb->len; /* length before skb_push */ | |
425 | int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len); | |
426 | int wrs_needed = skb_wrs[frags]; | |
427 | ||
428 | if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen) | |
429 | wrs_needed = 1; | |
430 | ||
431 | WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1); | |
432 | ||
433 | if (csk->wr_cred < wrs_needed) { | |
434 | log_debug(1 << CXGBI_DBG_PDU_TX, | |
435 | "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n", | |
436 | csk, skb->len, skb->data_len, frags, | |
437 | wrs_needed, csk->wr_cred); | |
438 | break; | |
439 | } | |
440 | ||
441 | __skb_unlink(skb, &csk->write_queue); | |
442 | skb->priority = CPL_PRIORITY_DATA; | |
443 | skb->csum = wrs_needed; /* remember this until the WR_ACK */ | |
444 | csk->wr_cred -= wrs_needed; | |
445 | csk->wr_una_cred += wrs_needed; | |
446 | cxgbi_sock_enqueue_wr(csk, skb); | |
447 | ||
448 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, | |
449 | "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, " | |
450 | "left %u, unack %u.\n", | |
451 | csk, skb->len, skb->data_len, frags, skb->csum, | |
452 | csk->wr_cred, csk->wr_una_cred); | |
453 | ||
454 | if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { | |
455 | if ((req_completion && | |
456 | csk->wr_una_cred == wrs_needed) || | |
457 | csk->wr_una_cred >= csk->wr_max_cred / 2) { | |
458 | req_completion = 1; | |
459 | csk->wr_una_cred = 0; | |
460 | } | |
461 | len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); | |
462 | make_tx_data_wr(csk, skb, len, req_completion); | |
463 | csk->snd_nxt += len; | |
464 | cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); | |
465 | } | |
466 | total_size += skb->truesize; | |
467 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, | |
468 | "csk 0x%p, tid 0x%x, send skb 0x%p.\n", | |
469 | csk, csk->tid, skb); | |
470 | set_arp_failure_handler(skb, arp_failure_skb_discard); | |
471 | l2t_send(csk->cdev->lldev, skb, csk->l2t); | |
472 | } | |
473 | return total_size; | |
474 | } | |
475 | ||
476 | /* | |
477 | * Process a CPL_ACT_ESTABLISH message: -> host | |
478 | * Updates connection state from an active establish CPL message. Runs with | |
479 | * the connection lock held. | |
480 | */ | |
481 | ||
482 | static inline void free_atid(struct cxgbi_sock *csk) | |
483 | { | |
484 | if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { | |
485 | cxgb3_free_atid(csk->cdev->lldev, csk->atid); | |
486 | cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); | |
487 | cxgbi_sock_put(csk); | |
488 | } | |
489 | } | |
490 | ||
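/*
 * An active-open connection is tracked by an atid until the adapter reports
 * CPL_ACT_ESTABLISH; at that point the hardware-assigned tid is recorded,
 * the csk is inserted into the tid table and the atid is released via
 * free_atid() above.
 */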
491 | static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |
492 | { | |
493 | struct cxgbi_sock *csk = ctx; | |
494 | struct cpl_act_establish *req = cplhdr(skb); | |
495 | unsigned int tid = GET_TID(req); | |
496 | unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); | |
497 | u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */ | |
498 | ||
499 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
500 | "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", | |
501 | atid, tid, csk, csk->state, csk->flags, rcv_isn); | |
502 | ||
503 | cxgbi_sock_get(csk); | |
504 | cxgbi_sock_set_flag(csk, CTPF_HAS_TID); | |
505 | csk->tid = tid; | |
506 | cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid); | |
507 | ||
508 | free_atid(csk); | |
509 | ||
510 | csk->rss_qid = G_QNUM(ntohs(skb->csum)); | |
511 | ||
512 | spin_lock_bh(&csk->lock); | |
513 | if (csk->retry_timer.function) { | |
514 | del_timer(&csk->retry_timer); | |
515 | csk->retry_timer.function = NULL; | |
516 | } | |
517 | ||
518 | if (unlikely(csk->state != CTP_ACTIVE_OPEN)) | |
519 | pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", | |
520 | csk, csk->state, csk->flags, csk->tid); | |
521 | ||
522 | csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; | |
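/*
 * If the configured receive window exceeds what the RCV_BUFSIZ field could
 * actually advertise (M_RCV_BUFSIZ, in 1KB units), back rcv_wup off by the
 * difference, presumably so the RX credit accounting matches the window the
 * hardware really offered.
 */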
523 | if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10)) | |
524 | csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10); | |
525 | ||
526 | cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); | |
527 | ||
528 | if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) | |
529 | /* upper layer has requested closing */ | |
530 | send_abort_req(csk); | |
531 | else { | |
532 | if (skb_queue_len(&csk->write_queue)) | |
533 | push_tx_frames(csk, 1); | |
534 | cxgbi_conn_tx_open(csk); | |
535 | } | |
536 | ||
537 | spin_unlock_bh(&csk->lock); | |
538 | __kfree_skb(skb); | |
539 | return 0; | |
540 | } | |
541 | ||
542 | /* | |
543 | * Process a CPL_ACT_OPEN_RPL message: -> host | |
544 | * Handle active open failures. | |
545 | */ | |
546 | static int act_open_rpl_status_to_errno(int status) | |
547 | { | |
548 | switch (status) { | |
549 | case CPL_ERR_CONN_RESET: | |
550 | return -ECONNREFUSED; | |
551 | case CPL_ERR_ARP_MISS: | |
552 | return -EHOSTUNREACH; | |
553 | case CPL_ERR_CONN_TIMEDOUT: | |
554 | return -ETIMEDOUT; | |
555 | case CPL_ERR_TCAM_FULL: | |
556 | return -ENOMEM; | |
557 | case CPL_ERR_CONN_EXIST: | |
558 | return -EADDRINUSE; | |
559 | default: | |
560 | return -EIO; | |
561 | } | |
562 | } | |
563 | ||
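/*
 * CPL_ERR_CONN_EXIST on an active open typically means a previous connection
 * with the same 4-tuple is still being torn down in the adapter, so instead
 * of failing immediately, do_act_open_rpl() arms this timer to resend the
 * CPL_ACT_OPEN_REQ after half a second.
 */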
564 | static void act_open_retry_timer(unsigned long data) | |
565 | { | |
566 | struct sk_buff *skb; | |
567 | struct cxgbi_sock *csk = (struct cxgbi_sock *)data; | |
568 | ||
569 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
570 | "csk 0x%p,%u,0x%lx,%u.\n", | |
571 | csk, csk->state, csk->flags, csk->tid); | |
572 | ||
573 | cxgbi_sock_get(csk); | |
574 | spin_lock_bh(&csk->lock); | |
575 | skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); | |
576 | if (!skb) | |
577 | cxgbi_sock_fail_act_open(csk, -ENOMEM); | |
578 | else { | |
579 | skb->sk = (struct sock *)csk; | |
580 | set_arp_failure_handler(skb, act_open_arp_failure); | |
581 | send_act_open_req(csk, skb, csk->l2t); | |
582 | } | |
583 | spin_unlock_bh(&csk->lock); | |
584 | cxgbi_sock_put(csk); | |
585 | } | |
586 | ||
587 | static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |
588 | { | |
589 | struct cxgbi_sock *csk = ctx; | |
590 | struct cpl_act_open_rpl *rpl = cplhdr(skb); | |
591 | ||
592 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
593 | "csk 0x%p,%u,0x%lx,%u, status %u.\n", | |
594 | csk, csk->state, csk->flags, csk->atid, rpl->status); | |
595 | ||
596 | if (rpl->status != CPL_ERR_TCAM_FULL && | |
597 | rpl->status != CPL_ERR_CONN_EXIST && | |
598 | rpl->status != CPL_ERR_ARP_MISS) | |
599 | cxgb3_queue_tid_release(tdev, GET_TID(rpl)); | |
600 | ||
601 | cxgbi_sock_get(csk); | |
602 | spin_lock_bh(&csk->lock); | |
603 | if (rpl->status == CPL_ERR_CONN_EXIST && | |
604 | csk->retry_timer.function != act_open_retry_timer) { | |
605 | csk->retry_timer.function = act_open_retry_timer; | |
606 | mod_timer(&csk->retry_timer, jiffies + HZ / 2); | |
607 | } else | |
608 | cxgbi_sock_fail_act_open(csk, | |
609 | act_open_rpl_status_to_errno(rpl->status)); | |
610 | ||
611 | spin_unlock_bh(&csk->lock); | |
612 | cxgbi_sock_put(csk); | |
613 | __kfree_skb(skb); | |
614 | return 0; | |
615 | } | |
616 | ||
617 | /* | |
618 | * Process PEER_CLOSE CPL messages: -> host | |
619 | * Handle peer FIN. | |
620 | */ | |
621 | static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | |
622 | { | |
623 | struct cxgbi_sock *csk = ctx; | |
624 | ||
625 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
626 | "csk 0x%p,%u,0x%lx,%u.\n", | |
627 | csk, csk->state, csk->flags, csk->tid); | |
628 | ||
629 | cxgbi_sock_rcv_peer_close(csk); | |
630 | __kfree_skb(skb); | |
631 | return 0; | |
632 | } | |
633 | ||
634 | /* | |
635 | * Process CLOSE_CONN_RPL CPL message: -> host | |
636 | * Process a peer ACK to our FIN. | |
637 | */ | |
638 | static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb, | |
639 | void *ctx) | |
640 | { | |
641 | struct cxgbi_sock *csk = ctx; | |
642 | struct cpl_close_con_rpl *rpl = cplhdr(skb); | |
643 | ||
644 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
645 | "csk 0x%p,%u,0x%lx,%u, snxt %u.\n", | |
646 | csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt)); | |
647 | ||
648 | cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); | |
649 | __kfree_skb(skb); | |
650 | return 0; | |
651 | } | |
652 | ||
653 | /* | |
654 | * Process ABORT_REQ_RSS CPL message: -> host | |
655 | * Process abort requests. If we are waiting for an ABORT_RPL we ignore this | |
656 | * request except that we need to reply to it. | |
657 | */ | |
658 | ||
659 | static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, | |
660 | int *need_rst) | |
661 | { | |
662 | switch (abort_reason) { | |
663 | case CPL_ERR_BAD_SYN: /* fall through */ | |
664 | case CPL_ERR_CONN_RESET: | |
665 | return csk->state > CTP_ESTABLISHED ? | |
666 | -EPIPE : -ECONNRESET; | |
667 | case CPL_ERR_XMIT_TIMEDOUT: | |
668 | case CPL_ERR_PERSIST_TIMEDOUT: | |
669 | case CPL_ERR_FINWAIT2_TIMEDOUT: | |
670 | case CPL_ERR_KEEPALIVE_TIMEDOUT: | |
671 | return -ETIMEDOUT; | |
672 | default: | |
673 | return -EIO; | |
674 | } | |
675 | } | |
676 | ||
677 | static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | |
678 | { | |
679 | const struct cpl_abort_req_rss *req = cplhdr(skb); | |
680 | struct cxgbi_sock *csk = ctx; | |
681 | int rst_status = CPL_ABORT_NO_RST; | |
682 | ||
683 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
684 | "csk 0x%p,%u,0x%lx,%u.\n", | |
685 | csk, csk->state, csk->flags, csk->tid); | |
686 | ||
687 | if (req->status == CPL_ERR_RTX_NEG_ADVICE || | |
688 | req->status == CPL_ERR_PERSIST_NEG_ADVICE) { | |
689 | goto done; | |
690 | } | |
691 | ||
692 | cxgbi_sock_get(csk); | |
693 | spin_lock_bh(&csk->lock); | |
694 | ||
695 | if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { | |
696 | cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); | |
697 | cxgbi_sock_set_state(csk, CTP_ABORTING); | |
698 | goto out; | |
699 | } | |
700 | ||
701 | cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); | |
702 | send_abort_rpl(csk, rst_status); | |
703 | ||
704 | if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { | |
705 | csk->err = abort_status_to_errno(csk, req->status, &rst_status); | |
706 | cxgbi_sock_closed(csk); | |
707 | } | |
708 | ||
709 | out: | |
710 | spin_unlock_bh(&csk->lock); | |
711 | cxgbi_sock_put(csk); | |
712 | done: | |
713 | __kfree_skb(skb); | |
714 | return 0; | |
715 | } | |
716 | ||
717 | /* | |
718 | * Process ABORT_RPL_RSS CPL message: -> host | |
719 | * Process abort replies. We only process these messages if we anticipate | |
720 | * them as the coordination between SW and HW in this area is somewhat lacking | |
721 | * and sometimes we get ABORT_RPLs after we are done with the connection that | |
722 | * originated the ABORT_REQ. | |
723 | */ | |
724 | static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | |
725 | { | |
726 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb); | |
727 | struct cxgbi_sock *csk = ctx; | |
728 | ||
729 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
730 | "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", | |
731 | rpl->status, csk, csk ? csk->state : 0, | |
732 | csk ? csk->flags : 0UL); | |
733 | /* | |
734 | * Ignore replies to post-close aborts indicating that the abort was | |
735 | * requested too late. These connections are terminated when we get | |
736 | * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss | |
737 | * arrives the TID is either no longer used or it has been recycled. | |
738 | */ | |
739 | if (rpl->status == CPL_ERR_ABORT_FAILED) | |
740 | goto rel_skb; | |
741 | /* | |
742 | * Sometimes we've already closed the connection, e.g., a post-close | |
743 | * abort races with ABORT_REQ_RSS, the latter frees the connection | |
744 | * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED, | |
745 | * but FW turns the ABORT_REQ into a regular one and so we get | |
746 | * ABORT_RPL_RSS with status 0 and no connection. | |
747 | */ | |
748 | if (csk) | |
749 | cxgbi_sock_rcv_abort_rpl(csk); | |
750 | rel_skb: | |
751 | __kfree_skb(skb); | |
752 | return 0; | |
753 | } | |
754 | ||
755 | /* | |
756 | * Process RX_ISCSI_HDR CPL message: -> host | |
757 | * Handle received PDUs; the payload could be DDP'ed. If not, the payload | |
758 | * follows the BHS. | |
759 | */ | |
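/*
 * As parsed below, a coalesced RX_ISCSI_HDR skb is laid out as: the
 * cpl_iscsi_hdr itself, the iSCSI BHS (and, when the payload was not
 * DDP'ed, a cpl_iscsi_hdr_norss header followed by the payload), and a
 * trailing cpl_rx_data_ddp_norss carrying the pdu length, data digest and
 * DDP status bits.
 */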
760 | static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx) | |
761 | { | |
762 | struct cxgbi_sock *csk = ctx; | |
763 | struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb); | |
764 | struct cpl_iscsi_hdr_norss data_cpl; | |
765 | struct cpl_rx_data_ddp_norss ddp_cpl; | |
766 | unsigned int hdr_len, data_len, status; | |
767 | unsigned int len; | |
768 | int err; | |
769 | ||
770 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, | |
771 | "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n", | |
772 | csk, csk->state, csk->flags, csk->tid, skb, skb->len); | |
773 | ||
774 | spin_lock_bh(&csk->lock); | |
775 | ||
776 | if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { | |
777 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
778 | "csk 0x%p,%u,0x%lx,%u, bad state.\n", | |
779 | csk, csk->state, csk->flags, csk->tid); | |
780 | if (csk->state != CTP_ABORTING) | |
781 | goto abort_conn; | |
782 | else | |
783 | goto discard; | |
784 | } | |
785 | ||
786 | cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq); | |
787 | cxgbi_skcb_flags(skb) = 0; | |
788 | ||
789 | skb_reset_transport_header(skb); | |
790 | __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); | |
791 | ||
792 | len = hdr_len = ntohs(hdr_cpl->len); | |
793 | /* msg coalesce is off or not enough data received */ | |
794 | if (skb->len <= hdr_len) { | |
795 | pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n", | |
796 | csk->cdev->ports[csk->port_id]->name, csk->tid, | |
797 | skb->len, hdr_len); | |
798 | goto abort_conn; | |
799 | } | |
800 | cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED); | |
801 | ||
802 | err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl, | |
803 | sizeof(ddp_cpl)); | |
804 | if (err < 0) { | |
805 | pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n", | |
806 | csk->cdev->ports[csk->port_id]->name, csk->tid, | |
807 | skb->len, sizeof(ddp_cpl), err); | |
808 | goto abort_conn; | |
809 | } | |
810 | ||
811 | cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS); | |
812 | cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len); | |
813 | cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); | |
814 | status = ntohl(ddp_cpl.ddp_status); | |
815 | ||
816 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, | |
817 | "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n", | |
818 | csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status); | |
819 | ||
820 | if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) | |
821 | cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); | |
822 | if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) | |
823 | cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); | |
824 | if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) | |
825 | cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); | |
826 | ||
827 | if (skb->len > (hdr_len + sizeof(ddp_cpl))) { | |
828 | err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl)); | |
829 | if (err < 0) { | |
830 | pr_err("%s: tid %u, cp %zu/%u failed %d.\n", | |
831 | csk->cdev->ports[csk->port_id]->name, | |
832 | csk->tid, sizeof(data_cpl), skb->len, err); | |
833 | goto abort_conn; | |
834 | } | |
835 | data_len = ntohs(data_cpl.len); | |
836 | log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX, | |
837 | "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n", | |
838 | skb, data_len, cxgbi_skcb_rx_pdulen(skb), status); | |
839 | len += sizeof(data_cpl) + data_len; | |
840 | } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) | |
841 | cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD); | |
842 | ||
843 | csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb); | |
844 | __pskb_trim(skb, len); | |
845 | __skb_queue_tail(&csk->receive_queue, skb); | |
846 | cxgbi_conn_pdu_ready(csk); | |
847 | ||
848 | spin_unlock_bh(&csk->lock); | |
849 | return 0; | |
850 | ||
851 | abort_conn: | |
852 | send_abort_req(csk); | |
853 | discard: | |
854 | spin_unlock_bh(&csk->lock); | |
855 | __kfree_skb(skb); | |
856 | return 0; | |
857 | } | |
858 | ||
859 | /* | |
860 | * Process TX_DATA_ACK CPL messages: -> host | |
861 | * Process an acknowledgment of WR completion. Advance snd_una and send the | |
862 | * next batch of work requests from the write queue. | |
863 | */ | |
864 | static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | |
865 | { | |
866 | struct cxgbi_sock *csk = ctx; | |
867 | struct cpl_wr_ack *hdr = cplhdr(skb); | |
868 | ||
869 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, | |
870 | "csk 0x%p,%u,0x%lx,%u, cr %u.\n", | |
871 | csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits)); | |
872 | ||
873 | cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1); | |
874 | __kfree_skb(skb); | |
875 | return 0; | |
876 | } | |
877 | ||
878 | /* | |
879 | * For each connection, pre-allocate the skbs needed for close/abort requests, | |
880 | * so that we can service the request right away. | |
881 | */ | |
882 | static int alloc_cpls(struct cxgbi_sock *csk) | |
883 | { | |
884 | csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0, | |
885 | GFP_KERNEL); | |
886 | if (!csk->cpl_close) | |
887 | return -ENOMEM; | |
888 | csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0, | |
889 | GFP_KERNEL); | |
890 | if (!csk->cpl_abort_req) | |
891 | goto free_cpl_skbs; | |
892 | ||
893 | csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0, | |
894 | GFP_KERNEL); | |
895 | if (!csk->cpl_abort_rpl) | |
896 | goto free_cpl_skbs; | |
897 | ||
898 | return 0; | |
899 | ||
900 | free_cpl_skbs: | |
901 | cxgbi_sock_free_cpl_skbs(csk); | |
902 | return -ENOMEM; | |
903 | } | |
904 | ||
905 | /** | |
906 | * release_offload_resources - release offload resources | |
907 | * @csk: the offloaded iscsi tcp connection. | |
908 | * Release resources held by an offload connection (TID, L2T entry, etc.) | |
909 | */ | |
910 | static void l2t_put(struct cxgbi_sock *csk) | |
911 | { | |
912 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; | |
913 | ||
914 | if (csk->l2t) { | |
915 | l2t_release(L2DATA(t3dev), csk->l2t); | |
916 | csk->l2t = NULL; | |
917 | cxgbi_sock_put(csk); | |
918 | } | |
919 | } | |
920 | ||
921 | static void release_offload_resources(struct cxgbi_sock *csk) | |
922 | { | |
923 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; | |
924 | ||
925 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
926 | "csk 0x%p,%u,0x%lx,%u.\n", | |
927 | csk, csk->state, csk->flags, csk->tid); | |
928 | ||
929 | csk->rss_qid = 0; | |
930 | cxgbi_sock_free_cpl_skbs(csk); | |
931 | ||
932 | if (csk->wr_cred != csk->wr_max_cred) { | |
933 | cxgbi_sock_purge_wr_queue(csk); | |
934 | cxgbi_sock_reset_wr_list(csk); | |
935 | } | |
936 | l2t_put(csk); | |
937 | if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) | |
938 | free_atid(csk); | |
939 | else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { | |
940 | cxgb3_remove_tid(t3dev, (void *)csk, csk->tid); | |
941 | cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); | |
942 | cxgbi_sock_put(csk); | |
943 | } | |
944 | csk->dst = NULL; | |
945 | csk->cdev = NULL; | |
946 | } | |
947 | ||
948 | static int init_act_open(struct cxgbi_sock *csk) | |
949 | { | |
950 | struct dst_entry *dst = csk->dst; | |
951 | struct cxgbi_device *cdev = csk->cdev; | |
952 | struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; | |
953 | struct net_device *ndev = cdev->ports[csk->port_id]; | |
954 | struct sk_buff *skb = NULL; | |
955 | ||
956 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | |
957 | "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); | |
958 | ||
959 | csk->rss_qid = 0; | |
960 | csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev); | |
961 | if (!csk->l2t) { | |
962 | pr_err("NO l2t available.\n"); | |
963 | return -EINVAL; | |
964 | } | |
965 | cxgbi_sock_get(csk); | |
966 | ||
967 | csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk); | |
968 | if (csk->atid < 0) { | |
969 | pr_err("NO atid available.\n"); | |
970 | goto rel_resource; | |
971 | } | |
972 | cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); | |
973 | cxgbi_sock_get(csk); | |
974 | ||
975 | skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); | |
976 | if (!skb) | |
977 | goto rel_resource; | |
978 | skb->sk = (struct sock *)csk; | |
979 | set_arp_failure_handler(skb, act_open_arp_failure); | |
980 | ||
981 | csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1; | |
982 | csk->wr_una_cred = 0; | |
983 | csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst)); | |
984 | cxgbi_sock_reset_wr_list(csk); | |
985 | csk->err = 0; | |
986 | ||
987 | cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); | |
988 | send_act_open_req(csk, skb, csk->l2t); | |
989 | return 0; | |
990 | ||
991 | rel_resource: | |
992 | if (skb) | |
993 | __kfree_skb(skb); | |
994 | return -EINVAL; | |
995 | } | |
996 | ||
997 | cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { | |
998 | [CPL_ACT_ESTABLISH] = do_act_establish, | |
999 | [CPL_ACT_OPEN_RPL] = do_act_open_rpl, | |
1000 | [CPL_PEER_CLOSE] = do_peer_close, | |
1001 | [CPL_ABORT_REQ_RSS] = do_abort_req, | |
1002 | [CPL_ABORT_RPL_RSS] = do_abort_rpl, | |
1003 | [CPL_CLOSE_CON_RPL] = do_close_con_rpl, | |
1004 | [CPL_TX_DMA_ACK] = do_wr_ack, | |
1005 | [CPL_ISCSI_HDR] = do_iscsi_hdr, | |
1006 | }; | |
1007 | ||
1008 | /** | |
1009 | * cxgb3i_ofld_init - allocate and initialize resources for each adapter found | |
1010 | * @cdev: cxgbi adapter | |
1011 | */ | |
1012 | int cxgb3i_ofld_init(struct cxgbi_device *cdev) | |
1013 | { | |
1014 | struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; | |
1015 | struct adap_ports port; | |
1016 | struct ofld_page_info rx_page_info; | |
1017 | unsigned int wr_len; | |
1018 | int rc; | |
1019 | ||
1020 | if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 || | |
1021 | t3dev->ctl(t3dev, GET_PORTS, &port) < 0 || | |
1022 | t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) { | |
1023 | pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev); | |
1024 | return -EINVAL; | |
1025 | } | |
1026 | ||
1027 | if (cxgb3i_max_connect > CXGBI_MAX_CONN) | |
1028 | cxgb3i_max_connect = CXGBI_MAX_CONN; | |
1029 | ||
1030 | rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base, | |
1031 | cxgb3i_max_connect); | |
1032 | if (rc < 0) | |
1033 | return rc; | |
1034 | ||
1035 | init_wr_tab(wr_len); | |
1036 | cdev->csk_release_offload_resources = release_offload_resources; | |
1037 | cdev->csk_push_tx_frames = push_tx_frames; | |
1038 | cdev->csk_send_abort_req = send_abort_req; | |
1039 | cdev->csk_send_close_req = send_close_req; | |
1040 | cdev->csk_send_rx_credits = send_rx_credits; | |
1041 | cdev->csk_alloc_cpls = alloc_cpls; | |
1042 | cdev->csk_init_act_open = init_act_open; | |
1043 | ||
1044 | pr_info("cdev 0x%p, offload up, added.\n", cdev); | |
1045 | return 0; | |
1046 | } | |
1047 | ||
1048 | /* | |
1049 | * functions to program the pagepod in h/w | |
1050 | */ | |
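/*
 * Each pagepod (PPOD_SIZE bytes) in adapter memory describes up to
 * PPOD_PAGES_MAX host pages of a DDP gather list. ddp_set_map() writes
 * npods consecutive pods starting at ddp->llimit + (idx << PPOD_SIZE_SHIFT)
 * using ULP_MEM_WRITE bypass work requests, and ddp_clear_map() rewrites
 * the same range with zeroed pods to tear the mapping down.
 */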
1051 | static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) | |
1052 | { | |
1053 | struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; | |
1054 | ||
1055 | memset(req, 0, sizeof(*req)); | |
1056 | ||
1057 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); | |
1058 | req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | | |
1059 | V_ULPTX_CMD(ULP_MEM_WRITE)); | |
1060 | req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) | | |
1061 | V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1)); | |
1062 | } | |
1063 | ||
1064 | static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, | |
1065 | unsigned int idx, unsigned int npods, | |
1066 | struct cxgbi_gather_list *gl) | |
1067 | { | |
1068 | struct cxgbi_device *cdev = csk->cdev; | |
1069 | struct cxgbi_ddp_info *ddp = cdev->ddp; | |
1070 | unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; | |
1071 | int i; | |
1072 | ||
1073 | log_debug(1 << CXGBI_DBG_DDP, | |
1074 | "csk 0x%p, idx %u, npods %u, gl 0x%p.\n", | |
1075 | csk, idx, npods, gl); | |
1076 | ||
1077 | for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { | |
1078 | struct sk_buff *skb = ddp->gl_skb[idx]; | |
1079 | ||
1080 | /* hold on to the skb until we clear the ddp mapping */ | |
1081 | skb_get(skb); | |
1082 | ||
1083 | ulp_mem_io_set_hdr(skb, pm_addr); | |
1084 | cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head + | |
1085 | sizeof(struct ulp_mem_io)), | |
1086 | hdr, gl, i * PPOD_PAGES_MAX); | |
1087 | skb->priority = CPL_PRIORITY_CONTROL; | |
1088 | cxgb3_ofld_send(cdev->lldev, skb); | |
1089 | } | |
1090 | return 0; | |
1091 | } | |
1092 | ||
1093 | static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, | |
1094 | unsigned int idx, unsigned int npods) | |
1095 | { | |
1096 | struct cxgbi_device *cdev = chba->cdev; | |
1097 | struct cxgbi_ddp_info *ddp = cdev->ddp; | |
1098 | unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; | |
1099 | int i; | |
1100 | ||
1101 | log_debug(1 << CXGBI_DBG_DDP, | |
1102 | "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n", | |
1103 | cdev, idx, npods, tag); | |
1104 | ||
1105 | for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { | |
1106 | struct sk_buff *skb = ddp->gl_skb[idx]; | |
1107 | ||
1108 | if (!skb) { | |
1109 | pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n", | |
1110 | tag, idx, i, npods); | |
1111 | continue; | |
1112 | } | |
1113 | ddp->gl_skb[idx] = NULL; | |
1114 | memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE); | |
1115 | ulp_mem_io_set_hdr(skb, pm_addr); | |
1116 | skb->priority = CPL_PRIORITY_CONTROL; | |
1117 | cxgb3_ofld_send(cdev->lldev, skb); | |
1118 | } | |
1119 | } | |
1120 | ||
1121 | static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt) | |
1122 | { | |
1123 | int i; | |
1124 | ||
1125 | log_debug(1 << CXGBI_DBG_DDP, | |
1126 | "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); | |
1127 | ||
1128 | for (i = 0; i < cnt; i++, idx++) | |
1129 | if (ddp->gl_skb[idx]) { | |
1130 | kfree_skb(ddp->gl_skb[idx]); | |
1131 | ddp->gl_skb[idx] = NULL; | |
1132 | } | |
1133 | } | |
1134 | ||
1135 | static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx, | |
1136 | int cnt, gfp_t gfp) | |
1137 | { | |
1138 | int i; | |
1139 | ||
1140 | log_debug(1 << CXGBI_DBG_DDP, | |
1141 | "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); | |
1142 | ||
1143 | for (i = 0; i < cnt; i++) { | |
1144 | struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + | |
1145 | PPOD_SIZE, 0, gfp); | |
1146 | if (skb) { | |
1147 | ddp->gl_skb[idx + i] = skb; | |
1148 | } else { | |
1149 | ddp_free_gl_skb(ddp, idx, i); | |
1150 | return -ENOMEM; | |
1151 | } | |
1152 | } | |
1153 | return 0; | |
1154 | } | |
1155 | ||
1156 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | |
1157 | unsigned int tid, int pg_idx, bool reply) | |
1158 | { | |
1159 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, | |
1160 | GFP_KERNEL); | |
1161 | struct cpl_set_tcb_field *req; | |
1162 | u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; | |
1163 | ||
1164 | log_debug(1 << CXGBI_DBG_DDP, | |
1165 | "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx); | |
1166 | if (!skb) | |
1167 | return -ENOMEM; | |
1168 | ||
1169 | /* set up ulp submode and page size */ | |
1170 | req = (struct cpl_set_tcb_field *)skb->head; | |
1171 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | |
1172 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | |
1173 | req->reply = V_NO_REPLY(reply ? 0 : 1); | |
1174 | req->cpu_idx = 0; | |
1175 | req->word = htons(31); | |
1176 | req->mask = cpu_to_be64(0xF0000000); | |
1177 | req->val = cpu_to_be64(val << 28); | |
1178 | skb->priority = CPL_PRIORITY_CONTROL; | |
1179 | ||
1180 | cxgb3_ofld_send(csk->cdev->lldev, skb); | |
1181 | return 0; | |
1182 | } | |
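/*
 * Both ddp_setup_conn_pgidx() above and ddp_setup_conn_digest() below update
 * TCB word 31 of the connection through CPL_SET_TCB_FIELD: per the masks and
 * shifts used, the DDP page-size index occupies bits 31:28 (mask 0xF0000000)
 * and the ULP submode (header/data digest enables) bits 27:24 (mask
 * 0x0F000000).
 */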
1183 | ||
1184 | /** | |
1185 | * ddp_setup_conn_digest - set up the connection's iscsi digest settings | |
1186 | * @csk: cxgb tcp socket | |
1187 | * @tid: connection id | |
1188 | * @hcrc: header digest enabled | |
1189 | * @dcrc: data digest enabled | |
1190 | * @reply: request reply from h/w | |
1191 | * set up the iscsi digest settings for a connection identified by tid | |
1192 | */ | |
1193 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |
1194 | int hcrc, int dcrc, int reply) | |
1195 | { | |
1196 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, | |
1197 | GFP_KERNEL); | |
1198 | struct cpl_set_tcb_field *req; | |
1199 | u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0); | |
1200 | ||
1201 | log_debug(1 << CXGBI_DBG_DDP, | |
1202 | "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc); | |
1203 | if (!skb) | |
1204 | return -ENOMEM; | |
1205 | ||
1206 | /* set up ulp submode: header/data digest enables */ | |
1207 | req = (struct cpl_set_tcb_field *)skb->head; | |
1208 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | |
1209 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | |
1210 | req->reply = V_NO_REPLY(reply ? 0 : 1); | |
1211 | req->cpu_idx = 0; | |
1212 | req->word = htons(31); | |
1213 | req->mask = cpu_to_be64(0x0F000000); | |
1214 | req->val = cpu_to_be64(val << 24); | |
1215 | skb->priority = CPL_PRIORITY_CONTROL; | |
1216 | ||
1217 | cxgb3_ofld_send(csk->cdev->lldev, skb); | |
1218 | return 0; | |
1219 | } | |
1220 | ||
1221 | /** | |
1222 | * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource | |
1223 | * @cdev: cxgb3i adapter | |
1224 | * release all the resource held by the ddp pagepod manager for a given | |
1225 | * adapter if needed | |
1226 | */ | |
1227 | ||
1228 | static void t3_ddp_cleanup(struct cxgbi_device *cdev) | |
1229 | { | |
1230 | struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; | |
1231 | ||
1232 | if (cxgbi_ddp_cleanup(cdev)) { | |
1233 | pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev); | |
1234 | tdev->ulp_iscsi = NULL; | |
1235 | } | |
1236 | } | |
1237 | ||
1238 | /** | |
1239 | * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource | |
1240 | * @cdev: cxgb3i adapter | |
1241 | * initialize the ddp pagepod manager for a given adapter | |
1242 | */ | |
1243 | static int cxgb3i_ddp_init(struct cxgbi_device *cdev) | |
1244 | { | |
1245 | struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; | |
1246 | struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi; | |
1247 | struct ulp_iscsi_info uinfo; | |
1248 | unsigned int pgsz_factor[4]; | |
1249 | int err; | |
1250 | ||
1251 | if (ddp) { | |
1252 | kref_get(&ddp->refcnt); | |
1253 | pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n", | |
1254 | tdev, tdev->ulp_iscsi); | |
1255 | cdev->ddp = ddp; | |
1256 | return -EALREADY; | |
1257 | } | |
1258 | ||
1259 | err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo); | |
1260 | if (err < 0) { | |
1261 | pr_err("%s, failed to get iscsi param err=%d.\n", | |
1262 | tdev->name, err); | |
1263 | return err; | |
1264 | } | |
1265 | ||
1266 | err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit, | |
1267 | uinfo.max_txsz, uinfo.max_rxsz); | |
1268 | if (err < 0) | |
1269 | return err; | |
1270 | ||
1271 | ddp = cdev->ddp; | |
1272 | ||
1273 | uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; | |
1274 | cxgbi_ddp_page_size_factor(pgsz_factor); | |
1275 | uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT); | |
1276 | ||
1277 | err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); | |
1278 | if (err < 0) { | |
1279 | pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n", | |
1280 | tdev->name, err); | |
1281 | cxgbi_ddp_cleanup(cdev); | |
1282 | return err; | |
1283 | } | |
1284 | tdev->ulp_iscsi = ddp; | |
1285 | ||
1286 | cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb; | |
1287 | cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb; | |
1288 | cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; | |
1289 | cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; | |
1290 | cdev->csk_ddp_set = ddp_set_map; | |
1291 | cdev->csk_ddp_clear = ddp_clear_map; | |
1292 | ||
1293 | pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " | |
1294 | "%u/%u.\n", | |
1295 | tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, | |
1296 | ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz, | |
1297 | ddp->max_rxsz, uinfo.max_rxsz); | |
1298 | return 0; | |
1299 | } | |
1300 | ||
1301 | static void cxgb3i_dev_close(struct t3cdev *t3dev) | |
1302 | { | |
1303 | struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); | |
1304 | ||
1305 | if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) { | |
1306 | pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0); | |
1307 | return; | |
1308 | } | |
1309 | ||
1310 | cxgbi_device_unregister(cdev); | |
1311 | } | |
1312 | ||
1313 | /** | |
1314 | * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings | |
1315 | * @t3dev: t3cdev adapter | |
1316 | */ | |
1317 | static void cxgb3i_dev_open(struct t3cdev *t3dev) | |
1318 | { | |
1319 | struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); | |
1320 | struct adapter *adapter = tdev2adap(t3dev); | |
1321 | int i, err; | |
1322 | ||
1323 | if (cdev) { | |
1324 | pr_info("0x%p, updating.\n", cdev); | |
1325 | return; | |
1326 | } | |
1327 | ||
1328 | cdev = cxgbi_device_register(0, adapter->params.nports); | |
1329 | if (!cdev) { | |
1330 | pr_warn("device 0x%p register failed.\n", t3dev); | |
1331 | return; | |
1332 | } | |
1333 | ||
1334 | cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET; | |
1335 | cdev->lldev = t3dev; | |
1336 | cdev->pdev = adapter->pdev; | |
1337 | cdev->ports = adapter->port; | |
1338 | cdev->nports = adapter->params.nports; | |
1339 | cdev->mtus = adapter->params.mtus; | |
1340 | cdev->nmtus = NMTUS; | |
1341 | cdev->snd_win = cxgb3i_snd_win; | |
1342 | cdev->rcv_win = cxgb3i_rcv_win; | |
1343 | cdev->rx_credit_thres = cxgb3i_rx_credit_thres; | |
1344 | cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN; | |
1345 | cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss); | |
1346 | cdev->dev_ddp_cleanup = t3_ddp_cleanup; | |
1347 | cdev->itp = &cxgb3i_iscsi_transport; | |
1348 | ||
1349 | err = cxgb3i_ddp_init(cdev); | |
1350 | if (err) { | |
1351 | pr_info("0x%p ddp init failed\n", cdev); | |
1352 | goto err_out; | |
1353 | } | |
1354 | ||
1355 | err = cxgb3i_ofld_init(cdev); | |
1356 | if (err) { | |
1357 | pr_info("0x%p offload init failed\n", cdev); | |
1358 | goto err_out; | |
1359 | } | |
1360 | ||
1361 | err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN, | |
1362 | &cxgb3i_host_template, cxgb3i_stt); | |
1363 | if (err) | |
1364 | goto err_out; | |
1365 | ||
1366 | for (i = 0; i < cdev->nports; i++) | |
1367 | cdev->hbas[i]->ipv4addr = | |
1368 | cxgb3i_get_private_ipv4addr(cdev->ports[i]); | |
1369 | ||
1370 | pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n", | |
1371 | cdev, cdev ? cdev->flags : 0, t3dev, err); | |
1372 | return; | |
1373 | ||
1374 | err_out: | |
1375 | cxgbi_device_unregister(cdev); | |
1376 | } | |
1377 | ||
1378 | static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port) | |
1379 | { | |
1380 | struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); | |
1381 | ||
1382 | log_debug(1 << CXGBI_DBG_TOE, | |
1383 | "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n", | |
1384 | t3dev, cdev, event, port); | |
1385 | if (!cdev) | |
1386 | return; | |
1387 | ||
1388 | switch (event) { | |
1389 | case OFFLOAD_STATUS_DOWN: | |
1390 | cdev->flags |= CXGBI_FLAG_ADAPTER_RESET; | |
1391 | break; | |
1392 | case OFFLOAD_STATUS_UP: | |
1393 | cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET; | |
1394 | break; | |
1395 | } | |
1396 | } | |
1397 | ||
1398 | /** | |
1399 | * cxgb3i_init_module - module init entry point | |
1400 | * | |
1401 | * initialize any driver-wide global data structures and register itself | |
1402 | * with the cxgb3 module | |
1403 | */ | |
1404 | static int __init cxgb3i_init_module(void) | |
1405 | { | |
1406 | int rc; | |
1407 | ||
1408 | printk(KERN_INFO "%s", version); | |
1409 | ||
1410 | rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt); | |
1411 | if (rc < 0) | |
1412 | return rc; | |
1413 | ||
1414 | cxgb3_register_client(&t3_client); | |
1415 | return 0; | |
1416 | } | |
1417 | ||
1418 | /** | |
1419 | * cxgb3i_exit_module - module cleanup/exit entry point | |
1420 | * | |
1421 | * go through the driver's hba list and, for each hba, release any resources held, | |
1422 | * then unregister the iscsi transport and the cxgb3 client | |
1423 | */ | |
1424 | static void __exit cxgb3i_exit_module(void) | |
1425 | { | |
1426 | cxgb3_unregister_client(&t3_client); | |
1427 | cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3); | |
1428 | cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt); | |
1429 | } | |
1430 | ||
1431 | module_init(cxgb3i_init_module); | |
1432 | module_exit(cxgb3i_exit_module); |