/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by: Dimitris Michailidis (dm@chelsio.com)
 *             Karen Xie (kxie@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>

#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i.h"

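/* dbg_level must be defined before including libcxgbi.h, whose debug macros use it */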
static unsigned int dbg_level;
#include "../libcxgbi.h"

#define DRV_MODULE_NAME         "cxgb3i"
#define DRV_MODULE_DESC         "Chelsio T3 iSCSI Driver"
#define DRV_MODULE_VERSION      "2.0.0"
#define DRV_MODULE_RELDATE      "Jun. 2010"

static char version[] =
        DRV_MODULE_DESC " " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");

static int cxgb3i_rcv_win = 256 * 1024;
module_param(cxgb3i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3i_snd_win = 128 * 1024;
module_param(cxgb3i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3i_rx_credit_thres = 10 * 1024;
module_param(cxgb3i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3i_rx_credit_thres,
                 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3i_max_connect = 8 * 1024;
module_param(cxgb3i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)");

static unsigned int cxgb3i_sport_base = 20000;
module_param(cxgb3i_sport_base, uint, 0644);
MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");

static void cxgb3i_dev_open(struct t3cdev *);
static void cxgb3i_dev_close(struct t3cdev *);
static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);

static struct cxgb3_client t3_client = {
        .name = DRV_MODULE_NAME,
        .handlers = cxgb3i_cpl_handlers,
        .add = cxgb3i_dev_open,
        .remove = cxgb3i_dev_close,
        .event_handler = cxgb3i_dev_event_handler,
};

static struct scsi_host_template cxgb3i_host_template = {
        .module         = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .proc_name      = DRV_MODULE_NAME,
        .can_queue      = CXGB3I_SCSI_HOST_QDEPTH,
        .queuecommand   = iscsi_queuecommand,
        .change_queue_depth = scsi_change_queue_depth,
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .target_alloc   = iscsi_target_alloc,
        .use_clustering = DISABLE_CLUSTERING,
        .this_id        = -1,
        .track_queue_depth = 1,
};

static struct iscsi_transport cxgb3i_iscsi_transport = {
        .owner          = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        /* owner and name should be set already */
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                          CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                          CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
        .attr_is_visible = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
        .create_session = cxgbi_create_session,
        .destroy_session = cxgbi_destroy_session,
        .get_session_param = iscsi_session_get_param,
        /* connection management */
        .create_conn    = cxgbi_create_conn,
        .bind_conn      = cxgbi_bind_conn,
        .destroy_conn   = iscsi_tcp_conn_teardown,
        .start_conn     = iscsi_conn_start,
        .stop_conn      = iscsi_conn_stop,
        .get_conn_param = iscsi_conn_get_param,
        .set_param      = cxgbi_set_conn_param,
        .get_stats      = cxgbi_get_conn_stats,
        /* pdu xmit req from user space */
        .send_pdu       = iscsi_conn_send_pdu,
        /* task */
        .init_task      = iscsi_tcp_task_init,
        .xmit_task      = iscsi_tcp_task_xmit,
        .cleanup_task   = cxgbi_cleanup_task,
        /* pdu */
        .alloc_pdu      = cxgbi_conn_alloc_pdu,
        .init_pdu       = cxgbi_conn_init_pdu,
        .xmit_pdu       = cxgbi_conn_xmit_pdu,
        .parse_pdu_itt  = cxgbi_parse_pdu_itt,
        /* TCP connect/disconnect */
        .get_ep_param   = cxgbi_get_ep_param,
        .ep_connect     = cxgbi_ep_connect,
        .ep_poll        = cxgbi_ep_poll,
        .ep_disconnect  = cxgbi_ep_disconnect,
        /* Error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb3i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);

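/*
 * CPL connection active open request: host ->
 *
 * Build a CPL_ACT_OPEN_REQ carrying the connection's 4-tuple, window scale,
 * MSS index and L2T entry, and hand it to the L2 layer for transmission.
 */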
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                              const struct l2t_entry *e)
{
        unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
        struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

        skb->priority = CPL_PRIORITY_SETUP;

        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
        req->local_port = csk->saddr.sin_port;
        req->peer_port = csk->daddr.sin_port;
        req->local_ip = csk->saddr.sin_addr.s_addr;
        req->peer_ip = csk->daddr.sin_addr.s_addr;

        req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
                        V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
                        V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
        req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
                        V_RCV_BUFSIZ(cxgb3i_rcv_win >> 10));

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
                csk, csk->state, csk->flags, csk->atid,
                &req->local_ip, ntohs(req->local_port),
                &req->peer_ip, ntohs(req->peer_port),
                csk->mss_idx, e->idx, e->smt_idx);

        l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
        cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}

/*
 * CPL connection close request: host ->
 *
 * Close a connection by sending a CPL_CLOSE_CON_REQ message, queueing it to
 * the write queue (i.e., after any unsent TX data).
 */
static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        csk->cpl_close = NULL;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
        req->wr.wr_lo = htonl(V_WR_TID(tid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
        req->rsvd = htonl(csk->write_seq);

        cxgbi_sock_skb_entail(csk, skb);
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}

/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
        struct cpl_abort_req *req = cplhdr(skb);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "t3dev 0x%p, tid %u, skb 0x%p.\n",
                tdev, GET_TID(req), skb);
        req->cmd = CPL_ABORT_NO_RST;
        cxgb3_ofld_send(tdev, skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_abort_req;
        struct cpl_abort_req *req;

        if (unlikely(csk->state == CTP_ABORTING || !skb))
                return;
        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        /* Purge the send queue so we don't send anything after an abort. */
        cxgbi_sock_purge_write_queue(csk);

        csk->cpl_abort_req = NULL;
        req = (struct cpl_abort_req *)skb->head;
        skb->priority = CPL_PRIORITY_DATA;
        set_arp_failure_handler(skb, abort_arp_failure);
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
        req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
        req->cmd = CPL_ABORT_SEND_RST;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
                req->rsvd1);

        l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to the ABORT_REQ received.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
        struct sk_buff *skb = csk->cpl_abort_rpl;
        struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status %d.\n",
                csk, csk->state, csk->flags, csk->tid, rst_status);

        csk->cpl_abort_rpl = NULL;
        skb->priority = CPL_PRIORITY_DATA;
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
        rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
        rpl->cmd = rst_status;
        cxgb3_ofld_send(csk->cdev->lldev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;
        u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
                csk, csk->state, csk->flags, csk->tid, credits, dack);

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;
        }
        req = (struct cpl_rx_data_ack *)skb->head;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
        req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
                                 V_RX_CREDITS(credits));
        skb->priority = CPL_PRIORITY_ACK;
        cxgb3_ofld_send(csk->cdev->lldev, skb);
        return credits;
}

/*
 * CPL connection tx data: host ->
 *
 * Send iSCSI PDUs via TX_DATA CPL messages. Returns the number of
 * credits sent.
 * Each TX_DATA consumes work request credits (WRs), so we need to keep track
 * of how many we've used so far and how many are pending (i.e., not yet
 * ACK'ed by T3).
 */

static unsigned int wrlen __read_mostly;
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

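/*
 * Precompute, for each possible number of skb fragments, how many work
 * requests are needed: sgl_len is the SGL size in flits for i fragments,
 * plus 3 flits of WR header; wrlen caches the per-WR limit in bytes.
 */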
static void init_wr_tab(unsigned int wr_len)
{
        int i;

        if (skb_wrs[1])         /* already initialized */
                return;
        for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
                int sgl_len = (3 * i) / 2 + (i & 1);

                sgl_len += 3;
                skb_wrs[i] = (sgl_len <= wr_len
                              ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
        }
        wrlen = wr_len * 8;
}

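/*
 * Prepend a TX_DATA work request header to the payload. The first WR on a
 * connection additionally carries the TX_INIT parameters (ack pages, CPU
 * index and send buffer size).
 */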
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
                                   int len, int req_completion)
{
        struct tx_data_wr *req;
        struct l2t_entry *l2t = csk->l2t;

        skb_reset_transport_header(skb);
        req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
                        (req_completion ? F_WR_COMPL : 0));
        req->wr_lo = htonl(V_WR_TID(csk->tid));
        /* len includes the length of any HW ULP additions */
        req->len = htonl(len);
        /* V_TX_ULP_SUBMODE sets both the mode and submode */
        req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
                        V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
        req->sndseq = htonl(csk->snd_nxt);
        req->param = htonl(V_TX_PORT(l2t->smt_idx));

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
                                    V_TX_CPU_IDX(csk->rss_qid));
                /* sendbuffer is in units of 32KB. */
                req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15));
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }
}

/**
 * push_tx_frames -- start transmit
 * @csk: the offloaded connection
 * @req_completion: request wr_ack or not
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3. Must be called with the
 * connection's lock held. Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */

static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
        kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                return 0;
        }

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                int len = skb->len;     /* length before skb_push */
                int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
                int wrs_needed = skb_wrs[frags];

                if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
                        wrs_needed = 1;

                WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

                if (csk->wr_cred < wrs_needed) {
                        log_debug(1 << CXGBI_DBG_PDU_TX,
                                "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
                                csk, skb->len, skb->data_len, frags,
                                wrs_needed, csk->wr_cred);
                        break;
                }

                __skb_unlink(skb, &csk->write_queue);
                skb->priority = CPL_PRIORITY_DATA;
                skb->csum = wrs_needed; /* remember this until the WR_ACK */
                csk->wr_cred -= wrs_needed;
                csk->wr_una_cred += wrs_needed;
                cxgbi_sock_enqueue_wr(csk, skb);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
                        "left %u, unack %u.\n",
                        csk, skb->len, skb->data_len, frags, skb->csum,
                        csk->wr_cred, csk->wr_una_cred);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
                        if ((req_completion &&
                            csk->wr_una_cred == wrs_needed) ||
                            csk->wr_una_cred >= csk->wr_max_cred / 2) {
                                req_completion = 1;
                                csk->wr_una_cred = 0;
                        }
                        len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                        make_tx_data_wr(csk, skb, len, req_completion);
                        csk->snd_nxt += len;
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                }
                total_size += skb->truesize;
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, tid 0x%x, send skb 0x%p.\n",
                        csk, csk->tid, skb);
                set_arp_failure_handler(skb, arp_failure_skb_discard);
                l2t_send(csk->cdev->lldev, skb, csk->l2t);
        }
        return total_size;
}

/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message. Runs with
 * the connection lock held.
 */

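/*
 * Release the connection's atid and drop the socket reference taken when
 * the atid was allocated.
 */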
static inline void free_atid(struct cxgbi_sock *csk)
{
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
                cxgb3_free_atid(csk->cdev->lldev, csk->atid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
                cxgbi_sock_put(csk);
        }
}

static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int tid = GET_TID(req);
        unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
        u32 rcv_isn = ntohl(req->rcv_isn);      /* real RCV_ISN + 1 */

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
                atid, tid, csk, csk->state, csk->flags, rcv_isn);

        cxgbi_sock_get(csk);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
        csk->tid = tid;
        cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

        free_atid(csk);

        csk->rss_qid = G_QNUM(ntohs(skb->csum));

        spin_lock_bh(&csk->lock);
        if (csk->retry_timer.function) {
                del_timer(&csk->retry_timer);
                csk->retry_timer.function = NULL;
        }

        if (unlikely(csk->state != CTP_ACTIVE_OPEN))
                pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
                        csk, csk->state, csk->flags, csk->tid);

        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10))
                csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10);

        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

        if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
                /* upper layer has requested closing */
                send_abort_req(csk);
        else {
                if (skb_queue_len(&csk->write_queue))
                        push_tx_frames(csk, 1);
                cxgbi_conn_tx_open(csk);
        }

        spin_unlock_bh(&csk->lock);
        __kfree_skb(skb);
        return 0;
}

/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
        switch (status) {
        case CPL_ERR_CONN_RESET:
                return -ECONNREFUSED;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

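/*
 * Retry the active open: rebuild and resend the CPL_ACT_OPEN_REQ. Armed by
 * do_act_open_rpl() when the reply status is CPL_ERR_CONN_EXIST.
 */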
static void act_open_retry_timer(unsigned long data)
{
        struct sk_buff *skb;
        struct cxgbi_sock *csk = (struct cxgbi_sock *)data;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                set_arp_failure_handler(skb, act_open_arp_failure);
                send_act_open_req(csk, skb, csk->l2t);
        }
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}

static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_act_open_rpl *rpl = cplhdr(skb);

        pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
                csk, csk->state, csk->flags, csk->atid, rpl->status,
                &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

        if (rpl->status != CPL_ERR_TCAM_FULL &&
            rpl->status != CPL_ERR_CONN_EXIST &&
            rpl->status != CPL_ERR_ARP_MISS)
                cxgb3_queue_tid_release(tdev, GET_TID(rpl));

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        if (rpl->status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != act_open_retry_timer) {
                csk->retry_timer.function = act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
                                act_open_rpl_status_to_errno(rpl->status));

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
        __kfree_skb(skb);
        return 0;
}

/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_rcv_peer_close(csk);
        __kfree_skb(skb);
        return 0;
}

/*
 * Process CLOSE_CON_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
                            void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_close_con_rpl *rpl = cplhdr(skb);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
                csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));

        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
        __kfree_skb(skb);
        return 0;
}

/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
                                 int *need_rst)
{
        switch (abort_reason) {
        case CPL_ERR_BAD_SYN: /* fall through */
        case CPL_ERR_CONN_RESET:
                return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
        case CPL_ERR_XMIT_TIMEDOUT:
        case CPL_ERR_PERSIST_TIMEDOUT:
        case CPL_ERR_FINWAIT2_TIMEDOUT:
        case CPL_ERR_KEEPALIVE_TIMEDOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
        const struct cpl_abort_req_rss *req = cplhdr(skb);
        struct cxgbi_sock *csk = ctx;
        int rst_status = CPL_ABORT_NO_RST;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
            req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
                goto done;
        }

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
                cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
                cxgbi_sock_set_state(csk, CTP_ABORTING);
                goto out;
        }

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }

out:
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
done:
        __kfree_skb(skb);
        return 0;
}

/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies. We only process these messages if we anticipate
 * them, as the coordination between SW and HW in this area is somewhat
 * lacking and sometimes we get ABORT_RPLs after we are done with the
 * connection that originated the ABORT_REQ.
 */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
        struct cxgbi_sock *csk = ctx;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
                rpl->status, csk, csk ? csk->state : 0,
                csk ? csk->flags : 0UL);
        /*
         * Ignore replies to post-close aborts indicating that the abort was
         * requested too late. These connections are terminated when we get
         * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
         * arrives the TID is either no longer used or it has been recycled.
         */
        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;
        /*
         * Sometimes we've already closed the connection, e.g., a post-close
         * abort races with ABORT_REQ_RSS, the latter frees the connection
         * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
         * but FW turns the ABORT_REQ into a regular one and so we get
         * ABORT_RPL_RSS with status 0 and no connection.
         */
        if (csk)
                cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
        __kfree_skb(skb);
        return 0;
}

/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs, the payload may have been DDP'ed. If not, the
 * payload follows the BHS.
 */
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
        struct cpl_iscsi_hdr_norss data_cpl;
        struct cpl_rx_data_ddp_norss ddp_cpl;
        unsigned int hdr_len, data_len, status;
        unsigned int len;
        int err;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
                csk, csk->state, csk->flags, csk->tid, skb, skb->len);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
        cxgbi_skcb_flags(skb) = 0;

        skb_reset_transport_header(skb);
        __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

        len = hdr_len = ntohs(hdr_cpl->len);
        /* msg coalesce is off or not enough data received */
        if (skb->len <= hdr_len) {
                pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
                        csk->cdev->ports[csk->port_id]->name, csk->tid,
                        skb->len, hdr_len);
                goto abort_conn;
        }
        cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);

        err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
                            sizeof(ddp_cpl));
        if (err < 0) {
                pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
                        csk->cdev->ports[csk->port_id]->name, csk->tid,
                        skb->len, sizeof(ddp_cpl), err);
                goto abort_conn;
        }

        cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
        cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
        cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
        status = ntohl(ddp_cpl.ddp_status);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
                csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);

        if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
                cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
        if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
                cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
        if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
                cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);

        if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
                err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
                if (err < 0) {
                        pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
                                csk->cdev->ports[csk->port_id]->name,
                                csk->tid, sizeof(data_cpl), skb->len, err);
                        goto abort_conn;
                }
                data_len = ntohs(data_cpl.len);
                log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
                        "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
                        skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
                len += sizeof(data_cpl) + data_len;
        } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
                cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);

        csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
        __pskb_trim(skb, len);
        __skb_queue_tail(&csk->receive_queue, skb);
        cxgbi_conn_pdu_ready(csk);

        spin_unlock_bh(&csk->lock);
        return 0;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
        __kfree_skb(skb);
        return 0;
}

/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion. Advance snd_una and send the
 * next batch of work requests from the write queue.
 */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_wr_ack *hdr = cplhdr(skb);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, cr %u.\n",
                csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));

        cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
        __kfree_skb(skb);
        return 0;
}

/*
 * For each connection, pre-allocate the skbs needed for close/abort
 * requests, so that we can service those requests right away.
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
        csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
                                  GFP_KERNEL);
        if (!csk->cpl_close)
                return -ENOMEM;
        csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
                                      GFP_KERNEL);
        if (!csk->cpl_abort_req)
                goto free_cpl_skbs;

        csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
                                      GFP_KERNEL);
        if (!csk->cpl_abort_rpl)
                goto free_cpl_skbs;

        return 0;

free_cpl_skbs:
        cxgbi_sock_free_cpl_skbs(csk);
        return -ENOMEM;
}

/**
 * release_offload_resources - release offload resources
 * @csk: the offloaded iscsi tcp connection.
 * Release resources held by an offload connection (TID, L2T entry, etc.)
 */
static void l2t_put(struct cxgbi_sock *csk)
{
        struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

        if (csk->l2t) {
                l2t_release(t3dev, csk->l2t);
                csk->l2t = NULL;
                cxgbi_sock_put(csk);
        }
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
        struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        csk->rss_qid = 0;
        cxgbi_sock_free_cpl_skbs(csk);

        if (csk->wr_cred != csk->wr_max_cred) {
                cxgbi_sock_purge_wr_queue(csk);
                cxgbi_sock_reset_wr_list(csk);
        }
        l2t_put(csk);
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
                free_atid(csk);
        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
                cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
                cxgbi_sock_put(csk);
        }
        csk->dst = NULL;
        csk->cdev = NULL;
}

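/*
 * Keep the source IPv4 address programmed into the adapter in sync with the
 * address configured on the (possibly VLAN) net device.
 */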
static void update_address(struct cxgbi_hba *chba)
{
        if (chba->ipv4addr) {
                if (chba->vdev &&
                    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
                        cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
                        cxgb3i_set_private_ipv4addr(chba->ndev, 0);
                        pr_info("%s set %pI4.\n",
                                chba->vdev->name, &chba->ipv4addr);
                } else if (chba->ipv4addr !=
                           cxgb3i_get_private_ipv4addr(chba->ndev)) {
                        cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
                        pr_info("%s set %pI4.\n",
                                chba->ndev->name, &chba->ipv4addr);
                }
        } else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
                if (chba->vdev)
                        cxgb3i_set_private_ipv4addr(chba->vdev, 0);
                cxgb3i_set_private_ipv4addr(chba->ndev, 0);
        }
}

static int init_act_open(struct cxgbi_sock *csk)
{
        struct dst_entry *dst = csk->dst;
        struct cxgbi_device *cdev = csk->cdev;
        struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
        struct sk_buff *skb = NULL;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

        update_address(chba);
        if (chba->ipv4addr)
                csk->saddr.sin_addr.s_addr = chba->ipv4addr;

        csk->rss_qid = 0;
        csk->l2t = t3_l2t_get(t3dev, dst, ndev,
                              &csk->daddr.sin_addr.s_addr);
        if (!csk->l2t) {
                pr_err("NO l2t available.\n");
                return -EINVAL;
        }
        cxgbi_sock_get(csk);

        csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
        if (csk->atid < 0) {
                pr_err("NO atid available.\n");
                goto rel_resource;
        }
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);

        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
        if (!skb)
                goto rel_resource;
        skb->sk = (struct sock *)csk;
        set_arp_failure_handler(skb, act_open_arp_failure);

        csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
        csk->wr_una_cred = 0;
        csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
        cxgbi_sock_reset_wr_list(csk);
        csk->err = 0;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
                csk, csk->state, csk->flags,
                &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        send_act_open_req(csk, skb, csk->l2t);
        return 0;

rel_resource:
        if (skb)
                __kfree_skb(skb);
        return -EINVAL;
}

cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = do_act_establish,
        [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
        [CPL_PEER_CLOSE] = do_peer_close,
        [CPL_ABORT_REQ_RSS] = do_abort_req,
        [CPL_ABORT_RPL_RSS] = do_abort_rpl,
        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
        [CPL_TX_DMA_ACK] = do_wr_ack,
        [CPL_ISCSI_HDR] = do_iscsi_hdr,
};

/**
 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
 * @cdev: cxgbi adapter
 */
int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
        struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
        struct adap_ports port;
        struct ofld_page_info rx_page_info;
        unsigned int wr_len;
        int rc;

        if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
            t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
            t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
                pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
                return -EINVAL;
        }

        if (cxgb3i_max_connect > CXGBI_MAX_CONN)
                cxgb3i_max_connect = CXGBI_MAX_CONN;

        rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
                                         cxgb3i_max_connect);
        if (rc < 0)
                return rc;

        init_wr_tab(wr_len);
        cdev->csk_release_offload_resources = release_offload_resources;
        cdev->csk_push_tx_frames = push_tx_frames;
        cdev->csk_send_abort_req = send_abort_req;
        cdev->csk_send_close_req = send_close_req;
        cdev->csk_send_rx_credits = send_rx_credits;
        cdev->csk_alloc_cpls = alloc_cpls;
        cdev->csk_init_act_open = init_act_open;

        pr_info("cdev 0x%p, offload up, added.\n", cdev);
        return 0;
}

/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
        struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

        memset(req, 0, sizeof(*req));

        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
        req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
                                   V_ULPTX_CMD(ULP_MEM_WRITE));
        req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
                         V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}

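/*
 * Write npods pagepods into adapter memory, one ULP_MEM_WRITE work request
 * per pagepod.
 */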
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
                       unsigned int idx, unsigned int npods,
                       struct cxgbi_gather_list *gl)
{
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgbi_ddp_info *ddp = cdev->ddp;
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        log_debug(1 << CXGBI_DBG_DDP,
                "csk 0x%p, idx %u, npods %u, gl 0x%p.\n",
                csk, idx, npods, gl);

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
                                               PPOD_SIZE, 0, GFP_ATOMIC);

                if (!skb)
                        return -ENOMEM;

                ulp_mem_io_set_hdr(skb, pm_addr);
                cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
                                        sizeof(struct ulp_mem_io)),
                                   hdr, gl, i * PPOD_PAGES_MAX);
                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(cdev->lldev, skb);
        }
        return 0;
}

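/*
 * Clear npods pagepods, again one ULP_MEM_WRITE work request per pagepod.
 */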
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
                          unsigned int idx, unsigned int npods)
{
        struct cxgbi_device *cdev = chba->cdev;
        struct cxgbi_ddp_info *ddp = cdev->ddp;
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        log_debug(1 << CXGBI_DBG_DDP,
                "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
                cdev, idx, npods, tag);

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
                                               PPOD_SIZE, 0, GFP_ATOMIC);

                if (!skb) {
                        pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
                                tag, idx, i, npods);
                        continue;
                }
                ulp_mem_io_set_hdr(skb, pm_addr);
                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(cdev->lldev, skb);
        }
}

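/*
 * Set the DDP page-size index for a connection via CPL_SET_TCB_FIELD
 * (TCB word 31, bits 28-31).
 */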
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
                                unsigned int tid, int pg_idx, bool reply)
{
        struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
                                       GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

        log_debug(1 << CXGBI_DBG_DDP,
                "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
        if (!skb)
                return -ENOMEM;

        /* set up ulp submode and page size */
        req = (struct cpl_set_tcb_field *)skb->head;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0xF0000000);
        req->val = cpu_to_be64(val << 28);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(csk->cdev->lldev, skb);
        return 0;
}

/**
 * ddp_setup_conn_digest - setup conn. digest setting
 * @csk: cxgb tcp socket
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * set up the iscsi digest settings for a connection identified by tid
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
                                 int hcrc, int dcrc, int reply)
{
        struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
                                       GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

        log_debug(1 << CXGBI_DBG_DDP,
                "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
        if (!skb)
                return -ENOMEM;

        /* set up ulp submode and page size */
        req = (struct cpl_set_tcb_field *)skb->head;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0x0F000000);
        req->val = cpu_to_be64(val << 24);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(csk->cdev->lldev, skb);
        return 0;
}

/**
 * t3_ddp_cleanup - release the cxgb3 adapter's ddp resources
 * @cdev: cxgb3i adapter
 * release all the resources held by the ddp pagepod manager for a given
 * adapter if needed
 */

static void t3_ddp_cleanup(struct cxgbi_device *cdev)
{
        struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;

        if (cxgbi_ddp_cleanup(cdev)) {
                pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev);
                tdev->ulp_iscsi = NULL;
        }
}

/**
 * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
 * @cdev: cxgb3i adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
{
        struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
        struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
        struct ulp_iscsi_info uinfo;
        unsigned int pgsz_factor[4];
        int i, err;

        if (ddp) {
                kref_get(&ddp->refcnt);
                pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
                        tdev, tdev->ulp_iscsi);
                cdev->ddp = ddp;
                return -EALREADY;
        }

        err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
        if (err < 0) {
                pr_err("%s, failed to get iscsi param err=%d.\n",
                        tdev->name, err);
                return err;
        }

        err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit,
                             uinfo.max_txsz, uinfo.max_rxsz);
        if (err < 0)
                return err;

        ddp = cdev->ddp;

        uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
        cxgbi_ddp_page_size_factor(pgsz_factor);
        for (i = 0; i < 4; i++)
                uinfo.pgsz_factor[i] = pgsz_factor[i];
        uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);

        err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
        if (err < 0) {
                pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n",
                        tdev->name, err);
                cxgbi_ddp_cleanup(cdev);
                return err;
        }
        tdev->ulp_iscsi = ddp;

        cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
        cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
        cdev->csk_ddp_set = ddp_set_map;
        cdev->csk_ddp_clear = ddp_clear_map;

        pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
                "%u/%u.\n",
                tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
                ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
                ddp->max_rxsz, uinfo.max_rxsz);
        return 0;
}

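/**
 * cxgb3i_dev_close - unregister the cxgbi device for a t3 adapter
 * @t3dev: t3cdev adapter
 *
 * Skipped while an adapter reset is in progress.
 */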
static void cxgb3i_dev_close(struct t3cdev *t3dev)
{
        struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

        if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
                pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
                return;
        }

        cxgbi_device_unregister(cdev);
}

/**
 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 */
static void cxgb3i_dev_open(struct t3cdev *t3dev)
{
        struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
        struct adapter *adapter = tdev2adap(t3dev);
        int i, err;

        if (cdev) {
                pr_info("0x%p, updating.\n", cdev);
                return;
        }

        cdev = cxgbi_device_register(0, adapter->params.nports);
        if (!cdev) {
                pr_warn("device 0x%p register failed.\n", t3dev);
                return;
        }

        cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
        cdev->lldev = t3dev;
        cdev->pdev = adapter->pdev;
        cdev->ports = adapter->port;
        cdev->nports = adapter->params.nports;
        cdev->mtus = adapter->params.mtus;
        cdev->nmtus = NMTUS;
        cdev->snd_win = cxgb3i_snd_win;
        cdev->rcv_win = cxgb3i_rcv_win;
        cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
        cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
        cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
        cdev->dev_ddp_cleanup = t3_ddp_cleanup;
        cdev->itp = &cxgb3i_iscsi_transport;

        err = cxgb3i_ddp_init(cdev);
        if (err) {
                pr_info("0x%p ddp init failed\n", cdev);
                goto err_out;
        }

        err = cxgb3i_ofld_init(cdev);
        if (err) {
                pr_info("0x%p offload init failed\n", cdev);
                goto err_out;
        }

        err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
                             &cxgb3i_host_template, cxgb3i_stt);
        if (err)
                goto err_out;

        for (i = 0; i < cdev->nports; i++)
                cdev->hbas[i]->ipv4addr =
                        cxgb3i_get_private_ipv4addr(cdev->ports[i]);

        pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
                cdev, cdev ? cdev->flags : 0, t3dev, err);
        return;

err_out:
        cxgbi_device_unregister(cdev);
}

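/**
 * cxgb3i_dev_event_handler - offload status event handler
 * @t3dev: t3cdev adapter
 * @event: offload status event
 * @port: port id
 *
 * Tracks adapter resets by setting/clearing CXGBI_FLAG_ADAPTER_RESET.
 */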
static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
{
        struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

        log_debug(1 << CXGBI_DBG_TOE,
                "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
                t3dev, cdev, event, port);
        if (!cdev)
                return;

        switch (event) {
        case OFFLOAD_STATUS_DOWN:
                cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
                break;
        case OFFLOAD_STATUS_UP:
                cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
                break;
        }
}

/**
 * cxgb3i_init_module - module init entry point
 *
 * initialize any driver-wide global data structures and register itself
 * with the cxgb3 module
 */
static int __init cxgb3i_init_module(void)
{
        int rc;

        printk(KERN_INFO "%s", version);

        rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
        if (rc < 0)
                return rc;

        cxgb3_register_client(&t3_client);
        return 0;
}

/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * go through the driver's hba list, release the resources held by each hba,
 * and unregister the iscsi transport and the cxgb3 client
 */
static void __exit cxgb3i_exit_module(void)
{
        cxgb3_unregister_client(&t3_client);
        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
        cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}

module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);