]>
Commit | Line | Data |
---|---|---|
853e2bd2 BG |
1 | /* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver. |
2 | * Handles operations such as session offload/upload etc, and manages | |
3 | * session resources such as connection id and qp resources. | |
4 | * | |
5 | * Copyright (c) 2008 - 2010 Broadcom Corporation | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation. | |
10 | * | |
11 | * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) | |
12 | */ | |
13 | ||
14 | #include "bnx2fc.h" | |
15 | static void bnx2fc_upld_timer(unsigned long data); | |
16 | static void bnx2fc_ofld_timer(unsigned long data); | |
17 | static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, | |
18 | struct fcoe_port *port, | |
19 | struct fc_rport_priv *rdata); | |
20 | static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, | |
21 | struct bnx2fc_rport *tgt); | |
22 | static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |
23 | struct bnx2fc_rport *tgt); | |
24 | static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, | |
25 | struct bnx2fc_rport *tgt); | |
26 | static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id); | |
27 | ||
/*
 * bnx2fc_upld_timer - upload (disable/destroy) completion watchdog.
 *
 * Fires only when the firmware does not answer within BNX2FC_FW_TIMEOUT;
 * fakes the completion bits so the waiter in bnx2fc_upload_session()
 * makes progress instead of blocking forever.
 */
static void bnx2fc_upld_timer(unsigned long data)
{

	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;

	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
	/* fake upload completion */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	wake_up_interruptible(&tgt->upld_wait);
}
39 | ||
/*
 * bnx2fc_ofld_timer - offload completion watchdog.
 *
 * Fires when the offload request is not completed within
 * BNX2FC_FW_TIMEOUT; wakes the waiter in bnx2fc_offload_session() with
 * BNX2FC_FLAG_OFFLOADED cleared so the wait is treated as a failure.
 */
static void bnx2fc_ofld_timer(unsigned long data)
{

	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;

	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
	/* NOTE: This function should never be called, as
	 * offload should never timeout
	 */
	/*
	 * If the timer has expired, this session is dead
	 * Clear offloaded flag and logout of this device.
	 * Since OFFLOADED flag is cleared, this case
	 * will be considered as offload error and the
	 * port will be logged off, and conn_id, session
	 * resources are freed up in bnx2fc_offload_session
	 */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}
61 | ||
/**
 * bnx2fc_offload_session - offload an FCoE session to the hardware
 *
 * @port:	fcoe_port this session belongs to
 * @tgt:	bnx2fc_rport to offload (already zeroed by the caller)
 * @rdata:	libfc remote port private data
 *
 * Blocking call made with hba->hba_mutex held.  Allocates a conn_id and
 * qp resources, sends the offload request and waits (with a
 * BNX2FC_FW_TIMEOUT watchdog) for completion.  Transient firmware
 * context-allocation failures are retried up to three times.  On any
 * failure the rport is logged off and the session resources freed.
 */
static void bnx2fc_offload_session(struct fcoe_port *port,
					struct bnx2fc_rport *tgt,
					struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_hba *hba = port->priv;
	int rval;
	int i = 0;		/* ctx-alloc retry counter */

	/* Initialize bnx2fc_rport */
	/* NOTE: tgt is already bzero'd */
	rval = bnx2fc_init_tgt(tgt, port, rdata);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate conn id for "
			"port_id (%6x)\n", rport->port_id);
		goto ofld_err;
	}

	/* Allocate session resources */
	rval = bnx2fc_alloc_session_resc(hba, tgt);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate resources\n");
		goto ofld_err;
	}

	/*
	 * Initialize FCoE session offload process.
	 * Upon completion of offload process add
	 * rport to list of rports
	 */
retry_ofld:
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_ofld_req(port, tgt);
	if (rval) {
		printk(KERN_ERR PFX "ofld_req failed\n");
		goto ofld_err;
	}

	/*
	 * wait for the session is offloaded and enabled. 3 Secs
	 * should be ample time for this process to complete.
	 */
	setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	wait_event_interruptible(tgt->ofld_wait,
				 (test_bit(
					   BNX2FC_FLAG_OFLD_REQ_CMPL,
					   &tgt->flags)));
	if (signal_pending(current))
		flush_signals(current);

	del_timer_sync(&tgt->ofld_timer);

	if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
		/*
		 * Firmware could not allocate a context: back off one
		 * second and retry, at most three times.
		 */
		if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
				       &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
				"retry ofld..%d\n", i++);
			msleep_interruptible(1000);
			if (i > 3) {
				i = 0;
				goto ofld_err;
			}
			goto retry_ofld;
		}
		goto ofld_err;
	}
	if (bnx2fc_map_doorbell(tgt)) {
		printk(KERN_ERR PFX "map doorbell failed - no mem\n");
		/* upload will take care of cleaning up sess resc */
		lport->tt.rport_logoff(rdata);
		/*
		 * NOTE(review): execution falls through and still arms
		 * the CQ below even after the logoff above - confirm
		 * this is intentional rather than a missing return.
		 */
	}
	/* Arm CQ */
	bnx2fc_arm_cq(tgt);
	return;

ofld_err:
	/* couldn't offload the session. log off from this rport */
	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
	lport->tt.rport_logoff(rdata);
	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
	if (tgt->fcoe_conn_id != -1)
		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
}
149 | ||
/**
 * bnx2fc_flush_active_ios - fail and clean up all outstanding IOs
 *
 * @tgt:	bnx2fc_rport whose queues are to be drained
 *
 * Walks the active command, ELS and IO-retire queues under tgt_lock,
 * cancelling pending timeout work and issuing firmware cleanup
 * requests, then busy-waits (up to BNX2FC_WAIT_CNT * 25ms) outside the
 * lock for num_active_ios to drop to zero.
 */
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
{
	struct bnx2fc_cmd *io_req;
	struct list_head *list;
	struct list_head *tmp;
	int rc;
	int i = 0;
	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
		       tgt->num_active_ios.counter);

	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 1;

	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		i++;
		/* NOTE(review): cast assumes 'link' is the first member
		 * of struct bnx2fc_cmd - confirm against bnx2fc.h.
		 */
		io_req = (struct bnx2fc_cmd *)list;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");

		/* cancel_delayed_work() != 0 means the timer hold was ours */
		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
						&io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "cleaned up\n");
				complete(&io_req->tm_done);
			}
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		}

		set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
		rc = bnx2fc_initiate_cleanup(io_req);
		BUG_ON(rc);
	}

	list_for_each_safe(list, tmp, &tgt->els_queue) {
		i++;
		io_req = (struct bnx2fc_cmd *)list;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;

		BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

		/* notify the ELS originator that its request is aborted */
		if ((io_req->cb_func) && (io_req->cb_arg)) {
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}

		rc = bnx2fc_initiate_cleanup(io_req);
		BUG_ON(rc);
	}

	list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
		i++;
		io_req = (struct bnx2fc_cmd *)list;
		list_del_init(&io_req->link);

		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");

		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount, bnx2fc_cmd_release);

		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}

	BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
	i = 0;
	/* poll for firmware cleanup completions with the lock dropped */
	spin_unlock_bh(&tgt->tgt_lock);
	/* wait for active_ios to go to 0 */
	while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
		msleep(25);
	if (tgt->num_active_ios.counter != 0)
		printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
				    " active_ios = %d\n",
			tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 0;
	spin_unlock_bh(&tgt->tgt_lock);
}
236 | ||
/**
 * bnx2fc_upload_session - disable and destroy an offloaded session
 *
 * @port:	fcoe_port this session belongs to
 * @tgt:	bnx2fc_rport to upload
 *
 * Blocking call made with hba->hba_mutex held.  Sends the disable
 * request and waits (with a BNX2FC_FW_TIMEOUT watchdog) for completion,
 * flushes all outstanding IOs, then - only if the disable succeeded -
 * sends the destroy request and waits again.  Finally frees the qp
 * resources and releases the conn_id.
 */
static void bnx2fc_upload_session(struct fcoe_port *port,
			struct bnx2fc_rport *tgt)
{
	struct bnx2fc_hba *hba = port->priv;

	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
		tgt->num_active_ios.counter);

	/*
	 * Called with hba->hba_mutex held.
	 * This is a blocking call
	 */
	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	bnx2fc_send_session_disable_req(port, tgt);

	/*
	 * wait for upload to complete. 3 Secs
	 * should be sufficient time for this process to complete.
	 */
	setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
	wait_event_interruptible(tgt->upld_wait,
				 (test_bit(
					   BNX2FC_FLAG_UPLD_REQ_COMPL,
					   &tgt->flags)));

	if (signal_pending(current))
		flush_signals(current);

	del_timer_sync(&tgt->upld_timer);

	/*
	 * traverse thru the active_q and tmf_q and cleanup
	 * IOs in these lists
	 */
	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
		       tgt->flags);
	bnx2fc_flush_active_ios(tgt);

	/* Issue destroy KWQE */
	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		bnx2fc_send_session_destroy_req(hba, tgt);

		/* wait for destroy to complete */
		setup_timer(&tgt->upld_timer,
			    bnx2fc_upld_timer, (unsigned long)tgt);
		mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);

		wait_event_interruptible(tgt->upld_wait,
					 (test_bit(
						   BNX2FC_FLAG_UPLD_REQ_COMPL,
						   &tgt->flags)));

		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");

		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
			tgt->flags);
		if (signal_pending(current))
			flush_signals(current);

		del_timer_sync(&tgt->upld_timer);

	} else
		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
				" not sent to FW\n");

	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
}
312 | ||
/**
 * bnx2fc_init_tgt - initialize a bnx2fc_rport and allocate its conn_id
 *
 * @tgt:	bnx2fc_rport to initialize (zeroed by the caller)
 * @port:	fcoe_port this session belongs to
 * @rdata:	libfc remote port private data
 *
 * Returns 0 on success, -1 when the session limit is reached or no
 * conn_id is available (tgt->fcoe_conn_id is -1 in the former case).
 */
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata)
{

	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_hba *hba = port->priv;
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;

	tgt->rport = rport;
	tgt->rdata = rdata;
	tgt->port = port;

	if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
		tgt->fcoe_conn_id = -1;
		return -1;
	}

	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
	if (tgt->fcoe_conn_id == -1)
		return -1;

	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);

	/* queue depths for this session */
	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
	atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);

	/* Initialize the toggle bit */
	tgt->sq_curr_toggle_bit = 1;
	tgt->cq_curr_toggle_bit = 1;
	tgt->sq_prod_idx = 0;
	tgt->cq_cons_idx = 0;
	/* matches conn_db->rq_prod set in bnx2fc_alloc_session_resc() */
	tgt->rq_prod_idx = 0x8000;
	tgt->rq_cons_idx = 0;
	atomic_set(&tgt->num_active_ios, 0);

	/* initialize sq doorbell */
	sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
	sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
	/* initialize rx doorbell */
	rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
			  (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
			  (B577XX_FCOE_CONNECTION_TYPE <<
				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
	rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
		     (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);

	spin_lock_init(&tgt->tgt_lock);
	spin_lock_init(&tgt->cq_lock);

	/* Initialize active_cmd_queue list */
	INIT_LIST_HEAD(&tgt->active_cmd_queue);

	/* Initialize IO retire queue */
	INIT_LIST_HEAD(&tgt->io_retire_queue);

	INIT_LIST_HEAD(&tgt->els_queue);

	/* Initialize active_tm_queue list */
	INIT_LIST_HEAD(&tgt->active_tm_queue);

	init_waitqueue_head(&tgt->ofld_wait);
	init_waitqueue_head(&tgt->upld_wait);

	return 0;
}
384 | ||
/**
 * bnx2fc_rport_event_handler - handle libfc remote port events
 *
 * @lport:	local port the event is reported on
 * @rdata:	libfc remote port private data
 * @event:	the rport event
 *
 * This event_callback is called after successful completion of libfc
 * initiated target login. bnx2fc can proceed with initiating the session
 * establishment.  On LOGO/FAILED/STOP the offloaded session, if any,
 * is uploaded again.
 */
void bnx2fc_rport_event_handler(struct fc_lport *lport,
				struct fc_rport_priv *rdata,
				enum fc_rport_event event)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_hba *hba = port->priv;
	struct fc_rport *rport = rdata->rport;
	struct fc_rport_libfc_priv *rp;
	struct bnx2fc_rport *tgt;
	u32 port_id;

	BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
		event, rdata->ids.port_id);
	switch (event) {
	case RPORT_EV_READY:
		if (!rport) {
			printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
			break;
		}

		rp = rport->dd_data;
		if (rport->port_id == FC_FID_DIR_SERV) {
			/*
			 * bnx2fc_rport structure doesn't exist for
			 * directory server.
			 * We should not come here, as lport will
			 * take care of fabric login
			 */
			printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
				rdata->ids.port_id);
			break;
		}

		/* only FCP targets are offloaded */
		if (rdata->spp_type != FC_TYPE_FCP) {
			BNX2FC_HBA_DBG(lport, "not FCP type target."
				   " not offloading\n");
			break;
		}
		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
			BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
				   " not offloading\n");
			break;
		}

		/*
		 * Offload process is protected with hba mutex.
		 * Use the same mutex_lock for upload process too
		 */
		mutex_lock(&hba->hba_mutex);
		/* bnx2fc_rport is laid out right after the libfc private data */
		tgt = (struct bnx2fc_rport *)&rp[1];

		/* This can happen when ADISC finds the same target */
		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
			mutex_unlock(&hba->hba_mutex);
			return;
		}

		/*
		 * Offload the session. This is a blocking call, and will
		 * wait until the session is offloaded.
		 */
		bnx2fc_offload_session(port, tgt, rdata);

		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);

		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
			/*
			 * Session is offloaded and enabled. Map
			 * doorbell register for this target
			 */
			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
			/* This counter is protected with hba mutex */
			hba->num_ofld_sess++;

			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		} else {
			/*
			 * Offload or enable would have failed.
			 * In offload/enable completion path, the
			 * rport would have already been removed
			 */
			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
				   "offloaded flag not set\n");
		}
		mutex_unlock(&hba->hba_mutex);
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		if (port_id == FC_FID_DIR_SERV)
			break;

		if (!rport) {
			printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
				port_id);
			break;
		}
		rp = rport->dd_data;
		mutex_lock(&hba->hba_mutex);
		/*
		 * Perform session upload. Note that rdata->peers is already
		 * removed from disc->rports list before we get this event.
		 */
		tgt = (struct bnx2fc_rport *)&rp[1];

		/* nothing to upload if the session was never offloaded */
		if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
			mutex_unlock(&hba->hba_mutex);
			break;
		}
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);

		bnx2fc_upload_session(port, tgt);
		hba->num_ofld_sess--;
		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);
		/*
		 * Try to wake up the linkdown wait thread. If num_ofld_sess
		 * is 0, the waiting thread wakes up
		 */
		if ((hba->wait_for_link_down) &&
		    (hba->num_ofld_sess == 0)) {
			wake_up_interruptible(&hba->shutdown_wait);
		}
		if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
			/* explicit logout - re-login to the target */
			printk(KERN_ERR PFX "Relogin to the tgt\n");
			mutex_lock(&lport->disc.disc_mutex);
			lport->tt.rport_login(rdata);
			mutex_unlock(&lport->disc.disc_mutex);
		}
		mutex_unlock(&hba->hba_mutex);

		break;

	case RPORT_EV_NONE:
		break;
	}
}
530 | ||
531 | /** | |
532 | * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id | |
533 | * | |
534 | * @port: fcoe_port struct to lookup the target port on | |
535 | * @port_id: The remote port ID to look up | |
536 | */ | |
537 | struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, | |
538 | u32 port_id) | |
539 | { | |
540 | struct bnx2fc_hba *hba = port->priv; | |
541 | struct bnx2fc_rport *tgt; | |
542 | struct fc_rport_priv *rdata; | |
543 | int i; | |
544 | ||
545 | for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { | |
546 | tgt = hba->tgt_ofld_list[i]; | |
547 | if ((tgt) && (tgt->port == port)) { | |
548 | rdata = tgt->rdata; | |
549 | if (rdata->ids.port_id == port_id) { | |
550 | if (rdata->rp_state != RPORT_ST_DELETE) { | |
551 | BNX2FC_TGT_DBG(tgt, "rport " | |
552 | "obtained\n"); | |
553 | return tgt; | |
554 | } else { | |
555 | printk(KERN_ERR PFX "rport 0x%x " | |
556 | "is in DELETED state\n", | |
557 | rdata->ids.port_id); | |
558 | return NULL; | |
559 | } | |
560 | } | |
561 | } | |
562 | } | |
563 | return NULL; | |
564 | } | |
565 | ||
566 | ||
567 | /** | |
568 | * bnx2fc_alloc_conn_id - allocates FCOE Connection id | |
569 | * | |
570 | * @hba: pointer to adapter structure | |
571 | * @tgt: pointer to bnx2fc_rport structure | |
572 | */ | |
573 | static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, | |
574 | struct bnx2fc_rport *tgt) | |
575 | { | |
576 | u32 conn_id, next; | |
577 | ||
578 | /* called with hba mutex held */ | |
579 | ||
580 | /* | |
581 | * tgt_ofld_list access is synchronized using | |
582 | * both hba mutex and hba lock. Atleast hba mutex or | |
583 | * hba lock needs to be held for read access. | |
584 | */ | |
585 | ||
586 | spin_lock_bh(&hba->hba_lock); | |
587 | next = hba->next_conn_id; | |
588 | conn_id = hba->next_conn_id++; | |
589 | if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS) | |
590 | hba->next_conn_id = 0; | |
591 | ||
592 | while (hba->tgt_ofld_list[conn_id] != NULL) { | |
593 | conn_id++; | |
594 | if (conn_id == BNX2FC_NUM_MAX_SESS) | |
595 | conn_id = 0; | |
596 | ||
597 | if (conn_id == next) { | |
598 | /* No free conn_ids are available */ | |
599 | spin_unlock_bh(&hba->hba_lock); | |
600 | return -1; | |
601 | } | |
602 | } | |
603 | hba->tgt_ofld_list[conn_id] = tgt; | |
604 | tgt->fcoe_conn_id = conn_id; | |
605 | spin_unlock_bh(&hba->hba_lock); | |
606 | return conn_id; | |
607 | } | |
608 | ||
/**
 * bnx2fc_free_conn_id - release a conn_id back to the pool
 *
 * @hba:	pointer to adapter structure
 * @conn_id:	connection id to release
 *
 * Clears the tgt_ofld_list slot and rewinds the allocation cursor so
 * the freed id is handed out by the next bnx2fc_alloc_conn_id() call.
 */
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
{
	/* called with hba mutex held */
	spin_lock_bh(&hba->hba_lock);
	hba->tgt_ofld_list[conn_id] = NULL;
	hba->next_conn_id = conn_id;
	spin_unlock_bh(&hba->hba_lock);
}
617 | ||
/**
 * bnx2fc_alloc_session_resc - Allocate qp resources for the session
 *
 * @hba:	adapter structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 *
 * Allocates the DMA-coherent SQ, CQ, RQ (+ its PBL), XFERQ, CONFQ
 * (+ its PBL), connection DB and LCQ for one offloaded session; all
 * sizes are rounded up to whole pages.  On any failure everything
 * allocated so far is freed and -ENOMEM is returned.
 */
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	dma_addr_t page;
	int num_pages;
	u32 *pbl;

	/* Allocate and map SQ */
	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
	tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				     &tgt->sq_dma, GFP_KERNEL);
	if (!tgt->sq) {
		printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
			tgt->sq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->sq, 0, tgt->sq_mem_size);

	/* Allocate and map CQ */
	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
	tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				     &tgt->cq_dma, GFP_KERNEL);
	if (!tgt->cq) {
		printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
			tgt->cq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->cq, 0, tgt->cq_mem_size);

	/* Allocate and map RQ and RQ PBL */
	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
	tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
					&tgt->rq_dma, GFP_KERNEL);
	if (!tgt->rq) {
		printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
			tgt->rq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->rq, 0, tgt->rq_mem_size);

	tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
	tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
					 &tgt->rq_pbl_dma, GFP_KERNEL);
	if (!tgt->rq_pbl) {
		printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
			tgt->rq_pbl_size);
		goto mem_alloc_failure;
	}

	memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
	/* fill RQ PBL: one (lo32, hi32) DMA address pair per RQ page */
	num_pages = tgt->rq_mem_size / PAGE_SIZE;
	page = tgt->rq_dma;
	pbl = (u32 *)tgt->rq_pbl;

	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += PAGE_SIZE;
	}

	/* Allocate and map XFERQ */
	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
	tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
			       PAGE_MASK;

	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
					&tgt->xferq_dma, GFP_KERNEL);
	if (!tgt->xferq) {
		printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
			tgt->xferq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->xferq, 0, tgt->xferq_mem_size);

	/* Allocate and map CONFQ & CONFQ PBL */
	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
	tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
			       PAGE_MASK;

	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
					&tgt->confq_dma, GFP_KERNEL);
	if (!tgt->confq) {
		printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
			tgt->confq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->confq, 0, tgt->confq_mem_size);

	tgt->confq_pbl_size =
		(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
	tgt->confq_pbl_size =
		(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					    tgt->confq_pbl_size,
					    &tgt->confq_pbl_dma, GFP_KERNEL);
	if (!tgt->confq_pbl) {
		printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
			tgt->confq_pbl_size);
		goto mem_alloc_failure;
	}

	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
	/* fill CONFQ PBL, same (lo32, hi32) layout as the RQ PBL above */
	num_pages = tgt->confq_mem_size / PAGE_SIZE;
	page = tgt->confq_dma;
	pbl = (u32 *)tgt->confq_pbl;

	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += PAGE_SIZE;
	}

	/* Allocate and map ConnDB */
	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);

	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
					  tgt->conn_db_mem_size,
					  &tgt->conn_db_dma, GFP_KERNEL);
	if (!tgt->conn_db) {
		printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
			tgt->conn_db_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->conn_db, 0, tgt->conn_db_mem_size);


	/* Allocate and map LCQ */
	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
	tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
			     PAGE_MASK;

	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				      &tgt->lcq_dma, GFP_KERNEL);

	if (!tgt->lcq) {
		printk(KERN_ERR PFX "unable to allocate lcq %d\n",
			tgt->lcq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->lcq, 0, tgt->lcq_mem_size);

	/* matches tgt->rq_prod_idx initialized in bnx2fc_init_tgt() */
	tgt->conn_db->rq_prod = 0x8000;

	return 0;

mem_alloc_failure:
	/*
	 * NOTE(review): the caller's error path (bnx2fc_offload_session)
	 * also frees this conn_id, so it is released twice on this path -
	 * confirm bnx2fc_free_conn_id() tolerates the second call.
	 */
	bnx2fc_free_session_resc(hba, tgt);
	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
	return -ENOMEM;
}
785 | ||
/**
 * bnx2fc_free_session_resc - free qp resources for the session
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 *
 * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL.
 * Safe to call on a partially allocated session: every pointer is
 * checked before freeing and reset to NULL afterwards.  The queue
 * memory is released under cq_lock.
 */
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
						struct bnx2fc_rport *tgt)
{
	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");

	/* presumably the doorbell mapping from bnx2fc_map_doorbell() -
	 * confirm against bnx2fc.h */
	if (tgt->ctx_base) {
		iounmap(tgt->ctx_base);
		tgt->ctx_base = NULL;
	}

	/*
	 * NOTE(review): dma_free_coherent() is called below while holding
	 * a BH spinlock; some architectures do not allow this in atomic
	 * context - confirm this is safe on the supported platforms.
	 */
	spin_lock_bh(&tgt->cq_lock);
	/* Free LCQ */
	if (tgt->lcq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				    tgt->lcq, tgt->lcq_dma);
		tgt->lcq = NULL;
	}
	/* Free connDB */
	if (tgt->conn_db) {
		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
				    tgt->conn_db, tgt->conn_db_dma);
		tgt->conn_db = NULL;
	}
	/* Free confq and confq pbl */
	if (tgt->confq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
				    tgt->confq_pbl, tgt->confq_pbl_dma);
		tgt->confq_pbl = NULL;
	}
	if (tgt->confq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
				    tgt->confq, tgt->confq_dma);
		tgt->confq = NULL;
	}
	/* Free XFERQ */
	if (tgt->xferq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
				    tgt->xferq, tgt->xferq_dma);
		tgt->xferq = NULL;
	}
	/* Free RQ PBL and RQ */
	if (tgt->rq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
				    tgt->rq_pbl, tgt->rq_pbl_dma);
		tgt->rq_pbl = NULL;
	}
	if (tgt->rq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				    tgt->rq, tgt->rq_dma);
		tgt->rq = NULL;
	}
	/* Free CQ */
	if (tgt->cq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				    tgt->cq, tgt->cq_dma);
		tgt->cq = NULL;
	}
	/* Free SQ */
	if (tgt->sq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				    tgt->sq, tgt->sq_dma);
		tgt->sq = NULL;
	}
	spin_unlock_bh(&tgt->cq_lock);
}