/*
 * Copyright (C) 2017, Microsoft Corporation.
 *
 * Author(s): Long Li <longli@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */
#include <linux/module.h>
#include "smbdirect.h"
#include "cifs_debug.h"

static struct smbd_response *get_empty_queue_buffer(
		struct smbd_connection *info);
static struct smbd_response *get_receive_buffer(
		struct smbd_connection *info);
static void put_receive_buffer(
		struct smbd_connection *info,
		struct smbd_response *response);
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
static void destroy_receive_buffers(struct smbd_connection *info);

static void put_empty_packet(
		struct smbd_connection *info, struct smbd_response *response);
static void enqueue_reassembly(
		struct smbd_connection *info,
		struct smbd_response *response, int data_length);
static struct smbd_response *_get_first_reassembly(
		struct smbd_connection *info);

static int smbd_post_recv(
		struct smbd_connection *info,
		struct smbd_response *response);

static int smbd_post_send_empty(struct smbd_connection *info);

/* SMBD version number */
#define SMBD_V1 0x0100

/* Port numbers for SMBD transport */
#define SMB_PORT 445
#define SMBD_PORT 5445

/* Address lookup and resolve timeout in ms */
#define RDMA_RESOLVE_TIMEOUT 5000

/* SMBD negotiation timeout in seconds */
#define SMBD_NEGOTIATE_TIMEOUT 120

/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
#define SMBD_MIN_RECEIVE_SIZE 128
#define SMBD_MIN_FRAGMENTED_SIZE 131072

/*
 * Default maximum number of RDMA read/write outstanding on this connection
 * This value is possibly decreased during QP creation, based on hardware limits
 */
#define SMBD_CM_RESPONDER_RESOURCES 32

/* Maximum number of retries on data transfer operations */
#define SMBD_CM_RETRY 6
/* No need to retry on Receiver Not Ready since SMBD manages credits */
#define SMBD_CM_RNR_RETRY 0

/*
 * User configurable initial values per SMBD transport connection
 * as defined in [MS-SMBD] 3.1.1.1
 * Those may change after an SMBD negotiation
 */
/* The local peer's maximum number of credits to grant to the peer */
int smbd_receive_credit_max = 255;

/* The number of send credits to request from the remote peer */
int smbd_send_credit_target = 255;

/* The maximum single message size that can be sent to the remote peer */
int smbd_max_send_size = 1364;

/* The maximum fragmented upper-layer payload receive size supported */
int smbd_max_fragmented_recv_size = 1024 * 1024;

/* The maximum single-message size which can be received */
int smbd_max_receive_size = 8192;

/* The timeout to initiate send of a keepalive message on idle */
int smbd_keep_alive_interval = 120;

/*
 * User configurable initial values for RDMA transport
 * The actual values used may be lower and are limited to hardware capabilities
 */
/* Default maximum number of SGEs in an RDMA write/read */
int smbd_max_frmr_depth = 2048;

/* If payload is less than this many bytes, use RDMA send/recv, not read/write */
int rdma_readwrite_threshold = 4096;

/* Transport logging functions
 * Logging is defined as classes. They can be OR'ed to define the actual
 * logging level via the module parameter smbd_logging_class
 * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
 * log_rdma_event()
 */
#define LOG_OUTGOING 0x1
#define LOG_INCOMING 0x2
#define LOG_READ 0x4
#define LOG_WRITE 0x8
#define LOG_RDMA_SEND 0x10
#define LOG_RDMA_RECV 0x20
#define LOG_KEEP_ALIVE 0x40
#define LOG_RDMA_EVENT 0x80
#define LOG_RDMA_MR 0x100
static unsigned int smbd_logging_class;
module_param(smbd_logging_class, uint, 0644);
MODULE_PARM_DESC(smbd_logging_class,
	"Logging class for SMBD transport 0x0 to 0x100");

#define ERR 0x0
#define INFO 0x1
static unsigned int smbd_logging_level = ERR;
module_param(smbd_logging_level, uint, 0644);
MODULE_PARM_DESC(smbd_logging_level,
	"Logging level for SMBD transport, 0 (default): error, 1: info");

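/*
 * For example, loading the module with
 *     modprobe cifs smbd_logging_class=0x30
 * logs all log_rdma_send() and log_rdma_recv() messages regardless of
 * level; setting smbd_logging_level=1 additionally logs INFO messages
 * of every class. See the log_rdma() macro below for the exact test.
 */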
#define log_rdma(level, class, fmt, args...)				\
do {									\
	if (level <= smbd_logging_level || class & smbd_logging_class)	\
		cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
} while (0)

#define log_outgoing(level, fmt, args...) \
		log_rdma(level, LOG_OUTGOING, fmt, ##args)
#define log_incoming(level, fmt, args...) \
		log_rdma(level, LOG_INCOMING, fmt, ##args)
#define log_read(level, fmt, args...)	log_rdma(level, LOG_READ, fmt, ##args)
#define log_write(level, fmt, args...)	log_rdma(level, LOG_WRITE, fmt, ##args)
#define log_rdma_send(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
#define log_rdma_recv(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
#define log_keep_alive(level, fmt, args...) \
		log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
#define log_rdma_event(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
#define log_rdma_mr(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_MR, fmt, ##args)

/*
 * Destroy the transport and related RDMA and memory resources
 * Need to go through all the pending counters and make sure no one is using
 * the transport while it is destroyed
 */
static void smbd_destroy_rdma_work(struct work_struct *work)
{
	struct smbd_response *response;
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, destroy_work);
	unsigned long flags;

	log_rdma_event(INFO, "destroying qp\n");
	ib_drain_qp(info->id->qp);
	rdma_destroy_qp(info->id);

	/* Unblock all I/O waiting on the send queue */
	wake_up_interruptible_all(&info->wait_send_queue);

	log_rdma_event(INFO, "cancelling idle timer\n");
	cancel_delayed_work_sync(&info->idle_timer_work);
	log_rdma_event(INFO, "cancelling send immediate work\n");
	cancel_delayed_work_sync(&info->send_immediate_work);

	log_rdma_event(INFO, "wait for all recv to finish\n");
	wake_up_interruptible(&info->wait_reassembly_queue);

	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
	wait_event(info->wait_send_pending,
		atomic_read(&info->send_pending) == 0);
	wait_event(info->wait_send_payload_pending,
		atomic_read(&info->send_payload_pending) == 0);

	/* It's not possible for the upper layer to get to reassembly */
	log_rdma_event(INFO, "drain the reassembly queue\n");
	do {
		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
		response = _get_first_reassembly(info);
		if (response) {
			list_del(&response->list);
			spin_unlock_irqrestore(
				&info->reassembly_queue_lock, flags);
			put_receive_buffer(info, response);
		}
	} while (response);
	spin_unlock_irqrestore(&info->reassembly_queue_lock, flags);
	info->reassembly_data_length = 0;

	log_rdma_event(INFO, "free receive buffers\n");
	wait_event(info->wait_receive_queues,
		info->count_receive_queue + info->count_empty_packet_queue
			== info->receive_credit_max);
	destroy_receive_buffers(info);

	ib_free_cq(info->send_cq);
	ib_free_cq(info->recv_cq);
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

	/* free mempools */
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);

	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);

	info->transport_status = SMBD_DESTROYED;
	wake_up_all(&info->wait_destroy);
}

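/*
 * Called from the CM event handler on disconnect; defer the actual
 * teardown to destroy_work since we must not block in the upcall context
 */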
static int smbd_process_disconnected(struct smbd_connection *info)
{
	schedule_work(&info->destroy_work);
	return 0;
}

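/*
 * Transition the transport to SMBD_DISCONNECTING and issue rdma_disconnect();
 * the resulting RDMA_CM_EVENT_DISCONNECTED upcall drives the teardown above
 */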
static void smbd_disconnect_rdma_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, disconnect_work);

	if (info->transport_status == SMBD_CONNECTED) {
		info->transport_status = SMBD_DISCONNECTING;
		rdma_disconnect(info->id);
	}
}

static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
{
	queue_work(info->workqueue, &info->disconnect_work);
}

/* Upcall from RDMA CM */
static int smbd_conn_upcall(
		struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct smbd_connection *info = id->context;

	log_rdma_event(INFO, "event=%d status=%d\n",
		event->event, event->status);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		info->ri_rc = 0;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
		info->ri_rc = -EHOSTUNREACH;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ROUTE_ERROR:
		info->ri_rc = -ENETUNREACH;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		log_rdma_event(INFO, "connected event=%d\n", event->event);
		info->transport_status = SMBD_CONNECTED;
		wake_up_interruptible(&info->conn_wait);
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->conn_wait);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_DISCONNECTED:
		/* This happens when we fail the negotiation */
		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
			info->transport_status = SMBD_DISCONNECTED;
			wake_up(&info->conn_wait);
			break;
		}

		info->transport_status = SMBD_DISCONNECTED;
		smbd_process_disconnected(info);
		break;

	default:
		break;
	}

	return 0;
}

/* Upcall from RDMA QP */
static void
smbd_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct smbd_connection *info = context;

	log_rdma_event(ERR, "%s on device %s info %p\n",
		ib_event_msg(event->event), event->device->name, info);

	switch (event->event) {
	case IB_EVENT_CQ_ERR:
	case IB_EVENT_QP_FATAL:
		smbd_disconnect_rdma_connection(info);

	default:
		break;
	}
}

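/*
 * The SMBD packet payload lives in the packet[] buffer that follows the
 * bookkeeping fields of smbd_request/smbd_response (see smbdirect.h)
 */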
static inline void *smbd_request_payload(struct smbd_request *request)
{
	return (void *)request->packet;
}

static inline void *smbd_response_payload(struct smbd_response *response)
{
	return (void *)response->packet;
}

/* Called when an RDMA send is done */
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	int i;
	struct smbd_request *request =
		container_of(wc->wr_cqe, struct smbd_request, cqe);

	log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
		request, wc->status);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(request->info);
	}

	for (i = 0; i < request->num_sge; i++)
		ib_dma_unmap_single(request->info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);

	if (request->has_payload) {
		if (atomic_dec_and_test(&request->info->send_payload_pending))
			wake_up(&request->info->wait_send_payload_pending);
	} else {
		if (atomic_dec_and_test(&request->info->send_pending))
			wake_up(&request->info->wait_send_pending);
	}

	mempool_free(request, request->info->request_mempool);
}

static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
{
	log_rdma_event(INFO, "resp message min_version %u max_version %u "
		"negotiated_version %u credits_requested %u "
		"credits_granted %u status %u max_readwrite_size %u "
		"preferred_send_size %u max_receive_size %u "
		"max_fragmented_size %u\n",
		resp->min_version, resp->max_version, resp->negotiated_version,
		resp->credits_requested, resp->credits_granted, resp->status,
		resp->max_readwrite_size, resp->preferred_send_size,
		resp->max_receive_size, resp->max_fragmented_size);
}

/*
 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
 * response, packet_length: the negotiation response message
 * return value: true if negotiation is a success, false if failed
 */
static bool process_negotiation_response(
		struct smbd_response *response, int packet_length)
{
	struct smbd_connection *info = response->info;
	struct smbd_negotiate_resp *packet = smbd_response_payload(response);

	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
		log_rdma_event(ERR,
			"error: packet_length=%d\n", packet_length);
		return false;
	}

	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
		log_rdma_event(ERR, "error: negotiated_version=%x\n",
			le16_to_cpu(packet->negotiated_version));
		return false;
	}
	info->protocol = le16_to_cpu(packet->negotiated_version);

	if (packet->credits_requested == 0) {
		log_rdma_event(ERR, "error: credits_requested==0\n");
		return false;
	}
	info->receive_credit_target = le16_to_cpu(packet->credits_requested);

	if (packet->credits_granted == 0) {
		log_rdma_event(ERR, "error: credits_granted==0\n");
		return false;
	}
	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));

	atomic_set(&info->receive_credits, 0);

	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
			le32_to_cpu(packet->preferred_send_size));
		return false;
	}
	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);

	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
		log_rdma_event(ERR, "error: max_receive_size=%d\n",
			le32_to_cpu(packet->max_receive_size));
		return false;
	}
	info->max_send_size = min_t(int, info->max_send_size,
		le32_to_cpu(packet->max_receive_size));

	if (le32_to_cpu(packet->max_fragmented_size) <
			SMBD_MIN_FRAGMENTED_SIZE) {
		log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
			le32_to_cpu(packet->max_fragmented_size));
		return false;
	}
	info->max_fragmented_send_size =
		le32_to_cpu(packet->max_fragmented_size);

	return true;
}

/*
 * Check and schedule to send an immediate packet
 * This is used to extend credits to remote peer to keep the transport busy
 */
static void check_and_send_immediate(struct smbd_connection *info)
{
	if (info->transport_status != SMBD_CONNECTED)
		return;

	info->send_immediate = true;

	/*
	 * Promptly send a packet if our peer is running low on receive
	 * credits
	 */
	if (atomic_read(&info->receive_credits) <
		info->receive_credit_target - 1)
		queue_delayed_work(
			info->workqueue, &info->send_immediate_work, 0);
}

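/*
 * Replenish receive buffers up to receive_credit_target: post new
 * receives first from the receive buffer queue, then from the empty
 * packet queue, and record the newly offered credits so the next send
 * can grant them to the peer
 */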
static void smbd_post_send_credits(struct work_struct *work)
{
	int ret = 0;
	int use_receive_queue = 1;
	int rc;
	struct smbd_response *response;
	struct smbd_connection *info =
		container_of(work, struct smbd_connection,
			post_send_credits_work);

	if (info->transport_status != SMBD_CONNECTED) {
		wake_up(&info->wait_receive_queues);
		return;
	}

	if (info->receive_credit_target >
		atomic_read(&info->receive_credits)) {
		while (true) {
			if (use_receive_queue)
				response = get_receive_buffer(info);
			else
				response = get_empty_queue_buffer(info);
			if (!response) {
				/* now switch to empty packet queue */
				if (use_receive_queue) {
					use_receive_queue = 0;
					continue;
				} else
					break;
			}

			response->type = SMBD_TRANSFER_DATA;
			response->first_segment = false;
			rc = smbd_post_recv(info, response);
			if (rc) {
				log_rdma_recv(ERR,
					"post_recv failed rc=%d\n", rc);
				put_receive_buffer(info, response);
				break;
			}

			ret++;
		}
	}

	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += ret;
	spin_unlock(&info->lock_new_credits_offered);

	atomic_add(ret, &info->receive_credits);

	/* Check if we can post new receive and grant credits to peer */
	check_and_send_immediate(info);
}

static void smbd_recv_done_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, recv_done_work);

	/*
	 * We may have new send credits granted from remote peer
	 * If any sender is blocked on lack of credits, unblock it
	 */
	if (atomic_read(&info->send_credits))
		wake_up_interruptible(&info->wait_send_queue);

	/*
	 * Check if we need to send something to remote peer to
	 * grant more credits or respond to KEEP_ALIVE packet
	 */
	check_and_send_immediate(info);
}

/* Called from softirq, when recv is done */
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_data_transfer *data_transfer;
	struct smbd_response *response =
		container_of(wc->wr_cqe, struct smbd_response, cqe);
	struct smbd_connection *info = response->info;
	int data_length = 0;

	log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
		"byte_len=%d pkey_index=%x\n",
		response, response->type, wc->status, wc->opcode,
		wc->byte_len, wc->pkey_index);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(info);
		goto error;
	}

	ib_dma_sync_single_for_cpu(
		wc->qp->device,
		response->sge.addr,
		response->sge.length,
		DMA_FROM_DEVICE);

	switch (response->type) {
	/* SMBD negotiation response */
	case SMBD_NEGOTIATE_RESP:
		dump_smbd_negotiate_resp(smbd_response_payload(response));
		info->full_packet_received = true;
		info->negotiate_done =
			process_negotiation_response(response, wc->byte_len);
		complete(&info->negotiate_completion);
		break;

	/* SMBD data transfer packet */
	case SMBD_TRANSFER_DATA:
		data_transfer = smbd_response_payload(response);
		data_length = le32_to_cpu(data_transfer->data_length);

		/*
		 * If this is a packet with data payload place the data in
		 * reassembly queue and wake up the reading thread
		 */
		if (data_length) {
			if (info->full_packet_received)
				response->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				info->full_packet_received = false;
			else
				info->full_packet_received = true;

			enqueue_reassembly(
				info,
				response,
				data_length);
		} else
			put_empty_packet(info, response);

		if (data_length)
			wake_up_interruptible(&info->wait_reassembly_queue);

		atomic_dec(&info->receive_credits);
		info->receive_credit_target =
			le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			&info->send_credits);

		log_incoming(INFO, "data flags %d data_offset %d "
			"data_length %d remaining_data_length %d\n",
			le16_to_cpu(data_transfer->flags),
			le32_to_cpu(data_transfer->data_offset),
			le32_to_cpu(data_transfer->data_length),
			le32_to_cpu(data_transfer->remaining_data_length));

		/* Send a KEEP_ALIVE response right away if requested */
		info->keep_alive_requested = KEEP_ALIVE_NONE;
		if (le16_to_cpu(data_transfer->flags) &
				SMB_DIRECT_RESPONSE_REQUESTED) {
			info->keep_alive_requested = KEEP_ALIVE_PENDING;
		}

		queue_work(info->workqueue, &info->recv_done_work);
		return;

	default:
		log_rdma_recv(ERR,
			"unexpected response type=%d\n", response->type);
	}

error:
	put_receive_buffer(info, response);
}

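/*
 * Create an RDMA CM ID and resolve the destination address and route,
 * waiting up to RDMA_RESOLVE_TIMEOUT ms for each step
 * return value: the cm_id on success, an ERR_PTR on failure
 */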
static struct rdma_cm_id *smbd_create_id(
		struct smbd_connection *info,
		struct sockaddr *dstaddr, int port)
{
	struct rdma_cm_id *id;
	int rc;
	__be16 *sport;

	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
		RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
		return id;
	}

	if (dstaddr->sa_family == AF_INET6)
		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
	else
		sport = &((struct sockaddr_in *)dstaddr)->sin_port;

	*sport = htons(port);

	init_completion(&info->ri_done);
	info->ri_rc = -ETIMEDOUT;

	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
		RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
		goto out;
	}

	info->ri_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
		goto out;
	}

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Test if FRWR (Fast Registration Work Requests) is supported on the device
 * This implementation requires FRWR on RDMA read/write
 * return value: true if it is supported
 */
static bool frwr_is_supported(struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}

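/*
 * Open the interface adapter: create the cm_id, verify FRWR support on
 * the device, and allocate the protection domain
 */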
static int smbd_ia_open(
		struct smbd_connection *info,
		struct sockaddr *dstaddr, int port)
{
	int rc;

	info->id = smbd_create_id(info, dstaddr, port);
	if (IS_ERR(info->id)) {
		rc = PTR_ERR(info->id);
		goto out1;
	}

	if (!frwr_is_supported(&info->id->device->attrs)) {
		log_rdma_event(ERR,
			"Fast Registration Work Requests "
			"(FRWR) is not supported\n");
		log_rdma_event(ERR,
			"Device capability flags = %llx "
			"max_fast_reg_page_list_len = %u\n",
			info->id->device->attrs.device_cap_flags,
			info->id->device->attrs.max_fast_reg_page_list_len);
		rc = -EPROTONOSUPPORT;
		goto out2;
	}

	info->pd = ib_alloc_pd(info->id->device, 0);
	if (IS_ERR(info->pd)) {
		rc = PTR_ERR(info->pd);
		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
		goto out2;
	}

	return 0;

out2:
	rdma_destroy_id(info->id);
	info->id = NULL;

out1:
	return rc;
}

/*
 * Send a negotiation request message to the peer
 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
 * After negotiation, the transport is connected and ready for
 * carrying upper layer SMB payload
 */
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	int rc = -ENOMEM;
	struct smbd_request *request;
	struct smbd_negotiate_req *packet;

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request)
		return rc;

	request->info = info;

	packet = smbd_request_payload(request);
	packet->min_version = cpu_to_le16(SMBD_V1);
	packet->max_version = cpu_to_le16(SMBD_V1);
	packet->reserved = 0;
	packet->credits_requested = cpu_to_le16(info->send_credit_target);
	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
	packet->max_fragmented_size =
		cpu_to_le32(info->max_fragmented_recv_size);

	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(
		info->id->device, (void *)packet,
		sizeof(*packet), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		rc = -EIO;
		goto dma_mapping_failed;
	}

	request->sge[0].length = sizeof(*packet);
	request->sge[0].lkey = info->pd->local_dma_lkey;

	ib_dma_sync_single_for_device(
		info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

	request->cqe.done = send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
		request->sge[0].addr,
		request->sge[0].length, request->sge[0].lkey);

	request->has_payload = false;
	atomic_inc(&info->send_pending);
	rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
	if (!rc)
		return 0;

	/* if we reach here, post send failed */
	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
	atomic_dec(&info->send_pending);
	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

dma_mapping_failed:
	mempool_free(request, info->request_mempool);
	return rc;
}

/*
 * Extend the credits to remote peer
 * This implements [MS-SMBD] 3.1.5.9
 * The idea is that we should extend credits to remote peer as quickly as
 * it's allowed, to maintain data flow. We allocate as much receive
 * buffer as possible, and extend the receive credits to remote peer
 * return value: the new credits being granted.
 */
static int manage_credits_prior_sending(struct smbd_connection *info)
{
	int new_credits;

	spin_lock(&info->lock_new_credits_offered);
	new_credits = info->new_credits_offered;
	info->new_credits_offered = 0;
	spin_unlock(&info->lock_new_credits_offered);

	return new_credits;
}

/*
 * Check if we need to send a KEEP_ALIVE message
 * The idle connection timer triggers a KEEP_ALIVE message when it expires
 * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
 * back a response.
 * return value:
 * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
 * 0: otherwise
 */
static int manage_keep_alive_before_sending(struct smbd_connection *info)
{
	if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
		info->keep_alive_requested = KEEP_ALIVE_SENT;
		return 1;
	}
	return 0;
}

/*
 * Build and prepare the SMBD packet header
 * This function waits for available send credits and builds an SMBD packet
 * header. The caller can then optionally append a payload to the packet
 * after the header
 * input values
 * size: the size of the payload
 * remaining_data_length: remaining data to send if this is part of a
 * fragmented packet
 * output values
 * request_out: the request allocated from this function
 * return values: 0 on success, otherwise actual error code returned
 */
static int smbd_create_header(struct smbd_connection *info,
		int size, int remaining_data_length,
		struct smbd_request **request_out)
{
	struct smbd_request *request;
	struct smbd_data_transfer *packet;
	int header_length;
	int rc;

	/* Wait for send credits. An SMBD packet needs one credit */
	rc = wait_event_interruptible(info->wait_send_queue,
		atomic_read(&info->send_credits) > 0 ||
		info->transport_status != SMBD_CONNECTED);
	if (rc)
		return rc;

	if (info->transport_status != SMBD_CONNECTED) {
		log_outgoing(ERR, "disconnected not sending\n");
		return -ENOENT;
	}
	atomic_dec(&info->send_credits);

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request) {
		rc = -ENOMEM;
		goto err;
	}

	request->info = info;

	/* Fill in the packet header */
	packet = smbd_request_payload(request);
	packet->credits_requested = cpu_to_le16(info->send_credit_target);
	packet->credits_granted =
		cpu_to_le16(manage_credits_prior_sending(info));
	info->send_immediate = false;

	packet->flags = 0;
	if (manage_keep_alive_before_sending(info))
		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);

	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
		"data_offset=%d data_length=%d remaining_data_length=%d\n",
		le16_to_cpu(packet->credits_requested),
		le16_to_cpu(packet->credits_granted),
		le32_to_cpu(packet->data_offset),
		le32_to_cpu(packet->data_length),
		le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smbd_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length = offsetof(struct smbd_data_transfer, padding);

	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(info->id->device,
						 (void *)packet,
						 header_length,
						 DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		mempool_free(request, info->request_mempool);
		rc = -EIO;
		goto err;
	}

	request->sge[0].length = header_length;
	request->sge[0].lkey = info->pd->local_dma_lkey;

	*request_out = request;
	return 0;

err:
	atomic_inc(&info->send_credits);
	return rc;
}

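/*
 * Undo smbd_create_header: unmap the header from DMA, free the request
 * and return the send credit taken when the header was built
 */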
static void smbd_destroy_header(struct smbd_connection *info,
		struct smbd_request *request)
{
	ib_dma_unmap_single(info->id->device,
		request->sge[0].addr,
		request->sge[0].length,
		DMA_TO_DEVICE);
	mempool_free(request, info->request_mempool);
	atomic_inc(&info->send_credits);
}

/* Post the send request */
static int smbd_post_send(struct smbd_connection *info,
		struct smbd_request *request, bool has_payload)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	int rc, i;

	for (i = 0; i < request->num_sge; i++) {
		log_rdma_send(INFO,
			"rdma_request sge[%d] addr=%llu length=%u\n",
			i, request->sge[i].addr, request->sge[i].length);
		ib_dma_sync_single_for_device(
			info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);
	}

	request->cqe.done = send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	if (has_payload) {
		request->has_payload = true;
		atomic_inc(&info->send_payload_pending);
	} else {
		request->has_payload = false;
		atomic_inc(&info->send_pending);
	}

	rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
	if (rc) {
		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
		if (has_payload) {
			if (atomic_dec_and_test(&info->send_payload_pending))
				wake_up(&info->wait_send_payload_pending);
		} else {
			if (atomic_dec_and_test(&info->send_pending))
				wake_up(&info->wait_send_pending);
		}
	} else
		/* Reset timer for idle connection after packet is sent */
		mod_delayed_work(info->workqueue, &info->idle_timer_work,
			info->keep_alive_interval*HZ);

	return rc;
}

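/*
 * Build a data packet from a header plus the pages of a scatterlist and
 * post it as a single send work request; sge[0] holds the header, the
 * remaining SGEs map the payload pages
 */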
static int smbd_post_send_sgl(struct smbd_connection *info,
	struct scatterlist *sgl, int data_length, int remaining_data_length)
{
	int num_sgs;
	int i, rc;
	struct smbd_request *request;
	struct scatterlist *sg;

	rc = smbd_create_header(
		info, data_length, remaining_data_length, &request);
	if (rc)
		return rc;

	num_sgs = sgl ? sg_nents(sgl) : 0;
	for_each_sg(sgl, sg, num_sgs, i) {
		request->sge[i+1].addr =
			ib_dma_map_page(info->id->device, sg_page(sg),
				sg->offset, sg->length, DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(
				info->id->device, request->sge[i+1].addr)) {
			rc = -EIO;
			request->sge[i+1].addr = 0;
			goto dma_mapping_failure;
		}
		request->sge[i+1].length = sg->length;
		request->sge[i+1].lkey = info->pd->local_dma_lkey;
		request->num_sge++;
	}

	rc = smbd_post_send(info, request, data_length);
	if (!rc)
		return 0;

dma_mapping_failure:
	for (i = 1; i < request->num_sge; i++)
		if (request->sge[i].addr)
			ib_dma_unmap_single(info->id->device,
				request->sge[i].addr,
				request->sge[i].length,
				DMA_TO_DEVICE);
	smbd_destroy_header(info, request);
	return rc;
}

/*
 * Send an empty message
 * Empty message is used to extend credits to peer and for keepalive
 * while there is no upper layer payload to send at the time
 */
static int smbd_post_send_empty(struct smbd_connection *info)
{
	info->count_send_empty++;
	return smbd_post_send_sgl(info, NULL, 0, 0);
}

/*
 * Post a receive request to the transport
 * The remote peer can only send data when a receive request is posted
 * The interaction is controlled by send/receive credit system
 */
static int smbd_post_recv(
		struct smbd_connection *info, struct smbd_response *response)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail = NULL;
	int rc = -EIO;

	response->sge.addr = ib_dma_map_single(
		info->id->device, response->packet,
		info->max_receive_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
		return rc;

	response->sge.length = info->max_receive_size;
	response->sge.lkey = info->pd->local_dma_lkey;

	response->cqe.done = recv_done;

	recv_wr.wr_cqe = &response->cqe;
	recv_wr.next = NULL;
	recv_wr.sg_list = &response->sge;
	recv_wr.num_sge = 1;

	rc = ib_post_recv(info->id->qp, &recv_wr, &recv_wr_fail);
	if (rc) {
		ib_dma_unmap_single(info->id->device, response->sge.addr,
			response->sge.length, DMA_FROM_DEVICE);

		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
	}

	return rc;
}

/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
static int smbd_negotiate(struct smbd_connection *info)
{
	int rc;
	struct smbd_response *response = get_receive_buffer(info);

	response->type = SMBD_NEGOTIATE_RESP;
	rc = smbd_post_recv(info, response);
	log_rdma_event(INFO,
		"smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
		"iov.lkey=%x\n",
		rc, response->sge.addr,
		response->sge.length, response->sge.lkey);
	if (rc)
		return rc;

	init_completion(&info->negotiate_completion);
	info->negotiate_done = false;
	rc = smbd_post_send_negotiate_req(info);
	if (rc)
		return rc;

	rc = wait_for_completion_interruptible_timeout(
		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
	log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);

	if (info->negotiate_done)
		return 0;

	if (rc == 0)
		rc = -ETIMEDOUT;
	else if (rc == -ERESTARTSYS)
		rc = -EINTR;
	else
		rc = -ENOTCONN;

	return rc;
}

static void put_empty_packet(
		struct smbd_connection *info, struct smbd_response *response)
{
	spin_lock(&info->empty_packet_queue_lock);
	list_add_tail(&response->list, &info->empty_packet_queue);
	info->count_empty_packet_queue++;
	spin_unlock(&info->empty_packet_queue_lock);

	queue_work(info->workqueue, &info->post_send_credits_work);
}

/*
 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
 * This is a queue for reassembling upper layer payload and presenting it to
 * the upper layer. All incoming payload goes to the reassembly queue,
 * regardless of whether reassembly is required. The upper layer code reads
 * from the queue for all incoming payloads.
 * Put a received packet to the reassembly queue
 * response: the packet received
 * data_length: the size of payload in this packet
 */
static void enqueue_reassembly(
		struct smbd_connection *info,
		struct smbd_response *response,
		int data_length)
{
	spin_lock(&info->reassembly_queue_lock);
	list_add_tail(&response->list, &info->reassembly_queue);
	info->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and list is up to date
	 */
	virt_wmb();
	info->reassembly_data_length += data_length;
	spin_unlock(&info->reassembly_queue_lock);
	info->count_reassembly_queue++;
	info->count_enqueue_reassembly_queue++;
}

/*
 * Get the first entry at the front of reassembly queue
 * Caller is responsible for locking
 * return value: the first entry if any, NULL if queue is empty
 */
static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;

	if (!list_empty(&info->reassembly_queue)) {
		ret = list_first_entry(
			&info->reassembly_queue,
			struct smbd_response, list);
	}
	return ret;
}

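/*
 * Dequeue a buffer from the empty packet queue, used to repost receives
 * for packets that carried no payload
 * return value: the buffer, NULL if the queue is empty
 */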
static struct smbd_response *get_empty_queue_buffer(
		struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
	if (!list_empty(&info->empty_packet_queue)) {
		ret = list_first_entry(
			&info->empty_packet_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_empty_packet_queue--;
	}
	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);

	return ret;
}

/*
 * Get a receive buffer
 * For each remote send, we need to post a receive. The receive buffers are
 * pre-allocated in advance.
 * return value: the receive buffer, NULL if none is available
 */
static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	if (!list_empty(&info->receive_queue)) {
		ret = list_first_entry(
			&info->receive_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_receive_queue--;
		info->count_get_receive_buffer++;
	}
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	return ret;
}

/*
 * Return a receive buffer
 * Upon returning of a receive buffer, we can post new receive and extend
 * more receive credits to remote peer. This is done immediately after a
 * receive buffer is returned.
 */
static void put_receive_buffer(
		struct smbd_connection *info, struct smbd_response *response)
{
	unsigned long flags;

	ib_dma_unmap_single(info->id->device, response->sge.addr,
		response->sge.length, DMA_FROM_DEVICE);

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	list_add_tail(&response->list, &info->receive_queue);
	info->count_receive_queue++;
	info->count_put_receive_buffer++;
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	queue_work(info->workqueue, &info->post_send_credits_work);
}

/* Preallocate all receive buffers on transport establishment */
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
{
	int i;
	struct smbd_response *response;

	INIT_LIST_HEAD(&info->reassembly_queue);
	spin_lock_init(&info->reassembly_queue_lock);
	info->reassembly_data_length = 0;
	info->reassembly_queue_length = 0;

	INIT_LIST_HEAD(&info->receive_queue);
	spin_lock_init(&info->receive_queue_lock);
	info->count_receive_queue = 0;

	INIT_LIST_HEAD(&info->empty_packet_queue);
	spin_lock_init(&info->empty_packet_queue_lock);
	info->count_empty_packet_queue = 0;

	init_waitqueue_head(&info->wait_receive_queues);

	for (i = 0; i < num_buf; i++) {
		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
		if (!response)
			goto allocate_failed;

		response->info = info;
		list_add_tail(&response->list, &info->receive_queue);
		info->count_receive_queue++;
	}

	return 0;

allocate_failed:
	while (!list_empty(&info->receive_queue)) {
		response = list_first_entry(
			&info->receive_queue,
			struct smbd_response, list);
		list_del(&response->list);
		info->count_receive_queue--;

		mempool_free(response, info->response_mempool);
	}
	return -ENOMEM;
}

static void destroy_receive_buffers(struct smbd_connection *info)
{
	struct smbd_response *response;

	while ((response = get_receive_buffer(info)))
		mempool_free(response, info->response_mempool);

	while ((response = get_empty_queue_buffer(info)))
		mempool_free(response, info->response_mempool);
}

/*
 * Check and send an immediate or keep alive packet
 * The conditions to send those packets are defined in [MS-SMBD] 3.1.1.1
 * Connection.KeepaliveRequested and Connection.SendImmediate
 * The idea is to extend credits to server as soon as it becomes available
 */
static void send_immediate_work(struct work_struct *work)
{
	struct smbd_connection *info = container_of(
					work, struct smbd_connection,
					send_immediate_work.work);

	if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
	    info->send_immediate) {
		log_keep_alive(INFO, "send an empty message\n");
		smbd_post_send_empty(info);
	}
}

/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
static void idle_connection_timer(struct work_struct *work)
{
	struct smbd_connection *info = container_of(
					work, struct smbd_connection,
					idle_timer_work.work);

	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
		log_keep_alive(ERR,
			"error status info->keep_alive_requested=%d\n",
			info->keep_alive_requested);
		smbd_disconnect_rdma_connection(info);
		return;
	}

	log_keep_alive(INFO, "about to send an empty idle message\n");
	smbd_post_send_empty(info);

	/* Setup the next idle timeout work */
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);
}

/*
 * Reconnect this SMBD connection, called from upper layer
 * return value: 0 on success, or actual error code
 */
int smbd_reconnect(struct TCP_Server_Info *server)
{
	log_rdma_event(INFO, "reconnecting rdma session\n");

	if (!server->smbd_conn) {
		log_rdma_event(ERR, "rdma session already destroyed\n");
		return -EINVAL;
	}

	/*
	 * This is possible if the transport is disconnected and we haven't
	 * received notification from RDMA, but upper layer has detected timeout
	 */
	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
		log_rdma_event(INFO, "disconnecting transport\n");
		smbd_disconnect_rdma_connection(server->smbd_conn);
	}

	/* wait until the transport is destroyed */
	wait_event(server->smbd_conn->wait_destroy,
		server->smbd_conn->transport_status == SMBD_DESTROYED);

	destroy_workqueue(server->smbd_conn->workqueue);
	kfree(server->smbd_conn);

	log_rdma_event(INFO, "creating rdma session\n");
	server->smbd_conn = smbd_get_connection(
		server, (struct sockaddr *) &server->dstaddr);

	return server->smbd_conn ? 0 : -ENOENT;
}

static void destroy_caches_and_workqueue(struct smbd_connection *info)
{
	destroy_receive_buffers(info);
	destroy_workqueue(info->workqueue);
	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);
}

#define MAX_NAME_LEN 80
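/*
 * Allocate the per-connection request/response slab caches, the mempools
 * sized to the credit limits, the dedicated workqueue and the receive
 * buffers; unwind everything on failure
 */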
static int allocate_caches_and_workqueue(struct smbd_connection *info)
{
	char name[MAX_NAME_LEN];
	int rc;

	snprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
	info->request_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_request) +
				sizeof(struct smbd_data_transfer),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->request_cache)
		return -ENOMEM;

	info->request_mempool =
		mempool_create(info->send_credit_target, mempool_alloc_slab,
			mempool_free_slab, info->request_cache);
	if (!info->request_mempool)
		goto out1;

	snprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
	info->response_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_response) +
				info->max_receive_size,
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->response_cache)
		goto out2;

	info->response_mempool =
		mempool_create(info->receive_credit_max, mempool_alloc_slab,
			mempool_free_slab, info->response_cache);
	if (!info->response_mempool)
		goto out3;

	snprintf(name, MAX_NAME_LEN, "smbd_%p", info);
	info->workqueue = create_workqueue(name);
	if (!info->workqueue)
		goto out4;

	rc = allocate_receive_buffers(info, info->receive_credit_max);
	if (rc) {
		log_rdma_event(ERR, "failed to allocate receive buffers\n");
		goto out5;
	}

	return 0;

out5:
	destroy_workqueue(info->workqueue);
out4:
	mempool_destroy(info->response_mempool);
out3:
	kmem_cache_destroy(info->response_cache);
out2:
	mempool_destroy(info->request_mempool);
out1:
	kmem_cache_destroy(info->request_cache);
	return -ENOMEM;
}

/* Create an SMBD connection, called by upper layer */
struct smbd_connection *_smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
{
	int rc;
	struct smbd_connection *info;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;

	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
	if (!info)
		return NULL;

	info->transport_status = SMBD_CONNECTING;
	rc = smbd_ia_open(info, dstaddr, port);
	if (rc) {
		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
		goto create_id_failed;
	}

	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
		log_rdma_event(ERR,
			"consider lowering send_credit_target = %d. "
			"Possible CQE overrun, device "
			"reporting max_cqe %d max_qp_wr %d\n",
			smbd_send_credit_target,
			info->id->device->attrs.max_cqe,
			info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
		log_rdma_event(ERR,
			"consider lowering receive_credit_max = %d. "
			"Possible CQE overrun, device "
			"reporting max_cqe %d max_qp_wr %d\n",
			smbd_receive_credit_max,
			info->id->device->attrs.max_cqe,
			info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	info->receive_credit_max = smbd_receive_credit_max;
	info->send_credit_target = smbd_send_credit_target;
	info->max_send_size = smbd_max_send_size;
	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
	info->max_receive_size = smbd_max_receive_size;
	info->keep_alive_interval = smbd_keep_alive_interval;

	if (info->id->device->attrs.max_sge < SMBDIRECT_MAX_SGE) {
		log_rdma_event(ERR, "warning: device max_sge = %d too small\n",
			info->id->device->attrs.max_sge);
		log_rdma_event(ERR, "Queue Pair creation may fail\n");
	}

	info->send_cq = NULL;
	info->recv_cq = NULL;
	info->send_cq = ib_alloc_cq(info->id->device, info,
			info->send_credit_target, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->send_cq)) {
		info->send_cq = NULL;
		goto alloc_cq_failed;
	}

	info->recv_cq = ib_alloc_cq(info->id->device, info,
			info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->recv_cq)) {
		info->recv_cq = NULL;
		goto alloc_cq_failed;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = smbd_qp_async_error_upcall;
	qp_attr.qp_context = info;
	qp_attr.cap.max_send_wr = info->send_credit_target;
	qp_attr.cap.max_recv_wr = info->receive_credit_max;
	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
	qp_attr.cap.max_inline_data = 0;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = info->send_cq;
	qp_attr.recv_cq = info->recv_cq;
	qp_attr.port_num = ~0;

	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
	if (rc) {
		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
		goto create_qp_failed;
	}

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = 0;

	conn_param.retry_count = SMBD_CM_RETRY;
	conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
	conn_param.flow_control = 0;
	init_waitqueue_head(&info->wait_destroy);

	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
		&addr_in->sin_addr, port);

	init_waitqueue_head(&info->conn_wait);
	rc = rdma_connect(info->id, &conn_param);
	if (rc) {
		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
		goto rdma_connect_failed;
	}

	wait_event_interruptible(
		info->conn_wait, info->transport_status != SMBD_CONNECTING);

	if (info->transport_status != SMBD_CONNECTED) {
		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
		goto rdma_connect_failed;
	}

	log_rdma_event(INFO, "rdma_connect connected\n");

	rc = allocate_caches_and_workqueue(info);
	if (rc) {
		log_rdma_event(ERR, "cache allocation failed\n");
		goto allocate_cache_failed;
	}

	init_waitqueue_head(&info->wait_send_queue);
	init_waitqueue_head(&info->wait_reassembly_queue);

	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
	INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);

	init_waitqueue_head(&info->wait_send_pending);
	atomic_set(&info->send_pending, 0);

	init_waitqueue_head(&info->wait_send_payload_pending);
	atomic_set(&info->send_payload_pending, 0);

	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
	INIT_WORK(&info->destroy_work, smbd_destroy_rdma_work);
	INIT_WORK(&info->recv_done_work, smbd_recv_done_work);
	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
	info->new_credits_offered = 0;
	spin_lock_init(&info->lock_new_credits_offered);

	rc = smbd_negotiate(info);
	if (rc) {
		log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
		goto negotiation_failed;
	}

	return info;

negotiation_failed:
	cancel_delayed_work_sync(&info->idle_timer_work);
	destroy_caches_and_workqueue(info);
	info->transport_status = SMBD_NEGOTIATE_FAILED;
	init_waitqueue_head(&info->conn_wait);
	rdma_disconnect(info->id);
	wait_event(info->conn_wait,
		info->transport_status == SMBD_DISCONNECTED);

allocate_cache_failed:
rdma_connect_failed:
	rdma_destroy_qp(info->id);

create_qp_failed:
alloc_cq_failed:
	if (info->send_cq)
		ib_free_cq(info->send_cq);
	if (info->recv_cq)
		ib_free_cq(info->recv_cq);

config_failed:
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

create_id_failed:
	kfree(info);
	return NULL;
}

struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr)
{
	struct smbd_connection *ret;
	int port = SMBD_PORT;

try_again:
	ret = _smbd_get_connection(server, dstaddr, port);

	/* Try SMB_PORT if SMBD_PORT doesn't work */
	if (!ret && port == SMBD_PORT) {
		port = SMB_PORT;
		goto try_again;
	}
	return ret;
}