/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

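/* Set up the static parts of every recv work request in the ring: a
 * two-entry sge list whose first entry points at this slot's rds_header
 * and whose second entry is later pointed at a data fragment by the
 * refill path.
 */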
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_recv_work *recv;
        u32 i;

        for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
                struct ib_sge *sge;

                recv->r_ibinc = NULL;
                recv->r_frag = NULL;

                recv->r_wr.next = NULL;
                recv->r_wr.wr_id = i;
                recv->r_wr.sg_list = recv->r_sge;
                recv->r_wr.num_sge = RDS_IB_RECV_SGE;

                sge = &recv->r_sge[0];
                sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_pd->local_dma_lkey;

                sge = &recv->r_sge[1];
                sge->addr = 0;
                sge->length = RDS_FRAG_SIZE;
                sge->lkey = ic->i_pd->local_dma_lkey;
        }
}

/*
 * The entire 'from' list, including the from element itself, is put on
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
                                    struct list_head *to)
{
        struct list_head *from_last = from->prev;

        list_splice_tail(from_last, to);
        list_add_tail(from_last, to);
}

static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
        struct list_head *tmp;

        tmp = xchg(&cache->xfer, NULL);
        if (tmp) {
                if (cache->ready)
                        list_splice_entire_tail(tmp, cache->ready);
                else
                        cache->ready = tmp;
        }
}

static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
{
        struct rds_ib_cache_head *head;
        int cpu;

        cache->percpu = alloc_percpu(struct rds_ib_cache_head);
        if (!cache->percpu)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                head->first = NULL;
                head->count = 0;
        }
        cache->xfer = NULL;
        cache->ready = NULL;

        return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
{
        int ret;

        ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
        if (!ret) {
                ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
                if (ret)
                        free_percpu(ic->i_cache_incs.percpu);
        }

        return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
                                          struct list_head *caller_list)
{
        struct rds_ib_cache_head *head;
        int cpu;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                if (head->first) {
                        list_splice_entire_tail(head->first, caller_list);
                        head->first = NULL;
                }
        }

        if (cache->ready) {
                list_splice_entire_tail(cache->ready, caller_list);
                cache->ready = NULL;
        }
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
        struct rds_ib_incoming *inc;
        struct rds_ib_incoming *inc_tmp;
        struct rds_page_frag *frag;
        struct rds_page_frag *frag_tmp;
        LIST_HEAD(list);

        rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
        free_percpu(ic->i_cache_incs.percpu);

        list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
                list_del(&inc->ii_cache_entry);
                WARN_ON(!list_empty(&inc->ii_frags));
                kmem_cache_free(rds_ib_incoming_slab, inc);
        }

        rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
        rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
        free_percpu(ic->i_cache_frags.percpu);

        list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
                list_del(&frag->f_cache_entry);
                WARN_ON(!list_empty(&frag->f_item));
                kmem_cache_free(rds_ib_frag_slab, frag);
        }
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);

/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
                             struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

        rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct rds_page_frag *pos;
        struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

        /* Free attached frags */
        list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
                list_del_init(&frag->f_item);
                rds_ib_frag_free(ic, frag);
        }
        BUG_ON(!list_empty(&ibinc->ii_frags));

        rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
        rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

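/* Drop one ring entry's reference on its inc and recycle its frag,
 * unmapping the frag's DMA before it goes back on the cache.
 */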
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
                                  struct rds_ib_recv_work *recv)
{
        if (recv->r_ibinc) {
                rds_inc_put(&recv->r_ibinc->ii_inc);
                recv->r_ibinc = NULL;
        }
        if (recv->r_frag) {
                ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
        }
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
        u32 i;

        for (i = 0; i < ic->i_recv_ring.w_nr; i++)
                rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

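/* Take an inc from the per-connection cache, or allocate a fresh one
 * while charging it against rds_ib_sysctl_max_recv_allocation.
 */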
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
                                                     gfp_t slab_mask)
{
        struct rds_ib_incoming *ibinc;
        struct list_head *cache_item;
        int avail_allocs;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
        if (cache_item) {
                ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
        } else {
                avail_allocs = atomic_add_unless(&rds_ib_allocation,
                                                 1, rds_ib_sysctl_max_recv_allocation);
                if (!avail_allocs) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        return NULL;
                }
                ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
                if (!ibinc) {
                        atomic_dec(&rds_ib_allocation);
                        return NULL;
                }
        }
        INIT_LIST_HEAD(&ibinc->ii_frags);
        rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);

        return ibinc;
}

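/* Take a frag from the per-connection cache, or allocate one and attach
 * RDS_FRAG_SIZE worth of page to its scatterlist.
 */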
static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
                                                    gfp_t slab_mask, gfp_t page_mask)
{
        struct rds_page_frag *frag;
        struct list_head *cache_item;
        int ret;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
        if (cache_item) {
                frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
        } else {
                frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
                if (!frag)
                        return NULL;

                sg_init_table(&frag->f_sg, 1);
                ret = rds_page_remainder_alloc(&frag->f_sg,
                                               RDS_FRAG_SIZE, page_mask);
                if (ret) {
                        kmem_cache_free(rds_ib_frag_slab, frag);
                        return NULL;
                }
        }

        INIT_LIST_HEAD(&frag->f_item);

        return frag;
}

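/* Make sure one recv work entry has an inc and a DMA-mapped frag, then
 * point its two sges at this slot's rds_header and at the mapped
 * fragment.
 */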
static int rds_ib_recv_refill_one(struct rds_connection *conn,
                                  struct rds_ib_recv_work *recv, gfp_t gfp)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_sge *sge;
        int ret = -ENOMEM;
        gfp_t slab_mask = GFP_NOWAIT;
        gfp_t page_mask = GFP_NOWAIT;

        if (gfp & __GFP_DIRECT_RECLAIM) {
                slab_mask = GFP_KERNEL;
                page_mask = GFP_HIGHUSER;
        }

        if (!ic->i_cache_incs.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        if (!ic->i_cache_frags.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

        /*
         * ibinc was taken from recv if recv contained the start of a message.
         * recvs that were continuations will still have this allocated.
         */
        if (!recv->r_ibinc) {
                recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
                if (!recv->r_ibinc)
                        goto out;
        }

        WARN_ON(recv->r_frag); /* leak! */
        recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
        if (!recv->r_frag)
                goto out;

        ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
                            1, DMA_FROM_DEVICE);
        WARN_ON(ret != 1);

        sge = &recv->r_sge[0];
        sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
        sge->length = sizeof(struct rds_header);

        sge = &recv->r_sge[1];
        sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
        sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);

        ret = 0;
out:
        return ret;
}

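/* RDS_RECV_REFILL acts as a trylock so that only one caller refills the
 * ring at a time; release_refill() wakes anyone sleeping on the
 * connection waitqueue for the bit to clear.
 */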
static int acquire_refill(struct rds_connection *conn)
{
        return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
}

static void release_refill(struct rds_connection *conn)
{
        clear_bit(RDS_RECV_REFILL, &conn->c_flags);

        /* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare.  We don't want to walk
         * the system-wide hashed waitqueue buckets in the fast path only to
         * almost never find waiters.
         */
        if (waitqueue_active(&conn->c_waitq))
                wake_up_all(&conn->c_waitq);
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 *
 * If posting fails due to temporary resource exhaustion, the unused ring slot
 * is released and the refill is retried later from the worker thread once the
 * ring has run low or empty.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_recv_work *recv;
        struct ib_recv_wr *failed_wr;
        unsigned int posted = 0;
        int ret = 0;
        bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
        u32 pos;

        /* the goal here is to just make sure that someone, somewhere
         * is posting buffers.  If we can't get the refill lock,
         * let them do their thing
         */
        if (!acquire_refill(conn))
                return;

        while ((prefill || rds_conn_up(conn)) &&
               rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                if (pos >= ic->i_recv_ring.w_nr) {
                        printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
                               pos);
                        break;
                }

                recv = &ic->i_recvs[pos];
                ret = rds_ib_recv_refill_one(conn, recv, gfp);
                if (ret) {
                        break;
                }

                /* XXX when can this fail? */
                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
                rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
                         recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
                         (long) ib_sg_dma_address(
                                ic->i_cm_id->device,
                                &recv->r_frag->f_sg),
                         ret);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI4 returned %d, disconnecting and "
                               "reconnecting\n", &conn->c_faddr,
                               ret);
                        break;
                }

                posted++;
        }

        /* We're doing flow control - update the window. */
        if (ic->i_flowctl && posted)
                rds_ib_advertise_credits(conn, posted);

        if (ret)
                rds_ib_ring_unalloc(&ic->i_recv_ring, 1);

        release_refill(conn);

        /* if we're called from the softirq handler, we'll be GFP_NOWAIT.
         * in this case the ring being low is going to lead to more interrupts
         * and we can safely let the softirq code take care of it unless the
         * ring is completely empty.
         *
         * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
         * we might have raced with the softirq code while we had the refill
         * lock held. Use rds_ib_ring_low() instead of ring_empty to decide
         * if we should requeue.
         */
        if (rds_conn_up(conn) &&
            ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
            rds_ib_ring_empty(&ic->i_recv_ring))) {
                queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
        }
}

452 | /* |
453 | * We want to recycle several types of recv allocations, like incs and frags. | |
454 | * To use this, the *_free() function passes in the ptr to a list_head within | |
455 | * the recyclee, as well as the cache to put it on. | |
456 | * | |
457 | * First, we put the memory on a percpu list. When this reaches a certain size, | |
458 | * We move it to an intermediate non-percpu list in a lockless manner, with some | |
459 | * xchg/compxchg wizardry. | |
460 | * | |
461 | * N.B. Instead of a list_head as the anchor, we use a single pointer, which can | |
462 | * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and | |
463 | * list_empty() will return true with one element is actually present. | |
464 | */ | |
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache)
{
        unsigned long flags;
        struct list_head *old, *chpfirst;

        local_irq_save(flags);

        chpfirst = __this_cpu_read(cache->percpu->first);
        if (!chpfirst)
                INIT_LIST_HEAD(new_item);
        else /* put on front */
                list_add_tail(new_item, chpfirst);

        __this_cpu_write(cache->percpu->first, new_item);
        __this_cpu_inc(cache->percpu->count);

        if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
                goto end;

        /*
         * Return our per-cpu first list to the cache's xfer by atomically
         * grabbing the current xfer list, appending it to our per-cpu list,
         * and then atomically returning that entire list back to the
         * cache's xfer list as long as it's still empty.
         */
        do {
                old = xchg(&cache->xfer, NULL);
                if (old)
                        list_splice_entire_tail(old, chpfirst);
                old = cmpxchg(&cache->xfer, NULL, chpfirst);
        } while (old);

        __this_cpu_write(cache->percpu->first, NULL);
        __this_cpu_write(cache->percpu->count, 0);
end:
        local_irq_restore(flags);
}

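/* Pop one entry off the cache's ready list, or return NULL if it is
 * empty. Because the anchor is a bare pointer rather than a list_head,
 * the last remaining entry looks "empty" to list_empty() and is handled
 * by dropping the ready pointer entirely.
 */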
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
        struct list_head *head = cache->ready;

        if (head) {
                if (!list_empty(head)) {
                        cache->ready = head->next;
                        list_del_init(head);
                } else
                        cache->ready = NULL;
        }

        return head;
}

int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        unsigned long to_copy;
        unsigned long frag_off = 0;
        int copied = 0;
        int ret;
        u32 len;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);

        while (iov_iter_count(to) && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
                to_copy = min_t(unsigned long, iov_iter_count(to),
                                RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                /* XXX needs + offset for multiple recvs per page */
                rds_stats_add(s_copy_to_user, to_copy);
                ret = copy_page_to_iter(sg_page(&frag->f_sg),
                                        frag->f_sg.offset + frag_off,
                                        to_copy,
                                        to);
                if (ret != to_copy)
                        return -EFAULT;

                frag_off += to_copy;
                copied += to_copy;
        }

        return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
        struct ib_send_wr *wr = &ic->i_ack_wr;
        struct ib_sge *sge = &ic->i_ack_sge;

        sge->addr = ic->i_ack_dma;
        sge->length = sizeof(struct rds_header);
        sge->lkey = ic->i_pd->local_dma_lkey;

        wr->sg_list = sge;
        wr->num_sge = 1;
        wr->opcode = IB_WR_SEND;
        wr->wr_id = RDS_IB_ACK_WR_ID;
        wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        ic->i_ack_next = seq;
        if (ack_required)
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        unsigned long flags;
        u64 seq;

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        seq = ic->i_ack_next;
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);

        return seq;
}
#else
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
        atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_atomic();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_atomic();

        return atomic64_read(&ic->i_ack_next);
}
#endif

static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
        struct rds_header *hdr = ic->i_ack;
        struct ib_send_wr *failed_wr;
        u64 seq;
        int ret;

        seq = rds_ib_get_ack(ic);

        rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
        rds_message_populate_header(hdr, 0, 0, 0);
        hdr->h_ack = cpu_to_be64(seq);
        hdr->h_credit = adv_credits;
        rds_message_make_checksum(hdr);
        ic->i_ack_queued = jiffies;

        ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
        if (unlikely(ret)) {
                /* Failed to send. Release the WR, and
                 * force another ACK.
                 */
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

                rds_ib_stats_inc(s_ib_ack_send_failure);

                rds_ib_conn_error(ic->conn, "sending ack failed\n");
        } else
                rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
        unsigned int adv_credits;

        if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                return;

        if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
                rds_ib_stats_inc(s_ib_ack_send_delayed);
                return;
        }

        /* Can we get a send credit? */
        if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
                rds_ib_stats_inc(s_ib_tx_throttle);
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                return;
        }

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
        rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
        if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                rds_ib_stats_inc(s_ib_ack_send_piggybacked);
        return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
                             struct rds_ib_incoming *ibinc)
{
        struct rds_cong_map *map;
        unsigned int map_off;
        unsigned int map_page;
        struct rds_page_frag *frag;
        unsigned long frag_off;
        unsigned long to_copy;
        unsigned long copied;
        uint64_t uncongested = 0;
        void *addr;

        /* catch completely corrupt packets */
        if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
                return;

        map = conn->c_fcong;
        map_page = 0;
        map_off = 0;

        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        frag_off = 0;

        copied = 0;

        while (copied < RDS_CONG_MAP_BYTES) {
                uint64_t *src, *dst;
                unsigned int k;

                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

                addr = kmap_atomic(sg_page(&frag->f_sg));

                src = addr + frag->f_sg.offset + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                for (k = 0; k < to_copy; k += 8) {
                        /* Record ports that became uncongested, ie
                         * bits that changed from 0 to 1. */
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
                kunmap_atomic(addr);

                copied += to_copy;

                map_off += to_copy;
                if (map_off == PAGE_SIZE) {
                        map_off = 0;
                        map_page++;
                }

                frag_off += to_copy;
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
        }

        /* the congestion map is in little endian order */
        uncongested = le64_to_cpu(uncongested);

        rds_cong_map_updated(map, uncongested);
}

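/* Handle one completed recv: validate the header, absorb ACK-only
 * frames, and chain data fragments onto the connection's current inc,
 * passing the inc up to the core once its last fragment has arrived.
 */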
static void rds_ib_process_recv(struct rds_connection *conn,
                                struct rds_ib_recv_work *recv, u32 data_len,
                                struct rds_ib_ack_state *state)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_incoming *ibinc = ic->i_ibinc;
        struct rds_header *ihdr, *hdr;

        /* XXX shut down the connection if port 0,0 are seen? */

        rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
                 data_len);

        if (data_len < sizeof(struct rds_header)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                return;
        }
        data_len -= sizeof(struct rds_header);

        ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

        /* Validate the checksum. */
        if (!rds_message_verify_checksum(ihdr)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 has corrupted header - "
                       "forcing a reconnect\n",
                       &conn->c_faddr);
                rds_stats_inc(s_recv_drop_bad_checksum);
                return;
        }

        /* Process the ACK sequence which comes with every packet */
        state->ack_recv = be64_to_cpu(ihdr->h_ack);
        state->ack_recv_valid = 1;

        /* Process the credits update if there was one */
        if (ihdr->h_credit)
                rds_ib_send_add_credits(conn, ihdr->h_credit);

        if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
                /* This is an ACK-only packet. The reason it gets
                 * special treatment here is that, historically, ACKs
                 * were rather special beasts.
                 */
                rds_ib_stats_inc(s_ib_ack_received);

                /*
                 * Usually the frags make their way on to incs and are then freed as
                 * the inc is freed.  We don't go that route, so we have to drop the
                 * page ref ourselves.  We can't just leave the page on the recv
                 * because that confuses the dma mapping of pages and each recv's use
                 * of a partial page.
                 *
                 * FIXME: Fold this into the code path below.
                 */
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
                return;
        }

        /*
         * If we don't already have an inc on the connection then this
         * fragment has a header and starts a message.  Copy its header
         * into the inc and save the inc so we can hang upcoming fragments
         * off its list.
         */
        if (!ibinc) {
                ibinc = recv->r_ibinc;
                recv->r_ibinc = NULL;
                ic->i_ibinc = ibinc;

                hdr = &ibinc->ii_inc.i_hdr;
                memcpy(hdr, ihdr, sizeof(*hdr));
                ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

                rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
                         ic->i_recv_data_rem, hdr->h_flags);
        } else {
                hdr = &ibinc->ii_inc.i_hdr;
                /* We can't just use memcmp here; fragments of a
                 * single message may carry different ACKs */
                if (hdr->h_sequence != ihdr->h_sequence ||
                    hdr->h_len != ihdr->h_len ||
                    hdr->h_sport != ihdr->h_sport ||
                    hdr->h_dport != ihdr->h_dport) {
                        rds_ib_conn_error(conn,
                                "fragment header mismatch; forcing reconnect\n");
                        return;
                }
        }

        list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
        recv->r_frag = NULL;

        if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
                ic->i_recv_data_rem -= RDS_FRAG_SIZE;
        else {
                ic->i_recv_data_rem = 0;
                ic->i_ibinc = NULL;

                if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
                        rds_ib_cong_recv(conn, ibinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
                                          &ibinc->ii_inc, GFP_ATOMIC);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }

                /* Evaluate the ACK_REQUIRED flag *after* we received
                 * the complete frame, and after bumping the next_rx
                 * sequence. */
                if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
                        rds_stats_inc(s_recv_ack_required);
                        state->ack_required = 1;
                }

                rds_inc_put(&ibinc->ii_inc);
        }
}

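/* Handle one receive completion: unmap the frag, process the message if
 * the work request succeeded, and give the ring entry back so the
 * refill path can repost it.
 */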
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
                             struct ib_wc *wc,
                             struct rds_ib_ack_state *state)
{
        struct rds_connection *conn = ic->conn;
        struct rds_ib_recv_work *recv;

        rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
                 (unsigned long long)wc->wr_id, wc->status,
                 ib_wc_status_msg(wc->status), wc->byte_len,
                 be32_to_cpu(wc->ex.imm_data));

        rds_ib_stats_inc(s_ib_rx_cq_event);
        recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
        ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
                        DMA_FROM_DEVICE);

        /* Also process recvs in connecting state because it is possible
         * to get a recv completion _before_ the rdmacm ESTABLISHED
         * event is processed.
         */
        if (wc->status == IB_WC_SUCCESS) {
                rds_ib_process_recv(conn, recv, wc->byte_len, state);
        } else {
                /* We expect errors as the qp is drained during shutdown */
                if (rds_conn_up(conn) || rds_conn_connecting(conn))
                        rds_ib_conn_error(conn, "recv completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
                                          &conn->c_faddr,
                                          wc->status,
                                          ib_wc_status_msg(wc->status));
        }

        /* rds_ib_process_recv() doesn't always consume the frag, and
         * we might not have called it at all if the wc didn't indicate
         * success. We already unmapped the frag's pages, though, and
         * the following rds_ib_ring_free() call tells the refill path
         * that it will not find an allocated frag here. Make sure we
         * keep that promise by freeing a frag that's still on the ring.
         */
        if (recv->r_frag) {
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
        }
        rds_ib_ring_free(&ic->i_recv_ring, 1);

        /* If we ever end up with a really empty receive ring, we're
         * in deep trouble, as the sender will definitely see RNR
         * timeouts. */
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);

        if (rds_ib_ring_low(&ic->i_recv_ring))
                rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
}

int rds_ib_recv_path(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;
        struct rds_ib_connection *ic = conn->c_transport_data;
        int ret = 0;

        rdsdebug("conn %p\n", conn);
        if (rds_conn_up(conn)) {
                rds_ib_attempt_ack(ic);
                rds_ib_recv_refill(conn, 0, GFP_KERNEL);
        }

        return ret;
}

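/* Module init: size the global receive-memory limit from total RAM and
 * create the slab caches used for incs and frags.
 */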
int rds_ib_recv_init(void)
{
        struct sysinfo si;
        int ret = -ENOMEM;

        /* Default to roughly a third of all available RAM for recv memory */
        si_meminfo(&si);
        rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

        rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
                                        sizeof(struct rds_ib_incoming),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_incoming_slab)
                goto out;

        rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
                                        sizeof(struct rds_page_frag),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_frag_slab) {
                kmem_cache_destroy(rds_ib_incoming_slab);
                rds_ib_incoming_slab = NULL;
        } else
                ret = 0;
out:
        return ret;
}

void rds_ib_recv_exit(void)
{
        kmem_cache_destroy(rds_ib_incoming_slab);
        kmem_cache_destroy(rds_ib_frag_slab);
}