/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device *device;
	struct rds_ib_mr_pool *pool;
	struct ib_fmr *fmr;
	struct list_head list;
	unsigned int remap_count;

	struct scatterlist *sg;
	unsigned int sg_len;
	u64 *dma;
	int sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex flush_lock;		/* serialize fmr invalidate */
	struct work_struct flush_worker;	/* flush worker */

	spinlock_t list_lock;			/* protect variables below */
	atomic_t item_count;			/* total # of MRs */
	atomic_t dirty_count;			/* # of dirty MRs */
	struct list_head drop_list;		/* MRs that have reached their max_maps limit */
	struct list_head free_list;		/* unused MRs */
	struct list_head clean_list;		/* unused & unmapped MRs */
	atomic_t free_pinned;			/* memory pinned by free MRs */
	unsigned long max_items;
	unsigned long max_items_soft;
	unsigned long max_free_pinned;
	struct ib_fmr_attr fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

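/*
 * Look up the rds_ib_device bound to @ipaddr, if any. Each device's
 * ipaddr_list is walked under rcu_read_lock() to synchronize against
 * concurrent updates in rds_ib_add_ipaddr()/rds_ib_remove_ipaddr().
 */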
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		rcu_read_lock();
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
		rcu_read_unlock();
	}

	return NULL;
}

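/*
 * Bind an IP address to this device. Writers serialize on the device
 * spinlock; lockless readers are handled by the RCU list primitives.
 */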
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof(*i_ipaddr), GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

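/*
 * Unbind an IP address: unlink the entry under the device spinlock,
 * then wait out an RCU grace period before freeing it so that lockless
 * readers in rds_ib_get_device() never see freed memory.
 */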
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free) {
		synchronize_rcu();
		kfree(to_free);
	}
}

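/* Rebind an IP address to this device, dropping any stale binding first. */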
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old)
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

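/*
 * Attach a connection to its device: take it off the global
 * ib_nodev_conns list and onto the device's conn_list.
 */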
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	/* Use a plain spin_lock for the nested lock: a spin_unlock_irq()
	 * here would re-enable interrupts while ib_nodev_conns_lock is
	 * still held. */
	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
}

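/*
 * Detach a connection from its device and park it back on the global
 * ib_nodev_conns list.
 */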
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
}

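/*
 * Destroy every connection on @list. The entries are spliced onto a
 * private list first so rds_conn_destroy() runs with @list_lock
 * released and interrupts enabled.
 */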
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

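/*
 * Allocate and initialize the per-device FMR pool. The limits are
 * derived from the device: max_items caps the pool at max_fmrs, and
 * max_items_soft (3/4 of that) is where freeing turns aggressive.
 */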
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->drop_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_ib_flush_mr_pool(pool, 1);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

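/* Take an unused, already-unmapped MR off the clean list, if one exists. */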
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
		list_del_init(&ibmr->list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

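/*
 * Get an MR for use: reuse a clean one when available, otherwise
 * allocate a fresh FMR. If the pool is at its hard limit, dirty MRs
 * are flushed and the allocation retried, at most twice, before
 * giving up with -EAGAIN.
 */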
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the
		 * driver tells us we can't use more than N fmrs, we
		 * shouldn't start arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

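/*
 * DMA-map a scatterlist and program the FMR to cover it. The region
 * must be contiguous in page terms: only the first entry may start,
 * and only the last entry may end, off a page boundary.
 */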
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages = NULL;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		/* Only the first entry may start unaligned, and only
		 * the last entry may end unaligned. */
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ret = -EINVAL;
				goto out;
			}
			++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ret = -EINVAL;
				goto out;
			}
			++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size) {
		ret = -EINVAL;
		goto out;
	}

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages) {
		ret = -ENOMEM;
		goto out;
	}

	/* Build the page list the FMR will be mapped over. */
	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	/* On failure we must undo the DMA mapping set up above ourselves;
	 * nothing else will, since ibmr->sg_dma_len was never updated. */
	if (ret)
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	kfree(dma_pages);

	return ret;
}

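/*
 * Sync the MR's pages for CPU or device access. The scatterlist was
 * mapped DMA_BIDIRECTIONAL, so both branches sync with that direction.
 */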
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

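/*
 * Undo the MR's current mapping: unmap the scatterlist, then dirty and
 * unpin every page. Dirtying pages must not happen with interrupts
 * disabled, hence the BUG_ON(irqs_disabled()) below.
 */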
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

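/* Tear down a mapping and update the pool's pinned-page accounting. */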
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list. */
	list_splice_init(&pool->free_list, &unmap_list);
	list_splice_init(&pool->drop_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &unmap_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

	rds_ib_flush_mr_pool(pool, 0);
}

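/*
 * Return an MR to the pool. MRs that have hit their remap limit go on
 * the drop list to be destroyed; the rest go on the free list for
 * reuse. A background flush is kicked when too much memory is pinned,
 * and invalidate requests trigger a synchronous flush where possible.
 */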
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	unsigned long flags;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		list_add(&ibmr->list, &pool->drop_list);
	else
		list_add(&ibmr->list, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0);
	}
}

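/*
 * rds_ib_get_mr() is the transport entry point for MR registration:
 * find the device the socket is bound to, get an FMR from its pool,
 * map the caller's pages, and hand back the rkey via @key_ret.
 */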
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;

out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}