/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: fmr_pool.c 1349 2004-12-16 21:09:43Z roland $
 */
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <ib_fmr_pool.h>

#include "core_priv.h"
enum {
	IB_FMR_MAX_REMAPS = 32,

	IB_FMR_HASH_BITS  = 8,
	IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
	IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};
/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < IB_FMR_MAX_REMAPS) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
struct ib_fmr_pool {
	spinlock_t                pool_lock;

	int                       pool_size;
	int                       max_pages;
	int                       dirty_watermark;
	int                       dirty_len;
	struct list_head          free_list;
	struct list_head          dirty_list;
	struct hlist_head        *cache_bucket;

	void                     (*flush_function)(struct ib_fmr_pool *pool,
						   void               *arg);
	void                     *flush_arg;

	struct task_struct       *thread;

	atomic_t                  req_ser;
	atomic_t                  flush_ser;

	wait_queue_head_t         force_wait;
};
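/*
 * Illustrative sketch, not part of the original driver: a debug
 * helper that checks the invariant described in the comment above --
 * an FMR sitting on free_list must have ref_count == 0.  The name
 * ib_fmr_pool_audit_free_list and the DEBUG guard are our own
 * additions; the caller must hold pool_lock.
 */
#ifdef DEBUG
static void ib_fmr_pool_audit_free_list(struct ib_fmr_pool *pool)
{
	struct ib_pool_fmr *fmr;

	/* Every FMR parked on free_list must have dropped its last reference. */
	list_for_each_entry(fmr, &pool->free_list, list)
		if (fmr->ref_count != 0)
			printk(KERN_WARNING "free FMR %p has ref_count %d\n",
			       fmr, fmr->ref_count);
}
#endif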
static inline u32 ib_fmr_hash(u64 first_page)
{
	return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
		(IB_FMR_HASH_SIZE - 1);
}
/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
						      u64                *page_list,
						      int                 page_list_len,
						      u64                 io_virtual_address)
{
	struct hlist_head *bucket;
	struct ib_pool_fmr *fmr;
	struct hlist_node *pos;

	if (!pool->cache_bucket)
		return NULL;

	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

	hlist_for_each_entry(fmr, pos, bucket, cache_node)
		if (io_virtual_address == fmr->io_virtual_address &&
		    page_list_len      == fmr->page_list_len      &&
		    !memcmp(page_list, fmr->page_list,
			    page_list_len * sizeof *page_list))
			return fmr;

	return NULL;
}
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
	int                 ret;
	struct ib_pool_fmr *fmr;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);

	spin_lock_irq(&pool->pool_lock);

	list_for_each_entry(fmr, &pool->dirty_list, list) {
		hlist_del_init(&fmr->cache_node);
		fmr->remap_count = 0;
		list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
		if (fmr->ref_count != 0)
			printk(KERN_WARNING "Unmapping FMR %p with ref count %d\n",
			       fmr, fmr->ref_count);
#endif
	}

	list_splice(&pool->dirty_list, &unmap_list);
	INIT_LIST_HEAD(&pool->dirty_list);
	pool->dirty_len = 0;

	spin_unlock_irq(&pool->pool_lock);

	if (list_empty(&unmap_list))
		return;

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "ib_unmap_fmr returned %d\n", ret);

	spin_lock_irq(&pool->pool_lock);
	list_splice(&unmap_list, &pool->free_list);
	spin_unlock_irq(&pool->pool_lock);
}
static int ib_fmr_cleanup_thread(void *pool_ptr)
{
	struct ib_fmr_pool *pool = pool_ptr;

	do {
		if (pool->dirty_len >= pool->dirty_watermark ||
		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
			ib_fmr_batch_release(pool);

			atomic_inc(&pool->flush_ser);
			wake_up_interruptible(&pool->force_wait);

			if (pool->flush_function)
				pool->flush_function(pool, pool->flush_arg);
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (pool->dirty_len < pool->dirty_watermark &&
		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
		    !kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	return 0;
}
/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Returns a pointer to the new pool or an
 * ERR_PTR-encoded error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
				       struct ib_fmr_pool_param *params)
{
	struct ib_device   *device;
	struct ib_fmr_pool *pool;
	int i;
	int ret;

	if (!params)
		return ERR_PTR(-EINVAL);

	device = pd->device;
	if (!device->alloc_fmr    || !device->dealloc_fmr  ||
	    !device->map_phys_fmr || !device->unmap_fmr) {
		printk(KERN_WARNING "Device %s does not support fast memory regions\n",
		       device->name);
		return ERR_PTR(-ENOSYS);
	}

	pool = kmalloc(sizeof *pool, GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING "couldn't allocate pool struct\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->cache_bucket   = NULL;

	pool->flush_function = params->flush_function;
	pool->flush_arg      = params->flush_arg;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->dirty_list);

	if (params->cache) {
		pool->cache_bucket =
			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
				GFP_KERNEL);
		if (!pool->cache_bucket) {
			printk(KERN_WARNING "Failed to allocate cache in pool\n");
			ret = -ENOMEM;
			goto out_free_pool;
		}

		for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
			INIT_HLIST_HEAD(pool->cache_bucket + i);
	}

	pool->pool_size       = 0;
	pool->max_pages       = params->max_pages_per_fmr;
	pool->dirty_watermark = params->dirty_watermark;
	pool->dirty_len       = 0;
	spin_lock_init(&pool->pool_lock);
	atomic_set(&pool->req_ser,   0);
	atomic_set(&pool->flush_ser, 0);
	init_waitqueue_head(&pool->force_wait);

	pool->thread = kthread_create(ib_fmr_cleanup_thread,
				      pool,
				      "ib_fmr(%s)",
				      device->name);
	if (IS_ERR(pool->thread)) {
		printk(KERN_WARNING "couldn't start cleanup thread\n");
		ret = PTR_ERR(pool->thread);
		goto out_free_pool;
	}

	{
		struct ib_pool_fmr *fmr;
		struct ib_fmr_attr attr = {
			.max_pages = params->max_pages_per_fmr,
			.max_maps  = IB_FMR_MAX_REMAPS,
			.page_size = PAGE_SHIFT
		};

		for (i = 0; i < params->pool_size; ++i) {
			fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
				      GFP_KERNEL);
			if (!fmr) {
				printk(KERN_WARNING "failed to allocate fmr struct "
				       "for FMR %d\n", i);
				goto out_fail;
			}

			fmr->pool        = pool;
			fmr->remap_count = 0;
			fmr->ref_count   = 0;
			INIT_HLIST_NODE(&fmr->cache_node);

			fmr->fmr = ib_alloc_fmr(pd, params->access, &attr);
			if (IS_ERR(fmr->fmr)) {
				printk(KERN_WARNING "fmr_create failed for FMR %d\n", i);
				kfree(fmr);
				goto out_fail;
			}

			list_add_tail(&fmr->list, &pool->free_list);
			++pool->pool_size;
		}
	}

	return pool;

 out_free_pool:
	kfree(pool->cache_bucket);
	kfree(pool);

	return ERR_PTR(ret);

 out_fail:
	ib_destroy_fmr_pool(pool);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
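/*
 * Illustrative usage sketch, not part of the original file: creating
 * a pool of 32 FMRs of up to 16 pages each on a caller-supplied
 * protection domain.  The example_create_pool name and the parameter
 * values are hypothetical.
 */
static struct ib_fmr_pool *example_create_pool(struct ib_pd *pd)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 16,
		.access            = IB_ACCESS_LOCAL_WRITE,
		.pool_size         = 32,
		.dirty_watermark   = 8,
		.flush_function    = NULL,
		.flush_arg         = NULL,
		.cache             = 1
	};

	/* Returns the new pool or an ERR_PTR value on failure. */
	return ib_create_fmr_pool(pd, &params);
}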
/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
int ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
	struct ib_pool_fmr *fmr;
	struct ib_pool_fmr *tmp;
	int                 i;

	kthread_stop(pool->thread);
	ib_fmr_batch_release(pool);

	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
		++i;
	}

	if (i < pool->pool_size)
		printk(KERN_WARNING "pool still has %d regions registered\n",
		       pool->pool_size - i);

	kfree(pool->cache_bucket);
	kfree(pool);

	return 0;
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);
/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
	int serial;

	atomic_inc(&pool->req_ser);
	/*
	 * It's OK if someone else bumps req_ser again here -- we'll
	 * just wait a little longer.
	 */
	serial = atomic_read(&pool->req_ser);

	wake_up_process(pool->thread);

	if (wait_event_interruptible(pool->force_wait,
				     atomic_read(&pool->flush_ser) - serial >= 0))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
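/*
 * Illustrative sketch, not part of the original file: a caller that
 * must guarantee no stale mappings remain (for example before
 * reporting an I/O error upward) forces a flush and handles the
 * interrupted case.  example_quiesce is a hypothetical name.
 */
static int example_quiesce(struct ib_fmr_pool *pool)
{
	int ret;

	/* Blocks until the cleanup thread's flush_ser catches up. */
	ret = ib_flush_fmr_pool(pool);
	if (ret)
		return ret;	/* -EINTR: interrupted by a signal */

	return 0;
}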
/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
					 u64                *page_list,
					 int                 list_len,
					 u64                *io_virtual_address)
{
	struct ib_fmr_pool *pool = pool_handle;
	struct ib_pool_fmr *fmr;
	unsigned long       flags;
	int                 result;

	if (list_len < 1 || list_len > pool->max_pages)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&pool->pool_lock, flags);
	fmr = ib_fmr_cache_lookup(pool,
				  page_list,
				  list_len,
				  *io_virtual_address);
	if (fmr) {
		/* found in cache */
		++fmr->ref_count;
		if (fmr->ref_count == 1)
			list_del(&fmr->list);

		spin_unlock_irqrestore(&pool->pool_lock, flags);

		return fmr;
	}

	if (list_empty(&pool->free_list)) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return ERR_PTR(-EAGAIN);
	}

	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
	list_del(&fmr->list);
	hlist_del_init(&fmr->cache_node);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
				 *io_virtual_address);

	if (result) {
		spin_lock_irqsave(&pool->pool_lock, flags);
		list_add(&fmr->list, &pool->free_list);
		spin_unlock_irqrestore(&pool->pool_lock, flags);

		printk(KERN_WARNING "fmr_map returns %d\n", result);

		return ERR_PTR(result);
	}

	++fmr->remap_count;
	fmr->ref_count = 1;

	if (pool->cache_bucket) {
		fmr->io_virtual_address = *io_virtual_address;
		fmr->page_list_len      = list_len;
		memcpy(fmr->page_list, page_list, list_len * sizeof (*page_list));

		spin_lock_irqsave(&pool->pool_lock, flags);
		hlist_add_head(&fmr->cache_node,
			       pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
		spin_unlock_irqrestore(&pool->pool_lock, flags);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
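/*
 * Illustrative sketch, not part of the original file: mapping a
 * two-page buffer through the pool.  The page addresses and the
 * example_map name are hypothetical; real callers pass the DMA
 * addresses of their buffer's pages.
 */
static struct ib_pool_fmr *example_map(struct ib_fmr_pool *pool)
{
	u64 page_list[2] = { 0x100000ULL, 0x101000ULL };
	u64 io_addr      = 0x100000ULL;
	struct ib_pool_fmr *pfmr;

	pfmr = ib_fmr_pool_map_phys(pool, page_list, 2, &io_addr);
	if (IS_ERR(pfmr))
		return pfmr;	/* -EAGAIN when free_list is empty */

	/* pfmr->fmr->lkey and pfmr->fmr->rkey may now be used in work requests. */
	return pfmr;
}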
/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
	struct ib_fmr_pool *pool;
	unsigned long flags;

	pool = fmr->pool;

	spin_lock_irqsave(&pool->pool_lock, flags);

	--fmr->ref_count;
	if (!fmr->ref_count) {
		if (fmr->remap_count < IB_FMR_MAX_REMAPS) {
			list_add_tail(&fmr->list, &pool->free_list);
		} else {
			list_add_tail(&fmr->list, &pool->dirty_list);
			++pool->dirty_len;
			wake_up_process(pool->thread);
		}
	}

#ifdef DEBUG
	if (fmr->ref_count < 0)
		printk(KERN_WARNING "FMR %p has ref count %d < 0\n",
		       fmr, fmr->ref_count);
#endif

	spin_unlock_irqrestore(&pool->pool_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
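/*
 * Illustrative sketch, not part of the original file: releasing a
 * mapping once the last work request using it has completed, then
 * optionally forcing invalidation.  example_release is a
 * hypothetical name.
 */
static void example_release(struct ib_fmr_pool *pool, struct ib_pool_fmr *pfmr)
{
	/* Drop our reference; the FMR returns to free_list or dirty_list. */
	ib_fmr_pool_unmap(pfmr);

	/* Only if the mapping must be dead right now, pay for a full flush. */
	ib_flush_fmr_pool(pool);
}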