1 | /* | |
2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
34 | #include <linux/errno.h> | |
35 | #include <linux/slab.h> | |
36 | #include <linux/mm.h> | |
37 | #include <linux/export.h> | |
38 | #include <linux/bitmap.h> | |
39 | #include <linux/dma-mapping.h> | |
40 | #include <linux/vmalloc.h> | |
41 | ||
42 | #include "mlx4.h" | |
43 | ||
44 | u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) | |
45 | { | |
46 | u32 obj; | |
47 | ||
48 | spin_lock(&bitmap->lock); | |
49 | ||
50 | obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); | |
51 | if (obj >= bitmap->max) { | |
52 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) | |
53 | & bitmap->mask; | |
54 | obj = find_first_zero_bit(bitmap->table, bitmap->max); | |
55 | } | |
56 | ||
57 | if (obj < bitmap->max) { | |
58 | set_bit(obj, bitmap->table); | |
59 | bitmap->last = (obj + 1); | |
60 | if (bitmap->last == bitmap->max) | |
61 | bitmap->last = 0; | |
62 | obj |= bitmap->top; | |
63 | } else | |
64 | obj = -1; | |
65 | ||
66 | if (obj != -1) | |
67 | --bitmap->avail; | |
68 | ||
69 | spin_unlock(&bitmap->lock); | |
70 | ||
71 | return obj; | |
72 | } | |
73 | ||
/*
 * Return a single object to the bitmap.  @obj is a value previously
 * handed out by mlx4_bitmap_alloc() (round-robin prefix included, it is
 * stripped by mlx4_bitmap_free_range()).  @use_rr non-zero preserves
 * round-robin ordering instead of making the freed bit immediately
 * preferable for reallocation.
 */
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
	mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
78 | ||
79 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) | |
80 | { | |
81 | u32 obj; | |
82 | ||
83 | if (likely(cnt == 1 && align == 1)) | |
84 | return mlx4_bitmap_alloc(bitmap); | |
85 | ||
86 | spin_lock(&bitmap->lock); | |
87 | ||
88 | obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, | |
89 | bitmap->last, cnt, align - 1); | |
90 | if (obj >= bitmap->max) { | |
91 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) | |
92 | & bitmap->mask; | |
93 | obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, | |
94 | 0, cnt, align - 1); | |
95 | } | |
96 | ||
97 | if (obj < bitmap->max) { | |
98 | bitmap_set(bitmap->table, obj, cnt); | |
99 | if (obj == bitmap->last) { | |
100 | bitmap->last = (obj + cnt); | |
101 | if (bitmap->last >= bitmap->max) | |
102 | bitmap->last = 0; | |
103 | } | |
104 | obj |= bitmap->top; | |
105 | } else | |
106 | obj = -1; | |
107 | ||
108 | if (obj != -1) | |
109 | bitmap->avail -= cnt; | |
110 | ||
111 | spin_unlock(&bitmap->lock); | |
112 | ||
113 | return obj; | |
114 | } | |
115 | ||
/*
 * Number of objects currently allocatable.  Read without taking the
 * bitmap lock, so the returned value is only a snapshot.
 */
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}
120 | ||
/*
 * Release @cnt consecutive objects starting at @obj (a value previously
 * returned by the alloc functions; the round-robin prefix is stripped
 * here).  When @use_rr is zero, the freed range is made immediately
 * findable again and the round-robin prefix is advanced.
 */
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr)
{
	/* Strip the round-robin high bits to recover the raw bit index. */
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	if (!use_rr) {
		bitmap->last = min(bitmap->last, obj);
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
	}
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}
136 | ||
137 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, | |
138 | u32 reserved_bot, u32 reserved_top) | |
139 | { | |
140 | /* num must be a power of 2 */ | |
141 | if (num != roundup_pow_of_two(num)) | |
142 | return -EINVAL; | |
143 | ||
144 | bitmap->last = 0; | |
145 | bitmap->top = 0; | |
146 | bitmap->max = num - reserved_top; | |
147 | bitmap->mask = mask; | |
148 | bitmap->reserved_top = reserved_top; | |
149 | bitmap->avail = num - reserved_top - reserved_bot; | |
150 | spin_lock_init(&bitmap->lock); | |
151 | bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * | |
152 | sizeof (long), GFP_KERNEL); | |
153 | if (!bitmap->table) | |
154 | return -ENOMEM; | |
155 | ||
156 | bitmap_set(bitmap->table, 0, reserved_bot); | |
157 | ||
158 | return 0; | |
159 | } | |
160 | ||
/* Free the backing storage allocated by mlx4_bitmap_init(). */
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}
165 | ||
166 | /* | |
167 | * Handling for queue buffers -- we allocate a bunch of memory and | |
168 | * register it in a memory region at HCA virtual address 0. If the | |
169 | * requested size is > max_direct, we split the allocation into | |
170 | * multiple pages, so we don't require too much contiguous memory. | |
171 | */ | |
172 | ||
173 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |
174 | struct mlx4_buf *buf) | |
175 | { | |
176 | dma_addr_t t; | |
177 | ||
178 | if (size <= max_direct) { | |
179 | buf->nbufs = 1; | |
180 | buf->npages = 1; | |
181 | buf->page_shift = get_order(size) + PAGE_SHIFT; | |
182 | buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, | |
183 | size, &t, GFP_KERNEL); | |
184 | if (!buf->direct.buf) | |
185 | return -ENOMEM; | |
186 | ||
187 | buf->direct.map = t; | |
188 | ||
189 | while (t & ((1 << buf->page_shift) - 1)) { | |
190 | --buf->page_shift; | |
191 | buf->npages *= 2; | |
192 | } | |
193 | ||
194 | memset(buf->direct.buf, 0, size); | |
195 | } else { | |
196 | int i; | |
197 | ||
198 | buf->direct.buf = NULL; | |
199 | buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; | |
200 | buf->npages = buf->nbufs; | |
201 | buf->page_shift = PAGE_SHIFT; | |
202 | buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), | |
203 | GFP_KERNEL); | |
204 | if (!buf->page_list) | |
205 | return -ENOMEM; | |
206 | ||
207 | for (i = 0; i < buf->nbufs; ++i) { | |
208 | buf->page_list[i].buf = | |
209 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, | |
210 | &t, GFP_KERNEL); | |
211 | if (!buf->page_list[i].buf) | |
212 | goto err_free; | |
213 | ||
214 | buf->page_list[i].map = t; | |
215 | ||
216 | memset(buf->page_list[i].buf, 0, PAGE_SIZE); | |
217 | } | |
218 | ||
219 | if (BITS_PER_LONG == 64) { | |
220 | struct page **pages; | |
221 | pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); | |
222 | if (!pages) | |
223 | goto err_free; | |
224 | for (i = 0; i < buf->nbufs; ++i) | |
225 | pages[i] = virt_to_page(buf->page_list[i].buf); | |
226 | buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | |
227 | kfree(pages); | |
228 | if (!buf->direct.buf) | |
229 | goto err_free; | |
230 | } | |
231 | } | |
232 | ||
233 | return 0; | |
234 | ||
235 | err_free: | |
236 | mlx4_buf_free(dev, size, buf); | |
237 | ||
238 | return -ENOMEM; | |
239 | } | |
240 | EXPORT_SYMBOL_GPL(mlx4_buf_alloc); | |
241 | ||
242 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |
243 | { | |
244 | int i; | |
245 | ||
246 | if (buf->nbufs == 1) | |
247 | dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, | |
248 | buf->direct.map); | |
249 | else { | |
250 | if (BITS_PER_LONG == 64 && buf->direct.buf) | |
251 | vunmap(buf->direct.buf); | |
252 | ||
253 | for (i = 0; i < buf->nbufs; ++i) | |
254 | if (buf->page_list[i].buf) | |
255 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | |
256 | buf->page_list[i].buf, | |
257 | buf->page_list[i].map); | |
258 | kfree(buf->page_list); | |
259 | } | |
260 | } | |
261 | EXPORT_SYMBOL_GPL(mlx4_buf_free); | |
262 | ||
263 | static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) | |
264 | { | |
265 | struct mlx4_db_pgdir *pgdir; | |
266 | ||
267 | pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); | |
268 | if (!pgdir) | |
269 | return NULL; | |
270 | ||
271 | bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); | |
272 | pgdir->bits[0] = pgdir->order0; | |
273 | pgdir->bits[1] = pgdir->order1; | |
274 | pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, | |
275 | &pgdir->db_dma, GFP_KERNEL); | |
276 | if (!pgdir->db_page) { | |
277 | kfree(pgdir); | |
278 | return NULL; | |
279 | } | |
280 | ||
281 | return pgdir; | |
282 | } | |
283 | ||
/*
 * Try to carve a doorbell of the given @order (0 or 1) out of @pgdir,
 * buddy-allocator style.  Fills in @db and returns 0, or -ENOMEM if
 * the page has no free slot of sufficient order.
 */
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	/* Look for a free slot at the requested order first, then larger. */
	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	/* Convert the order-o slot index to an order-0 doorbell index. */
	i <<= o;

	/* Buddy split: taking an order-1 slot for an order-0 request
	 * frees the sibling half at the smaller order. */
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index = i;
	db->db = pgdir->db_page + db->index;
	/* NOTE(review): stride of 4 assumes 4-byte doorbell records —
	 * confirm against the db_page element type in mlx4.h. */
	db->dma = pgdir->db_dma + db->index * 4;
	db->order = order;

	return 0;
}
314 | ||
315 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) | |
316 | { | |
317 | struct mlx4_priv *priv = mlx4_priv(dev); | |
318 | struct mlx4_db_pgdir *pgdir; | |
319 | int ret = 0; | |
320 | ||
321 | mutex_lock(&priv->pgdir_mutex); | |
322 | ||
323 | list_for_each_entry(pgdir, &priv->pgdir_list, list) | |
324 | if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) | |
325 | goto out; | |
326 | ||
327 | pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev)); | |
328 | if (!pgdir) { | |
329 | ret = -ENOMEM; | |
330 | goto out; | |
331 | } | |
332 | ||
333 | list_add(&pgdir->list, &priv->pgdir_list); | |
334 | ||
335 | /* This should never fail -- we just allocated an empty page: */ | |
336 | WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); | |
337 | ||
338 | out: | |
339 | mutex_unlock(&priv->pgdir_mutex); | |
340 | ||
341 | return ret; | |
342 | } | |
343 | EXPORT_SYMBOL_GPL(mlx4_db_alloc); | |
344 | ||
/*
 * Return a doorbell record to its page directory, coalescing with its
 * buddy when possible, and free the whole page once it is empty again.
 */
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	/* Buddy coalesce: if our order-0 sibling is also free, merge the
	 * pair back into one free order-1 slot. */
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	/* Convert back from doorbell index to order-o slot index. */
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	/* All order-1 slots free again means the page is unused: release
	 * the DMA page and unlink the directory entry. */
	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);
373 | ||
374 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | |
375 | int size, int max_direct) | |
376 | { | |
377 | int err; | |
378 | ||
379 | err = mlx4_db_alloc(dev, &wqres->db, 1); | |
380 | if (err) | |
381 | return err; | |
382 | ||
383 | *wqres->db.db = 0; | |
384 | ||
385 | err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf); | |
386 | if (err) | |
387 | goto err_db; | |
388 | ||
389 | err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, | |
390 | &wqres->mtt); | |
391 | if (err) | |
392 | goto err_buf; | |
393 | ||
394 | err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); | |
395 | if (err) | |
396 | goto err_mtt; | |
397 | ||
398 | return 0; | |
399 | ||
400 | err_mtt: | |
401 | mlx4_mtt_cleanup(dev, &wqres->mtt); | |
402 | err_buf: | |
403 | mlx4_buf_free(dev, size, &wqres->buf); | |
404 | err_db: | |
405 | mlx4_db_free(dev, &wqres->db); | |
406 | ||
407 | return err; | |
408 | } | |
409 | EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); | |
410 | ||
/* Tear down everything set up by mlx4_alloc_hwq_res(), in reverse order. */
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);