/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

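/*
 * Allocate one object from the bitmap, scanning from the last
 * allocated entry so allocations rotate through the table.  On
 * wrap-around, bitmap->top is stepped (modulo bitmap->mask) so a
 * reused index comes back with different high-order bits.  Returns
 * the allocated object, or -1 if the bitmap is full.
 */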
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}

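/* Free a single object; thin wrapper around mlx4_bitmap_free_range(). */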
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
	mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}

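/*
 * Allocate cnt contiguous objects aligned to a multiple of align,
 * with the same wrap-around and top-stepping behavior as
 * mlx4_bitmap_alloc().  The cnt == 1, align == 1 case falls through
 * to the plain single-object allocator.  Returns the first object of
 * the range, or -1 on failure.
 */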
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
	u32 obj;

	if (likely(cnt == 1 && align == 1))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
					 bitmap->last, cnt, align - 1);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
						 0, cnt, align - 1);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}

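/* Unlocked snapshot of the number of free entries in the bitmap. */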
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}

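/*
 * Return cnt objects to the bitmap.  When use_rr is zero (round-robin
 * reuse disabled), bitmap->last is pulled back so freed entries are
 * eligible for immediate reuse, and bitmap->top is stepped so a
 * reused index carries fresh high-order bits.
 */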
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr)
{
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	if (!use_rr) {
		bitmap->last = min(bitmap->last, obj);
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
	}
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}

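/*
 * Initialize a bitmap of num entries (num must be a power of 2).
 * The lowest reserved_bot entries are marked in-use and the top
 * reserved_top entries are excluded from the search range, leaving
 * num - reserved_top - reserved_bot entries available.
 */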
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top = 0;
	bitmap->max = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof (long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs = 1;
		buf->npages = 1;
		buf->page_shift = get_order(size) + PAGE_SHIFT;
		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf = NULL;
		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					 GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

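/*
 * Release a buffer allocated with mlx4_buf_alloc(), tearing down the
 * vmap()ed direct mapping (64-bit multi-page case) before freeing the
 * coherent DMA pages.
 */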
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

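/*
 * Doorbell records are carved out of coherent DMA pages.  Each
 * mlx4_db_pgdir tracks one such page with two bitmaps: order0 for
 * single free records, order1 for free pairs.  A fresh page starts
 * with every pair free.
 */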
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

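/*
 * Buddy-style allocation within one doorbell page: take a free slot
 * of the requested order, or split a free pair and return its buddy
 * to the order-0 bitmap.  The record's DMA address is db_dma plus
 * four bytes per index.
 */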
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index = i;
	db->db = pgdir->db_page + db->index;
	db->dma = pgdir->db_dma + db->index * 4;
	db->order = order;

	return 0;
}

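/*
 * Allocate a doorbell record of the given order, first from the
 * existing page directories and, failing that, from a newly
 * allocated page that is then added to the per-device list.
 */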
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

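/*
 * Free a doorbell record, merging it with its buddy when possible;
 * once every pair in the page is free again, the whole page is
 * released and its pgdir removed from the list.
 */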
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

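/*
 * Convenience helper that sets up a complete hardware work queue:
 * a zeroed doorbell record, the queue buffer itself, an MTT for the
 * buffer, and the buffer pages written into that MTT.  On error,
 * everything allocated so far is unwound.
 */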
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

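/* Tear down the resources set up by mlx4_alloc_hwq_res(). */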
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);