/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/io-mapping.h>
36 #include <linux/mlx5/driver.h>
37 #include "mlx5_core.h"
39 int mlx5_cmd_alloc_uar(struct mlx5_core_dev
*dev
, u32
*uarn
)
41 u32 out
[MLX5_ST_SZ_DW(alloc_uar_out
)] = {};
42 u32 in
[MLX5_ST_SZ_DW(alloc_uar_in
)] = {};
45 MLX5_SET(alloc_uar_in
, in
, opcode
, MLX5_CMD_OP_ALLOC_UAR
);
46 err
= mlx5_cmd_exec_inout(dev
, alloc_uar
, in
, out
);
48 *uarn
= MLX5_GET(alloc_uar_out
, out
, uar
);
51 EXPORT_SYMBOL(mlx5_cmd_alloc_uar
);
53 int mlx5_cmd_free_uar(struct mlx5_core_dev
*dev
, u32 uarn
)
55 u32 in
[MLX5_ST_SZ_DW(dealloc_uar_in
)] = {};
57 MLX5_SET(dealloc_uar_in
, in
, opcode
, MLX5_CMD_OP_DEALLOC_UAR
);
58 MLX5_SET(dealloc_uar_in
, in
, uar
, uarn
);
59 return mlx5_cmd_exec_in(dev
, dealloc_uar
, in
);
61 EXPORT_SYMBOL(mlx5_cmd_free_uar
);
63 static int uars_per_sys_page(struct mlx5_core_dev
*mdev
)
65 if (MLX5_CAP_GEN(mdev
, uar_4k
))
66 return MLX5_CAP_GEN(mdev
, num_of_uars_per_page
);
71 static u64
uar2pfn(struct mlx5_core_dev
*mdev
, u32 index
)
73 u32 system_page_index
;
75 if (MLX5_CAP_GEN(mdev
, uar_4k
))
76 system_page_index
= index
>> (PAGE_SHIFT
- MLX5_ADAPTER_PAGE_SHIFT
);
78 system_page_index
= index
;
80 return (mdev
->bar_addr
>> PAGE_SHIFT
) + system_page_index
;
83 static void up_rel_func(struct kref
*kref
)
85 struct mlx5_uars_page
*up
= container_of(kref
, struct mlx5_uars_page
, ref_count
);
89 if (mlx5_cmd_free_uar(up
->mdev
, up
->index
))
90 mlx5_core_warn(up
->mdev
, "failed to free uar index %d\n", up
->index
);
91 bitmap_free(up
->reg_bitmap
);
92 bitmap_free(up
->fp_bitmap
);
96 static struct mlx5_uars_page
*alloc_uars_page(struct mlx5_core_dev
*mdev
,
99 struct mlx5_uars_page
*up
;
105 bfregs
= uars_per_sys_page(mdev
) * MLX5_BFREGS_PER_UAR
;
106 up
= kzalloc(sizeof(*up
), GFP_KERNEL
);
111 up
->reg_bitmap
= bitmap_zalloc(bfregs
, GFP_KERNEL
);
115 up
->fp_bitmap
= bitmap_zalloc(bfregs
, GFP_KERNEL
);
119 for (i
= 0; i
< bfregs
; i
++)
120 if ((i
% MLX5_BFREGS_PER_UAR
) < MLX5_NON_FP_BFREGS_PER_UAR
)
121 set_bit(i
, up
->reg_bitmap
);
123 set_bit(i
, up
->fp_bitmap
);
126 up
->fp_avail
= bfregs
* MLX5_FP_BFREGS_PER_UAR
/ MLX5_BFREGS_PER_UAR
;
127 up
->reg_avail
= bfregs
* MLX5_NON_FP_BFREGS_PER_UAR
/ MLX5_BFREGS_PER_UAR
;
129 err
= mlx5_cmd_alloc_uar(mdev
, &up
->index
);
131 mlx5_core_warn(mdev
, "mlx5_cmd_alloc_uar() failed, %d\n", err
);
135 pfn
= uar2pfn(mdev
, up
->index
);
137 up
->map
= ioremap_wc(pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
143 up
->map
= ioremap(pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
149 kref_init(&up
->ref_count
);
150 mlx5_core_dbg(mdev
, "allocated UAR page: index %d, total bfregs %d\n",
151 up
->index
, up
->bfregs
);
155 if (mlx5_cmd_free_uar(mdev
, up
->index
))
156 mlx5_core_warn(mdev
, "failed to free uar index %d\n", up
->index
);
158 bitmap_free(up
->fp_bitmap
);
159 bitmap_free(up
->reg_bitmap
);
164 struct mlx5_uars_page
*mlx5_get_uars_page(struct mlx5_core_dev
*mdev
)
166 struct mlx5_uars_page
*ret
;
168 mutex_lock(&mdev
->priv
.bfregs
.reg_head
.lock
);
169 if (!list_empty(&mdev
->priv
.bfregs
.reg_head
.list
)) {
170 ret
= list_first_entry(&mdev
->priv
.bfregs
.reg_head
.list
,
171 struct mlx5_uars_page
, list
);
172 kref_get(&ret
->ref_count
);
175 ret
= alloc_uars_page(mdev
, false);
178 list_add(&ret
->list
, &mdev
->priv
.bfregs
.reg_head
.list
);
180 mutex_unlock(&mdev
->priv
.bfregs
.reg_head
.lock
);
184 EXPORT_SYMBOL(mlx5_get_uars_page
);
186 void mlx5_put_uars_page(struct mlx5_core_dev
*mdev
, struct mlx5_uars_page
*up
)
188 mutex_lock(&mdev
->priv
.bfregs
.reg_head
.lock
);
189 kref_put(&up
->ref_count
, up_rel_func
);
190 mutex_unlock(&mdev
->priv
.bfregs
.reg_head
.lock
);
192 EXPORT_SYMBOL(mlx5_put_uars_page
);
194 static unsigned long map_offset(struct mlx5_core_dev
*mdev
, int dbi
)
196 /* return the offset in bytes from the start of the page to the
197 * blue flame area of the UAR
199 return dbi
/ MLX5_BFREGS_PER_UAR
* MLX5_ADAPTER_PAGE_SIZE
+
200 (dbi
% MLX5_BFREGS_PER_UAR
) *
201 (1 << MLX5_CAP_GEN(mdev
, log_bf_reg_size
)) + MLX5_BF_OFFSET
;
204 static int alloc_bfreg(struct mlx5_core_dev
*mdev
, struct mlx5_sq_bfreg
*bfreg
,
205 bool map_wc
, bool fast_path
)
207 struct mlx5_bfreg_data
*bfregs
;
208 struct mlx5_uars_page
*up
;
209 struct list_head
*head
;
210 unsigned long *bitmap
;
212 struct mutex
*lock
; /* pointer to right mutex */
215 bfregs
= &mdev
->priv
.bfregs
;
217 head
= &bfregs
->wc_head
.list
;
218 lock
= &bfregs
->wc_head
.lock
;
220 head
= &bfregs
->reg_head
.list
;
221 lock
= &bfregs
->reg_head
.lock
;
224 if (list_empty(head
)) {
225 up
= alloc_uars_page(mdev
, map_wc
);
230 list_add(&up
->list
, head
);
232 up
= list_entry(head
->next
, struct mlx5_uars_page
, list
);
233 kref_get(&up
->ref_count
);
236 bitmap
= up
->fp_bitmap
;
237 avail
= &up
->fp_avail
;
239 bitmap
= up
->reg_bitmap
;
240 avail
= &up
->reg_avail
;
242 dbi
= find_first_bit(bitmap
, up
->bfregs
);
243 clear_bit(dbi
, bitmap
);
248 bfreg
->map
= up
->map
+ map_offset(mdev
, dbi
);
251 bfreg
->index
= up
->index
+ dbi
/ MLX5_BFREGS_PER_UAR
;
257 int mlx5_alloc_bfreg(struct mlx5_core_dev
*mdev
, struct mlx5_sq_bfreg
*bfreg
,
258 bool map_wc
, bool fast_path
)
262 err
= alloc_bfreg(mdev
, bfreg
, map_wc
, fast_path
);
266 if (err
== -EAGAIN
&& map_wc
)
267 return alloc_bfreg(mdev
, bfreg
, false, fast_path
);
271 EXPORT_SYMBOL(mlx5_alloc_bfreg
);
273 static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev
*dev
,
274 struct mlx5_uars_page
*up
,
275 struct mlx5_sq_bfreg
*bfreg
)
277 unsigned int uar_idx
;
278 unsigned int bfreg_idx
;
279 unsigned int bf_reg_size
;
281 bf_reg_size
= 1 << MLX5_CAP_GEN(dev
, log_bf_reg_size
);
283 uar_idx
= (bfreg
->map
- up
->map
) >> MLX5_ADAPTER_PAGE_SHIFT
;
284 bfreg_idx
= (((uintptr_t)bfreg
->map
% MLX5_ADAPTER_PAGE_SIZE
) - MLX5_BF_OFFSET
) / bf_reg_size
;
286 return uar_idx
* MLX5_BFREGS_PER_UAR
+ bfreg_idx
;
289 void mlx5_free_bfreg(struct mlx5_core_dev
*mdev
, struct mlx5_sq_bfreg
*bfreg
)
291 struct mlx5_bfreg_data
*bfregs
;
292 struct mlx5_uars_page
*up
;
293 struct mutex
*lock
; /* pointer to right mutex */
297 unsigned long *bitmap
;
298 struct list_head
*head
;
300 bfregs
= &mdev
->priv
.bfregs
;
302 head
= &bfregs
->wc_head
.list
;
303 lock
= &bfregs
->wc_head
.lock
;
305 head
= &bfregs
->reg_head
.list
;
306 lock
= &bfregs
->reg_head
.lock
;
309 dbi
= addr_to_dbi_in_syspage(mdev
, up
, bfreg
);
310 fp
= (dbi
% MLX5_BFREGS_PER_UAR
) >= MLX5_NON_FP_BFREGS_PER_UAR
;
312 avail
= &up
->fp_avail
;
313 bitmap
= up
->fp_bitmap
;
315 avail
= &up
->reg_avail
;
316 bitmap
= up
->reg_bitmap
;
320 set_bit(dbi
, bitmap
);
322 list_add_tail(&up
->list
, head
);
324 kref_put(&up
->ref_count
, up_rel_func
);
327 EXPORT_SYMBOL(mlx5_free_bfreg
);