/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
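/*
 * MTT segments are managed with a buddy allocator: bits[o] is a bitmap
 * of free blocks of order o (a block of order o spans 2^o segments) and
 * num_free[o] counts them.  Allocation takes the smallest free block
 * that is big enough and splits it down to the requested order;
 * freeing re-merges a block with its buddy whenever the buddy is also
 * free.
 */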
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
        int o;
        int m;
        u32 seg;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o)
                if (buddy->num_free[o]) {
                        m = 1 << (buddy->max_order - o);
                        seg = find_first_bit(buddy->bits[o], m);
                        if (seg < m)
                                goto found;
                }

        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(seg, buddy->bits[o]);
        --buddy->num_free[o];

        /* Split the block down to the requested order; the upper half
         * of each split (seg ^ 1) goes back on the free list. */
        while (o > order) {
                --o;
                seg <<= 1;
                set_bit(seg ^ 1, buddy->bits[o]);
                ++buddy->num_free[o];
        }

        spin_unlock(&buddy->lock);

        seg <<= order;

        return seg;
}
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        /* Re-merge with the buddy block for as long as it is free too */
        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                --buddy->num_free[order];
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);
        ++buddy->num_free[order];

        spin_unlock(&buddy->lock);
}
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);

        buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
                              GFP_KERNEL);
        buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
                if (!buddy->bits[i]) {
                        /* Fall back to vmalloc for the larger bitmaps */
                        buddy->bits[i] = vzalloc(s * sizeof(long));
                        if (!buddy->bits[i])
                                goto err_out_free;
                }
        }

        set_bit(0, buddy->bits[buddy->max_order]);
        buddy->num_free[buddy->max_order] = 1;

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kvfree(buddy->bits[i]);

err_out:
        kfree(buddy->bits);
        kfree(buddy->num_free);

        return -ENOMEM;
}
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kvfree(buddy->bits[i]);

        kfree(buddy->bits);
        kfree(buddy->num_free);
}
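/*
 * MTT entries are handed out in segments of 2^log_mtts_per_seg
 * entries, so a request for 2^order MTTs is first converted into a
 * buddy order of segments and the resulting range is then mapped in
 * the ICM-backed MTT table.
 */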
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        u32 seg;
        int seg_order;
        u32 offset;

        seg_order = max_t(int, order - log_mtts_per_seg, 0);

        seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
        if (seg == -1)
                return -1;

        offset = seg * (1 << log_mtts_per_seg);

        if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
                                 offset + (1 << order) - 1)) {
                mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
                return -1;
        }

        return offset;
}
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
        u64 in_param = 0;
        u64 out_param;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, order);
                err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
                                   RES_OP_RESERVE_AND_MAP,
                                   MLX4_CMD_ALLOC_RES,
                                   MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_WRAPPED);
                if (err)
                        return -1;
                return get_param_l(&out_param);
        }
        return __mlx4_alloc_mtt_range(dev, order);
}
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
                  struct mlx4_mtt *mtt)
{
        int i;

        if (!npages) {
                /* Physical region: no MTT entries are needed */
                mtt->order      = -1;
                mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
                return 0;
        }

        mtt->page_shift = page_shift;

        for (mtt->order = 0, i = 1; i < npages; i <<= 1)
                ++mtt->order;

        mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
        if (mtt->offset == -1)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
        u32 first_seg;
        int seg_order;
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        seg_order = max_t(int, order - log_mtts_per_seg, 0);
        first_seg = offset / (1 << log_mtts_per_seg);

        mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
        mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
                             offset + (1 << order) - 1);
}
static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
        u64 in_param = 0;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, offset);
                set_param_h(&in_param, order);
                err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
                               MLX4_CMD_FREE_RES,
                               MLX4_CMD_TIME_CLASS_A,
                               MLX4_CMD_WRAPPED);
                if (err)
                        mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
                                  offset, order);
                return;
        }
        __mlx4_free_mtt_range(dev, offset, order);
}
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
        if (mtt->order < 0)
                return;

        mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
        return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
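/*
 * Memory keys are the hardware MPT index rotated left by one byte.
 * Rotating back and masking with (num_mpts - 1) recovers the index, so
 * keys that differ only in the bits above the index (mlx4_map_phys_fmr()
 * bumps the key by num_mpts on every remap) still resolve to the same
 * MPT entry.  The two helpers below are inverses of each other.
 */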
static u32 hw_index_to_key(u32 ind)
{
        return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
{
        return mlx4_cmd(dev, mailbox->dma, mpt_index,
                        0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
                        MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
{
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
                            !mailbox, MLX4_CMD_HW2SW_MPT,
                            MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                       struct mlx4_mpt_entry ***mpt_entry)
{
        int err;
        int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
        struct mlx4_cmd_mailbox *mailbox = NULL;

        if (mmr->enabled != MLX4_MPT_EN_HW)
                return -EINVAL;

        err = mlx4_HW2SW_MPT(dev, NULL, key);
        if (err) {
                mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
                mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
                return err;
        }

        mmr->enabled = MLX4_MPT_EN_SW;

        if (!mlx4_is_mfunc(dev)) {
                **mpt_entry = mlx4_table_find(
                                &mlx4_priv(dev)->mr_table.dmpt_table,
                                key, NULL);
        } else {
                mailbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR_OR_NULL(mailbox))
                        return PTR_ERR(mailbox);

                err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
                                   0, MLX4_CMD_QUERY_MPT,
                                   MLX4_CMD_TIME_CLASS_B,
                                   MLX4_CMD_WRAPPED);
                if (err)
                        goto free_mailbox;

                *mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
        }

        if (!(*mpt_entry) || !(**mpt_entry)) {
                err = -ENOMEM;
                goto free_mailbox;
        }

        return 0;

free_mailbox:
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                         struct mlx4_mpt_entry **mpt_entry)
{
        int err;

        if (!mlx4_is_mfunc(dev)) {
                /* Make sure any changes to this entry are flushed */
                wmb();

                *(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;

                /* Make sure the new status is written */
                wmb();

                err = mlx4_SYNC_TPT(dev);
        } else {
                int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);

                struct mlx4_cmd_mailbox *mailbox =
                        container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
                                     buf);

                err = mlx4_SW2HW_MPT(dev, mailbox, key);
        }

        if (!err) {
                mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
                mmr->enabled = MLX4_MPT_EN_HW;
        }
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
                        struct mlx4_mpt_entry **mpt_entry)
{
        if (mlx4_is_mfunc(dev)) {
                struct mlx4_cmd_mailbox *mailbox =
                        container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
                                     buf);
                mlx4_free_cmd_mailbox(dev, mailbox);
        }
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
                         u32 pdn)
{
        u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
        /* The wrapper function will put the slave's id here */
        if (mlx4_is_mfunc(dev))
                pd_flags &= ~MLX4_MPT_PD_VF_MASK;

        mpt_entry->pd_flags = cpu_to_be32(pd_flags |
                                          (pdn & MLX4_MPT_PD_MASK)
                                          | MLX4_MPT_PD_FLAG_EN_INV);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);
int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
                             struct mlx4_mpt_entry *mpt_entry,
                             u32 access)
{
        u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
                    (access & MLX4_PERM_MASK);

        mpt_entry->flags = cpu_to_be32(flags);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);
static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
                                  u64 iova, u64 size, u32 access, int npages,
                                  int page_shift, struct mlx4_mr *mr)
{
        mr->iova       = iova;
        mr->size       = size;
        mr->pd         = pd;
        mr->access     = access;
        mr->enabled    = MLX4_MPT_DISABLED;
        mr->key        = hw_index_to_key(mridx);

        return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}
static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
                          struct mlx4_cmd_mailbox *mailbox,
                          int num_entries)
{
        return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
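/*
 * Bringing an MPT entry into use is a three-step process: reserve an
 * index in the MPT bitmap, back that index with ICM memory, and
 * finally hand the entry to the hardware with SW2HW_MPT.  On a
 * multi-function device a slave cannot touch the bitmap or the ICM
 * directly, so the mlx4_mpt_* wrappers below forward each step to the
 * master through RES_MPT resource commands, while the __mlx4_mpt_*
 * variants do the actual work.
 */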
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}
static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
        u64 out_param;

        if (mlx4_is_mfunc(dev)) {
                if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
                                 MLX4_CMD_ALLOC_RES,
                                 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
                        return -1;
                return get_param_l(&out_param);
        }
        return __mlx4_mpt_reserve(dev);
}
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
        u64 in_param = 0;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, index);
                if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
                             MLX4_CMD_FREE_RES,
                             MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
                        mlx4_warn(dev, "Failed to release mr index:%d\n",
                                  index);
                return;
        }
        __mlx4_mpt_release(dev, index);
}
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
}
static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
        u64 param = 0;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&param, index);
                return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
                                    MLX4_CMD_ALLOC_RES,
                                    MLX4_CMD_TIME_CLASS_A,
                                    MLX4_CMD_WRAPPED);
        }
        return __mlx4_mpt_alloc_icm(dev, index, gfp);
}
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        mlx4_table_put(dev, &mr_table->dmpt_table, index);
}
static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
        u64 in_param = 0;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, index);
                if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
                             MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
                             MLX4_CMD_WRAPPED))
                        mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
                                  index);
                return;
        }
        return __mlx4_mpt_free_icm(dev, index);
}
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
                  int npages, int page_shift, struct mlx4_mr *mr)
{
        u32 index;
        int err;

        index = mlx4_mpt_reserve(dev);
        if (index == -1)
                return -ENOMEM;

        err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
                                     access, npages, page_shift, mr);
        if (err)
                mlx4_mpt_release(dev, index);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        int err;

        if (mr->enabled == MLX4_MPT_EN_HW) {
                err = mlx4_HW2SW_MPT(dev, NULL,
                                     key_to_hw_index(mr->key) &
                                     (dev->caps.num_mpts - 1));
                if (err) {
                        mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
                                  err);
                        return err;
                }

                mr->enabled = MLX4_MPT_EN_SW;
        }
        mlx4_mtt_cleanup(dev, &mr->mtt);

        return 0;
}
int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        int ret;

        ret = mlx4_mr_free_reserved(dev, mr);
        if (ret)
                return ret;
        if (mr->enabled)
                mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
        mlx4_mpt_release(dev, key_to_hw_index(mr->key));

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        mlx4_mtt_cleanup(dev, &mr->mtt);
        mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
                            u64 iova, u64 size, int npages,
                            int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
        int err;

        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
        if (err)
                return err;

        mpt_entry->start       = cpu_to_be64(mr->iova);
        mpt_entry->length      = cpu_to_be64(mr->size);
        mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

        mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
                                           MLX4_MPT_PD_FLAG_EN_INV);
        mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
                                           MLX4_MPT_FLAG_SW_OWNS);
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
        } else {
                mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
                                                  &mr->mtt));
                if (mr->mtt.page_shift == 0)
                        mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        }
        if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
                /* fast register MR in free state */
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }
        mr->enabled = MLX4_MPT_EN_SW;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mpt_entry *mpt_entry;
        int err;

        err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
        if (err)
                return err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_table;
        }
        mpt_entry = mailbox->buf;
        mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO    |
                                       MLX4_MPT_FLAG_REGION |
                                       mr->access);

        mpt_entry->key         = cpu_to_be32(key_to_hw_index(mr->key));
        mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
        mpt_entry->start       = cpu_to_be64(mr->iova);
        mpt_entry->length      = cpu_to_be64(mr->size);
        mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
        } else {
                mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
                                                  &mr->mtt));
        }

        if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
                /* fast register MR in free state */
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
                mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }

        err = mlx4_SW2HW_MPT(dev, mailbox,
                             key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
        if (err) {
                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_cmd;
        }
        mr->enabled = MLX4_MPT_EN_HW;

        mlx4_free_cmd_mailbox(dev, mailbox);

        return 0;

err_cmd:
        mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
        mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
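/*
 * Write a run of MTT entries directly into the ICM-mapped MTT table.
 * Callers bound the chunk so that all entries fall within a single
 * page of the table; the dma_sync_single_for_cpu()/_for_device() pair
 * brackets the CPU stores so the device observes a consistent page.
 */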
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                                int start_index, int npages, u64 *page_list)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;

        mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
                               start_index, &dma_handle);

        if (!mtts)
                return -ENOMEM;

        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
                                npages * sizeof (u64), DMA_TO_DEVICE);

        for (i = 0; i < npages; ++i)
                mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
                                   npages * sizeof (u64), DMA_TO_DEVICE);

        return 0;
}
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                     int start_index, int npages, u64 *page_list)
{
        int err = 0;
        int chunk;
        int mtts_per_page;
        int max_mtts_first_page;

        /* compute how many mtts fit in the first page */
        mtts_per_page = PAGE_SIZE / sizeof(u64);
        max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
                              % mtts_per_page;

        chunk = min_t(int, max_mtts_first_page, npages);

        while (npages > 0) {
                err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
                if (err)
                        return err;
                npages      -= chunk;
                start_index += chunk;
                page_list   += chunk;

                chunk = min_t(int, mtts_per_page, npages);
        }
        return err;
}
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   int start_index, int npages, u64 *page_list)
{
        struct mlx4_cmd_mailbox *mailbox = NULL;
        __be64 *inbox = NULL;
        int chunk;
        int err = 0;
        int i;

        if (mtt->order < 0)
                return -EINVAL;

        if (mlx4_is_mfunc(dev)) {
                mailbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(mailbox))
                        return PTR_ERR(mailbox);
                inbox = mailbox->buf;

                while (npages > 0) {
                        chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
                                      npages);
                        inbox[0] = cpu_to_be64(mtt->offset + start_index);
                        inbox[1] = 0;
                        for (i = 0; i < chunk; ++i)
                                inbox[i + 2] = cpu_to_be64(page_list[i] |
                                               MLX4_MTT_FLAG_PRESENT);
                        err = mlx4_WRITE_MTT(dev, mailbox, chunk);
                        if (err) {
                                mlx4_free_cmd_mailbox(dev, mailbox);
                                return err;
                        }

                        npages      -= chunk;
                        start_index += chunk;
                        page_list   += chunk;
                }
                mlx4_free_cmd_mailbox(dev, mailbox);
                return err;
        }

        return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                       struct mlx4_buf *buf, gfp_t gfp)
{
        u64 *page_list;
        int err;
        int i;

        page_list = kmalloc(buf->npages * sizeof *page_list,
                            gfp);
        if (!page_list)
                return -ENOMEM;

        for (i = 0; i < buf->npages; ++i)
                if (buf->nbufs == 1)
                        page_list[i] = buf->direct.map + (i << buf->page_shift);
                else
                        page_list[i] = buf->page_list[i].map;

        err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

        kfree(page_list);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
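/*
 * Memory windows share the MPT namespace with memory regions; a window
 * is simply an MPT entry with the REGION flag left clear (see
 * mlx4_mw_enable() below), so allocation only reserves an MPT index
 * and records the window attributes.
 */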
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
                  struct mlx4_mw *mw)
{
        u32 index;

        if ((type == MLX4_MW_TYPE_1 &&
             !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
             (type == MLX4_MW_TYPE_2 &&
             !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
                return -ENOTSUPP;

        index = mlx4_mpt_reserve(dev);
        if (index == -1)
                return -ENOMEM;

        mw->key     = hw_index_to_key(index);
        mw->pd      = pd;
        mw->type    = type;
        mw->enabled = MLX4_MPT_DISABLED;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mpt_entry *mpt_entry;
        int err;

        err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
        if (err)
                return err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_table;
        }
        mpt_entry = mailbox->buf;

        /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
         * off, thus creating a memory window and not a memory region.
         */
        mpt_entry->key      = cpu_to_be32(key_to_hw_index(mw->key));
        mpt_entry->pd_flags = cpu_to_be32(mw->pd);
        if (mw->type == MLX4_MW_TYPE_2) {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->qpn       = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
        }

        err = mlx4_SW2HW_MPT(dev, mailbox,
                             key_to_hw_index(mw->key) &
                             (dev->caps.num_mpts - 1));
        if (err) {
                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_cmd;
        }
        mw->enabled = MLX4_MPT_EN_HW;

        mlx4_free_cmd_mailbox(dev, mailbox);

        return 0;

err_cmd:
        mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
        mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);
void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
        int err;

        if (mw->enabled == MLX4_MPT_EN_HW) {
                err = mlx4_HW2SW_MPT(dev, NULL,
                                     key_to_hw_index(mw->key) &
                                     (dev->caps.num_mpts - 1));
                if (err)
                        mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

                mw->enabled = MLX4_MPT_EN_SW;
        }
        if (mw->enabled)
                mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
        mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_mr_table *mr_table = &priv->mr_table;
        int err;

        /* Nothing to do for slaves - all MR handling is forwarded
         * to the master */
        if (mlx4_is_slave(dev))
                return 0;

        if (!is_power_of_2(dev->caps.num_mpts))
                return -EINVAL;

        err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
                               ~0, dev->caps.reserved_mrws, 0);
        if (err)
                return err;

        err = mlx4_buddy_init(&mr_table->mtt_buddy,
                              ilog2((u32)dev->caps.num_mtts /
                              (1 << log_mtts_per_seg)));
        if (err)
                goto err_buddy;

        if (dev->caps.reserved_mtts) {
                priv->reserved_mtts =
                        mlx4_alloc_mtt_range(dev,
                                             fls(dev->caps.reserved_mtts - 1));
                if (priv->reserved_mtts < 0) {
                        mlx4_warn(dev, "MTT table of order %u is too small\n",
                                  mr_table->mtt_buddy.max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
                }
        }

        return 0;

err_reserve_mtts:
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

        return err;
}
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_mr_table *mr_table = &priv->mr_table;

        if (mlx4_is_slave(dev))
                return;
        if (priv->reserved_mtts >= 0)
                mlx4_free_mtt_range(dev, priv->reserved_mtts,
                                    fls(dev->caps.reserved_mtts - 1));
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
                                 int npages, u64 iova)
{
        int i, page_mask;

        if (npages > fmr->max_pages)
                return -EINVAL;

        page_mask = (1 << fmr->page_shift) - 1;

        /* We are getting page lists, so va must be page aligned. */
        if (iova & page_mask)
                return -EINVAL;

        /* Trust the user not to pass misaligned data in page_list */
        if (0)
                for (i = 0; i < npages; ++i) {
                        if (page_list[i] & ~page_mask)
                                return -EINVAL;
                }

        if (fmr->maps >= fmr->max_maps)
                return -EINVAL;

        return 0;
}
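/*
 * Remap an FMR without a round trip to firmware: flip the MPT entry to
 * software ownership, rewrite the MTT entries and MPT fields directly,
 * then flip ownership back to hardware.  The key is bumped by num_mpts
 * on every map, so stale keys from a previous mapping no longer match.
 */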
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
                      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
        u32 key;
        int i, err;

        err = mlx4_check_fmr(fmr, page_list, npages, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = key_to_hw_index(fmr->mr.key);
        key += dev->caps.num_mpts;
        *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

        *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

        /* Make sure MPT status is visible before writing MTT entries */
        wmb();

        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
                                npages * sizeof(u64), DMA_TO_DEVICE);

        for (i = 0; i < npages; ++i)
                fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
                                   npages * sizeof(u64), DMA_TO_DEVICE);

        fmr->mpt->key    = cpu_to_be32(key);
        fmr->mpt->lkey   = cpu_to_be32(key);
        fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
        fmr->mpt->start  = cpu_to_be64(iova);

        /* Make sure MTT entries are visible before setting MPT status */
        wmb();

        *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

        /* Make sure MPT status is visible before consumer can use FMR */
        wmb();

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
                   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err = -ENOMEM;

        if (max_maps > dev->caps.max_fmr_maps)
                return -EINVAL;

        if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
                return -EINVAL;

        /* All MTTs must fit in the same page */
        if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
                return -EINVAL;

        fmr->page_shift = page_shift;
        fmr->max_pages  = max_pages;
        fmr->max_maps   = max_maps;
        fmr->maps       = 0;

        err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
                            page_shift, &fmr->mr);
        if (err)
                return err;

        fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
                                    fmr->mr.mtt.offset,
                                    &fmr->dma_handle);

        if (!fmr->mtts) {
                err = -ENOMEM;
                goto err_free;
        }

        return 0;

err_free:
        (void) mlx4_mr_free(dev, &fmr->mr);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        err = mlx4_mr_enable(dev, &fmr->mr);
        if (err)
                return err;

        fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
                                   key_to_hw_index(fmr->mr.key), NULL);
        if (!fmr->mpt)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        if (!fmr->maps)
                return;

        fmr->maps = 0;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
                return;
        }

        err = mlx4_HW2SW_MPT(dev, NULL,
                             key_to_hw_index(fmr->mr.key) &
                             (dev->caps.num_mpts - 1));
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (err) {
                pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
                return;
        }
        fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
        int ret;

        if (fmr->maps)
                return -EBUSY;

        ret = mlx4_mr_free(dev, &fmr->mr);
        if (ret)
                return ret;
        fmr->mr.enabled = MLX4_MPT_DISABLED;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
                        MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);