/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

struct mthca_mtt {
        struct mthca_buddy *buddy;
        int                 order;
        u32                 first_seg;
};

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
        __be32 flags;
        __be32 page_size;
        __be32 key;
        __be32 pd;
        __be64 start;
        __be64 length;
        __be32 lkey;
        __be32 window_count;
        __be32 window_count_limit;
        __be64 mtt_seg;
        __be32 mtt_sz;          /* Arbel only */
        u32    reserved[2];
} __attribute__((packed));

#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 << 9)
#define MTHCA_MPT_FLAG_REGION        (1 << 8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

#define SINAI_FMR_KEY_INC 0x1000000

/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */

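/*
 * Allocate 2^order contiguous MTT segments: scan upward from the
 * requested order for the first order that has a free block, claim
 * it, then split it back down to the requested order, marking the
 * unused halves free at each level.  Returns the starting segment
 * index in order-0 units, or -1 if nothing is free.
 */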
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
        int o;
        int m;
        u32 seg;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o)
                if (buddy->num_free[o]) {
                        m = 1 << (buddy->max_order - o);
                        seg = find_first_bit(buddy->bits[o], m);
                        if (seg < m)
                                goto found;
                }

        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(seg, buddy->bits[o]);
        --buddy->num_free[o];

        while (o > order) {
                --o;
                seg <<= 1;
                set_bit(seg ^ 1, buddy->bits[o]);
                ++buddy->num_free[o];
        }

        spin_unlock(&buddy->lock);

        seg <<= order;

        return seg;
}

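/*
 * Free a block of the given order: while the buddy block (seg ^ 1)
 * is also free, merge with it and move up one order, then mark the
 * resulting block free.
 */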
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                --buddy->num_free[order];
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);
        ++buddy->num_free[order];

        spin_unlock(&buddy->lock);
}

static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);

        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                              GFP_KERNEL);
        buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
                if (!buddy->bits[i])
                        goto err_out_free;
                bitmap_zero(buddy->bits[i],
                            1 << (buddy->max_order - i));
        }

        set_bit(0, buddy->bits[buddy->max_order]);
        buddy->num_free[buddy->max_order] = 1;

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

err_out:
        kfree(buddy->bits);
        kfree(buddy->num_free);

        return -ENOMEM;
}

static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

        kfree(buddy->bits);
        kfree(buddy->num_free);
}

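/*
 * Allocate 2^order MTT segments from the given buddy and, on mem-free
 * HCAs, make sure the backing ICM range is mapped; if the ICM can't
 * be obtained the segments go back to the buddy and -1 is returned.
 */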
static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
                                 struct mthca_buddy *buddy)
{
        u32 seg = mthca_buddy_alloc(buddy, order);

        if (seg == -1)
                return -1;

        if (mthca_is_memfree(dev))
                if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
                                          seg + (1 << order) - 1)) {
                        mthca_buddy_free(buddy, seg, order);
                        seg = -1;
                }

        return seg;
}

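/*
 * Pick an order just large enough that 2^order segments hold at least
 * 'size' MTT entries (each segment holds mtt_seg_size / 8 eight-byte
 * entries) and grab that range from the given buddy.
 */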
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
                                           struct mthca_buddy *buddy)
{
        struct mthca_mtt *mtt;
        int i;

        if (size <= 0)
                return ERR_PTR(-EINVAL);

        mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->buddy = buddy;
        mtt->order = 0;
        for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
                ++mtt->order;

        mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
        if (mtt->first_seg == -1) {
                kfree(mtt);
                return ERR_PTR(-ENOMEM);
        }

        return mtt;
}

struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
        return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}

void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
        if (!mtt)
                return;

        mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

        mthca_table_put_range(dev, dev->mr_table.mtt_table,
                              mtt->first_seg,
                              mtt->first_seg + (1 << mtt->order) - 1);

        kfree(mtt);
}

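/*
 * Write MTT entries through the WRITE_MTT firmware command: the
 * buffer list is copied into a mailbox in chunks, with the first two
 * 64-bit words of each chunk carrying the target MTT address and a
 * reserved field.
 */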
static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
                             int start_index, u64 *buffer_list, int list_len)
{
        struct mthca_mailbox *mailbox;
        __be64 *mtt_entry;
        int err = 0;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mtt_entry = mailbox->buf;

        while (list_len > 0) {
                mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
                                           mtt->first_seg * dev->limits.mtt_seg_size +
                                           start_index * 8);
                mtt_entry[1] = 0;
                for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
                        mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
                                                       MTHCA_MTT_FLAG_PRESENT);

                /*
                 * If we have an odd number of entries to write, add
                 * one more dummy entry for firmware efficiency.
                 */
                if (i & 1)
                        mtt_entry[i + 2] = 0;

                err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
                if (err) {
                        mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
                        goto out;
                }

                list_len    -= i;
                start_index += i;
                buffer_list += i;
        }

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}

int mthca_write_mtt_size(struct mthca_dev *dev)
{
        if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
            !(dev->mthca_flags & MTHCA_FLAG_FMR))
                /*
                 * Be friendly to WRITE_MTT command
                 * and leave two empty slots for the
                 * index and reserved fields of the
                 * mailbox.
                 */
                return PAGE_SIZE / sizeof (u64) - 2;

        /* For Arbel, all MTTs must fit in the same page. */
        return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
}

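/*
 * When FMRs are in use, MTT entries can also be written directly
 * instead of going through WRITE_MTT: on Tavor through the
 * ioremapped MTT table in HCA memory, on mem-free HCAs (Arbel) by
 * updating the MTT table in host memory and syncing it for DMA.
 */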
static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
                                      struct mthca_mtt *mtt, int start_index,
                                      u64 *buffer_list, int list_len)
{
        u64 __iomem *mtts;
        int i;

        mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
                start_index * sizeof (u64);
        for (i = 0; i < list_len; ++i)
                mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
                                  mtts + i);
}

static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
                                      struct mthca_mtt *mtt, int start_index,
                                      u64 *buffer_list, int list_len)
{
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;
        int s = start_index * sizeof (u64);

        /* For Arbel, all MTTs must fit in the same page. */
        BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
        /* Require full segments */
        BUG_ON(s % dev->limits.mtt_seg_size);

        mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
                                s / dev->limits.mtt_seg_size, &dma_handle);

        BUG_ON(!mtts);

        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
                                list_len * sizeof (u64), DMA_TO_DEVICE);

        for (i = 0; i < list_len; ++i)
                mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
                                   list_len * sizeof (u64), DMA_TO_DEVICE);
}

int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
                    int start_index, u64 *buffer_list, int list_len)
{
        int size = mthca_write_mtt_size(dev);
        int chunk;

        if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
            !(dev->mthca_flags & MTHCA_FLAG_FMR))
                return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);

        while (list_len > 0) {
                chunk = min(size, list_len);
                if (mthca_is_memfree(dev))
                        mthca_arbel_write_mtt_seg(dev, mtt, start_index,
                                                  buffer_list, chunk);
                else
                        mthca_tavor_write_mtt_seg(dev, mtt, start_index,
                                                  buffer_list, chunk);

                list_len    -= chunk;
                start_index += chunk;
                buffer_list += chunk;
        }

        return 0;
}

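/*
 * Conversion between memory keys and hardware MPT indices: Tavor uses
 * the index directly as the key, while Arbel rotates the 32-bit value
 * by 8 bits so the key's low byte ends up in the index's top byte.
 */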
static inline u32 tavor_hw_index_to_key(u32 ind)
{
        return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
        return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
        return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}

static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
        if (mthca_is_memfree(dev))
                return arbel_hw_index_to_key(ind);
        else
                return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
        if (mthca_is_memfree(dev))
                return arbel_key_to_hw_index(key);
        else
                return tavor_key_to_hw_index(key);
}

static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                return ((key << 20) & 0x800000) | (key & 0x7fffff);
        else
                return key;
}

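/*
 * Allocate and enable a memory region: pick an MPT index, build the
 * MPT entry in a mailbox and pass ownership to the HCA with SW2HW_MPT.
 * A NULL mr->mtt means a physically contiguous region
 * (MTHCA_MPT_FLAG_PHYSICAL), so no MTT segment is referenced.
 */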
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
                   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
        struct mthca_mailbox *mailbox;
        struct mthca_mpt_entry *mpt_entry;
        u32 key;
        int i;
        int err;

        WARN_ON(buffer_size_shift >= 32);

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;
        key = adjust_key(dev, key);
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_table;
        }
        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
                                       MTHCA_MPT_FLAG_MIO     |
                                       MTHCA_MPT_FLAG_REGION  |
                                       access);
        if (!mr->mtt)
                mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

        mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        mpt_entry->start     = cpu_to_be64(iova);
        mpt_entry->length    = cpu_to_be64(total_size);

        memset(&mpt_entry->lkey, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

        if (mr->mtt)
                mpt_entry->mtt_seg =
                        cpu_to_be64(dev->mr_table.mtt_base +
                                    mr->mtt->first_seg * dev->limits.mtt_seg_size);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1));
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox;
        }

        mthca_free_mailbox(dev, mailbox);
        return err;

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, key);
        return err;
}

int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
                           u32 access, struct mthca_mr *mr)
{
        mr->mtt = NULL;
        return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}

int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
                        u64 *buffer_list, int buffer_size_shift,
                        int list_len, u64 iova, u64 total_size,
                        u32 access, struct mthca_mr *mr)
{
        int err;

        mr->mtt = mthca_alloc_mtt(dev, list_len);
        if (IS_ERR(mr->mtt))
                return PTR_ERR(mr->mtt);

        err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
        if (err) {
                mthca_free_mtt(dev, mr->mtt);
                return err;
        }

        err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
                             total_size, access, mr);
        if (err)
                mthca_free_mtt(dev, mr->mtt);

        return err;
}

/* Free mr or fmr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
        mthca_table_put(dev, dev->mr_table.mpt_table,
                        key_to_hw_index(dev, lkey));

        mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}

void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
        int err;

        err = mthca_HW2SW_MPT(dev, NULL,
                              key_to_hw_index(dev, mr->ibmr.lkey) &
                              (dev->limits.num_mpts - 1));
        if (err)
                mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);

        mthca_free_region(dev, mr->ibmr.lkey);
        mthca_free_mtt(dev, mr->mtt);
}

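/*
 * FMR setup is like mthca_mr_alloc(), except that pointers to the
 * MPT and MTT entries are kept (ioremapped HCA memory on Tavor, ICM
 * in host memory on Arbel) so later map/unmap calls can rewrite them
 * directly without firmware commands.
 */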
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
                    u32 access, struct mthca_fmr *mr)
{
        struct mthca_mpt_entry *mpt_entry;
        struct mthca_mailbox *mailbox;
        u64 mtt_seg;
        u32 key, idx;
        int list_len = mr->attr.max_pages;
        int err = -ENOMEM;
        int i;

        if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
                return -EINVAL;

        /* For Arbel, all MTTs must fit in the same page. */
        if (mthca_is_memfree(dev) &&
            mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
                return -EINVAL;

        mr->maps = 0;

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;
        key = adjust_key(dev, key);

        idx = key & (dev->limits.num_mpts - 1);
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;

                mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
                BUG_ON(!mr->mem.arbel.mpt);
        } else
                mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
                        sizeof *(mr->mem.tavor.mpt) * idx;

        mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err_out_table;
        }

        mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;

        if (mthca_is_memfree(dev)) {
                mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
                                                      mr->mtt->first_seg,
                                                      &mr->mem.arbel.dma_handle);
                BUG_ON(!mr->mem.arbel.mtts);
        } else
                mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_free_mtt;
        }

        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
                                       MTHCA_MPT_FLAG_MIO     |
                                       MTHCA_MPT_FLAG_REGION  |
                                       access);

        mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        memset(&mpt_entry->start, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
        mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1));
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox_free;
        }

        mthca_free_mailbox(dev, mailbox);
        return 0;

err_out_mailbox_free:
        mthca_free_mailbox(dev, mailbox);

err_out_free_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, key);
        return err;
}

int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (fmr->maps)
                return -EBUSY;

        mthca_free_region(dev, fmr->ibmr.lkey);
        mthca_free_mtt(dev, fmr->mtt);

        return 0;
}

static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
                                  int list_len, u64 iova)
{
        int i, page_mask;

        if (list_len > fmr->attr.max_pages)
                return -EINVAL;

        page_mask = (1 << fmr->attr.page_shift) - 1;

        /* We are getting page lists, so va must be page aligned. */
        if (iova & page_mask)
                return -EINVAL;

        /* Trust the user not to pass misaligned data in page_list */
        if (0)
                for (i = 0; i < list_len; ++i) {
                        if (page_list[i] & ~page_mask)
                                return -EINVAL;
                }

        if (fmr->maps >= fmr->attr.max_maps)
                return -EINVAL;

        return 0;
}

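/*
 * Map a page list into an FMR: put the MPT into software ownership,
 * write the new MTT entries, update the key/length/start fields and
 * hand the MPT back to the hardware.  The key is bumped on every map
 * (by num_mpts, or SINAI_FMR_KEY_INC on Sinai), presumably so that
 * stale keys from earlier mappings no longer match.
 */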
int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        struct mthca_mpt_entry mpt_entry;
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = tavor_key_to_hw_index(fmr->ibmr.lkey);
        key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

        for (i = 0; i < list_len; ++i) {
                __be64 mtt_entry = cpu_to_be64(page_list[i] |
                                               MTHCA_MTT_FLAG_PRESENT);
                mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
        }

        mpt_entry.lkey   = cpu_to_be32(key);
        mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
        mpt_entry.start  = cpu_to_be64(iova);

        __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
        memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
                    offsetof(struct mthca_mpt_entry, window_count) -
                    offsetof(struct mthca_mpt_entry, start));

        writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

        return 0;
}

int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = arbel_key_to_hw_index(fmr->ibmr.lkey);
        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                key += SINAI_FMR_KEY_INC;
        else
                key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

        wmb();

        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
                                list_len * sizeof(u64), DMA_TO_DEVICE);

        for (i = 0; i < list_len; ++i)
                fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
                                                     MTHCA_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
                                   list_len * sizeof(u64), DMA_TO_DEVICE);

        fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
        fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
        fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
        fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

        wmb();

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

        wmb();

        return 0;
}

void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (!fmr->maps)
                return;

        fmr->maps = 0;

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}

void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (!fmr->maps)
                return;

        fmr->maps = 0;

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}

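/*
 * Set up the MR machinery: the MPT index allocator, the main MTT
 * buddy, the Tavor FMR ioremaps of the MPT/MTT tables, a separate FMR
 * MTT buddy when FMR MTTs are reserved, and a reservation of the MTTs
 * claimed by firmware.
 */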
int mthca_init_mr_table(struct mthca_dev *dev)
{
        phys_addr_t addr;
        int mpts, mtts, err, i;

        err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
                               dev->limits.num_mpts,
                               ~0, dev->limits.reserved_mrws);
        if (err)
                return err;

        if (!mthca_is_memfree(dev) &&
            (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
                dev->limits.fmr_reserved_mtts = 0;
        else
                dev->mthca_flags |= MTHCA_FLAG_FMR;

        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                mthca_dbg(dev, "Memory key throughput optimization activated.\n");

        err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
                               fls(dev->limits.num_mtt_segs - 1));

        if (err)
                goto err_mtt_buddy;

        dev->mr_table.tavor_fmr.mpt_base = NULL;
        dev->mr_table.tavor_fmr.mtt_base = NULL;

        if (dev->limits.fmr_reserved_mtts) {
                i = fls(dev->limits.fmr_reserved_mtts - 1);

                if (i >= 31) {
                        mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
                        err = -EINVAL;
                        goto err_fmr_mpt;
                }
                mpts = mtts = 1 << i;
        } else {
                mtts = dev->limits.num_mtt_segs;
                mpts = dev->limits.num_mpts;
        }

        if (!mthca_is_memfree(dev) &&
            (dev->mthca_flags & MTHCA_FLAG_FMR)) {

                addr = pci_resource_start(dev->pdev, 4) +
                        ((pci_resource_len(dev->pdev, 4) - 1) &
                         dev->mr_table.mpt_base);

                dev->mr_table.tavor_fmr.mpt_base =
                        ioremap(addr, mpts * sizeof(struct mthca_mpt_entry));

                if (!dev->mr_table.tavor_fmr.mpt_base) {
                        mthca_warn(dev, "MPT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mpt;
                }

                addr = pci_resource_start(dev->pdev, 4) +
                        ((pci_resource_len(dev->pdev, 4) - 1) &
                         dev->mr_table.mtt_base);

                dev->mr_table.tavor_fmr.mtt_base =
                        ioremap(addr, mtts * dev->limits.mtt_seg_size);
                if (!dev->mr_table.tavor_fmr.mtt_base) {
                        mthca_warn(dev, "MTT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mtt;
                }
        }

        if (dev->limits.fmr_reserved_mtts) {
                err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
                if (err)
                        goto err_fmr_mtt_buddy;

                /* Prevent regular MRs from using FMR keys */
                err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
                if (err)
                        goto err_reserve_fmr;

                dev->mr_table.fmr_mtt_buddy =
                        &dev->mr_table.tavor_fmr.mtt_buddy;
        } else
                dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

        /* FMR table is always the first, take reserved MTTs out of there */
        if (dev->limits.reserved_mtts) {
                i = fls(dev->limits.reserved_mtts - 1);

                if (mthca_alloc_mtt_range(dev, i,
                                          dev->mr_table.fmr_mtt_buddy) == -1) {
                        mthca_warn(dev, "MTT table of order %d is too small.\n",
                                   dev->mr_table.fmr_mtt_buddy->max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
                }
        }

        return 0;

err_reserve_mtts:
err_reserve_fmr:
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

        return err;
}

void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
        /* XXX check if any MRs are still allocated? */
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}