]>
Commit | Line | Data |
---|---|---|
e126ba97 | 1 | /* |
6cf0a15f | 2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
e126ba97 EC |
3 | * |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | ||
34 | #include <linux/kref.h> | |
35 | #include <linux/random.h> | |
36 | #include <linux/debugfs.h> | |
37 | #include <linux/export.h> | |
746b5583 | 38 | #include <linux/delay.h> |
e126ba97 | 39 | #include <rdma/ib_umem.h> |
b4cfe447 | 40 | #include <rdma/ib_umem_odp.h> |
968e78dd | 41 | #include <rdma/ib_verbs.h> |
e126ba97 EC |
42 | #include "mlx5_ib.h" |
43 | ||
44 | enum { | |
746b5583 | 45 | MAX_PENDING_REG_MR = 8, |
e126ba97 EC |
46 | }; |
47 | ||
832a6b06 | 48 | #define MLX5_UMR_ALIGN 2048 |
fe45f827 | 49 | |
6aec21f6 | 50 | static int clean_mr(struct mlx5_ib_mr *mr); |
7d0cc6ed | 51 | static int use_umr(struct mlx5_ib_dev *dev, int order); |
49780d42 | 52 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); |
6aec21f6 | 53 | |
b4cfe447 HE |
54 | static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
55 | { | |
a606b0f6 | 56 | int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); |
b4cfe447 HE |
57 | |
58 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | |
59 | /* Wait until all page fault handlers using the mr complete. */ | |
60 | synchronize_srcu(&dev->mr_srcu); | |
61 | #endif | |
62 | ||
63 | return err; | |
64 | } | |
65 | ||
e126ba97 EC |
66 | static int order2idx(struct mlx5_ib_dev *dev, int order) |
67 | { | |
68 | struct mlx5_mr_cache *cache = &dev->cache; | |
69 | ||
70 | if (order < cache->ent[0].order) | |
71 | return 0; | |
72 | else | |
73 | return order - cache->ent[0].order; | |
74 | } | |
75 | ||
56e11d62 NO |
76 | static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length) |
77 | { | |
78 | return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >= | |
79 | length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1)); | |
80 | } | |
81 | ||
395a8e4c NO |
82 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
83 | static void update_odp_mr(struct mlx5_ib_mr *mr) | |
84 | { | |
85 | if (mr->umem->odp_data) { | |
86 | /* | |
87 | * This barrier prevents the compiler from moving the | |
88 | * setting of umem->odp_data->private to point to our | |
89 | * MR, before reg_umr finished, to ensure that the MR | |
90 | * initialization have finished before starting to | |
91 | * handle invalidations. | |
92 | */ | |
93 | smp_wmb(); | |
94 | mr->umem->odp_data->private = mr; | |
95 | /* | |
96 | * Make sure we will see the new | |
97 | * umem->odp_data->private value in the invalidation | |
98 | * routines, before we can get page faults on the | |
99 | * MR. Page faults can happen once we put the MR in | |
100 | * the tree, below this line. Without the barrier, | |
101 | * there can be a fault handling and an invalidation | |
102 | * before umem->odp_data->private == mr is visible to | |
103 | * the invalidation handler. | |
104 | */ | |
105 | smp_wmb(); | |
106 | } | |
107 | } | |
108 | #endif | |
109 | ||
746b5583 EC |
110 | static void reg_mr_callback(int status, void *context) |
111 | { | |
112 | struct mlx5_ib_mr *mr = context; | |
113 | struct mlx5_ib_dev *dev = mr->dev; | |
114 | struct mlx5_mr_cache *cache = &dev->cache; | |
115 | int c = order2idx(dev, mr->order); | |
116 | struct mlx5_cache_ent *ent = &cache->ent[c]; | |
117 | u8 key; | |
746b5583 | 118 | unsigned long flags; |
a606b0f6 | 119 | struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table; |
8605933a | 120 | int err; |
746b5583 | 121 | |
746b5583 EC |
122 | spin_lock_irqsave(&ent->lock, flags); |
123 | ent->pending--; | |
124 | spin_unlock_irqrestore(&ent->lock, flags); | |
125 | if (status) { | |
126 | mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); | |
127 | kfree(mr); | |
128 | dev->fill_delay = 1; | |
129 | mod_timer(&dev->delay_timer, jiffies + HZ); | |
130 | return; | |
131 | } | |
132 | ||
aa8e08d2 | 133 | mr->mmkey.type = MLX5_MKEY_MR; |
9603b61d JM |
134 | spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags); |
135 | key = dev->mdev->priv.mkey_key++; | |
136 | spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags); | |
ec22eb53 | 137 | mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key; |
746b5583 EC |
138 | |
139 | cache->last_add = jiffies; | |
140 | ||
141 | spin_lock_irqsave(&ent->lock, flags); | |
142 | list_add_tail(&mr->list, &ent->head); | |
143 | ent->cur++; | |
144 | ent->size++; | |
145 | spin_unlock_irqrestore(&ent->lock, flags); | |
8605933a HE |
146 | |
147 | write_lock_irqsave(&table->lock, flags); | |
a606b0f6 MB |
148 | err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key), |
149 | &mr->mmkey); | |
8605933a | 150 | if (err) |
a606b0f6 | 151 | pr_err("Error inserting to mkey tree. 0x%x\n", -err); |
8605933a | 152 | write_unlock_irqrestore(&table->lock, flags); |
49780d42 AK |
153 | |
154 | if (!completion_done(&ent->compl)) | |
155 | complete(&ent->compl); | |
746b5583 EC |
156 | } |
157 | ||
e126ba97 EC |
158 | static int add_keys(struct mlx5_ib_dev *dev, int c, int num) |
159 | { | |
e126ba97 EC |
160 | struct mlx5_mr_cache *cache = &dev->cache; |
161 | struct mlx5_cache_ent *ent = &cache->ent[c]; | |
ec22eb53 | 162 | int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); |
e126ba97 | 163 | struct mlx5_ib_mr *mr; |
ec22eb53 SM |
164 | void *mkc; |
165 | u32 *in; | |
e126ba97 EC |
166 | int err = 0; |
167 | int i; | |
168 | ||
ec22eb53 | 169 | in = kzalloc(inlen, GFP_KERNEL); |
e126ba97 EC |
170 | if (!in) |
171 | return -ENOMEM; | |
172 | ||
ec22eb53 | 173 | mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); |
e126ba97 | 174 | for (i = 0; i < num; i++) { |
746b5583 EC |
175 | if (ent->pending >= MAX_PENDING_REG_MR) { |
176 | err = -EAGAIN; | |
177 | break; | |
178 | } | |
179 | ||
e126ba97 EC |
180 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
181 | if (!mr) { | |
182 | err = -ENOMEM; | |
746b5583 | 183 | break; |
e126ba97 EC |
184 | } |
185 | mr->order = ent->order; | |
186 | mr->umred = 1; | |
746b5583 | 187 | mr->dev = dev; |
ec22eb53 SM |
188 | |
189 | MLX5_SET(mkc, mkc, free, 1); | |
190 | MLX5_SET(mkc, mkc, umr_en, 1); | |
49780d42 | 191 | MLX5_SET(mkc, mkc, access_mode, ent->access_mode); |
ec22eb53 SM |
192 | |
193 | MLX5_SET(mkc, mkc, qpn, 0xffffff); | |
49780d42 AK |
194 | MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); |
195 | MLX5_SET(mkc, mkc, log_page_size, ent->page); | |
e126ba97 | 196 | |
746b5583 EC |
197 | spin_lock_irq(&ent->lock); |
198 | ent->pending++; | |
199 | spin_unlock_irq(&ent->lock); | |
ec22eb53 SM |
200 | err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey, |
201 | in, inlen, | |
202 | mr->out, sizeof(mr->out), | |
203 | reg_mr_callback, mr); | |
e126ba97 | 204 | if (err) { |
d14e7110 EC |
205 | spin_lock_irq(&ent->lock); |
206 | ent->pending--; | |
207 | spin_unlock_irq(&ent->lock); | |
e126ba97 | 208 | mlx5_ib_warn(dev, "create mkey failed %d\n", err); |
e126ba97 | 209 | kfree(mr); |
746b5583 | 210 | break; |
e126ba97 | 211 | } |
e126ba97 EC |
212 | } |
213 | ||
e126ba97 EC |
214 | kfree(in); |
215 | return err; | |
216 | } | |
217 | ||
218 | static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) | |
219 | { | |
e126ba97 EC |
220 | struct mlx5_mr_cache *cache = &dev->cache; |
221 | struct mlx5_cache_ent *ent = &cache->ent[c]; | |
222 | struct mlx5_ib_mr *mr; | |
e126ba97 EC |
223 | int err; |
224 | int i; | |
225 | ||
226 | for (i = 0; i < num; i++) { | |
746b5583 | 227 | spin_lock_irq(&ent->lock); |
e126ba97 | 228 | if (list_empty(&ent->head)) { |
746b5583 | 229 | spin_unlock_irq(&ent->lock); |
e126ba97 EC |
230 | return; |
231 | } | |
232 | mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); | |
233 | list_del(&mr->list); | |
234 | ent->cur--; | |
235 | ent->size--; | |
746b5583 | 236 | spin_unlock_irq(&ent->lock); |
b4cfe447 | 237 | err = destroy_mkey(dev, mr); |
203099fd | 238 | if (err) |
e126ba97 | 239 | mlx5_ib_warn(dev, "failed destroy mkey\n"); |
203099fd | 240 | else |
e126ba97 | 241 | kfree(mr); |
e126ba97 EC |
242 | } |
243 | } | |
244 | ||
245 | static ssize_t size_write(struct file *filp, const char __user *buf, | |
246 | size_t count, loff_t *pos) | |
247 | { | |
248 | struct mlx5_cache_ent *ent = filp->private_data; | |
249 | struct mlx5_ib_dev *dev = ent->dev; | |
250 | char lbuf[20]; | |
251 | u32 var; | |
252 | int err; | |
253 | int c; | |
254 | ||
255 | if (copy_from_user(lbuf, buf, sizeof(lbuf))) | |
5e631a03 | 256 | return -EFAULT; |
e126ba97 EC |
257 | |
258 | c = order2idx(dev, ent->order); | |
259 | lbuf[sizeof(lbuf) - 1] = 0; | |
260 | ||
261 | if (sscanf(lbuf, "%u", &var) != 1) | |
262 | return -EINVAL; | |
263 | ||
264 | if (var < ent->limit) | |
265 | return -EINVAL; | |
266 | ||
267 | if (var > ent->size) { | |
746b5583 EC |
268 | do { |
269 | err = add_keys(dev, c, var - ent->size); | |
270 | if (err && err != -EAGAIN) | |
271 | return err; | |
272 | ||
273 | usleep_range(3000, 5000); | |
274 | } while (err); | |
e126ba97 EC |
275 | } else if (var < ent->size) { |
276 | remove_keys(dev, c, ent->size - var); | |
277 | } | |
278 | ||
279 | return count; | |
280 | } | |
281 | ||
282 | static ssize_t size_read(struct file *filp, char __user *buf, size_t count, | |
283 | loff_t *pos) | |
284 | { | |
285 | struct mlx5_cache_ent *ent = filp->private_data; | |
286 | char lbuf[20]; | |
287 | int err; | |
288 | ||
289 | if (*pos) | |
290 | return 0; | |
291 | ||
292 | err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); | |
293 | if (err < 0) | |
294 | return err; | |
295 | ||
296 | if (copy_to_user(buf, lbuf, err)) | |
5e631a03 | 297 | return -EFAULT; |
e126ba97 EC |
298 | |
299 | *pos += err; | |
300 | ||
301 | return err; | |
302 | } | |
303 | ||
304 | static const struct file_operations size_fops = { | |
305 | .owner = THIS_MODULE, | |
306 | .open = simple_open, | |
307 | .write = size_write, | |
308 | .read = size_read, | |
309 | }; | |
310 | ||
311 | static ssize_t limit_write(struct file *filp, const char __user *buf, | |
312 | size_t count, loff_t *pos) | |
313 | { | |
314 | struct mlx5_cache_ent *ent = filp->private_data; | |
315 | struct mlx5_ib_dev *dev = ent->dev; | |
316 | char lbuf[20]; | |
317 | u32 var; | |
318 | int err; | |
319 | int c; | |
320 | ||
321 | if (copy_from_user(lbuf, buf, sizeof(lbuf))) | |
5e631a03 | 322 | return -EFAULT; |
e126ba97 EC |
323 | |
324 | c = order2idx(dev, ent->order); | |
325 | lbuf[sizeof(lbuf) - 1] = 0; | |
326 | ||
327 | if (sscanf(lbuf, "%u", &var) != 1) | |
328 | return -EINVAL; | |
329 | ||
330 | if (var > ent->size) | |
331 | return -EINVAL; | |
332 | ||
333 | ent->limit = var; | |
334 | ||
335 | if (ent->cur < ent->limit) { | |
336 | err = add_keys(dev, c, 2 * ent->limit - ent->cur); | |
337 | if (err) | |
338 | return err; | |
339 | } | |
340 | ||
341 | return count; | |
342 | } | |
343 | ||
344 | static ssize_t limit_read(struct file *filp, char __user *buf, size_t count, | |
345 | loff_t *pos) | |
346 | { | |
347 | struct mlx5_cache_ent *ent = filp->private_data; | |
348 | char lbuf[20]; | |
349 | int err; | |
350 | ||
351 | if (*pos) | |
352 | return 0; | |
353 | ||
354 | err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); | |
355 | if (err < 0) | |
356 | return err; | |
357 | ||
358 | if (copy_to_user(buf, lbuf, err)) | |
5e631a03 | 359 | return -EFAULT; |
e126ba97 EC |
360 | |
361 | *pos += err; | |
362 | ||
363 | return err; | |
364 | } | |
365 | ||
366 | static const struct file_operations limit_fops = { | |
367 | .owner = THIS_MODULE, | |
368 | .open = simple_open, | |
369 | .write = limit_write, | |
370 | .read = limit_read, | |
371 | }; | |
372 | ||
373 | static int someone_adding(struct mlx5_mr_cache *cache) | |
374 | { | |
375 | int i; | |
376 | ||
377 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { | |
378 | if (cache->ent[i].cur < cache->ent[i].limit) | |
379 | return 1; | |
380 | } | |
381 | ||
382 | return 0; | |
383 | } | |
384 | ||
385 | static void __cache_work_func(struct mlx5_cache_ent *ent) | |
386 | { | |
387 | struct mlx5_ib_dev *dev = ent->dev; | |
388 | struct mlx5_mr_cache *cache = &dev->cache; | |
389 | int i = order2idx(dev, ent->order); | |
746b5583 | 390 | int err; |
e126ba97 EC |
391 | |
392 | if (cache->stopped) | |
393 | return; | |
394 | ||
395 | ent = &dev->cache.ent[i]; | |
746b5583 EC |
396 | if (ent->cur < 2 * ent->limit && !dev->fill_delay) { |
397 | err = add_keys(dev, i, 1); | |
398 | if (ent->cur < 2 * ent->limit) { | |
399 | if (err == -EAGAIN) { | |
400 | mlx5_ib_dbg(dev, "returned eagain, order %d\n", | |
401 | i + 2); | |
402 | queue_delayed_work(cache->wq, &ent->dwork, | |
403 | msecs_to_jiffies(3)); | |
404 | } else if (err) { | |
405 | mlx5_ib_warn(dev, "command failed order %d, err %d\n", | |
406 | i + 2, err); | |
407 | queue_delayed_work(cache->wq, &ent->dwork, | |
408 | msecs_to_jiffies(1000)); | |
409 | } else { | |
410 | queue_work(cache->wq, &ent->work); | |
411 | } | |
412 | } | |
e126ba97 | 413 | } else if (ent->cur > 2 * ent->limit) { |
ab5cdc31 LR |
414 | /* |
415 | * The remove_keys() logic is performed as garbage collection | |
416 | * task. Such task is intended to be run when no other active | |
417 | * processes are running. | |
418 | * | |
419 | * The need_resched() will return TRUE if there are user tasks | |
420 | * to be activated in near future. | |
421 | * | |
422 | * In such case, we don't execute remove_keys() and postpone | |
423 | * the garbage collection work to try to run in next cycle, | |
424 | * in order to free CPU resources to other tasks. | |
425 | */ | |
426 | if (!need_resched() && !someone_adding(cache) && | |
746b5583 | 427 | time_after(jiffies, cache->last_add + 300 * HZ)) { |
e126ba97 EC |
428 | remove_keys(dev, i, 1); |
429 | if (ent->cur > ent->limit) | |
430 | queue_work(cache->wq, &ent->work); | |
431 | } else { | |
746b5583 | 432 | queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); |
e126ba97 EC |
433 | } |
434 | } | |
435 | } | |
436 | ||
437 | static void delayed_cache_work_func(struct work_struct *work) | |
438 | { | |
439 | struct mlx5_cache_ent *ent; | |
440 | ||
441 | ent = container_of(work, struct mlx5_cache_ent, dwork.work); | |
442 | __cache_work_func(ent); | |
443 | } | |
444 | ||
445 | static void cache_work_func(struct work_struct *work) | |
446 | { | |
447 | struct mlx5_cache_ent *ent; | |
448 | ||
449 | ent = container_of(work, struct mlx5_cache_ent, work); | |
450 | __cache_work_func(ent); | |
451 | } | |
452 | ||
49780d42 AK |
453 | struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) |
454 | { | |
455 | struct mlx5_mr_cache *cache = &dev->cache; | |
456 | struct mlx5_cache_ent *ent; | |
457 | struct mlx5_ib_mr *mr; | |
458 | int err; | |
459 | ||
460 | if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) { | |
461 | mlx5_ib_err(dev, "cache entry %d is out of range\n", entry); | |
462 | return NULL; | |
463 | } | |
464 | ||
465 | ent = &cache->ent[entry]; | |
466 | while (1) { | |
467 | spin_lock_irq(&ent->lock); | |
468 | if (list_empty(&ent->head)) { | |
469 | spin_unlock_irq(&ent->lock); | |
470 | ||
471 | err = add_keys(dev, entry, 1); | |
472 | if (err) | |
473 | return ERR_PTR(err); | |
474 | ||
475 | wait_for_completion(&ent->compl); | |
476 | } else { | |
477 | mr = list_first_entry(&ent->head, struct mlx5_ib_mr, | |
478 | list); | |
479 | list_del(&mr->list); | |
480 | ent->cur--; | |
481 | spin_unlock_irq(&ent->lock); | |
482 | if (ent->cur < ent->limit) | |
483 | queue_work(cache->wq, &ent->work); | |
484 | return mr; | |
485 | } | |
486 | } | |
487 | } | |
488 | ||
e126ba97 EC |
489 | static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) |
490 | { | |
491 | struct mlx5_mr_cache *cache = &dev->cache; | |
492 | struct mlx5_ib_mr *mr = NULL; | |
493 | struct mlx5_cache_ent *ent; | |
494 | int c; | |
495 | int i; | |
496 | ||
497 | c = order2idx(dev, order); | |
49780d42 | 498 | if (c < 0 || c > MAX_UMR_CACHE_ENTRY) { |
e126ba97 EC |
499 | mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); |
500 | return NULL; | |
501 | } | |
502 | ||
49780d42 | 503 | for (i = c; i < MAX_UMR_CACHE_ENTRY; i++) { |
e126ba97 EC |
504 | ent = &cache->ent[i]; |
505 | ||
506 | mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); | |
507 | ||
746b5583 | 508 | spin_lock_irq(&ent->lock); |
e126ba97 EC |
509 | if (!list_empty(&ent->head)) { |
510 | mr = list_first_entry(&ent->head, struct mlx5_ib_mr, | |
511 | list); | |
512 | list_del(&mr->list); | |
513 | ent->cur--; | |
746b5583 | 514 | spin_unlock_irq(&ent->lock); |
e126ba97 EC |
515 | if (ent->cur < ent->limit) |
516 | queue_work(cache->wq, &ent->work); | |
517 | break; | |
518 | } | |
746b5583 | 519 | spin_unlock_irq(&ent->lock); |
e126ba97 EC |
520 | |
521 | queue_work(cache->wq, &ent->work); | |
e126ba97 EC |
522 | } |
523 | ||
524 | if (!mr) | |
525 | cache->ent[c].miss++; | |
526 | ||
527 | return mr; | |
528 | } | |
529 | ||
49780d42 | 530 | void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
e126ba97 EC |
531 | { |
532 | struct mlx5_mr_cache *cache = &dev->cache; | |
533 | struct mlx5_cache_ent *ent; | |
534 | int shrink = 0; | |
535 | int c; | |
536 | ||
537 | c = order2idx(dev, mr->order); | |
538 | if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { | |
539 | mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); | |
540 | return; | |
541 | } | |
49780d42 AK |
542 | |
543 | if (unreg_umr(dev, mr)) | |
544 | return; | |
545 | ||
e126ba97 | 546 | ent = &cache->ent[c]; |
746b5583 | 547 | spin_lock_irq(&ent->lock); |
e126ba97 EC |
548 | list_add_tail(&mr->list, &ent->head); |
549 | ent->cur++; | |
550 | if (ent->cur > 2 * ent->limit) | |
551 | shrink = 1; | |
746b5583 | 552 | spin_unlock_irq(&ent->lock); |
e126ba97 EC |
553 | |
554 | if (shrink) | |
555 | queue_work(cache->wq, &ent->work); | |
556 | } | |
557 | ||
558 | static void clean_keys(struct mlx5_ib_dev *dev, int c) | |
559 | { | |
e126ba97 EC |
560 | struct mlx5_mr_cache *cache = &dev->cache; |
561 | struct mlx5_cache_ent *ent = &cache->ent[c]; | |
562 | struct mlx5_ib_mr *mr; | |
e126ba97 EC |
563 | int err; |
564 | ||
3c461911 | 565 | cancel_delayed_work(&ent->dwork); |
e126ba97 | 566 | while (1) { |
746b5583 | 567 | spin_lock_irq(&ent->lock); |
e126ba97 | 568 | if (list_empty(&ent->head)) { |
746b5583 | 569 | spin_unlock_irq(&ent->lock); |
e126ba97 EC |
570 | return; |
571 | } | |
572 | mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); | |
573 | list_del(&mr->list); | |
574 | ent->cur--; | |
575 | ent->size--; | |
746b5583 | 576 | spin_unlock_irq(&ent->lock); |
b4cfe447 | 577 | err = destroy_mkey(dev, mr); |
203099fd | 578 | if (err) |
e126ba97 | 579 | mlx5_ib_warn(dev, "failed destroy mkey\n"); |
203099fd | 580 | else |
e126ba97 | 581 | kfree(mr); |
e126ba97 EC |
582 | } |
583 | } | |
584 | ||
585 | static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) | |
586 | { | |
587 | struct mlx5_mr_cache *cache = &dev->cache; | |
588 | struct mlx5_cache_ent *ent; | |
589 | int i; | |
590 | ||
591 | if (!mlx5_debugfs_root) | |
592 | return 0; | |
593 | ||
9603b61d | 594 | cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); |
e126ba97 EC |
595 | if (!cache->root) |
596 | return -ENOMEM; | |
597 | ||
598 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { | |
599 | ent = &cache->ent[i]; | |
600 | sprintf(ent->name, "%d", ent->order); | |
601 | ent->dir = debugfs_create_dir(ent->name, cache->root); | |
602 | if (!ent->dir) | |
603 | return -ENOMEM; | |
604 | ||
605 | ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, | |
606 | &size_fops); | |
607 | if (!ent->fsize) | |
608 | return -ENOMEM; | |
609 | ||
610 | ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, | |
611 | &limit_fops); | |
612 | if (!ent->flimit) | |
613 | return -ENOMEM; | |
614 | ||
615 | ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, | |
616 | &ent->cur); | |
617 | if (!ent->fcur) | |
618 | return -ENOMEM; | |
619 | ||
620 | ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, | |
621 | &ent->miss); | |
622 | if (!ent->fmiss) | |
623 | return -ENOMEM; | |
624 | } | |
625 | ||
626 | return 0; | |
627 | } | |
628 | ||
629 | static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) | |
630 | { | |
631 | if (!mlx5_debugfs_root) | |
632 | return; | |
633 | ||
634 | debugfs_remove_recursive(dev->cache.root); | |
635 | } | |
636 | ||
746b5583 EC |
637 | static void delay_time_func(unsigned long ctx) |
638 | { | |
639 | struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx; | |
640 | ||
641 | dev->fill_delay = 0; | |
642 | } | |
643 | ||
e126ba97 EC |
644 | int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) |
645 | { | |
646 | struct mlx5_mr_cache *cache = &dev->cache; | |
647 | struct mlx5_cache_ent *ent; | |
e126ba97 EC |
648 | int err; |
649 | int i; | |
650 | ||
6bc1a656 | 651 | mutex_init(&dev->slow_path_mutex); |
3c856c82 | 652 | cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); |
e126ba97 EC |
653 | if (!cache->wq) { |
654 | mlx5_ib_warn(dev, "failed to create work queue\n"); | |
655 | return -ENOMEM; | |
656 | } | |
657 | ||
746b5583 | 658 | setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev); |
e126ba97 | 659 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { |
e126ba97 EC |
660 | ent = &cache->ent[i]; |
661 | INIT_LIST_HEAD(&ent->head); | |
662 | spin_lock_init(&ent->lock); | |
663 | ent->order = i + 2; | |
664 | ent->dev = dev; | |
49780d42 | 665 | ent->limit = 0; |
e126ba97 | 666 | |
49780d42 | 667 | init_completion(&ent->compl); |
e126ba97 EC |
668 | INIT_WORK(&ent->work, cache_work_func); |
669 | INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); | |
e126ba97 | 670 | queue_work(cache->wq, &ent->work); |
49780d42 AK |
671 | |
672 | if (i > MAX_UMR_CACHE_ENTRY) | |
673 | continue; | |
674 | ||
675 | if (!use_umr(dev, ent->order)) | |
676 | continue; | |
677 | ||
678 | ent->page = PAGE_SHIFT; | |
679 | ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / | |
680 | MLX5_IB_UMR_OCTOWORD; | |
681 | ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; | |
682 | if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && | |
683 | mlx5_core_is_pf(dev->mdev)) | |
684 | ent->limit = dev->mdev->profile->mr_cache[i].limit; | |
685 | else | |
686 | ent->limit = 0; | |
e126ba97 EC |
687 | } |
688 | ||
689 | err = mlx5_mr_cache_debugfs_init(dev); | |
690 | if (err) | |
691 | mlx5_ib_warn(dev, "cache debugfs failure\n"); | |
692 | ||
693 | return 0; | |
694 | } | |
695 | ||
acbda523 EC |
696 | static void wait_for_async_commands(struct mlx5_ib_dev *dev) |
697 | { | |
698 | struct mlx5_mr_cache *cache = &dev->cache; | |
699 | struct mlx5_cache_ent *ent; | |
700 | int total = 0; | |
701 | int i; | |
702 | int j; | |
703 | ||
704 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { | |
705 | ent = &cache->ent[i]; | |
706 | for (j = 0 ; j < 1000; j++) { | |
707 | if (!ent->pending) | |
708 | break; | |
709 | msleep(50); | |
710 | } | |
711 | } | |
712 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { | |
713 | ent = &cache->ent[i]; | |
714 | total += ent->pending; | |
715 | } | |
716 | ||
717 | if (total) | |
718 | mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total); | |
719 | else | |
720 | mlx5_ib_warn(dev, "done with all pending requests\n"); | |
721 | } | |
722 | ||
e126ba97 EC |
723 | int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) |
724 | { | |
725 | int i; | |
726 | ||
727 | dev->cache.stopped = 1; | |
3c461911 | 728 | flush_workqueue(dev->cache.wq); |
e126ba97 EC |
729 | |
730 | mlx5_mr_cache_debugfs_cleanup(dev); | |
731 | ||
732 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) | |
733 | clean_keys(dev, i); | |
734 | ||
3c461911 | 735 | destroy_workqueue(dev->cache.wq); |
acbda523 | 736 | wait_for_async_commands(dev); |
746b5583 | 737 | del_timer_sync(&dev->delay_timer); |
3c461911 | 738 | |
e126ba97 EC |
739 | return 0; |
740 | } | |
741 | ||
742 | struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) | |
743 | { | |
744 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
ec22eb53 | 745 | int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); |
9603b61d | 746 | struct mlx5_core_dev *mdev = dev->mdev; |
e126ba97 | 747 | struct mlx5_ib_mr *mr; |
ec22eb53 SM |
748 | void *mkc; |
749 | u32 *in; | |
e126ba97 EC |
750 | int err; |
751 | ||
752 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | |
753 | if (!mr) | |
754 | return ERR_PTR(-ENOMEM); | |
755 | ||
ec22eb53 | 756 | in = kzalloc(inlen, GFP_KERNEL); |
e126ba97 EC |
757 | if (!in) { |
758 | err = -ENOMEM; | |
759 | goto err_free; | |
760 | } | |
761 | ||
ec22eb53 SM |
762 | mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); |
763 | ||
764 | MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA); | |
765 | MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC)); | |
766 | MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE)); | |
767 | MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ)); | |
768 | MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); | |
769 | MLX5_SET(mkc, mkc, lr, 1); | |
e126ba97 | 770 | |
ec22eb53 SM |
771 | MLX5_SET(mkc, mkc, length64, 1); |
772 | MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); | |
773 | MLX5_SET(mkc, mkc, qpn, 0xffffff); | |
774 | MLX5_SET64(mkc, mkc, start_addr, 0); | |
775 | ||
776 | err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen); | |
e126ba97 EC |
777 | if (err) |
778 | goto err_in; | |
779 | ||
780 | kfree(in); | |
aa8e08d2 | 781 | mr->mmkey.type = MLX5_MKEY_MR; |
a606b0f6 MB |
782 | mr->ibmr.lkey = mr->mmkey.key; |
783 | mr->ibmr.rkey = mr->mmkey.key; | |
e126ba97 EC |
784 | mr->umem = NULL; |
785 | ||
786 | return &mr->ibmr; | |
787 | ||
788 | err_in: | |
789 | kfree(in); | |
790 | ||
791 | err_free: | |
792 | kfree(mr); | |
793 | ||
794 | return ERR_PTR(err); | |
795 | } | |
796 | ||
797 | static int get_octo_len(u64 addr, u64 len, int page_size) | |
798 | { | |
799 | u64 offset; | |
800 | int npages; | |
801 | ||
802 | offset = addr & (page_size - 1); | |
803 | npages = ALIGN(len + offset, page_size) >> ilog2(page_size); | |
804 | return (npages + 1) / 2; | |
805 | } | |
806 | ||
7d0cc6ed | 807 | static int use_umr(struct mlx5_ib_dev *dev, int order) |
e126ba97 | 808 | { |
7d0cc6ed | 809 | if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) |
49780d42 | 810 | return order <= MAX_UMR_CACHE_ENTRY + 2; |
cc149f75 | 811 | return order <= MLX5_MAX_UMR_SHIFT; |
e126ba97 EC |
812 | } |
813 | ||
14ab8896 AB |
814 | static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, |
815 | int access_flags, struct ib_umem **umem, | |
816 | int *npages, int *page_shift, int *ncont, | |
817 | int *order) | |
395a8e4c NO |
818 | { |
819 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
14ab8896 AB |
820 | int err; |
821 | ||
822 | *umem = ib_umem_get(pd->uobject->context, start, length, | |
823 | access_flags, 0); | |
824 | err = PTR_ERR_OR_ZERO(*umem); | |
825 | if (err < 0) { | |
395a8e4c | 826 | mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); |
14ab8896 | 827 | return err; |
395a8e4c NO |
828 | } |
829 | ||
14ab8896 | 830 | mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, |
762f899a | 831 | page_shift, ncont, order); |
395a8e4c NO |
832 | if (!*npages) { |
833 | mlx5_ib_warn(dev, "avoid zero region\n"); | |
14ab8896 AB |
834 | ib_umem_release(*umem); |
835 | return -EINVAL; | |
395a8e4c NO |
836 | } |
837 | ||
838 | mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", | |
839 | *npages, *ncont, *order, *page_shift); | |
840 | ||
14ab8896 | 841 | return 0; |
395a8e4c NO |
842 | } |
843 | ||
add08d76 | 844 | static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) |
e126ba97 | 845 | { |
add08d76 CH |
846 | struct mlx5_ib_umr_context *context = |
847 | container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); | |
e126ba97 | 848 | |
add08d76 CH |
849 | context->status = wc->status; |
850 | complete(&context->done); | |
851 | } | |
e126ba97 | 852 | |
add08d76 CH |
853 | static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) |
854 | { | |
855 | context->cqe.done = mlx5_ib_umr_done; | |
856 | context->status = -1; | |
857 | init_completion(&context->done); | |
e126ba97 EC |
858 | } |
859 | ||
d5ea2df9 BJ |
860 | static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, |
861 | struct mlx5_umr_wr *umrwr) | |
862 | { | |
863 | struct umr_common *umrc = &dev->umrc; | |
864 | struct ib_send_wr *bad; | |
865 | int err; | |
866 | struct mlx5_ib_umr_context umr_context; | |
867 | ||
868 | mlx5_ib_init_umr_context(&umr_context); | |
869 | umrwr->wr.wr_cqe = &umr_context.cqe; | |
870 | ||
871 | down(&umrc->sem); | |
872 | err = ib_post_send(umrc->qp, &umrwr->wr, &bad); | |
873 | if (err) { | |
874 | mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err); | |
875 | } else { | |
876 | wait_for_completion(&umr_context.done); | |
877 | if (umr_context.status != IB_WC_SUCCESS) { | |
878 | mlx5_ib_warn(dev, "reg umr failed (%u)\n", | |
879 | umr_context.status); | |
880 | err = -EFAULT; | |
881 | } | |
882 | } | |
883 | up(&umrc->sem); | |
884 | return err; | |
885 | } | |
886 | ||
e126ba97 EC |
887 | static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, |
888 | u64 virt_addr, u64 len, int npages, | |
889 | int page_shift, int order, int access_flags) | |
890 | { | |
891 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
e126ba97 | 892 | struct mlx5_ib_mr *mr; |
096f7e72 | 893 | int err = 0; |
e126ba97 EC |
894 | int i; |
895 | ||
746b5583 | 896 | for (i = 0; i < 1; i++) { |
e126ba97 EC |
897 | mr = alloc_cached_mr(dev, order); |
898 | if (mr) | |
899 | break; | |
900 | ||
901 | err = add_keys(dev, order2idx(dev, order), 1); | |
746b5583 EC |
902 | if (err && err != -EAGAIN) { |
903 | mlx5_ib_warn(dev, "add_keys failed, err %d\n", err); | |
e126ba97 EC |
904 | break; |
905 | } | |
906 | } | |
907 | ||
908 | if (!mr) | |
909 | return ERR_PTR(-EAGAIN); | |
910 | ||
7d0cc6ed AK |
911 | mr->ibmr.pd = pd; |
912 | mr->umem = umem; | |
913 | mr->access_flags = access_flags; | |
914 | mr->desc_size = sizeof(struct mlx5_mtt); | |
a606b0f6 MB |
915 | mr->mmkey.iova = virt_addr; |
916 | mr->mmkey.size = len; | |
917 | mr->mmkey.pd = to_mpd(pd)->pdn; | |
b475598a | 918 | |
7d0cc6ed AK |
919 | err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, |
920 | MLX5_IB_UPD_XLT_ENABLE); | |
096f7e72 | 921 | |
096f7e72 | 922 | if (err) { |
49780d42 | 923 | mlx5_mr_cache_free(dev, mr); |
096f7e72 | 924 | return ERR_PTR(err); |
e126ba97 EC |
925 | } |
926 | ||
7d0cc6ed AK |
927 | mr->live = 1; |
928 | ||
e126ba97 | 929 | return mr; |
e126ba97 EC |
930 | } |
931 | ||
7d0cc6ed AK |
932 | static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages, |
933 | void *xlt, int page_shift, size_t size, | |
934 | int flags) | |
832a6b06 HE |
935 | { |
936 | struct mlx5_ib_dev *dev = mr->dev; | |
832a6b06 | 937 | struct ib_umem *umem = mr->umem; |
7d0cc6ed AK |
938 | |
939 | npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx); | |
940 | ||
941 | if (!(flags & MLX5_IB_UPD_XLT_ZAP)) { | |
942 | __mlx5_ib_populate_pas(dev, umem, page_shift, | |
943 | idx, npages, xlt, | |
944 | MLX5_IB_MTT_PRESENT); | |
945 | /* Clear padding after the pages | |
946 | * brought from the umem. | |
947 | */ | |
948 | memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0, | |
949 | size - npages * sizeof(struct mlx5_mtt)); | |
950 | } | |
951 | ||
952 | return npages; | |
953 | } | |
954 | ||
955 | #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \ | |
956 | MLX5_UMR_MTT_ALIGNMENT) | |
957 | #define MLX5_SPARE_UMR_CHUNK 0x10000 | |
958 | ||
959 | int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, | |
960 | int page_shift, int flags) | |
961 | { | |
962 | struct mlx5_ib_dev *dev = mr->dev; | |
963 | struct device *ddev = dev->ib_dev.dma_device; | |
964 | struct mlx5_ib_ucontext *uctx = NULL; | |
832a6b06 | 965 | int size; |
7d0cc6ed | 966 | void *xlt; |
832a6b06 | 967 | dma_addr_t dma; |
e622f2f4 | 968 | struct mlx5_umr_wr wr; |
832a6b06 HE |
969 | struct ib_sge sg; |
970 | int err = 0; | |
7d0cc6ed AK |
971 | int desc_size = sizeof(struct mlx5_mtt); |
972 | const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size; | |
973 | const int page_mask = page_align - 1; | |
832a6b06 HE |
974 | size_t pages_mapped = 0; |
975 | size_t pages_to_map = 0; | |
976 | size_t pages_iter = 0; | |
7d0cc6ed | 977 | gfp_t gfp; |
832a6b06 HE |
978 | |
979 | /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes, | |
7d0cc6ed AK |
980 | * so we need to align the offset and length accordingly |
981 | */ | |
982 | if (idx & page_mask) { | |
983 | npages += idx & page_mask; | |
984 | idx &= ~page_mask; | |
832a6b06 HE |
985 | } |
986 | ||
7d0cc6ed AK |
987 | gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL; |
988 | gfp |= __GFP_ZERO | __GFP_NOWARN; | |
832a6b06 | 989 | |
7d0cc6ed AK |
990 | pages_to_map = ALIGN(npages, page_align); |
991 | size = desc_size * pages_to_map; | |
992 | size = min_t(int, size, MLX5_MAX_UMR_CHUNK); | |
832a6b06 | 993 | |
7d0cc6ed AK |
994 | xlt = (void *)__get_free_pages(gfp, get_order(size)); |
995 | if (!xlt && size > MLX5_SPARE_UMR_CHUNK) { | |
996 | mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation od %d bytes\n", | |
997 | size, get_order(size), MLX5_SPARE_UMR_CHUNK); | |
998 | ||
999 | size = MLX5_SPARE_UMR_CHUNK; | |
1000 | xlt = (void *)__get_free_pages(gfp, get_order(size)); | |
832a6b06 | 1001 | } |
7d0cc6ed AK |
1002 | |
1003 | if (!xlt) { | |
1004 | uctx = to_mucontext(mr->ibmr.uobject->context); | |
1005 | mlx5_ib_warn(dev, "Using XLT emergency buffer\n"); | |
1006 | size = PAGE_SIZE; | |
1007 | xlt = (void *)uctx->upd_xlt_page; | |
1008 | mutex_lock(&uctx->upd_xlt_page_mutex); | |
1009 | memset(xlt, 0, size); | |
1010 | } | |
1011 | pages_iter = size / desc_size; | |
1012 | dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE); | |
832a6b06 | 1013 | if (dma_mapping_error(ddev, dma)) { |
7d0cc6ed | 1014 | mlx5_ib_err(dev, "unable to map DMA during XLT update.\n"); |
832a6b06 | 1015 | err = -ENOMEM; |
7d0cc6ed | 1016 | goto free_xlt; |
832a6b06 HE |
1017 | } |
1018 | ||
7d0cc6ed AK |
1019 | sg.addr = dma; |
1020 | sg.lkey = dev->umrc.pd->local_dma_lkey; | |
1021 | ||
1022 | memset(&wr, 0, sizeof(wr)); | |
1023 | wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT; | |
1024 | if (!(flags & MLX5_IB_UPD_XLT_ENABLE)) | |
1025 | wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE; | |
1026 | wr.wr.sg_list = &sg; | |
1027 | wr.wr.num_sge = 1; | |
1028 | wr.wr.opcode = MLX5_IB_WR_UMR; | |
1029 | ||
1030 | wr.pd = mr->ibmr.pd; | |
1031 | wr.mkey = mr->mmkey.key; | |
1032 | wr.length = mr->mmkey.size; | |
1033 | wr.virt_addr = mr->mmkey.iova; | |
1034 | wr.access_flags = mr->access_flags; | |
1035 | wr.page_shift = page_shift; | |
1036 | ||
832a6b06 HE |
1037 | for (pages_mapped = 0; |
1038 | pages_mapped < pages_to_map && !err; | |
7d0cc6ed | 1039 | pages_mapped += pages_iter, idx += pages_iter) { |
832a6b06 | 1040 | dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE); |
7d0cc6ed AK |
1041 | npages = populate_xlt(mr, idx, pages_iter, xlt, |
1042 | page_shift, size, flags); | |
832a6b06 HE |
1043 | |
1044 | dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); | |
1045 | ||
7d0cc6ed AK |
1046 | sg.length = ALIGN(npages * desc_size, |
1047 | MLX5_UMR_MTT_ALIGNMENT); | |
1048 | ||
1049 | if (pages_mapped + pages_iter >= pages_to_map) { | |
1050 | if (flags & MLX5_IB_UPD_XLT_ENABLE) | |
1051 | wr.wr.send_flags |= | |
1052 | MLX5_IB_SEND_UMR_ENABLE_MR | | |
1053 | MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS | | |
1054 | MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; | |
1055 | if (flags & MLX5_IB_UPD_XLT_PD || | |
1056 | flags & MLX5_IB_UPD_XLT_ACCESS) | |
1057 | wr.wr.send_flags |= | |
1058 | MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; | |
1059 | if (flags & MLX5_IB_UPD_XLT_ADDR) | |
1060 | wr.wr.send_flags |= | |
1061 | MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; | |
1062 | } | |
832a6b06 | 1063 | |
7d0cc6ed | 1064 | wr.offset = idx * desc_size; |
31616255 | 1065 | wr.xlt_size = sg.length; |
832a6b06 | 1066 | |
d5ea2df9 | 1067 | err = mlx5_ib_post_send_wait(dev, &wr); |
832a6b06 HE |
1068 | } |
1069 | dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); | |
1070 | ||
7d0cc6ed AK |
1071 | free_xlt: |
1072 | if (uctx) | |
1073 | mutex_unlock(&uctx->upd_xlt_page_mutex); | |
832a6b06 | 1074 | else |
7d0cc6ed | 1075 | free_pages((unsigned long)xlt, get_order(size)); |
832a6b06 HE |
1076 | |
1077 | return err; | |
1078 | } | |
832a6b06 | 1079 | |
395a8e4c NO |
1080 | /* |
1081 | * If ibmr is NULL it will be allocated by reg_create. | |
1082 | * Else, the given ibmr will be used. | |
1083 | */ | |
1084 | static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, | |
1085 | u64 virt_addr, u64 length, | |
1086 | struct ib_umem *umem, int npages, | |
1087 | int page_shift, int access_flags) | |
e126ba97 EC |
1088 | { |
1089 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
e126ba97 | 1090 | struct mlx5_ib_mr *mr; |
ec22eb53 SM |
1091 | __be64 *pas; |
1092 | void *mkc; | |
e126ba97 | 1093 | int inlen; |
ec22eb53 | 1094 | u32 *in; |
e126ba97 | 1095 | int err; |
938fe83c | 1096 | bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); |
e126ba97 | 1097 | |
395a8e4c | 1098 | mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL); |
e126ba97 EC |
1099 | if (!mr) |
1100 | return ERR_PTR(-ENOMEM); | |
1101 | ||
ec22eb53 SM |
1102 | inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + |
1103 | sizeof(*pas) * ((npages + 1) / 2) * 2; | |
e126ba97 EC |
1104 | in = mlx5_vzalloc(inlen); |
1105 | if (!in) { | |
1106 | err = -ENOMEM; | |
1107 | goto err_1; | |
1108 | } | |
ec22eb53 | 1109 | pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); |
c438fde1 AK |
1110 | if (!(access_flags & IB_ACCESS_ON_DEMAND)) |
1111 | mlx5_ib_populate_pas(dev, umem, page_shift, pas, | |
1112 | pg_cap ? MLX5_IB_MTT_PRESENT : 0); | |
e126ba97 | 1113 | |
ec22eb53 | 1114 | /* The pg_access bit allows setting the access flags |
cc149f75 | 1115 | * in the page list submitted with the command. */ |
ec22eb53 SM |
1116 | MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap)); |
1117 | ||
1118 | mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); | |
1119 | MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); | |
1120 | MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); | |
1121 | MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); | |
1122 | MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ)); | |
1123 | MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE)); | |
1124 | MLX5_SET(mkc, mkc, lr, 1); | |
1125 | ||
1126 | MLX5_SET64(mkc, mkc, start_addr, virt_addr); | |
1127 | MLX5_SET64(mkc, mkc, len, length); | |
1128 | MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); | |
1129 | MLX5_SET(mkc, mkc, bsf_octword_size, 0); | |
1130 | MLX5_SET(mkc, mkc, translations_octword_size, | |
1131 | get_octo_len(virt_addr, length, 1 << page_shift)); | |
1132 | MLX5_SET(mkc, mkc, log_page_size, page_shift); | |
1133 | MLX5_SET(mkc, mkc, qpn, 0xffffff); | |
1134 | MLX5_SET(create_mkey_in, in, translations_octword_actual_size, | |
1135 | get_octo_len(virt_addr, length, 1 << page_shift)); | |
1136 | ||
1137 | err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); | |
e126ba97 EC |
1138 | if (err) { |
1139 | mlx5_ib_warn(dev, "create mkey failed\n"); | |
1140 | goto err_2; | |
1141 | } | |
aa8e08d2 | 1142 | mr->mmkey.type = MLX5_MKEY_MR; |
49780d42 | 1143 | mr->desc_size = sizeof(struct mlx5_mtt); |
e126ba97 | 1144 | mr->umem = umem; |
7eae20db | 1145 | mr->dev = dev; |
b4cfe447 | 1146 | mr->live = 1; |
479163f4 | 1147 | kvfree(in); |
e126ba97 | 1148 | |
a606b0f6 | 1149 | mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); |
e126ba97 EC |
1150 | |
1151 | return mr; | |
1152 | ||
1153 | err_2: | |
479163f4 | 1154 | kvfree(in); |
e126ba97 EC |
1155 | |
1156 | err_1: | |
395a8e4c NO |
1157 | if (!ibmr) |
1158 | kfree(mr); | |
e126ba97 EC |
1159 | |
1160 | return ERR_PTR(err); | |
1161 | } | |
1162 | ||
395a8e4c NO |
1163 | static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, |
1164 | int npages, u64 length, int access_flags) | |
1165 | { | |
1166 | mr->npages = npages; | |
1167 | atomic_add(npages, &dev->mdev->priv.reg_pages); | |
a606b0f6 MB |
1168 | mr->ibmr.lkey = mr->mmkey.key; |
1169 | mr->ibmr.rkey = mr->mmkey.key; | |
395a8e4c | 1170 | mr->ibmr.length = length; |
56e11d62 | 1171 | mr->access_flags = access_flags; |
395a8e4c NO |
1172 | } |
1173 | ||
e126ba97 EC |
1174 | struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
1175 | u64 virt_addr, int access_flags, | |
1176 | struct ib_udata *udata) | |
1177 | { | |
1178 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
1179 | struct mlx5_ib_mr *mr = NULL; | |
1180 | struct ib_umem *umem; | |
1181 | int page_shift; | |
1182 | int npages; | |
1183 | int ncont; | |
1184 | int order; | |
1185 | int err; | |
1186 | ||
900a6d79 EC |
1187 | mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", |
1188 | start, virt_addr, length, access_flags); | |
14ab8896 | 1189 | err = mr_umem_get(pd, start, length, access_flags, &umem, &npages, |
395a8e4c | 1190 | &page_shift, &ncont, &order); |
e126ba97 | 1191 | |
14ab8896 AB |
1192 | if (err < 0) |
1193 | return ERR_PTR(err); | |
e126ba97 | 1194 | |
7d0cc6ed | 1195 | if (use_umr(dev, order)) { |
e126ba97 EC |
1196 | mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, |
1197 | order, access_flags); | |
1198 | if (PTR_ERR(mr) == -EAGAIN) { | |
1199 | mlx5_ib_dbg(dev, "cache empty for order %d", order); | |
1200 | mr = NULL; | |
1201 | } | |
c438fde1 AK |
1202 | } else if (access_flags & IB_ACCESS_ON_DEMAND && |
1203 | !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { | |
6aec21f6 HE |
1204 | err = -EINVAL; |
1205 | pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB"); | |
1206 | goto error; | |
e126ba97 EC |
1207 | } |
1208 | ||
6bc1a656 ML |
1209 | if (!mr) { |
1210 | mutex_lock(&dev->slow_path_mutex); | |
395a8e4c NO |
1211 | mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, |
1212 | page_shift, access_flags); | |
6bc1a656 ML |
1213 | mutex_unlock(&dev->slow_path_mutex); |
1214 | } | |
e126ba97 EC |
1215 | |
1216 | if (IS_ERR(mr)) { | |
1217 | err = PTR_ERR(mr); | |
1218 | goto error; | |
1219 | } | |
1220 | ||
a606b0f6 | 1221 | mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); |
e126ba97 EC |
1222 | |
1223 | mr->umem = umem; | |
395a8e4c | 1224 | set_mr_fileds(dev, mr, npages, length, access_flags); |
e126ba97 | 1225 | |
b4cfe447 | 1226 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
395a8e4c | 1227 | update_odp_mr(mr); |
b4cfe447 HE |
1228 | #endif |
1229 | ||
e126ba97 EC |
1230 | return &mr->ibmr; |
1231 | ||
1232 | error: | |
1233 | ib_umem_release(umem); | |
1234 | return ERR_PTR(err); | |
1235 | } | |
1236 | ||
1237 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |
1238 | { | |
89ea94a7 | 1239 | struct mlx5_core_dev *mdev = dev->mdev; |
0025b0bd | 1240 | struct mlx5_umr_wr umrwr = {}; |
e126ba97 | 1241 | |
89ea94a7 MG |
1242 | if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) |
1243 | return 0; | |
1244 | ||
7d0cc6ed AK |
1245 | umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | |
1246 | MLX5_IB_SEND_UMR_FAIL_IF_FREE; | |
1247 | umrwr.wr.opcode = MLX5_IB_WR_UMR; | |
1248 | umrwr.mkey = mr->mmkey.key; | |
e126ba97 | 1249 | |
d5ea2df9 | 1250 | return mlx5_ib_post_send_wait(dev, &umrwr); |
e126ba97 EC |
1251 | } |
1252 | ||
7d0cc6ed | 1253 | static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, |
56e11d62 NO |
1254 | int access_flags, int flags) |
1255 | { | |
1256 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
56e11d62 | 1257 | struct mlx5_umr_wr umrwr = {}; |
56e11d62 NO |
1258 | int err; |
1259 | ||
56e11d62 NO |
1260 | umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; |
1261 | ||
7d0cc6ed AK |
1262 | umrwr.wr.opcode = MLX5_IB_WR_UMR; |
1263 | umrwr.mkey = mr->mmkey.key; | |
56e11d62 | 1264 | |
31616255 | 1265 | if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) { |
56e11d62 | 1266 | umrwr.pd = pd; |
56e11d62 | 1267 | umrwr.access_flags = access_flags; |
31616255 | 1268 | umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; |
56e11d62 NO |
1269 | } |
1270 | ||
d5ea2df9 | 1271 | err = mlx5_ib_post_send_wait(dev, &umrwr); |
56e11d62 | 1272 | |
56e11d62 NO |
1273 | return err; |
1274 | } | |
1275 | ||
1276 | int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, | |
1277 | u64 length, u64 virt_addr, int new_access_flags, | |
1278 | struct ib_pd *new_pd, struct ib_udata *udata) | |
1279 | { | |
1280 | struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); | |
1281 | struct mlx5_ib_mr *mr = to_mmr(ib_mr); | |
1282 | struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd; | |
1283 | int access_flags = flags & IB_MR_REREG_ACCESS ? | |
1284 | new_access_flags : | |
1285 | mr->access_flags; | |
1286 | u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; | |
1287 | u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length; | |
1288 | int page_shift = 0; | |
7d0cc6ed | 1289 | int upd_flags = 0; |
56e11d62 NO |
1290 | int npages = 0; |
1291 | int ncont = 0; | |
1292 | int order = 0; | |
1293 | int err; | |
1294 | ||
1295 | mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", | |
1296 | start, virt_addr, length, access_flags); | |
1297 | ||
7d0cc6ed AK |
1298 | atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); |
1299 | ||
56e11d62 NO |
1300 | if (flags != IB_MR_REREG_PD) { |
1301 | /* | |
1302 | * Replace umem. This needs to be done whether or not UMR is | |
1303 | * used. | |
1304 | */ | |
1305 | flags |= IB_MR_REREG_TRANS; | |
1306 | ib_umem_release(mr->umem); | |
14ab8896 AB |
1307 | err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, |
1308 | &npages, &page_shift, &ncont, &order); | |
1309 | if (err < 0) { | |
7d0cc6ed | 1310 | clean_mr(mr); |
56e11d62 NO |
1311 | return err; |
1312 | } | |
1313 | } | |
1314 | ||
1315 | if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { | |
1316 | /* | |
1317 | * UMR can't be used - MKey needs to be replaced. | |
1318 | */ | |
1319 | if (mr->umred) { | |
1320 | err = unreg_umr(dev, mr); | |
1321 | if (err) | |
1322 | mlx5_ib_warn(dev, "Failed to unregister MR\n"); | |
1323 | } else { | |
1324 | err = destroy_mkey(dev, mr); | |
1325 | if (err) | |
1326 | mlx5_ib_warn(dev, "Failed to destroy MKey\n"); | |
1327 | } | |
1328 | if (err) | |
1329 | return err; | |
1330 | ||
1331 | mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, | |
1332 | page_shift, access_flags); | |
1333 | ||
1334 | if (IS_ERR(mr)) | |
1335 | return PTR_ERR(mr); | |
1336 | ||
1337 | mr->umred = 0; | |
1338 | } else { | |
1339 | /* | |
1340 | * Send a UMR WQE | |
1341 | */ | |
7d0cc6ed AK |
1342 | mr->ibmr.pd = pd; |
1343 | mr->access_flags = access_flags; | |
1344 | mr->mmkey.iova = addr; | |
1345 | mr->mmkey.size = len; | |
1346 | mr->mmkey.pd = to_mpd(pd)->pdn; | |
1347 | ||
1348 | if (flags & IB_MR_REREG_TRANS) { | |
1349 | upd_flags = MLX5_IB_UPD_XLT_ADDR; | |
1350 | if (flags & IB_MR_REREG_PD) | |
1351 | upd_flags |= MLX5_IB_UPD_XLT_PD; | |
1352 | if (flags & IB_MR_REREG_ACCESS) | |
1353 | upd_flags |= MLX5_IB_UPD_XLT_ACCESS; | |
1354 | err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, | |
1355 | upd_flags); | |
1356 | } else { | |
1357 | err = rereg_umr(pd, mr, access_flags, flags); | |
1358 | } | |
1359 | ||
56e11d62 NO |
1360 | if (err) { |
1361 | mlx5_ib_warn(dev, "Failed to rereg UMR\n"); | |
7d0cc6ed AK |
1362 | ib_umem_release(mr->umem); |
1363 | clean_mr(mr); | |
56e11d62 NO |
1364 | return err; |
1365 | } | |
1366 | } | |
1367 | ||
7d0cc6ed | 1368 | set_mr_fileds(dev, mr, npages, len, access_flags); |
56e11d62 | 1369 | |
56e11d62 NO |
1370 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
1371 | update_odp_mr(mr); | |
1372 | #endif | |
56e11d62 NO |
1373 | return 0; |
1374 | } | |
1375 | ||
8a187ee5 SG |
1376 | static int |
1377 | mlx5_alloc_priv_descs(struct ib_device *device, | |
1378 | struct mlx5_ib_mr *mr, | |
1379 | int ndescs, | |
1380 | int desc_size) | |
1381 | { | |
1382 | int size = ndescs * desc_size; | |
1383 | int add_size; | |
1384 | int ret; | |
1385 | ||
1386 | add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); | |
1387 | ||
1388 | mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); | |
1389 | if (!mr->descs_alloc) | |
1390 | return -ENOMEM; | |
1391 | ||
1392 | mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); | |
1393 | ||
1394 | mr->desc_map = dma_map_single(device->dma_device, mr->descs, | |
1395 | size, DMA_TO_DEVICE); | |
1396 | if (dma_mapping_error(device->dma_device, mr->desc_map)) { | |
1397 | ret = -ENOMEM; | |
1398 | goto err; | |
1399 | } | |
1400 | ||
1401 | return 0; | |
1402 | err: | |
1403 | kfree(mr->descs_alloc); | |
1404 | ||
1405 | return ret; | |
1406 | } | |
1407 | ||
1408 | static void | |
1409 | mlx5_free_priv_descs(struct mlx5_ib_mr *mr) | |
1410 | { | |
1411 | if (mr->descs) { | |
1412 | struct ib_device *device = mr->ibmr.device; | |
1413 | int size = mr->max_descs * mr->desc_size; | |
1414 | ||
1415 | dma_unmap_single(device->dma_device, mr->desc_map, | |
1416 | size, DMA_TO_DEVICE); | |
1417 | kfree(mr->descs_alloc); | |
1418 | mr->descs = NULL; | |
1419 | } | |
1420 | } | |
1421 | ||
6aec21f6 | 1422 | static int clean_mr(struct mlx5_ib_mr *mr) |
e126ba97 | 1423 | { |
6aec21f6 | 1424 | struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); |
e126ba97 EC |
1425 | int umred = mr->umred; |
1426 | int err; | |
1427 | ||
8b91ffc1 SG |
1428 | if (mr->sig) { |
1429 | if (mlx5_core_destroy_psv(dev->mdev, | |
1430 | mr->sig->psv_memory.psv_idx)) | |
1431 | mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", | |
1432 | mr->sig->psv_memory.psv_idx); | |
1433 | if (mlx5_core_destroy_psv(dev->mdev, | |
1434 | mr->sig->psv_wire.psv_idx)) | |
1435 | mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", | |
1436 | mr->sig->psv_wire.psv_idx); | |
1437 | kfree(mr->sig); | |
1438 | mr->sig = NULL; | |
1439 | } | |
1440 | ||
8a187ee5 SG |
1441 | mlx5_free_priv_descs(mr); |
1442 | ||
e126ba97 | 1443 | if (!umred) { |
b4cfe447 | 1444 | err = destroy_mkey(dev, mr); |
e126ba97 EC |
1445 | if (err) { |
1446 | mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", | |
a606b0f6 | 1447 | mr->mmkey.key, err); |
e126ba97 EC |
1448 | return err; |
1449 | } | |
1450 | } else { | |
49780d42 | 1451 | mlx5_mr_cache_free(dev, mr); |
e126ba97 EC |
1452 | } |
1453 | ||
6aec21f6 HE |
1454 | if (!umred) |
1455 | kfree(mr); | |
1456 | ||
1457 | return 0; | |
1458 | } | |
1459 | ||
1460 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr) | |
1461 | { | |
1462 | struct mlx5_ib_dev *dev = to_mdev(ibmr->device); | |
1463 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | |
1464 | int npages = mr->npages; | |
1465 | struct ib_umem *umem = mr->umem; | |
1466 | ||
1467 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | |
b4cfe447 HE |
1468 | if (umem && umem->odp_data) { |
1469 | /* Prevent new page faults from succeeding */ | |
1470 | mr->live = 0; | |
6aec21f6 HE |
1471 | /* Wait for all running page-fault handlers to finish. */ |
1472 | synchronize_srcu(&dev->mr_srcu); | |
b4cfe447 HE |
1473 | /* Destroy all page mappings */ |
1474 | mlx5_ib_invalidate_range(umem, ib_umem_start(umem), | |
1475 | ib_umem_end(umem)); | |
1476 | /* | |
1477 | * We kill the umem before the MR for ODP, | |
1478 | * so that there will not be any invalidations in | |
1479 | * flight, looking at the *mr struct. | |
1480 | */ | |
1481 | ib_umem_release(umem); | |
1482 | atomic_sub(npages, &dev->mdev->priv.reg_pages); | |
1483 | ||
1484 | /* Avoid double-freeing the umem. */ | |
1485 | umem = NULL; | |
1486 | } | |
6aec21f6 HE |
1487 | #endif |
1488 | ||
1489 | clean_mr(mr); | |
1490 | ||
e126ba97 EC |
1491 | if (umem) { |
1492 | ib_umem_release(umem); | |
6aec21f6 | 1493 | atomic_sub(npages, &dev->mdev->priv.reg_pages); |
e126ba97 EC |
1494 | } |
1495 | ||
e126ba97 EC |
1496 | return 0; |
1497 | } | |

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
	MLX5_SET(mkc, mkc, umr_en, 1);

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
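
/*
 * Illustrative sketch (not driver code): how a kernel ULP would reach
 * the handler above through the verbs layer. The PD and the choice of
 * 16 translation entries are assumptions made up for the example.
 */
static __maybe_unused struct ib_mr *example_alloc_fr_mr(struct ib_pd *pd)
{
	/* One MTT entry per page, so this MR can map up to 16 pages. */
	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);

	if (IS_ERR(mr))
		return mr;

	/* The mkey is created "free": it carries valid lkey/rkey values
	 * but maps no memory until ib_map_mr_sg() and a REG_MR work
	 * request arm it. */
	return mr;
}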

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32 comp_mask;
		__u32 response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!(type == IB_MW_TYPE_2));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}
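
/*
 * Illustrative sketch (not driver code): memory windows are reached
 * through uverbs, so the consumer side lives in userspace. With
 * libibverbs, a type-2 window is allocated and freed roughly like
 * this; the pd variable is an assumption of the example:
 *
 *	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);
 *
 *	if (mw) {
 *		... bind to an MR range with a post-send carrying
 *		    IBV_WR_BIND_MW, invalidate and rebind as needed ...
 *		ibv_dealloc_mw(mw);
 *	}
 *
 * Both paths land in mlx5_ib_alloc_mw()/mlx5_ib_dealloc_mw() above.
 */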

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key) {
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		} else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
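
/*
 * Illustrative sketch (not driver code): how a signature-aware ULP
 * might consume the interface above after an I/O completes. The
 * function and variable names are invented for the example.
 */
static int __maybe_unused example_check_sig_mr(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		/* err_type tells guard/reftag/apptag errors apart; the
		 * offset is in bytes from the start of the region. */
		pr_warn("signature error %d at offset %llu (key 0x%x)\n",
			mr_status.sig_err.err_type,
			mr_status.sig_err.sig_err_offset,
			mr_status.sig_err.key);

	return 0;
}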

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		/* Use >=, not >: writing klms[max_descs] would overrun
		 * the descriptor array by one entry. */
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		/* Subtract the offset consumed from the first segment,
		 * matching the bcount above. */
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}
	/* Report how many entries were actually consumed; this may be
	 * fewer than sg_nents if we ran out of descriptors. */
	mr->ndescs = i;

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}
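
/*
 * Worked example (illustrative numbers): an SGL with two DMA segments,
 * 0x1000 bytes at 0xa0000 and 0x2000 bytes at 0xc0000, mapped with
 * *sg_offset_p == 0x100, produces
 *
 *	klms[0] = { .va = 0xa0100, .bcount = 0x0f00, .key = lkey }
 *	klms[1] = { .va = 0xc0000, .bcount = 0x2000, .key = lkey }
 *
 * with iova = 0xa0100 and length = 0x2f00: the offset is consumed by
 * the first segment only, which is why sg_offset is zeroed at the
 * bottom of the loop.
 */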

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
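
/*
 * Note: mlx5_set_page() is the per-page callback handed to
 * ib_sg_to_pages() below. The addresses it receives are page aligned,
 * which is what leaves the low bits free to carry the MLX5_EN_RD and
 * MLX5_EN_WR enable flags expected by the translation entry format.
 */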

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}
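
/*
 * Illustrative sketch (not driver code): the classic fast-registration
 * sequence a ULP drives on top of the .map_mr_sg handler above. The
 * qp/mr/sgl parameters and the access mask are assumptions of the
 * example; the scatterlist must already be DMA-mapped.
 */
static int __maybe_unused example_fast_reg(struct ib_qp *qp,
					   struct ib_mr *mr,
					   struct scatterlist *sgl,
					   int sg_nents)
{
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr reg_wr = {};
	int n;

	/* Load the MR's descriptor array from the scatterlist. */
	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	/* Arm the mkey with a REG_MR work request on the send queue. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}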