/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
};

static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev,
				  chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}

void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
				gfp_t gfp_mask, int node)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page) {
		page = alloc_pages(gfp_mask, order);
		if (!page)
			return -ENOMEM;
	}

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	if (offset_in_page(buf)) {
		dma_free_coherent(dev, PAGE_SIZE << order,
				  buf, sg_dma_address(mem));
		return -ENOMEM;
	}

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc_node(sizeof(*icm),
			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
			   dev->numa_node);
	if (!icm) {
		icm = kmalloc(sizeof(*icm),
			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
		if (!icm)
			return NULL;
	}

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc_node(sizeof(*chunk),
					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
					     dev->numa_node);
			if (!chunk) {
				chunk = kmalloc(sizeof(*chunk),
						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
				if (!chunk)
					goto fail;
			}

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask,
						   dev->numa_node);

		if (ret) {
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i = (obj & (table->num_obj - 1)) /
		(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i;
	u64 offset;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
		mlx4_UNMAP_ICM(dev, table->virt + offset,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
		      dma_addr_t *dma_handle)
{
	int offset, dma_offset, i;
	u64 idx;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 u32 start, u32 end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int err;
	u32 i;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  u32 start, u32 end)
{
	u32 i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, u32 nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;
	u64 size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	size = (u64) nobj * obj_size;
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
			chunk_size = PAGE_ALIGN(size -
					i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	kfree(table->icm);

	return -ENOMEM;
}

void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table->icm);
}