/*-
 *   This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *   The full GNU General Public License is included in this distribution
 *   in the file called LICENSE.GPL.
 *
 *   Contact Information:
 *   Intel Corporation
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/version.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/interface/memory.h>

#include <exec-env/rte_dom0_common.h>

#include "compat.h"
#include "dom0_mm_dev.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Kernel Module for supporting DPDK running on Xen Dom0");

static struct dom0_mm_dev dom0_dev;
static struct kobject *dom0_kobj = NULL;

static struct memblock_info *rsv_mm_info;

/* Default configuration for reserved memory size (2048 MB). */
static uint32_t rsv_memsize = 2048;

static int dom0_open(struct inode *inode, struct file *file);
static int dom0_release(struct inode *inode, struct file *file);
static int dom0_ioctl(struct file *file, unsigned int ioctl_num,
                unsigned long ioctl_param);
static int dom0_mmap(struct file *file, struct vm_area_struct *vma);
static int dom0_memory_free(uint32_t size);
static int dom0_memory_release(struct dom0_mm_data *mm_data);

static const struct file_operations data_fops = {
        .owner = THIS_MODULE,
        .open = dom0_open,
        .release = dom0_release,
        .mmap = dom0_mmap,
        /* cast: dom0_ioctl returns int, while .unlocked_ioctl expects long */
        .unlocked_ioctl = (void *)dom0_ioctl,
};

static ssize_t
show_memsize_rsvd(struct device *dev, struct device_attribute *attr, char *buf)
{
        return snprintf(buf, 10, "%u\n", dom0_dev.used_memsize);
}

static ssize_t
show_memsize(struct device *dev, struct device_attribute *attr, char *buf)
{
        return snprintf(buf, 10, "%u\n", dom0_dev.config_memsize);
}

static ssize_t
store_memsize(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        int err = 0;
        unsigned long mem_size;

        if (0 != kstrtoul(buf, 0, &mem_size))
                return -EINVAL;

        mutex_lock(&dom0_dev.data_lock);
        if (0 == mem_size) {
                err = -EINVAL;
                goto fail;
        } else if (mem_size > (rsv_memsize - dom0_dev.used_memsize)) {
                XEN_ERR("failed to configure memory size\n");
                err = -EINVAL;
                goto fail;
        } else
                dom0_dev.config_memsize = mem_size;

fail:
        mutex_unlock(&dom0_dev.data_lock);
        return err ? err : count;
}

static DEVICE_ATTR(memsize, S_IRUGO | S_IWUSR, show_memsize, store_memsize);
static DEVICE_ATTR(memsize_rsvd, S_IRUGO, show_memsize_rsvd, NULL);

static struct attribute *dev_attrs[] = {
        &dev_attr_memsize.attr,
        &dev_attr_memsize_rsvd.attr,
        NULL,
};

/* the memory size unit is MB */
static const struct attribute_group dev_attr_grp = {
        .name = "memsize-mB",
        .attrs = dev_attrs,
};

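/*
 * Bubble-sort the reserved memory blocks by ascending pfn, keeping each
 * block's virtual address paired with its pfn, so that physically adjacent
 * blocks end up next to each other in rsv_mm_info[].
 */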
static void
sort_viraddr(struct memblock_info *mb, int cnt)
{
        int i, j;
        uint64_t tmp_pfn;
        uint64_t tmp_viraddr;

        /* sort virtual address and pfn */
        for (i = 0; i < cnt; i++) {
                for (j = cnt - 1; j > i; j--) {
                        if (mb[j].pfn < mb[j - 1].pfn) {
                                tmp_pfn = mb[j - 1].pfn;
                                mb[j - 1].pfn = mb[j].pfn;
                                mb[j].pfn = tmp_pfn;

                                tmp_viraddr = mb[j - 1].vir_addr;
                                mb[j - 1].vir_addr = mb[j].vir_addr;
                                mb[j].vir_addr = tmp_viraddr;
                        }
                }
        }
}

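/*
 * Look up a named memory context in dom0_dev.mm_data[]; return its index,
 * or -1 if no context with that name exists.
 */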
static int
dom0_find_memdata(const char *mem_name)
{
        unsigned i;
        int idx = -1;

        for (i = 0; i < NUM_MEM_CTX; i++) {
                if (dom0_dev.mm_data[i] == NULL)
                        continue;
                if (!strncmp(dom0_dev.mm_data[i]->name, mem_name,
                        sizeof(char) * DOM0_NAME_MAX)) {
                        idx = i;
                        break;
                }
        }

        return idx;
}

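/* Return the index of the first free slot in dom0_dev.mm_data[], or -1 if all slots are taken. */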
static int
dom0_find_mempos(void)
{
        unsigned i;
        int idx = -1;

        for (i = 0; i < NUM_MEM_CTX; i++) {
                if (dom0_dev.mm_data[i] == NULL) {
                        idx = i;
                        break;
                }
        }

        return idx;
}

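/*
 * Tear down one memory context: detach it from the global bookkeeping,
 * mark its 2MB blocks as free again in rsv_mm_info[], and free the private
 * data. Called with dom0_dev.data_lock held.
 */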
static int
dom0_memory_release(struct dom0_mm_data *mm_data)
{
        int idx;
        uint32_t num_block, block_id;

        /* each memory block is 2M */
        num_block = mm_data->mem_size / SIZE_PER_BLOCK;
        if (num_block == 0)
                return -EINVAL;

        /* reset global memory data */
        idx = dom0_find_memdata(mm_data->name);
        if (idx >= 0) {
                dom0_dev.used_memsize -= mm_data->mem_size;
                dom0_dev.mm_data[idx] = NULL;
                dom0_dev.num_mem_ctx--;
        }

        /* mark these memory blocks as free again */
        for (idx = 0; idx < num_block; idx++) {
                block_id = mm_data->block_num[idx];
                rsv_mm_info[block_id].used = 0;
        }

        memset(mm_data, 0, sizeof(struct dom0_mm_data));
        vfree(mm_data);
        return 0;
}

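/*
 * Undo dom0_memory_reserve(): for every reserved block hand the exchanged
 * machine-contiguous region back to Xen, clear the PG_reserved flag on each
 * page, return the pages to the kernel allocator, and drop rsv_mm_info.
 */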
static int
dom0_memory_free(uint32_t rsv_size)
{
        uint64_t vstart, vaddr;
        uint32_t i, num_block, size;

        if (!xen_pv_domain())
                return -1;

        /* each memory block is 2M */
        num_block = rsv_size / SIZE_PER_BLOCK;
        if (num_block == 0)
                return -EINVAL;

        /* free all 4M blocks and destroy their contiguous regions */
        for (i = 0; i < dom0_dev.num_bigblock * 2; i += 2) {
                vstart = rsv_mm_info[i].vir_addr;
                if (vstart) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
                        if (rsv_mm_info[i].exchange_flag)
                                xen_destroy_contiguous_region(vstart,
                                        DOM0_CONTIG_NUM_ORDER);
                        if (rsv_mm_info[i + 1].exchange_flag)
                                xen_destroy_contiguous_region(vstart +
                                        DOM0_MEMBLOCK_SIZE,
                                        DOM0_CONTIG_NUM_ORDER);
#else
                        if (rsv_mm_info[i].exchange_flag)
                                xen_destroy_contiguous_region(rsv_mm_info[i].pfn
                                        * PAGE_SIZE,
                                        DOM0_CONTIG_NUM_ORDER);
                        if (rsv_mm_info[i + 1].exchange_flag)
                                xen_destroy_contiguous_region(rsv_mm_info[i].pfn
                                        * PAGE_SIZE + DOM0_MEMBLOCK_SIZE,
                                        DOM0_CONTIG_NUM_ORDER);
#endif

                        size = DOM0_MEMBLOCK_SIZE * 2;
                        vaddr = vstart;
                        while (size > 0) {
                                ClearPageReserved(virt_to_page(vaddr));
                                vaddr += PAGE_SIZE;
                                size -= PAGE_SIZE;
                        }
                        free_pages(vstart, MAX_NUM_ORDER);
                }
        }

        /* free all remaining 2M blocks and destroy their contiguous regions */
        for (; i < num_block; i++) {
                vstart = rsv_mm_info[i].vir_addr;
                if (vstart) {
                        if (rsv_mm_info[i].exchange_flag)
                                xen_destroy_contiguous_region(vstart,
                                        DOM0_CONTIG_NUM_ORDER);

                        size = DOM0_MEMBLOCK_SIZE;
                        vaddr = vstart;
                        while (size > 0) {
                                ClearPageReserved(virt_to_page(vaddr));
                                vaddr += PAGE_SIZE;
                                size -= PAGE_SIZE;
                        }
                        free_pages(vstart, DOM0_CONTIG_NUM_ORDER);
                }
        }

        memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
        vfree(rsv_mm_info);
        rsv_mm_info = NULL;

        return 0;
}

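/*
 * Pick "count" unused blocks from rsv_mm_info[], copy their pfn/mfn/vaddr
 * into mm_data->block_info[], remember their indices in block_num[], and
 * mark each chosen block as used.
 */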
static void
find_free_memory(uint32_t count, struct dom0_mm_data *mm_data)
{
        uint32_t i = 0;
        uint32_t j = 0;

        while ((i < count) && (j < rsv_memsize / SIZE_PER_BLOCK)) {
                if (rsv_mm_info[j].used == 0) {
                        mm_data->block_info[i].pfn = rsv_mm_info[j].pfn;
                        mm_data->block_info[i].vir_addr =
                                rsv_mm_info[j].vir_addr;
                        mm_data->block_info[i].mfn = rsv_mm_info[j].mfn;
                        mm_data->block_info[i].exchange_flag =
                                rsv_mm_info[j].exchange_flag;
                        mm_data->block_num[i] = j;
                        rsv_mm_info[j].used = 1;
                        i++;
                }
                j++;
        }
}

/**
 * Find all memory segments in which physical addresses are contiguous.
 */
static void
find_memseg(int count, struct dom0_mm_data *mm_data)
{
        int i = 0;
        int j, k, idx = 0;
        uint64_t zone_len, pfn, num_block;

        while (i < count) {
                if (mm_data->block_info[i].exchange_flag == 0) {
                        i++;
                        continue;
                }
                k = 0;
                pfn = mm_data->block_info[i].pfn;
                mm_data->seg_info[idx].pfn = pfn;
                mm_data->seg_info[idx].mfn[k] = mm_data->block_info[i].mfn;

                for (j = i + 1; j < count; j++) {

                        /* ignore blocks for which the exchange failed */
                        if (mm_data->block_info[j].exchange_flag == 0)
                                break;

                        if (mm_data->block_info[j].pfn !=
                                (mm_data->block_info[j - 1].pfn +
                                        DOM0_MEMBLOCK_SIZE / PAGE_SIZE))
                                break;
                        ++k;
                        mm_data->seg_info[idx].mfn[k] = mm_data->block_info[j].mfn;
                }

                num_block = j - i;
                zone_len = num_block * DOM0_MEMBLOCK_SIZE;
                mm_data->seg_info[idx].size = zone_len;

                XEN_PRINT("memseg id=%d, size=0x%llx\n", idx, zone_len);
                i = i + num_block;
                idx++;
                if (idx == DOM0_NUM_MEMSEG)
                        break;
        }
        mm_data->num_memseg = idx;
}

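/*
 * Reserve rsv_size MB of kernel memory at module load time: grab pages in
 * 4M (preferred) or 2M chunks, mark them reserved, then ask Xen to make each
 * 2M block machine-contiguous so DPDK can later use it as physically
 * contiguous memory segments.
 */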
static int
dom0_memory_reserve(uint32_t rsv_size)
{
        uint64_t pfn, vstart, vaddr;
        uint32_t i, num_block, size, allocated_size = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
        dma_addr_t dma_handle;
#endif

        /* 2M as memory block */
        num_block = rsv_size / SIZE_PER_BLOCK;

        rsv_mm_info = vmalloc(sizeof(struct memblock_info) * num_block);
        if (!rsv_mm_info) {
                XEN_ERR("Unable to allocate device memory information\n");
                return -ENOMEM;
        }
        memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);

        /* try to allocate 4M at a time first */
        for (i = 0; i < num_block; i += 2) {
                vstart = (unsigned long)
                        __get_free_pages(GFP_ATOMIC, MAX_NUM_ORDER);
                if (vstart == 0)
                        break;

                dom0_dev.num_bigblock = i / 2 + 1;
                allocated_size = SIZE_PER_BLOCK * (i + 2);

                /* size of 4M */
                size = DOM0_MEMBLOCK_SIZE * 2;

                vaddr = vstart;
                while (size > 0) {
                        SetPageReserved(virt_to_page(vaddr));
                        vaddr += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }

                pfn = virt_to_pfn(vstart);
                rsv_mm_info[i].pfn = pfn;
                rsv_mm_info[i].vir_addr = vstart;
                rsv_mm_info[i + 1].pfn =
                        pfn + DOM0_MEMBLOCK_SIZE / PAGE_SIZE;
                rsv_mm_info[i + 1].vir_addr =
                        vstart + DOM0_MEMBLOCK_SIZE;
        }

        /* if allocating 4M failed, fall back to allocating 2M at a time */
        for (; i < num_block; i++) {
                vstart = (unsigned long)
                        __get_free_pages(GFP_ATOMIC, DOM0_CONTIG_NUM_ORDER);
                if (vstart == 0) {
                        XEN_ERR("failed to allocate memory\n");
                        dom0_memory_free(allocated_size);
                        return -ENOMEM;
                }

                allocated_size += SIZE_PER_BLOCK;

                size = DOM0_MEMBLOCK_SIZE;
                vaddr = vstart;
                while (size > 0) {
                        SetPageReserved(virt_to_page(vaddr));
                        vaddr += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
                pfn = virt_to_pfn(vstart);
                rsv_mm_info[i].pfn = pfn;
                rsv_mm_info[i].vir_addr = vstart;
        }

        sort_viraddr(rsv_mm_info, num_block);

        for (i = 0; i < num_block; i++) {

                /*
                 * This API is used to exchange MFNs to get a block of
                 * contiguous physical addresses; its maximum size is 2M.
                 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
                if (xen_create_contiguous_region(rsv_mm_info[i].vir_addr,
                                DOM0_CONTIG_NUM_ORDER, 0) == 0) {
#else
                if (xen_create_contiguous_region(rsv_mm_info[i].pfn * PAGE_SIZE,
                                DOM0_CONTIG_NUM_ORDER, 0, &dma_handle) == 0) {
#endif
                        rsv_mm_info[i].exchange_flag = 1;
                        rsv_mm_info[i].mfn =
                                pfn_to_mfn(rsv_mm_info[i].pfn);
                        rsv_mm_info[i].used = 0;
                } else {
                        XEN_ERR("failed to exchange memory\n");
                        rsv_mm_info[i].exchange_flag = 0;
                        dom0_dev.fail_times++;
                        if (dom0_dev.fail_times > MAX_EXCHANGE_FAIL_TIME) {
                                dom0_memory_free(rsv_size);
                                return -EFAULT;
                        }
                }
        }

        return 0;
}

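/*
 * Bind a new memory context to a free slot: carve the requested number of
 * 2M blocks out of the reserved pool, group them into physically contiguous
 * segments, and account for the memory in the global device state.
 * Called with dom0_dev.data_lock held.
 */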
static int
dom0_prepare_memsegs(struct memory_info *meminfo, struct dom0_mm_data *mm_data)
{
        uint32_t num_block;
        int idx;

        /* check if there is a free name buffer */
        memcpy(mm_data->name, meminfo->name, DOM0_NAME_MAX);
        mm_data->name[DOM0_NAME_MAX - 1] = '\0';
        idx = dom0_find_mempos();
        if (idx < 0)
                return -1;

        num_block = meminfo->size / SIZE_PER_BLOCK;
        /* find free memory and new memory segments */
        find_free_memory(num_block, mm_data);
        find_memseg(num_block, mm_data);

        /* update private memory data */
        mm_data->refcnt++;
        mm_data->mem_size = meminfo->size;

        /* update global memory data */
        dom0_dev.mm_data[idx] = mm_data;
        dom0_dev.num_mem_ctx++;
        dom0_dev.used_memsize += mm_data->mem_size;

        return 0;
}

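/*
 * Validate a reservation request: round the size up to an even number of MB,
 * make sure there is room for another context, that the name is not already
 * in use, and that the request fits in the remaining reserved memory.
 */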
static int
dom0_check_memory(struct memory_info *meminfo)
{
        int idx;
        uint64_t mem_size;

        /* round memory size up to the next even number */
        if (meminfo->size % 2)
                ++meminfo->size;

        mem_size = meminfo->size;
        if (dom0_dev.num_mem_ctx > NUM_MEM_CTX) {
                XEN_ERR("Memory data space is full in Dom0 driver\n");
                return -1;
        }
        idx = dom0_find_memdata(meminfo->name);
        if (idx >= 0) {
                XEN_ERR("Memory data name %s already exists in Dom0 driver.\n",
                        meminfo->name);
                return -1;
        }
        if ((dom0_dev.used_memsize + mem_size) > rsv_memsize) {
                XEN_ERR("Total size can't be larger than reserved size.\n");
                return -1;
        }

        return 0;
}

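/*
 * Module init: verify we are running in a Xen domain, register the
 * /dev/dom0_mm misc device and the dom0-mm sysfs attribute group, then
 * reserve rsv_memsize MB of memory up front.
 */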
static int __init
dom0_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        if (rsv_memsize > DOM0_CONFIG_MEMSIZE) {
                XEN_ERR("The reserved memory size cannot be greater than %d\n",
                        DOM0_CONFIG_MEMSIZE);
                return -EINVAL;
        }

        /* Setup the misc device */
        dom0_dev.miscdev.minor = MISC_DYNAMIC_MINOR;
        dom0_dev.miscdev.name = "dom0_mm";
        dom0_dev.miscdev.fops = &data_fops;

        /* register misc char device */
        if (misc_register(&dom0_dev.miscdev) != 0) {
                XEN_ERR("Misc device registration failed\n");
                return -EPERM;
        }

        mutex_init(&dom0_dev.data_lock);
        dom0_kobj = kobject_create_and_add("dom0-mm", mm_kobj);

        if (!dom0_kobj) {
                XEN_ERR("dom0-mm object creation failed\n");
                misc_deregister(&dom0_dev.miscdev);
                return -ENOMEM;
        }

        if (sysfs_create_group(dom0_kobj, &dev_attr_grp)) {
                kobject_put(dom0_kobj);
                misc_deregister(&dom0_dev.miscdev);
                return -EPERM;
        }

        if (dom0_memory_reserve(rsv_memsize) < 0) {
                sysfs_remove_group(dom0_kobj, &dev_attr_grp);
                kobject_put(dom0_kobj);
                misc_deregister(&dom0_dev.miscdev);
                return -ENOMEM;
        }

        XEN_PRINT("####### DPDK Xen Dom0 module loaded #######\n");

        return 0;
}

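/* Module exit: release the reserved memory, then tear down sysfs and the misc device. */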
static void __exit
dom0_exit(void)
{
        if (rsv_mm_info != NULL)
                dom0_memory_free(rsv_memsize);

        sysfs_remove_group(dom0_kobj, &dev_attr_grp);
        kobject_put(dom0_kobj);
        misc_deregister(&dom0_dev.miscdev);

        XEN_PRINT("####### DPDK Xen Dom0 module unloaded #######\n");
}

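/* Open /dev/dom0_mm: start with no memory context attached to this file descriptor. */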
static int
dom0_open(struct inode *inode, struct file *file)
{
        file->private_data = NULL;

        XEN_PRINT(KERN_INFO "/dev/dom0_mm opened\n");
        return 0;
}

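/*
 * Close /dev/dom0_mm: drop this descriptor's reference on its memory context
 * and release the context once the last user is gone.
 */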
static int
dom0_release(struct inode *inode, struct file *file)
{
        int ret = 0;
        struct dom0_mm_data *mm_data = file->private_data;

        if (mm_data == NULL)
                return ret;

        mutex_lock(&dom0_dev.data_lock);
        if (--mm_data->refcnt == 0)
                ret = dom0_memory_release(mm_data);
        mutex_unlock(&dom0_dev.data_lock);

        file->private_data = NULL;
        XEN_PRINT(KERN_INFO "/dev/dom0_mm closed\n");
        return ret;
}

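/*
 * Map one memory segment of the attached context into user space: the mmap
 * offset (vm_pgoff) selects the segment index, and the requested length must
 * not exceed that segment's size.
 */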
static int
dom0_mmap(struct file *file, struct vm_area_struct *vm)
{
        int status = 0;
        uint32_t idx = vm->vm_pgoff;
        uint64_t pfn, size = vm->vm_end - vm->vm_start;
        struct dom0_mm_data *mm_data = file->private_data;

        if (mm_data == NULL)
                return -EINVAL;

        mutex_lock(&dom0_dev.data_lock);
        if (idx >= mm_data->num_memseg) {
                mutex_unlock(&dom0_dev.data_lock);
                return -EINVAL;
        }

        if (size > mm_data->seg_info[idx].size) {
                mutex_unlock(&dom0_dev.data_lock);
                return -EINVAL;
        }

        XEN_PRINT("mmap memseg idx=%d, size=0x%llx\n", idx, size);

        pfn = mm_data->seg_info[idx].pfn;
        mutex_unlock(&dom0_dev.data_lock);

        status = remap_pfn_range(vm, vm->vm_start, pfn, size, PAGE_SHARED);

        return status;
}
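
/*
 * ioctl interface: create a new set of memory segments, attach to an
 * existing named set, or report the number and layout of the segments for
 * the context attached to this file descriptor.
 */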
static int
dom0_ioctl(struct file *file,
        unsigned int ioctl_num,
        unsigned long ioctl_param)
{
        int idx, ret;
        char name[DOM0_NAME_MAX] = {0};
        struct memory_info meminfo;
        struct dom0_mm_data *mm_data = file->private_data;

        XEN_PRINT("IOCTL num=0x%0x param=0x%0lx\n", ioctl_num, ioctl_param);

        /**
         * Switch according to the ioctl called
         */
        switch (_IOC_NR(ioctl_num)) {
        case _IOC_NR(RTE_DOM0_IOCTL_PREPARE_MEMSEG):
                ret = copy_from_user(&meminfo, (void *)ioctl_param,
                        sizeof(struct memory_info));
                if (ret)
                        return -EFAULT;

                if (mm_data != NULL) {
                        XEN_ERR("Cannot create memory segment for the same"
                                " file descriptor\n");
                        return -EINVAL;
                }

                /* Allocate private data */
                mm_data = vmalloc(sizeof(struct dom0_mm_data));
                if (!mm_data) {
                        XEN_ERR("Unable to allocate device private data\n");
                        return -ENOMEM;
                }
                memset(mm_data, 0, sizeof(struct dom0_mm_data));

                mutex_lock(&dom0_dev.data_lock);
                /* check if we can allocate memory */
                if (dom0_check_memory(&meminfo) < 0) {
                        mutex_unlock(&dom0_dev.data_lock);
                        vfree(mm_data);
                        return -EINVAL;
                }

                /* allocate memory and create memory segments */
                if (dom0_prepare_memsegs(&meminfo, mm_data) < 0) {
                        XEN_ERR("failed to create memory segment.\n");
                        mutex_unlock(&dom0_dev.data_lock);
                        return -EIO;
                }

                file->private_data = mm_data;
                mutex_unlock(&dom0_dev.data_lock);
                break;

        /* support multiple processes in terms of memory mapping */
        case _IOC_NR(RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG):
                ret = copy_from_user(name, (void *)ioctl_param,
                        sizeof(char) * DOM0_NAME_MAX);
                if (ret)
                        return -EFAULT;

                mutex_lock(&dom0_dev.data_lock);
                idx = dom0_find_memdata(name);
                if (idx < 0) {
                        mutex_unlock(&dom0_dev.data_lock);
                        return -EINVAL;
                }

                mm_data = dom0_dev.mm_data[idx];
                mm_data->refcnt++;
                file->private_data = mm_data;
                mutex_unlock(&dom0_dev.data_lock);
                break;

        case _IOC_NR(RTE_DOM0_IOCTL_GET_NUM_MEMSEG):
                ret = copy_to_user((void *)ioctl_param, &mm_data->num_memseg,
                        sizeof(int));
                if (ret)
                        return -EFAULT;
                break;

        case _IOC_NR(RTE_DOM0_IOCTL_GET_MEMSEG_INFO):
                ret = copy_to_user((void *)ioctl_param,
                        &mm_data->seg_info[0],
                        sizeof(struct memseg_info) *
                                mm_data->num_memseg);
                if (ret)
                        return -EFAULT;
                break;
        default:
                XEN_PRINT("IOCTL default\n");
                break;
        }

        return 0;
}

module_init(dom0_init);
module_exit(dom0_exit);

module_param(rsv_memsize, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rsv_memsize, "Xen-dom0 reserved memory size (MB).");
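
/*
 * Usage sketch (assuming the DPDK build produces this module as
 * rte_dom0_mm.ko; adjust the name to your build output):
 *
 *   insmod rte_dom0_mm.ko rsv_memsize=2048
 *   echo 1024 > /sys/kernel/mm/dom0-mm/memsize-mB/memsize
 *
 * The echo stores the amount of reserved memory (in MB) that a subsequent
 * DPDK application may request; store_memsize() above rejects values larger
 * than the still-unused part of the reserved pool.
 */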