drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
/*
 * This file contains the entry functions for memory management of the ISP driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap */
#include <linux/io.h>		/* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include <asm/cacheflush.h>
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

#ifdef USE_SSSE3
#include <asm/ssse3.h>
#endif

struct hmm_bo_device bo_device;
struct hmm_pool dynamic_pool;
struct hmm_pool reserved_pool;
static ia_css_ptr dummy_ptr;
struct _hmm_mem_stat hmm_mem_stat;

const char *hmm_bo_type_strings[HMM_BO_LAST] = {
	"p", /* private */
	"s", /* shared */
	"u", /* user */
#ifdef CONFIG_ION
	"i", /* ion */
#endif
};

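/*
 * Print one "type pgnr" line per buffer object on @bo_list whose
 * HMM_BO_ALLOCED status matches @active, followed by a per-type summary.
 * Page counts are converted to KB assuming 4 KB pages.
 */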
static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
		       char *buf, struct list_head *bo_list, bool active)
{
	ssize_t ret = 0;
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int i;
	long total[HMM_BO_LAST] = { 0 };
	long count[HMM_BO_LAST] = { 0 };
	int index1 = 0;
	int index2 = 0;

	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
	if (ret <= 0)
		return 0;

	index1 += ret;

	spin_lock_irqsave(&bo_device.list_lock, flags);
	list_for_each_entry(bo, bo_list, list) {
		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
					"%s %d\n",
					hmm_bo_type_strings[bo->type],
					bo->pgnr);

			total[bo->type] += bo->pgnr;
			count[bo->type]++;
			if (ret > 0)
				index1 += ret;
		}
	}
	spin_unlock_irqrestore(&bo_device.list_lock, flags);

	for (i = 0; i < HMM_BO_LAST; i++) {
		if (count[i]) {
			ret = scnprintf(buf + index1 + index2,
					PAGE_SIZE - index1 - index2,
					"%ld %s buffer objects: %ld KB\n",
					count[i], hmm_bo_type_strings[i],
					total[i] * 4);
			if (ret > 0)
				index2 += ret;
		}
	}

	/* Add trailing zero, not included by scnprintf */
	return index1 + index2 + 1;
}

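/* sysfs show callbacks: report the active/free BO lists and pool fill levels. */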
static ssize_t active_bo_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static ssize_t reserved_pool_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	ssize_t ret = 0;

	struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
	unsigned long flags;

	if (!pinfo || !pinfo->initialized)
		return 0;

	spin_lock_irqsave(&pinfo->list_lock, flags);
	ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
			pinfo->index, pinfo->pgnr);
	spin_unlock_irqrestore(&pinfo->list_lock, flags);

	if (ret > 0)
		ret++; /* Add trailing zero, not included by scnprintf */

	return ret;
}

static ssize_t dynamic_pool_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret = 0;

	struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
	unsigned long flags;

	if (!pinfo || !pinfo->initialized)
		return 0;

	spin_lock_irqsave(&pinfo->list_lock, flags);
	ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
			pinfo->pgnr, pinfo->pool_size);
	spin_unlock_irqrestore(&pinfo->list_lock, flags);

	if (ret > 0)
		ret++; /* Add trailing zero, not included by scnprintf */

	return ret;
}

static DEVICE_ATTR(active_bo, S_IRUGO, active_bo_show, NULL);
static DEVICE_ATTR(free_bo, S_IRUGO, free_bo_show, NULL);
static DEVICE_ATTR(reserved_pool, S_IRUGO, reserved_pool_show, NULL);
static DEVICE_ATTR(dynamic_pool, S_IRUGO, dynamic_pool_show, NULL);

static struct attribute *sysfs_attrs_ctrl[] = {
	&dev_attr_active_bo.attr,
	&dev_attr_free_bo.attr,
	&dev_attr_reserved_pool.attr,
	&dev_attr_dynamic_pool.attr,
	NULL
};

static struct attribute_group atomisp_attribute_group[] = {
	{.attrs = sysfs_attrs_ctrl },
};

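/*
 * Initialize the HMM layer: set up the BO device on top of the
 * sh_mmu_mrfld ISP MMU over [ISP_VM_START, ISP_VM_START + ISP_VM_SIZE),
 * allocate the dummy page that occupies address 0, and create the sysfs
 * statistics group.
 */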
int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	/*
	 * hmm uses NULL to indicate an invalid ISP virtual address, but
	 * ISP_VM_START is defined as 0 too. Allocate one piece of dummy
	 * memory up front, which takes the value 0, so that no later
	 * hmm_alloc() call can ever return 0.
	 */
	dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, 0, HMM_UNCACHED);

	if (!ret) {
		ret = sysfs_create_group(&atomisp_dev->kobj,
					 atomisp_attribute_group);
		if (ret)
			dev_err(atomisp_dev,
				"%s Failed to create sysfs\n", __func__);
	}

	return ret;
}

void hmm_cleanup(void)
{
	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
}

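/*
 * Allocate ISP memory: reserve enough ISP virtual address space for
 * @bytes, back it with pages, and bind the two in the ISP MMU.
 *
 * An illustrative caller sketch (not taken from a real caller):
 *
 *	ia_css_ptr p = hmm_alloc(size, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);
 *	hmm_store(p, buf, size);	copy from the CPU into ISP memory
 *	hmm_load(p, buf, size);		copy back from ISP memory
 *	hmm_free(p);
 *
 * Returns the ISP virtual start address, or 0 on failure.
 */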
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
		     int from_highmem, void *userptr, bool cached)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/* Get page number from size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Buffer object structure init */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for memory */
	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Bind the ISP virtual address range to the allocated pages */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	hmm_mem_stat.tol_cnt += pgnr;

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}

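/* Release the BO that starts at @virt: unbind it, free its pages, drop the ref. */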
void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);

	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_mem_stat.tol_cnt -= bo->pgnr;

	hmm_bo_unbind(bo);

	hmm_bo_free_pages(bo);

	hmm_bo_unref(bo);
}

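/*
 * Sanity-check a BO looked up from an ISP virtual address: it must exist,
 * have backing pages, and have virtual address space allocated.
 */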
static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no page allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

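/*
 * The copy loops below walk the buffer one page at a time: idx selects the
 * backing page of @virt within the BO, offset the byte inside that page.
 * For example, with 4 KB pages, bo->start == 0x1000 and virt == 0x2010:
 * idx = (0x2010 - 0x1000) >> PAGE_SHIFT = 1 and offset = 0x10, so at most
 * PAGE_SIZE - offset bytes are handled before moving to the next page.
 */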
/* Read function in ISP memory management: per-page kmap path */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap(bo->page_obj[idx].page);
		if (!src) {
			dev_err(atomisp_dev,
				"kmap buffer object page failed: pg_idx = %d\n",
				idx);
			return -EINVAL;
		}

		src += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for next loop */

		if (des) {
#ifdef USE_SSSE3
			_ssse3_memcpy(des, src, len);
#else
			memcpy(des, src, len);
#endif
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

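/*
 * Read function in ISP memory management: try the contiguous vmap fast
 * path first (an already-vmap'ed BO, or a fresh hmm_bo_vmap()), and fall
 * back to load_and_flush_by_kmap() when no vmap can be set up. A NULL
 * @data means "flush only, don't copy".
 */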
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		if (data) {	/* NULL data requests a flush without a copy */
#ifdef USE_SSSE3
			_ssse3_memcpy(data, src, bytes);
#else
			memcpy(data, src, bytes);
#endif
		}
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);

		if (data) {	/* NULL data requests a flush without a copy */
#ifdef USE_SSSE3
			_ssse3_memcpy(data, vptr, bytes);
#else
			memcpy(data, vptr, bytes);
#endif
		}
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!data) {
		dev_err(atomisp_dev, "hmm_load NULL argument\n");
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

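/*
 * Write function in ISP memory management. Unlike the read path, this can
 * be called from atomic context, hence the in_atomic() check that selects
 * kmap_atomic() over kmap() in the per-page fallback loop.
 */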
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
#ifdef USE_SSSE3
		_ssse3_memcpy(dst, data, bytes);
#else
		memcpy(dst, data, bytes);
#endif
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		/* Done; don't fall through and re-copy via the loop below. */
		return 0;
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);

#ifdef USE_SSSE3
			_ssse3_memcpy(vptr, data, bytes);
#else
			memcpy(vptr, data, bytes);
#endif
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	/* Per-page fallback when the buffer cannot be vmap'ed in one go */
	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		if (in_atomic())
			des = (char *)kmap_atomic(bo->page_obj[idx].page);
		else
			des = (char *)kmap(bo->page_obj[idx].page);

		if (!des) {
			dev_err(atomisp_dev,
				"kmap buffer object page failed: pg_idx = %d\n",
				idx);
			return -EINVAL;
		}

		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

#ifdef USE_SSSE3
		_ssse3_memcpy(des, src, len);
#else
		memcpy(des, src, len);
#endif
		src += len;

		clflush_cache_range(des, len);

		if (in_atomic())
			/*
			 * Note: kunmap_atomic requires the return addr from
			 * kmap_atomic, not the page. See linux/highmem.h
			 */
			kunmap_atomic(des - offset);
		else
			kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

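/*
 * memset function in ISP memory management: same structure as hmm_store(),
 * but fills ISP memory with the byte value @c instead of copying a buffer.
 */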
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		/* Done; don't fall through and re-set via the loop below. */
		return 0;
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memset(vptr, c, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap(bo->page_obj[idx].page);
		if (!des) {
			dev_err(atomisp_dev,
				"kmap buffer object page failed: pg_idx = %d\n",
				idx);
			return -EINVAL;
		}
		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);

		clflush_cache_range(des, len);

		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/* Convert an ISP virtual address to a physical address */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->page_obj[idx].page) + offset;
}

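/* Map the BO that starts at @virt into the given user VMA. */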
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (kernel) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (ptr)
		return ptr + (virt - bo->start);
	else
		return NULL;
}

/* Flush memory which was mapped as cached memory through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}

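/*
 * Register one of the two page pools ("reserved" or "dynamic"); the actual
 * setup and teardown are delegated to the pool's ops (pool_init/pool_exit).
 */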
int hmm_pool_register(unsigned int pool_size,
		      enum hmm_pool_type pool_type)
{
	switch (pool_type) {
	case HMM_POOL_TYPE_RESERVED:
		reserved_pool.pops = &reserved_pops;
		return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
						     pool_size);
	case HMM_POOL_TYPE_DYNAMIC:
		dynamic_pool.pops = &dynamic_pops;
		return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
						    pool_size);
	default:
		dev_err(atomisp_dev, "invalid pool type.\n");
		return -EINVAL;
	}
}

void hmm_pool_unregister(enum hmm_pool_type pool_type)
{
	switch (pool_type) {
	case HMM_POOL_TYPE_RESERVED:
		if (reserved_pool.pops && reserved_pool.pops->pool_exit)
			reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
		break;
	case HMM_POOL_TYPE_DYNAMIC:
		if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
			dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
		break;
	default:
		dev_err(atomisp_dev, "invalid pool type.\n");
		break;
	}
}

void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
{
	return hmm_vmap(ptr, cached);
	/* vmunmap will be done in hmm_bo_release() */
}

ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
	if (bo)
		return bo->start;

	dev_err(atomisp_dev,
		"cannot find buffer object whose kernel virtual address is %p\n",
		ptr);
	return 0;
}

void hmm_show_mem_stat(const char *func, const int line)
{
	trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
		     hmm_mem_stat.tol_cnt,
		     hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
		     hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
		     hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
}

void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
{
	hmm_mem_stat.res_size = res_pgnr;
	/* If the reserved memory pool is not enabled, set its "mem stat" values to -1. */
	if (hmm_mem_stat.res_size == 0) {
		hmm_mem_stat.res_size = -1;
		hmm_mem_stat.res_cnt = -1;
	}

	/* If the dynamic memory pool is not enabled, set its "mem stat" values to -1. */
	if (!dyc_en) {
		hmm_mem_stat.dyc_size = -1;
		hmm_mem_stat.dyc_thr = -1;
	} else {
		hmm_mem_stat.dyc_size = 0;
		hmm_mem_stat.dyc_thr = dyc_pgnr;
	}
	hmm_mem_stat.usr_size = 0;
	hmm_mem_stat.sys_size = 0;
	hmm_mem_stat.tol_cnt = 0;
}