src/jemalloc/test/unit/pack.c
#include "test/jemalloc_test.h"

/*
 * Size class that is a divisor of the page size, ideally 4+ regions per run.
 */
#if LG_PAGE <= 14
#define SZ (ZU(1) << (LG_PAGE - 2))
#else
#define SZ 4096
#endif

/*
 * Number of chunks to consume at high water mark.  Should be at least 2 so
 * that downward growth of mmap()ed memory, on systems where it occurs, is
 * exercised.
 */
#define NCHUNKS 8

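/*
 * Compute the index of the bin whose size class equals SZ by scanning
 * arenas.bin.<i>.size for each of the arenas.nbins bins.
 */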
static unsigned
binind_compute(void)
{
	size_t sz;
	unsigned nbins, i;

	sz = sizeof(nbins);
	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
	    "Unexpected mallctl failure");

	for (i = 0; i < nbins; i++) {
		size_t mib[4];
		size_t miblen = sizeof(mib)/sizeof(size_t);
		size_t size;

		assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
		    &miblen), 0, "Unexpected mallctlnametomib failure");
		mib[2] = (size_t)i;

		sz = sizeof(size);
		assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
		    0), 0, "Unexpected mallctlbymib failure");
		if (size == SZ)
			return (i);
	}

	test_fail("Unable to compute nregs_per_run");
	return (0);
}

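/*
 * Number of regions per run for the SZ size class, as reported by
 * arenas.bin.<binind>.nregs.
 */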
static size_t
nregs_per_run_compute(void)
{
	uint32_t nregs;
	size_t sz;
	unsigned binind = binind_compute();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
	    "Unexpected mallctlnametomib failure");
	mib[2] = (size_t)binind;
	sz = sizeof(nregs);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
	    0), 0, "Unexpected mallctlbymib failure");
	return (nregs);
}

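/*
 * Number of pages per run for the SZ size class, derived from
 * arenas.bin.<binind>.run_size.
 */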
static size_t
npages_per_run_compute(void)
{
	size_t sz;
	unsigned binind = binind_compute();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	size_t run_size;

	assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
	    "Unexpected mallctlnametomib failure");
	mib[2] = (size_t)binind;
	sz = sizeof(run_size);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL,
	    0), 0, "Unexpected mallctlbymib failure");
	return (run_size >> LG_PAGE);
}

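/*
 * Number of usable pages per chunk, i.e. the chunk size in pages minus the
 * map_bias header pages.
 */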
static size_t
npages_per_chunk_compute(void)
{

	return ((chunksize >> LG_PAGE) - map_bias);
}

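/*
 * Number of runs of the SZ size class that fit in a single chunk.
 */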
static size_t
nruns_per_chunk_compute(void)
{

	return (npages_per_chunk_compute() / npages_per_run_compute());
}

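/*
 * Create a new arena via the arenas.extend mallctl and return its index.
 */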
static unsigned
arenas_extend_mallctl(void)
{
	unsigned arena_ind;
	size_t sz;

	sz = sizeof(arena_ind);
	assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Error in arenas.extend");

	return (arena_ind);
}

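/*
 * Discard all of the specified arena's allocations via the arena.<i>.reset
 * mallctl.
 */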
static void
arena_reset_mallctl(unsigned arena_ind)
{
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

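/*
 * Fill a fresh arena with NCHUNKS chunks' worth of runs of the SZ size
 * class, free all but one (rotating) region per run, then reallocate and
 * verify that each new allocation lands back in the region it previously
 * occupied, i.e. that freed regions are packed back into existing runs.
 */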
TEST_BEGIN(test_pack)
{
	unsigned arena_ind = arenas_extend_mallctl();
	size_t nregs_per_run = nregs_per_run_compute();
	size_t nruns_per_chunk = nruns_per_chunk_compute();
	size_t nruns = nruns_per_chunk * NCHUNKS;
	size_t nregs = nregs_per_run * nruns;
	VARIABLE_ARRAY(void *, ptrs, nregs);
	size_t i, j, offset;

	/* Fill matrix. */
	for (i = offset = 0; i < nruns; i++) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p,
			    "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
			    " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
			    SZ, arena_ind, i, j);
			ptrs[(i * nregs_per_run) + j] = p;
		}
	}

	/*
	 * Free all but one region of each run, but rotate which region is
	 * preserved, so that subsequent allocations exercise the within-run
	 * layout policy.
	 */
	for (i = offset = 0;
	    i < nruns;
	    i++, offset = (offset + 1) % nregs_per_run) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p = ptrs[(i * nregs_per_run) + j];
			if (offset == j)
				continue;
			dallocx(p, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
		}
	}

	/*
	 * Logically refill matrix, skipping preserved regions and verifying
	 * that the matrix is unmodified.
	 */
	for (i = offset = 0;
	    i < nruns;
	    i++, offset = (offset + 1) % nregs_per_run) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p;

			if (offset == j)
				continue;
			p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
			    "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
			    i, j);
		}
	}

	/* Clean up. */
	arena_reset_mallctl(arena_ind);
}
TEST_END

int
main(void)
{

	return (test(
	    test_pack));
}