/*
 * mm/percpu-internal.h - internal definitions for the percpu allocator
 *
 * Chunk allocation state is tracked with bitmaps (this header postdates
 * the replacement of the old area-map allocator with a bitmap allocator).
 */
1#ifndef _MM_PERCPU_INTERNAL_H
2#define _MM_PERCPU_INTERNAL_H
3
4#include <linux/types.h>
5#include <linux/percpu.h>
6
/*
 * Per-chunk bookkeeping for the percpu allocator.  Allocation state is
 * kept in a pair of bitmaps: alloc_map marks which minimum-size units
 * are in use, bound_map marks allocation boundaries.  A trailing
 * flexible bitmap tracks which of the chunk's pages are populated.
 */
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size;	/* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_bytes;	/* free bytes in the chunk */
	int			contig_bits;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */

	unsigned long		*alloc_map;	/* allocation map */
	unsigned long		*bound_map;	/* boundary map */

	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	int			nr_empty_pop_pages; /* # of empty populated pages */

	unsigned long		populated[];	/* populated bitmap */
};
36
/* protects chunk state; the stats helpers below assert/take it */
extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_slot;		/* chunk list slots */
extern int pcpu_nr_slots;			/* number of chunk list slots */
extern int pcpu_nr_empty_pop_pages;		/* # of empty populated pages */

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;
45
40064aec
DZF
46/**
47 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
48 * @pages: number of physical pages
49 *
50 * This conversion is from physical pages to the number of bits
51 * required in the bitmap.
52 */
53static inline int pcpu_nr_pages_to_map_bits(int pages)
54{
55 return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
56}
57
58/**
59 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
60 * @chunk: chunk of interest
61 *
62 * This conversion is from the number of physical pages that the chunk
63 * serves to the number of bits in the bitmap.
64 */
65static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
66{
67 return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
68}
69
#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

/*
 * Allocator-wide statistics.  The area counters are updated with
 * pcpu_lock held (see pcpu_stats_area_alloc/dealloc); the chunk
 * counters take the lock themselves.
 */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;
87
88/*
89 * For debug purposes. We don't care about the flexible array.
90 */
91static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
92{
93 memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
94
95 /* initialize min_alloc_size to unit_size */
96 pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
97}
98
99/*
100 * pcpu_stats_area_alloc - increment area allocation stats
101 * @chunk: the location of the area being allocated
102 * @size: size of area to allocate in bytes
103 *
104 * CONTEXT:
105 * pcpu_lock.
106 */
107static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
108{
109 lockdep_assert_held(&pcpu_lock);
110
111 pcpu_stats.nr_alloc++;
112 pcpu_stats.nr_cur_alloc++;
113 pcpu_stats.nr_max_alloc =
114 max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
115 pcpu_stats.min_alloc_size =
116 min(pcpu_stats.min_alloc_size, size);
117 pcpu_stats.max_alloc_size =
118 max(pcpu_stats.max_alloc_size, size);
119
120 chunk->nr_alloc++;
121 chunk->max_alloc_size = max(chunk->max_alloc_size, size);
122}
123
124/*
125 * pcpu_stats_area_dealloc - decrement allocation stats
126 * @chunk: the location of the area being deallocated
127 *
128 * CONTEXT:
129 * pcpu_lock.
130 */
131static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
132{
133 lockdep_assert_held(&pcpu_lock);
134
135 pcpu_stats.nr_dealloc++;
136 pcpu_stats.nr_cur_alloc--;
137
138 chunk->nr_alloc--;
139}
140
141/*
142 * pcpu_stats_chunk_alloc - increment chunk stats
143 */
144static inline void pcpu_stats_chunk_alloc(void)
145{
303abfdf
DZ
146 unsigned long flags;
147 spin_lock_irqsave(&pcpu_lock, flags);
30a5b536
DZ
148
149 pcpu_stats.nr_chunks++;
150 pcpu_stats.nr_max_chunks =
151 max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
152
303abfdf 153 spin_unlock_irqrestore(&pcpu_lock, flags);
30a5b536
DZ
154}
155
156/*
157 * pcpu_stats_chunk_dealloc - decrement chunk stats
158 */
159static inline void pcpu_stats_chunk_dealloc(void)
160{
303abfdf
DZ
161 unsigned long flags;
162 spin_lock_irqsave(&pcpu_lock, flags);
30a5b536
DZ
163
164 pcpu_stats.nr_chunks--;
165
303abfdf 166 spin_unlock_irqrestore(&pcpu_lock, flags);
30a5b536
DZ
167}
168
#else

/*
 * !CONFIG_PERCPU_STATS: every stats hook compiles away to an empty
 * inline so callers need no #ifdefs of their own.
 */

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */

#endif /* _MM_PERCPU_INTERNAL_H */