]>
Commit | Line | Data |
---|---|---|
8fa3ed80 DZ |
1 | #ifndef _MM_PERCPU_INTERNAL_H |
2 | #define _MM_PERCPU_INTERNAL_H | |
3 | ||
4 | #include <linux/types.h> | |
5 | #include <linux/percpu.h> | |
6 | ||
/*
 * struct pcpu_chunk - bookkeeping for one percpu memory chunk.
 *
 * Chunks hang off the pcpu_slot lists (see @list) and track their
 * allocation state through an integer allocation map (@map).  The
 * trailing flexible array @populated records which of the chunk's
 * @nr_pages pages are currently backed by memory.
 */
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size;	/* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */

	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */

	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	unsigned long		populated[];	/* populated bitmap */
};
37 | ||
38 | extern spinlock_t pcpu_lock; | |
39 | ||
40 | extern struct list_head *pcpu_slot; | |
41 | extern int pcpu_nr_slots; | |
6b9b6f39 | 42 | extern int pcpu_nr_empty_pop_pages; |
8fa3ed80 DZ |
43 | |
44 | extern struct pcpu_chunk *pcpu_first_chunk; | |
45 | extern struct pcpu_chunk *pcpu_reserved_chunk; | |
46 | ||
30a5b536 DZ |
47 | #ifdef CONFIG_PERCPU_STATS |
48 | ||
49 | #include <linux/spinlock.h> | |
50 | ||
/*
 * struct percpu_stats - global counters for the percpu allocator.
 *
 * Updated by the pcpu_stats_* helpers below; the area counters are
 * modified under pcpu_lock (see the helpers' CONTEXT notes).
 */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};
61 | ||
62 | extern struct percpu_stats pcpu_stats; | |
63 | extern struct pcpu_alloc_info pcpu_stats_ai; | |
64 | ||
65 | /* | |
66 | * For debug purposes. We don't care about the flexible array. | |
67 | */ | |
68 | static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai) | |
69 | { | |
70 | memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info)); | |
71 | ||
72 | /* initialize min_alloc_size to unit_size */ | |
73 | pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size; | |
74 | } | |
75 | ||
76 | /* | |
77 | * pcpu_stats_area_alloc - increment area allocation stats | |
78 | * @chunk: the location of the area being allocated | |
79 | * @size: size of area to allocate in bytes | |
80 | * | |
81 | * CONTEXT: | |
82 | * pcpu_lock. | |
83 | */ | |
84 | static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size) | |
85 | { | |
86 | lockdep_assert_held(&pcpu_lock); | |
87 | ||
88 | pcpu_stats.nr_alloc++; | |
89 | pcpu_stats.nr_cur_alloc++; | |
90 | pcpu_stats.nr_max_alloc = | |
91 | max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc); | |
92 | pcpu_stats.min_alloc_size = | |
93 | min(pcpu_stats.min_alloc_size, size); | |
94 | pcpu_stats.max_alloc_size = | |
95 | max(pcpu_stats.max_alloc_size, size); | |
96 | ||
97 | chunk->nr_alloc++; | |
98 | chunk->max_alloc_size = max(chunk->max_alloc_size, size); | |
99 | } | |
100 | ||
101 | /* | |
102 | * pcpu_stats_area_dealloc - decrement allocation stats | |
103 | * @chunk: the location of the area being deallocated | |
104 | * | |
105 | * CONTEXT: | |
106 | * pcpu_lock. | |
107 | */ | |
108 | static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk) | |
109 | { | |
110 | lockdep_assert_held(&pcpu_lock); | |
111 | ||
112 | pcpu_stats.nr_dealloc++; | |
113 | pcpu_stats.nr_cur_alloc--; | |
114 | ||
115 | chunk->nr_alloc--; | |
116 | } | |
117 | ||
118 | /* | |
119 | * pcpu_stats_chunk_alloc - increment chunk stats | |
120 | */ | |
121 | static inline void pcpu_stats_chunk_alloc(void) | |
122 | { | |
303abfdf DZ |
123 | unsigned long flags; |
124 | spin_lock_irqsave(&pcpu_lock, flags); | |
30a5b536 DZ |
125 | |
126 | pcpu_stats.nr_chunks++; | |
127 | pcpu_stats.nr_max_chunks = | |
128 | max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks); | |
129 | ||
303abfdf | 130 | spin_unlock_irqrestore(&pcpu_lock, flags); |
30a5b536 DZ |
131 | } |
132 | ||
133 | /* | |
134 | * pcpu_stats_chunk_dealloc - decrement chunk stats | |
135 | */ | |
136 | static inline void pcpu_stats_chunk_dealloc(void) | |
137 | { | |
303abfdf DZ |
138 | unsigned long flags; |
139 | spin_lock_irqsave(&pcpu_lock, flags); | |
30a5b536 DZ |
140 | |
141 | pcpu_stats.nr_chunks--; | |
142 | ||
303abfdf | 143 | spin_unlock_irqrestore(&pcpu_lock, flags); |
30a5b536 DZ |
144 | } |
145 | ||
146 | #else | |
147 | ||
/* no-op stub: stats collection is compiled out without CONFIG_PERCPU_STATS */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}
151 | ||
/* no-op stub: stats collection is compiled out without CONFIG_PERCPU_STATS */
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}
155 | ||
/* no-op stub: stats collection is compiled out without CONFIG_PERCPU_STATS */
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}
159 | ||
/* no-op stub: stats collection is compiled out without CONFIG_PERCPU_STATS */
static inline void pcpu_stats_chunk_alloc(void)
{
}
163 | ||
/* no-op stub: stats collection is compiled out without CONFIG_PERCPU_STATS */
static inline void pcpu_stats_chunk_dealloc(void)
{
}
167 | ||
168 | #endif /* !CONFIG_PERCPU_STATS */ | |
169 | ||
8fa3ed80 | 170 | #endif |