#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/percpu.h>

struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size; /* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */

	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */

	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	int			nr_empty_pop_pages; /* # of empty populated pages */
	unsigned long		populated[];	/* populated bitmap */
};
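
/*
 * Illustrative sketch (this snippet is an example, not the allocator's own
 * iterator code, which lives in mm/percpu.c): each bit of populated[] above
 * tracks whether the corresponding page of the chunk is backed, and
 * nr_pages bounds the bitmap, so runs of unpopulated pages can be located
 * with the generic bitmap helpers:
 *
 *	int rs, re;
 *
 *	rs = find_next_zero_bit(chunk->populated, chunk->nr_pages, 0);
 *	re = find_next_bit(chunk->populated, chunk->nr_pages, rs + 1);
 *	if (rs < chunk->nr_pages)
 *		... [rs, re) is the first unpopulated page region ...
 */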

extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_slot;
extern int pcpu_nr_slots;
extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;

#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;

/*
 * For debug purposes. We don't care about the flexible array.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
	memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));

	/* initialize min_alloc_size to unit_size */
	pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}

/*
 * pcpu_stats_area_alloc - increment area allocation stats
 * @chunk: the location of the area being allocated
 * @size: size of area to allocate in bytes
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_alloc++;
	pcpu_stats.nr_cur_alloc++;
	pcpu_stats.nr_max_alloc =
		max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
	pcpu_stats.min_alloc_size =
		min(pcpu_stats.min_alloc_size, size);
	pcpu_stats.max_alloc_size =
		max(pcpu_stats.max_alloc_size, size);

	chunk->nr_alloc++;
	chunk->max_alloc_size = max(chunk->max_alloc_size, size);
}
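
/*
 * Illustrative call pattern (the surrounding code is a sketch, not lifted
 * from the allocator): per their CONTEXT notes, pcpu_stats_area_alloc()
 * above and pcpu_stats_area_dealloc() below rely on the caller already
 * holding pcpu_lock, which the lockdep_assert_held() checks enforce:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	... carve the area out of a chunk ...
 *	pcpu_stats_area_alloc(chunk, size);
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 */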

/*
 * pcpu_stats_area_dealloc - decrement allocation stats
 * @chunk: the location of the area being deallocated
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_dealloc++;
	pcpu_stats.nr_cur_alloc--;

	chunk->nr_alloc--;
}

/*
 * pcpu_stats_chunk_alloc - increment chunk stats
 */
static inline void pcpu_stats_chunk_alloc(void)
{
	unsigned long flags;
	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks++;
	pcpu_stats.nr_max_chunks =
		max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
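
/*
 * Illustrative call pattern (create_chunk() is a hypothetical stand-in for
 * the allocator's real chunk-creation path): unlike the area helpers, the
 * chunk stat helpers take pcpu_lock themselves, so they are called without
 * the lock held:
 *
 *	chunk = create_chunk();
 *	if (chunk)
 *		pcpu_stats_chunk_alloc();
 */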

/*
 * pcpu_stats_chunk_dealloc - decrement chunk stats
 */
static inline void pcpu_stats_chunk_dealloc(void)
{
	unsigned long flags;
	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks--;

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

#else

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */

#endif