// ceph/src/msg/xio/XioPool.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2014 CohortFS, LLC
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14 #ifndef XIO_POOL_H
15 #define XIO_POOL_H
16
17 extern "C" {
18 #include <stdlib.h>
19 #include <string.h>
20 #include <stdint.h>
21 #include "libxio.h"
22 }
23 #include <vector>
24 #include "include/atomic.h"
25 #include "common/likely.h"
26
27
28 static inline int xpool_alloc(struct xio_mempool *pool, uint64_t size,
29 struct xio_reg_mem* mp);
30 static inline void xpool_free(uint64_t size, struct xio_reg_mem* mp);
31
32 using ceph::atomic_t;
33
34 class XioPool
35 {
36 private:
37 struct xio_mempool *handle;
38
39 public:
40 static bool trace_mempool;
41 static bool trace_msgcnt;
42 static const int MB = 8;
43
44 struct xio_piece {
45 struct xio_reg_mem mp[1];
46 struct xio_piece *next;
47 int s;
48 char payload[MB];
49 } *first;
50
51 explicit XioPool(struct xio_mempool *_handle) :
52 handle(_handle), first(0)
53 {
54 }
55 ~XioPool()
56 {
57 struct xio_piece *p;
58 while ((p = first)) {
59 first = p->next;
60 if (unlikely(trace_mempool)) {
61 memset(p->payload, 0xcf, p->s); // guard bytes
62 }
63 xpool_free(sizeof(struct xio_piece)+(p->s)-MB, p->mp);
64 }
65 }
66 void *alloc(size_t _s)
67 {
68 void *r;
69 struct xio_reg_mem mp[1];
70 struct xio_piece *x;
71 int e = xpool_alloc(handle, (sizeof(struct xio_piece)-MB) + _s, mp);
72 if (e) {
73 r = 0;
74 } else {
75 x = reinterpret_cast<struct xio_piece *>(mp->addr);
76 *x->mp = *mp;
77 x->next = first;
78 x->s = _s;
79 first = x;
80 r = x->payload;
81 }
82 return r;
83 }
84 };
85
86 class XioPoolStats {
87 private:
88 enum pool_sizes {
89 SLAB_64 = 0,
90 SLAB_256,
91 SLAB_1024,
92 SLAB_PAGE,
93 SLAB_MAX,
94 SLAB_OVERFLOW,
95 NUM_SLABS,
96 };
97
98 atomic_t ctr_set[NUM_SLABS];
99
100 atomic_t msg_cnt; // send msgs
101 atomic_t hook_cnt; // recv msgs
102
103 public:
104 XioPoolStats() : msg_cnt(0), hook_cnt(0) {
105 for (int ix = 0; ix < NUM_SLABS; ++ix) {
106 ctr_set[ix].set(0);
107 }
108 }
109
110 void dump(const char* tag, uint64_t serial);
111
112 void inc(uint64_t size) {
113 if (size <= 64) {
114 (ctr_set[SLAB_64]).inc();
115 return;
116 }
117 if (size <= 256) {
118 (ctr_set[SLAB_256]).inc();
119 return;
120 }
121 if (size <= 1024) {
122 (ctr_set[SLAB_1024]).inc();
123 return;
124 }
125 if (size <= 8192) {
126 (ctr_set[SLAB_PAGE]).inc();
127 return;
128 }
129 (ctr_set[SLAB_MAX]).inc();
130 }
131
132 void dec(uint64_t size) {
133 if (size <= 64) {
134 (ctr_set[SLAB_64]).dec();
135 return;
136 }
137 if (size <= 256) {
138 (ctr_set[SLAB_256]).dec();
139 return;
140 }
141 if (size <= 1024) {
142 (ctr_set[SLAB_1024]).dec();
143 return;
144 }
145 if (size <= 8192) {
146 (ctr_set[SLAB_PAGE]).dec();
147 return;
148 }
149 (ctr_set[SLAB_MAX]).dec();
150 }
151
152 void inc_overflow() { ctr_set[SLAB_OVERFLOW].inc(); }
153 void dec_overflow() { ctr_set[SLAB_OVERFLOW].dec(); }
154
155 void inc_msgcnt() {
156 if (unlikely(XioPool::trace_msgcnt)) {
157 msg_cnt.inc();
158 }
159 }
160
161 void dec_msgcnt() {
162 if (unlikely(XioPool::trace_msgcnt)) {
163 msg_cnt.dec();
164 }
165 }
166
167 void inc_hookcnt() {
168 if (unlikely(XioPool::trace_msgcnt)) {
169 hook_cnt.inc();
170 }
171 }
172
173 void dec_hookcnt() {
174 if (unlikely(XioPool::trace_msgcnt)) {
175 hook_cnt.dec();
176 }
177 }
178 };
179
180 extern XioPoolStats xp_stats;
181
182 static inline int xpool_alloc(struct xio_mempool *pool, uint64_t size,
183 struct xio_reg_mem* mp)
184 {
185 // try to allocate from the xio pool
186 int r = xio_mempool_alloc(pool, size, mp);
187 if (r == 0) {
188 if (unlikely(XioPool::trace_mempool))
189 xp_stats.inc(size);
190 return 0;
191 }
192 // fall back to malloc on errors
193 mp->addr = malloc(size);
194 assert(mp->addr);
195 mp->length = 0;
196 if (unlikely(XioPool::trace_mempool))
197 xp_stats.inc_overflow();
198 return 0;
199 }
200
201 static inline void xpool_free(uint64_t size, struct xio_reg_mem* mp)
202 {
203 if (mp->length) {
204 if (unlikely(XioPool::trace_mempool))
205 xp_stats.dec(size);
206 xio_mempool_free(mp);
207 } else { // from malloc
208 if (unlikely(XioPool::trace_mempool))
209 xp_stats.dec_overflow();
210 free(mp->addr);
211 }
212 }
213
/* Convenience wrappers over the global xp_stats message/hook counters.
 * They are cheap no-ops unless XioPool::trace_msgcnt is enabled (the
 * check lives inside the XioPoolStats methods). */
#define xpool_inc_msgcnt() \
  do { xp_stats.inc_msgcnt(); } while (0)

#define xpool_dec_msgcnt() \
  do { xp_stats.dec_msgcnt(); } while (0)

#define xpool_inc_hookcnt() \
  do { xp_stats.inc_hookcnt(); } while (0)

#define xpool_dec_hookcnt() \
  do { xp_stats.dec_hookcnt(); } while (0)
225
226 #endif /* XIO_POOL_H */