// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 CohortFS, LLC
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */
#ifndef XIO_POOL_H
#define XIO_POOL_H

#include <atomic>
#include <vector>
#include <cstdlib>
#include <cstring>
#include <cstdint>
#include <cassert>  // for assert() in xpool_alloc()

extern "C" {
#include "libxio.h"
}

#include "common/likely.h"
static inline int xpool_alloc(struct xio_mempool *pool, uint64_t size,
                              struct xio_reg_mem* mp);
static inline void xpool_free(uint64_t size, struct xio_reg_mem* mp);

// A simple arena allocator backed by an Accelio (xio) mempool: each
// allocated piece is linked into a per-pool list and everything is
// released at once when the XioPool is destroyed.
class XioPool
{
private:
  struct xio_mempool *handle;

public:
  static bool trace_mempool;
  static bool trace_msgcnt;
  static const int MB = 8;

  struct xio_piece {
    struct xio_reg_mem mp[1];
    struct xio_piece *next;
    int s;
    char payload[MB];
  } *first;

  explicit XioPool(struct xio_mempool *_handle) :
    handle(_handle), first(0)
  {
  }
  ~XioPool()
  {
    struct xio_piece *p;
    while ((p = first)) {
      first = p->next;
      if (unlikely(trace_mempool)) {
        memset(p->payload, 0xcf, p->s); // guard bytes
      }
      xpool_free(sizeof(struct xio_piece)+(p->s)-MB, p->mp);
    }
  }
  // Carve a piece of at least _s bytes from the pool; returns 0 (NULL)
  // on allocation failure.
  void *alloc(size_t _s)
  {
    void *r;
    struct xio_reg_mem mp[1];
    struct xio_piece *x;
    int e = xpool_alloc(handle, (sizeof(struct xio_piece)-MB) + _s, mp);
    if (e) {
      r = 0;
    } else {
      x = reinterpret_cast<struct xio_piece *>(mp->addr);
      *x->mp = *mp;
      x->next = first;
      x->s = _s;
      first = x;
      r = x->payload;
    }
    return r;
  }
};

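/*
 * Minimal usage sketch (illustrative only, not part of the original header).
 * The example() function and its 'mpool' argument are hypothetical; the
 * Accelio mempool handle is created elsewhere in the xio messenger setup.
 *
 *   void example(struct xio_mempool *mpool)
 *   {
 *     XioPool pool(mpool);            // pieces live as long as 'pool'
 *     void *buf = pool.alloc(128);    // carved from the mempool
 *     if (buf) {
 *       memset(buf, 0, 128);          // use the piece
 *     }
 *   }                                 // ~XioPool() releases every piece
 */
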
// Optional allocation and message accounting, bucketed by slab size.  The
// global xp_stats instance is only updated when the XioPool::trace_mempool
// and XioPool::trace_msgcnt flags are enabled.
class XioPoolStats {
private:
  enum pool_sizes {
    SLAB_64 = 0,
    SLAB_256,
    SLAB_1024,
    SLAB_PAGE,
    SLAB_MAX,
    SLAB_OVERFLOW,
    NUM_SLABS,
  };

  std::atomic<unsigned> ctr_set[NUM_SLABS] = {};
  std::atomic<unsigned> msg_cnt = { 0 };  // send msgs
  std::atomic<unsigned> hook_cnt = { 0 }; // recv msgs

public:
  void dump(const char* tag, uint64_t serial);

  void inc(uint64_t size) {
    if (size <= 64) {
      (ctr_set[SLAB_64])++;
      return;
    }
    if (size <= 256) {
      (ctr_set[SLAB_256])++;
      return;
    }
    if (size <= 1024) {
      (ctr_set[SLAB_1024])++;
      return;
    }
    if (size <= 8192) {
      (ctr_set[SLAB_PAGE])++;
      return;
    }
    (ctr_set[SLAB_MAX])++;
  }

  void dec(uint64_t size) {
    if (size <= 64) {
      (ctr_set[SLAB_64])--;
      return;
    }
    if (size <= 256) {
      (ctr_set[SLAB_256])--;
      return;
    }
    if (size <= 1024) {
      (ctr_set[SLAB_1024])--;
      return;
    }
    if (size <= 8192) {
      (ctr_set[SLAB_PAGE])--;
      return;
    }
    (ctr_set[SLAB_MAX])--;
  }

  void inc_overflow() { ctr_set[SLAB_OVERFLOW]++; }
  void dec_overflow() { ctr_set[SLAB_OVERFLOW]--; }

  void inc_msgcnt() {
    if (unlikely(XioPool::trace_msgcnt)) {
      msg_cnt++;
    }
  }

  void dec_msgcnt() {
    if (unlikely(XioPool::trace_msgcnt)) {
      msg_cnt--;
    }
  }

  void inc_hookcnt() {
    if (unlikely(XioPool::trace_msgcnt)) {
      hook_cnt++;
    }
  }

  void dec_hookcnt() {
    if (unlikely(XioPool::trace_msgcnt)) {
      hook_cnt--;
    }
  }
};

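/*
 * Illustrative sketch (not part of the original header).  The slab counters
 * are driven from xpool_alloc()/xpool_free() only when XioPool::trace_mempool
 * is set, and the msg/hook counters only count when XioPool::trace_msgcnt is
 * set, so tracing is effectively free when disabled.  dump() is defined out
 * of line (not in this header).
 *
 *   XioPoolStats stats;
 *   stats.inc(100);             // falls into the <= 256 byte bucket
 *   stats.inc(4096);            // falls into the page-sized bucket
 *   stats.dec(100);
 *   stats.dump("example", 1);   // report the current counters
 */
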
extern XioPoolStats xp_stats;

static inline int xpool_alloc(struct xio_mempool *pool, uint64_t size,
                              struct xio_reg_mem* mp)
{
  // try to allocate from the xio pool
  int r = xio_mempool_alloc(pool, size, mp);
  if (r == 0) {
    if (unlikely(XioPool::trace_mempool))
      xp_stats.inc(size);
    return 0;
  }
  // fall back to malloc on errors; mp->length == 0 marks the region as
  // malloc-backed so xpool_free() releases it with free()
  mp->addr = malloc(size);
  assert(mp->addr);
  mp->length = 0;
  if (unlikely(XioPool::trace_mempool))
    xp_stats.inc_overflow();
  return 0;
}

static inline void xpool_free(uint64_t size, struct xio_reg_mem* mp)
{
  if (mp->length) {
    if (unlikely(XioPool::trace_mempool))
      xp_stats.dec(size);
    xio_mempool_free(mp);
  } else { // from malloc
    if (unlikely(XioPool::trace_mempool))
      xp_stats.dec_overflow();
    free(mp->addr);
  }
}

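/*
 * Illustrative sketch of the alloc/free pairing (not part of the original
 * header).  xpool_alloc() falls back to malloc() when the mempool cannot
 * satisfy the request and records that by leaving mp->length == 0, which is
 * how xpool_free() chooses between xio_mempool_free() and free().  The size
 * passed to xpool_free() is only used for the trace counters.
 *
 *   void example(struct xio_mempool *mpool)
 *   {
 *     struct xio_reg_mem mp;
 *     const uint64_t size = 1024;
 *     if (xpool_alloc(mpool, size, &mp) == 0) {
 *       memset(mp.addr, 0, size);     // mp.addr is valid either way
 *       xpool_free(size, &mp);
 *     }
 *   }
 */
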
#define xpool_inc_msgcnt() \
  do { xp_stats.inc_msgcnt(); } while (0)

#define xpool_dec_msgcnt() \
  do { xp_stats.dec_msgcnt(); } while (0)

#define xpool_inc_hookcnt() \
  do { xp_stats.inc_hookcnt(); } while (0)

#define xpool_dec_hookcnt() \
  do { xp_stats.dec_hookcnt(); } while (0)

#endif /* XIO_POOL_H */