/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "ocf_request.h"
#include "ocf_cache_priv.h"
#include "ocf_queue_priv.h"
#include "utils/utils_cache_line.h"

#define OCF_UTILS_RQ_DEBUG 0

#if 1 == OCF_UTILS_RQ_DEBUG
#define OCF_DEBUG_TRACE(cache) \
	ocf_cache_log(cache, log_info, "[Utils][RQ] %s\n", __func__)

#define OCF_DEBUG_PARAM(cache, format, ...) \
	ocf_cache_log(cache, log_info, "[Utils][RQ] %s - "format"\n", \
			__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif

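/*
 * Requests are served from fixed-size allocators, one for each
 * power-of-two core line count from 1 to 128. Requests spanning more
 * lines than that fall back to the single-line allocator plus a
 * separately allocated mapping array (see ocf_req_alloc_map()).
 */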
enum ocf_req_size {
	ocf_req_size_1 = 0,
	ocf_req_size_2,
	ocf_req_size_4,
	ocf_req_size_8,
	ocf_req_size_16,
	ocf_req_size_32,
	ocf_req_size_64,
	ocf_req_size_128,
	ocf_req_size_max,
};

struct ocf_req_allocator {
	env_allocator *allocator[ocf_req_size_max];
	size_t size[ocf_req_size_max];
};

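/* Size in bytes of the mapping array for an already sized request. */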
static inline size_t ocf_req_sizeof_map(struct ocf_request *req)
{
	uint32_t lines = req->core_line_count;
	size_t size = (lines * sizeof(struct ocf_map_info));

	ENV_BUG_ON(lines == 0);
	return size;
}

static inline size_t ocf_req_sizeof(uint32_t lines)
{
	size_t size = sizeof(struct ocf_request) +
			(lines * sizeof(struct ocf_map_info));

	ENV_BUG_ON(lines == 0);
	return size;
}

#define ALLOCATOR_NAME_FMT "ocf_req_%u"
/* Max number of digits in decimal representation of unsigned int is 10 */
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + 10)

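/*
 * Create one env_allocator per supported power-of-two request size.
 * On any failure the partially initialized state is torn down and -1 is
 * returned.
 */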
int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx)
{
	int i;
	struct ocf_req_allocator *req;
	char name[ALLOCATOR_NAME_MAX] = { '\0' };

	OCF_DEBUG_TRACE(cache);

	ocf_ctx->resources.req = env_zalloc(sizeof(*(ocf_ctx->resources.req)),
			ENV_MEM_NORMAL);
	req = ocf_ctx->resources.req;

	if (!req)
		goto err;

	for (i = 0; i < ARRAY_SIZE(req->allocator); i++) {
		req->size[i] = ocf_req_sizeof(1 << i);

		if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
				(1 << i)) < 0) {
			goto err;
		}

		req->allocator[i] = env_allocator_create(req->size[i], name);

		if (!req->allocator[i])
			goto err;

		OCF_DEBUG_PARAM(cache, "New request allocator, lines = %u, "
				"size = %lu", 1 << i, req->size[i]);
	}

	return 0;

err:
	ocf_req_allocator_deinit(ocf_ctx);
	return -1;
}

void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx)
{
	int i;
	struct ocf_req_allocator *req;

	OCF_DEBUG_TRACE(cache);

	if (!ocf_ctx->resources.req)
		return;

	req = ocf_ctx->resources.req;

	for (i = 0; i < ARRAY_SIZE(req->allocator); i++) {
		if (req->allocator[i]) {
			env_allocator_destroy(req->allocator[i]);
			req->allocator[i] = NULL;
		}
	}

	env_free(req);
	ocf_ctx->resources.req = NULL;
}

static inline env_allocator *_ocf_req_get_allocator_1(
		struct ocf_cache *cache)
{
	return cache->owner->resources.req->allocator[0];
}

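/*
 * Pick the preallocated allocator whose embedded map can hold "count"
 * core lines: idx starts as floor(log2(count)) and is bumped by one when
 * count is not an exact power of two, i.e. the count is rounded up to the
 * next supported size. Counts above 128 lines return NULL and the caller
 * falls back to allocator[0] plus a separate map allocation.
 */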
static env_allocator *_ocf_req_get_allocator(
		struct ocf_cache *cache, uint32_t count)
{
	struct ocf_ctx *ocf_ctx = cache->owner;
	unsigned int idx;

	/* __builtin_clz() is undefined for 0, so check the count first */
	ENV_BUG_ON(count == 0);

	idx = 31 - __builtin_clz(count);

	if (__builtin_ffs(count) <= idx)
		idx++;

	if (idx >= ocf_req_size_max)
		return NULL;

	return ocf_ctx->resources.req->allocator[idx];
}

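/*
 * Allocate a request for an I/O of "bytes" bytes starting at "addr".
 * The allocator is chosen based on how many core cache lines the I/O
 * spans; when a preallocated size fits, the mapping array embedded in
 * the request (__map) is used, otherwise the map is left unset and has
 * to be allocated with ocf_req_alloc_map().
 */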
struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
		uint64_t addr, uint32_t bytes, int rw)
{
	uint64_t core_line_first, core_line_last, core_line_count;
	ocf_cache_t cache = queue->cache;
	struct ocf_request *req;
	env_allocator *allocator;

	if (likely(bytes)) {
		core_line_first = ocf_bytes_2_lines(cache, addr);
		core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1);
		core_line_count = core_line_last - core_line_first + 1;
	} else {
		core_line_first = ocf_bytes_2_lines(cache, addr);
		core_line_last = core_line_first;
		core_line_count = 1;
	}

	allocator = _ocf_req_get_allocator(cache, core_line_count);
	if (allocator) {
		req = env_allocator_new(allocator);
	} else {
		req = env_allocator_new(_ocf_req_get_allocator_1(cache));
	}

	if (unlikely(!req))
		return NULL;

	if (allocator)
		req->map = req->__map;

	OCF_DEBUG_TRACE(cache);

	ocf_queue_get(queue);
	req->io_queue = queue;

	req->core = core;
	req->cache = cache;

	req->d2c = (queue != cache->mngt_queue) && !ocf_refcnt_inc(
			&cache->refcnt.metadata);

	env_atomic_set(&req->ref_count, 1);

	req->byte_position = addr;
	req->byte_length = bytes;
	req->core_line_first = core_line_first;
	req->core_line_last = core_line_last;
	req->core_line_count = core_line_count;
	req->alloc_core_line_count = core_line_count;
	req->rw = rw;
	req->part_id = PARTITION_DEFAULT;

	req->discard.sector = BYTES_TO_SECTORS(addr);
	req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
	req->discard.handled = 0;

	return req;
}

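/*
 * Allocate the mapping array separately for requests that did not get an
 * embedded map from one of the fixed-size allocators.
 */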
int ocf_req_alloc_map(struct ocf_request *req)
{
	if (req->map)
		return 0;

	req->map = env_zalloc(ocf_req_sizeof_map(req), ENV_MEM_NOIO);
	if (!req->map) {
		req->error = -OCF_ERR_NO_MEM;
		return -OCF_ERR_NO_MEM;
	}

	return 0;
}

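/*
 * Allocate the mapping for a discard request. The request is capped at
 * MAX_TRIM_RQ_SIZE so that the mapping always fits in a single
 * allocation.
 */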
int ocf_req_alloc_map_discard(struct ocf_request *req)
{
	ENV_BUILD_BUG_ON(MAX_TRIM_RQ_SIZE / ocf_cache_line_size_4 *
			sizeof(struct ocf_map_info) > 4 * KiB);

	if (req->byte_length <= MAX_TRIM_RQ_SIZE)
		return ocf_req_alloc_map(req);

	/*
	 * NOTE: For cache line size bigger than 8k a single-allocation mapping
	 * can handle more than MAX_TRIM_RQ_SIZE, so for these cache line sizes
	 * discard request uses only part of the mapping array.
	 */
	req->byte_length = MAX_TRIM_RQ_SIZE;
	req->core_line_last = ocf_bytes_2_lines(req->cache,
			req->byte_position + req->byte_length - 1);
	req->core_line_count = req->core_line_last - req->core_line_first + 1;

	return ocf_req_alloc_map(req);
}

struct ocf_request *ocf_req_new_extended(ocf_queue_t queue, ocf_core_t core,
		uint64_t addr, uint32_t bytes, int rw)
{
	struct ocf_request *req;

	req = ocf_req_new(queue, core, addr, bytes, rw);

	if (likely(req) && ocf_req_alloc_map(req)) {
		ocf_req_put(req);
		return NULL;
	}

	return req;
}

struct ocf_request *ocf_req_new_discard(ocf_queue_t queue, ocf_core_t core,
		uint64_t addr, uint32_t bytes, int rw)
{
	struct ocf_request *req;

	req = ocf_req_new_extended(queue, core, addr,
			OCF_MIN(bytes, MAX_TRIM_RQ_SIZE), rw);
	if (!req)
		return NULL;

	return req;
}

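/*
 * Requests are reference counted: ocf_req_new() returns a request with a
 * single reference, ocf_req_get()/ocf_req_put() take and drop further
 * references, and the last put frees the request and releases its queue
 * and metadata references.
 */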
void ocf_req_get(struct ocf_request *req)
{
	OCF_DEBUG_TRACE(req->cache);

	env_atomic_inc(&req->ref_count);
}

void ocf_req_put(struct ocf_request *req)
{
	env_allocator *allocator;
	ocf_queue_t queue = req->io_queue;

	if (env_atomic_dec_return(&req->ref_count))
		return;

	OCF_DEBUG_TRACE(req->cache);

	if (!req->d2c && req->io_queue != req->cache->mngt_queue)
		ocf_refcnt_dec(&req->cache->refcnt.metadata);

	allocator = _ocf_req_get_allocator(req->cache,
			req->alloc_core_line_count);
	if (allocator) {
		env_allocator_del(allocator, req);
	} else {
		env_free(req->map);
		env_allocator_del(_ocf_req_get_allocator_1(req->cache), req);
	}

	ocf_queue_put(queue);
}

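/*
 * Mark the request as dirty by taking a reference on the cache dirty
 * counter; returns -OCF_ERR_AGAIN when the reference cannot be taken
 * (presumably because the counter is frozen, e.g. during a flush).
 */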
int ocf_req_set_dirty(struct ocf_request *req)
{
	req->dirty = !!ocf_refcnt_inc(&req->cache->refcnt.dirty);
	return req->dirty ? 0 : -OCF_ERR_AGAIN;
}

void ocf_req_clear_info(struct ocf_request *req)
{
	ENV_BUG_ON(env_memset(&req->info, sizeof(req->info), 0));
}

void ocf_req_clear_map(struct ocf_request *req)
{
	if (likely(req->map))
		ENV_BUG_ON(env_memset(req->map,
				sizeof(req->map[0]) * req->core_line_count, 0));
}

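/* Precompute the metadata hash for every core line covered by the request. */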
void ocf_req_hash(struct ocf_request *req)
{
	int i;

	for (i = 0; i < req->core_line_count; i++) {
		req->map[i].hash = ocf_metadata_hash_func(req->cache,
				req->core_line_first + i,
				ocf_core_get_id(req->core));
	}
}