ceph/src/spdk/ocf/src/ocf_core.c (ceph.git, import 15.2.0 Octopus source)
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "ocf_priv.h"
#include "ocf_core_priv.h"
#include "ocf_io_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_req.h"
#include "utils/utils_part.h"
#include "utils/utils_device.h"
#include "ocf_request.h"
#include "ocf_trace_priv.h"

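/* Private data of the core (front) volume - it only needs to remember which
 * core object it represents. */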
struct ocf_core_volume {
	ocf_core_t core;
};

ocf_cache_t ocf_core_get_cache(ocf_core_t core)
{
	OCF_CHECK_NULL(core);
	return core->volume.cache;
}

ocf_volume_t ocf_core_get_volume(ocf_core_t core)
{
	OCF_CHECK_NULL(core);
	return &core->volume;
}

ocf_volume_t ocf_core_get_front_volume(ocf_core_t core)
{
	OCF_CHECK_NULL(core);
	return &core->front_volume;
}

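/* The core id is recovered by pointer arithmetic: cores live in the
 * cache->core array, so the id is this core's offset from the beginning
 * of that array. */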
ocf_core_id_t ocf_core_get_id(ocf_core_t core)
{
	struct ocf_cache *cache;
	ocf_core_id_t core_id;

	OCF_CHECK_NULL(core);

	cache = core->volume.cache;
	core_id = core - cache->core;

	return core_id;
}

int ocf_core_set_name(ocf_core_t core, const char *src, size_t src_size)
{
	OCF_CHECK_NULL(core);
	OCF_CHECK_NULL(src);

	return env_strncpy(core->name, sizeof(core->name), src, src_size);
}

const char *ocf_core_get_name(ocf_core_t core)
{
	OCF_CHECK_NULL(core);

	return core->name;
}

ocf_core_state_t ocf_core_get_state(ocf_core_t core)
{
	OCF_CHECK_NULL(core);

	return core->opened ?
			ocf_core_state_active : ocf_core_state_inactive;
}

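/* A core id is valid when it falls within [OCF_CORE_ID_MIN, OCF_CORE_ID_MAX]
 * and its bit is set in the cache's valid_core_bitmap. */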
bool ocf_core_is_valid(ocf_cache_t cache, ocf_core_id_t id)
{
	OCF_CHECK_NULL(cache);

	if (id > OCF_CORE_ID_MAX || id < OCF_CORE_ID_MIN)
		return false;

	if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap))
		return false;

	return true;
}

int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core)
{
	OCF_CHECK_NULL(cache);

	if (!ocf_core_is_valid(cache, id))
		return -OCF_ERR_CORE_NOT_AVAIL;

	*core = &cache->core[id];
	return 0;
}

uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core)
{
	uint32_t core_id = ocf_core_get_id(core);
	ocf_cache_t cache = ocf_core_get_cache(core);

	return cache->core_conf_meta[core_id].seq_cutoff_threshold;
}

ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
{
	uint32_t core_id = ocf_core_get_id(core);
	ocf_cache_t cache = ocf_core_get_cache(core);

	return cache->core_conf_meta[core_id].seq_cutoff_policy;
}

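/* Iterate over all valid cores (optionally only the opened ones) and call
 * the visitor for each; iteration stops at the first non-zero return value,
 * which is then propagated to the caller. */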
int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
		bool only_opened)
{
	ocf_core_id_t id;
	int result = 0;

	OCF_CHECK_NULL(cache);

	if (!visitor)
		return -OCF_ERR_INVAL;

	for (id = 0; id < OCF_CORE_MAX; id++) {
		if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap))
			continue;

		if (only_opened && !cache->core[id].opened)
			continue;

		result = visitor(&cache->core[id], cntx);
		if (result)
			break;
	}

	return result;
}

/* *** HELPER FUNCTIONS *** */

static inline struct ocf_core_io *ocf_io_to_core_io(struct ocf_io *io)
{
	return ocf_io_get_priv(io);
}

static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
{
	struct ocf_core_volume *core_volume = ocf_volume_get_priv(volume);

	return core_volume->core;
}

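/* Take a reference on the cache-wide counter of in-flight dirty requests.
 * If the reference cannot be taken (counter frozen), -EBUSY is returned and
 * the submission paths below fall back to write-through. */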
static inline int ocf_io_set_dirty(ocf_cache_t cache,
		struct ocf_core_io *core_io)
{
	core_io->dirty = ocf_refcnt_inc(&cache->dirty);
	return core_io->dirty ? 0 : -EBUSY;
}

static inline void dec_counter_if_req_was_dirty(struct ocf_core_io *core_io,
		ocf_cache_t cache)
{
	if (!core_io->dirty)
		return;

	core_io->dirty = 0;
	ocf_refcnt_dec(&cache->dirty);
}

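/* Sanity-check an incoming core I/O: volume, ops, queue and completion
 * callback must be set, the address range must fit within the volume,
 * and the io class and direction must be valid. */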
static inline int ocf_core_validate_io(struct ocf_io *io)
{
	ocf_core_t core;

	if (!io->volume)
		return -EINVAL;

	if (!io->ops)
		return -EINVAL;

	if (io->addr >= ocf_volume_get_length(io->volume))
		return -EINVAL;

	if (io->addr + io->bytes > ocf_volume_get_length(io->volume))
		return -EINVAL;

	if (io->io_class >= OCF_IO_CLASS_MAX)
		return -EINVAL;

	if (io->dir != OCF_READ && io->dir != OCF_WRITE)
		return -EINVAL;

	if (!io->io_queue)
		return -EINVAL;

	if (!io->end)
		return -EINVAL;

	/* Core volume I/O must not be queued on management queue - this would
	 * break I/O accounting code, resulting in use-after-free type of errors
	 * after cache detach, core remove etc. */
	core = ocf_volume_to_core(io->volume);
	if (io->io_queue == ocf_core_get_cache(core)->mngt_queue)
		return -EINVAL;

	return 0;
}

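/* Common completion callback for requests created in this file: emits the
 * completion trace event, ends the OCF I/O, drops the dirty counter
 * reference if one was taken, and releases the I/O reference. */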
static void ocf_req_complete(struct ocf_request *req, int error)
{
	/* Log trace */
	ocf_trace_io_cmpl(ocf_io_to_core_io(req->io), req->cache);

	/* Complete IO */
	ocf_io_end(req->io, error);

	dec_counter_if_req_was_dirty(ocf_io_to_core_io(req->io), req->cache);

	/* Invalidate OCF IO, it is not valid after completion */
	ocf_io_put(req->io);
	req->io = NULL;
}

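/* Main submission path for core I/O. The effective cache mode is resolved
 * (unless the caller forced one), a request is allocated and filled in,
 * sequential cutoff state and statistics are updated, and the request is
 * handed to the cache engine. A write-back request silently degrades to
 * write-through when the dirty counter reference cannot be taken. */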
void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
{
	struct ocf_core_io *core_io;
	ocf_req_cache_mode_t req_cache_mode;
	ocf_core_t core;
	ocf_cache_t cache;
	int ret;

	OCF_CHECK_NULL(io);

	ret = ocf_core_validate_io(io);
	if (ret < 0) {
		io->end(io, ret);
		return;
	}

	core_io = ocf_io_to_core_io(io);

	core = ocf_volume_to_core(io->volume);
	cache = ocf_core_get_cache(core);

	ocf_trace_init_io(core_io, cache);

	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return;
	}

	/* TODO: instead of casting ocf_cache_mode_t to ocf_req_cache_mode_t
	   we can resolve IO interface here and get rid of the latter. */
	req_cache_mode = cache_mode;

	if (cache_mode == ocf_cache_mode_none)
		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
	if (req_cache_mode == ocf_req_cache_mode_wb &&
			ocf_io_set_dirty(cache, core_io)) {
		req_cache_mode = ocf_req_cache_mode_wt;
	}

	core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
			io->dir);
	if (!core_io->req) {
		dec_counter_if_req_was_dirty(core_io, cache);
		io->end(io, -ENOMEM);
		return;
	}

	if (core_io->req->d2c)
		req_cache_mode = ocf_req_cache_mode_d2c;

	core_io->req->part_id = ocf_part_class2id(cache, io->io_class);
	core_io->req->data = core_io->data;
	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;

	ocf_seq_cutoff_update(core, core_io->req);

	ocf_core_update_stats(core, io);

	if (io->dir == OCF_WRITE)
		ocf_trace_io(core_io, ocf_event_operation_wr, cache);
	else if (io->dir == OCF_READ)
		ocf_trace_io(core_io, ocf_event_operation_rd, cache);

	ocf_io_get(io);
	ret = ocf_engine_hndl_req(core_io->req, req_cache_mode);
	if (ret) {
		dec_counter_if_req_was_dirty(core_io, cache);
		ocf_req_put(core_io->req);
		io->end(io, ret);
	}
}

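/* Fast-path submission: the request is completed only when the engine
 * reports a fast-path hit; otherwise -EIO is returned so the caller can
 * resubmit through the regular ocf_core_submit_io_mode() path. */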
int ocf_core_submit_io_fast(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_req_cache_mode_t req_cache_mode;
	struct ocf_event_io trace_event;
	struct ocf_request *req;
	ocf_core_t core;
	ocf_cache_t cache;
	int fast;
	int ret;

	OCF_CHECK_NULL(io);

	ret = ocf_core_validate_io(io);
	if (ret < 0)
		return ret;

	core_io = ocf_io_to_core_io(io);

	core = ocf_volume_to_core(io->volume);
	cache = ocf_core_get_cache(core);

	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return 0;
	}

	req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
	if (req_cache_mode == ocf_req_cache_mode_wb &&
			ocf_io_set_dirty(cache, core_io)) {
		req_cache_mode = ocf_req_cache_mode_wt;
	}

	switch (req_cache_mode) {
	case ocf_req_cache_mode_pt:
		return -EIO;
	case ocf_req_cache_mode_wb:
		req_cache_mode = ocf_req_cache_mode_fast;
		break;
	default:
		if (cache->use_submit_io_fast)
			break;
		if (io->dir == OCF_WRITE)
			return -EIO;

		req_cache_mode = ocf_req_cache_mode_fast;
	}

	core_io->req = ocf_req_new_extended(io->io_queue, core,
			io->addr, io->bytes, io->dir);
	// We need additional pointer to req in case completion arrives before
	// we leave this function and core_io is freed
	req = core_io->req;

	if (!req) {
		dec_counter_if_req_was_dirty(core_io, cache);
		io->end(io, -ENOMEM);
		return 0;
	}
	if (req->d2c) {
		dec_counter_if_req_was_dirty(core_io, cache);
		ocf_req_put(req);
		return -EIO;
	}

	req->part_id = ocf_part_class2id(cache, io->io_class);
	req->data = core_io->data;
	req->complete = ocf_req_complete;
	req->io = io;

	ocf_core_update_stats(core, io);

	if (cache->trace.trace_callback) {
		if (io->dir == OCF_WRITE)
			ocf_trace_prep_io_event(&trace_event, core_io, ocf_event_operation_wr);
		else if (io->dir == OCF_READ)
			ocf_trace_prep_io_event(&trace_event, core_io, ocf_event_operation_rd);
	}

	ocf_io_get(io);

	fast = ocf_engine_hndl_fast_req(req, req_cache_mode);
	if (fast != OCF_FAST_PATH_NO) {
		ocf_trace_push(io->io_queue, &trace_event, sizeof(trace_event));
		ocf_seq_cutoff_update(core, req);
		return 0;
	}

	dec_counter_if_req_was_dirty(core_io, cache);

	ocf_io_put(io);
	ocf_req_put(req);
	return -EIO;
}

static void ocf_core_volume_submit_io(struct ocf_io *io)
{
	ocf_core_submit_io_mode(io, ocf_cache_mode_none);
}

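/* Flush requests bypass cache mode resolution and are handed directly to
 * the cache engine as an "ops" request. */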
static void ocf_core_volume_submit_flush(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_core_t core;
	ocf_cache_t cache;
	int ret;

	OCF_CHECK_NULL(io);

	ret = ocf_core_validate_io(io);
	if (ret < 0) {
		ocf_io_end(io, ret);
		return;
	}

	core_io = ocf_io_to_core_io(io);

	core = ocf_volume_to_core(io->volume);
	cache = ocf_core_get_cache(core);

	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return;
	}

	core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
			io->dir);
	if (!core_io->req) {
		ocf_io_end(io, -ENOMEM);
		return;
	}

	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;
	core_io->req->data = core_io->data;

	ocf_trace_io(core_io, ocf_event_operation_flush, cache);
	ocf_io_get(io);
	ocf_engine_hndl_ops_req(core_io->req);
}

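/* Discard requests get their own request constructor and engine handler;
 * they are always submitted as writes. */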
static void ocf_core_volume_submit_discard(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_core_t core;
	ocf_cache_t cache;
	int ret;

	OCF_CHECK_NULL(io);

	ret = ocf_core_validate_io(io);
	if (ret < 0) {
		ocf_io_end(io, ret);
		return;
	}

	core_io = ocf_io_to_core_io(io);

	core = ocf_volume_to_core(io->volume);
	cache = ocf_core_get_cache(core);

	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return;
	}

	core_io->req = ocf_req_new_discard(io->io_queue, core,
			io->addr, io->bytes, OCF_WRITE);
	if (!core_io->req) {
		ocf_io_end(io, -ENOMEM);
		return;
	}

	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;
	core_io->req->data = core_io->data;

	ocf_trace_io(core_io, ocf_event_operation_discard, cache);
	ocf_io_get(io);
	ocf_engine_hndl_discard_req(core_io->req);
}

/* *** VOLUME OPS *** */

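/* The core volume is opened with the core handle passed via the uuid data
 * pointer; it is stored in the volume's private data for later lookup by
 * ocf_volume_to_core(). */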
static int ocf_core_volume_open(ocf_volume_t volume, void *volume_params)
{
	struct ocf_core_volume *core_volume = ocf_volume_get_priv(volume);
	const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(volume);
	ocf_core_t core = (ocf_core_t)uuid->data;

	core_volume->core = core;

	return 0;
}

static void ocf_core_volume_close(ocf_volume_t volume)
{
}

static unsigned int ocf_core_volume_get_max_io_size(ocf_volume_t volume)
{
	ocf_core_t core = ocf_volume_to_core(volume);

	return ocf_volume_get_max_io_size(&core->volume);
}

static uint64_t ocf_core_volume_get_byte_length(ocf_volume_t volume)
{
	ocf_core_t core = ocf_volume_to_core(volume);

	return ocf_volume_get_length(&core->volume);
}


/* *** IO OPS *** */

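/* Core I/O carries its data buffer in the per-io private structure; buffer
 * offsets other than zero are not supported on the core (front) volume. */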
static int ocf_core_io_set_data(struct ocf_io *io,
		ctx_data_t *data, uint32_t offset)
{
	struct ocf_core_io *core_io;

	OCF_CHECK_NULL(io);

	if (!data || offset)
		return -EINVAL;

	core_io = ocf_io_to_core_io(io);
	core_io->data = data;

	return 0;
}

static ctx_data_t *ocf_core_io_get_data(struct ocf_io *io)
{
	struct ocf_core_io *core_io;

	OCF_CHECK_NULL(io);

	core_io = ocf_io_to_core_io(io);
	return core_io->data;
}

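/* Volume interface exposed to users of the core (front) volume; metadata
 * submission is not supported and atomic writes are not advertised. */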
const struct ocf_volume_properties ocf_core_volume_properties = {
	.name = "OCF Core",
	.io_priv_size = sizeof(struct ocf_core_io),
	.volume_priv_size = sizeof(struct ocf_core_volume),
	.caps = {
		.atomic_writes = 0,
	},
	.ops = {
		.submit_io = ocf_core_volume_submit_io,
		.submit_flush = ocf_core_volume_submit_flush,
		.submit_discard = ocf_core_volume_submit_discard,
		.submit_metadata = NULL,

		.open = ocf_core_volume_open,
		.close = ocf_core_volume_close,
		.get_max_io_size = ocf_core_volume_get_max_io_size,
		.get_length = ocf_core_volume_get_byte_length,
	},
	.io_ops = {
		.set_data = ocf_core_io_set_data,
		.get_data = ocf_core_io_get_data,
	},
};

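/* The core volume type is registered under id 0 in the OCF context and
 * unregistered under the same id on deinit. */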
int ocf_core_volume_type_init(ocf_ctx_t ctx)
{
	return ocf_ctx_register_volume_type(ctx, 0,
			&ocf_core_volume_properties);
}

void ocf_core_volume_type_deinit(ocf_ctx_t ctx)
{
	ocf_ctx_unregister_volume_type(ctx, 0);
}