ceph/src/spdk/ocf/src/engine/engine_discard.c

/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_discard.h"
#include "../metadata/metadata.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"

#define OCF_ENGINE_DEBUG 0

#define OCF_ENGINE_DEBUG_IO_NAME "discard"
#include "engine_debug.h"

static int _ocf_discard_step_do(struct ocf_request *req);
static int _ocf_discard_step(struct ocf_request *req);
static int _ocf_discard_flush_cache(struct ocf_request *req);
static int _ocf_discard_core(struct ocf_request *req);

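/*
 * A discard request is handled in chunks of at most MAX_TRIM_RQ_SIZE bytes.
 * The io_if tables below chain the steps: _ocf_discard_step maps and locks
 * the current chunk, _ocf_discard_step_do purges it from the cache, and when
 * the whole range has been handled the cache device is flushed (unless
 * metadata is volatile) before the discard is forwarded to the core volume.
 */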
static const struct ocf_io_if _io_if_discard_step = {
	.read = _ocf_discard_step,
	.write = _ocf_discard_step,
};

static const struct ocf_io_if _io_if_discard_step_resume = {
	.read = _ocf_discard_step_do,
	.write = _ocf_discard_step_do,
};

static const struct ocf_io_if _io_if_discard_flush_cache = {
	.read = _ocf_discard_flush_cache,
	.write = _ocf_discard_flush_cache,
};

static const struct ocf_io_if _io_if_discard_core = {
	.read = _ocf_discard_core,
	.write = _ocf_discard_core,
};

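/* Complete the top-level discard request and drop the engine's reference */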
static void _ocf_discard_complete_req(struct ocf_request *req, int error)
{
	req->complete(req, error);

	ocf_req_put(req);
}
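
/* Completion of the discard forwarded to the core volume */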
static void _ocf_discard_core_complete(struct ocf_io *io, int error)
{
	struct ocf_request *req = io->priv1;

	OCF_DEBUG_RQ(req, "Core DISCARD Completion");

	_ocf_discard_complete_req(req, error);

	ocf_io_put(io);
}

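/* Forward the discard to the core (backend) volume once the cached portion
 * of the range has been purged.
 */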
static int _ocf_discard_core(struct ocf_request *req)
{
	struct ocf_io *io;
	int err;

	io = ocf_volume_new_io(&req->core->volume, req->io_queue,
			SECTORS_TO_BYTES(req->discard.sector),
			SECTORS_TO_BYTES(req->discard.nr_sects),
			OCF_WRITE, 0, 0);
	if (!io) {
		_ocf_discard_complete_req(req, -OCF_ERR_NO_MEM);
		return -OCF_ERR_NO_MEM;
	}

	ocf_io_set_cmpl(io, req, NULL, _ocf_discard_core_complete);
	err = ocf_io_set_data(io, req->data, 0);
	if (err) {
		_ocf_discard_core_complete(io, err);
		return err;
	}

	ocf_volume_submit_discard(io);

	return 0;
}

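/* Completion of the cache-device flush; on success chain to the core
 * discard, on error fail the request.
 */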
static void _ocf_discard_cache_flush_complete(struct ocf_io *io, int error)
{
	struct ocf_request *req = io->priv1;

	if (error) {
		ocf_metadata_error(req->cache);
		_ocf_discard_complete_req(req, error);
		ocf_io_put(io);
		return;
	}

	req->io_if = &_io_if_discard_core;
	ocf_engine_push_req_front(req, true);

	ocf_io_put(io);
}

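/* Flush the cache device so that metadata updates made while purging the
 * discarded range are persistent before the core volume is discarded.
 */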
static int _ocf_discard_flush_cache(struct ocf_request *req)
{
	struct ocf_io *io;

	io = ocf_volume_new_io(&req->cache->device->volume, req->io_queue,
			0, 0, OCF_WRITE, 0, 0);
	if (!io) {
		ocf_metadata_error(req->cache);
		_ocf_discard_complete_req(req, -OCF_ERR_NO_MEM);
		return -OCF_ERR_NO_MEM;
	}

	ocf_io_set_cmpl(io, req, NULL, _ocf_discard_cache_flush_complete);

	ocf_volume_submit_flush(io);

	return 0;
}

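/* Account the chunk just processed and pick the next state: another step,
 * a cache flush, or the core discard directly when metadata is volatile.
 */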
static void _ocf_discard_finish_step(struct ocf_request *req)
{
	req->discard.handled += BYTES_TO_SECTORS(req->byte_length);

	if (req->discard.handled < req->discard.nr_sects)
		req->io_if = &_io_if_discard_step;
	else if (req->cache->device->init_mode != ocf_init_mode_metadata_volatile)
		req->io_if = &_io_if_discard_flush_cache;
	else
		req->io_if = &_io_if_discard_core;

	ocf_engine_push_req_front(req, true);
}

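/* Per-step completion, called once all sub-IOs of the step have finished */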
static void _ocf_discard_step_complete(struct ocf_request *req, int error)
{
	if (error)
		req->error |= error;

	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_RQ(req, "Completion");

	/* Release WRITE lock of request */
	ocf_req_unlock_wr(req);

	if (req->error) {
		ocf_metadata_error(req->cache);
		_ocf_discard_complete_req(req, req->error);
		return;
	}

	_ocf_discard_finish_step(req);
}

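/* Purge the mapped part of the current chunk from the cache and flush the
 * affected metadata if any of it was dirty.
 */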
static int _ocf_discard_step_do(struct ocf_request *req)
{
	struct ocf_cache *cache = req->cache;

	/* Get OCF request - increase reference counter */
	ocf_req_get(req);

	env_atomic_set(&req->req_remaining, 1); /* One core IO */

	if (ocf_engine_mapped_count(req)) {
		/* There are mapped cache lines, need to remove them */

		ocf_req_hash_lock_wr(req);

		/* Remove mapped cache lines from metadata */
		ocf_purge_map_info(req);

		if (req->info.flush_metadata) {
			/* Request was dirty and metadata needs to be flushed */
			ocf_metadata_flush_do_asynch(cache, req,
					_ocf_discard_step_complete);
		}

		ocf_req_hash_unlock_wr(req);
	}

	ocf_req_hash_lock_rd(req);

	/* Even if no cachelines are mapped they could be tracked in
	 * promotion policy. RD lock suffices. */
	ocf_promotion_req_purge(req->cache->promotion_policy, req);

	ocf_req_hash_unlock_rd(req);

	OCF_DEBUG_RQ(req, "Discard");
	_ocf_discard_step_complete(req, 0);

	/* Put OCF request - decrease reference counter */
	ocf_req_put(req);

	return 0;
}

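/* Called when the asynchronous WRITE lock has been granted; requeue the
 * request so the step continues on its IO queue.
 */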
static void _ocf_discard_on_resume(struct ocf_request *req)
{
	OCF_DEBUG_RQ(req, "On resume");
	ocf_engine_push_req_front(req, true);
}

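/* Prepare the next chunk of at most MAX_TRIM_RQ_SIZE bytes: map the core
 * lines it spans, take the WRITE lock if any of them are cached, and run
 * the step (or wait for the lock to be granted asynchronously).
 */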
static int _ocf_discard_step(struct ocf_request *req)
{
	int lock;
	struct ocf_cache *cache = req->cache;

	OCF_DEBUG_TRACE(req->cache);

	req->byte_position = SECTORS_TO_BYTES(req->discard.sector +
			req->discard.handled);
	req->byte_length = OCF_MIN(SECTORS_TO_BYTES(req->discard.nr_sects -
			req->discard.handled), MAX_TRIM_RQ_SIZE);
	req->core_line_first = ocf_bytes_2_lines(cache, req->byte_position);
	req->core_line_last =
		ocf_bytes_2_lines(cache, req->byte_position + req->byte_length - 1);
	req->core_line_count = req->core_line_last - req->core_line_first + 1;
	req->io_if = &_io_if_discard_step_resume;

	ENV_BUG_ON(env_memset(req->map, sizeof(*req->map) * req->core_line_count,
			0));

	ocf_req_hash(req);
	ocf_req_hash_lock_rd(req);

	/* Traverse to check if request is fully mapped */
	ocf_engine_traverse(req);

	if (ocf_engine_mapped_count(req)) {
		/* Some cache lines are mapped, lock request for WRITE access */
		lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
	} else {
		lock = OCF_LOCK_ACQUIRED;
	}

	ocf_req_hash_unlock_rd(req);

	if (lock >= 0) {
		if (OCF_LOCK_ACQUIRED == lock) {
			_ocf_discard_step_do(req);
		} else {
			/* WR lock was not acquired, need to wait for resume */
			OCF_DEBUG_RQ(req, "NO LOCK");
		}
	} else {
		OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
		req->error |= lock;
		_ocf_discard_finish_step(req);
	}

	env_cond_resched();

	return 0;
}

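/* Discard engine entry point: discards must be WRITE requests; reads are
 * rejected with -OCF_ERR_INVAL.
 */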
int ocf_discard(struct ocf_request *req)
{
	OCF_DEBUG_TRACE(req->cache);

	ocf_io_start(&req->ioi.io);

	if (req->rw == OCF_READ) {
		req->complete(req, -OCF_ERR_INVAL);
		ocf_req_put(req);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_req_get(req);

	_ocf_discard_step(req);

	/* Put OCF request - decrease reference counter */
	ocf_req_put(req);

	return 0;
}