/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_volume_priv.h"
#include "../ocf_request.h"
#include "utils_io.h"
#include "utils_cache_line.h"
14 struct ocf_submit_volume_context
{
15 env_atomic req_remaining
;
17 ocf_submit_end_t cmpl
;
21 static void _ocf_volume_flush_end(struct ocf_io
*io
, int error
)
23 ocf_submit_end_t cmpl
= io
->priv1
;
25 cmpl(io
->priv2
, error
);
29 void ocf_submit_volume_flush(ocf_volume_t volume
,
30 ocf_submit_end_t cmpl
, void *priv
)
34 io
= ocf_volume_new_io(volume
);
36 cmpl(priv
, -OCF_ERR_NO_MEM
);
40 ocf_io_configure(io
, 0, 0, OCF_WRITE
, 0, 0);
41 ocf_io_set_cmpl(io
, cmpl
, priv
, _ocf_volume_flush_end
);
43 ocf_volume_submit_flush(io
);
46 static void ocf_submit_volume_end(struct ocf_io
*io
, int error
)
48 struct ocf_submit_volume_context
*context
= io
->priv1
;
51 context
->error
= error
;
55 if (env_atomic_dec_return(&context
->req_remaining
))
58 context
->cmpl(context
->priv
, context
->error
);
62 void ocf_submit_volume_discard(ocf_volume_t volume
, uint64_t addr
,
63 uint64_t length
, ocf_submit_end_t cmpl
, void *priv
)
65 struct ocf_submit_volume_context
*context
;
67 uint64_t max_length
= (uint32_t)~0;
70 context
= env_vzalloc(sizeof(*context
));
72 cmpl(priv
, -OCF_ERR_NO_MEM
);
76 env_atomic_set(&context
->req_remaining
, 1);
81 io
= ocf_volume_new_io(volume
);
83 context
->error
= -OCF_ERR_NO_MEM
;
87 env_atomic_inc(&context
->req_remaining
);
89 bytes
= OCF_MIN(length
, max_length
);
91 ocf_io_configure(io
, addr
, bytes
, OCF_WRITE
, 0, 0);
92 ocf_io_set_cmpl(io
, context
, NULL
, ocf_submit_volume_end
);
93 ocf_volume_submit_discard(io
);
99 if (env_atomic_dec_return(&context
->req_remaining
))
102 cmpl(priv
, context
->error
);
106 void ocf_submit_write_zeros(ocf_volume_t volume
, uint64_t addr
,
107 uint64_t length
, ocf_submit_end_t cmpl
, void *priv
)
109 struct ocf_submit_volume_context
*context
;
111 uint32_t max_length
= ~((uint32_t)PAGE_SIZE
- 1);
114 context
= env_vzalloc(sizeof(*context
));
116 cmpl(priv
, -OCF_ERR_NO_MEM
);
120 env_atomic_set(&context
->req_remaining
, 1);
121 context
->cmpl
= cmpl
;
122 context
->priv
= priv
;
125 io
= ocf_volume_new_io(volume
);
127 context
->error
= -OCF_ERR_NO_MEM
;
131 env_atomic_inc(&context
->req_remaining
);
133 bytes
= OCF_MIN(length
, max_length
);
135 ocf_io_configure(io
, addr
, bytes
, OCF_WRITE
, 0, 0);
136 ocf_io_set_cmpl(io
, context
, NULL
, ocf_submit_volume_end
);
137 ocf_volume_submit_write_zeroes(io
);
143 if (env_atomic_dec_return(&context
->req_remaining
))
146 cmpl(priv
, context
->error
);
150 struct ocf_submit_cache_page_context
{
153 ocf_submit_end_t cmpl
;
157 static void ocf_submit_cache_page_end(struct ocf_io
*io
, int error
)
159 struct ocf_submit_cache_page_context
*context
= io
->priv1
;
160 ctx_data_t
*data
= ocf_io_get_data(io
);
162 if (io
->dir
== OCF_READ
) {
163 ctx_data_rd_check(context
->cache
->owner
, context
->buffer
,
167 context
->cmpl(context
->priv
, error
);
168 ctx_data_free(context
->cache
->owner
, data
);
173 void ocf_submit_cache_page(ocf_cache_t cache
, uint64_t addr
, int dir
,
174 void *buffer
, ocf_submit_end_t cmpl
, void *priv
)
176 struct ocf_submit_cache_page_context
*context
;
181 context
= env_vmalloc(sizeof(*context
));
183 cmpl(priv
, -OCF_ERR_NO_MEM
);
187 context
->cache
= cache
;
188 context
->buffer
= buffer
;
189 context
->cmpl
= cmpl
;
190 context
->priv
= priv
;
192 io
= ocf_volume_new_io(&cache
->device
->volume
);
194 result
= -OCF_ERR_NO_MEM
;
198 data
= ctx_data_alloc(cache
->owner
, 1);
200 result
= -OCF_ERR_NO_MEM
;
204 if (dir
== OCF_WRITE
)
205 ctx_data_wr_check(cache
->owner
, data
, buffer
, PAGE_SIZE
);
207 result
= ocf_io_set_data(io
, data
, 0);
211 ocf_io_configure(io
, addr
, PAGE_SIZE
, dir
, 0, 0);
212 ocf_io_set_cmpl(io
, context
, NULL
, ocf_submit_cache_page_end
);
214 ocf_volume_submit_io(io
);
218 ctx_data_free(cache
->owner
, data
);
226 static void ocf_submit_volume_req_cmpl(struct ocf_io
*io
, int error
)
228 struct ocf_request
*req
= io
->priv1
;
229 ocf_req_end_t callback
= io
->priv2
;
231 callback(req
, error
);
236 void ocf_submit_cache_reqs(struct ocf_cache
*cache
,
237 struct ocf_map_info
*map_info
, struct ocf_request
*req
, int dir
,
238 unsigned int reqs
, ocf_req_end_t callback
)
240 struct ocf_counters_block
*cache_stats
;
241 uint64_t flags
= req
->io
? req
->io
->flags
: 0;
242 uint32_t class = req
->io
? req
->io
->io_class
: 0;
243 uint64_t addr
, bytes
, total_bytes
= 0;
248 cache_stats
= &cache
->core
[req
->core_id
].
249 counters
->cache_blocks
;
252 io
= ocf_new_cache_io(cache
);
254 callback(req
, -ENOMEM
);
258 addr
= ocf_metadata_map_lg2phy(cache
,
259 map_info
[0].coll_idx
);
260 addr
*= ocf_line_size(cache
);
261 addr
+= cache
->device
->metadata_offset
;
262 addr
+= (req
->byte_position
% ocf_line_size(cache
));
263 bytes
= req
->byte_length
;
265 ocf_io_configure(io
, addr
, bytes
, dir
, class, flags
);
266 ocf_io_set_queue(io
, req
->io_queue
);
267 ocf_io_set_cmpl(io
, req
, callback
, ocf_submit_volume_req_cmpl
);
269 err
= ocf_io_set_data(io
, req
->data
, 0);
276 ocf_volume_submit_io(io
);
277 total_bytes
= req
->byte_length
;
282 /* Issue requests to cache. */
283 for (i
= 0; i
< reqs
; i
++) {
284 io
= ocf_new_cache_io(cache
);
287 /* Finish all IOs which left with ERROR */
288 for (; i
< reqs
; i
++)
289 callback(req
, -ENOMEM
);
293 addr
= ocf_metadata_map_lg2phy(cache
,
294 map_info
[i
].coll_idx
);
295 addr
*= ocf_line_size(cache
);
296 addr
+= cache
->device
->metadata_offset
;
297 bytes
= ocf_line_size(cache
);
300 uint64_t seek
= (req
->byte_position
%
301 ocf_line_size(cache
));
305 } else if (i
== (reqs
- 1)) {
306 uint64_t skip
= (ocf_line_size(cache
) -
307 ((req
->byte_position
+ req
->byte_length
) %
308 ocf_line_size(cache
))) % ocf_line_size(cache
);
313 ocf_io_configure(io
, addr
, bytes
, dir
, class, flags
);
314 ocf_io_set_queue(io
, req
->io_queue
);
315 ocf_io_set_cmpl(io
, req
, callback
, ocf_submit_volume_req_cmpl
);
317 err
= ocf_io_set_data(io
, req
->data
, total_bytes
);
320 /* Finish all IOs which left with ERROR */
321 for (; i
< reqs
; i
++)
325 ocf_volume_submit_io(io
);
326 total_bytes
+= bytes
;
330 if (dir
== OCF_WRITE
)
331 env_atomic64_add(total_bytes
, &cache_stats
->write_bytes
);
332 else if (dir
== OCF_READ
)
333 env_atomic64_add(total_bytes
, &cache_stats
->read_bytes
);
336 void ocf_submit_volume_req(ocf_volume_t volume
, struct ocf_request
*req
,
337 ocf_req_end_t callback
)
339 struct ocf_cache
*cache
= req
->cache
;
340 struct ocf_counters_block
*core_stats
;
341 uint64_t flags
= req
->io
? req
->io
->flags
: 0;
342 uint32_t class = req
->io
? req
->io
->io_class
: 0;
347 core_stats
= &cache
->core
[req
->core_id
].
348 counters
->core_blocks
;
349 if (dir
== OCF_WRITE
)
350 env_atomic64_add(req
->byte_length
, &core_stats
->write_bytes
);
351 else if (dir
== OCF_READ
)
352 env_atomic64_add(req
->byte_length
, &core_stats
->read_bytes
);
354 io
= ocf_volume_new_io(volume
);
356 callback(req
, -ENOMEM
);
360 ocf_io_configure(io
, req
->byte_position
, req
->byte_length
, dir
,
362 ocf_io_set_queue(io
, req
->io_queue
);
363 ocf_io_set_cmpl(io
, req
, callback
, ocf_submit_volume_req_cmpl
);
364 err
= ocf_io_set_data(io
, req
->data
, 0);
370 ocf_volume_submit_io(io
);