/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
7 #include "../ocf_cache_priv.h"
9 #include "engine_common.h"
10 #include "../concurrency/ocf_concurrency.h"
11 #include "../ocf_request.h"
12 #include "../utils/utils_cache_line.h"
13 #include "../utils/utils_io.h"
14 #include "../metadata/metadata.h"
16 #define OCF_ENGINE_DEBUG_IO_NAME "wi"
17 #include "engine_debug.h"
19 static int ocf_write_wi_update_and_flush_metadata(struct ocf_request
*req
);
21 static const struct ocf_io_if _io_if_wi_flush_metadata
= {
22 .read
= ocf_write_wi_update_and_flush_metadata
,
23 .write
= ocf_write_wi_update_and_flush_metadata
,
26 static void _ocf_write_wi_io_flush_metadata(struct ocf_request
*req
, int error
)
29 ocf_core_stats_cache_error_update(req
->core
, OCF_WRITE
);
33 if (env_atomic_dec_return(&req
->req_remaining
))
37 ocf_engine_error(req
, true, "Failed to write data to cache");
39 ocf_req_unlock_wr(req
);
41 req
->complete(req
, req
->error
);
46 static int ocf_write_wi_update_and_flush_metadata(struct ocf_request
*req
)
48 struct ocf_cache
*cache
= req
->cache
;
50 env_atomic_set(&req
->req_remaining
, 1); /* One core IO */
52 if (ocf_engine_mapped_count(req
)) {
53 /* There are mapped cache line, need to remove them */
55 ocf_req_hash_lock_wr(req
); /*- Metadata WR access ---------------*/
57 /* Remove mapped cache lines from metadata */
58 ocf_purge_map_info(req
);
60 ocf_req_hash_unlock_wr(req
); /*- END Metadata WR access ---------*/
62 if (req
->info
.flush_metadata
) {
63 /* Request was dirty and need to flush metadata */
64 ocf_metadata_flush_do_asynch(cache
, req
,
65 _ocf_write_wi_io_flush_metadata
);
70 _ocf_write_wi_io_flush_metadata(req
, 0);
75 static void _ocf_write_wi_core_complete(struct ocf_request
*req
, int error
)
79 req
->info
.core_error
= 1;
80 ocf_core_stats_core_error_update(req
->core
, OCF_WRITE
);
83 if (env_atomic_dec_return(&req
->req_remaining
))
86 OCF_DEBUG_RQ(req
, "Completion");
89 ocf_req_unlock_wr(req
);
91 req
->complete(req
, req
->error
);
95 ocf_engine_push_req_front_if(req
, &_io_if_wi_flush_metadata
,
100 static int _ocf_write_wi_do(struct ocf_request
*req
)
102 /* Get OCF request - increase reference counter */
105 env_atomic_set(&req
->req_remaining
, 1); /* One core IO */
107 OCF_DEBUG_RQ(req
, "Submit");
109 /* Submit write IO to the core */
110 ocf_submit_volume_req(&req
->core
->volume
, req
,
111 _ocf_write_wi_core_complete
);
113 /* Update statistics */
114 ocf_engine_update_block_stats(req
);
115 ocf_core_stats_request_pt_update(req
->core
, req
->part_id
, req
->rw
,
116 req
->info
.hit_no
, req
->core_line_count
);
118 /* Put OCF request - decrease reference counter */
124 static void _ocf_write_wi_on_resume(struct ocf_request
*req
)
126 OCF_DEBUG_RQ(req
, "On resume");
127 ocf_engine_push_req_front(req
, true);
130 static const struct ocf_io_if _io_if_wi_resume
= {
131 .read
= _ocf_write_wi_do
,
132 .write
= _ocf_write_wi_do
,
135 int ocf_write_wi(struct ocf_request
*req
)
137 int lock
= OCF_LOCK_NOT_ACQUIRED
;
139 OCF_DEBUG_TRACE(req
->cache
);
141 ocf_io_start(&req
->ioi
.io
);
143 /* Get OCF request - increase reference counter */
146 /* Set resume io_if */
147 req
->io_if
= &_io_if_wi_resume
;
150 ocf_req_hash_lock_rd(req
); /*- Metadata READ access, No eviction --------*/
152 /* Travers to check if request is mapped fully */
153 ocf_engine_traverse(req
);
155 if (ocf_engine_mapped_count(req
)) {
156 /* Some cache line are mapped, lock request for WRITE access */
157 lock
= ocf_req_async_lock_wr(req
, _ocf_write_wi_on_resume
);
159 lock
= OCF_LOCK_ACQUIRED
;
162 ocf_req_hash_unlock_rd(req
); /*- END Metadata READ access----------------*/
165 if (lock
== OCF_LOCK_ACQUIRED
) {
166 _ocf_write_wi_do(req
);
168 /* WR lock was not acquired, need to wait for resume */
169 OCF_DEBUG_RQ(req
, "NO LOCK");
172 OCF_DEBUG_RQ(req
, "LOCK ERROR %d", lock
);
173 req
->complete(req
, lock
);
177 /* Put OCF request - decrease reference counter */