return;
}
- __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+ atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);
req->ctx->req_op->buf_unmap(req->ctx, req);
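These counter bumps only compile once the DFX fields themselves change type, so the driver header needs a companion hunk. A minimal sketch for the send_cnt/recv_cnt pair (the surrounding struct and the original u64 type are assumed from the field names in this patch):

-	u64 send_cnt;
-	u64 recv_cnt;
+	atomic64_t send_cnt;
+	atomic64_t recv_cnt;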
mutex_lock(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
- __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
if (ret == -EBUSY)
return -ENOBUFS;
if (!ret) {
- if (req->fake_busy)
+ if (atomic_read(&req->fake_busy))
ret = -EBUSY;
else
ret = -EINPROGRESS;
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
sec_update_iv(req);
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+	if (atomic_cmpxchg(&req->fake_busy, 1, 0) == 1)
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);

	sk_req->base.complete(&sk_req->base, req->err_type);
}
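This is the conversion that is easiest to get backwards: the GCC builtin's bool form returns true when the exchange happened, while atomic_cmpxchg() returns the value the variable held beforehand, so the swap succeeded exactly when the return value equals the expected old value (hence the == 1 test above). A self-contained sketch of the recipe, with illustrative names:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t busy = ATOMIC_INIT(1);

/*
 * Kernel equivalent of __sync_bool_compare_and_swap(&busy, 1, 0):
 * true iff busy held 1 and this caller was the one to clear it.
 */
static bool clear_busy_once(void)
{
	/* atomic_cmpxchg() returns the prior value; the exchange
	 * took place exactly when that value matched the expected 1.
	 */
	return atomic_cmpxchg(&busy, 1, 0) == 1;
}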
if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- req->fake_busy = 1;
+ atomic_set(&req->fake_busy, 1);
else
- req->fake_busy = 0;
+ atomic_set(&req->fake_busy, 0);
ret = ctx->req_op->get_res(ctx, req);
if (ret) {
.write = sec_debug_write,
};
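+/*
+ * debugfs provides debugfs_create_atomic_t() for 32-bit atomic_t
+ * values but no atomic64_t counterpart, hence this hand-rolled
+ * read-only attribute.
+ */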
+static int debugfs_atomic64_t_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL,
+ "%lld\n");
+
static int sec_core_debug_init(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+ debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt,
+ &fops_atomic64_t_ro);
- debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+ debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt,
+ &fops_atomic64_t_ro);
return 0;
}
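A side note on the two debugfs_create_file() calls: attributes defined with DEFINE_DEBUGFS_ATTRIBUTE() already protect their read path with debugfs_file_get()/debugfs_file_put(), so the debugfs documentation suggests registering them through the non-proxying variant instead. Either spelling is correct; a sketch:

debugfs_create_file_unsafe("send_cnt", 0444, tmp_d, &dfx->send_cnt,
			   &fops_atomic64_t_ro);
debugfs_create_file_unsafe("recv_cnt", 0444, tmp_d, &dfx->recv_cnt,
			   &fops_atomic64_t_ro);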