* or implicit barrier packets as necessary.
* increased:
* w_send_barrier
- * _req_mod(req, queue_for_net_write or queue_for_net_read);
+ * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
* it is much easier and equally valid to count what we queue for the
 *  worker, even before it actually was queued or sent.
* (drbd_make_request_common; recovery path on read io-error)
* decreased:
* got_BarrierAck (respective tl_clear, tl_clear_barrier)
- * _req_mod(req, data_received)
+ * _req_mod(req, DATA_RECEIVED)
* [from receive_DataReply]
- * _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
+ * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
* [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
* for some reason it is NOT decreased in got_NegAck,
* but in the resulting cleanup code from report_params.
* we should try to remember the reason for that...
- * _req_mod(req, send_failed or send_canceled)
- * _req_mod(req, connection_lost_while_pending)
+ * _req_mod(req, SEND_FAILED or SEND_CANCELED)
+ * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
* [from tl_clear_barrier]
*/
static inline void inc_ap_pending(struct drbd_conf *mdev)
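{
	/* A minimal sketch of the body, under the assumption that
	 * ap_pending_cnt is the atomic_t counter the comment block above
	 * describes; the matching dec_ap_pending() would
	 * atomic_dec_and_test() and wake up waiters once the last
	 * pending ack is gone. */
	atomic_inc(&mdev->ap_pending_cnt);
}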
/* Clean up list of requests processed during current epoch */
list_for_each_safe(le, tle, &b->requests) {
r = list_entry(le, struct drbd_request, tl_requests);
- _req_mod(r, barrier_acked);
+ _req_mod(r, BARRIER_ACKED);
}
/* There could be requests on the list waiting for completion
   of the write to the local disk. To avoid corruption of slab's
   data structures we have to remove the list's head. Also, a
   barrier ack may have arrived out of sequence, overtaking
   the write acks - which would be a bug and violate write ordering.
   To not deadlock in case we lose connection while such requests are
   still pending, we need some way to find them for the
- _req_mode(connection_lost_while_pending).
+ _req_mod(CONNECTION_LOST_WHILE_PENDING).
These have been list_move'd to the out_of_sequence_requests list in
- _req_mod(, barrier_acked) above.
+ _req_mod(, BARRIER_ACKED) above.
*/
list_del_init(&b->requests);
* @mdev: DRBD device.
* @what: The action/event to perform with all request objects
*
- * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
- * restart_frozen_disk_io.
+ * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
+ * RESTART_FROZEN_DISK_IO.
*/
static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
tmp = b->next;
if (n_writes) {
- if (what == resend) {
+ if (what == RESEND) {
b->n_writes = n_writes;
if (b->w.cb == NULL) {
b->w.cb = w_send_barrier;
spin_lock_irq(&mdev->req_lock);
- _tl_restart(mdev, connection_lost_while_pending);
+ _tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);
/* we expect this list to be empty. */
D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
r = list_entry(le, struct drbd_request, tl_requests);
/* It would be nice to complete outside of spinlock.
* But this is easier for now. */
- _req_mod(r, connection_lost_while_pending);
+ _req_mod(r, CONNECTION_LOST_WHILE_PENDING);
}
/* ensure bit indicating barrier is required is clear */
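For callers that do not already hold the request lock, the RESEND and
FAIL_FROZEN_DISK_IO call sites below go through tl_restart() rather than
_tl_restart(). A plausible sketch of that locked wrapper, assuming
req_lock is the only serialization _tl_restart() needs:

void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	/* take the lock that _tl_restart() expects to be held */
	spin_lock_irq(&mdev->req_lock);
	_tl_restart(mdev, what);
	spin_unlock_irq(&mdev->req_lock);
}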
union drbd_state ns, enum chg_state_flags flags)
{
enum drbd_fencing_p fp;
- enum drbd_req_event what = nothing;
+ enum drbd_req_event what = NOTHING;
union drbd_state nsm = (union drbd_state){ .i = -1 };
if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
nsm.i = -1;
if (ns.susp_nod) {
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
- what = resend;
+ what = RESEND;
if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
- what = restart_frozen_disk_io;
+ what = RESTART_FROZEN_DISK_IO;
- if (what != nothing)
+ if (what != NOTHING)
nsm.susp_nod = 0;
}
/* case2: The connection was established again: */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
clear_bit(NEW_CUR_UUID, &mdev->flags);
- what = resend;
+ what = RESEND;
nsm.susp_fen = 0;
}
}
- if (what != nothing) {
+ if (what != NOTHING) {
spin_lock_irq(&mdev->req_lock);
_tl_restart(mdev, what);
nsm.i &= mdev->state.i;
if (mdev->state.conn < C_CONNECTED)
tl_clear(mdev);
if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
- tl_restart(mdev, fail_frozen_disk_io);
+ tl_restart(mdev, FAIL_FROZEN_DISK_IO);
}
drbd_resume_io(mdev);
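The is_susp() test used in the connection-state check below folds the
three suspend reasons into a single predicate; presumably something like:

/* sketch: I/O is suspended if any of the three suspend flags is set */
#define is_susp(state) ((state).susp || (state).susp_nod || (state).susp_fen)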
/*
* This function is called from _asender only_
- * but see also comments in _req_mod(,barrier_acked)
+ * but see also comments in _req_mod(,BARRIER_ACKED)
* and receive_Barrier.
*
* Move entries from net_ee to done_ee, if ready.
ok = recv_dless_read(mdev, req, sector, data_size);
if (ok)
- req_mod(req, data_received);
+ req_mod(req, DATA_RECEIVED);
/* else: nothing. handled from drbd_disconnect...
* I don't think we may complete this just yet
* in case we are "on-disconnect: freeze" */
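recv_dless_read() ("diskless read") copies the payload straight from the
socket into the master bio's pages before DATA_RECEIVED is applied above.
A rough sketch of the copy loop, assuming drbd_recv() is the driver's
blocking socket-receive helper; the digest handling of the real function
is omitted:

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio = req->master_bio;
	int rr, i, expect;

	D_ASSERT(sector == bio->bi_sector);

	/* fill the bio's segments directly from the network */
	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			       kmap(bvec->bv_page) + bvec->bv_offset, expect);
		kunmap(bvec->bv_page);
		if (rr != expect)
			return 0;
		data_size -= rr;
	}
	D_ASSERT(data_size == 0);
	return 1; /* ok */
}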
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
test_bit(NEW_CUR_UUID, &mdev->flags)) {
- /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
+ /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
for temporary network outages! */
spin_unlock_irq(&mdev->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
switch (be16_to_cpu(h->command)) {
case P_RS_WRITE_ACK:
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer_and_sis;
+ what = WRITE_ACKED_BY_PEER_AND_SIS;
break;
case P_WRITE_ACK:
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer;
+ what = WRITE_ACKED_BY_PEER;
break;
case P_RECV_ACK:
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
- what = recv_acked_by_peer;
+ what = RECV_ACKED_BY_PEER;
break;
case P_DISCARD_ACK:
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = conflict_discarded_by_peer;
+ what = CONFLICT_DISCARDED_BY_PEER;
break;
default:
D_ASSERT(0);
found = validate_req_change_req_state(mdev, p->block_id, sector,
&mdev->write_requests, __func__,
- neg_acked, missing_ok);
+ NEG_ACKED, missing_ok);
if (!found) {
/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
The master bio might already be completed, therefore the
request is no longer in the collision hash. */
return validate_req_change_req_state(mdev, p->block_id, sector,
&mdev->read_requests, __func__,
- neg_acked, false);
+ NEG_ACKED, false);
}
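validate_req_change_req_state() pairs the request lookup with the state
transition. A rough sketch of that pattern - the find_request() helper
and its exact signature are hypothetical here, standing in for whatever
hash/tree search the real code performs:

static bool validate_req_change_req_state(struct drbd_conf *mdev, u64 id,
					  sector_t sector, void *root,
					  const char *func,
					  enum drbd_req_event what,
					  bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	/* hypothetical lookup by block_id and sector */
	req = find_request(mdev, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);
		return false;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	/* complete the master bio outside the spinlock, if it is done */
	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}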
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
return;
if (req->master_bio) {
- /* this is data_received (remote read)
+ /* this is DATA_RECEIVED (remote read)
* or protocol C P_WRITE_ACK
* or protocol B P_RECV_ACK
- * or protocol A "handed_over_to_network" (SendAck)
+ * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
* or canceled or failed,
* or killed from the transfer log due to connection loss.
*/
/* does not happen...
* initialization done in drbd_req_new
- case created:
+ case CREATED:
break;
*/
- case to_be_send: /* via network */
+ case TO_BE_SENT: /* via network */
/* reached via drbd_make_request_common
* and from w_read_retry_remote */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
inc_ap_pending(mdev);
break;
- case to_be_submitted: /* locally */
+ case TO_BE_SUBMITTED: /* locally */
/* reached via drbd_make_request_common */
D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
req->rq_state |= RQ_LOCAL_PENDING;
break;
- case completed_ok:
+ case COMPLETED_OK:
if (bio_data_dir(req->master_bio) == WRITE)
mdev->writ_cnt += req->i.size >> 9;
else
put_ldev(mdev);
break;
- case write_completed_with_error:
+ case WRITE_COMPLETED_WITH_ERROR:
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
put_ldev(mdev);
break;
- case read_ahead_completed_with_error:
+ case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail READA */
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
put_ldev(mdev);
break;
- case read_completed_with_error:
+ case READ_COMPLETED_WITH_ERROR:
drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
req->rq_state |= RQ_LOCAL_COMPLETED;
break;
}
- /* _req_mod(req,to_be_send); oops, recursion... */
+ /* _req_mod(req,TO_BE_SENT); oops, recursion... */
req->rq_state |= RQ_NET_PENDING;
inc_ap_pending(mdev);
- /* fall through: _req_mod(req,queue_for_net_read); */
+ /* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */
- case queue_for_net_read:
+ case QUEUE_FOR_NET_READ:
/* READ or READA, and
* no local disk,
* or target area marked as invalid,
drbd_queue_work(&mdev->data.work, &req->w);
break;
- case queue_for_net_write:
+ case QUEUE_FOR_NET_WRITE:
/* assert something? */
/* from drbd_make_request_common only */
break;
- case queue_for_send_oos:
+ case QUEUE_FOR_SEND_OOS:
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_oos;
drbd_queue_work(&mdev->data.work, &req->w);
break;
- case oos_handed_to_network:
+ case OOS_HANDED_TO_NETWORK:
/* actually the same */
- case send_canceled:
+ case SEND_CANCELED:
/* treat it the same */
- case send_failed:
+ case SEND_FAILED:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
req->rq_state &= ~RQ_NET_QUEUED;
_req_may_be_done_not_susp(req, m);
break;
- case handed_over_to_network:
+ case HANDED_OVER_TO_NETWORK:
/* assert something? */
if (bio_data_dir(req->master_bio) == WRITE)
atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
req->rq_state &= ~RQ_NET_QUEUED;
req->rq_state |= RQ_NET_SENT;
/* because _drbd_send_zc_bio could sleep, and may want to
- * dereference the bio even after the "write_acked_by_peer" and
- * "completed_ok" events came in, once we return from
+ * dereference the bio even after the "WRITE_ACKED_BY_PEER" and
+ * "COMPLETED_OK" events came in, once we return from
* _drbd_send_zc_bio (drbd_send_dblock), we have to check
* whether it is done already, and end it. */
_req_may_be_done_not_susp(req, m);
break;
- case read_retry_remote_canceled:
+ case READ_RETRY_REMOTE_CANCELED:
req->rq_state &= ~RQ_NET_QUEUED;
/* fall through, in case we raced with drbd_disconnect */
- case connection_lost_while_pending:
+ case CONNECTION_LOST_WHILE_PENDING:
/* transfer log cleanup after connection loss */
/* assert something? */
if (req->rq_state & RQ_NET_PENDING)
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case write_acked_by_peer_and_sis:
+ case WRITE_ACKED_BY_PEER_AND_SIS:
req->rq_state |= RQ_NET_SIS;
- case conflict_discarded_by_peer:
+ case CONFLICT_DISCARDED_BY_PEER:
/* for discarded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
* node crashes are covered by the activity log. */
- if (what == conflict_discarded_by_peer)
+ if (what == CONFLICT_DISCARDED_BY_PEER)
dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
" DRBD is not a random data generator!\n",
(unsigned long long)req->i.sector, req->i.size);
req->rq_state |= RQ_NET_DONE;
/* fall through */
- case write_acked_by_peer:
+ case WRITE_ACKED_BY_PEER:
/* protocol C; successfully written on peer.
* Nothing to do here.
* We want to keep the tl in place for all protocols, to cater
* for volatile write caches on lower level devices. A barrier
* request is expected to have forced all prior requests onto
* stable storage, so its completion could set RQ_NET_DONE right
* here and not wait for the
* P_BARRIER_ACK, but that is an unnecessary optimization. */
/* this makes it effectively the same as for: */
- case recv_acked_by_peer:
+ case RECV_ACKED_BY_PEER:
/* protocol B; pretends to be successfully written on peer.
- * see also notes above in handed_over_to_network about
+ * see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
req->rq_state |= RQ_NET_OK;
D_ASSERT(req->rq_state & RQ_NET_PENDING);
_req_may_be_done_not_susp(req, m);
break;
- case neg_acked:
+ case NEG_ACKED:
/* assert something? */
if (req->rq_state & RQ_NET_PENDING) {
dec_ap_pending(mdev);
req->rq_state |= RQ_NET_DONE;
_req_may_be_done_not_susp(req, m);
- /* else: done by handed_over_to_network */
+ /* else: done by HANDED_OVER_TO_NETWORK */
break;
- case fail_frozen_disk_io:
+ case FAIL_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case restart_frozen_disk_io:
+ case RESTART_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
drbd_queue_work(&mdev->data.work, &req->w);
break;
- case resend:
+ case RESEND:
/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
before the connection loss (B&C only); only P_BARRIER_ACK was missing.
Throwing them out of the TL here by pretending we got a BARRIER_ACK
}
break;
}
- /* else, fall through to barrier_acked */
+ /* else, fall through to BARRIER_ACKED */
- case barrier_acked:
+ case BARRIER_ACKED:
if (!(req->rq_state & RQ_WRITE))
break;
/* barrier came in before all requests have been acked.
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
- dev_err(DEV, "FIXME (barrier_acked but pending)\n");
+ dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
}
if ((req->rq_state & RQ_NET_MASK) != 0) {
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case data_received:
+ case DATA_RECEIVED:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
dec_ap_pending(mdev);
req->rq_state &= ~RQ_NET_PENDING;
/* mark them early for readability.
* this just sets some state flags. */
if (remote)
- _req_mod(req, to_be_send);
+ _req_mod(req, TO_BE_SENT);
if (local)
- _req_mod(req, to_be_submitted);
+ _req_mod(req, TO_BE_SUBMITTED);
/* check this request on the collision detection hash tables.
* if we have a conflict, just complete it here.
* or READ, but not in sync.
*/
_req_mod(req, (rw == WRITE)
- ? queue_for_net_write
- : queue_for_net_read);
+ ? QUEUE_FOR_NET_WRITE
+ : QUEUE_FOR_NET_READ);
}
if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
- _req_mod(req, queue_for_send_oos);
+ _req_mod(req, QUEUE_FOR_SEND_OOS);
if (remote &&
mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
*/
enum drbd_req_event {
- created,
- to_be_send,
- to_be_submitted,
+ CREATED,
+ TO_BE_SENT,
+ TO_BE_SUBMITTED,
/* XXX yes, now I am inconsistent...
* these are not "events" but "actions"
* oh, well... */
- queue_for_net_write,
- queue_for_net_read,
- queue_for_send_oos,
-
- send_canceled,
- send_failed,
- handed_over_to_network,
- oos_handed_to_network,
- connection_lost_while_pending,
- read_retry_remote_canceled,
- recv_acked_by_peer,
- write_acked_by_peer,
- write_acked_by_peer_and_sis, /* and set_in_sync */
- conflict_discarded_by_peer,
- neg_acked,
- barrier_acked, /* in protocol A and B */
- data_received, /* (remote read) */
-
- read_completed_with_error,
- read_ahead_completed_with_error,
- write_completed_with_error,
- completed_ok,
- resend,
- fail_frozen_disk_io,
- restart_frozen_disk_io,
- nothing, /* for tracing only */
+ QUEUE_FOR_NET_WRITE,
+ QUEUE_FOR_NET_READ,
+ QUEUE_FOR_SEND_OOS,
+
+ SEND_CANCELED,
+ SEND_FAILED,
+ HANDED_OVER_TO_NETWORK,
+ OOS_HANDED_TO_NETWORK,
+ CONNECTION_LOST_WHILE_PENDING,
+ READ_RETRY_REMOTE_CANCELED,
+ RECV_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
+ CONFLICT_DISCARDED_BY_PEER,
+ NEG_ACKED,
+ BARRIER_ACKED, /* in protocol A and B */
+ DATA_RECEIVED, /* (remote read) */
+
+ READ_COMPLETED_WITH_ERROR,
+ READ_AHEAD_COMPLETED_WITH_ERROR,
+ WRITE_COMPLETED_WITH_ERROR,
+ COMPLETED_OK,
+ RESEND,
+ FAIL_FROZEN_DISK_IO,
+ RESTART_FROZEN_DISK_IO,
+ NOTHING,
};
/* encoding of request states for now. we don't actually need that many bits.
* recv_ack (B) or implicit "ack" (A),
* still waiting for the barrier ack.
* master_bio may already be completed and invalidated.
- * 11100: write_acked (C),
- * data_received (for remote read, any protocol)
+ * 11100: write acked (C),
+ * data received (for remote read, any protocol)
* or finally the barrier ack has arrived (B,A)...
* request can be freed
* 01100: neg-acked (write, protocol C)
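For orientation, a sketch of the flags these five-character strings
refer to, reading ok|done|sent|queued|pending from left to right; the
names match the RQ_NET_* masks tested throughout __req_mod(), but the
concrete bit positions here are assumed for illustration:

#define RQ_NET_PENDING	(1UL << 4)  /* ack requested, peer has not answered */
#define RQ_NET_QUEUED	(1UL << 5)  /* on the worker queue, not yet on the wire */
#define RQ_NET_SENT	(1UL << 6)  /* handed over to the network stack */
#define RQ_NET_DONE	(1UL << 7)  /* no further network traffic expected */
#define RQ_NET_OK	(1UL << 8)  /* peer acknowledged (or equivalent) */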
/* to avoid recursion in __req_mod */
if (unlikely(error)) {
what = (bio_data_dir(bio) == WRITE)
- ? write_completed_with_error
+ ? WRITE_COMPLETED_WITH_ERROR
: (bio_rw(bio) == READ)
- ? read_completed_with_error
- : read_ahead_completed_with_error;
+ ? READ_COMPLETED_WITH_ERROR
+ : READ_AHEAD_COMPLETED_WITH_ERROR;
} else
- what = completed_ok;
+ what = COMPLETED_OK;
bio_put(req->private_bio);
req->private_bio = ERR_PTR(error);
spin_lock_irq(&mdev->req_lock);
if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
- _req_mod(req, read_retry_remote_canceled);
+ _req_mod(req, READ_RETRY_REMOTE_CANCELED);
spin_unlock_irq(&mdev->req_lock);
return 1;
}
int ok;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
+ req_mod(req, SEND_CANCELED);
return 1;
}
ok = drbd_send_oos(mdev, req);
- req_mod(req, oos_handed_to_network);
+ req_mod(req, OOS_HANDED_TO_NETWORK);
return ok;
}
int ok;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
+ req_mod(req, SEND_CANCELED);
return 1;
}
ok = drbd_send_dblock(mdev, req);
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);
return ok;
}
int ok;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
+ req_mod(req, SEND_CANCELED);
return 1;
}
if (mdev->state.conn >= C_CONNECTED)
drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
}
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);
return ok;
}
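The req_mod() used by these worker callbacks, as opposed to the
_req_mod() variant that requires req_lock to be held, presumably wraps
the transition in the lock and completes the master bio outside of it.
A minimal sketch, assuming req->mdev points back to the device and
bio_and_error is the completion cookie __req_mod() fills in:

static inline int req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	unsigned long flags;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	/* serialize against all other request state transitions */
	spin_lock_irqsave(&mdev->req_lock, flags);
	rv = __req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	/* complete the master bio outside the lock, if this transition
	 * finished it */
	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}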