part_stat_unlock();
}
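+/* Allocate and initialize a request object for @bio_src.  The
+ * allocation is GFP_NOIO: we are on the I/O submission path and must
+ * not recurse into the block layer.  Returns NULL if the request
+ * mempool is exhausted. */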
+static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
+ struct bio *bio_src)
+{
+ struct drbd_request *req;
+
+ req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
+ if (!req)
+ return NULL;
+
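+ /* Clone bio_src into req->private_bio; DRBD submits that private
+ * copy to the local disk, while master_bio remains what we complete
+ * towards the upper layers. */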
+ drbd_req_make_private_bio(req, bio_src);
+ req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
+ req->mdev = mdev;
+ req->master_bio = bio_src;
+ req->epoch = 0;
+
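+ /* The interval node mirrors the bio's sector range; it is what the
+ * read_requests/write_requests trees index for conflict detection. */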
+ drbd_clear_interval(&req->i);
+ req->i.sector = bio_src->bi_sector;
+ req->i.size = bio_src->bi_size;
+ req->i.waiting = false;
+
+ INIT_LIST_HEAD(&req->tl_requests);
+ INIT_LIST_HEAD(&req->w.list);
+
+ return req;
+}
+
+static void drbd_req_free(struct drbd_request *req)
+{
+ mempool_free(req, drbd_request_mempool);
+}
+
+/* rw is bio_data_dir(), only READ or WRITE */
static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
const unsigned long s = req->rq_state;
* Other places where we set out-of-sync:
* READ with local io-error */
if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
- drbd_set_out_of_sync(mdev, req->sector, req->size);
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
- drbd_set_in_sync(mdev, req->sector, req->size);
+ drbd_set_in_sync(mdev, req->i.sector, req->i.size);
/* one might be tempted to move the drbd_al_complete_io
* to the local io completion callback drbd_endio_pri.
if (s & RQ_LOCAL_MASK) {
if (get_ldev_if_state(mdev, D_FAILED)) {
if (s & RQ_IN_ACT_LOG)
- drbd_al_complete_io(mdev, req->sector);
+ drbd_al_complete_io(mdev, req->i.sector);
put_ldev(mdev);
} else if (__ratelimit(&drbd_ratelimit_state)) {
dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
"but my Disk seems to have failed :(\n",
- (unsigned long long) req->sector);
+ (unsigned long long) req->i.sector);
}
}
}
if (test_bit(CREATE_BARRIER, &mdev->flags))
return;
- b = mdev->newest_tle;
+ b = mdev->tconn->newest_tle;
b->w.cb = w_send_barrier;
/* inc_ap_pending done here, so we won't
* get imbalanced on connection loss.
* dec_ap_pending will be done in got_BarrierAck
* or (on connection loss) in tl_clear. */
inc_ap_pending(mdev);
- drbd_queue_work(&mdev->data.work, &b->w);
+ drbd_queue_work(&mdev->tconn->data.work, &b->w);
set_bit(CREATE_BARRIER, &mdev->flags);
}
struct drbd_request *req)
{
const unsigned long s = req->rq_state;
- struct drbd_request *i;
- struct drbd_epoch_entry *e;
- struct hlist_node *n;
- struct hlist_head *slot;
/* Before we can signal completion to the upper layers,
* we may need to close the current epoch.
*/
if (mdev->state.conn >= C_CONNECTED &&
(s & RQ_NET_SENT) != 0 &&
- req->epoch == mdev->newest_tle->br_number)
+ req->epoch == mdev->tconn->newest_tle->br_number)
queue_barrier(mdev);
-
- /* we need to do the conflict detection stuff,
- * if we have the ee_hash (two_primaries) and
- * this has been on the network */
- if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
- const sector_t sector = req->sector;
- const int size = req->size;
-
- /* ASSERT:
- * there must be no conflicting requests, since
- * they must have been failed on the spot */
-#define OVERLAPS overlaps(sector, size, i->sector, i->size)
- slot = tl_hash_slot(mdev, sector);
- hlist_for_each_entry(i, n, slot, colision) {
- if (OVERLAPS) {
- dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
- "other: %p %llus +%u\n",
- req, (unsigned long long)sector, size,
- i, (unsigned long long)i->sector, i->size);
- }
- }
-
- /* maybe "wake" those conflicting epoch entries
- * that wait for this request to finish.
- *
- * currently, there can be only _one_ such ee
- * (well, or some more, which would be pending
- * P_DISCARD_ACK not yet sent by the asender...),
- * since we block the receiver thread upon the
- * first conflict detection, which will wait on
- * misc_wait. maybe we want to assert that?
- *
- * anyways, if we found one,
- * we just have to do a wake_up. */
-#undef OVERLAPS
-#define OVERLAPS overlaps(sector, size, e->sector, e->size)
- slot = ee_hash_slot(mdev, req->sector);
- hlist_for_each_entry(e, n, slot, colision) {
- if (OVERLAPS) {
- wake_up(&mdev->misc_wait);
- break;
- }
- }
- }
-#undef OVERLAPS
}
void complete_master_bio(struct drbd_conf *mdev,
dec_ap_bio(mdev);
}
+
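+/* Remove @req from interval tree @root and wake up any process still
+ * waiting for the request to leave the tree. */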
+static void drbd_remove_request_interval(struct rb_root *root,
+ struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->mdev;
+ struct drbd_interval *i = &req->i;
+
+ drbd_remove_interval(root, i);
+
+ /* Wake up any processes waiting for this request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
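+
+/* The waiting side, for reference (a minimal sketch, not part of this
+ * change): a request that finds a conflicting interval in the tree is
+ * expected to set i->waiting under the req_lock and sleep on
+ * mdev->misc_wait until the wake_up() above, e.g.:
+ *
+ *	i->waiting = true;
+ *	spin_unlock_irq(&mdev->tconn->req_lock);
+ *	wait_event(mdev->misc_wait, <conflicting interval gone>);
+ *	spin_lock_irq(&mdev->tconn->req_lock);
+ */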
+
/* Helper for __req_mod().
* Set m->bio to the master bio, if it is fit to be completed,
* or leave it alone (it is initialized to NULL in __req_mod),
return;
if (req->master_bio) {
- /* this is data_received (remote read)
+ /* this is DATA_RECEIVED (remote read)
* or protocol C P_WRITE_ACK
* or protocol B P_RECV_ACK
- * or protocol A "handed_over_to_network" (SendAck)
+ * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
* or canceled or failed,
* or killed from the transfer log due to connection loss.
*/
- /* remove the request from the conflict detection
- * respective block_id verification hash */
+ /* remove the request from the conflict detection
+ * respective block_id verification interval tree */
- if (!hlist_unhashed(&req->colision))
- hlist_del(&req->colision);
- else
+ if (!drbd_interval_empty(&req->i)) {
+ struct rb_root *root;
+
+ if (rw == WRITE)
+ root = &mdev->write_requests;
+ else
+ root = &mdev->read_requests;
+ drbd_remove_request_interval(root, req);
+ } else
D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
/* for writes we need to do some extra housekeeping */
* conflicting requests with local origin, and why we have to do so regardless
* of whether we allowed multiple primaries.
*
- * BTW, in case we only have one primary, the ee_hash is empty anyways, and the
- * second hlist_for_each_entry becomes a noop. This is even simpler than to
- * grab a reference on the net_conf, and check for the two_primaries flag...
+ * In case we only have one primary, the epoch_entries tree is empty.
*/
static int _req_conflicts(struct drbd_request *req)
{
struct drbd_conf *mdev = req->mdev;
- const sector_t sector = req->sector;
- const int size = req->size;
- struct drbd_request *i;
- struct drbd_epoch_entry *e;
- struct hlist_node *n;
- struct hlist_head *slot;
+ const sector_t sector = req->i.sector;
+ const int size = req->i.size;
+ struct drbd_interval *i;
- D_ASSERT(hlist_unhashed(&req->colision));
+ D_ASSERT(drbd_interval_empty(&req->i));
- if (!get_net_conf(mdev))
+ if (!get_net_conf(mdev->tconn))
return 0;
- /* BUG_ON */
- ERR_IF (mdev->tl_hash_s == 0)
- goto out_no_conflict;
- BUG_ON(mdev->tl_hash == NULL);
-
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
- slot = tl_hash_slot(mdev, sector);
- hlist_for_each_entry(i, n, slot, colision) {
- if (OVERLAPS) {
- dev_alert(DEV, "%s[%u] Concurrent local write detected! "
- "[DISCARD L] new: %llus +%u; "
+ i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ if (i) {
+ struct drbd_request *req2 =
+ container_of(i, struct drbd_request, i);
+
+ dev_alert(DEV, "%s[%u] Concurrent local write detected! "
+ "[DISCARD L] new: %llus +%u; "
+ "pending: %llus +%u\n",
+ current->comm, current->pid,
+ (unsigned long long)sector, size,
+ (unsigned long long)req2->i.sector, req2->i.size);
+ goto out_conflict;
+ }
+
+ if (!RB_EMPTY_ROOT(&mdev->epoch_entries)) {
+ /* check for overlapping requests with remote origin */
+ i = drbd_find_overlap(&mdev->epoch_entries, sector, size);
+ if (i) {
+ struct drbd_epoch_entry *e =
+ container_of(i, struct drbd_epoch_entry, i);
+
+ dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
+ " [DISCARD L] new: %llus +%u; "
"pending: %llus +%u\n",
current->comm, current->pid,
(unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
+ (unsigned long long)e->i.sector, e->i.size);
goto out_conflict;
}
}
- if (mdev->ee_hash_s) {
- /* now, check for overlapping requests with remote origin */
- BUG_ON(mdev->ee_hash == NULL);
-#undef OVERLAPS
-#define OVERLAPS overlaps(e->sector, e->size, sector, size)
- slot = ee_hash_slot(mdev, sector);
- hlist_for_each_entry(e, n, slot, colision) {
- if (OVERLAPS) {
- dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
- " [DISCARD L] new: %llus +%u; "
- "pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)e->sector, e->size);
- goto out_conflict;
- }
- }
- }
-#undef OVERLAPS
-
-out_no_conflict:
/* this is like it should be, and what we expected.
* our users do behave after all... */
- put_net_conf(mdev);
+ put_net_conf(mdev->tconn);
return 0;
out_conflict:
- put_net_conf(mdev);
+ put_net_conf(mdev->tconn);
return 1;
}
/* does not happen...
* initialization done in drbd_req_new
- case created:
+ case CREATED:
break;
*/
- case to_be_send: /* via network */
+ case TO_BE_SENT: /* via network */
/* reached via drbd_make_request_common
* and from w_read_retry_remote */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
inc_ap_pending(mdev);
break;
- case to_be_submitted: /* locally */
+ case TO_BE_SUBMITTED: /* locally */
/* reached via drbd_make_request_common */
D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
req->rq_state |= RQ_LOCAL_PENDING;
break;
- case completed_ok:
+ case COMPLETED_OK:
if (bio_data_dir(req->master_bio) == WRITE)
- mdev->writ_cnt += req->size>>9;
+ mdev->writ_cnt += req->i.size >> 9;
else
- mdev->read_cnt += req->size>>9;
+ mdev->read_cnt += req->i.size >> 9;
req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
req->rq_state &= ~RQ_LOCAL_PENDING;
put_ldev(mdev);
break;
- case write_completed_with_error:
+ case WRITE_COMPLETED_WITH_ERROR:
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
put_ldev(mdev);
break;
- case read_ahead_completed_with_error:
+ case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail READA */
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
put_ldev(mdev);
break;
- case read_completed_with_error:
- drbd_set_out_of_sync(mdev, req->sector, req->size);
+ case READ_COMPLETED_WITH_ERROR:
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
break;
}
- /* _req_mod(req,to_be_send); oops, recursion... */
+ /* _req_mod(req,TO_BE_SENT); oops, recursion... */
req->rq_state |= RQ_NET_PENDING;
inc_ap_pending(mdev);
- /* fall through: _req_mod(req,queue_for_net_read); */
+ /* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */
- case queue_for_net_read:
+ case QUEUE_FOR_NET_READ:
/* READ or READA, and
* no local disk,
* or target area marked as invalid,
/* so we can verify the handle in the answer packet
- * corresponding hlist_del is in _req_may_be_done() */
+ * corresponding drbd_remove_request_interval() is in _req_may_be_done() */
- hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector));
+ drbd_insert_interval(&mdev->read_requests, &req->i);
set_bit(UNPLUG_REMOTE, &mdev->flags);
req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
? w_read_retry_remote
: w_send_read_req;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->data.work, &req->w);
break;
- case queue_for_net_write:
+ case QUEUE_FOR_NET_WRITE:
/* assert something? */
/* from drbd_make_request_common only */
- hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector));
- /* corresponding hlist_del is in _req_may_be_done() */
+ /* corresponding drbd_remove_request_interval() is in _req_may_be_done() */
+ drbd_insert_interval(&mdev->write_requests, &req->i);
/* NOTE
* In case the req ended up on the transfer log before being
* just after it grabs the req_lock */
D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
- req->epoch = mdev->newest_tle->br_number;
+ req->epoch = mdev->tconn->newest_tle->br_number;
/* increment size of current epoch */
- mdev->newest_tle->n_writes++;
+ mdev->tconn->newest_tle->n_writes++;
/* queue work item to send data */
D_ASSERT(req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_dblock;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->data.work, &req->w);
/* close the epoch, in case it outgrew the limit */
- if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
+ if (mdev->tconn->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
queue_barrier(mdev);
break;
- case queue_for_send_oos:
+ case QUEUE_FOR_SEND_OOS:
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_oos;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->data.work, &req->w);
break;
- case oos_handed_to_network:
+ case OOS_HANDED_TO_NETWORK:
/* actually the same */
- case send_canceled:
+ case SEND_CANCELED:
/* treat it the same */
- case send_failed:
+ case SEND_FAILED:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
req->rq_state &= ~RQ_NET_QUEUED;
_req_may_be_done_not_susp(req, m);
break;
- case handed_over_to_network:
+ case HANDED_OVER_TO_NETWORK:
/* assert something? */
if (bio_data_dir(req->master_bio) == WRITE)
- atomic_add(req->size>>9, &mdev->ap_in_flight);
+ atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
if (bio_data_dir(req->master_bio) == WRITE &&
- mdev->net_conf->wire_protocol == DRBD_PROT_A) {
+ mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A) {
/* this is what is dangerous about protocol A:
* pretend it was successfully written on the peer. */
if (req->rq_state & RQ_NET_PENDING) {
req->rq_state &= ~RQ_NET_QUEUED;
req->rq_state |= RQ_NET_SENT;
/* because _drbd_send_zc_bio could sleep, and may want to
- * dereference the bio even after the "write_acked_by_peer" and
- * "completed_ok" events came in, once we return from
+ * dereference the bio even after the "WRITE_ACKED_BY_PEER" and
+ * "COMPLETED_OK" events came in, once we return from
* _drbd_send_zc_bio (drbd_send_dblock), we have to check
* whether it is done already, and end it. */
_req_may_be_done_not_susp(req, m);
break;
- case read_retry_remote_canceled:
+ case READ_RETRY_REMOTE_CANCELED:
req->rq_state &= ~RQ_NET_QUEUED;
/* fall through, in case we raced with drbd_disconnect */
- case connection_lost_while_pending:
+ case CONNECTION_LOST_WHILE_PENDING:
/* transfer log cleanup after connection loss */
/* assert something? */
if (req->rq_state & RQ_NET_PENDING)
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
req->rq_state |= RQ_NET_DONE;
if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
/* if it is still queued, we may not complete it here.
* it will be canceled soon. */
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case write_acked_by_peer_and_sis:
+ case WRITE_ACKED_BY_PEER_AND_SIS:
req->rq_state |= RQ_NET_SIS;
- case conflict_discarded_by_peer:
+ case CONFLICT_DISCARDED_BY_PEER:
/* for discarded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
* node crashes are covered by the activity log. */
- if (what == conflict_discarded_by_peer)
+ if (what == CONFLICT_DISCARDED_BY_PEER)
dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
" DRBD is not a random data generator!\n",
- (unsigned long long)req->sector, req->size);
+ (unsigned long long)req->i.sector, req->i.size);
req->rq_state |= RQ_NET_DONE;
/* fall through */
- case write_acked_by_peer:
+ case WRITE_ACKED_BY_PEER:
/* protocol C; successfully written on peer.
* Nothing to do here.
* We want to keep the tl in place for all protocols, to cater
* P_BARRIER_ACK, but that is an unnecessary optimization. */
/* this makes it effectively the same as for: */
- case recv_acked_by_peer:
+ case RECV_ACKED_BY_PEER:
/* protocol B; pretends to be successfully written on peer.
- * see also notes above in handed_over_to_network about
+ * see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
req->rq_state |= RQ_NET_OK;
D_ASSERT(req->rq_state & RQ_NET_PENDING);
dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
req->rq_state &= ~RQ_NET_PENDING;
_req_may_be_done_not_susp(req, m);
break;
- case neg_acked:
+ case NEG_ACKED:
/* assert something? */
if (req->rq_state & RQ_NET_PENDING) {
dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
}
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
req->rq_state |= RQ_NET_DONE;
_req_may_be_done_not_susp(req, m);
- /* else: done by handed_over_to_network */
+ /* else: done by HANDED_OVER_TO_NETWORK */
break;
- case fail_frozen_disk_io:
+ case FAIL_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case restart_frozen_disk_io:
+ case RESTART_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
get_ldev(mdev);
req->w.cb = w_restart_disk_io;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->data.work, &req->w);
break;
- case resend:
+ case RESEND:
/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
before the connection loss (B&C only); only P_BARRIER_ACK was missing.
Throwing them out of the TL here by pretending we got a BARRIER_ACK.
We ensure that the peer was not rebooted */
if (!(req->rq_state & RQ_NET_OK)) {
if (req->w.cb) {
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->data.work, &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
}
break;
}
- /* else, fall through to barrier_acked */
+ /* else, fall through to BARRIER_ACKED */
- case barrier_acked:
+ case BARRIER_ACKED:
if (!(req->rq_state & RQ_WRITE))
break;
/* barrier came in before all requests have been acked.
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
- dev_err(DEV, "FIXME (barrier_acked but pending)\n");
- list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
+ dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
+ list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
}
if ((req->rq_state & RQ_NET_MASK) != 0) {
req->rq_state |= RQ_NET_DONE;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A)
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
}
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case data_received:
+ case DATA_RECEIVED:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
dec_ap_pending(mdev);
req->rq_state &= ~RQ_NET_PENDING;
* spinlock, and grabbing the spinlock.
* if we lost that race, we retry. */
if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
+ mdev->tconn->unused_spare_tle == NULL &&
test_bit(CREATE_BARRIER, &mdev->flags)) {
allocate_barrier:
b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
}
/* GOOD, everything prepared, grab the spin_lock */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (is_susp(mdev->state)) {
/* If we got suspended, use the retry mechanism of
bio. In the next call to drbd_make_request
we sleep in inc_ap_bio() */
ret = 1;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
goto fail_free_complete;
}
dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
if (!(local || remote)) {
dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
goto fail_free_complete;
}
}
- if (b && mdev->unused_spare_tle == NULL) {
- mdev->unused_spare_tle = b;
+ if (b && mdev->tconn->unused_spare_tle == NULL) {
+ mdev->tconn->unused_spare_tle = b;
b = NULL;
}
if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
+ mdev->tconn->unused_spare_tle == NULL &&
test_bit(CREATE_BARRIER, &mdev->flags)) {
/* someone closed the current epoch
* while we were grabbing the spinlock */
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
goto allocate_barrier;
}
* barrier packet. To get the write ordering right, we only have to
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
- if ((remote || send_oos) && mdev->unused_spare_tle &&
+ if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
+ _tl_add_barrier(mdev, mdev->tconn->unused_spare_tle);
+ mdev->tconn->unused_spare_tle = NULL;
} else {
D_ASSERT(!(remote && rw == WRITE &&
test_bit(CREATE_BARRIER, &mdev->flags)));
/* mark them early for readability.
* this just sets some state flags. */
if (remote)
- _req_mod(req, to_be_send);
+ _req_mod(req, TO_BE_SENT);
if (local)
- _req_mod(req, to_be_submitted);
+ _req_mod(req, TO_BE_SUBMITTED);
- /* check this request on the collision detection hash tables.
+ /* check this request against the collision detection interval trees.
* if we have a conflict, just complete it here.
if (rw == WRITE && _req_conflicts(req))
goto fail_conflicting;
- list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+ list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests);
/* NOTE remote first: to get the concurrent write detection right,
* we must register the request before start of local IO. */
* or READ, but not in sync.
*/
_req_mod(req, (rw == WRITE)
- ? queue_for_net_write
- : queue_for_net_read);
+ ? QUEUE_FOR_NET_WRITE
+ : QUEUE_FOR_NET_READ);
}
if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
- _req_mod(req, queue_for_send_oos);
+ _req_mod(req, QUEUE_FOR_SEND_OOS);
if (remote &&
- mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
+ mdev->tconn->net_conf->on_congestion != OC_BLOCK && mdev->tconn->agreed_pro_version >= 96) {
int congested = 0;
- if (mdev->net_conf->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ if (mdev->tconn->net_conf->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= mdev->tconn->net_conf->cong_fill) {
dev_info(DEV, "Congestion-fill threshold reached\n");
congested = 1;
}
- if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ if (mdev->act_log->used >= mdev->tconn->net_conf->cong_extents) {
dev_info(DEV, "Congestion-extents threshold reached\n");
congested = 1;
}
if (congested) {
queue_barrier(mdev); /* last barrier, after mirrored writes */
- if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ if (mdev->tconn->net_conf->on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
- else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ else /*mdev->tconn->net_conf->on_congestion == OC_DISCONNECT */
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
}
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
kfree(b); /* if someone else has beaten us to it... */
if (local) {
* pretend that it was successfully served right now.
*/
_drbd_end_io_acct(mdev, req);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (remote)
dec_ap_pending(mdev);
/* THINK: do we want to fail it (-EIO), or pretend success?
err = 0;
fail_free_complete:
- if (rw == WRITE && local)
+ if (req->rq_state & RQ_IN_ACT_LOG)
drbd_al_complete_io(mdev, sector);
fail_and_free_req:
if (local) {
struct list_head *le;
unsigned long et = 0; /* effective timeout = ko_count * timeout */
- if (get_net_conf(mdev)) {
- et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
- put_net_conf(mdev);
+ if (get_net_conf(mdev->tconn)) {
+ et = mdev->tconn->net_conf->timeout*HZ/10 * mdev->tconn->net_conf->ko_count;
+ put_net_conf(mdev->tconn);
}
if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
return; /* Recurring timer stopped */
- spin_lock_irq(&mdev->req_lock);
- le = &mdev->oldest_tle->requests;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ le = &mdev->tconn->oldest_tle->requests;
if (list_empty(le)) {
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
mod_timer(&mdev->request_timer, jiffies + et);
return;
}
mod_timer(&mdev->request_timer, req->start_time + et);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}