struct page *page;
unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
+ if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
return NULL;
e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
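
For reference, drbd_insert_fault() is the inline replacement for the old FAULT_ACTIVE() macro. A rough sketch of its shape, assuming the CONFIG_DRBD_FAULT_INJECTION guards in drbd_int.h (the exact module-parameter names are an assumption here):

    #ifdef CONFIG_DRBD_FAULT_INJECTION
    /* Nonzero when a fault of the given type should fire now; gated by
     * the fault_rate and enable_faults module parameters. */
    static inline int drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
    {
            return fault_rate &&
                    (enable_faults & (1 << type)) &&
                    _drbd_insert_fault(mdev, type);
    }
    #else
    #define drbd_insert_fault(mdev, type) (0)
    #endif
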
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
rr = drbd_recv(mdev, data, len);
- if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
+ if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
dev_err(DEV, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
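
The corruption XORs the first byte with (unsigned long)-1, i.e. all bits set, so every bit of that byte is inverted. A minimal userspace demonstration of the same pattern (illustrative only; the kernel code writes into a kmap()ed receive page):

    #include <stdio.h>

    int main(void)
    {
            unsigned char data[] = { 0xa5, 0x5a };

            /* XOR with all-ones inverts every bit of the first byte. */
            data[0] = data[0] ^ (unsigned long)-1;
            printf("0xa5 corrupted to 0x%02x\n", data[0]); /* prints 0x5a */
            return 0;
    }
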
case ASB_CALL_HELPER:
hg = drbd_asb_recover_0p(mdev);
if (hg == -1 && mdev->state.role == R_PRIMARY) {
- self = drbd_set_role(mdev, R_SECONDARY, 0);
+ enum drbd_state_rv rv2;
+
+ drbd_set_role(mdev, R_SECONDARY, 0);
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
- self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
- if (self != SS_SUCCESS) {
+ rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+ if (rv2 != SS_SUCCESS) {
drbd_khelper(mdev, "pri-lost-after-sb");
} else {
dev_warn(DEV, "Successfully gave up primary role.\n");
case ASB_CALL_HELPER:
hg = drbd_asb_recover_0p(mdev);
if (hg == -1) {
+ enum drbd_state_rv rv2;
+
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
- self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
- if (self != SS_SUCCESS) {
+ rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+ if (rv2 != SS_SUCCESS) {
drbd_khelper(mdev, "pri-lost-after-sb");
} else {
dev_warn(DEV, "Successfully gave up primary role.\n");
return C_MASK;
}
if (hg == -1001) {
- dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
+ dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
return C_MASK;
}
}
put_ldev(mdev);
}
-#undef min_not_zero
ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(mdev)) {
{
struct p_req_state *p = &mdev->data.rbuf.req_state;
union drbd_state mask, val;
- int rv;
+ enum drbd_state_rv rv;
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
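
Once decoded, the mask/val pair is applied to the current state: bits covered by the mask are replaced by the requested values, everything else is kept. A condensed sketch in the spirit of the state-request path (the real code also validates the transition and serializes it under the state lock):

    static union drbd_state apply_req_state(union drbd_state os,
                                            union drbd_state mask,
                                            union drbd_state val)
    {
            union drbd_state ns;

            /* Keep bits outside the mask, take val inside it. */
            ns.i = (os.i & ~mask.i) | val.i;
            return ns;
    }
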
int ok = FALSE;
struct p_header80 *h = &mdev->data.rbuf.header.h80;
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
-
- drbd_bm_lock(mdev, "receive bitmap");
+ /* drbd_bm_lock(mdev, "receive bitmap"); intentionally no bm_lock here */
/* maybe we should use some per thread scratch page,
* and allocate that during initial device creation? */
ok = TRUE;
out:
- drbd_bm_unlock(mdev);
+ /* drbd_bm_unlock(mdev); intentionally no bm_lock here either */
if (ok && mdev->state.conn == C_WF_BITMAP_S)
drbd_start_resync(mdev, C_SYNC_SOURCE);
free_page((unsigned long) buffer);
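
The page freed here is the temporary receive buffer hinted at in the scratch-page comment above: allocated once per call, filled chunk by chunk from the socket, merged into the bitmap, then released. A hypothetical condensed sketch of that lifecycle (loop body elided; the real function also handles compressed bitmap packets):

    unsigned long *buffer;
    int ok = FALSE;

    buffer = (unsigned long *) __get_free_page(GFP_NOIO);
    if (!buffer)
            goto out;
    /* ... drbd_recv() fills the page, each chunk is merged into the
     * bitmap, until the peer has sent the whole bitmap ... */
    ok = TRUE;
    out:
    free_page((unsigned long) buffer); /* free_page(0) is a no-op */
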
if (os.conn == C_DISCONNECTING) {
wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
- if (!is_susp(mdev->state)) {
- /* we must not free the tl_hash
- * while application io is still on the fly */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
- drbd_free_tl_hash(mdev);
- }
-
crypto_free_hash(mdev->cram_hmac_tfm);
mdev->cram_hmac_tfm = NULL;
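
Setting cram_hmac_tfm to NULL right after freeing it is the usual free-then-NULL idiom: a repeated teardown pass cannot double-free, since the crypto layer's free path returns early on a NULL tfm. A sketch of the same pattern for the neighbouring per-connection tfms (member names assumed to match drbd_int.h of this version):

    /* Free-then-NULL keeps teardown idempotent. */
    crypto_free_hash(mdev->integrity_w_tfm);
    mdev->integrity_w_tfm = NULL;
    crypto_free_hash(mdev->integrity_r_tfm);
    mdev->integrity_r_tfm = NULL;
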