drbd: Define the size of res_opts->cpu_mask in a single place
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e2c8f9d7a0bcd00a3d4011b75a0013f33007112..59a58e896cf581efd5c411e563a8eab7ff62e7fb 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -198,7 +198,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
        int expect_epoch = 0;
        int expect_size = 0;
 
-       spin_lock_irq(&connection->req_lock);
+       spin_lock_irq(&connection->resource->req_lock);
 
        /* find oldest not yet barrier-acked write request,
         * count writes in its epoch. */
@@ -255,12 +255,12 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
                        break;
                _req_mod(req, BARRIER_ACKED);
        }
-       spin_unlock_irq(&connection->req_lock);
+       spin_unlock_irq(&connection->resource->req_lock);
 
        return;
 
 bail:
-       spin_unlock_irq(&connection->req_lock);
+       spin_unlock_irq(&connection->resource->req_lock);
        conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 }
 
@@ -284,9 +284,9 @@ void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 
 void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 {
-       spin_lock_irq(&connection->req_lock);
+       spin_lock_irq(&connection->resource->req_lock);
        _tl_restart(connection, what);
-       spin_unlock_irq(&connection->req_lock);
+       spin_unlock_irq(&connection->resource->req_lock);
 }
 
 /**
@@ -311,7 +311,7 @@ void tl_abort_disk_io(struct drbd_device *device)
        struct drbd_connection *connection = first_peer_device(device)->connection;
        struct drbd_request *req, *r;
 
-       spin_lock_irq(&connection->req_lock);
+       spin_lock_irq(&connection->resource->req_lock);
        list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
                if (!(req->rq_state & RQ_LOCAL_PENDING))
                        continue;
@@ -319,7 +319,7 @@ void tl_abort_disk_io(struct drbd_device *device)
                        continue;
                _req_mod(req, ABORT_DISK_IO);
        }
-       spin_unlock_irq(&connection->req_lock);
+       spin_unlock_irq(&connection->resource->req_lock);
 }
 
 static int drbd_thread_setup(void *arg)
@@ -891,7 +891,7 @@ void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
        struct p_rs_uuid *p;
        u64 uuid;
 
-       D_ASSERT(device->state.disk == D_UP_TO_DATE);
+       D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
 
        uuid = device->ldev->md.uuid[UI_BITMAP];
        if (uuid && uuid != UUID_JUST_CREATED)
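Many of the hunks in this diff only add a device argument to D_ASSERT(). A rough sketch of what the two-argument macro looks like; the exact body lives in drbd_int.h and may differ slightly from this approximation:

/* Sketch only: approximates the two-argument D_ASSERT() used above. */
#define D_ASSERT(device, exp) \
	do { \
		if (!(exp)) \
			drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", \
				 __FILE__, __LINE__); \
	} while (0)

Passing the device explicitly lets the assertion report through the per-device logging helper instead of relying on an implicit local variable being in scope.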
@@ -919,7 +919,7 @@ int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flag
        unsigned int max_bio_size;
 
        if (get_ldev_if_state(device, D_NEGOTIATING)) {
-               D_ASSERT(device->ldev->backing_bdev);
+               D_ASSERT(device, device->ldev->backing_bdev);
                d_size = drbd_get_max_capacity(device->ldev);
                rcu_read_lock();
                u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
@@ -1836,7 +1836,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
        int rv = 0;
 
        mutex_lock(&drbd_main_mutex);
-       spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+       spin_lock_irqsave(&device->resource->req_lock, flags);
        /* to have a stable device->state.role
         * and no race with updating open_cnt */
 
@@ -1849,7 +1849,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 
        if (!rv)
                device->open_cnt++;
-       spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+       spin_unlock_irqrestore(&device->resource->req_lock, flags);
        mutex_unlock(&drbd_main_mutex);
 
        return rv;
@@ -1974,7 +1974,7 @@ void drbd_device_cleanup(struct drbd_device *device)
                device->rs_mark_left[i] = 0;
                device->rs_mark_time[i] = 0;
        }
-       D_ASSERT(first_peer_device(device)->connection->net_conf == NULL);
+       D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
 
        drbd_set_my_capacity(device, 0);
        if (device->bitmap) {
@@ -1988,16 +1988,16 @@ void drbd_device_cleanup(struct drbd_device *device)
 
        clear_bit(AL_SUSPENDED, &device->flags);
 
-       D_ASSERT(list_empty(&device->active_ee));
-       D_ASSERT(list_empty(&device->sync_ee));
-       D_ASSERT(list_empty(&device->done_ee));
-       D_ASSERT(list_empty(&device->read_ee));
-       D_ASSERT(list_empty(&device->net_ee));
-       D_ASSERT(list_empty(&device->resync_reads));
-       D_ASSERT(list_empty(&first_peer_device(device)->connection->sender_work.q));
-       D_ASSERT(list_empty(&device->resync_work.list));
-       D_ASSERT(list_empty(&device->unplug_work.list));
-       D_ASSERT(list_empty(&device->go_diskless.list));
+       D_ASSERT(device, list_empty(&device->active_ee));
+       D_ASSERT(device, list_empty(&device->sync_ee));
+       D_ASSERT(device, list_empty(&device->done_ee));
+       D_ASSERT(device, list_empty(&device->read_ee));
+       D_ASSERT(device, list_empty(&device->net_ee));
+       D_ASSERT(device, list_empty(&device->resync_reads));
+       D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
+       D_ASSERT(device, list_empty(&device->resync_work.list));
+       D_ASSERT(device, list_empty(&device->unplug_work.list));
+       D_ASSERT(device, list_empty(&device->go_diskless.list));
 
        drbd_set_defaults(device);
 }
@@ -2014,7 +2014,7 @@ static void drbd_destroy_mempools(void)
                drbd_pp_vacant--;
        }
 
-       /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+       /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
 
        if (drbd_md_io_bio_set)
                bioset_free(drbd_md_io_bio_set);
@@ -2169,7 +2169,7 @@ void drbd_destroy_device(struct kref *kref)
        del_timer_sync(&device->request_timer);
 
        /* paranoia asserts */
-       D_ASSERT(device->open_cnt == 0);
+       D_ASSERT(device, device->open_cnt == 0);
        /* end paranoia asserts */
 
        /* cleanup stuff that may have been allocated during
@@ -2327,7 +2327,7 @@ static void drbd_cleanup(void)
        drbd_genl_unregister();
 
        idr_for_each_entry(&drbd_devices, device, i)
-               drbd_delete_minor(device);
+               drbd_delete_device(device);
 
        /* not _rcu since, no other updater anymore. Genl already unregistered */
        for_each_resource_safe(resource, tmp, &drbd_resources) {
@@ -2503,8 +2503,7 @@ int set_resource_options(struct drbd_resource *resource, struct res_opts *res_op
 
        /* silently ignore cpu mask on UP kernel */
        if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
-               /* FIXME: Get rid of constant 32 here */
-               err = bitmap_parse(res_opts->cpu_mask, 32,
+               err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
                                   cpumask_bits(new_cpu_mask), nr_cpu_ids);
                if (err) {
                        drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
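The hunk above is the change the page title refers to: the parse length handed to bitmap_parse() now comes from the same constant that sizes res_opts->cpu_mask, instead of a hard-coded 32. A minimal sketch of the pattern follows; the struct layout and the value of DRBD_CPU_MASK_SIZE are assumptions for illustration (the real field is declared through drbd's genetlink macros and drbd_limits.h):

#include <linux/bitmap.h>
#include <linux/cpumask.h>

/* Assumed to match the definition referenced by the hunk above. */
#define DRBD_CPU_MASK_SIZE 32

struct res_opts_sketch {
	/* Buffer size and parse length now share one definition. */
	char cpu_mask[DRBD_CPU_MASK_SIZE];
};

static int parse_cpu_mask_sketch(struct res_opts_sketch *opts,
				 cpumask_var_t new_cpu_mask)
{
	/* bitmap_parse(buf, buflen, dst, nbits): buflen can no longer
	 * drift away from the size of the cpu_mask buffer. */
	return bitmap_parse(opts->cpu_mask, DRBD_CPU_MASK_SIZE,
			    cpumask_bits(new_cpu_mask), nr_cpu_ids);
}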
@@ -2534,7 +2533,7 @@ struct drbd_resource *drbd_create_resource(const char *name)
 {
        struct drbd_resource *resource;
 
-       resource = kmalloc(sizeof(struct drbd_resource), GFP_KERNEL);
+       resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
        if (!resource)
                return NULL;
        resource->name = kstrdup(name, GFP_KERNEL);
@@ -2546,6 +2545,8 @@ struct drbd_resource *drbd_create_resource(const char *name)
        idr_init(&resource->devices);
        INIT_LIST_HEAD(&resource->connections);
        list_add_tail_rcu(&resource->resources, &drbd_resources);
+       mutex_init(&resource->conf_update);
+       spin_lock_init(&resource->req_lock);
        return resource;
 }
 
@@ -2588,8 +2589,6 @@ struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
 
        connection->cstate = C_STANDALONE;
        mutex_init(&connection->cstate_mutex);
-       spin_lock_init(&connection->req_lock);
-       mutex_init(&connection->conf_update);
        init_waitqueue_head(&connection->ping_wait);
        idr_init(&connection->peer_devices);
 
@@ -2659,11 +2658,11 @@ static int init_submitter(struct drbd_device *device)
        return 0;
 }
 
-enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigned int minor, int vnr)
+enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr)
 {
-       struct drbd_resource *resource = connection->resource;
+       struct drbd_connection *connection;
        struct drbd_device *device;
-       struct drbd_peer_device *peer_device;
+       struct drbd_peer_device *peer_device, *tmp_peer_device;
        struct gendisk *disk;
        struct request_queue *q;
        int id;
@@ -2679,18 +2678,8 @@ enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigne
                return ERR_NOMEM;
        kref_init(&device->kref);
 
-       peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
-       if (!peer_device)
-               goto out_no_peer_device;
-
-       INIT_LIST_HEAD(&device->peer_devices);
-       list_add(&peer_device->peer_devices, &device->peer_devices);
        kref_get(&resource->kref);
        device->resource = resource;
-       kref_get(&connection->kref);
-       peer_device->connection = connection;
-       peer_device->device = device;
-
        device->minor = minor;
        device->vnr = vnr;
 
@@ -2730,7 +2719,7 @@ enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigne
        blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        blk_queue_merge_bvec(q, drbd_merge_bvec);
-       q->queue_lock = &connection->req_lock;
+       q->queue_lock = &resource->req_lock;
 
        device->md_io_page = alloc_page(GFP_KERNEL);
        if (!device->md_io_page)
@@ -2761,15 +2750,27 @@ enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigne
        }
        kref_get(&device->kref);
 
-       id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
-       if (id < 0) {
-               if (id == -ENOSPC) {
-                       err = ERR_INVALID_REQUEST;
-                       drbd_msg_put_info("requested volume exists already");
+       INIT_LIST_HEAD(&device->peer_devices);
+       for_each_connection(connection, resource) {
+               peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
+               if (!peer_device)
+                       goto out_idr_remove_from_resource;
+               peer_device->connection = connection;
+               peer_device->device = device;
+
+               list_add(&peer_device->peer_devices, &device->peer_devices);
+               kref_get(&device->kref);
+
+               id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
+               if (id < 0) {
+                       if (id == -ENOSPC) {
+                               err = ERR_INVALID_REQUEST;
+                               drbd_msg_put_info("requested volume exists already");
+                       }
+                       goto out_idr_remove_from_resource;
                }
-               goto out_idr_remove_from_resource;
+               kref_get(&connection->kref);
        }
-       kref_get(&device->kref);
 
        if (init_submitter(device)) {
                err = ERR_NOMEM;
@@ -2780,7 +2781,7 @@ enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigne
        add_disk(disk);
 
        /* inherit the connection state */
-       device->state.conn = connection->cstate;
+       device->state.conn = first_connection(resource)->cstate;
        if (device->state.conn == C_WF_REPORT_PARAMS)
                drbd_connected(device);
 
@@ -2789,6 +2790,17 @@ enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigne
 out_idr_remove_vol:
        idr_remove(&connection->peer_devices, vnr);
 out_idr_remove_from_resource:
+       for_each_connection(connection, resource) {
+               peer_device = idr_find(&connection->peer_devices, vnr);
+               if (peer_device) {
+                       idr_remove(&connection->peer_devices, vnr);
+                       kref_put(&connection->kref, drbd_destroy_connection);
+               }
+       }
+       for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
+               list_del(&peer_device->peer_devices);
+               kfree(peer_device);
+       }
        idr_remove(&resource->devices, vnr);
 out_idr_remove_minor:
        idr_remove(&drbd_devices, minor);
@@ -2802,14 +2814,12 @@ out_no_io_page:
 out_no_disk:
        blk_cleanup_queue(q);
 out_no_q:
-       kref_put(&connection->kref, drbd_destroy_connection);
        kref_put(&resource->kref, drbd_destroy_resource);
-out_no_peer_device:
        kfree(device);
        return err;
 }
 
-void drbd_delete_minor(struct drbd_device *device)
+void drbd_delete_device(struct drbd_device *device)
 {
        struct drbd_resource *resource = device->resource;
        struct drbd_connection *connection;
@@ -3006,7 +3016,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
        buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
        buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
 
-       D_ASSERT(drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
+       D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
        sector = device->ldev->md.md_offset;
 
        if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
@@ -3270,14 +3280,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
 
        rv = NO_ERROR;
 
-       spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+       spin_lock_irq(&device->resource->req_lock);
        if (device->state.conn < C_CONNECTED) {
                unsigned int peer;
                peer = be32_to_cpu(buffer->la_peer_max_bio_size);
                peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
                device->peer_max_bio_size = peer;
        }
-       spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+       spin_unlock_irq(&device->resource->req_lock);
 
  err:
        drbd_md_put_buffer(device);
@@ -3459,7 +3469,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
        struct drbd_device *device = w->device;
        int rv = -EIO;
 
-       D_ASSERT(atomic_read(&device->ap_bio_cnt) == 0);
+       D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
 
        if (get_ldev(device)) {
                drbd_bm_lock(device, work->why, work->flags);
@@ -3498,7 +3508,7 @@ static int w_go_diskless(struct drbd_work *w, int unused)
 {
        struct drbd_device *device = w->device;
 
-       D_ASSERT(device->state.disk == D_FAILED);
+       D_ASSERT(device, device->state.disk == D_FAILED);
        /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
         * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
         * the protected members anymore, though, so once put_ldev reaches zero
@@ -3552,11 +3562,11 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
                          void (*done)(struct drbd_device *, int),
                          char *why, enum bm_flag flags)
 {
-       D_ASSERT(current == first_peer_device(device)->connection->worker.task);
+       D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
 
-       D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags));
-       D_ASSERT(!test_bit(BITMAP_IO, &device->flags));
-       D_ASSERT(list_empty(&device->bm_io_work.w.list));
+       D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
+       D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
+       D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
        if (device->bm_io_work.why)
                drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
                        why, device->bm_io_work.why);
@@ -3566,13 +3576,13 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
        device->bm_io_work.why = why;
        device->bm_io_work.flags = flags;
 
-       spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+       spin_lock_irq(&device->resource->req_lock);
        set_bit(BITMAP_IO, &device->flags);
        if (atomic_read(&device->ap_bio_cnt) == 0) {
                if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
                        drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
        }
-       spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+       spin_unlock_irq(&device->resource->req_lock);
 }
 
 /**
@@ -3589,7 +3599,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *
 {
        int rv;
 
-       D_ASSERT(current != first_peer_device(device)->connection->worker.task);
+       D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
 
        if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
                drbd_suspend_io(device);
@@ -3740,10 +3750,10 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
        /* Indicate to wake up device->misc_wait on progress.  */
        i->waiting = true;
        prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
-       spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+       spin_unlock_irq(&device->resource->req_lock);
        timeout = schedule_timeout(timeout);
        finish_wait(&device->misc_wait, &wait);
-       spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+       spin_lock_irq(&device->resource->req_lock);
        if (!timeout || device->state.conn < C_CONNECTED)
                return -ETIMEDOUT;
        if (signal_pending(current))
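The other recurring change throughout this diff is that the request spinlock (and the conf_update mutex) now live in struct drbd_resource rather than struct drbd_connection, which is why every locking site above switches to device->resource->req_lock or connection->resource->req_lock. A minimal sketch of the relocated fields and a typical caller; the struct layout here is illustrative, not the full definition from drbd_int.h:

#include <linux/spinlock.h>
#include <linux/mutex.h>

struct drbd_resource {                    /* trimmed-down sketch */
	spinlock_t req_lock;              /* moved here from struct drbd_connection */
	struct mutex conf_update;         /* likewise moved from the connection */
};

struct drbd_device {                      /* sketch: only the member used below */
	struct drbd_resource *resource;
};

static void transfer_log_walk_sketch(struct drbd_device *device)
{
	/* Callers that used to take first_peer_device(device)->connection->req_lock
	 * now reach the same lock through the resource. */
	spin_lock_irq(&device->resource->req_lock);
	/* ... walk or modify the transfer log ... */
	spin_unlock_irq(&device->resource->req_lock);
}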