NFS: Convert nfs_get_lock_context to return an ERR_PTR on failure
author	Trond Myklebust <Trond.Myklebust@netapp.com>
Mon, 13 Aug 2012 21:15:50 +0000 (17:15 -0400)
committer	Trond Myklebust <Trond.Myklebust@netapp.com>
Fri, 28 Sep 2012 20:03:03 +0000 (16:03 -0400)
We want to be able to distinguish between allocation failures and the
case where the lock context is not needed (because there are no
locks).

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
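
The hunks below all rely on the kernel's error-pointer convention from <linux/err.h>: IS_ERR() detects an error-carrying pointer, PTR_ERR() recovers the errno value, and ERR_CAST() re-types an error pointer while propagating it. For readers unfamiliar with that convention, here is a minimal, self-contained userspace sketch; the macros are simplified stand-ins rather than the kernel's exact definitions, and every demo_* name is hypothetical, invented only to mirror the shape of the changes in this patch.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/*
 * Simplified userspace stand-ins for the kernel's error-pointer helpers
 * in <linux/err.h> (not the kernel's exact definitions).  A small
 * negative errno is stored in the pointer bits, so a single return
 * value can carry either a valid object or a specific error code,
 * whereas a bare NULL can only say "something failed".
 */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define ERR_CAST(ptr)	((void *)(ptr))	/* re-type only; errno bits unchanged */

struct demo_lock_ctx { int unused; };
struct demo_req { struct demo_lock_ctx *l_ctx; };

/* Hypothetical producer following the convention this patch adopts:
 * allocation failure returns ERR_PTR(-ENOMEM) instead of NULL. */
static struct demo_lock_ctx *demo_get_lock_context(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return calloc(1, sizeof(struct demo_lock_ctx));
}

/* Hypothetical consumer mirroring the nfs_create_request() hunk:
 * the error pointer is checked with IS_ERR() and, on failure,
 * propagated unchanged via ERR_CAST(). */
static struct demo_req *demo_create_request(int fail)
{
	struct demo_lock_ctx *l_ctx = demo_get_lock_context(fail);
	struct demo_req *req;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		free(l_ctx);
		return ERR_PTR(-ENOMEM);
	}
	req->l_ctx = l_ctx;
	return req;
}

int main(void)
{
	struct demo_req *req = demo_create_request(1);

	if (IS_ERR(req)) {	/* caller pattern used in the direct.c hunks */
		printf("error: %ld\n", PTR_ERR(req));	/* -ENOMEM, i.e. -12 on Linux */
		return 1;
	}
	free(req->l_ctx);
	free(req);
	return 0;
}

The design point stated in the changelog is visible in the sketch: with a bare NULL return, an allocation failure and a "no lock context needed" result would be indistinguishable, while the ERR_PTR encoding leaves room for distinct error codes later without changing the callers again.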
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/pagelist.c

fs/nfs/direct.c
index 1ba385b7c90da41b4c90d5760fe13bbc7482302a..22130df162180e39e461b6caee9bfab1c78a9c40 100644 (file)
@@ -450,6 +450,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct nfs_direct_req *dreq;
+       struct nfs_lock_context *l_ctx;
 
        dreq = nfs_direct_req_alloc();
        if (dreq == NULL)
@@ -457,9 +458,12 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 
        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
-       dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
-       if (dreq->l_ctx == NULL)
+       l_ctx = nfs_get_lock_context(dreq->ctx);
+       if (IS_ERR(l_ctx)) {
+               result = PTR_ERR(l_ctx);
                goto out_release;
+       }
+       dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
@@ -849,6 +853,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct nfs_direct_req *dreq;
+       struct nfs_lock_context *l_ctx;
 
        dreq = nfs_direct_req_alloc();
        if (!dreq)
@@ -856,9 +861,12 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
-       dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
-       if (dreq->l_ctx == NULL)
+       l_ctx = nfs_get_lock_context(dreq->ctx);
+       if (IS_ERR(l_ctx)) {
+               result = PTR_ERR(l_ctx);
                goto out_release;
+       }
+       dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
fs/nfs/inode.c
index 9b47610338f59f03f6b4fdc0280d6aa61c266d4f..b5e2913dff2dcf3f28a00ec03e51c61672d937ce 100644 (file)
@@ -578,7 +578,7 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
                spin_unlock(&inode->i_lock);
                new = kmalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
                nfs_init_lock_context(new);
                spin_lock(&inode->i_lock);
                res = __nfs_find_lock_context(ctx);
fs/nfs/pagelist.c
index 311a79681e2b16311724e25921d922768c999026..dfd764bd943d976d945388cb5c524bd177f1fc03 100644 (file)
@@ -102,6 +102,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   unsigned int offset, unsigned int count)
 {
        struct nfs_page         *req;
+       struct nfs_lock_context *l_ctx;
 
        /* try to allocate the request struct */
        req = nfs_page_alloc();
@@ -109,11 +110,12 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                return ERR_PTR(-ENOMEM);
 
        /* get lock context early so we can deal with alloc failures */
-       req->wb_lock_context = nfs_get_lock_context(ctx);
-       if (req->wb_lock_context == NULL) {
+       l_ctx = nfs_get_lock_context(ctx);
+       if (IS_ERR(l_ctx)) {
                nfs_page_free(req);
-               return ERR_PTR(-ENOMEM);
+               return ERR_CAST(l_ctx);
        }
+       req->wb_lock_context = l_ctx;
 
        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in