/*
 * drivers/staging/lustre/lustre/llite/vvp_lock.c
 * (as of commit d7e09d03 "staging: lustre: remove RETURN macro")
 */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Vvp lock functions.
 *
 */
52
53/**
54 * Estimates lock value for the purpose of managing the lock cache during
55 * memory shortages.
56 *
57 * Locks for memory mapped files are almost infinitely precious, others are
58 * junk. "Mapped locks" are heavy, but not infinitely heavy, so that they are
59 * ordered within themselves by weights assigned from other layers.
60 */
61static unsigned long vvp_lock_weigh(const struct lu_env *env,
62 const struct cl_lock_slice *slice)
63{
64 struct ccc_object *cob = cl2ccc(slice->cls_obj);
65
0a3bdb00 66 return atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0;
d7e09d03
PT
67}
68
69static const struct cl_lock_operations vvp_lock_ops = {
70 .clo_delete = ccc_lock_delete,
71 .clo_fini = ccc_lock_fini,
72 .clo_enqueue = ccc_lock_enqueue,
73 .clo_wait = ccc_lock_wait,
74 .clo_unuse = ccc_lock_unuse,
75 .clo_fits_into = ccc_lock_fits_into,
76 .clo_state = ccc_lock_state,
77 .clo_weigh = vvp_lock_weigh
78};
79
80int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
81 struct cl_lock *lock, const struct cl_io *io)
82{
83 return ccc_lock_init(env, obj, lock, io, &vvp_lock_ops);
84}