[XFS] Introduce per-filesystem delwri pagebuf flushing to reduce
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ba4767c04adfbf0fc3895a04086370f9f35b4d2b..2a8acd38fa1e244d8359d3fafd316732ce087107 100644
@@ -1,46 +1,20 @@
 /*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation.
  *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like.  Any license provided herein, whether implied or
- * otherwise, applies only to this software file.  Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA  94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
-
-/*
- *     The xfs_buf.c code provides an abstract buffer cache model on top
- *     of the Linux page cache.  Cached metadata blocks for a file system
- *     are hashed to the inode for the block device.  xfs_buf.c assembles
- *     buffers (xfs_buf_t) on demand to aggregate such cached pages for I/O.
- *
- *      Written by Steve Lord, Jim Mostek, Russell Cattelan
- *                 and Rajagopal Ananthanarayanan ("ananth") at SGI.
- *
- */
-
 #include <linux/stddef.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/hash.h>
 #include <linux/kthread.h>
-
 #include "xfs_linux.h"
 
-/*
- * File wide globals
- */
-
 STATIC kmem_cache_t *pagebuf_zone;
 STATIC kmem_shaker_t pagebuf_shake;
+STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
 STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
 
 STATIC struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
 
-/*
- * Pagebuf debugging
- */
-
 #ifdef PAGEBUF_TRACE
 void
 pagebuf_trace(
@@ -112,10 +78,6 @@ ktrace_t *pagebuf_trace_buf;
 # define PB_GET_OWNER(pb)      do { } while (0)
 #endif
 
-/*
- * Pagebuf allocation / freeing.
- */
-
 #define pb_to_gfp(flags) \
        ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
          ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
@@ -123,7 +85,6 @@ ktrace_t *pagebuf_trace_buf;
 #define pb_to_km(flags) \
         (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
 
-
 #define pagebuf_allocate(flags) \
        kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
 #define pagebuf_deallocate(pb) \
@@ -181,8 +142,9 @@ set_page_region(
        size_t          offset,
        size_t          length)
 {
-       page->private |= page_region_mask(offset, length);
-       if (page->private == ~0UL)
+       set_page_private(page,
+               page_private(page) | page_region_mask(offset, length));
+       if (page_private(page) == ~0UL)
                SetPageUptodate(page);
 }
 
@@ -194,7 +156,7 @@ test_page_region(
 {
        unsigned long   mask = page_region_mask(offset, length);
 
-       return (mask && (page->private & mask) == mask);
+       return (mask && (page_private(page) & mask) == mask);
 }
 
 /*
@@ -285,7 +247,7 @@ _pagebuf_initialize(
         * most cases but may be reset (e.g. XFS recovery).
         */
        pb->pb_buffer_length = pb->pb_count_desired = range_length;
-       pb->pb_flags = flags | PBF_NONE;
+       pb->pb_flags = flags;
        pb->pb_bn = XFS_BUF_DADDR_NULL;
        atomic_set(&pb->pb_pin_count, 0);
        init_waitqueue_head(&pb->pb_waiters);
@@ -457,14 +419,8 @@ _pagebuf_lookup_pages(
                        unlock_page(bp->pb_pages[i]);
        }
 
-       if (page_count) {
-               /* if we have any uptodate pages, mark that in the buffer */
-               bp->pb_flags &= ~PBF_NONE;
-
-               /* if some pages aren't uptodate, mark that in the buffer */
-               if (page_count != bp->pb_page_count)
-                       bp->pb_flags |= PBF_PARTIAL;
-       }
+       if (page_count == bp->pb_page_count)
+               bp->pb_flags |= PBF_DONE;
 
        PB_TRACE(bp, "lookup_pages", (long)page_count);
        return error;
@@ -675,7 +631,7 @@ xfs_buf_read_flags(
 
        pb = xfs_buf_get_flags(target, ioff, isize, flags);
        if (pb) {
-               if (PBF_NOT_DONE(pb)) {
+               if (!XFS_BUF_ISDONE(pb)) {
                        PB_TRACE(pb, "read", (unsigned long)flags);
                        XFS_STATS_INC(pb_get_read);
                        pagebuf_iostart(pb, flags);
@@ -812,7 +768,7 @@ pagebuf_get_no_daddr(
        bp = pagebuf_allocate(0);
        if (unlikely(bp == NULL))
                goto fail;
-       _pagebuf_initialize(bp, target, 0, len, PBF_FORCEIO);
+       _pagebuf_initialize(bp, target, 0, len, 0);
 
  try_again:
        data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
@@ -875,39 +831,18 @@ pagebuf_rele(
 
        PB_TRACE(pb, "rele", pb->pb_relse);
 
-       /*
-        * pagebuf_lookup buffers are not hashed, not delayed write,
-        * and don't have their own release routines.  Special case.
-        */
-       if (unlikely(!hash)) {
-               ASSERT(!pb->pb_relse);
-               if (atomic_dec_and_test(&pb->pb_hold))
-                       xfs_buf_free(pb);
-               return;
-       }
-
        if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
-               int             do_free = 1;
-
                if (pb->pb_relse) {
                        atomic_inc(&pb->pb_hold);
                        spin_unlock(&hash->bh_lock);
                        (*(pb->pb_relse)) (pb);
-                       spin_lock(&hash->bh_lock);
-                       do_free = 0;
-               }
-
-               if (pb->pb_flags & PBF_FS_MANAGED) {
-                       do_free = 0;
-               }
-
-               if (do_free) {
-                       ASSERT((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == 0);
-                       list_del_init(&pb->pb_hash_list);
+               } else if (pb->pb_flags & PBF_FS_MANAGED) {
                        spin_unlock(&hash->bh_lock);
-                       pagebuf_free(pb);
                } else {
+                       ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
+                       list_del_init(&pb->pb_hash_list);
                        spin_unlock(&hash->bh_lock);
+                       pagebuf_free(pb);
                }
        } else {
                /*
@@ -1120,21 +1055,18 @@ pagebuf_iodone_work(
 void
 pagebuf_iodone(
        xfs_buf_t               *pb,
-       int                     dataio,
        int                     schedule)
 {
        pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
-       if (pb->pb_error == 0) {
-               pb->pb_flags &= ~(PBF_PARTIAL | PBF_NONE);
-       }
+       if (pb->pb_error == 0)
+               pb->pb_flags |= PBF_DONE;
 
        PB_TRACE(pb, "iodone", pb->pb_iodone);
 
        if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
                if (schedule) {
                        INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
-                       queue_work(dataio ? xfsdatad_workqueue :
-                               xfslogd_workqueue, &pb->pb_iodone_work);
+                       queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
                } else {
                        pagebuf_iodone_work(pb);
                }
@@ -1234,7 +1166,7 @@ _pagebuf_iodone(
 {
        if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
                pb->pb_locked = 0;
-               pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), schedule);
+               pagebuf_iodone(pb, schedule);
        }
 }
 
@@ -1303,6 +1235,11 @@ _pagebuf_ioapply(
                rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
        }
 
+       if (pb->pb_flags & PBF_ORDERED) {
+               ASSERT(!(pb->pb_flags & PBF_READ));
+               rw = WRITE_BARRIER;
+       }
+
        /* Special code path for reading a sub page size pagebuf in --
         * we populate up the whole page, and hence the other metadata
         * in the same page.  This optimization is only valid when the
@@ -1556,6 +1493,30 @@ xfs_free_bufhash(
        btp->bt_hash = NULL;
 }
 
+/*
+ * buftarg list for delwrite queue processing
+ */
+STATIC LIST_HEAD(xfs_buftarg_list);
+STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
+
+STATIC void
+xfs_register_buftarg(
+       xfs_buftarg_t           *btp)
+{
+       spin_lock(&xfs_buftarg_lock);
+       list_add(&btp->bt_list, &xfs_buftarg_list);
+       spin_unlock(&xfs_buftarg_lock);
+}
+
+STATIC void
+xfs_unregister_buftarg(
+       xfs_buftarg_t           *btp)
+{
+       spin_lock(&xfs_buftarg_lock);
+       list_del(&btp->bt_list);
+       spin_unlock(&xfs_buftarg_lock);
+}
+
 void
 xfs_free_buftarg(
        xfs_buftarg_t           *btp,
@@ -1566,6 +1527,12 @@ xfs_free_buftarg(
                xfs_blkdev_put(btp->pbr_bdev);
        xfs_free_bufhash(btp);
        iput(btp->pbr_mapping->host);
+
+       /* unregister the buftarg first so that we don't get a
+        * wakeup finding a non-existent task */
+       xfs_unregister_buftarg(btp);
+       kthread_stop(btp->bt_task);
+
        kmem_free(btp, sizeof(*btp));
 }
 
@@ -1655,6 +1622,26 @@ xfs_mapping_buftarg(
        return 0;
 }
 
+STATIC int
+xfs_alloc_delwrite_queue(
+       xfs_buftarg_t           *btp)
+{
+       int     error = 0;
+
+       INIT_LIST_HEAD(&btp->bt_list);
+       INIT_LIST_HEAD(&btp->bt_delwrite_queue);
+       spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
+       btp->bt_flags = 0;
+       btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+       if (IS_ERR(btp->bt_task)) {
+               error = PTR_ERR(btp->bt_task);
+               goto out_error;
+       }
+       xfs_register_buftarg(btp);
+out_error:
+       return error;
+}
+
 xfs_buftarg_t *
 xfs_alloc_buftarg(
        struct block_device     *bdev,
@@ -1670,6 +1657,8 @@ xfs_alloc_buftarg(
                goto error;
        if (xfs_mapping_buftarg(btp, bdev))
                goto error;
+       if (xfs_alloc_delwrite_queue(btp))
+               goto error;
        xfs_alloc_bufhash(btp, external);
        return btp;
 
@@ -1682,20 +1671,19 @@ error:
 /*
  * Pagebuf delayed write buffer handling
  */
-
-STATIC LIST_HEAD(pbd_delwrite_queue);
-STATIC DEFINE_SPINLOCK(pbd_delwrite_lock);
-
 STATIC void
 pagebuf_delwri_queue(
        xfs_buf_t               *pb,
        int                     unlock)
 {
+       struct list_head        *dwq = &pb->pb_target->bt_delwrite_queue;
+       spinlock_t              *dwlk = &pb->pb_target->bt_delwrite_lock;
+
        PB_TRACE(pb, "delwri_q", (long)unlock);
        ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
                                        (PBF_DELWRI|PBF_ASYNC));
 
-       spin_lock(&pbd_delwrite_lock);
+       spin_lock(dwlk);
        /* If already in the queue, dequeue and place at tail */
        if (!list_empty(&pb->pb_list)) {
                ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
@@ -1706,9 +1694,9 @@ pagebuf_delwri_queue(
        }
 
        pb->pb_flags |= _PBF_DELWRI_Q;
-       list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
+       list_add_tail(&pb->pb_list, dwq);
        pb->pb_queuetime = jiffies;
-       spin_unlock(&pbd_delwrite_lock);
+       spin_unlock(dwlk);
 
        if (unlock)
                pagebuf_unlock(pb);
@@ -1718,16 +1706,17 @@ void
 pagebuf_delwri_dequeue(
        xfs_buf_t               *pb)
 {
+       spinlock_t              *dwlk = &pb->pb_target->bt_delwrite_lock;
        int                     dequeued = 0;
 
-       spin_lock(&pbd_delwrite_lock);
+       spin_lock(dwlk);
        if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
                ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
                list_del_init(&pb->pb_list);
                dequeued = 1;
        }
        pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
-       spin_unlock(&pbd_delwrite_lock);
+       spin_unlock(dwlk);
 
        if (dequeued)
                pagebuf_rele(pb);
@@ -1742,21 +1731,22 @@ pagebuf_runall_queues(
        flush_workqueue(queue);
 }
 
-/* Defines for pagebuf daemon */
-STATIC struct task_struct *xfsbufd_task;
-STATIC int xfsbufd_force_flush;
-STATIC int xfsbufd_force_sleep;
-
 STATIC int
 xfsbufd_wakeup(
-       int             priority,
-       gfp_t           mask)
+       int                     priority,
+       gfp_t                   mask)
 {
-       if (xfsbufd_force_sleep)
-               return 0;
-       xfsbufd_force_flush = 1;
-       barrier();
-       wake_up_process(xfsbufd_task);
+       xfs_buftarg_t           *btp, *n;
+
+       spin_lock(&xfs_buftarg_lock);
+       list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) {
+               if (test_bit(BT_FORCE_SLEEP, &btp->bt_flags))
+                       continue;
+               set_bit(BT_FORCE_FLUSH, &btp->bt_flags);
+               barrier();
+               wake_up_process(btp->bt_task);
+       }
+       spin_unlock(&xfs_buftarg_lock);
        return 0;
 }
 
@@ -1766,31 +1756,34 @@ xfsbufd(
 {
        struct list_head        tmp;
        unsigned long           age;
-       xfs_buftarg_t           *target;
+       xfs_buftarg_t           *target = (xfs_buftarg_t *)data;
        xfs_buf_t               *pb, *n;
+       struct list_head        *dwq = &target->bt_delwrite_queue;
+       spinlock_t              *dwlk = &target->bt_delwrite_lock;
 
        current->flags |= PF_MEMALLOC;
 
        INIT_LIST_HEAD(&tmp);
        do {
                if (unlikely(freezing(current))) {
-                       xfsbufd_force_sleep = 1;
+                       set_bit(BT_FORCE_SLEEP, &target->bt_flags);
                        refrigerator();
                } else {
-                       xfsbufd_force_sleep = 0;
+                       clear_bit(BT_FORCE_SLEEP, &target->bt_flags);
                }
 
-               schedule_timeout_interruptible
-                       (xfs_buf_timer_centisecs * msecs_to_jiffies(10));
+               schedule_timeout_interruptible(
+                       xfs_buf_timer_centisecs * msecs_to_jiffies(10));
 
                age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
-               spin_lock(&pbd_delwrite_lock);
-               list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
+               spin_lock(dwlk);
+               list_for_each_entry_safe(pb, n, dwq, pb_list) {
                        PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
                        ASSERT(pb->pb_flags & PBF_DELWRI);
 
                        if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
-                               if (!xfsbufd_force_flush &&
+                               if (!test_bit(BT_FORCE_FLUSH,
+                                               &target->bt_flags) &&
                                    time_before(jiffies,
                                                pb->pb_queuetime + age)) {
                                        pagebuf_unlock(pb);
@@ -1802,11 +1795,11 @@ xfsbufd(
                                list_move(&pb->pb_list, &tmp);
                        }
                }
-               spin_unlock(&pbd_delwrite_lock);
+               spin_unlock(dwlk);
 
                while (!list_empty(&tmp)) {
                        pb = list_entry(tmp.next, xfs_buf_t, pb_list);
-                       target = pb->pb_target;
+                       ASSERT(target == pb->pb_target);
 
                        list_del_init(&pb->pb_list);
                        pagebuf_iostrategy(pb);
@@ -1817,7 +1810,7 @@ xfsbufd(
                if (as_list_len > 0)
                        purge_addresses();
 
-               xfsbufd_force_flush = 0;
+               clear_bit(BT_FORCE_FLUSH, &target->bt_flags);
        } while (!kthread_should_stop());
 
        return 0;
@@ -1836,17 +1829,17 @@ xfs_flush_buftarg(
        struct list_head        tmp;
        xfs_buf_t               *pb, *n;
        int                     pincount = 0;
+       struct list_head        *dwq = &target->bt_delwrite_queue;
+       spinlock_t              *dwlk = &target->bt_delwrite_lock;
 
        pagebuf_runall_queues(xfsdatad_workqueue);
        pagebuf_runall_queues(xfslogd_workqueue);
 
        INIT_LIST_HEAD(&tmp);
-       spin_lock(&pbd_delwrite_lock);
-       list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
-
-               if (pb->pb_target != target)
-                       continue;
+       spin_lock(dwlk);
+       list_for_each_entry_safe(pb, n, dwq, pb_list) {
 
+               ASSERT(pb->pb_target == target);
                ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
                PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
                if (pagebuf_ispin(pb)) {
@@ -1856,7 +1849,7 @@ xfs_flush_buftarg(
 
                list_move(&pb->pb_list, &tmp);
        }
-       spin_unlock(&pbd_delwrite_lock);
+       spin_unlock(dwlk);
 
        /*
         * Dropped the delayed write list lock, now walk the temporary list
@@ -1890,100 +1883,54 @@ xfs_flush_buftarg(
        return pincount;
 }
 
-STATIC int
-xfs_buf_daemons_start(void)
+int __init
+pagebuf_init(void)
 {
        int             error = -ENOMEM;
 
+#ifdef PAGEBUF_TRACE
+       pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
+#endif
+
+       pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
+       if (!pagebuf_zone)
+               goto out_free_trace_buf;
+
        xfslogd_workqueue = create_workqueue("xfslogd");
        if (!xfslogd_workqueue)
-               goto out;
+               goto out_free_buf_zone;
 
        xfsdatad_workqueue = create_workqueue("xfsdatad");
        if (!xfsdatad_workqueue)
                goto out_destroy_xfslogd_workqueue;
 
-       xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
-       if (IS_ERR(xfsbufd_task)) {
-               error = PTR_ERR(xfsbufd_task);
+       pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
+       if (!pagebuf_shake)
                goto out_destroy_xfsdatad_workqueue;
-       }
+
        return 0;
 
  out_destroy_xfsdatad_workqueue:
        destroy_workqueue(xfsdatad_workqueue);
  out_destroy_xfslogd_workqueue:
        destroy_workqueue(xfslogd_workqueue);
- out:
-       return error;
-}
-
-/*
- * Note: do not mark as __exit, it is called from pagebuf_terminate.
- */
-STATIC void
-xfs_buf_daemons_stop(void)
-{
-       kthread_stop(xfsbufd_task);
-       destroy_workqueue(xfslogd_workqueue);
-       destroy_workqueue(xfsdatad_workqueue);
-}
-
-/*
- *     Initialization and Termination
- */
-
-int __init
-pagebuf_init(void)
-{
-       int             error = -ENOMEM;
-
-       pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
-       if (!pagebuf_zone)
-               goto out;
-
-#ifdef PAGEBUF_TRACE
-       pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
-#endif
-
-       error = xfs_buf_daemons_start();
-       if (error)
-               goto out_free_buf_zone;
-
-       pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
-       if (!pagebuf_shake) {
-               error = -ENOMEM;
-               goto out_stop_daemons;
-       }
-
-       return 0;
-
- out_stop_daemons:
-       xfs_buf_daemons_stop();
  out_free_buf_zone:
+       kmem_zone_destroy(pagebuf_zone);
+ out_free_trace_buf:
 #ifdef PAGEBUF_TRACE
        ktrace_free(pagebuf_trace_buf);
 #endif
-       kmem_zone_destroy(pagebuf_zone);
- out:
        return error;
 }
 
-
-/*
- *     pagebuf_terminate.
- *
- *     Note: do not mark as __exit, this is also called from the __init code.
- */
 void
 pagebuf_terminate(void)
 {
-       xfs_buf_daemons_stop();
-
+       kmem_shake_deregister(pagebuf_shake);
+       destroy_workqueue(xfsdatad_workqueue);
+       destroy_workqueue(xfslogd_workqueue);
+       kmem_zone_destroy(pagebuf_zone);
 #ifdef PAGEBUF_TRACE
        ktrace_free(pagebuf_trace_buf);
 #endif
-
-       kmem_zone_destroy(pagebuf_zone);
-       kmem_shake_deregister(pagebuf_shake);
 }
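
The hunks above use several xfs_buftarg_t fields (bt_list, bt_delwrite_queue, bt_delwrite_lock, bt_flags, bt_task) whose definitions live in xfs_buf.h and are not part of this diff. The following is only a sketch of what those per-target additions presumably look like; the field names are taken from the usage above, while the layout, the enum values, and the rest of the struct are assumptions.

```c
/*
 * Sketch only (assumed): per-target delwri fields this patch relies on.
 * The real definitions are in xfs_buf.h, which is not shown in this diff.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

enum {
	BT_FORCE_SLEEP,		/* xfsbufd parked in the refrigerator */
	BT_FORCE_FLUSH,		/* memory pressure: flush regardless of buffer age */
};

typedef struct xfs_buftarg {
	/* ... existing fields: pbr_bdev, pbr_mapping, bt_hash, ... */
	struct list_head	bt_list;		/* entry on global xfs_buftarg_list */
	struct list_head	bt_delwrite_queue;	/* this target's delayed-write buffers */
	spinlock_t		bt_delwrite_lock;	/* protects bt_delwrite_queue */
	unsigned long		bt_flags;		/* BT_FORCE_* bits, used with test_bit/set_bit */
	struct task_struct	*bt_task;		/* this target's xfsbufd thread */
} xfs_buftarg_t;
```

Keeping the delayed-write queue, its lock, and the flusher thread per buffer target means each mounted filesystem flushes its own delwri metadata without contending on the previous single global list and lock.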